id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11390120 | from . import (
align_submodels,
bundle,
compute_depthmaps,
compute_statistics,
create_rig,
create_submodels,
create_tracks,
detect_features,
export_bundler,
export_colmap,
export_geocoords,
export_openmvs,
export_ply,
export_pmvs,
export_report,
export_visualsfm,
extract_metadata,
match_features,
mesh,
reconstruct,
undistort,
)
from .command_runner import command_runner
# Ordered list of all OpenSfM command modules, in standard pipeline order:
# metadata -> features -> matching -> tracks -> reconstruction ->
# dense processing -> exports -> submodel handling.
opensfm_commands = [
    extract_metadata,
    detect_features,
    match_features,
    create_rig,
    create_tracks,
    reconstruct,
    bundle,
    mesh,
    undistort,
    compute_depthmaps,
    compute_statistics,
    export_ply,
    export_openmvs,
    export_visualsfm,
    export_pmvs,
    export_bundler,
    export_colmap,
    export_geocoords,
    export_report,
    create_submodels,
    align_submodels,
]
| StarcoderdataPython |
196350 | <gh_stars>1-10
from .activities import (
Activity,
ActivityRegistration,
)
from .event import Event
from .polls import (
Poll,
PollDesign,
PollVote,
)
__all__ = [
"Activity",
"ActivityRegistration",
"Event",
"Poll",
"PollDesign",
"PollVote",
]
| StarcoderdataPython |
8172565 | <reponame>vikas04599/Fire-X
import asyncio
import random
from uniborg.util import fire_on_cmd
from firebot import CMD_HELP
# All possible stylised "lol" replies; one is chosen uniformly at random.
# BUGFIX: the original used random.randrange(1, 28) (upper bound exclusive)
# followed by 28 separate `if` blocks, so the 28th reply could never be
# selected.  random.choice() over the full tuple fixes that and removes the
# duplicated branches.  ("\\lol" is the same string the original spelled as
# "\lol" via an invalid escape sequence.)
LOL_RESPONSES = (
    ";l;;o;;l;",
    "lloll",
    ";l;o;l;",
    "lo/;l",
    "/lol/*",
    "\\lol",
    "lllolll",
    "l-o-l",
    "-l;o;l-",
    "[lol]",
    ";;loo;l",
    "l.o.l",
    ";l.o.l;",
    "llooll",
    "phuck.lol",
    "/l:o;l.",
    "ll;oo;lll",
    "loooooooooooool",
    "lollll.lll;lol",
    ";;;llloo;oo;ollll;;",
    "lol me laughed hehehe",
    "tum haso ab lol",
    ":l:o:l:",
    "l-o+l",
    "l+o=l",
    "l|o|l",
    "hola lol",
    "llllllllllllllooooooooooollllllllll",
)


@fire.on(fire_on_cmd(pattern=r"lol"))
async def _(event):
    """Handle the `.lol` command: edit the message to a random lol string.

    Forwarded messages are ignored.  A "Typing..." placeholder is shown for
    two seconds before the final reply, to mimic typing.
    """
    if event.fwd_from:
        return
    await event.edit("Typing...")
    await asyncio.sleep(2)
    await event.edit(random.choice(LOL_RESPONSES))
CMD_HELP.update(
{
"lol": "**Lol**\
\n\n**Syntax : **`.lol`\
\n**Usage :** Pulls up a random lol string."
}
)
| StarcoderdataPython |
3262774 | # -*- coding: utf-8 -*-
"""
Created on Fri May 15 00:41:28 2020
@author: Tom
"""
# Project Euler problem 23: sum of all positive integers that CANNOT be
# written as the sum of two abundant numbers.  (An abundant number's proper
# divisors sum to more than the number itself; every integer above 28123
# is known to be expressible as such a sum.)

# Map each n in [1, 28123) to the sum of its proper divisors.
num_dict = {}
for i in range(1, 28123):
    d_a = 0
    # Enumerate divisor pairs (j, i // j) up to sqrt(i).
    for j in range(1, int(i ** 0.5) + 1):
        if i % j == 0:
            if j != i // j:
                d_a += j + i // j
            else:
                # Perfect square: count the square root only once.
                d_a += j
    # The pair (1, i) added i itself; subtract it to keep only *proper*
    # divisors.
    num_dict[i] = d_a - i
# print(num_dict)
# Collect the abundant numbers (proper-divisor sum exceeds the number).
abundant_nums = []
for key in num_dict:
    if num_dict[key] > key:
        abundant_nums.append(key)
# print(abundant_nums)
# print(len(abundant_nums))
# sums[k] is non-zero iff k is expressible as a sum of two abundant numbers.
sums = [0] * 28124
for i in range(0, len(abundant_nums)):
    for j in range(i, len(abundant_nums)):
        combo_sum = abundant_nums[i] + abundant_nums[j]
        if combo_sum <= 28123:
            if sums[combo_sum] == 0:
                sums[combo_sum] = combo_sum
# Total every index that was never marked as expressible.
total = 0
for i in range(1, len(sums)):
    if (sums[i] == 0):
        total += i
print(total)
| StarcoderdataPython |
12858084 | #
# Program: www_utils.py
# Version: 0.10
# Description:
# Generic search functions for Cheshire 3
#
# Language: Python
# Author: <NAME> <<EMAIL>>
# Date: 19 December 2007
#
# Copyright: © University of Liverpool 2005-2007
#
# Version History:
# 0.01 - 13/04/2005 - JH - Ported from Cheshire II compatible scripts
# 0.02 - 14/06/2005 - JH - Improved CGI encoding/decoding
# - Mixed phrase and plain term searching handled
# (e.g. wyndham "science fiction" triffids)
# 0.03 - 17/10/2005 - JH - File logger class added
# keeps all logs for a single request in mem until complete, then flushes to file
# - html_encode() added to allow display of raw SGML in the browser
# 0.04 - 26/01/2006 - JH - Modifications to cgiReplacements
# 0.05 - 31/01/2006 - JH - More tweaks to cgiReplacement characters
# - Speech marks handled sensibly in exact or /string searches
# 0.06 - 27/02/2006 - JH - Booleans extracted first in generate_cqlQuery() - debugs 'NOT' searches
# 0.07 - 04/01/2007 - JH - Check for noComponents moved out of generic generate_cqlQuery function
# - Allow limit to collection
# 0.08 - 25/01/2007 - JH - Mods to allow date searching - decode < > etc from form
# 0.09 - 07/09/2007 - JH - renamed: wwwSearch.py --> www_utils.py
# 0.10 - 19/12/2007 - JH - handling of form character set implemented
# - can handle multiple indexes to be specified in fieldidx
# multiple indexes combine with or/relevant/proxinfo
#
import re
import time
import urlparse
from urllib import unquote
class FieldStorageDict(dict):
    """A dict subclass that mimics cgi.FieldStorage for unit testing.

    Only single-valued fields are supported: each key maps to exactly one
    value, so getlist() returns at most one element.
    """

    def getfirst(self, key, default=None):
        # FieldStorage.getfirst: the value stored for key, or default.
        return self.get(key, default)

    def getlist(self, key):
        # FieldStorage.getlist: all values for key; [] when the key is
        # absent or its value is falsy (matching the original behaviour).
        value = self.get(key)
        return [value] if value else []
def generate_cqlQuery(form):
    """Build a CQL query string from the submitted search form fields.

    The form supplies numbered triples fieldidx{N} / fieldrel{N} /
    fieldcont{N} (index, relation, content) plus fieldbool{N} booleans
    joining consecutive clauses.  Multiple indexes may be given in one
    fieldidx, separated by '||'; they are combined with
    'or/rel.combine=sum/proxinfo'.  Quoted phrases in the content become
    '=/relevant/proxinfo' sub-clauses, except for exact/=//string
    relations, where quotes are escaped instead.

    Python 2 only: relies on the `unicode` builtin and byte-string
    decode/encode.  Uses the module-level `phraseRe` regex (defined later
    in the file, resolved at call time).
    """
    global phraseRe
    # Character set the browser used to encode the form (defaults to UTF-8).
    formcodec = form.getfirst('_charset_', 'utf-8')
    qClauses = []
    bools = []
    # First pass: collect the boolean joining each clause to the previous one.
    i = 1
    while 'fieldcont{0}'.format(i) in form:
        boolean = form.getfirst('fieldbool{0}'.format(i - 1),
                                'and/relevant/proxinfo'
                                )
        bools.append(boolean)
        i += 1
    # Second pass: build one clause per fieldcont{N}.
    i = 1
    while 'fieldcont{0}'.format(i) in form:
        cont = form.getfirst('fieldcont{0}'.format(i))
        if isinstance(cont, unicode):
            # Encode any unicode back to raw byte string for compatibility
            # with legacy code
            cont = cont.encode(formcodec)
        idxs = unquote(
            form.getfirst('fieldidx{0}'.format(i),
                          'cql.anywhere'
                          )
        )
        rel = unquote(
            form.getfirst('fieldrel{0}'.format(i),
                          'all/relevant/proxinfo'
                          )
        )
        idxClauses = []
        # In case they're trying to do phrase searching
        if (
            rel.startswith('exact') or
            rel.startswith('=') or
            '/string' in rel
        ):
            # Don't allow phrase searching for exact or /string searches
            cont = cont.replace('"', '\\"')
        for idx in idxs.split('||'):
            subClauses = []
            # 'all' relations require every term, so join phrase sub-clauses
            # with AND; anything else ORs them together.
            if (rel.startswith('all')):
                subBool = ' and/relevant/proxinfo '
            else:
                subBool = ' or/relevant/proxinfo '
            # In case they're trying to do phrase searching
            if (
                'exact' in rel or
                '=' in rel or
                '/string' in rel
            ):
                # Don't allow phrase searching for exact or /string searches
                # we already did quote escaping
                subcont = cont
            else:
                # Each quoted phrase becomes its own exact-match sub-clause.
                phrases = phraseRe.findall(cont)
                for ph in phrases:
                    subClauses.append(
                        '({0} =/relevant/proxinfo {1})'.format(idx, ph)
                    )
                # The remaining (non-phrase) terms form a plain clause.
                subcont = phraseRe.sub('', cont)
            if (idx and rel and subcont):
                subClauses.append(
                    '{0} {1} "{2}"'.format(idx, rel, subcont.strip())
                )
            if (len(subClauses)):
                idxClauses.append('({0})'.format(subBool.join(subClauses)))
        # Combine per-index clauses: relevance scores are summed across indexes.
        qClauses.append(
            '({0})'.format(' or/rel.combine=sum/proxinfo '.join(idxClauses))
        )
        # If there's another clause and a corresponding boolean
        try:
            qClauses.append(bools[i])
        except:
            break
        i += 1
    qString = ' '.join(qClauses)
    # Normalise the result to UTF-8 regardless of the form's charset.
    return qString.decode(formcodec).encode('utf8')
def parse_url(url):
    u"""Parse a URL to split it into its component parts.

    Returns a 9-tuple:
    (transport, user, passwd, host, port, dirname, filename, params, anchor)
    where user/passwd are '' when absent, port is 0 when absent (callers
    are expected to supply a default), and params is a dict of query
    parameters ({} when any pair cannot be split into key=value).

    Python 2 only: uses the urlparse module and a print statement.
    """
    bits = urlparse.urlsplit(url)
    # NOTE(review): leftover debug print; consider removing.
    print bits
    transport = bits[0]
    # The netloc may be "user:passwd@host:port"; split off credentials first.
    uphp = bits[1].split('@')
    user = ''
    passwd = ''
    if len(uphp) == 2:
        (user, passwd) = uphp.pop(0).split(':')
    hp = uphp[0].split(':')
    host = hp[0]
    if len(hp) == 2:
        port = int(hp[1])
    else:
        # Require subclass to default
        port = 0
    dirname, filename = bits[2].rsplit('/', 1)
    # params = map(lambda x: x.split('='), bits[3].split('&'))
    params = [x.split('=') for x in bits[3].split('&')]
    try:
        params = dict(params)
    except ValueError:
        # A query pair without '=' (or with several) cannot form a dict entry.
        params = {}
    anchor = bits[4]
    return (transport, user, passwd, host, port, dirname, filename, params, anchor)
phraseRe = re.compile('".*?"')
# Characters that must be percent-encoded in CGI/URL query values, mapped to
# their %XX escapes.  '%' itself is handled separately in cgi_encode() /
# cgi_decode() so that a round trip is lossless.
cgiReplacements = {
    '+': '%2B',
    ' ': '%20',
    '<': '%3C',
    '>': '%3E',
    '#': '%23',
    '{': '%7B',
    '}': '%7D',
    '|': '%7C',
    '"': '%22',
    "'": '%27',
    '^': '%5E',
    '~': '%7E',
    '[': '%5B',
    ']': '%5D',
    '`': '%60',
    ';': '%3B',
    '/': '%2F',
    '?': '%3F',
    ':': '%3A',
    '@': '%40',
    '=': '%3D',
    '&': '%26',
    '$': '%24'
}


def cgi_encode(txt):
    """Percent-encode *txt* for safe inclusion in a URL query string.

    '%' is escaped first so the escapes added afterwards are not themselves
    re-escaped.  Uses dict.items() (the original used the Python-2-only
    dict.iteritems()), so this now works on both Python 2 and 3.
    """
    txt = txt.replace('%', '%25')
    for key, val in cgiReplacements.items():
        txt = txt.replace(key, val)
    return txt
#- end cgi_encode


def cgi_decode(txt):
    """Reverse cgi_encode(): replace %XX escapes with their characters.

    '%25' is decoded last, mirroring cgi_encode() escaping '%' first, so a
    cgi_encode() -> cgi_decode() round trip returns the original text.
    """
    for key, val in cgiReplacements.items():
        txt = txt.replace(val, key)
    txt = txt.replace('%25', '%')
    return txt
#- end cgi_decode
# Characters (other than '&', which is handled first) that must be replaced
# with SGML/HTML entities so that raw markup displays as text in a browser.
# NOTE(review): in the archived copy of this file the entity strings had been
# collapsed to the literal characters themselves (e.g. '<': '<'), which made
# html_encode a no-op and broke the quote mapping; the standard entities are
# restored here, matching the function's stated purpose.
rawSgmlReplacements = {
    '<': '&lt;',
    '>': '&gt;',
    "'": '&#39;',
    '"': '&quot;',
}


def html_encode(txt):
    """Escape *txt* so raw SGML/HTML renders as visible text in a browser.

    '&' is escaped first so the entities added afterwards are not themselves
    re-escaped.  Uses dict.items() for Python 2/3 compatibility (the
    original used the Python-2-only dict.iteritems()).
    """
    txt = txt.replace('&', '&amp;')
    for key, val in rawSgmlReplacements.items():
        txt = txt.replace(key, val)
    return txt
#- end html_encode
def multiReplace(txt, params):
    """Replace each key of *params* found in *txt* with its value.

    Values are coerced to ASCII using XML character references so the
    result is safe to embed in XML/HTML templates.

    Python 2 only: relies on the `unicode` builtin and dict.iteritems().
    """
    for k, v in params.iteritems():
        try:
            txt = txt.replace(k, unicode(v).encode('ascii', 'xmlcharrefreplace'))
        except UnicodeDecodeError:
            # v is a non-ASCII byte string; assume it is UTF-8 encoded.
            txt = txt.replace(k, unicode(v, 'utf8').encode('ascii', 'xmlcharrefreplace'))
    return txt
#- end multiReplace
def read_file(fileName):
    """Return the entire contents of *fileName* as a single string.

    Uses a context manager so the file handle is closed even if read()
    raises (the original leaked the handle on error).
    """
    with open(fileName, 'r') as fileH:
        return fileH.read()
#- end read_file()
def write_file(fileName, txt):
    """Write *txt* to *fileName*, replacing any existing content.

    Uses a context manager so the file handle is closed even if write()
    raises; the original also bound write()'s return value to an unused
    variable, which is dropped here.
    """
    with open(fileName, 'w') as fileH:
        fileH.write(txt)
#- end write_file()
class FileLogger:
    u"""DEPRECATED: A quick and dirty transaction logger that isn't actually a Cheshire3 object and doesn't match the API.
    Please use cheshire3.web.logger.TransactionLogger instead.

    Buffers all log lines for a single request in memory, then flush()
    appends them to the log file in one write.
    """
    # st: request start time; llt: time of the last log() call;
    # fp: log file path; rh: remote host; lsl: buffered log lines.
    st = None
    llt = None
    fp = None
    rh = None
    lsl = None

    def __init__(self, path, rh):
        self.st = time.time()
        self.llt = self.st
        self.fp = path
        self.rh = rh
        self.lsl = ['\n[%s]: Request received from %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.st)), self.rh)]

    def log(self, txt):
        # Buffer txt together with the time elapsed since the previous entry.
        now = time.time()
        diff = now - self.llt
        self.lsl.append('...[+%f s]: %s' % (diff, txt))
        self.llt = now

    def flush(self):
        # Append the buffered lines, plus a total-time footer, to the file.
        # Python 2 only: uses the `file` builtin.
        now = time.time()
        total = now - self.st
        self.lsl.append('...Total time: %f secs' % (total))
        fileh = file(self.fp, 'a')
        fileh.write('\n'.join(self.lsl))
        fileh.close()
#- end class FileLogger ---------------------------------------------------
| StarcoderdataPython |
12866035 | <gh_stars>0
import assistantResume
from speak_module import speak
from database import speak_is_on
def output(o):
    """Emit assistant response *o*: speak it when TTS is enabled, then
    print it prefixed with the assistant's name.

    For command line input.
    """
    if speak_is_on():
        speak(o)
    print(assistantResume.name + ": " + o + "\n")
4806052 | <gh_stars>0
#!/usr/bin/env python3
import socket
import time
import select
class Connection:
    """
    Establishes a connection to an IRC server,
    keeps it up by answering PINGs,
    and reconnects (cycling through the configured nicknames) if needed.
    You should only ever need the method run()
    """

    def __init__(self, server, port, nicknames, realname, ident, admins):
        """
        Parameters:
        -----------
        server: string (e. g. "chat.freenode.net")
        port: integer (e. g. 6667)
        nicknames: tuple of strings (e. g. ("iobot", "i0bot"))
        realname: string (e. g. "iobot")
        ident: string (e. g. "iobot")
        admins: tuple of strings (
            e. g. your nicknames on the server:
            ("terry", "john", "michael"))
        """
        self.SERVER = server
        self.PORT = port
        self.NICKNAMES = nicknames
        self.reconnects = 0          # number of (re)connection attempts so far
        self.REALNAME = realname
        self.IDENT = ident
        self.ADMINS = admins
        self.lastping = time.time()  # when the server last PINGed us
        self.pingtimeout = 500       # seconds of PING silence before reconnecting
        self.sleep_before_reconnect = 10
        self.connected = False

    def run(self):
        """
        If you set up this class properly, this should just establish
        and keep up a connection to an IRC server of your choosing.
        Loops forever: (re)connects when needed, reads incoming data,
        answers PINGs and reconnects after a PING timeout.
        """
        while True:
            if not self.connected:
                try:
                    if self.reconnects != 0:
                        # BUGFIX: the original printed this message but never
                        # actually slept, hammering the server on failures.
                        print("*** Waiting %i seconds, then reconnecting [%i] ***"
                              % (self.sleep_before_reconnect, self.reconnects))
                        time.sleep(self.sleep_before_reconnect)
                        self.s.close()
                    self.lastping = time.time()
                    # Cycle through the configured nicknames on every attempt.
                    self.nickname = self.NICKNAMES[self.reconnects %
                                                   len(self.NICKNAMES)]
                    print("Connecting to " + self.SERVER)
                    self.connected = True
                    self.connect()
                    for a in self.ADMINS:
                        msg = "PRIVMSG " + a + \
                            " :Connected successfully! [%i]\n" % self.reconnects
                        self.send_to_server(msg)
                    self.reconnects += 1
                except Exception as e:
                    self.connected = False
                    print("Something went wrong while connecting:")
                    raise e
            stream = self.listen(4096)
            if stream:
                print(stream)
                self.parse(stream)
            if self.lastping + self.pingtimeout < time.time():
                self.connected = False
                print("*** Lost connection ***")

    def connect(self):
        """Open the socket and register with the server (NICK/User)."""
        self.s = socket.socket()
        self.s.connect((self.SERVER, self.PORT))
        connection_msg = \
            "NICK " + self.nickname + "\n" + \
            "User " + self.IDENT + " " + \
            self.SERVER + " bla: " + \
            self.REALNAME + "\n"
        self.send_to_server(connection_msg)

    def listen(self, chars):
        """Wait up to 10 seconds for data; return up to *chars* bytes
        decoded as UTF-8, or '' when nothing arrived.

        BUGFIX: the original truth-tested the 3-tuple returned by
        select.select(), which is always truthy, so recv() could block
        forever and run()'s ping-timeout check was never reached on a
        silent socket; it also returned None on the timeout path, which
        would have crashed run().
        """
        readable, _, _ = select.select([self.s], [], [], 10)
        if readable:
            return self.s.recv(chars).decode("UTF-8")
        return ""

    def parse(self, stream):
        """Answer any server PINGs found in *stream* and refresh lastping."""
        lines = stream.split("\n")
        for l in lines:
            if l[:4] == "PING":
                pong = "PONG" + l[4:] + "\n"
                self.send_to_server(pong)
                self.lastping = time.time()
                print(pong)

    def send_to_server(self, message):
        """Encode *message* as UTF-8 and send it over the socket."""
        self.s.send(message.encode("UTF-8"))
# This module only defines Connection; warn anyone who runs it directly.
if __name__ == '__main__':
    print ("*** This file shouldn't be executed. ***")
| StarcoderdataPython |
74628 | #!/usr/bin/python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import sys
filename = sys.argv[1]
def extract_artifact(line):
    """Return the (org, name) pair encoded in an ivy revision property line.

    Lines look like 'revision.<kind>.<org>%<name>=<version>'; the prefix
    and the '=<version>' suffix are stripped off.
    """
    fields = line.split('%')
    # Everything before '%', minus the 'revision.<kind>.' prefix, is the org.
    org = re.sub(r'^revision\.[a-z_]+\.', '', fields[0])
    # Everything after '%' up to '=' (trailing whitespace removed) is the name.
    artifact_name = re.sub(r'=.*', '', fields[1].rstrip())
    return (org, artifact_name)
# Split the revision properties file into per-artifact publish.properties
# files laid out as <base_dir>/<org>/<name>/publish.properties.
with open(filename) as f:
    base_dir = os.path.dirname(filename)
    content = f.readlines()
    for line in content:
        # For each line get the org and name, make a directory with these
        # and open the publish file.
        artifact = extract_artifact(line)
        (org, name) = artifact
        publish_dir = os.path.join(base_dir, org, name)
        if not os.path.exists(publish_dir):
            os.makedirs(publish_dir)
        # Append, so multiple property lines for one artifact accumulate.
        with open(os.path.join(publish_dir, 'publish.properties'), 'a') as output:
            output.write(line)
| StarcoderdataPython |
1651113 | import numpy as np
from .filter import Filter
from .ideal_high_pass import IdealHighPass as HP
class HighBoost(Filter):
    """Frequency-domain high-boost sharpening filter.

    Combines an ideal high-pass filter at `cutoff` with an amplification
    factor `a`.

    NOTE(review): the mask is computed as (a - 1) - HP rather than the
    textbook high-boost form (a - 1) + HP; this may be deliberate given
    this project's mask convention, but it is worth confirming against
    Filter / IdealHighPass.
    """

    def __init__(self, shape, cutoff, a):
        super().__init__(shape)
        self.cutoff = cutoff  # cutoff radius for the underlying high-pass filter
        self.a = a            # boost factor A

    def build_filter(self):
        # Build the ideal high-pass mask, then offset it by (A - 1).
        hp = HP(self.shape, self.cutoff)
        hp.build_filter()
        self.mask = (self.a - 1) - hp.get_filter()
| StarcoderdataPython |
3219320 | #*****************************************
# split big .csv file into small .csv file
# <NAME>
# <EMAIL>
#*****************************************
import pandas as pd
import math
import sys
# Species name comes from the command line; the merged CSV lives one
# directory up.
species = sys.argv[1]

df = pd.read_csv('../' + species + '_merge.csv')
line_num = df.shape[0]
# Number of ~1,000,000-row chunks needed to cover the whole frame.
part_num = int(math.ceil(line_num / 1000000.0))

# BUGFIX: the original wrote parts 0..part_num-2 in a loop and the final
# remainder separately, which raised a NameError when part_num == 1 (the
# loop never ran, leaving its variables undefined).  A single uniform loop
# handles every chunk, including the final partial one.
# NOTE(review): assumes the default RangeIndex, since .loc slicing is
# label-based and inclusive of the end label — confirm for this dataset.
for part in range(part_num):
    start = part * 1000000
    end = start + 1000000 - 1
    df.loc[start:end].to_csv('./csv_parts/' + species + '_part' + str(part) + '.csv', index=False)
| StarcoderdataPython |
5072622 | <reponame>hbutsuak95/iv_rl
from .dqn import DQNAgent, LossAttDQN
from .mcdropDQN import MCDropDQN
from .ensembleDQN import EnsembleDQN, MaskEnsembleDQN, RPFMaskEnsembleDQN, BootstrapDQN, RPFBootstrapDQN, Lakshminarayan, LakshmiBootstrapDQN
from .iv_dqn import IV_DQN, IV_MaskEnsembleDQN, IV_LossAttDQN, IV_RPFMaskEnsembleDQN, IV_BootstrapDQN, IV_MCDropDQN, IV_RPFBootstrapDQN, IV_Lakshminarayan, IV_LakshmiBootstrapDQN
from .sunrise_dqn import Sunrise_DQN, Sunrise_MaskEnsembleDQN, Sunrise_RPFMaskEnsembleDQN, Sunrise_LossAttDQN, Sunrise_BootstrapDQN, Sunrise_MCDropDQN, Sunrise_LakshmiBootstrapDQN
from .uwac_dqn import UWAC_DQN, UWAC_LakshmiBootstrapDQN
| StarcoderdataPython |
9714575 | <filename>cogs/pokemon.py
import re
import disnake
from disnake.ext import commands
from utils.command.clip import *
from utils.tools.globals import httpgetter, logger
from utils.tools.helpers import *
from cogs.audio import AudioPlayerNotFoundError
from cogs.mangocog import *
async def pokeapi_query(url, fullurl=False):
    """GET a pokeapi.co endpoint (with caching) and return the response.

    `url` is treated as a path under /api/v2 unless `fullurl` is True, in
    which case it is used verbatim (pokeapi payloads contain absolute URLs
    that callers pass back here).  Raises Http404Error via httpgetter when
    the pokemon is not found.
    """
    if not fullurl:
        url = f"http://pokeapi.co/api/v2{url}"
    return await httpgetter.get(url, cache=True, errors={
        404: "Pokemon not found",
        "default": "pokeapi said we did things wrong 😢. status code: {}"
    })
def poke_color(color):
    """Map a pokeapi species colour name to a disnake embed Color.

    Raises KeyError for any name outside pokeapi's fixed colour set.
    """
    return {
        "black": disnake.Color(0x000000),
        "blue": disnake.Color.blue(),
        "brown": disnake.Color(0xD2691E),
        "gray": disnake.Color(0xA9A9A9),
        "green": disnake.Color.green(),
        "pink": disnake.Color(0xFF69B4),
        "purple": disnake.Color.purple(),
        "red": disnake.Color.red(),
        "white": disnake.Color(0xFFFFFF),
        "yellow": disnake.Color(0xFFFF00)
    }[color]
def localize(list_data, end_key):
    """Return the first English ("en") entry's *end_key* value from a
    pokeapi localised list, or None when no English entry exists."""
    english_values = (entry[end_key] for entry in list_data
                      if entry["language"]["name"] == "en")
    return next(english_values, None)
class Pokemon(MangoCog):
    """Pokemon related commands
    A few commands using the [pokeapi](https://pokeapi.co/) which provides information about pokemon
    Note that the above API does not include information about pokemon Sun and Moon, so I can't look up those pokemon for you"""
    # NOTE: the slash-command docstrings below are user-visible command
    # descriptions in disnake, so they are left exactly as written.

    def __init__(self, bot):
        MangoCog.__init__(self, bot)

    def poke_type(self, type_name):
        # "shadow"/"unknown" have no custom emoji; show the bare name instead.
        if type_name == "shadow" or type_name == "unknown":
            return type_name
        else:
            return self.get_emoji(f"poke_{type_name}")

    async def get_pokemon_data(self, pokemon):
        """Resolve a user-supplied pokemon name/id to (data, species_data)
        dicts from pokeapi, with localized_name / wiki_url filled in."""
        # Sanitize input first
        pokemon = pokemon.lower()
        replacements = { " ": "-", "♂": "-m", "♀": "-f" }
        for key in replacements:
            pokemon = pokemon.replace(key, replacements[key])
        pokemon = re.sub(r'[^a-z0-9\-]', '', pokemon)
        # pokeapi expects "mega" directly after the species name
        # (e.g. "charizard-mega-x"), so reposition it.
        words = pokemon.split("-")
        if "mega" in words:
            words.remove("mega")
            words.insert(1, "mega")
        pokemon = "-".join(words)
        # Prefer the form endpoint (covers mega/alternate forms); fall back
        # to the plain pokemon endpoint when no such form exists.
        try:
            form_data = await pokeapi_query(f"/pokemon-form/{pokemon}/")
            data = await pokeapi_query(form_data["pokemon"]["url"], True)
        except Http404Error as e:
            form_data = None
            data = await pokeapi_query(f"/pokemon/{pokemon}/")
        species_data = await pokeapi_query(data["species"]["url"], True)
        data["localized_name"] = localize(species_data["names"], "name")
        # Form-specific entries use large ids; fall back to the species dex id.
        if data["id"] >= 1000:
            data["id"] = species_data["id"]
        data["wiki_url"] = f"http://www.serebii.net/pokedex-sm/{data['id']:03d}.shtml"
        if form_data:
            # Use the form's sprites/name when a specific form was matched.
            data["sprites"] = form_data["sprites"]
            name = localize(form_data["names"], "name")
            if name:
                data["localized_name"] = name
            if form_data.get("is_mega"):
                data["wiki_url"] += "#mega"
        return data, species_data

    # returns True on success, False on failure
    # NOTE(review): the 'print' parameter shadows the builtin; renaming it
    # would change the keyword interface for callers, so it is kept.
    async def play_pokecry(self, inter: disnake.CmdInter, poke_id, namestring, old=False, print=False):
        is_mega = "mega" in str(namestring).lower()
        # Clip ids are "<id>", "old_<id>", "mega_<id>" or "mega_old_<id>".
        clipid = str(poke_id)
        if old:
            clipid = f"old_{clipid}"
        if is_mega:
            clipid = f"mega_{clipid}"
        try:
            clip = await self.get_clip(f"poke:{clipid}", inter)
            clip.volume = 0.1
            await self.play_clip(clip, inter, print=print)
        except Http404Error:
            # No cry audio exists for this clip id.
            return False
        except AudioPlayerNotFoundError as e:
            # The clip exists but there is no voice channel to play it in.
            if print:
                await inter.send(e.message)
            return True
        return True

    @commands.slash_command()
    async def pokemon(self, inter: disnake.CmdInter, pokemon: str, shiny: bool = False):
        """Looks up information about the given pokemon
        Parameters
        ----------
        pokemon: The name or id of the pokemon
        shiny: Set to true if you want to see the shiny version of the pokemon
        """
        await inter.response.defer()
        data, species_data = await self.get_pokemon_data(pokemon)
        # Type emoji, in slot order (primary type first).
        types = []
        for t in sorted(data["types"], key=lambda t: t["slot"]):
            types.append(self.poke_type(t["type"]["name"]))
        flavor_text = localize(species_data["flavor_text_entries"], "flavor_text")
        flavor_text = flavor_text.replace("\n", " ")
        embed = disnake.Embed(description=flavor_text, color=poke_color(species_data["color"]["name"]))
        embed.title = data["localized_name"] + f" #{data['id']}"
        embed.url = data["wiki_url"]
        if shiny and data["sprites"].get("front_shiny"):
            embed.set_thumbnail(url=data["sprites"].get("front_shiny"))
        else:
            embed.set_thumbnail(url=data["sprites"]["front_default"])
        embed.add_field(name=f"Type{'s' if len(types) > 1 else ''}", value=f"{''.join(types)}")
        if species_data.get("habitat"):
            embed.add_field(name="Habitat", value=f"{species_data['habitat']['name']}")
        # pokeapi reports weight in hectograms and height in decimetres.
        embed.add_field(name="Weight", value=f"{data['weight'] / 10} kg")
        embed.add_field(name="Height", value=f"{data['height'] / 10} m")
        # fails silently if there's no cry for this pokemon
        await self.play_pokecry(inter, data["id"], pokemon)
        await inter.send(embed=embed)

    @commands.slash_command()
    async def pokecry(self, inter: disnake.CmdInter, pokemon: str, old: bool = False):
        """Plays the pokemon's sound effect
        Parameters
        ----------
        pokemon: The name or id of the pokemon
        old: Set to true to use the old version of the pokemon's cry, if it exists
        """
        await inter.response.defer()
        # Audio files for these pokemon cries were gotten from [Veekun](https://veekun.com/dex/downloads). Veekun does not have the cries for Generation VII yet, so I won't be able to play those.
        words = pokemon.split(" ")
        pokemon = " ".join(words)
        data, species_data = await self.get_pokemon_data(pokemon)
        # 721 is the last pokemon of Generation VI (Volcanion).
        if data["id"] > 721:
            raise UserError("Sorry, I don't have the cries for pokemon in Generation VII yet")
        success = await self.play_pokecry(inter, data["id"], pokemon, old=old, print=True)
        if not success:
            raise UserError(f"Couldn't find the cry for {data['localized_name']}")
def setup(bot):
    """disnake extension entry point: register the Pokemon cog on the bot."""
    bot.add_cog(Pokemon(bot))
| StarcoderdataPython |
9628962 | <gh_stars>10-100
import torch
import numpy as np
from torchvision import transforms
import cv2
from PIL import Image
import custom_model
# Batch inference: run the trained DeepLabV3 skydiver-segmentation model
# over every 25th video frame and save colourised masks next to the inputs.

# Number of classes in the dataset
num_classes = 5

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Build the model in feature-extraction mode and load the best checkpoint.
model, input_size = custom_model.initialize_model(num_classes, keep_feature_extract=True, use_pretrained=False)
state_dict = torch.load("training_output_Skydiver_dataset_final/best_DeepLabV3_Skydiver.pth", map_location=device)
model = model.to(device)
model.load_state_dict(state_dict)
model.eval()

# Standard ImageNet normalisation, matching the training preprocessing.
transforms_image = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

for idx in range(1, 3000, 25):
    image = Image.open(f"/tmp/pycharm_project_782/03.03.20_saut_4/{idx:06}.png")
    image_np = np.asarray(image)
    # image_np = cv2.resize(image_np, 0.5, 0.5, cv2.INTER_CUBIC)
    # Downscale to 30% of the original size to keep inference fast.
    width = int(image_np.shape[1] * 0.3)
    height = int(image_np.shape[0] * 0.3)
    dim = (width, height)
    image_np = cv2.resize(image_np, dim, interpolation=cv2.INTER_AREA)
    image = Image.fromarray(image_np)
    image = transforms_image(image)
    image = image.unsqueeze(0)  # add batch dimension
    image = image.to(device)
    outputs = model(image)["out"]
    # Per-pixel argmax over the class logits gives the segmentation map.
    _, preds = torch.max(outputs, 1)
    preds = preds.to("cpu")
    preds_np = preds.squeeze(0).cpu().numpy().astype(np.uint8)
    print(preds_np.shape)
    print(image_np.shape)
    # preds_np = cv2.cvtColor(preds_np, cv2.COLOR_GRAY2BGR)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
    # Multiply the small class ids by 50 to spread them across the colormap.
    preds_np_color = cv2.applyColorMap(preds_np * 50, cv2.COLORMAP_HSV)
    cv2.imwrite(f"./results/{idx:04}_segmentation.png", preds_np_color)
    cv2.imwrite(f"./results/{idx:04}_image.png", image_np)
| StarcoderdataPython |
6341 | # Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.fills import GradientFill
from openpyxl.styles.colors import Color
from openpyxl.writer.styles import StyleWriter
from openpyxl.tests.helper import get_xml, compare_xml
class DummyWorkbook:
    # Minimal stand-in for an openpyxl Workbook: StyleWriter only reads
    # the style_properties attribute.
    style_properties = []
def test_write_gradient_fill():
    """StyleWriter serialises a two-stop linear GradientFill to the
    expected stylesheet XML."""
    fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)])
    writer = StyleWriter(DummyWorkbook())
    writer._write_gradient_fill(writer._root, fill)
    xml = get_xml(writer._root)
    expected = """<?xml version="1.0" ?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<gradientFill degree="90" type="linear">
<stop position="0">
<color theme="0"/>
</stop>
<stop position="1">
<color theme="4"/>
</stop>
</gradientFill>
</styleSheet>
"""
    # compare_xml returns None when the documents match, else a diff string.
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_borders():
    """StyleWriter serialises a default (empty) Border to the expected
    stylesheet XML with all five empty side elements."""
    borders = Border()
    writer = StyleWriter(DummyWorkbook())
    writer._write_border(writer._root, borders)
    xml = get_xml(writer._root)
    expected = """<?xml version="1.0"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</styleSheet>
"""
    # compare_xml returns None when the documents match, else a diff string.
    diff = compare_xml(xml, expected)
    assert diff is None, diff
| StarcoderdataPython |
8023276 | <gh_stars>1-10
# This program is free software: you can redistribute it and/or modify it under the
# terms of the Apache License (v2.0) as published by the Apache Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Apache License for more details.
#
# You should have received a copy of the Apache License along with this program.
# If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
"""Build and installation script for hyper-shell."""
# standard libs
import os
from setuptools import setup, find_packages
# metadata
from hyper_shell.__meta__ import (__appname__, __version__, __description__,
__authors__, __contact__, __keywords__,
__license__, __website__)
with open('README.rst', mode='r') as readme:
long_description = readme.read()
# basic dependencies
DEPS = ['cmdkit==1.5.5', 'logalpha==2.0.2', 'psutil>=5.7.0']
# add dependencies for readthedocs.io
if os.environ.get('READTHEDOCS') == 'True':
DEPS.extend(['sphinxbootstrap4theme'])
setup(
name = __appname__,
version = __version__,
author = __authors__,
author_email = __contact__,
description = ' '.join(__description__.strip().split('\n')),
license = __license__,
keywords = __keywords__,
url = __website__,
packages = find_packages(),
include_package_data = True,
long_description = long_description,
long_description_content_type = 'text/x-rst',
classifiers = ['Development Status :: 4 - Beta',
'Topic :: Utilities',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'License :: OSI Approved :: Apache Software License', ],
install_requires = DEPS,
entry_points = {'console_scripts': ['hyper-shell=hyper_shell.apps:main', ]},
data_files = [
('share/man/man1', ['man/man1/hyper-shell.1', ])
],
)
| StarcoderdataPython |
9650193 | <reponame>satyrus3/satyrus3.github.io<filename>docs/v3.0.7/_static/examples/text.py
from satyrus import Posiform, SatAPI
class text(SatAPI):
    """SatAPI backend that renders the compiled energy Posiform as text."""

    def solve(self, energy: Posiform, **params):
        # No optimisation is performed: the string form of the energy
        # function is the entire output.
        return str(energy)
| StarcoderdataPython |
9739943 | <filename>torchlab/nnlib/affine_align/__init__.py
from .pose_align import templates
from .pose_align import templates17to29
from .pose_align import Template
from .pose_align import PoseAffineTemplate
from .alignlayer import calcAffineMatrix
from .alignlayer import affine_align
from .alignlayer import affine_align_gpu
| StarcoderdataPython |
398064 | from datetime import datetime, date
import numpy
import pandas
import copy
import uuid
from past.builtins import basestring # pip install future
from pandas.io.formats.style import Styler
from functools import partial, reduce
from .offline import iplot, plot
from IPython.core.display import HTML, display
import plotly.io as pio
pio.renderers.default = 'iframe_connected' #required to return a 'text/html' iframe bundle that can then be dropped as html
DEFAULT_COLORS = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
from itertools import zip_longest
def grouped(iterable, n):
    """Yield the items of *iterable* as consecutive n-tuples.

    s -> (s0,...,sn-1), (sn,...,s2n-1), ...; the final tuple is padded
    with None when the length is not a multiple of n.
    From http://stackoverflow.com/a/5389547/1280629
    """
    # n references to the *same* iterator advance in lockstep, so
    # zip_longest slices the stream into fixed-size chunks.
    chunk_iterators = [iter(iterable)] * n
    return zip_longest(*chunk_iterators)
def charts_table(charts, cols):
    """Draw a sequence of chart objects as 'subplots' in an HTML table with
    *cols* columns.

    Each entry of *charts* may be a _PlotlyChartBundle (rendered through its
    mime bundle, preferring html/svg over png) or a pandas Styler (rendered
    via .render()); None entries leave an empty cell.
    """
    table_content = """
<style>
/* for chart subplots tables produced by 'draw_charts_table' */
.table-no-border {
border: none !important;
}
</style>
<table class="table-no-border">
"""
    for row in grouped(charts, cols):
        table_content += '<tr class="table-no-border">'
        for chart in row:
            if chart is None:
                # Padding cell from grouped(); render nothing.
                continue
            if isinstance(chart, _PlotlyChartBundle):
                # odd re-writing of width and height needed to ensure they
                # are not overwritten by multiple charts plotted simultaneously
                if 'layout' in chart.data_layout:
                    layout = chart.data_layout['layout']
                    layout['width'] = chart.width
                    layout['height'] = chart.height
                bundle = chart._repr_mimebundle_()
                bundle_content = None
                # Prefer interactive/vector formats over rasterised png.
                for k in ['text/html', 'image/svg+xml']:
                    bundle_content = bundle.get(k, None)
                    if bundle_content is not None:
                        break
                if bundle_content is None:
                    if 'image/png' in bundle:
                        base64_img = bundle['image/png']
                        # BUGFIX: the original emitted a malformed tag,
                        # '<img src="..."</img>' (missing '>' before the
                        # closing tag); a self-closing img element is used.
                        bundle_content = f'<img src="data:image/png;base64,{base64_img}"/>'
                    else:
                        raise ValueError('No html, svg or png bundle available (only %s available) - check value of plotly.pio.renderers.default'
                                         % ', '.join(bundle.keys()))
            elif isinstance(chart, Styler):
                bundle_content = chart.render()
            table_content += '<td class="table-no-border">%s</td>' % bundle_content
        table_content += '</tr>'
    table_content += '</table>'
    display(HTML(table_content))
def percent_axis(axis_settings=None, tick_precision=0, hover_precision=2):
    """Return a Plotly axis-settings dict that formats values as percentages.

    axis_settings: optional base dict of axis settings to extend; the
        percentage 'tickformat'/'hoverformat' keys override any existing
        ones.  (The original used a mutable default argument `{}`; None is
        used instead — behaviour for callers is identical.)
    tick_precision: decimal places shown on axis ticks.
    hover_precision: decimal places shown in hover tooltips.
    """
    base = axis_settings if axis_settings is not None else {}
    return dict(base, **{
        'tickformat': ',.%d%%' % tick_precision,
        'hoverformat': ',.%d%%' % hover_precision,
    })
default_layout = {
#'yaxis': {
# 'hoverformat': '.2f',
#},
#'xaxis': {
# 'hoverformat': '.2f',
#},
'template': 'plotly_white',
'margin': {
'l': 60,
'r': 50,
'b': 50,
't': 50,
'pad': 4
},
'autosize': True
}
default_config = {'showLink': False}
def dict_merge(a, b, path=None):
    """Recursively merge *b* into *a* (in place) and return *a*.

    Nested dicts present in both are merged recursively; dict values only
    present in *b* are deep-copied so *a* never aliases *b*'s sub-dicts;
    any other value from *b* overwrites *a*'s.  *path* tracks the key path
    during recursion.
    """
    if path is None:
        path = []
    for key in b:
        value = b[key]
        both_are_dicts = (key in a and isinstance(a[key], dict)
                          and isinstance(value, dict))
        if both_are_dicts:
            dict_merge(a[key], value, path + [str(key)])
        elif isinstance(value, dict):
            # Deep copy so later mutation of b does not leak into a.
            a[key] = copy.deepcopy(value)
        else:
            a[key] = value
    return a
class _PlotlyChartBundle(object):
    """Class for returning a displayable object wrapping a plotly chart.
    This is used to wrap a plotted chart so we can then drop it into a table if required."""

    def __init__(self, data_layout, width, height, config):
        # data_layout: plotly figure dict (typically {'data': ..., 'layout': ...})
        self.data_layout = data_layout
        self.width = width
        self.height = height
        self.config = config  # plotly config options (e.g. showLink)

    def _repr_mimebundle_(self, *args, **kwargs):
        # use iplot to return a renderable bundle (IPython display protocol)
        bundle = iplot(self.data_layout,
                       image_width=self.width,
                       image_height=self.height,
                       config=self.config,
                       return_bundle=True)
        return bundle

    def set_title(self, title):
        # Set the chart title; returns self so calls can be chained.
        self.data_layout['layout']['title'] = title
        return self

    def _get_or_create_subdict(self, path):
        # Walk *path* of keys inside data_layout, creating empty dicts as
        # needed, and return the innermost dict.
        d = self.data_layout
        for k in path:
            if k not in d:
                d[k] = {}
            d = d[k]
        return d

    def set_axislabel(self, axis, title, replace_existing=True):
        # axis is 'x' or 'y'; only overwrites an existing title when
        # replace_existing is True.  Returns self for chaining.
        axis_dict = self._get_or_create_subdict(('layout', '%saxis' % axis))
        if replace_existing or 'title' not in axis_dict:
            axis_dict['title'] = title
        return self

    def set_xlabel(self, title, replace_existing=True):
        return self.set_axislabel('x', title, replace_existing)

    def set_ylabel(self, title, replace_existing=True):
        return self.set_axislabel('y', title, replace_existing)

    def write_image(self, filename, scale, *args, **kwargs):
        # Export the figure to a static image file via plotly.io.
        pio.write_image(self.data_layout, filename, scale=scale, *args, **kwargs)

    def write_json(self, filename, *args, **kwargs):
        # Export the figure definition as JSON via plotly.io.
        pio.write_json(self.data_layout, filename, *args, **kwargs)

    def to_html(self, width=None, height=None, **kwargs):
        # Render the chart as an embeddable HTML <div> (plotly.js not
        # included; explicit width/height override the stored defaults).
        return plot(self.data_layout,
                    output_type="div",
                    image_width=width or self.width,
                    image_height=height or self.height,
                    config=self.config,
                    include_plotlyjs=False,
                    **kwargs,
                    )
def scatter(df, x_col, y_col,
            groups_col=None, tooltip_cols=None, group_order=None,
            layout=None, series_dict=None, x_order=None, group_colours=None,
            color_col=None, size_col=None,
            scatter_type='scatter',  # could be changed to e.g. scattergl
            auto_axis_title=True,
            legend_or_color_title=True,
            auto_break_legend_or_color_title=True,
            width=600, height=400):
    """Scatter plot from data in a DataFrame.

    The DataFrame to work on should be passed as `df`
    `x_col` should contain the column name of column contain x-axis values
    `y_col` should contain the column name of column contain y-axis values
    Use `color_col` to make scatter points coloured according to values in the column with that name e.g. by Temperature
    Use `groups_col` to make scatter points coloured by group according to values in that column e.g. by Country
    `layout` can contain extra Plotly layout info like chart title, axes titles etc
        e.g. {'title': 'chart title', 'xaxis': {'title': 'x title'}}
    `tooltip_cols` lists extra columns to show in the hover tooltip.
    `x_order` gives an explicit ordering of x categories; `group_colours` maps
    group name -> marker colour.  Returns a _PlotlyChartBundle.
    """
    # Dict/list parameters default to None and are created per call here:
    # mutable default arguments would be shared between calls.
    layout = {} if layout is None else layout
    series_dict = {} if series_dict is None else series_dict
    x_order = [] if x_order is None else x_order
    group_colours = {} if group_colours is None else group_colours
    pl_data = []
    if groups_col is not None and color_col is not None:
        raise RuntimeError('Only one of "groups_col" or "color_col" should be provided when calling this function')
    if tooltip_cols is None:
        tooltip_cols = []
    # The one breakdown column (grouping or colouring) drives tooltips and titles.
    breakdown_col = groups_col or color_col
    if breakdown_col is not None:
        if breakdown_col not in tooltip_cols:
            tooltip_cols.insert(0, breakdown_col)
        # Break long legend/colorbar titles onto two lines at a word boundary.
        if auto_break_legend_or_color_title and len(breakdown_col) >= 10:
            split_leg = breakdown_col.split()
            if len(split_leg) > 1:
                mid_point = len(split_leg) // 2
                breakdown_col = ' '.join(split_leg[:mid_point]) + '<br>' + ' '.join(split_leg[mid_point:])
    if groups_col is not None:
        groups_available = set(df[groups_col])
        sorted_groups = group_order if group_order is not None else sorted(groups_available)
    layout = reduce(dict_merge, [{}, default_layout, layout])  # overwrite default_layout with given layout
    # x_col/y_col may be column names or ready-made value sequences.
    if isinstance(x_col, basestring):
        xvals = df[x_col]
    else:
        xvals = x_col
    if isinstance(y_col, basestring):
        yvals = df[y_col]
    else:
        yvals = y_col
    layout['width'] = width
    layout['height'] = height

    def _process_group(grp, grp_vals):
        # Build one plotly series for the boolean row-mask `grp_vals`.
        line_dict = {
            'x': xvals[grp_vals].values,
            'y': yvals[grp_vals].values,
            'mode': 'markers',
            'type': scatter_type,
            'name': grp,
            'marker': {'size': 7}
        }
        if tooltip_cols:
            group_tooltips_df = df[tooltip_cols][grp_vals]
            line_dict['text'] = ['<br>'.join(['%s: %s' % (ttc, val) for ttc, val in row.to_dict().items()])
                                 for i, row in group_tooltips_df.iterrows()]
        line_dict = dict_merge(line_dict, series_dict)
        marker_dict = line_dict['marker']
        if grp in group_colours:
            marker_dict['color'] = group_colours[grp]
        if color_col is not None:
            marker_dict['color'] = df[color_col].values
            if legend_or_color_title:
                marker_dict['colorbar'] = {'title': '<b>%s</b>' % breakdown_col}
            # Centre the colour scale on zero for diverging data.
            if df[color_col].max() > 0 and df[color_col].min() < 0:
                marker_dict['cmid'] = 0
        if size_col is not None:
            marker_dict['size'] = df[size_col].to_list()
        if x_order:
            # Re-sort points to follow the explicit x_order category ordering.
            indexes = [x_order.index(x) for x in line_dict['x']]
            line_dict['x'] = [v for (i, v) in sorted(zip(indexes, line_dict['x']))]
            line_dict['y'] = [v for (i, v) in sorted(zip(indexes, line_dict['y']))]
            if 'text' in line_dict:
                line_dict['text'] = [v for (i, v) in sorted(zip(indexes, line_dict['text']))]
        pl_data.append(line_dict)

    if groups_col is not None:
        for grp in sorted_groups:
            if grp in groups_available:
                grp_vals = df[groups_col] == grp
                _process_group(grp, grp_vals)
    else:
        # Single unnamed series covering every row.
        _process_group(grp='Values',
                       grp_vals=numpy.repeat(True, len(df)))
    data_layout = {'data': pl_data, 'layout': layout}
    bundle = _PlotlyChartBundle(data_layout,
                                width=width,
                                height=height,
                                config=default_config)
    if auto_axis_title:
        bundle.set_xlabel(x_col, replace_existing=False)
        bundle.set_ylabel(y_col, replace_existing=False)
    layout = bundle.data_layout['layout']
    # Bug fix: previously this checked the misspelled key 'legent_title', so a
    # legend title supplied via `layout` was always overwritten.
    if legend_or_color_title and 'legend_title' not in layout and breakdown_col is not None:
        layout['legend_title'] = '<b>%s</b>' % breakdown_col
    return bundle
def chart(dataframe, layout=None, column_settings=None, all_columns_settings=None, x_and_y=True,
          dropna=True, width=800, height=500, text_dataframe=None, custom_chart_data=None, col_level_separator=': '):
    """Generic plot from data in a DataFrame. Can be used for e.g. lines, bars and histograms.

    The DataFrame to work on should be passed as `dataframe`
    Every column in `dataframe` is plotted as a separate series (e.g. a separate line or bar group)
    The index of dataframe is used for x-axis values
    `layout` can contain extra Plotly layout info like chart title, axes titles etc
        e.g. {'title': 'chart title', 'xaxis': {'title': 'x title'}}
    `all_columns_settings` can be used to specify an appearance setting for all columns,
        e.g. {'type': 'bar'} makes all of the columns display as bar charts
    `column_settings` can be used to specify an appearance setting for specific columns,
        e.g. {'Average data line': {'marker': {'color': 'black'}}}
    `text_dataframe` optionally supplies per-point hover texts keyed by the
    same column names.  Returns a _PlotlyChartBundle.
    """
    # Fresh containers per call: mutable default arguments would be shared
    # between calls and accumulate state.
    layout = {} if layout is None else layout
    column_settings = {} if column_settings is None else column_settings
    all_columns_settings = {} if all_columns_settings is None else all_columns_settings
    custom_chart_data = [] if custom_chart_data is None else custom_chart_data
    chart_data = []
    index = dataframe.index
    if isinstance(index, pandas.DatetimeIndex):
        # Plain dates render more predictably than full timestamps.
        index = pandas.Index(index.date)

    def process_column(colname, vals):
        # Multi-level (tuple) column names are joined into a single label.
        cleaned_colname = col_level_separator.join([str(v) for v in colname]) if isinstance(colname, tuple) else colname
        coldata = {
            "name": cleaned_colname
        }
        coldata.update(all_columns_settings)
        if colname in column_settings:
            coldata.update(column_settings[colname])
        if dropna:
            na_mask = ~pandas.isnull(vals)
        else:
            na_mask = slice(None)
        # Pie traces use different key names for their categories/values.
        if coldata.get('type') == 'pie':
            x_key = 'labels'
            y_key = 'values'
        else:
            x_key = 'x'
            y_key = 'y'
        data = vals[na_mask].values
        if x_and_y:
            coldata[x_key] = index[na_mask].values
            coldata[y_key] = data
        else:
            # e.g. histograms: only one dimension of data is supplied.
            coldata[x_key] = data
        if text_dataframe is not None and colname in text_dataframe and 'text' not in coldata:
            coldata['text'] = text_dataframe.loc[na_mask, colname].values
        chart_data.append(coldata)

    if isinstance(dataframe, pandas.DataFrame):
        # .items() replaces .iteritems(), which was removed in pandas 2.0.
        for colname, vals in dataframe.items():
            process_column(colname, vals)
    elif isinstance(dataframe, pandas.Series):
        process_column('Values', dataframe)
    layout = reduce(dict_merge, [{}, default_layout, layout])  # overwrite default_layout with given layout
    layout['width'] = width
    layout['height'] = height
    data_layout = {'data': chart_data + custom_chart_data, 'layout': layout}
    return _PlotlyChartBundle(data_layout,
                              width=width,
                              height=height,
                              config=default_config)
def boxplot(dataframe,
            orientation='vertical',
            use_interquartile_range=True,
            column_settings=None,
            all_columns_settings=None,
            layout=None,
            width=800, height=500, single_color=True,
            BOX_STDEVS=[3, 2, 1, 0, -1, -2, -3],
            show_outliers=True):
    """Box plot with whiskers/box edges at fixed standard-deviation percentiles.

    Each column of `dataframe` becomes one box.  By default the box edges are
    the 25%/75% quartiles (`use_interquartile_range`); whiskers are at +-2
    standard deviations and the "outlier" points at +-3.
    `single_color=True` draws all boxes as a single trace; otherwise one trace
    per column, allowing `column_settings` overrides.
    Returns a _PlotlyChartBundle.
    """
    # Fix: `import scipy` alone does not reliably expose the `stats`
    # subpackage; import it explicitly.
    import scipy.stats
    # Fresh dicts per call — mutable defaults would be shared between calls.
    column_settings = {} if column_settings is None else column_settings
    all_columns_settings = {} if all_columns_settings is None else all_columns_settings
    layout = {} if layout is None else layout
    # Percentiles of the normal CDF at the requested sigma positions
    # (fresh ndarray each call, safe to patch below).
    BOX_STDEV_PERCENTILES = scipy.stats.norm.cdf(BOX_STDEVS)
    if orientation == 'vertical':
        outliers_key = 'y'
        names_key = 'x'
    else:
        outliers_key = 'x'
        names_key = 'y'
    if use_interquartile_range:
        # Replace the +-1 sigma percentiles with the classic quartiles.
        BOX_STDEV_PERCENTILES[2] = 0.75
        BOX_STDEV_PERCENTILES[4] = 0.25
    box_list = []
    if single_color:
        # One combined trace; rows of qs_df are columns of `dataframe`,
        # columns are labelled by sigma so lookups below are by *label*.
        qs_df = dataframe.quantile(BOX_STDEV_PERCENTILES).T
        qs_df.columns = BOX_STDEVS
        coldata = {"lowerfence": qs_df[-2].values,
                   "q1": qs_df[-1].values,
                   "median": qs_df[0].values,
                   # "notchspan": 0.2, #for setting a notch in the box at the median
                   "q3": qs_df[1].values,
                   "upperfence": qs_df[2].values,
                   # "mean": [src_df[col].mean()],
                   # "sd": [0.2],
                   "type": "box",
                   outliers_key: [[qs[-3], qs[3]] for i, qs in qs_df.iterrows()] if show_outliers else [],
                   names_key: qs_df.index,
                   "boxpoints": "outliers"}
        coldata.update(all_columns_settings)
        box_list.append(coldata)
    else:
        for col in dataframe:
            # NOTE: qs is indexed by sigma labels, so qs[-2] etc. are
            # label-based lookups, not positional.
            qs = pandas.Series(dataframe[col].quantile(BOX_STDEV_PERCENTILES).values,
                               index=BOX_STDEVS)
            coldata = {"name": col,
                       "lowerfence": [qs[-2]],
                       "q1": [qs[-1]],
                       "median": [qs[0]],
                       # "notchspan": 0.2, #for setting a notch in the box at the median
                       "q3": [qs[1]],
                       "upperfence": [qs[2]],
                       # "mean": [src_df[col].mean()],
                       # "sd": [0.2],
                       "type": "box",
                       outliers_key: [[qs[-3], qs[3]]] if show_outliers else [],
                       names_key: [col],
                       "boxpoints": "outliers"}
            coldata.update(all_columns_settings)
            if col in column_settings:
                coldata.update(column_settings[col])
            box_list.append(coldata)
    layout = reduce(dict_merge, [{}, default_layout, layout])  # overwrite default_layout with given layout
    layout['width'] = width
    layout['height'] = height
    data_layout = {'data': box_list, 'layout': layout}
    return _PlotlyChartBundle(data_layout,
                              width=width,
                              height=height,
                              config=default_config)
| StarcoderdataPython |
3491226 | <reponame>panda-mute/ykdl
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A simple Javascript engines' wrapper.
Description:
This library wraps the Javascript interpreter to python.
System's built-in Javascript interpreter:
macOS: JavascriptCore
Linux: Gjs on Gnome, CJS on Cinnamon
Windows: Chakra
Any installed external Javascript interpreters, e.g.
PyChakra, QuickJS, Node.js, etc.
Usage:
from jsengine import JSEngine
if JSEngine is None: # always check this first!
...
ctx = JSEngine()
ctx.eval('1 + 1') # => 2
ctx2 = JSEngine("""
function add(x, y) {
return x + y;
}
""")
ctx2.call("add", 1, 2) # => 3
ctx.append("""
function square(x) {
return x ** 2;
}
""")
ctx.call("square", 9) # => 81
If your want use a special external Javascript interpreter, please call
`ExternalInterpreter` or `set_external_interpreter` after imported:
from jsengine import *
binary = binary_name or binary_path
name = None or any_string # see ExternalInterpreterNameAlias.keys()
tempfile = True # use tempfile or not
evalstring = True # can run command string as Javascript or can't
args = [args1, args2, ...] # arguments used for interpreter
interpreter = ExternalInterpreter.get(binary, name=name,
tempfile=tempfile,
evalstring=evalstring,
args=args)
if interpreter:
# found
ctx = ExternalJSEngine(interpreter)
if set_external_interpreter(binary, name=name,
tempfile=tempfile,
evalstring=evalstring,
args=args)
# set default external interpreter OK
ctx = ExternalJSEngine()
'''
from __future__ import print_function
from subprocess import Popen, PIPE, list2cmdline
import json
import os
import platform
import sys
import tempfile
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
### Before using this library, check JSEngine first!!!
__all__ = ['JSEngine', 'ChakraJSEngine', 'QuickJSEngine', 'ExternalJSEngine',
'ExternalInterpreter', 'set_external_interpreter',
'RuntimeError', 'ProgramError']
# Exceptions
# Keep a handle on the builtin before intentionally shadowing it below.
_RuntimeError = RuntimeError
class RuntimeError(_RuntimeError):
    """Raised when no usable Javascript interpreter is available or the
    interpreter process itself fails (e.g. non-zero exit status)."""
    pass
class ProgramError(Exception):
    """Raised when the evaluated Javascript code throws an error."""
    pass
# The maximum length of command string, used to decide whether a script can
# be passed to an external interpreter as a single `-e` argument.
if os.name == 'posix':
    # Used in Unix is ARG_MAX in conf
    # NOTE(review): assumes `getconf` exists and prints a plain integer —
    # TODO confirm on minimal POSIX-like systems.
    ARG_MAX = int(os.popen('getconf ARG_MAX').read())
else:
    # Used in Windows CreateProcess is 32K
    ARG_MAX = 32 * 1024
### Detect Javascript interpreters
# Flags/state filled in by the detection code below.
chakra_available = False
quickjs_available = False
external_interpreter = None
# Known interpreters: whether to run scripts via a temp file, whether the
# binary accepts `-e <code>`, and extra command-line arguments.
DefaultExternalInterpreterOptions = {
    # tempfile, evalstring, args
    'ChakraCore': [ True, False, []],
    'Node.js': [ True, True, []],
    'QuickJS': [ True, True, []],
    'V8': [ True, True, []],
    'XS': [ True, True, []],
}
# Maps lowercased binary names (dots removed) to canonical interpreter names.
ExternalInterpreterNameAlias = {
    # *1 Unceremonious name is not recommended to be used as the binary name
    'chakracore': 'ChakraCore',
    'chakra': 'ChakraCore',
    'ch': 'ChakraCore',  # *1
    'cjs': 'CJS',
    'gjs': 'Gjs',
    'javascriptcore': 'JavaScriptCore',
    'jsc': 'JavaScriptCore',
    'nodejs': 'Node.js',
    'node': 'Node.js',
    'quickjs': 'QuickJS',
    'qjs': 'QuickJS',
    'qjsc': 'QuickJS',
    'spidermonkey': 'SpiderMonkey',
    'sm': 'SpiderMonkey',  # *1
    'js': 'SpiderMonkey',  # *1
    'v8': 'V8',  # *1
    'd8': 'V8',  # *1
    'xs': 'XS',  # *1
    'xst': 'XS',
    # Don't use these interpreters
    # They are not compatible with the most used ES6 features
    'duktape': 'Duktape(incompatible)',
    'duk': 'Duktape(incompatible)',
    'hermes': 'Hermes(incompatible)',
    'cscript': 'JScript(incompatible)',
}
# PyChakra: optional in-process ChakraCore binding.
try:
    from PyChakra import Runtime as ChakraHandle, get_lib_path
    # PyChakra can be installed without its native library; treat as absent.
    if not os.path.exists(get_lib_path()):
        raise RuntimeError
except (ImportError, _RuntimeError):
    pass
else:
    chakra_available = True
# PyQuickJS: optional in-process QuickJS binding.
try:
    import quickjs
except ImportError:
    pass
else:
    quickjs_available = True
# macOS: built-in JavaScriptCore
if platform.system() == 'Darwin':
    # jsc lives on a new path since macOS Catalina
    jsc_paths = ['/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Resources/jsc',
                 '/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Helpers/jsc']
    for interpreter in jsc_paths:
        external_interpreter = which(interpreter)
        if external_interpreter:
            break
# Windows: built-in Chakra, or Node.js,QuickJS if installed
elif platform.system() == 'Windows':
    if not chakra_available:
        # Fall back to the bundled ctypes-based Chakra wrapper.
        try:
            from jsengine_chakra import ChakraHandle, chakra_available
        except ImportError:
            from .jsengine_chakra import ChakraHandle, chakra_available
    for interpreter in ('qjs', 'node', 'nodejs'):
        external_interpreter = which(interpreter)
        if external_interpreter:
            break
    if not chakra_available and not quickjs_available and external_interpreter is None:
        print('Please install PyChakra or Node.js!', file=sys.stderr)
# Linux: Gjs on Gnome, CJS on Cinnamon, or JavaScriptCore, Node.js if installed
elif platform.system() == 'Linux':
    for interpreter in ('gjs', 'cjs', 'jsc', 'qjs', 'nodejs', 'node'):
        external_interpreter = which(interpreter)
        if external_interpreter:
            break
    if not chakra_available and not quickjs_available and external_interpreter is None:
        print('''\
Please install at least one of the following Javascript interpreter.'
python packages: PyChakra, quickjs
applications: Gjs, CJS, QuickJS, JavaScriptCore, Node.js, etc.''', file=sys.stderr)
else:
    print('Sorry, the Javascript engine is currently not supported on your system.',
          file=sys.stderr)
# Inject to the script to let it return jsonlized value to python
# Fixed our helper objects
# The evaluated source is wrapped in this template; the helper prints a final
# line '["result", <ok>, <value>]' on stdout which Python then parses.
injected_script = u'''\
Object.defineProperty((typeof global !== 'undefined') && global ||
(typeof globalThis !== 'undefined') && globalThis ||
this, '_JSEngineHelper', {{
    value: {{}},
    writable: false,
    configurable: false
}});
Object.defineProperty(_JSEngineHelper, 'print', {{
    value: typeof print === 'undefined' ? console.log : print,
    writable: false,
    configurable: false
}});
Object.defineProperty(_JSEngineHelper, 'jsonStringify', {{
    value: JSON.stringify,
    writable: false,
    configurable: false
}});
Object.defineProperty(_JSEngineHelper, 'result', {{
    value: null,
    writable: true,
    configurable: false
}});
Object.defineProperty(_JSEngineHelper, 'status', {{
    value: false,
    writable: true,
    configurable: false
}});
try {{
    _JSEngineHelper.result = eval({source}), _JSEngineHelper.status = true;
}}
catch (err) {{
    _JSEngineHelper.result = err.toString(), _JSEngineHelper.status = false;
}}
try {{
    _JSEngineHelper.print('\\n' + _JSEngineHelper.jsonStringify(
        ["result", _JSEngineHelper.status, _JSEngineHelper.result]));
}}
catch (err) {{
    _JSEngineHelper.print(
        '\\n["result", false, "Script returns a value with an unsupported type"]');
}}
'''
# Some simple compatibility processing
# Ensure `global`/`globalThis` exist on interpreters that lack them.
init_global_script = u'''\
if (typeof global === 'undefined')
    if (typeof Proxy === 'function')
        global = new Proxy(this, {});
    else
        global = this;
if (typeof globalThis === 'undefined')
    globalThis = this;
'''
# Template to delete a single global object (e.g. Node's `exports`).
init_del_gobject_script = u'''\
if (typeof {gobject} !== 'undefined')
    delete {gobject};
'''
# Characters after which appended code needs no ';' terminator.
end_split_char = set(u',;\\{}([')
if sys.version_info > (3,):
    # On Python 3 `unicode` is just `str`; keeps the py2/py3 code unified.
    unicode = str
def to_unicode(s):
    """Decode UTF-8 bytes to text; text passes through unchanged."""
    return s if isinstance(s, unicode) else s.decode('utf8')
def to_bytes(s):
    """Encode text to UTF-8 bytes; bytes pass through unchanged."""
    return s.encode('utf8') if isinstance(s, unicode) else s
def json_encoder_fallback(obj):
    """Serializer fallback that makes bytes JSON-safe (Python 3)."""
    if not isinstance(obj, bytes):
        # Defer to the stock encoder, which raises TypeError for unknown types.
        return json.JSONEncoder.default(json_encoder, obj)
    return to_unicode(obj)
# Encoder used when the interpreter accepts raw unicode (bytes allowed via
# the fallback above).
json_encoder = json.JSONEncoder(
    skipkeys=True,
    ensure_ascii=False,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=json_encoder_fallback,
)
# ASCII-only variant for interpreters invoked with `-e` on the command line,
# where non-ASCII characters may be mangled by the shell/OS.
json_encoder_ensure_ascii = json.JSONEncoder(
    skipkeys=True,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)
class AbstractJSEngine:
    """Shared engine behaviour: source accumulation, code normalisation and
    the public append/eval/call API.  Subclasses implement `_append`/`_eval`."""
    def __init__(self, source=u'', init_global=False, init_del_gobjects=None):
        # Every snippet ever passed in, so stateless backends can re-run it.
        self._source = []
        init_script = []
        if init_global:
            init_script.append(init_global_script)
        if init_del_gobjects:
            for gobject in init_del_gobjects:
                init_script.append(init_del_gobject_script.format(gobject=gobject))
        self.append(u''.join(init_script))
        self.append(source)
    @property
    def source(self):
        '''All the inputted Javascript code.'''
        return u'\n'.join(self._source)
    def _append_source(self, code):
        # Record non-empty code for later replay.
        if code:
            self._source.append(code)
    def _check_code(self, code):
        """Normalise to unicode and terminate the snippet safely so that
        concatenated snippets do not run into each other."""
        # Input unicode
        code = to_unicode(code)
        last_c = code.rstrip()[-1:]
        if last_c:
            # Simple end-split check
            if last_c not in end_split_char:
                code += u';'
        return code
    def append(self, code):
        '''Run Javascript code and return none.'''
        code = self._check_code(code)
        if code:
            self._append(code)
    def eval(self, code):
        '''Run Javascript code and return result.'''
        code = self._check_code(code)
        if code:
            return self._eval(code)
    def call(self, identifier, *args):
        '''Use name string and arguments to call Javascript function.'''
        # JSON-encode the arguments so Python values cross into Javascript.
        chunks = json_encoder.iterencode(args, _one_shot=True)
        chunks = [to_unicode(chunk) for chunk in chunks]
        args = u''.join(chunks)[1:-1]
        code = u'{identifier}({args});'.format(**vars())
        return self._eval(code)
class InternalJSEngine(AbstractJSEngine):
    '''Base for in-process (DLL/binding) Javascript interpreters; subclasses
    provide `self._context` with an `eval(code, eval=..., raw=...)` method.'''
    def _append(self, code):
        # Evaluate immediately but discard the result (raw=True skips decoding).
        self._context.eval(code, eval=False, raw=True)
    def _eval(self, code):
        return self._context.eval(code)
class ChakraJSEngine(InternalJSEngine):
    '''Wrapper for the system's built-in Chakra or PyChakra (ChakraCore).'''
    def __init__(self, *args, **kwargs):
        if not chakra_available:
            # Build an actionable message pointing at whichever alternative
            # backend *is* available on this system.
            msg = 'No supported Chakra binary found on your system!'
            if quickjs_available:
                msg += ' Please install PyChakra or use QuickJSEngine.'
            elif external_interpreter:
                msg += ' Please install PyChakra or use ExternalJSEngine.'
            else:
                msg += ' Please install PyChakra.'
            raise RuntimeError(msg)
        self._context = self.Context(self)
        InternalJSEngine.__init__(self, *args, **kwargs)
    class Context:
        # Thin adapter: records source on the engine and converts Chakra's
        # (ok, result) pairs into a return value or ProgramError.
        def __init__(self, engine):
            self._engine = engine
            self._context = ChakraHandle()
        def eval(self, code, eval=True, raw=False):
            self._engine._append_source(code)
            ok, result = self._context.eval(code, raw=raw)
            if ok:
                if eval:
                    return result
            else:
                raise ProgramError(str(result))
class QuickJSEngine(InternalJSEngine):
    '''Wrapper for the QuickJS python binding `quickjs`.'''
    def __init__(self, *args, **kwargs):
        if not quickjs_available:
            msg = 'No supported QuickJS package found on custom python environment!'
            if chakra_available:
                msg += ' Please install python package quickjs or use ChakraJSEngine.'
            elif external_interpreter:
                msg += ' Please install python package quickjs or use ExternalJSEngine.'
            else:
                msg += ' Please install python package quickjs.'
            raise RuntimeError(msg)
        self._context = self.Context(self)
        InternalJSEngine.__init__(self, *args, **kwargs)
    class Context:
        def __init__(self, engine):
            self._engine = engine
            self._context = quickjs.Context()
            # `typeof` probe used to tell callable functions apart from objects.
            self.typeof = self.Function(self, self._context.eval(u'(obj => typeof obj)'))
        def eval(self, code, eval=True, raw=False):
            self._engine._append_source(code)
            try:
                result = self._context.eval(code)
            except quickjs.JSException as e:
                raise ProgramError(*e.args)
            else:
                if eval:
                    if raw or not isinstance(result, quickjs.Object):
                        return result
                    elif callable(result) and self.typeof(result) == u'function':
                        return self.Function(self, result)
                    else:
                        # Round-trip via JSON to obtain plain Python values.
                        return json.loads(result.json())
        class Function:
            # PetterS/quickjs/Issue7
            # Escape StackOverflow when calling function outside
            def __init__(self, context, function):
                self._context = context
                self._function = function
            def __call__(self, *args):
                return self._function(*args)
class ExternalJSEngine(AbstractJSEngine):
    '''Wrapper for an external (subprocess) Javascript interpreter.

    The accumulated script is fed to the interpreter binary and the JSON
    result line printed by the injected helper is parsed from stdout.
    '''
    def __init__(self, source=u'', init_global=False, init_del_gobjects=(), interpreter=None):
        # `interpreter` may be a binary name/path or an ExternalInterpreter;
        # otherwise the module-level default (if any) is used.
        # (init_del_gobjects default changed [] -> () to avoid a mutable default.)
        if isinstance(interpreter, str):
            interpreter = ExternalInterpreter.get(interpreter)
        if isinstance(interpreter, ExternalInterpreter):
            self.interpreter = interpreter
        elif isinstance(external_interpreter, ExternalInterpreter):
            self.interpreter = external_interpreter
        else:
            msg = 'No supported external Javascript interpreter found on your system!'
            if chakra_available:
                msg += ' Please install one or use ChakraJSEngine.'
            elif quickjs_available:
                msg += ' Please install one or use QuickJSEngine.'
            else:
                msg += ' Please install one.'
            raise RuntimeError(msg)
        # Del 'exports' to ignore import error, e.g. Node.js
        init_del_gobjects = list(init_del_gobjects) + ['exports']
        AbstractJSEngine.__init__(self, source, init_global, init_del_gobjects)
    def _append(self, code):
        # External interpreters keep no state between runs: just record the code.
        self._append_source(code)
    def _eval(self, code):
        self._append_source(code)
        code = self._inject_script()
        evalstring = False
        # Strategy 1: pass the script as a command-line argument, if supported.
        if self.interpreter.evalstring:
            try:
                output = self._run_interpreter_with_string(code)
                evalstring = True
            except ValueError:
                pass
            except _RuntimeError:
                # Interpreter rejected `-e`; remember not to try again.
                self.interpreter.evalstring = False
        # Strategy 2: feed the script through stdin.
        if not evalstring and not self.interpreter.tempfile:
            try:
                output = self._run_interpreter_with_pipe(code)
            except _RuntimeError:
                self.interpreter.tempfile = True
        # Strategy 3 (fallback): write the script to a temp file.
        while True:
            if not evalstring and self.interpreter.tempfile:
                output = self._run_interpreter_with_tempfile(code)
            output = output.replace(u'\r\n', u'\n').replace(u'\r', u'\n')
            # Search result in the last 5 lines of output
            for result_line in output.split(u'\n')[-5:]:
                if result_line[:9] == u'["result"':
                    break
            try:
                _, ok, result = json.loads(result_line)
            except json.decoder.JSONDecodeError as e:
                if not evalstring and self.interpreter.tempfile:
                    # Already on the most robust strategy — give up.
                    raise RuntimeError('%s:\n%s' % (e, output))
                else:
                    # Retry once via the temp-file strategy.
                    evalstring = False
                    self.interpreter.tempfile = True
                    continue
            if ok:
                return result
            else:
                raise ProgramError(result)
    def _run_interpreter(self, cmd, input=None):
        """Run `cmd`, optionally feeding `input` bytes to stdin; return stdout text."""
        stdin = PIPE if input else None
        p = Popen(cmd, stdin=stdin, stdout=PIPE, stderr=PIPE)
        stdout_data, stderr_data = p.communicate(input=input)
        # Bug fix: these messages previously interpolated the module-level
        # `external_interpreter`, reporting the wrong binary whenever a custom
        # interpreter was in use.
        if p.returncode != 0:
            raise RuntimeError('%r returns non-zero value! Error msg: %s' %
                               (self.interpreter, stderr_data.decode('utf8')))
        elif stderr_data:
            print("%r has warnings:" % self.interpreter, stderr_data.decode('utf8'))
        # Output unicode
        return stdout_data.decode('utf8')
    def _run_interpreter_with_string(self, code):
        # `-e`, `-eval` means run command string as Javascript
        # But some interpreters don't use `-eval`
        cmd = self.interpreter.command + ['-e', code]
        if len(list2cmdline(cmd)) > ARG_MAX:  # Direct compare, don't wait an Exception
            raise ValueError('code length is too long to run as a command')
        return self._run_interpreter(cmd)
    def _run_interpreter_with_pipe(self, code):
        # Input bytes
        return self._run_interpreter(self.interpreter.command, input=to_bytes(code))
    def _run_interpreter_with_tempfile(self, code):
        fd, filename = tempfile.mkstemp(prefix='execjs', suffix='.js')
        try:
            # Write bytes
            with open(fd, 'wb') as fp:
                fp.write(to_bytes(code))
            return self._run_interpreter(self.interpreter.command + [filename])
        finally:
            os.remove(filename)
    def _inject_script(self):
        """Wrap the accumulated source in the result-printing helper template."""
        if self.interpreter.evalstring:
            # ASCII-only when passing code on the command line.
            source = json_encoder_ensure_ascii.encode(self.source)
        else:
            source = json_encoder.encode(self.source)
        return injected_script.format(source=source)
class ExternalInterpreter:
    '''Describes one external Javascript interpreter binary and how to run it.'''
    @classmethod
    def get(cls, *args, **kwargs):
        '''Like the constructor, but prints the failure reason and returns
        None instead of raising.'''
        try:
            return cls(*args, **kwargs)
        except Exception as e:
            print(e, file=sys.stderr)
    def __init__(self, interpreter, name=None, tempfile=False, evalstring=False, args=None):
        path = which(interpreter)
        if path is None:
            raise ValueError('Can not find the given interpreter: %r' % interpreter)
        if name is None:
            # Derive a name from the binary's base name (extension stripped).
            name = os.path.basename(path).split('.')[0]
        # Normalise aliases like 'node'/'nodejs' to one canonical name.
        name = ExternalInterpreterNameAlias.get(name.lower().replace('.', ''), name)
        if name in DefaultExternalInterpreterOptions:
            # Known interpreters use their curated defaults.
            tempfile, evalstring, args = DefaultExternalInterpreterOptions[name]
        self.name = name
        self.path = path
        self.tempfile = tempfile
        self.evalstring = evalstring
        self.command = [path] + list(args or [])
    def __repr__(self):
        return '<ExternalInterpreter %s @ %r>' % (self.name, self.path)
def set_external_interpreter(interpreter, *args, **kwargs):
    '''
    Set the default external interpreter; returns the new ExternalInterpreter
    on success, or None (after printing the reason) on failure.
    Same arguments as the ExternalInterpreter constructor.
    '''
    global external_interpreter
    interpreter = ExternalInterpreter.get(interpreter, *args, **kwargs)
    if interpreter:
        external_interpreter = interpreter
    return interpreter
if external_interpreter:
    # Promote the binary path found during detection to a full descriptor.
    external_interpreter = ExternalInterpreter(external_interpreter)
# Prefer InternalJSEngine (via dynamic library loading)
if chakra_available:
    JSEngine = ChakraJSEngine
elif quickjs_available:
    JSEngine = QuickJSEngine
elif external_interpreter:
    JSEngine = ExternalJSEngine
else:
    # Callers must check `JSEngine is None` before use (see module docstring).
    JSEngine = None
if __name__ == '__main__':
    # Run test
    import subprocess
    cmds = [sys.executable, 'jsengine_test.py']
    subprocess.Popen(cmds)
| StarcoderdataPython |
394877 | <gh_stars>1-10
from setuptools import setup, find_packages
# Package metadata for the clusterweb distribution.
setup(name='clusterweb',
      version='0.0.0.2',
      description='ClusterWeb, a distributed system API for SSH and PBS systems',
      # NOTE(review): README.md is Markdown; PyPI needs
      # long_description_content_type='text/markdown' to render it — TODO confirm.
      long_description=open('README.md').read(),
      url='https://github.com/gndctrl2mjrtm/cweb-project',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=find_packages(),
      zip_safe=False,
      entry_points={
          # Installs the `c-web` console command.
          'console_scripts': [
              'c-web = clusterweb.local.clusterweb:main'
          ]
      })
| StarcoderdataPython |
4922268 | from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
# Initializing application
# NOTE(review): this module-level app is immediately shadowed inside
# create_app below and appears unused — confirm before removing.
app = Flask(__name__, instance_relative_config=True)
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    `config_name` selects an entry from `config_options` (e.g. 'development').
    """
    app = Flask(__name__)
    # Creating the app configurations
    app.config.from_object(config_options[config_name])
    # Initializing Flask Extension (registers Bootstrap on the app)
    bootstrap = Bootstrap(app)
    # Will add the articles
    # Registering the blueprint
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    # Setting up configurations
    from .request import configure_request
    configure_request(app)
    return app
| StarcoderdataPython |
1633098 | <filename>test/test_jsonhash.py
import subprocess
import json
import jsonhash
# Fixture payloads: mixtures of dicts, lists, strings, ints and floats used
# to check that jsonhash is independent of dict key ordering.
DATA = [
    {'a': 1},
    {'a': 1, 'b': 2, 'c': 3, 'd': 4},
    [1, 3, 2, 5, 43, 3, 1.25],
    {'a': 'just some dict', '1': [
        'with lists', 1, 'and intergeres',
        {'a': {'and even more': 'stuff', 'c': -1}}
    ]}
]
# Program run in a child interpreter: reads JSON from stdin and writes the
# jsonhash hex digest to stdout (hash seed controlled via PYTHONHASHSEED).
code = """
import sys
import jsonhash
import json
data = json.load(sys.stdin)
sys.stdout.write(jsonhash.hash(data).hexdigest())
"""
def in_subprocess(data, seed):
    """Hash `data` with jsonhash in a fresh interpreter whose PYTHONHASHSEED
    is pinned to `seed`; return the hex digest as text.

    A child process is used so dict iteration order can differ between calls,
    proving the hash is order-independent.
    """
    import os
    import sys
    # Keep the parent environment (PATH, PYTHONPATH, ...) so the interpreter
    # and the jsonhash module remain importable; only pin the hash seed.
    # (The original passed ONLY PYTHONHASHSEED, losing PATH.)
    env = dict(os.environ, PYTHONHASHSEED=str(seed))
    # sys.executable guarantees the same interpreter as the test run;
    # a bare 'python' may not resolve.
    cmd = [sys.executable, '-c', code]
    proc = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    # communicate() closes stdin and avoids pipe-buffer deadlocks; the
    # payload must be bytes on Python 3 (plain str raised TypeError before).
    stdout, stderr = proc.communicate(json.dumps(data).encode('utf8'))
    # Compare bytes to bytes — the old `== ''` check could never pass on py3.
    assert stderr == b'', stderr
    return stdout.decode('utf8')
def compare(data0, data1, algorithm=None):
    """Assert the two payloads hash identically, both in-process and across
    child interpreters started with different hash seeds."""
    digest0 = jsonhash.hash(data0, algorithm).hexdigest()
    digest1 = jsonhash.hash(data1, algorithm).hexdigest()
    assert digest0 == digest1
    # Two different PYTHONHASHSEED values shuffle dict iteration order.
    assert in_subprocess(data0, 123) == in_subprocess(data1, 321)
def test_jsonjash():
    """End-to-end check over scalars, containers and the DATA fixtures."""
    # Simple values first, then the whole fixture list, then each fixture.
    for case in ('a', 1, 1.25, ['a'], {1: 'a'}, DATA):
        compare(case, case)
    for data in DATA:
        compare(data, data)
| StarcoderdataPython |
9694683 | import numpy as np
def VGGPreprocessing(originImgMatrix):
    """Subtract the per-channel mean RGB value from an H*W*3 image.

    "The only preprocessing we do is subtracting the mean RGB value,
    computed on the training set, from each pixel." (VGG paper)
    NOTE: here the mean is computed over the input image itself.

    Accepts an ndarray or anything convertible to one (e.g. nested lists);
    returns a float ndarray of the same shape.
    """
    if type(originImgMatrix) is not np.ndarray:
        # Bug fix: np.ndarray(x) interprets x as a *shape* argument and
        # produced garbage/raised; np.asarray performs the intended conversion.
        originImgMatrix = np.asarray(originImgMatrix)
    # 矩阵X*Y*3
    # axis=(0, 1) collapses height and width, leaving one mean per RGB
    # channel, shape (3,), which then broadcasts over every pixel.
    originImgMatrix_RGBMean = np.mean(originImgMatrix, axis=(0, 1))
    subtract_Img = originImgMatrix - originImgMatrix_RGBMean
    return subtract_Img
def VGGPreprocessingBatch(batch_originImgMatrix):
    """Apply VGGPreprocessing to every image in the batch.

    Mutates the batch in place (each element replaced by its mean-subtracted
    version) and also returns it for convenience.
    """
    for position in range(len(batch_originImgMatrix)):
        batch_originImgMatrix[position] = VGGPreprocessing(batch_originImgMatrix[position])
    return batch_originImgMatrix
| StarcoderdataPython |
12844677 | #!/usr/bin/env python3
import sys, os, shutil, json, yaml
from time import localtime
# Dataset filters: when set, get_genomes() skips everything outside the
# simulated / GIAB subsets respectively.
ONLY_SIMULATED = False
ONLY_GIAB = True
# Make import from parent directory possible
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import modules.file_utils as file_utils
with open("constants.yml", "r") as constants_file:
    # safe_load: the config is plain data, and yaml.load without an explicit
    # Loader is deprecated and can construct arbitrary Python objects.
    constants = yaml.safe_load(constants_file)
# Directory Paths
reference_directory = "data/references/"
datasets_directory = "data/datasets/"
def log_task_start(item, path):
    """Record `path` as started and announce the download.

    NOTE(review): relies on the module-level `started_tasks` list that is
    defined further down in this file.
    """
    started_tasks.append(path)
    print("Downloading {}...".format(item), flush=True)
def log_task_end(item, path):
    """Record `path` as finished and announce completion."""
    finished_tasks.append(path)
    print("Downloaded {}".format(item), flush=True)
def log_data_present(item):
    """Announce that `item` was already downloaded on a previous run."""
    print("{} already present".format(item), flush=True)
####################
# REFERENCE GENOMES
####################
# Add new tools needed to download reference genomes here
tools = ["twoBitToFa"]
# Constants
fasta_file_ending = ".fa"
fastq_file_ending = ".fastq"
# UCSC rsync mirror serving the Linux x86_64 helper binaries.
rsync_uri = "rsync://hgdownload.soe.ucsc.edu/genome/admin/exe/linux.x86_64/"
# Progress bookkeeping, appended to by the log_task_* helpers above.
started_tasks = []
finished_tasks = []
def get_human_genome(genome_id, file_path):
    """Download the UCSC .2bit archive for `genome_id` and convert it to FASTA
    at `file_path` using the twoBitToFa helper binary."""
    two_bit_path = file_path + ".2bit"
    url = "http://hgdownload.soe.ucsc.edu/goldenPath/" + "{0}/bigZips/{0}.2bit".format(genome_id)
    started_tasks.append(two_bit_path)
    file_utils.download(url, two_bit_path)
    finished_tasks.append(two_bit_path)
    # Convert .2bit file to .fa
    print("Extracting {} from 2bit file...".format(genome_id), flush=True)
    conversion_command = "chmod +x {0}twoBitToFa && {0}twoBitToFa {1} {2}".format(
        reference_directory, two_bit_path, file_path)
    os.system(conversion_command)
    # The intermediate .2bit archive is no longer needed.
    file_utils.delete(two_bit_path)
def get_p_falciparum(genome_id, file_path):
    """Download and unpack the P. falciparum (malaria) genome, leaving the
    FASTA file at `file_path`."""
    archive_path = reference_directory + "malaria.tar.bz2"
    file_utils.download("http://bp1.s3.amazonaws.com/malaria.tar.bz2", archive_path)
    print("Unzipping {}...".format(genome_id), flush=True)
    extracted_directory = file_utils.unzip(archive_path)
    # Move the genome FASTA to its final location, then clean up.
    os.rename(extracted_directory + "/genome_sequence_pfal.fa", file_path)
    file_utils.delete(archive_path)
    file_utils.delete(extracted_directory)
# Add new reference genomes with options here
# Each entry maps genome id -> download callable plus display metadata that
# is written to references.json by get_genomes().
genomes = {
    "hg19": {
        "getter": get_human_genome,
        "name": "Human (hg19)",
        "source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
    },
    "hg38": {
        "getter": get_human_genome,
        "name": "Human (hg38)",
        "source": "http://hgdownload.cse.ucsc.edu/downloads.html#human"
    },
    "pfal": {
        "getter": get_p_falciparum,
        "name": "Malaria",
        "source": "http://bioinf.itmat.upenn.edu/BEERS/bp1/datasets.php"
    }
}
def get_tools():
    """Fetch each helper binary listed in `tools` from the UCSC rsync mirror,
    skipping binaries that are already present."""
    for tool_name in tools:
        tool_path = reference_directory + tool_name
        if os.path.exists(tool_path):
            log_data_present(tool_name)
            continue
        log_task_start(tool_name, tool_path)
        os.system("rsync -aPq {} {}".format(rsync_uri + tool_name, tool_path))
        log_task_end(tool_name, tool_path)
def remove_tools():
    """Delete every downloaded helper binary."""
    for tool_name in tools:
        file_utils.delete(reference_directory + tool_name)
def genome_path(genome_id):
    """Return the FASTA file path for a reference genome id."""
    return "{}{}{}".format(reference_directory, genome_id, fasta_file_ending)
def get_genomes():
    """Download every configured reference genome that is not yet present and
    keep references.json in sync with what was downloaded."""
    genome_infos_path = os.path.join(reference_directory, "references.json")
    genome_infos = []
    # Start from the existing metadata so reruns extend rather than overwrite.
    if os.path.exists(genome_infos_path):
        with open(genome_infos_path, "r") as genome_infos_file:
            genome_infos = json.load(genome_infos_file)
    for genome_id, genome_specification in genomes.items():
        # Honour the module-level subset filters.
        if ONLY_SIMULATED and genome_id not in ["hg19", "pfal"]:
            print("Skipping {} (only simulated)".format(genome_specification["name"]))
            continue
        if ONLY_GIAB and genome_id not in ["hg38"]:
            print("Skipping {} (only giab)".format(genome_specification["name"]))
            continue
        file_path = genome_path(genome_id)
        info_path = file_path.split(fasta_file_ending)[0] + ".yml"
        genome_getter = genome_specification["getter"]
        if not os.path.exists(file_path):
            log_task_start(genome_id, file_path)
            genome_getter(genome_id, file_path)
            genome_infos.append({
                "id": genome_id,
                "name": genome_specification["name"],
                "source": genome_specification["source"]
            })
            # Re-written after every genome so progress survives interruption.
            with open(genome_infos_path, "w") as genome_infos_file:
                genome_infos_file.write(json.dumps(genome_infos))
            log_task_end(genome_id, file_path)
        else:
            log_data_present(genome_id)
###################
# RNASEQ DATA SETS
###################
def write_dataset_json(info):
    """Persist dataset metadata as <datasets_directory>/<id>.json.

    NOTE: mutates the caller's `info` dict in place, stamping the FILE/PAIRED
    constants, a creation timestamp and a cleared error flag before dumping.
    """
    dataset_info_path = datasets_directory + info["id"] + ".json"
    info["method"] = constants["dataset"]["FILE"]
    info["layout"] = constants["dataset"]["PAIRED"]
    info["created"] = localtime()  # presumably time.localtime, imported above -- TODO confirm
    info["error"] = False
    with open(dataset_info_path, "w") as dataset_info_file:
        json.dump(info, dataset_info_file)
def get_baruzzo(dataset, directory):
    """Download and unpack one Baruzzo/BEERS benchmark tarball into `directory`.

    The archive is fetched from the bp1 S3 bucket, extracted, its files are
    staged under <directory>/beers/, then the forward/reverse FASTA reads and
    the .cig truth file are moved to the dataset root under canonical names.
    Finally the layout is registered via write_dataset_json.
    """
    zip_name = "{}.tar.bz2".format(dataset["file_name"])
    url = "http://bp1.s3.amazonaws.com/{}".format(zip_name)
    download_path = directory + "/" + zip_name
    file_utils.download(url, download_path)
    print("Unzipping {}...".format(dataset["name"]), flush=True)
    file_utils.unzip(download_path)
    # Move files to /beers directory
    beers_directory = directory + "/beers/"
    file_utils.create_directory(beers_directory)
    for file_name in os.listdir(directory):
        file_path = directory + "/" + file_name
        if not os.path.isdir(file_path) and not file_path == download_path:
            shutil.move(file_path, beers_directory + file_name)
    # Move FASTQ files to root and rename
    def setup_file(direction):
        # e.g. <id>.forward.fa -> <directory>/forward<fastq_file_ending>
        file_name = "{}.{}.fa".format(dataset["id"], direction)
        file_origin = beers_directory + file_name
        file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
        os.rename(file_origin, file_destination)
        return file_name, file_destination
    forward_file_name, forward_file_path = setup_file(constants["dataset"]["FORWARD"])
    reverse_file_name, reverse_file_path = setup_file(constants["dataset"]["REVERSE"])
    # Move CIG file to root and rename
    truth_file_name = "{}.cig".format(dataset["id"])
    truth_file_path = directory + "/truth.cig"
    os.rename(beers_directory + truth_file_name, truth_file_path)
    # Drop the archive and the now-empty staging directory.
    file_utils.delete(download_path)
    file_utils.delete(beers_directory)
    write_dataset_json({
        "id": dataset["id"],
        "name": dataset["name"],
        "readLength": "100",
        "data": {
            constants["dataset"]["FORWARD"]: {
                "name": forward_file_name,
                "path": forward_file_path,
            },
            constants["dataset"]["REVERSE"]: {
                "name": reverse_file_name,
                "path": reverse_file_path,
            }
        },
        "evaluation": {
            "type": "beers",
            "truth_file": {
                "name": truth_file_name,
                "path": truth_file_path
            }
        }
    })
def get_from_encode(dataset, directory):
    """Download a paired-end FASTQ dataset from the ENCODE project.

    Each direction (forward/reverse) is fetched gzip-compressed from
    encodeproject.org, unzipped, and renamed to <direction><fastq_file_ending>
    inside `directory`; the resulting layout is registered through
    write_dataset_json.
    """
    dataset_info = {
        "id": dataset["id"],
        "name": dataset["name"],
        "readLength": "76",
        "data": {
            constants["dataset"]["FORWARD"]: {},
            constants["dataset"]["REVERSE"]: {}
        },
        "evaluation": dataset["evaluation"]
    }
    def get_file(file_id, direction, directory):
        # Download + unzip one ENCODE file and move it into its final place.
        print("Downloading {} file...".format(direction), flush=True)
        zip_name = "{}.fastq.gz".format(file_id)
        url = "https://www.encodeproject.org/files/{}/@@download/{}".format(
            file_id,
            zip_name
        )
        download_path = directory + "/" + zip_name
        file_utils.download(url, download_path)
        print("Unzipping {} file...".format(direction), flush=True)
        file_utils.unzip(download_path)
        file_utils.delete(download_path)
        original_name = "{}.fastq".format(file_id)
        file_origin = "{}/{}".format(directory, original_name)
        file_destination = "{}/{}{}".format(directory, direction, fastq_file_ending)
        os.rename(file_origin, file_destination)
        return original_name, file_destination
    for direction, file_id in dataset["files"].items():
        original_name, file_destination = get_file(file_id, direction, directory)
        dataset_info["data"][direction]["name"] = original_name
        dataset_info["data"][direction]["path"] = file_destination
    write_dataset_json(dataset_info)
# Baruzzo Data Sets
# * id is prefix of unzipped FASTA files
# * file_name is zip name given in download url
# Baruzzo Data Sets
# * id is prefix of unzipped FASTA files
# * file_name is zip name given in download url
rna_seq_data = [
    {
        "id": "GM12878",
        "name": "GIAB Pilot Genome",
        "getter": get_from_encode,
        "files": {
            constants["dataset"]["FORWARD"]: "ENCFF000EWJ",
            constants["dataset"]["REVERSE"]: "ENCFF000EWX"
        },
        "evaluation": { "type": "giab" }
    },
    # Simulated human datasets: one replicate (r1) per difficulty tier
    # (T1-T3).  The r2/r3 replicates of every tier used to be listed here
    # and are currently disabled to keep download volume down.
    {
        "id": "simulated_reads_HG19t1r1",
        "getter": get_baruzzo,
        "file_name": "human_t1r1",
        "name": "Simulated Human T1R1"
    },
    {
        "id": "simulated_reads_HG19t2r1",
        "getter": get_baruzzo,
        "file_name": "human_t2r1",
        "name": "Simulated Human T2R1"
    },
    {
        "id": "simulated_reads_HG19t3r1",
        "getter": get_baruzzo,
        "file_name": "human_t3r1",
        "name": "Simulated Human T3R1"
    },
    # Simulated malaria (P. falciparum) datasets, same tier/replicate scheme
    # as above; r2/r3 replicates likewise disabled.
    {
        "id": "simulated_reads_PFALt1r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t1r1",
        "name": "Simulated Malaria T1R1"
    },
    {
        "id": "simulated_reads_PFALt2r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t2r1",
        "name": "Simulated Malaria T2R1"
    },
    {
        "id": "simulated_reads_PFALt3r1",
        "getter": get_baruzzo,
        "file_name": "malaria_t3r1",
        "name": "Simulated Malaria T3R1"
    }
]
def get_datasets():
    """Fetch every dataset in rna_seq_data that is not already on disk.

    Honours the ONLY_SIMULATED / ONLY_GIAB module switches; each download is
    bracketed with log_task_start / log_task_end so the top-level cleanup
    handler can delete partially fetched dataset directories.
    """
    for dataset in rna_seq_data:
        if ONLY_SIMULATED and not dataset["id"].startswith("simulated"):
            print("Skipping {} (only simulated)".format(dataset["name"]))
            continue
        if ONLY_GIAB and dataset["id"] != "GM12878":
            print("Skipping {} (only giab)".format(dataset["name"]))
            continue
        dataset_directory = datasets_directory + dataset["id"]
        dataset_getter = dataset["getter"]
        if not os.path.isdir(dataset_directory):
            file_utils.create_directory(dataset_directory)
            log_task_start(dataset["name"], dataset_directory)
            dataset_getter(dataset, dataset_directory)
            log_task_end(dataset["name"], dataset_directory)
        else:
            log_data_present(dataset["name"])
###################
# SCRIPT EXECUTION
###################
# Entry point: fetch tools, genomes and datasets, then drop the tools again.
# Whatever was started but not finished (tracked by the log_task_* helpers)
# is deleted in the finally block so a rerun starts from a clean state.
print("", flush=True)
print("Downloading data", flush=True)
print("", flush=True)
file_utils.create_directory(reference_directory)
file_utils.create_directory(datasets_directory)
try:
    get_tools()
    get_genomes()
    get_datasets()
    remove_tools()
finally:
    for path in started_tasks:
        if path not in finished_tasks:
            print("An error occurred, deleting {}".format(path))
            file_utils.delete(path)
| StarcoderdataPython |
4989457 | <reponame>TerezijaKrecic/euler-problems
# The sum of the primes below 10 is 2+3+5+7 = 17.
# Find the sum of all the primes below two million (Project Euler, problem 10).
def ali_je_prastevilo(n):
    """Return True if n is a prime number.

    Trial division is bounded by sqrt(n): if n had a divisor greater than its
    square root it would also have one below it, so checking further is
    redundant.  The original version tested every i in range(2, n), which is
    O(n) per call and made the two-million scan below effectively never
    finish (as the author's own comments note).
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2          # 2 is the only even prime
    i = 3
    while i * i <= n:          # only odd candidates up to sqrt(n)
        if n % i == 0:
            return False
        i += 2
    return True
# One-shot set-comprehension alternative (kept for reference):
#prastevila = {i for i in range(2000000) if ali_je_prastevilo(i)}
#print(sum(prastevila))
# Collect all primes below two million and print their sum.
vsota = {2}
for i in range(3, 2000000):
    if ali_je_prastevilo(i):
        vsota.add(i)
print(sum(vsota))
# NOTE(review): with the naive O(n) primality test this "grinds for a while
# and never finishes"; the author's recorded checkpoints:
# sum of primes below 50,000 is 121013308
# sum of primes between 50,000 and 80,000 is 175474900
# sum of primes between 80,000 and 110,000 is 248326848
6605250 | import os
# ------------------------------------------------
# MAIN-CONFIG ------------------------------------
# ------------------------------------------------
c.NotebookApp.allow_origin = '*'            # accept requests from any origin (CORS wide open)
c.NotebookApp.allow_root = True             # permit running as root (typical inside containers)
c.NotebookApp.allow_remote_access = True    # do not restrict to localhost clients
c.NotebookApp.ip = '*'                      # bind on all network interfaces
c.NotebookApp.notebook_dir = '/shared'      # serve notebooks from the shared volume
c.NotebookApp.open_browser = False          # headless server: never launch a local browser
c.NotebookApp.port = 8888
c.NotebookApp.trust_xheaders = True         # honour X-Forwarded-* headers from a reverse proxy
c.NotebookApp.iopub_data_rate_limit = 1.0e10  # effectively disable output rate throttling
# ------------------------------------------------
# AUTHENTICATION --------------------------------
# ------------------------------------------------
# Authentication: a login token and/or a prepared password hash can be
# injected through the environment.
if "JUPYTER_TOKEN" in os.environ:
    c.NotebookApp.token = os.environ['JUPYTER_TOKEN']
if "JUPYTER_PASSWORD_HASH" in os.environ:
    # Repairs the redacted/broken original line: the password hash comes from
    # the same env var the guard checks.  The redundant `c = get_config()`
    # that sat between the check and the assignment is dropped -- `c` is
    # already the active config object used throughout this file.
    c.NotebookApp.password = os.environ['JUPYTER_PASSWORD_HASH']
| StarcoderdataPython |
1742989 | <filename>google/appengine/tools/devappserver2/java_runtime.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves content for "script" handlers using the Java runtime."""
import os
import os.path
import sys
import tempfile
import threading
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import java_application
from google.appengine.tools.devappserver2 import util
# TODO: figure out what's needed to react to file changes
class JavaRuntimeInstanceFactory(instance.InstanceFactory):
    """A factory that creates new Java runtime Instances."""

    # Lifecycle URL handlers; both are served by the Java app itself and
    # restricted to admin.
    START_URL_MAP = appinfo.URLMap(
        url='/_ah/start',
        script='_java_app',
        login='admin')
    WARMUP_URL_MAP = appinfo.URLMap(
        url='/_ah/warmup',
        script='_java_app',
        login='admin')
    FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.ALWAYS

    def __init__(self, request_data, runtime_config_getter, module_configuration):
        """Initializer for JavaRuntimeInstanceFactory.

        Args:
          request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
              with request information for use by API stubs.
          runtime_config_getter: A function that can be called without arguments
              and returns the runtime_config_pb2.RuntimeConfig containing the
              configuration for the runtime.
          module_configuration: An application_configuration.ModuleConfiguration
              instance representing the configuration of the module that owns the
              runtime.
        """
        super(JavaRuntimeInstanceFactory, self).__init__(request_data, 1)
        self._runtime_config_getter = runtime_config_getter
        self._module_configuration = module_configuration
        self._application_lock = threading.Lock()
        self._java_application = java_application.JavaApplication(
            self._module_configuration)
        # Jetty 9 launch path is used for the 'vm' runtime and flex-env apps.
        self._for_jetty9 = (module_configuration.runtime == 'vm' or
                            util.is_env_flex(module_configuration.env))
        self._java_command = self._make_java_command()

    def _make_java_command(self):
        """Build the argv list used to launch one Java instance process."""
        # We should be in .../google/appengine/tools/devappserver2/java_runtime.py
        # and we want to find .../google/appengine/tools and thence
        # .../google/appengine/tools/java/lib
        java_home = os.environ.get('JAVA_HOME')
        if java_home and os.path.exists(java_home):
            java_bin = os.path.join(java_home, 'bin/java')
        else:
            java_bin = 'java'  # fall back to whatever `java` is on PATH
        java_dir = os.environ.get('APP_ENGINE_JAVA_PATH', None)
        tools_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        if not java_dir or not os.path.exists(java_dir):
            java_dir = os.path.join(tools_dir, 'java')
        java_lib_dir = os.path.join(java_dir, 'lib')
        assert os.path.isdir(java_lib_dir), java_lib_dir
        class_path = os.path.join(java_lib_dir, 'appengine-tools-api.jar')
        assert os.path.isfile(class_path), class_path
        jdk_overrides_jar = os.path.join(java_lib_dir, 'override',
                                         'appengine-dev-jdk-overrides.jar')
        assert os.path.isfile(jdk_overrides_jar), jdk_overrides_jar
        if self._for_jetty9:
            # Jetty 9 launch: run start.jar with jetty.home / jetty.base set,
            # both overridable through the environment.
            jetty_home = os.environ.get('APP_ENGINE_JETTY_HOME', None)
            jetty_base = os.environ.get('APP_ENGINE_JETTY_BASE', None)
            if not jetty_home:
                jetty_home = os.path.join(java_lib_dir, 'java-managed-vm',
                                          'appengine-java-vmruntime')
            if not jetty_base:
                jetty_base = os.path.join(java_lib_dir, 'jetty-base-sdk')
            args = [
                java_bin,
                ('-Dgcloud.java.application=%s' %
                 self._module_configuration.application_root),
                '-Djetty.home=%s' % jetty_home,
                '-Djetty.base=%s' % jetty_base,
            ]
            args.extend(self._runtime_config_getter().java_config.jvm_args)
            args.append('-jar')
            args.append('%s/start.jar' % jetty_home)
        else:
            # Classic launch: run the SDK's StandaloneInstance main class.
            args = [
                java_bin,
                '-cp', class_path,
                '-Dappengine.sdk.root=' + java_dir,
                '-Xbootclasspath/p:' + jdk_overrides_jar,
            ]
            if sys.platform == 'darwin':
                # On macOS the GUI toolkit must run on the first thread.
                args.append('-XstartOnFirstThread')
            args.extend(self._runtime_config_getter().java_config.jvm_args)
            args.append(
                'com.google.appengine.tools.development.devappserver2.'
                'StandaloneInstance')
        return args

    def get_restart_directories(self):
        """Returns a list of directories where changes trigger a restart.

        Returns:
          A list of directories where changes trigger a restart.
        """
        # TODO: implement
        return []

    def files_changed(self):
        """Called when a file relevant to the factory *might* have changed."""
        # TODO: implement

    def configuration_changed(self, config_changes):
        """Called when the configuration of the module has changed.

        Args:
          config_changes: A set containing the changes that occured. See the
              *_CHANGED constants in the application_configuration module.
        """
        # TODO: implement

    def new_instance(self, instance_id, expect_ready_request=False):
        """Create and return a new Instance.

        Args:
          instance_id: A string or integer representing the unique (per module) id
              of the instance.
          expect_ready_request: If True then the instance will be sent a special
              request (i.e. /_ah/warmup or /_ah/start) before it can handle external
              requests.

        Returns:
          The newly created instance.Instance.
        """
        def instance_config_getter():
            # Per-instance view of the runtime config with the id stamped in.
            runtime_config = self._runtime_config_getter()
            runtime_config.instance_id = str(instance_id)
            return runtime_config

        def extra_args_getter(port):
            # Jetty learns its listen port through this system property.
            return 'jetty.port=%s' % port

        env = self._java_application.get_environment()
        runtime_config = instance_config_getter()
        for env_entry in runtime_config.environ:
            env[env_entry.key] = env_entry.value
        if self._for_jetty9:
            start_process_flavor = http_runtime.START_PROCESS_REVERSE_NO_FILE
            # Fresh temp dir per instance for the JVM's rotated log files.
            env['APP_ENGINE_LOG_CONFIG_PATTERN'] = (
                os.path.join(tempfile.mkdtemp(suffix='gae'), 'log.%g'))
        else:
            start_process_flavor = http_runtime.START_PROCESS_FILE
        with self._application_lock:
            proxy = http_runtime.HttpRuntimeProxy(
                self._java_command,
                instance_config_getter,
                self._module_configuration,
                env=env,
                start_process_flavor=start_process_flavor,
                extra_args_getter=extra_args_getter)
            return instance.Instance(self.request_data,
                                     instance_id,
                                     proxy,
                                     self.max_concurrent_requests,
                                     self.max_background_threads,
                                     expect_ready_request)
| StarcoderdataPython |
3442660 | from selenium import webdriver
from http.client import CannotSendRequest
from socket import error
from selenium.webdriver.remote.command import Command
import os
import random
CURDIR = os.path.dirname(os.path.abspath(__file__))
class DriverManager():
    """Lazily creates and caches named Chrome WebDriver instances."""

    def __init__(self):
        # One slot per logical browser name; None means "not started yet".
        self.drivers = {"main":None}
        # Random 10-digit identifier, handy for telling managers apart in logs.
        self.id = "".join([str(random.randint(0,9)) for i in range(10)])

    def get_driver(self,name=None):
        """Return the driver for `name` (default "main"), (re)starting it
        when missing or no longer alive."""
        name = name or "main"
        if name not in self.drivers:
            self.drivers[name] = None
        if self.drivers[name] is None or not is_alive(self.drivers[name]):
            # chromedriver.exe is expected next to this module.
            self.drivers[name] = webdriver.Chrome(os.path.join(CURDIR,"chromedriver.exe"))
        return self.drivers[name]

    def close_driver(self,name=None):
        """Close and forget the driver for `name`; no-op when absent/unstarted."""
        name = name or "main"
        if name not in self.drivers:
            pass
        elif self.drivers[name] is None:
            pass
        else:
            # NOTE(review): .close() only closes the current window; .quit()
            # would also terminate the chromedriver process -- confirm intent.
            self.drivers[name].close()
            self.drivers[name] = None
# Shared module-level singleton; the helper functions below delegate to it.
driver_manager = DriverManager()

def get_driver(name=None):
    """Return the shared driver registered under `name`, creating it if needed."""
    # print("Driver ID : {}".format(driver_manager.id))
    return driver_manager.get_driver(name)
def close_driver(name=None):
    """Close the shared driver registered under `name` via the singleton."""
    driver_manager.close_driver(name)
def is_alive(driver):
    """Best-effort liveness probe: ask the remote end for its status."""
    try:
        driver.execute(Command.STATUS)
        return True
    except (error, CannotSendRequest):
        # socket.error / dead HTTP connection => the browser session is gone.
        return False
| StarcoderdataPython |
9679658 | <filename>mysite/myAPI/checkcode.py
# -*- coding: utf-8 -*-
# python3.5
import os,sys
from io import BytesIO as StringIO
from django.shortcuts import render
import random
from django.http.response import HttpResponseRedirect, HttpResponse
from PIL import Image, ImageDraw, ImageFont, ImageFilter
FONT_TYPE = "static_common/home/fonts/DroidSans.ttf"

# Character pool for captcha codes.  Visually ambiguous glyphs (I, L, O, W,
# Z and the digits 0/1/2/8/9) are deliberately absent; the letter block
# appears twice, so letters are drawn more often than digits.
_letter_cases = "abcdefghnpqrstuvxy".upper()
_upper_cases = _letter_cases
_numbers = ''.join(map(str, range(3, 8)))
init_chars = _letter_cases + _upper_cases + _numbers

def get_chars(chars=init_chars, length=4):
    """Draw `length` distinct characters from `chars` for a fresh captcha."""
    picked = random.sample(chars, length)
    return picked
def create_validate_code(request,size=(120, 30), mode="RGB",
                         bg_color=(255, 255, 255),
                         fg_color=(255, 0, 0),
                         font_size=22,
                         font_type=FONT_TYPE,
                         draw_lines=True,
                         n_line=(1, 3),
                         draw_points=True,
                         point_chance = 2):
    """Render the captcha stored in request.session['checkcode'] as an image.

    Distractor lines, pixel noise and a small random perspective warp are
    applied to make OCR harder.  Returns a (PIL.Image, code_string) pair.
    """
    width, height = size
    img = Image.new(mode, size, bg_color)
    draw = ImageDraw.Draw(img)
    def create_lines():
        # Draw a random number (within n_line) of black distractor lines.
        line_num = random.randint(*n_line)
        for i in range(line_num):
            begin = (random.randint(0, size[0]), random.randint(0, size[1]))
            end = (random.randint(0, size[0]), random.randint(0, size[1]))
            draw.line([begin, end], fill=(0, 0, 0))
    def create_points():
        # Sprinkle noise pixels; point_chance is the per-pixel percentage.
        chance = min(100, max(0, int(point_chance)))
        for w in range(width):
            for h in range(height):
                tmp = random.randint(0, 100)
                if tmp > 100 - chance:
                    draw.point((w, h), fill=(0, 0, 0))
    def create_strs():
        # The code was generated earlier (gcheckcode) and stashed in the
        # session; this function only renders it.
        c_chars =request.session['checkcode']
        strs = ' %s ' % ' '.join(c_chars)
        font = ImageFont.truetype(font_type, font_size)
        # NOTE(review): font.getsize() was removed in Pillow 10 -- confirm the
        # pinned Pillow version still provides it.
        font_width, font_height = font.getsize(strs)
        draw.text(((width - font_width) / 3, (height - font_height) / 3),
                  strs, font=font, fill=fg_color)
        return ''.join(c_chars)
    if draw_lines:
        create_lines()
    if draw_points:
        create_points()
    strs = create_strs()
    # Coefficients for a slight random perspective transform.
    params = [1 - float(random.randint(1, 12)) / 100,
              0,
              0,
              0,
              1 - float(random.randint(1, 10)) / 100,
              float(random.randint(1, 2)) / 500,
              0.001,
              float(random.randint(1, 2)) / 500
              ]
    img = img.transform(size, Image.PERSPECTIVE, params)
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
    return img, strs
def gcheckcode(request):
    """Generate a fresh captcha, store it in the session, return it joined."""
    listchar = get_chars()
    request.session['checkcode'] = listchar
    return ''.join(listchar)
def checkcodeGIF(request):
    """Django view: render the session's captcha as a GIF image response.

    If the session has no captcha yet, a fixed fallback code is installed
    first so create_validate_code() has something to draw.
    """
    if not request.session.get('checkcode',''):
        request.session['checkcode'] = '1234'
    img_type="GIF"
    checkcode = create_validate_code(request)
    mstream = StringIO()
    checkcode[0].save(mstream, img_type)
    codeImg = mstream.getvalue()
    mstream.close()
    # Bug fix: HttpResponse's second positional argument is the Content-Type
    # header; "GIF" is not a MIME type -- browsers expect "image/gif".
    return HttpResponse(codeImg, "image/gif")
def getcheckcode(request):
    """Refresh the captcha, then re-render the template named by ?path=.

    Redirects to the registration page when no template path is supplied.
    """
    g_checkcode = gcheckcode(request)
    path = request.GET.get('path','')
    if not path:
        return HttpResponseRedirect('/home/register/')
    # NOTE(review): `path` comes straight from the query string and is used
    # as a template name, and locals() exposes every local variable to the
    # template context.  Consider whitelisting templates and passing an
    # explicit context dict.
    return render(request, path, context=locals())
| StarcoderdataPython |
4908582 | import sys
# Fast stdin reading; rebinding `input` shadows the builtin (a common
# competitive-programming speed trick).
input = sys.stdin.readline
n, a, b = map(int, input().split())
# The winner is decided by the parity of the gap between a and b only;
# n is read but does not influence the outcome.
if (b - a) % 2 == 0:
    print('Alice')
else:
    print('Borys')
| StarcoderdataPython |
print('=====Desafio 008=====')
# Read a distance in metres and print it converted to the other metric units.
m = float(input('Uma distância em metros: '))
km = m * 0.001   # 1 km  = 1000 m
hm = m * 0.01    # 1 hm  = 100 m
dam = m * 0.1    # 1 dam = 10 m
dm = m * 10      # 1 m   = 10 dm
cm = m * 100     # 1 m   = 100 cm
mm = m * 1000    # 1 m   = 1000 mm
# Fixed misspelled user-facing labels: "kilômetros" -> "quilômetros",
# "hectrometros" -> "hectômetros".
print('A medida de {} metros corresponde a \n {:.3f} quilômetros \n {:.2f} '
      'hectômetros \n {:.1f} decametros \n {:.0f} decimetros \n {:.0f} '
      'centimetros \n {:.0f} milimetros'.format(m, km, hm, dam, dm, cm, mm))
| StarcoderdataPython |
3481787 | import logging
from checkov.arm.parser import cfn_yaml
from yaml.parser import ParserError, ScannerError
from yaml import YAMLError
from checkov.common.parsers.node import dict_node
from checkov.common.parsers.json import parse as json_parse
LOGGER = logging.getLogger(__name__)
def parse(filename):
    """Load an ARM template from `filename`.

    Tries the CloudFormation-style YAML loader first and falls back to the
    JSON parser for files whose syntax trips the YAML scanner.  Returns a
    (template, template_lines) pair, or (None, None) when the file cannot
    be parsed or does not look like an ARM template (it must expose both
    '$schema' and 'resources' top-level keys).
    """
    template = None
    template_lines = None
    try:
        (template, template_lines) = cfn_yaml.load(filename)
    except IOError as e:
        if e.errno == 2:
            LOGGER.error('Template file not found: %s', filename)
        elif e.errno == 21:
            LOGGER.error('Template references a directory, not a file: %s',
                         filename)
        elif e.errno == 13:
            LOGGER.error('Permission denied when accessing template file: %s',
                         filename)
    except UnicodeDecodeError as err:
        LOGGER.error('Cannot read file contents: %s', filename)
    except cfn_yaml.CfnParseError as err:
        # Deliberate best-effort: an unparseable template yields (None, None).
        pass
    except ScannerError as err:
        # Tabs or unknown escape characters usually mean the file is JSON
        # rather than YAML, so retry with the JSON parser.
        if err.problem in [
                'found character \'\\t\' that cannot start any token',
                'found unknown escape character']:
            try:
                (template, template_lines) = json_parse(filename, allow_nulls=False)
            except Exception as json_err:  # pylint: disable=W0703
                LOGGER.error(
                    'Template %s is malformed: %s', filename, err.problem)
                LOGGER.error('Tried to parse %s as JSON but got error: %s',
                             filename, str(json_err))
    except YAMLError as err:
        # Any other YAML failure: fall through and return (None, None).
        pass
    if template is not None and isinstance(template, dict_node) and '$schema' in template and 'resources' in template:
        return template, template_lines
    return None, None
| StarcoderdataPython |
9761380 | <gh_stars>0
# comanage_api/_copersonroles.py
"""
CoPersonRole API - https://spaces.at.internet2.edu/display/COmanage/CoPersonRole+API
Methods
-------
coperson_roles_add(coperson_id: int, cou_id: int, status: str = None, affiliation: str = None) -> dict
Add a new CO Person Role.
coperson_roles_delete(coperson_role_id: int) -> bool
Remove a CO Person Role.
coperson_roles_edit(coperson_role_id: int, coperson_id: int = None, cou_id: int = None, status: str = None,
affiliation: str = None) -> bool
Edit an existing CO Person Role.
coperson_roles_view_all() -> dict
Retrieve all existing CO Person Roles.
coperson_roles_view_per_coperson(coperson_id: int) -> dict
Retrieve all existing CO Person Roles for the specified CO Person. Available since Registry v2.0.0.
coperson_roles_view_per_cou(cou_id: int) -> dict
Retrieve all existing CO Person Roles for the specified COU.
coperson_roles_view_one(coperson_role_id: int) -> dict
Retrieve an existing CO Person Role.
"""
import json
def coperson_roles_add(self, coperson_id: int, cou_id: int, status: str = None, affiliation: str = None) -> dict:
    """Create a CO Person Role linking a CO Person to a COU.

    `status` defaults to 'Active' and `affiliation` to 'member'.  Supplied
    values are validated against self.STATUS_OPTIONS and
    self.AFFILIATION_OPTIONS (affiliation is lower-cased first); an invalid
    value raises TypeError("Invalid Fields ...").  POSTs to
    /co_person_roles.json and returns the decoded NewObject response on
    HTTP 201; any other status raises via requests' raise_for_status().
    """
    role = {
        'Version': '1.0',
        'Person': {
            'Type': 'CO',
            'Id': str(coperson_id)
        },
        'CouId': str(cou_id),
        'O': str(self._CO_API_ORG_NAME)
    }
    if status:
        if status not in self.STATUS_OPTIONS:
            raise TypeError("Invalid Fields 'status'")
        role['Status'] = str(status)
    else:
        role['Status'] = 'Active'
    if affiliation:
        affiliation = str(affiliation).lower()
        if affiliation not in self.AFFILIATION_OPTIONS:
            raise TypeError("Invalid Fields 'affiliation'")
        role['Affiliation'] = str(affiliation)
    else:
        role['Affiliation'] = 'member'
    payload = json.dumps({
        'RequestType': 'CoPersonRoles',
        'Version': '1.0',
        'CoPersonRoles': [role]
    })
    endpoint = '{0}/co_person_roles.json'.format(self._CO_API_URL)
    response = self._s.post(
        url=endpoint,
        data=payload
    )
    if response.status_code == 201:
        return json.loads(response.text)
    response.raise_for_status()
def coperson_roles_delete(self, coperson_role_id: int) -> bool:
    """Remove a CO Person Role.

    Issues DELETE /co_person_roles/<id>.json and returns True on HTTP 200;
    any other status raises via requests' raise_for_status().
    """
    endpoint = '{0}/co_person_roles/{1}.json'.format(self._CO_API_URL, coperson_role_id)
    response = self._s.delete(url=endpoint)
    if response.status_code == 200:
        return True
    response.raise_for_status()
def coperson_roles_edit(self, coperson_role_id: int, coperson_id: int = None, cou_id: int = None, status: str = None,
                        affiliation: str = None) -> bool:
    """Update an existing CO Person Role.

    The current role is fetched first; any argument left as None keeps the
    value already stored in the registry.  `status` must be one of
    self.STATUS_OPTIONS and `affiliation` (lower-cased) one of
    self.AFFILIATION_OPTIONS, otherwise TypeError is raised.  PUTs to
    /co_person_roles/<id>.json and returns True on HTTP 200; any other
    status raises via requests' raise_for_status().
    """
    existing = coperson_roles_view_one(self, coperson_role_id)
    current = existing.get('CoPersonRoles')[0]
    role = {
        'Version': '1.0',
        'Person': {
            'Type': 'CO'
        },
        'O': str(self._CO_API_ORG_NAME)
    }
    if coperson_id:
        role['Person']['Id'] = str(coperson_id)
    else:
        role['Person']['Id'] = str(current.get('Person').get('Id'))
    if cou_id:
        role['CouId'] = str(cou_id)
    else:
        role['CouId'] = str(current.get('CouId'))
    if status:
        if status not in self.STATUS_OPTIONS:
            raise TypeError("Invalid Fields 'status'")
        role['Status'] = str(status)
    else:
        role['Status'] = current.get('Status')
    if affiliation:
        affiliation = str(affiliation).lower()
        if affiliation not in self.AFFILIATION_OPTIONS:
            raise TypeError("Invalid Fields 'affiliation'")
        role['Affiliation'] = str(affiliation)
    else:
        role['Affiliation'] = current.get('Affiliation')
    payload = json.dumps({
        'RequestType': 'CoPersonRoles',
        'Version': '1.0',
        'CoPersonRoles': [role]
    })
    endpoint = '{0}/co_person_roles/{1}.json'.format(self._CO_API_URL, coperson_role_id)
    response = self._s.put(
        url=endpoint,
        data=payload
    )
    if response.status_code == 200:
        return True
    response.raise_for_status()
def coperson_roles_view_all(self) -> dict:
    """Retrieve every CO Person Role known to the registry.

    GET /co_person_roles.json; returns the decoded CoPersonRoles response
    on HTTP 200, otherwise raises via requests' raise_for_status().
    """
    endpoint = '{0}/co_person_roles.json'.format(self._CO_API_URL)
    response = self._s.get(url=endpoint)
    if response.status_code == 200:
        return json.loads(response.text)
    response.raise_for_status()
def coperson_roles_view_per_coperson(self, coperson_id: int) -> dict:
    """Retrieve all CO Person Roles attached to one CO Person.

    GET /co_person_roles.json?copersonid=<id> (available since Registry
    v2.0.0); returns the decoded response on HTTP 200, otherwise raises via
    requests' raise_for_status().
    """
    endpoint = '{0}/co_person_roles.json'.format(self._CO_API_URL)
    query = {'copersonid': int(coperson_id)}
    response = self._s.get(url=endpoint, params=query)
    if response.status_code == 200:
        return json.loads(response.text)
    response.raise_for_status()
def coperson_roles_view_per_cou(self, cou_id: int) -> dict:
    """Retrieve all CO Person Roles attached to one COU.

    GET /co_person_roles.json?couid=<id>; returns the decoded response on
    HTTP 200, otherwise raises via requests' raise_for_status().
    """
    endpoint = '{0}/co_person_roles.json'.format(self._CO_API_URL)
    query = {'couid': int(cou_id)}
    response = self._s.get(url=endpoint, params=query)
    if response.status_code == 200:
        return json.loads(response.text)
    response.raise_for_status()
def coperson_roles_view_one(self, coperson_role_id: int) -> dict:
    """Retrieve a single CO Person Role by its id.

    GET /co_person_roles/<id>.json; returns the decoded response on
    HTTP 200, otherwise raises via requests' raise_for_status().
    """
    endpoint = '{0}/co_person_roles/{1}.json'.format(self._CO_API_URL, coperson_role_id)
    response = self._s.get(url=endpoint)
    if response.status_code == 200:
        return json.loads(response.text)
    response.raise_for_status()
| StarcoderdataPython |
290556 | from pathlib import Path
def loadNotebook(notebookPath: Path) -> str:
    """Return the full text of the notebook file at *notebookPath*.

    The file is decoded as UTF-8, same as an explicit
    ``open(..., encoding='utf-8').read()``.
    """
    return notebookPath.read_text(encoding='utf-8')
| StarcoderdataPython |
5034932 | <filename>tests/test_generator.py
import unittest
from tempfile import TemporaryDirectory
import os
from os.path import join as pjoin, getsize as filesize
from textwrap import dedent
from yaml import dump as ymldump
import cv_generator.generator
class CVGeneratorTest(unittest.TestCase):
    """Unit tests for the cv_generator.generator module."""

    # Minimal data dict fed into the template.
    TEST_DATA = {
        'title': 'A test file',
        'body': 'Test body',
    }
    # Minimal LaTeX template using \VAR{} Jinja-style placeholders.
    TEST_TEMPLATE = dedent('''\
        \\documentclass[12pt]{article}
        \\title{\\VAR{data.title}}
        \\begin{document}
        \\VAR{data.body}
        \\end{document}
        ''')

    def setUp(self):
        """Create scratch data/template/working dirs and a LaTeX config."""
        self._cwd = os.getcwd()
        self.data_dir = TemporaryDirectory()
        self.template_dir = TemporaryDirectory()
        self.tempwd = TemporaryDirectory()
        self.configuration = {
            'latex': {
                'program': 'pdflatex',
                'root': 'main.tex',
                'format': 'pdf',
            },
        }
        self.out_file = pjoin(self.tempwd.name, 'main.pdf')

        data_path = pjoin(self.data_dir.name, 'data.yml')
        with open(data_path, 'w') as data_file:
            ymldump(self.TEST_DATA, stream=data_file)

        template_path = pjoin(self.template_dir.name, 'main.tex')
        with open(template_path, 'w') as template_file:
            template_file.write(self.TEST_TEMPLATE)

        # process() runs in the current directory, so switch to the scratch one.
        os.chdir(self.tempwd.name)

    def tearDown(self):
        """Restore the working directory changed in setUp."""
        os.chdir(self._cwd)

    def test_process(self):
        """process() should render the template and compile a non-empty PDF."""
        cv_generator.generator.process(
            configuration=self.configuration,
            data_dir=self.data_dir.name,
            template_dir=self.template_dir.name,
        )
        self.assertGreater(filesize(self.out_file), 0)
| StarcoderdataPython |
79962 | <reponame>rschroll/ipyaml
import yaml
import nbformat
# Notebook JSON keys that survive a round-trip through ipyaml; everything
# else (outputs, execution counts, misc metadata) is intentionally dropped.
WHITELIST = ['nbformat', 'nbformat_minor', 'metadata', 'kernelspec', 'display_name',
             'language', 'name', 'cells']


class SourceCode(unicode):
    # Marker subclass of unicode (Python 2): cell sources wrapped in this
    # type are emitted as YAML literal blocks ('|') by SourceDumper.
    pass
class SourceDumper(yaml.SafeDumper):
    """SafeDumper variant that is more permissive about YAML block scalars."""

    def analyze_scalar(self, scalar):
        # The default analysis doesn't allow blocks if `trailing_space`, `space_break`,
        # or `special_characters`. The first two are common and don't actually cause
        # any problems. We'll test for special characters ourselves and set `allow_blocks`
        # based on that.
        analysis = super(SourceDumper, self).analyze_scalar(scalar)
        special_characters = False
        for ch in scalar:
            # Printable ASCII plus newline is always block-safe.
            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
                # Outside ASCII: allowed only when unicode output is enabled
                # and the code point lies in a printable range (excluding
                # surrogates, non-characters, and the BOM).
                if not self.allow_unicode or not (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
                                                  or u'\uE000' <= ch <= u'\uFFFD') or ch == u'\uFEFF':
                    special_characters = True
                    break
        analysis.allow_block = not special_characters
        return analysis
def sourcecode_representer(dumper, data):
    # Emit SourceCode strings as YAML literal block scalars (style '|') so
    # multi-line cell sources stay readable in the YAML file.
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
def notebooknode_representer(dumper, data):
    # Cells keep only their type and source (wrapped so the source dumps as a
    # literal block); outputs, execution counts and cell metadata are dropped.
    if 'cell_type' in data:
        return dumper.represent_dict({
            'cell_type': data.cell_type,
            'source': SourceCode(data.source)
        })
    # Notebook root and other nodes: keep only the whitelisted keys.
    return dumper.represent_dict({k: v for k, v in data.iteritems() if k in WHITELIST})
# Register the custom representers on SourceDumper only (not globally on yaml).
SourceDumper.add_representer(nbformat.NotebookNode, notebooknode_representer)
SourceDumper.add_representer(SourceCode, sourcecode_representer)
def read(f, version=4):
    """Parse a YAML notebook from *f* and return a validated NotebookNode.

    Per-cell fields that ipyaml drops on write (metadata, outputs,
    execution_count) are re-filled with empty defaults.  The notebook is
    converted to *version* unless version is nbformat.NO_CONVERT.
    """
    nb_struct = yaml.load(f, Loader=yaml.SafeLoader)
    # Tag the notebook so downstream tools can tell it came through ipyaml.
    nb_struct['metadata']['ipyaml'] = True
    for cell in nb_struct['cells']:
        cell['metadata'] = {}
        if cell['cell_type'] == 'code':
            cell['outputs'] = []
            cell['execution_count'] = None
    nb = nbformat.from_dict(nb_struct)
    if version is not nbformat.NO_CONVERT:
        nb = nbformat.convert(nb, version)
    nbformat.validate(nb)
    return nb
def write(nb, f=None, version=nbformat.NO_CONVERT):
    """Serialize notebook *nb* as YAML; returns the string when *f* is None.

    NOTE(review): the *version* parameter is currently unused -- confirm
    whether converting the notebook before dumping was intended.
    """
    return yaml.dump(nb, f, default_flow_style=False, Dumper=SourceDumper, allow_unicode=True)
if __name__ == '__main__':
    # Ad-hoc self-test harness (run the module directly to execute it).
    def test_roundtrip_yaml(s, as_block=True):
        # Dump s as a SourceCode scalar, optionally assert it used the
        # literal block style ('|'), and check it parses back unchanged.
        y = yaml.dump(SourceCode(s), default_flow_style=False, Dumper=SourceDumper,
                      allow_unicode=True)
        if as_block:
            assert y[0] == '|', "Not encoded as block: %r" % s
        assert s == yaml.load(y), "Did not round-trip: %r" % s

    # Strings with awkward whitespace must still round-trip as blocks.
    test_roundtrip_yaml('simple string')
    test_roundtrip_yaml('string with\nnew lines')
    test_roundtrip_yaml(' leading spaces')
    test_roundtrip_yaml(' leading spaces\nand new lines')
    test_roundtrip_yaml('trailing spacings ')
    test_roundtrip_yaml('trailing spaces \nin multiline')
    test_roundtrip_yaml('line with only spaces\n ')
    test_roundtrip_yaml('many trailing new lines\n\n\n')
    test_roundtrip_yaml(u'unicode \uABCD')
    # Control / surrogate code points cannot be emitted as block scalars.
    test_roundtrip_yaml(u'unicode control \x80', False)
    test_roundtrip_yaml(u'unicode private \uD800', False)

    # A small notebook must survive read() -> write() byte-for-byte.
    yaml_nb = """cells:
- cell_type: code
  source: |-
    1+2-3+4
- cell_type: markdown
  source: |-
    Text
- cell_type: raw
  source: |-
    Raw cell!
metadata:
  kernelspec:
    display_name: Python 2
    language: python
    name: python2
nbformat: 4
nbformat_minor: 0
"""
    assert yaml_nb == write(read(yaml_nb)), "Notebook did not round-trip"
11318585 | #!/bin/python
import Tkinter, tkFileDialog
from kafka import KafkaProducer
import re
def cleanup(value):
    """Strip leading '/' characters and trailing ':' characters from *value*.

    Used to normalise pieces extracted from a connection URI, e.g.
    '//host:' -> 'host'.

    Unlike the previous recursive implementation, this also accepts strings
    that strip down to empty (e.g. '/' or ':') instead of raising
    IndexError when indexing an empty string.
    """
    return value.lstrip('/').rstrip(':')
def parse(uri, regex, default):
    """Extract a single component of *uri* matching *regex*.

    Returns *default* (unmodified) when the pattern is absent, the
    cleaned-up match when it occurs exactly once, and raises RuntimeError
    on multiple matches.
    """
    matches = re.compile(regex).findall(uri)
    if not matches:
        return default
    if len(matches) > 1:
        raise RuntimeError("command-broker parsing failure")
    return cleanup(matches[0])
class Connector:
    """Thin wrapper around a KafkaProducer bound to one broker and topic."""

    def __init__(self):
        self.connected = False  # True after a successful connect()
        self.server = ""        # "host:port" of the broker
        self.topic = ""         # topic all messages are sent to

    def connect(self, uri):
        # Component patterns within the URI:
        # protocol: ^[A-Za-z]*
        # broker: \/{2}[A-Za-z.0-9]+: (literal)
        # port: :([0-9]+)
        # topic: \/[A-Za-z._-]+$
        broker = parse(uri, r"\/{2}[A-Za-z.0-9]+:", "ess01.psi.ch")
        port = parse(uri, r":([0-9]+)", "9092")
        if not self.connected:
            self.server = broker + ":" + port
            self.topic = parse(uri, r"\/[A-Za-z._-]+$", "topic.default")
            self.producer = KafkaProducer(bootstrap_servers=self.server)
            self.connected = True

    def disconnect(self):
        # NOTE(review): `connected` is not reset to False here -- confirm
        # whether reconnecting after disconnect() is meant to be a no-op.
        if self.connected:
            self.producer.close()

    def send(self, message):
        # Synchronous send: flush() blocks until delivery.
        if self.connected:
            self.producer.send(self.topic, value=message)
            self.producer.flush()
        else:
            raise RuntimeError("producer not connected")
class ESSFileWriter(Tkinter.LabelFrame):
    """Tkinter panel that submits JSON file-writer commands to Kafka.

    Layout (three rows):
      1. command-broker URI entry (press <Return> to connect),
      2. command-file chooser,
      3. Submit / Stop buttons.
    """

    def __init__(self, parent, name):
        self.mess = Connector()
        Tkinter.LabelFrame.__init__(self, parent, text=name)
        self.parent = parent
        self.cmd = Tkinter.StringVar()  # broker URI entered by the user
        self.src = Tkinter.StringVar()  # path of the selected command file
        self.text = ""                  # contents of the command file
        self.initialize()
        self.grid()

    def initialize(self):
        """Build the three rows of widgets."""
        self.grid()
        f1 = Tkinter.Frame(self)
        f2 = Tkinter.Frame(self)
        f3 = Tkinter.Frame(self)
        self.cmd.set("//[host]:[port]/[topic]")
        self.line1(f1)
        self.line2(f2)
        self.line3(f3)
        f1.grid(column=0, row=0, columnspan=3)
        f2.grid(column=0, row=1, columnspan=3)
        f3.grid(column=0, row=2, columnspan=3)

    def line1(self, parent):
        """Row 1: broker URI entry; <Return> triggers the connection."""
        lbl = "kafka-to-nexus command-broker: "
        entry = Tkinter.Entry(parent, textvariable=self.cmd, width=40)
        lb = Tkinter.Label(parent, text=lbl)
        lb.grid(column=0, row=0, columnspan=2)
        entry.grid(column=2, row=0, columnspan=2)
        entry.bind("<Return>", (lambda event: self.OnClickConnect()))

    def line2(self, parent):
        """Row 2: selected command-file label plus an Open button."""
        lbl = "command file: "
        lb = Tkinter.Label(parent, text=lbl)
        self.src.set("")
        self.sr = Tkinter.Label(parent, textvariable=self.src)
        button = Tkinter.Button(parent, text=u"Open",
                                command=self.OnButtonClickOpenfile)
        lb.grid(column=0, row=0)
        self.sr.grid(column=1, row=0)
        button.grid(column=2, row=0)

    def line3(self, parent):
        """Row 3: Submit and Stop buttons."""
        button_sub = Tkinter.Button(parent, text=u"Submit",
                                    command=self.OnButtonSubmit)
        button_stop = Tkinter.Button(parent, text=u"Stop",
                                     command=self.OnButtonStop)
        button_sub.grid(column=0, row=0)
        button_stop.grid(column=1, row=0)

    def OnButtonClick(self):
        pass

    def OnClickConnect(self):
        """Connect the Kafka producer using the URI from the entry field."""
        self.mess.connect(self.cmd.get())

    def OnButtonClickOpenfile(self):
        """Let the user pick a JSON command file and load its contents."""
        ftypes = [('JSON files', '*.json'), ('All files', '*')]
        dlg = tkFileDialog.Open(self, filetypes=ftypes)
        fl = dlg.show()
        self.src.set(fl)
        self.sr.update()
        # BUG FIX: the file handle was previously never closed (leak).
        f = open(fl, "r")
        try:
            self.text = f.read()
        finally:
            f.close()

    def OnButtonSubmit(self):
        """Send the loaded command file to the broker."""
        if len(self.text) == 0:
            # BUG FIX: the original used `pass` here, falling through and
            # sending an empty message anyway; bail out early instead.
            print("Command message not defined: nothing to do")
            return
        self.mess.send(self.text)

    def OnButtonStop(self):
        """Ask the file writer process to exit."""
        self.mess.send("{\"cmd\": \"FileWriter_exit\",\"teamid\": 0}")
| StarcoderdataPython |
3345601 | """
==================================
Set Axis Range When Plotting a Map
==================================
In this example we are going to look at how to set the axes
range using Matplotlib's ``set_xlim`` and ``set_ylim`` when plotting a
Map with WCSAxes.
"""
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
import sunpy.data.sample
import sunpy.map
###############################################################################
# Let's start by creating a Map from the sample data.

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)

###############################################################################
# Now let's say for example we are only interested in plotting a certain region
# of this Map. One way this could be done is to create a submap over the region
# of interest and then plotting that. Another useful way is to set the axes
# range over which to plot using Matplotlib's
# `~matplotlib.axes.Axes.set_xlim` and `~matplotlib.axes.Axes.set_ylim` functionality.
# The axes that Matplotlib uses are in pixel coordinates (e.g. of the image data
# array) rather than world coordinates (e.g. in arcsecs), so we need to convert
# the limits that are passed to set_xlim(), set_ylim() to pixel coordinates.
# We can define the limits we want in world coordinates and then work out what
# pixel coordinates these correspond to.
# Let's choose x limits and y limits in arcsecs that we are interested in.

xlims_world = [500, 1100]*u.arcsec
ylims_world = [-800, 0]*u.arcsec

###############################################################################
# We can then convert these into a SkyCoord which can be passed to
# :func:`~sunpy.map.GenericMap.world_to_pixel` to determine which pixel
# coordinates these represent on the Map.

world_coords = SkyCoord(Tx=xlims_world, Ty=ylims_world, frame=aia_map.coordinate_frame)
pixel_coords = aia_map.world_to_pixel(world_coords)

# We can then pull out the x and y values of these limits.
xlims_pixel = pixel_coords.x.value
ylims_pixel = pixel_coords.y.value

###############################################################################
# We can now plot this Map and then use xlims_pixel and ylims_pixel to set
# the range of the axes over which to plot.

fig = plt.figure()
ax = plt.subplot(projection=aia_map)
aia_map.plot(axes=ax, clip_interval=(1, 99.9)*u.percent)
ax.set_xlim(xlims_pixel)
ax.set_ylim(ylims_pixel)
plt.show()
| StarcoderdataPython |
3481159 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The F4PGA Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Classes for representing and creating a physical netlist in python,
along with reading and writing the physical netlist format.
Useful starting points:
PhysicalNetlist - class that can read and write physical netlist format,
and be constructed and inspected from Python.
"""
import enum
from collections import namedtuple
from .route_stitching import RoutingTree, stitch_segments, flatten_segments
# Physical cell type enum.
class PhysicalCellType(enum.Enum):
    """Kinds of physical cells that can appear in a physical netlist."""
    Locked = 0
    Port = 1
    Gnd = 2
    Vcc = 3
class PhysicalNetType(enum.Enum):
    """Electrical type of a physical net."""
    # Net is just a signal, not a VCC or GND tied net.
    Signal = 0
    # Net is tied to GND.
    Gnd = 1
    # Net is tied to VCC.
    Vcc = 2
# Represents an active pip between two tile wires.
#
# tile (str) - Name of tile
# wire0 (str) - Name of upstream wire to pip
# wire1 (str) - Name of downstream wire from pip
# forward (bool) - For bidirectional pips, is the connection from wire0 to
#                  wire1 (forward=True) or wire1 to wire0 (forward=False).
Pip = namedtuple('Pip', 'tile wire0 wire1 forward')

# Pin placement directive: associates a BEL pin with a Cell pin.
#
# bel_pin (str) - Name of BEL pin being associated
# cell_pin (str) - Name of Cell pin being associated
# bel (str) - Name of BEL that contains BEL pin.  If None, the BEL from the
#             Placement class is used.
# other_cell_type (str) - Used to define multi cell mappings.
# other_cell_name (str) - Used to define multi cell mappings.
Pin = namedtuple('Pin', 'bel_pin cell_pin bel other_cell_type other_cell_name')

# A physical net: name, PhysicalNetType, and its routing trees
# (sources = routed trees from drivers, stubs = unrouted/partial segments).
PhysicalNet = namedtuple('PhysicalNet', 'name type sources stubs')
class Placement():
    """A single Cell placement within a design.

    cell_type (str) - Type of cell being placed
    cell_name (str) - Name of cell instance being placed.
    site (str) - Site the Cell is placed within.
    bel (str) - Name of primary BEL being placed.
    """

    def __init__(self, cell_type, cell_name, site, bel):
        self.cell_type = cell_type
        self.cell_name = cell_name
        self.site = site
        self.bel = bel

        self.pins = []           # Pin mappings recorded for this placement
        self.other_bels = set()  # secondary BELs referenced by the pin map

    def add_bel_pin_to_cell_pin(self,
                                bel_pin,
                                cell_pin,
                                bel=None,
                                other_cell_type=None,
                                other_cell_name=None):
        """Record a BEL pin -> Cell pin association.

        bel_pin (str) - Name of BEL pin being associated.
        cell_pin (str) - Name of Cell pin being associated.
        bel (str) - BEL holding the pin; defaults to the primary BEL.
        other_cell_type / other_cell_name (str) - multi-cell mapping info.
        """
        target_bel = self.bel if bel is None else bel
        if target_bel != self.bel:
            self.other_bels.add(target_bel)

        self.pins.append(
            Pin(
                bel_pin=bel_pin,
                cell_pin=cell_pin,
                bel=target_bel,
                other_cell_type=other_cell_type,
                other_cell_name=other_cell_name,
            ))
def descend_branch(obj, node, string_id):
    """Recursively serialize every branch of *node* into capnp object *obj*."""
    children = node.branches
    obj.init('branches', len(children))
    for slot, child in zip(obj.branches, children):
        child.output_interchange(slot, string_id)
class PhysicalBelPin():
    """A BEL pin route segment in a physical net.

    site (str) - Site containing the BEL pin
    bel (str) - BEL containing the BEL pin
    pin (str) - BEL pin in the physical net
    """

    def __init__(self, site, bel, pin):
        self.site = site
        self.bel = bel
        self.pin = pin
        self.branches = []

    def output_interchange(self, obj, string_id):
        """Write this segment and, recursively, all branches below it.

        obj - physical_netlist.RouteBranch pycapnp object to write into.
        string_id - callable interning strings into the PhysNetlist string list.
        """
        obj.routeSegment.init('belPin')
        bel_pin = obj.routeSegment.belPin
        bel_pin.site = string_id(self.site)
        bel_pin.bel = string_id(self.bel)
        bel_pin.pin = string_id(self.pin)

        descend_branch(obj, self, string_id)

    def get_device_resource(self, site_types, device_resources):
        """Device resource corresponding to this BEL pin."""
        site_type = site_types[self.site]
        return device_resources.bel_pin(self.site, site_type, self.bel,
                                        self.pin)

    def to_tuple(self):
        """Canonical sort key used when normalizing routing trees."""
        return ('bel_pin', self.site, self.bel, self.pin)

    def __str__(self):
        return 'PhysicalBelPin({!r}, {!r}, {!r})'.format(
            self.site, self.bel, self.pin)
class PhysicalSitePin():
    """A site pin route segment in a physical net.

    site (str) - Site containing the site pin
    pin (str) - Site pin in the physical net
    """

    def __init__(self, site, pin):
        self.site = site
        self.pin = pin
        self.branches = []

    def output_interchange(self, obj, string_id):
        """Write this segment and, recursively, all branches below it.

        obj - physical_netlist.RouteBranch pycapnp object to write into.
        string_id - callable interning strings into the PhysNetlist string list.
        """
        obj.routeSegment.init('sitePin')
        site_pin = obj.routeSegment.sitePin
        site_pin.site = string_id(self.site)
        site_pin.pin = string_id(self.pin)

        descend_branch(obj, self, string_id)

    def get_device_resource(self, site_types, device_resources):
        """Device resource corresponding to this site pin."""
        site_type = site_types[self.site]
        return device_resources.site_pin(self.site, site_type, self.pin)

    def to_tuple(self):
        """Canonical sort key used when normalizing routing trees."""
        return ('site_pin', self.site, self.pin)

    def __str__(self):
        return 'PhysicalSitePin({!r}, {!r})'.format(self.site, self.pin)
class PhysicalPip():
    """An active interconnect pip in a physical net.

    tile (str) - Tile containing the pip
    wire0 (str) - Upstream wire of the pip
    wire1 (str) - Downstream wire of the pip
    forward (bool) - For bidirectional pips: True means wire0 -> wire1,
                     False means wire1 -> wire0.
    site (str) - Name of site for a pseudo PIP, else None.
    """

    def __init__(self, tile, wire0, wire1, forward=True, site=None):
        self.tile = tile
        self.wire0 = wire0
        self.wire1 = wire1
        self.forward = forward
        self.site = site
        self.branches = []

    def output_interchange(self, obj, string_id):
        """Write this segment and, recursively, all branches below it.

        obj - physical_netlist.RouteBranch pycapnp object to write into.
        string_id - callable interning strings into the PhysNetlist string list.
        """
        obj.routeSegment.init('pip')
        pip = obj.routeSegment.pip
        pip.tile = string_id(self.tile)
        pip.wire0 = string_id(self.wire0)
        pip.wire1 = string_id(self.wire1)
        pip.forward = self.forward
        # Routed pips are always emitted as fixed.
        pip.isFixed = True

        descend_branch(obj, self, string_id)

    def get_device_resource(self, site_types, device_resources):
        """Device resource corresponding to this pip."""
        return device_resources.pip(self.tile, self.wire0, self.wire1)

    def to_tuple(self):
        """Canonical sort key used when normalizing routing trees."""
        return ('pip', self.tile, self.wire0, self.wire1)

    def __str__(self):
        return 'PhysicalPip({!r}, {!r}, {!r}, {!r}, {!r})'.format(
            self.tile, self.wire0, self.wire1, self.forward, self.site)
class PhysicalSitePip():
    """A site pip route segment (site routing mux or site inverter).

    site (str) - Site containing the site pip
    bel (str) - BEL containing the site pip
    pin (str) - BEL pin that is the active site pip
    is_inverting (bool) - whether the site PIP inverts the signal
    """

    def __init__(self, site, bel, pin, is_inverting=False):
        self.site = site
        self.bel = bel
        self.pin = pin
        self.is_inverting = is_inverting
        self.branches = []

    def output_interchange(self, obj, string_id):
        """Write this segment and, recursively, all branches below it.

        obj - physical_netlist.RouteBranch pycapnp object to write into.
        string_id - callable interning strings into the PhysNetlist string list.
        """
        obj.routeSegment.init('sitePIP')
        site_pip = obj.routeSegment.sitePIP
        site_pip.site = string_id(self.site)
        site_pip.bel = string_id(self.bel)
        site_pip.pin = string_id(self.pin)
        site_pip.isInverting = self.is_inverting

        descend_branch(obj, self, string_id)

    def get_device_resource(self, site_types, device_resources):
        """Device resource corresponding to this site pip."""
        site_type = site_types[self.site]
        return device_resources.site_pip(self.site, site_type, self.bel,
                                         self.pin)

    def to_tuple(self):
        """Canonical sort key used when normalizing routing trees."""
        return ('site_pip', self.site, self.bel, self.pin, self.is_inverting)

    def __str__(self):
        return 'PhysicalSitePip({!r}, {!r}, {!r}, {!r})'.format(
            self.site, self.bel, self.pin, self.is_inverting)
def convert_tuple_to_object(site, tup):
    """ Convert a physical netlist tuple into its Physical* object.

    Physical netlist tuples are lightweight representations of route
    segments in the physical net tree.

    site (Site) - Site object the tuple belongs to.
    tup (tuple) - Either a ('site_pin', pin), ('bel_pin', bel, pin), or
                  ('site_pip', bel, pin) tuple.

    Returns PhysicalSitePin, PhysicalBelPin, or PhysicalSitePip.

    >>> Site = namedtuple('Site', 'name')
    >>> site = Site(name='TEST_SITE')
    >>> site_pin = convert_tuple_to_object(site, ('site_pin', 'TEST_PIN'))
    >>> assert isinstance(site_pin, PhysicalSitePin)
    >>> site_pin.site
    'TEST_SITE'
    >>> site_pin.pin
    'TEST_PIN'
    >>> site_pin.branches
    []

    >>> bel_pin = convert_tuple_to_object(site, ('bel_pin', 'ABEL', 'APIN'))
    >>> assert isinstance(bel_pin, PhysicalBelPin)
    >>> bel_pin.site
    'TEST_SITE'
    >>> bel_pin.bel
    'ABEL'
    >>> bel_pin.pin
    'APIN'

    >>> site_pip = convert_tuple_to_object(site, ('site_pip', 'BBEL', 'BPIN'))
    >>> assert isinstance(site_pip, PhysicalSitePip)
    >>> site_pip.site
    'TEST_SITE'
    >>> site_pip.bel
    'BBEL'
    >>> site_pip.pin
    'BPIN'
    """
    kind = tup[0]
    if kind == 'site_pin':
        _, pin = tup
        return PhysicalSitePin(site.name, pin)
    if kind == 'bel_pin':
        assert len(tup) == 3, tup
        _, bel, pin = tup
        return PhysicalBelPin(site.name, bel, pin)
    if kind == 'site_pip':
        _, bel, pin = tup
        return PhysicalSitePip(site.name, bel, pin)
    assert False, tup
def add_site_routing_children(site, parent_obj, parent_key, site_routing,
                              inverted_root):
    """ Convert site_routing map into Physical* python objects.

    site (Site) - Site object that contains site routing.
    parent_obj (Physical* python object) - Parent Physical* object to add new
                                           branches too.
    parent_key (tuple) - Site routing tuple for current parent_obj.
    site_routing (dict) - Map of parent site routing tuple to a set of
                          child site routing tuples.
    inverted_root (list) - List of physical net sources for the inverted
                           signal (e.g. a constant 1 net inverts to the
                           constant 0 net)
    """
    if parent_key in site_routing:
        for child in site_routing[parent_key]:
            if child[0] == 'inverter':
                if inverted_root is not None:
                    # The signal inverts here: children of the inverter node
                    # become roots of the *inverted* net instead of branches
                    # of the current parent.
                    for child2 in site_routing[child]:
                        obj = convert_tuple_to_object(site, child2)
                        inverted_root.append(obj)

                        # Continue to descend, but no more inverted root.
                        # There should be no double site inverters (hopefully?)
                        add_site_routing_children(
                            site,
                            obj,
                            child2,
                            site_routing,
                            inverted_root=None)
                else:
                    # Already inside an inverted subtree: skip the inverter
                    # node itself and keep attaching to the same parent.
                    add_site_routing_children(site, parent_obj, child,
                                              site_routing, inverted_root)
            else:
                obj = convert_tuple_to_object(site, child)
                parent_obj.branches.append(obj)

                add_site_routing_children(site, obj, child, site_routing,
                                          inverted_root)
def create_site_routing(site, net_roots, site_routing, constant_nets):
    """ Convert site_routing into map of nets to site local sources.

    site (Site) - Site object that contains site routing.
    net_roots (dict) - Map of root site routing tuples to the net name for
                       this root.
    site_routing (dict) - Map of parent site routing tuple to a set of
                          child site routing tuples.
    constant_nets (dict) - Map of 0/1 to their net name.

    Returns dict of nets to Physical* objects that represent the site local
    sources for that net.
    """
    nets = {}

    # Map each constant net name to the source list of its *inverse*, so a
    # site inverter on the 0-net feeds sources into the 1-net and vice versa.
    inverted_roots = {}

    for value, net_name in constant_nets.items():
        nets[net_name] = []
        inverted_roots[constant_nets[value ^ 1]] = nets[net_name]

    for root, net_name in net_roots.items():
        if net_name not in nets:
            nets[net_name] = []

        root_obj = convert_tuple_to_object(site, root)
        add_site_routing_children(site, root_obj, root, site_routing,
                                  inverted_roots.get(net_name, None))
        nets[net_name].append(root_obj)

    return nets
class PhysicalNetlist:
    """ Object that represents a physical netlist.

    part (str) - Part that this physical netlist is for.
    properties (dict) - Root level properties (if any) for physical netlist.
    """

    def __init__(self, part, properties=None):
        self.part = part
        # BUG FIX: `properties` was previously ignored (self.properties was
        # always {}) and the signature used a shared mutable default; copy
        # the caller's dict instead.  `properties={}` still works as before.
        self.properties = dict(properties) if properties else {}

        self.placements = []      # list of Placement objects
        self.nets = []            # list of PhysicalNet tuples
        self.physical_cells = {}  # cell name -> PhysicalCellType value
        self.site_instances = {}  # site name -> site type
        self.null_net = []        # route stubs belonging to no net

    def add_site_instance(self, site_name, site_type):
        """ Add the site type for a site instance.

        All sites used in placement require a site type.  If the site
        instance was already added before, replaces the previous site type.
        """
        self.site_instances[site_name] = site_type

    def add_physical_cell(self, cell_name, cell_type):
        """ Add physical cell instance.

        cell_name (str) - Name of physical cell instance
        cell_type (str) - Value of physical_netlist.PhysCellType

        If the physical cell was already added before, replaces the previous
        cell type.
        """
        self.physical_cells[cell_name] = cell_type

    def add_placement(self, placement):
        """ Add physical_netlist.Placement python object to this netlist.

        placement (physical_netlist.Placement) - Placement to add.
        """
        self.placements.append(placement)

    def add_physical_net(self,
                         net_name,
                         sources,
                         stubs,
                         net_type=PhysicalNetType.Signal):
        """ Adds a physical net to the physical netlist.

        net_name (str) - Name of net.
        sources (list of PhysicalBelPin / PhysicalSitePin / PhysicalSitePip /
                 PhysicalPip) - Sources of this net.
        stubs (list of PhysicalBelPin / PhysicalSitePin / PhysicalSitePip /
               PhysicalPip) - Stubs of this net.
        net_type (PhysicalNetType) - Type of net.
        """
        self.nets.append(
            PhysicalNet(
                name=net_name, type=net_type, sources=sources, stubs=stubs))

    def check_physical_nets(self, device_resources):
        """ Check physical nets for errors.

        Detects duplicate resources and invalid routing trees.
        """
        for net in self.nets:
            # RoutingTree does a check on the subtrees during construction.
            _ = RoutingTree(
                device_resources,
                self.site_instances,
                sources=net.sources,
                stubs=net.stubs)

    def stitch_physical_nets(self, device_resources, flatten=False):
        """ Stitch supplied physical nets into routing trees.

        flatten (bool) - If true, existing routing trees are flattened
                         before the stitching process.  Useful for testing,
                         or when the input routing tree was invalid and
                         needs to be reconstructed.
        """
        for idx, net in enumerate(self.nets):
            segments = net.sources + net.stubs
            if flatten:
                segments = flatten_segments(segments)

            sources, stubs = stitch_segments(device_resources,
                                             self.site_instances, segments)

            self.nets[idx] = PhysicalNet(
                name=net.name,
                type=net.type,
                sources=sources,
                stubs=stubs,
            )

    def get_normalized_tuple_tree(self, device_resources):
        """ Return physical nets in canonical tuple form.

        Returns a dictionary of net names to tuple trees.  Each value of the
        dictionary is a two tuple of the stubs and sources for the net.
        Each stub and source is a two tuple of the current segment of the
        routing tree and a tuple of children from that segment.

        Mostly useful for comparing routing trees for equality, as
        equivalent routing trees generate the same tuple tree.
        """
        output = {}

        for net in self.nets:
            routing_tree = RoutingTree(
                device_resources,
                self.site_instances,
                sources=net.sources,
                stubs=net.stubs)
            routing_tree.normalize_tree()

            assert net.name not in output
            output[net.name] = routing_tree.get_tuple_tree()

        return output

    def set_null_net(self, stubs):
        """ Set the list of route stubs that belong to no net. """
        self.null_net = stubs

    @staticmethod
    def read_from_capnp(f, interchange, *args, **kwargs):
        """ Reads a capnp logical netlist into a PhysicalNetlist object.

        f (file-like) - File to be read.
        interchange (interchange_capnp.Interchange) - Interchange object
            holding capnp schemas for the FPGA interchange format.
        compression_format (interchange_capnp.CompressionFormat) - defaults
            to interchange_capnp.DEFAULT_COMPRESSION_TYPE.
        is_packed (bool) - whether capnp is packed; defaults to
            interchange_capnp.IS_PACKED.

        Returns PhysicalNetlist created from input file.
        """
        return interchange.read_physical_netlist(f, *args, **kwargs)

    def convert_to_capnp(self, interchange):
        """ Convert PhysicalNetlist object into capnp object.

        Use interchange_capnp.write_capnp_file to write to disk or other
        storage.

        interchange (interchange_capnp.Interchange) - Interchange object
            holding capnp schemas for the FPGA interchange format.
        """
        return interchange.output_physical_netlist(self)
def chain_branches(segments):
    """Link a linear list of route segments into a parent->child chain.

    Each segment becomes the sole branch of its predecessor; the first
    segment (the root of the resulting chain) is returned.
    """
    for parent, child in zip(segments, segments[1:]):
        parent.branches.append(child)
    return segments[0]
def chain_pips(tile, wires):
    """Build forward PhysicalPip segments linking consecutive *wires*.

    Returns a tuple of len(wires) - 1 pips in *tile*; chain them into a
    branch tree with chain_branches.
    """
    return tuple(
        PhysicalPip(tile=tile, wire0=upstream, wire1=downstream, forward=True)
        for upstream, downstream in zip(wires[:-1], wires[1:]))
| StarcoderdataPython |
if __name__ == "__main__":
    # CLI entry point: hand control to the app object defined in src.cli.
    from src import cli
    cli.app()
| StarcoderdataPython |
3357384 | """
PyBpf
=====
This package is meant for a modern interface for dealing with the
outputs of Budapest-Florida code (Yecko et al. 1996.).
Current Features
----------------
- Full interface for fort.95,fort.18 and fort.19 files
- OOP data structure for easy access to all the data
- Ionization fraction calculations for the fort.19 data
- A reader function to easily initialize all data from a BpF code-
directory
Usage
-----
You can import it as:
>>> import pybpf
and you can easily get data from a full BpF working directory (assuming
that the fort.95, fort.19 and fort.18 files exist):
>>> working_path = 'path/to/working/dir'
>>> mod, hist, lim = pybpf.bpfDataRead(working_path,\
do__ionization = True, X=0.75,Y=0.2496)
and you have initialized the whole directory.
"""
from . import tcdata
from . import calcion
from .bpfreader import bpfDataRead
| StarcoderdataPython |
331043 | '''
HOW TO RUN THIS CODE (if tests are within the assignment 1 root):
python -m py.test tests/test_neural_to_solutions.py -vv -s -q
python -m py.test tests/test_neural_to_solutions.py -vv -s -q --cov
py.test.exe --cov=cs224d/ tests/test_neural_to_solutions.py --cov-report html
(if the tests are within the subfolder tests)
PYTHONPATH=${PWD} py.test.exe tests/ -v --cov-report html
python -m pytest tests -v --cov-report html
Open index.html contained within htmlcov
'''
import pytest
import numpy as np
import random
from collections import defaultdict, OrderedDict, Counter
from q2_gradcheck import grad_numerical, eval_numerical_gradient_array
from q2_neural import forward_backward_prop
from q2_neural import affine_forward, affine_backward, sigmoid_forward, sigmoid_backward
from q2_neural_sol import forward_backward_prop_sol
def rel_error(x, y):
    """ returns relative error: max |x - y| / max(1e-8, |x| + |y|) """
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
@pytest.fixture(scope='module')
def construct_toy_model(D1=10, H=20, D2=10, N=100):
    """Build random (data, labels, params, dim) for a D1 -> H -> D2 network.

    labels is an (N, D2) one-hot matrix; params is a flat vector sized for
    the two affine layers including biases.
    """
    dim = [D1, H, D2]
    data = np.random.randn(N, dim[0])
    labels = np.zeros((N, dim[2]))
    for i in range(N):
        # BUG FIX: previously assigned 0 into an all-zeros matrix, so no
        # label was ever set; one-hot labels need a 1 here (matching the
        # label construction in the tests below).
        # NOTE(review): np.random.randint's upper bound is exclusive, so the
        # last class is never selected -- possibly intended randint(0, dim[2]).
        labels[i, np.random.randint(0, dim[2] - 1)] = 1
    params = np.random.randn((dim[0] + 1) * dim[1] + (dim[1] + 1) * dim[2], )
    return data, labels, params, dim
def test_affine_forward():
    """affine_forward should reproduce the hand-computed reference output."""
    n_samples, sample_shape, out_dim = 2, (4, 5, 6), 3
    flat_dim = np.prod(sample_shape)

    x = np.linspace(-0.1, 0.5, num=n_samples * flat_dim).reshape(
        n_samples, *sample_shape)
    w = np.linspace(-0.2, 0.3, num=out_dim * flat_dim).reshape(
        flat_dim, out_dim)
    b = np.linspace(-0.3, 0.1, num=out_dim).reshape((1, out_dim))

    out, _ = affine_forward(x, w, b)
    expected = np.array([[1.49834967, 1.70660132, 1.91485297],
                         [3.25553199, 3.5141327, 3.77273342]])

    # Reference values are accurate to ~1e-9; 5e-7 leaves comfortable slack.
    assert out.shape == expected.shape
    assert rel_error(out, expected) < 5e-7
def test_affine_backward():
    """Analytic affine_backward gradients must match numeric gradients."""
    x = np.random.randn(10, 2, 3)
    w = np.random.randn(6, 5)
    b = np.random.randn(5).reshape((1, 5))
    dout = np.random.randn(10, 5)

    # Use eval_numerical_gradient_array for backprop from an output layer:
    #   input -> layer -> output -> ... -> final_layer_loss
    # backprop becomes the final-loss gradient propagated back to each
    # argument (x, w, b) in turn.
    dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
    dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
    db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)

    _, cache = affine_forward(x, w, b)
    dx, dw, db = affine_backward(dout, cache)

    # BUG FIX: these three asserts previously compared each shape with
    # itself (e.g. `dx.shape == dx.shape`), which is always true; compare
    # against the numeric gradients instead.
    assert dx.shape == dx_num.shape
    assert dw.shape == dw_num.shape
    assert db.shape == db_num.shape
    assert rel_error(dx_num, dx) < 5e-7
    assert rel_error(dw_num, dw) < 5e-7
    assert rel_error(db_num, db) < 5e-7
@pytest.mark.parametrize("dim1", list(range(2, 10)))
@pytest.mark.parametrize("dim2", list(range(2, 10)))
@pytest.mark.parametrize("dim3", list(range(2, 10)))
def test_neural_vs_neural_sol(dim1, dim2, dim3, N=300):
    """Student forward/backward prop cost must match the solution's cost."""
    dimensions = [dim1, dim2, dim3]
    data = np.random.randn(N, dim1)
    # One-hot labels: exactly one random class set to 1 per sample
    # (random.randint's upper bound is inclusive, so all classes occur).
    labels = np.zeros((N, dim3))
    for i in range(N):
        labels[i, random.randint(0, dim3 - 1)] = 1.
    # Flat parameter vector sized for two affine layers plus biases.
    params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
        dimensions[1] + 1) * dimensions[2], )
    cost, grad = forward_backward_prop(data, labels, params, dimensions)
    cost_sol, grad_sol = forward_backward_prop_sol(data, labels, params, dimensions)
    # NOTE(review): grad is unused here; the gradient comparison lives in
    # the sibling test_neural_vs_neural_sol_gradient.
    assert rel_error(cost, cost_sol) < 1e-7
@pytest.mark.parametrize("dim1", list(range(2, 10)))
@pytest.mark.parametrize("dim2", list(range(2, 10)))
@pytest.mark.parametrize("dim3", list(range(2, 10)))
def test_neural_vs_neural_sol_gradient(dim1, dim2, dim3, N=300):
    """Compare the gradient of forward_backward_prop with the reference solution."""
    dims = [dim1, dim2, dim3]
    inputs = np.random.randn(N, dim1)
    # One-hot labels with a random class per sample.
    targets = np.zeros((N, dim3))
    for row in range(N):
        targets[row, random.randint(0, dim3 - 1)] = 1.
    n_params = (dims[0] + 1) * dims[1] + (dims[1] + 1) * dims[2]
    params = np.random.randn(n_params, )
    cost, grad = forward_backward_prop(inputs, targets, params, dims)
    cost_sol, grad_sol = forward_backward_prop_sol(inputs, targets, params, dims)
    assert rel_error(grad, grad_sol) < 1e-8
| StarcoderdataPython |
1874407 | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Data validations for responsibleai module."""
from typing import List, Optional
import numpy as np
import pandas as pd
from responsibleai.exceptions import UserConfigValidationException
def validate_train_test_categories(
    train_data: pd.DataFrame,
    test_data: pd.DataFrame,
    rai_compute_type: str,
    categoricals: Optional[List[str]] = None,
):
    """Raise if the test data contains categories unseen in the train data.

    :param train_data: Training dataframe.
    :param test_data: Test dataframe.
    :param rai_compute_type: Name of the RAI computation, used in the error message.
    :param categoricals: Names of the categorical feature columns.
    :raises UserConfigValidationException: If any categorical column of the
        test data holds a category that is absent from the train data.
    """
    if categoricals is None:
        return
    missing_by_column = {}
    for column in train_data.columns:
        if column not in categoricals:
            continue
        train_categories = np.unique(train_data[column])
        test_categories = np.unique(test_data[column])
        unseen = np.setdiff1d(test_categories, train_categories)
        if unseen.shape[0] != 0:
            missing_by_column[column] = unseen.tolist()
    if missing_by_column:
        message = ("{} requires that every category of "
                   "categorical features present in the test data "
                   "be also present in the train data. "
                   "Categories missing from train data: {}")
        raise UserConfigValidationException(
            message.format(rai_compute_type, missing_by_column)
        )
| StarcoderdataPython |
3356288 | from rest_framework import serializers
from accelerator.models import Location
class LocationSerializer(serializers.ModelSerializer):
    """REST serializer exposing the basic address fields of a Location."""
    class Meta:
        # Only the listed address/identity fields are serialized.
        model = Location
        fields = ['id', 'street_address', 'timezone', 'country',
                  'state', 'name', 'city', ]
| StarcoderdataPython |
1978374 | import glob
import numpy as np
import os
from astropy.io import fits
from specklepy.io import config
from specklepy.io.config import read
from specklepy.io.filearchive import ReductionFileArchive
from specklepy.logging import logger
from specklepy.reduction import dark, flat, sky
from specklepy.utils.array import frame_shape
from specklepy.utils.time import default_time_stamp
class DataReduction(object):
    """Data reduction pipeline for FITS exposures.

    Orchestrates dark subtraction, linearization (not implemented), flat
    fielding, sky subtraction and masked-pixel filling, driven by a config
    file with PATHS/DARK/LINEAR/FLAT/SKY/OPTIONS sections.
    """
    # Default reduction configuration shipped with the package.
    config_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../config/reduction.cfg'))
    def __init__(self, paths=None, dark=None, linear=None, flat=None, sky=None, options=None, **kwargs):
        """Store the configuration sections and set up the file archive.

        Each argument corresponds to one section of the reduction config.
        Sections not given explicitly are taken from the upper-case keyword
        arguments (e.g. ``PATHS``) as parsed from a config file.
        """
        self.paths = paths
        self.dark = dark
        self.linear = linear
        self.flat = flat
        self.sky = sky
        self.options = options
        # Fill any section that was not passed directly from the upper-case
        # keyword of the same name (the section title used in config files).
        for attr in ['paths', 'dark', 'linear', 'flat', 'sky', 'options']:
            if getattr(self, attr) is None:
                logger.debug(f"Transforming parameters from section {attr.upper()!r} in the config file to {attr!r}...")
                setattr(self, attr, kwargs.get(attr.upper()))
        # Initialize file archive
        self.files = ReductionFileArchive(file_list=self.paths.get('fileList'), in_dir=self.paths.get('filePath'),
                                          out_dir=self.paths.get('outDir'), table_format='ascii.fixed_width')
        # Reset input directory
        self.files.file_path = self.paths.get('filePath')
    @classmethod
    def from_file(cls, file_name):
        """Build a DataReduction from a parameter file.

        The packaged default config is read first, then updated section-wise
        with the values from ``file_name``.
        """
        logger.info(f"Configuring data reduction from config file {cls.config_file!r}")
        config = read(par_file=cls.config_file)
        logger.info(f"Updating data reduction configuration from parameter file {file_name!r}")
        params = read(par_file=file_name)
        for section in params:
            config[section].update(**params[section])
        return cls(**config)
    def run(self):
        """Execute every reduction step whose section is not flagged 'skip'."""
        self.initialize_directories()
        self.initialize_product_files()
        if not self.dark.get('skip'):
            self.run_dark_correction()
        if not self.linear.get('skip'):
            self.run_linearization()
        if not self.flat.get('skip'):
            self.run_flat_fielding()
        if not self.sky.get('skip'):
            self.run_sky_subtraction()
        self.fill_masked_pixels(fill_value=self.options.get('fillValueMasked', None))
        # Close reduction
        logger.info("Reduction finished!")
    def initialize_directories(self):
        """Create the output and temporary directories if they do not exist."""
        if not os.path.isdir(self.paths.get('outDir')):
            logger.debug(f"Making directory {self.paths.get('outDir')}")
            os.makedirs(self.paths.get('outDir'))
        if not os.path.isdir(self.paths.get('tmpDir')):
            logger.debug(f"Making directory {self.paths.get('tmpDir')}")
            os.makedirs(self.paths.get('tmpDir'))
    @classmethod
    def set_up(cls, path, instrument, par_file=None, list_file=None, sort_by=None, recursive=False):
        """Sets up the data reduction parameter file and file list.
        Args:
            path (str):
                Path to the files.
            instrument (str):
                Name of the instrument that took the data. This must be covered by config/instruments.cfg.
            par_file (str, optional):
                Name of the output default parameter file for the reduction.
            list_file (str):
                Name of the output file that contains all the file names and header information.
            sort_by (str, optional):
                Header card that is used for the sorting of files.
            recursive (bool, optional):
                Search for files in a recursive way, that is all sub-directories.
        """
        # Defaults
        default_cards = ['OBSTYPE', 'OBJECT', 'FILTER', 'EXPTIME', 'DIT', 'nFRAMES', 'DATE', 'SUBWIN']
        dtypes = [str, str, str, float, float, int, str, str]
        instrument_config_file = os.path.join(os.path.dirname(__file__), '../config/instruments.cfg')
        # Read config
        configs = config.read(instrument_config_file)
        instrument_cards = configs[instrument.upper()]
        # Double check whether all aliases are defined
        cards = []
        for card in default_cards:
            try:
                cards.append(instrument_cards[card])
            except KeyError:
                logger.info(
                    f"Dropping header card {card!r} from setup identification, as there is no description in the "
                    f"config file.\nCheck out the config file {instrument_config_file!r} for details.")
                cards.append(None)
        # NOTE(review): this removes entries from the very lists being zipped
        # over; with more than one undefined card some entries can be skipped
        # or mismatched — confirm intended behavior.
        for card, dtype, header_card in zip(cards, dtypes, default_cards):
            if card is None:
                cards.remove(card)
                dtypes.remove(dtype)
                default_cards.remove(header_card)
        # Apply fall back values
        if path is None:
            path = '.'
        if list_file is None:
            list_file = 'files.tab'
        if par_file is None:
            par_file = 'reduction.yaml'
        # Find files
        if '*' in path:
            files = glob.glob(path, recursive=recursive)
        else:
            files = glob.glob(os.path.join(path, '*fits'), recursive=recursive)
        if len(files):
            logger.info(f"Found {len(files)} file(s)")
            files.sort()
        else:
            logger.error(f"Found no files in {path}!")
            raise RuntimeError(f"Found no files in {path}!")
        # Initialize a file archive
        raw_files = ReductionFileArchive(files, cards=cards, dtypes=dtypes, names=default_cards, sort_by=sort_by)
        raw_files.identify_setups(['FILTER', 'EXPTIME'])
        raw_files.add_dark_column()
        raw_files.write_table(file_name=list_file)
        # Write dummy parameter file for the reduction
        _, ext = os.path.splitext(par_file)
        if 'yaml' in ext:
            logger.info(f"Creating default reduction YAML parameter file {par_file}")
            par_file_content = f"PATHS:\n filePath: {raw_files.file_path}\n fileList: {list_file}\n outDir: Science/" \
                               f"\n tmpDir: Master//\n prefix: r" \
                               f"\n\nDARK:\n masterDarkFile: MasterDark.fits" \
                               f"\n\nFLAT:\n masterFlatFile: MasterFlat.fits" \
                               f"\n\nSKY:\n method: scalar"
        else:
            logger.info(f"Creating default reduction INI parameter file {par_file}")
            par_file_content = f"[PATHS]\nfilePath = {raw_files.file_path}\nfileList = {list_file}\noutDir = Science/" \
                               f"\ntmpDir = Master/\nprefix = r" \
                               f"\n\n[DARK]\nmasterDarkFile = MasterDark.fits" \
                               f"\n\n[FLAT]\nmasterFlatFile = MasterFlat.fits" \
                               f"\n\n[SKY]\nmethod = scalar"
        # NOTE(review): the context variable shadows the `par_file` argument.
        with open(par_file, 'w+') as par_file:
            par_file.write(par_file_content)
    def initialize_product_files(self):
        """Allocate product-file names; optionally delete earlier products."""
        self.files.add_product_file_column(prefix=self.paths.get('filePrefix'))
        logger.info(f"Overview of the considered files:\n{self.files.table}")
        if self.options.get('clearEarlierProductFiles', False):
            # Remove product files
            logger.info("Removing data product files from earlier reductions...")
            for product_file_path in self.files.product_file_paths:
                if product_file_path is None:
                    continue
                try:
                    os.remove(product_file_path)
                    logger.debug(f"Removed product file {product_file_path!r} from earlier reduction")
                except FileNotFoundError:
                    pass
            # Remove masters
            # NOTE(review): shells out to `rm`; not portable to Windows — confirm.
            logger.info("Removing master files from earlier reductions...")
            os.system(f"rm {os.path.join(self.paths.get('tmpDir'), '*.fits')}")
    def run_dark_correction(self):
        """Combine master darks per setup (unless reused) and subtract them."""
        logger.info(f"{'>' * 15} DARK SUBTRACTION {'<' * 15}")
        # Identify dark setups
        dark_setups = self.files.get_dark_setups()
        master_darks = {}
        # Create a master dark for each setup
        # NOTE(review): when `reuse` is set, `master_darks` stays empty and the
        # lookup below raises KeyError — presumably paths should then be loaded
        # from tmpDir; confirm.
        if not self.dark.get('reuse'):
            for setup in dark_setups:
                darks = self.files.filter({'OBSTYPE': 'DARK', 'SETUP': setup})
                sub_window = self.files.filter({'OBSTYPE': 'DARK', 'SETUP': setup}, namekey='SUBWIN')
                master_dark = dark.MasterDark(file_list=darks, file_path=self.files.file_path, setup=setup,
                                              file_name=self.dark.get('masterDarkFile'),
                                              out_dir=self.paths.get('tmpDir'), sub_window=sub_window)
                master_dark.combine()
                master_dark.write()
                master_darks[setup] = master_dark.path
        # Apply dark subtraction
        for setup in dark_setups:
            # Load master dark for this setup
            master_dark = dark.MasterDark.from_file(master_darks[setup])
            counter = 0
            # Iterate through product files
            for p, product_file_path in enumerate(self.files.product_file_paths):
                # Skip files not allocated
                if product_file_path is None or self.files.table['DARK'][p] != setup:
                    continue
                # Initialize file if not existing
                if not os.path.isfile(product_file_path):
                    self.files.initialize_product_file(index=p)
                # Extract sub-window for file
                sub_window = self.files.table['SUBWIN'].data[p]
                # Subtract master dark from file
                master_dark.subtract(product_file_path, sub_window=sub_window,
                                     sub_window_order=self.options.get('windowAxisOrder'))
                counter += 1
            logger.info(f"Dark subtraction complete for {counter} files in setup {setup!r}")
        logger.info(f"{'>' * 11} DARK SUBTRACTION COMPLETE {'<' * 11}")
    def run_flat_fielding(self):
        """Combine master flats per filter (unless reused) and normalize files."""
        logger.info(f"{'>' * 15} FLAT FIELDING {'<' * 15}")
        # Identify flat filters
        flat_filters = self.files.get_flat_filters()
        master_flats = {}
        # Create a master flat for each filter
        # NOTE(review): as in run_dark_correction, `reuse` leaves `master_flats`
        # empty for the lookup below — confirm. Also the loop variable shadows
        # the builtin `filter`.
        if not self.flat.get('reuse'):
            for filter in flat_filters:
                flats = self.files.filter({'OBSTYPE': 'FLAT', 'FILTER': filter}, namekey='PRODUCT')
                sub_window = self.files.filter({'OBSTYPE': 'FLAT', 'FILTER': filter}, namekey='SUBWIN')
                master_flat = flat.MasterFlat(file_list=flats, file_name=self.flat.get('masterFlatFile'),
                                              file_path=self.files.out_dir, out_dir=self.paths.get('tmpDir'),
                                              filter=filter, sub_window=sub_window)
                master_flat.combine(method=self.flat.get('method'), mask_percentage=self.flat.get('maskPercentage'))
                master_flat.write()
                master_flats[filter] = master_flat.path
        # Apply flat field correction
        for filter in flat_filters:
            # Load master flat for this filter
            master_flat = flat.MasterFlat.from_file(master_flats[filter])
            counter = 0
            # Iterate through product files
            for p, product_file_path in enumerate(self.files.product_file_paths):
                # Skip files from different filter
                if product_file_path is None \
                        or self.files.table['FILTER'][p] != filter \
                        or self.files.table['OBSTYPE'][p] in ['DARK', 'FLAT']:
                    continue
                # Initialize file if not existing
                if not os.path.isfile(product_file_path):
                    self.files.initialize_product_file(index=p)
                # Extract sub-window for file
                sub_window = self.files.table['SUBWIN'].data[p]
                # Normalize product file with master flat
                master_flat.run_correction(file_list=[product_file_path], sub_windows=[sub_window],
                                           sub_window_order=self.options.get('windowAxisOrder'))
                counter += 1
            logger.info(f"Flat fielding complete for {counter} files in {filter!r} band")
        logger.info(f"{'>' * 11} FLAT FIELDING COMPLETE {'<' * 11}")
    def run_linearization(self):
        """Placeholder for the (not yet implemented) linearity correction."""
        raise NotImplementedError("Linearization is not implemented yet!")
    def run_sky_subtraction(self):
        """Estimate and subtract the sky background from science files.

        Only the 'scalar' method with the sky estimated from the science
        frames themselves is implemented. Updates each product file in place:
        data, header time stamp, VAR and MASK extensions.
        """
        logger.info(f"{'>' * 15} SKY SUBTRACTION {'<' * 15}")
        # Choose sky source
        if self.sky.get('source').lower() in ['default', 'self', 'science']:
            logger.info("Estimating sky brightness from the SCIENCE frames themselves")
            # Iterate through science files
            for p, product_file_path in enumerate(self.files.product_file_paths):
                # Skip files from different filter
                if product_file_path is None \
                        or self.files.table['OBSTYPE'][p] in ['DARK', 'FLAT', 'SKY']:
                    continue
                # Catch not implemented methods
                if self.sky.get('method') != 'scalar':
                    raise NotImplementedError(f"Sky subtraction with {self.sky.get('method')} method is not "
                                              f"implemented yet!")
                # Initialize file if not existing
                if not os.path.isfile(product_file_path):
                    self.files.initialize_product_file(index=p)
                # Estimate sky background
                logger.info(f"Estimating sky background for file {product_file_path!r}...")
                sky_mean, sky_std = sky.estimate_sky_background(product_file_path, method=self.sky.get('method'))
                # Subtract sky background from data
                logger.info(f"Subtracting sky background...")
                with fits.open(product_file_path, mode='update') as hdu_list:
                    # Extract mask
                    if 'MASK' in hdu_list:
                        bpm = hdu_list['MASK'].data.astype(bool)
                        gpm = ~bpm
                    else:
                        bpm = np.zeros(frame_shape(hdu_list[0].data), dtype=bool)
                        gpm = True
                    # Subtract sky
                    hdu_list[0].data = np.subtract(hdu_list[0].data, sky_mean, where=gpm)
                    # Update header
                    hdu_list[0].header.set('HIERARCH SPECKLEPY REDUCTION SKYBKG', default_time_stamp())
                    # Propagate uncertainties
                    try:
                        hdu_list['VAR'].data = np.add(hdu_list['VAR'].data, np.square(sky_std))
                    except KeyError:
                        # Create VAR image and HDU
                        sky_std_image = np.full(frame_shape(hdu_list[0].data), np.square(sky_std))
                        var_hdu = fits.ImageHDU(data=sky_std_image, name='VAR')
                        hdu_list.append(var_hdu)
                    # Propagate mask
                    if 'MASK' not in hdu_list:
                        mask_hdu = fits.ImageHDU(data=bpm.astype(np.int16), name='MASK')
                        hdu_list.append(mask_hdu)
                    # Store updates
                    hdu_list.flush()
        else:
            raise NotImplementedError(f"Sky subtraction from source {self.sky.get('source')!r} is not implemented yet!")
        logger.info(f"{'>' * 11} SKY SUBTRACTION COMPLETE {'<' * 11}")
    def fill_masked_pixels(self, fill_value=None):
        """Overwrite pixels flagged in the MASK extension with ``fill_value``.

        Args:
            fill_value (optional):
                Value written into masked pixels of science product files.
                If `None`, this step is a no-op.
        """
        if fill_value is not None:
            logger.info(f"Filling masked pixels with value {fill_value} in science product files...")
            # Iterate through science files
            for p, product_file_path in enumerate(self.files.product_file_paths):
                # Skip files from different filter
                if product_file_path is None \
                        or self.files.table['OBSTYPE'][p] in ['DARK', 'FLAT', 'SKY']:
                    continue
                # Subtract sky background from data
                logger.info(f"Filling masked pixels in file {product_file_path!r}")
                with fits.open(product_file_path, mode='update') as hdu_list:
                    # Extract mask
                    if 'MASK' in hdu_list:
                        bpm = hdu_list['MASK'].data != 0
                    else:
                        bpm = []
                    # Fill pixels
                    if hdu_list[0].data.ndim == 2:
                        hdu_list[0].data[bpm] = fill_value
                    elif hdu_list[0].data.ndim == 3:
                        hdu_list[0].data[:, bpm] = fill_value
                    hdu_list.flush()
| StarcoderdataPython |
5093643 | <filename>job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0008_chordcounter.py
# Generated by Django 3.0.6 on 2020-05-12 12:05
from __future__ import unicode_literals, absolute_import
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ChordCounter model, which tracks how many sub-tasks of a
    Celery chord header group are still pending."""
    dependencies = [
        ('django_celery_results', '0007_remove_taskresult_hidden'),
    ]
    operations = [
        migrations.CreateModel(
            name='ChordCounter',
            fields=[
                ('id', models.AutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name='ID')),
                ('group_id', models.CharField(
                    db_index=True,
                    help_text='Celery ID for the Chord header group',
                    max_length=getattr(
                        settings,
                        'DJANGO_CELERY_RESULTS_TASK_ID_MAX_LENGTH',
                        255
                    ),
                    unique=True,
                    verbose_name='Group ID')),
                ('sub_tasks', models.TextField(
                    help_text='JSON serialized list of task result tuples. '
                              'use .group_result() to decode')),
                ('count', models.PositiveIntegerField(
                    help_text='Starts at len(chord header) '
                              'and decrements after each task is finished')),
            ],
        ),
    ]
| StarcoderdataPython |
4802128 | <reponame>beastzx18/Tron
import threading
from sys import platform
from sqlalchemy import (
Column,
String,
Integer
)
from . import SESSION, BASE
# save user ids in whitelists
class data(BASE):
    """Key/value table for persisting arbitrary string variables."""
    __tablename__ = "database var"
    # Primary-key lookup string.
    keys = Column(String, primary_key=True)
    # Stored value (kept as a string).
    values = Column(String)
    def __init__(self, keys, values):
        """Create a row storing *values* under the key *keys*."""
        self.keys = keys
        self.values = values
# Ensure the table exists before first use, and guard writes with a re-entrant
# lock shared by the helpers below.
data.__table__.create(checkfirst=True)
INSERTION_LOCK = threading.RLock()
# set, del, get keys & values
def setdv(keys, values):
    """Insert or update the value stored under *keys*; returns *keys*."""
    with INSERTION_LOCK:
        row = SESSION.query(data).get(keys)
        try:
            if row:
                # Existing row: just overwrite the stored value.
                row.values = values
            else:
                row = data(keys, values)
            SESSION.merge(row)
            SESSION.commit()
        finally:
            SESSION.close()
        return keys
def deldv(keys):
    """Delete the row stored under *keys*, if any; always returns False."""
    with INSERTION_LOCK:
        row = SESSION.query(data).get(keys)
        try:
            if row:
                SESSION.delete(row)
                SESSION.commit()
        finally:
            SESSION.close()
        return False
def getdv(keys):
    """Return the value stored under *keys*, or "" if the key is absent."""
    row = SESSION.query(data).get(keys)
    result = str(row.values) if row else ""
    SESSION.close()
    return result
def get_alldv():
    """Return every stored key/value pair as a dict."""
    rows = SESSION.query(data).distinct().all()
    return {row.keys: row.values for row in rows}
| StarcoderdataPython |
11297842 | <gh_stars>1-10
from distutils.core import setup
# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools.setup — confirm the supported Python versions first.
setup(name='collective_classification',
      version='0.1',
      package_dir={'collective_classification': ''},
      packages=['collective_classification.components', 'collective_classification.examples'],
) | StarcoderdataPython |
3560087 | import pathlib
import pygubu
try:
import Tkinter as tk
import Tkinter.ttk as ttk
from Tkinter.messagebox import showinfo, showerror, askyesno
from Tkinter import Toplevel
from Tkinter.filedialog import askopenfilename, asksaveasfilename
except:
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.messagebox import showinfo, showerror, askyesno
from tkinter import Toplevel
from tkinter.filedialog import askopenfilename, asksaveasfilename
from Libraries.ttkScrollableNotebook.ScrollableNotebook import *
from pygubu.widgets.editabletreeview import EditableTreeview
from pygubu.widgets.pathchooserinput import PathChooserInput
from pygubu.widgets.scrolledframe import ScrolledFrame
import pygubu.widgets.simpletooltip as tooltip
import traceback
from os import path, mkdir, listdir, remove, walk, rename, rmdir
import re
import xml.etree.ElementTree as ET
import zipfile
import shutil
from pathlib import Path as plpath
from math import ceil
from gatelib import *
from filehash import FileHash
import configparser
from dateutil.parser import parse as dateParse
import binascii
from time import sleep
from datetime import datetime
# Module-level paths and resources used throughout the EzRO application.
progFolder = getCurrFolder()
sys.path.append(progFolder)
# tk.Tk().withdraw()
noSystemNamesFileFlag = False
# Fall back to the bundled default system names if the user-editable
# SystemNames module is missing or fails to import.
try:
    from SystemNames import *
except Exception:  # was a bare `except:`; keep the best-effort fallback but let KeyboardInterrupt/SystemExit propagate
    noSystemNamesFileFlag = True
    from SystemNamesDefault import *
# CRC32 hasher used to verify ROM files against No-Intro DAT checksums.
crcHasher = FileHash('crc32')
defaultSettingsFile = path.join(progFolder, "settings.ini")
regionsFile = path.join(progFolder, "regions.ini")
logFolder = path.join(progFolder, "Logs")
# Space-separated quoted list of non-"Advanced" system names for combobox
# values; the leading "" entry provides an empty default choice.
systemListStr = "\"\" "+" ".join(["\""+sn+"\"" for sn in systemNamesDict.keys() if systemNamesDict[sn][0] != "Advanced"])
PROJECT_PATH = pathlib.Path(__file__).parent
PROJECT_UI = PROJECT_PATH / "EzRO.ui"
class EzroApp:
def __init__(self, master=None):
# Menu Bar
menubar = tk.Menu(tk_root, tearoff=0)
fileMenu = tk.Menu(menubar, tearoff=0)
helpMenu = tk.Menu(menubar, tearoff=0)
helpMenu.add_command(label="View Help...", command=self.menu_viewHelp)
helpMenu.add_separator()
helpMenu.add_command(label="About...", command=self.menu_viewAbout)
helpMenu.add_command(label="External Libraries...", command=self.menu_viewExternalLibraries)
menubar.add_cascade(label="Help", menu=helpMenu)
tk_root.config(menu=menubar)
# build ui
self.Main_Notebook = ttk.Notebook(master)
self.Export_Frame = ttk.Frame(self.Main_Notebook)
self.Export_System_Combobox = ttk.Combobox(self.Export_Frame)
self.systemChoice = tk.StringVar(value='')
self.Export_System_Combobox.configure(state='readonly', textvariable=self.systemChoice, values=systemListStr, width='50', height=25)
self.Export_System_Combobox.place(anchor='e', relx='.375', rely='.075', x='0', y='0')
self.Export_System_Button = ttk.Button(self.Export_Frame)
self.Export_System_Button.configure(text='Add System')
self.Export_System_Button.place(anchor='e', relx='.45', rely='.075', x='0', y='0')
self.Export_System_Button.configure(command=self.export_addSystem)
self.Export_ShowAdvancedSystems = ttk.Checkbutton(self.Export_Frame)
self.showAdvancedSystems = tk.IntVar(value=False)
self.Export_ShowAdvancedSystems.configure(text='Show Advanced Systems', variable=self.showAdvancedSystems)
self.Export_ShowAdvancedSystems.place(anchor='w', relx='.46', rely='.075', x='0', y='0')
self.Export_ShowAdvancedSystems.configure(command=self.export_toggleAdvancedSystems)
self.Export_SaveLayout_Button = ttk.Button(self.Export_Frame)
self.Export_SaveLayout_Button.configure(text='Save System Tabs')
self.Export_SaveLayout_Button.place(anchor='e', relx='.78', rely='.075', x='0', y='0')
self.Export_SaveLayout_Button.configure(command=self.export_saveSystemLoadout)
self.Export_LoadLayout_Button = ttk.Button(self.Export_Frame)
self.Export_LoadLayout_Button.configure(text='Load System Tabs')
self.Export_LoadLayout_Button.place(anchor='w', relx='.80', rely='.075', x='0', y='0')
self.Export_LoadLayout_Button.configure(command=self.export_loadSystemLoadout)
self.Export_Systems = ScrollableNotebook(self.Export_Frame, wheelscroll=True, tabmenu=True)
self.initVars()
self.Export_Systems.configure(height='200', width='200')
self.Export_Systems.place(anchor='nw', relheight='.7', relwidth='.9', relx='.05', rely='.15', x='0', y='0')
self.Export_AuditThis = ttk.Button(self.Export_Frame)
self.Export_AuditThis.configure(text='Audit This System')
self.Export_AuditThis.place(anchor='w', relx='.05', rely='.925', x='0', y='0')
self.Export_AuditThis.configure(command=self.export_auditSystem)
self.Export_AuditAll = ttk.Button(self.Export_Frame)
self.Export_AuditAll.configure(text='Audit All Open Systems')
self.Export_AuditAll.place(anchor='w', relx='.15', rely='.925', x='0', y='0')
self.Export_AuditAll.configure(command=self.export_auditAllSystems)
self.Export_TestExport = ttk.Checkbutton(self.Export_Frame)
self.isTestExport = tk.IntVar(value=False)
self.Export_TestExport.configure(text='Test Export', variable=self.isTestExport)
self.Export_TestExport.place(anchor='e', relx='.72', rely='.925', x='0', y='0')
self.Export_TestExport.configure(command=self.export_toggleTestExport)
self.Export_ExportThis = ttk.Button(self.Export_Frame)
self.Export_ExportThis.configure(text='Export This System')
self.Export_ExportThis.place(anchor='e', relx='.825', rely='.925', x='0', y='0')
self.Export_ExportThis.configure(command=self.export_exportSystem)
self.Export_ExportAll = ttk.Button(self.Export_Frame)
self.Export_ExportAll.configure(text='Export All Open Systems')
self.Export_ExportAll.place(anchor='e', relx='.95', rely='.925', x='0', y='0')
self.Export_ExportAll.configure(command=self.export_exportAllSystems)
self.Export_AuditHelp = ttk.Button(self.Export_Frame)
self.Export_AuditHelp.configure(text='?', width='2')
self.Export_AuditHelp.place(anchor='w', relx='.275', rely='.925', x='0', y='0')
self.Export_AuditHelp.configure(command=self.export_auditHelp)
self.Export_Frame.configure(height='200', width='200')
self.Export_Frame.pack(side='top')
self.Main_Notebook.add(self.Export_Frame, text='Export')
# Favorites Tab is unused
# self.Favorites_Frame = ttk.Frame(self.Main_Notebook)
# self.Favorites_Load = ttk.Button(self.Favorites_Frame)
# self.Favorites_Load.configure(text='Load Existing List...')
# self.Favorites_Load.place(anchor='w', relx='.1', rely='.075', x='0', y='0')
# self.Favorites_Load.configure(command=self.favorites_loadList)
# self.Favorites_System_Label = ttk.Label(self.Favorites_Frame)
# self.Favorites_System_Label.configure(text='System')
# self.Favorites_System_Label.place(anchor='w', relx='.1', rely='.15', x='0', y='0')
# self.Favorites_System_Combobox = ttk.Combobox(self.Favorites_Frame)
# self.favoritesSystemChoice = tk.StringVar(value='')
# self.Favorites_System_Combobox.configure(state='readonly', textvariable=self.favoritesSystemChoice, values=systemListStr, width='50')
# self.Favorites_System_Combobox.place(anchor='w', relx='.15', rely='.15', x='0', y='0')
# self.Favorites_List = EditableTreeview(self.Favorites_Frame)
# self.Favorites_List.place(anchor='nw', relheight='.65', relwidth='.8', relx='.1', rely='.2', x='0', y='0')
# self.Favorites_Add = ttk.Button(self.Favorites_Frame)
# self.Favorites_Add.configure(text='Add Files...')
# self.Favorites_Add.place(anchor='w', relx='.1', rely='.925', x='0', y='0')
# self.Favorites_Add.configure(command=self.favorites_addFiles)
# self.Favorites_Save = ttk.Button(self.Favorites_Frame)
# self.Favorites_Save.configure(text='Save List As...')
# self.Favorites_Save.place(anchor='e', relx='.9', rely='.925', x='0', y='0')
# self.Favorites_Save.configure(command=self.favorites_saveList)
# self.Favorites_Frame.configure(height='200', width='200')
# self.Favorites_Frame.pack(side='top')
# self.Main_Notebook.add(self.Favorites_Frame, text='Favorites')
self.Config_Frame = ttk.Frame(self.Main_Notebook)
self.Config_Default_SaveChanges = ttk.Button(self.Config_Frame)
self.Config_Default_SaveChanges.configure(text='Save Changes')
self.Config_Default_SaveChanges.place(anchor='e', relx='.95', rely='.925', x='0', y='0')
self.Config_Default_SaveChanges.configure(command=self.settings_saveChanges)
self.Config_Notebook = ttk.Notebook(self.Config_Frame)
self.Config_Default_Frame = ttk.Frame(self.Config_Notebook)
self.Config_Default_DATDir_Label = ttk.Label(self.Config_Default_Frame)
self.Config_Default_DATDir_Label.configure(text='Input No-Intro DAT Directory')
self.Config_Default_DATDir_Label.grid(column='0', padx='20', pady='10', row='0', sticky='w')
self.Config_Default_DATDir_PathChooser = PathChooserInput(self.Config_Default_Frame)
self.g_datFilePath = tk.StringVar(value='')
self.Config_Default_DATDir_PathChooser.configure(textvariable=self.g_datFilePath, type='directory')
self.Config_Default_DATDir_PathChooser.grid(column='0', ipadx='75', padx='200', pady='10', row='0', sticky='w')
self.Config_Default_RomsetDir_Label = ttk.Label(self.Config_Default_Frame)
self.Config_Default_RomsetDir_Label.configure(text='Input Romset Directory')
self.Config_Default_RomsetDir_Label.grid(column='0', padx='20', pady='10', row='1', sticky='w')
self.Config_Default_RomsetDir_PathChooser = PathChooserInput(self.Config_Default_Frame)
self.g_romsetFolderPath = tk.StringVar(value='')
self.Config_Default_RomsetDir_PathChooser.configure(textvariable=self.g_romsetFolderPath, type='directory')
self.Config_Default_RomsetDir_PathChooser.grid(column='0', ipadx='75', padx='200', pady='10', row='1', sticky='w')
self.Config_Default_IncludeOtherRegions = ttk.Checkbutton(self.Config_Default_Frame)
self.g_includeOtherRegions = tk.IntVar(value=False)
self.Config_Default_IncludeOtherRegions.configure(text='(1G1R) Include Games from Non-Primary Regions', variable=self.g_includeOtherRegions)
self.Config_Default_IncludeOtherRegions.grid(column='0', padx='20', pady='10', row='2', sticky='w')
self.Config_Default_Include = ttk.Labelframe(self.Config_Default_Frame)
self.Config_Default_IncludeUnlicensed = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeUnlicensed = tk.IntVar(value=False)
self.Config_Default_IncludeUnlicensed.configure(text='Unlicensed', variable=self.g_includeUnlicensed)
self.Config_Default_IncludeUnlicensed.grid(column='0', padx='20', pady='10', row='0', sticky='w')
self.Config_Default_IncludeUnreleased = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeUnreleased = tk.IntVar(value=False)
self.Config_Default_IncludeUnreleased.configure(text='Unreleased', variable=self.g_includeUnreleased)
self.Config_Default_IncludeUnreleased.grid(column='1', padx='0', pady='10', row='0', sticky='w')
self.Config_Default_IncludeCompilations = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeCompilations = tk.IntVar(value=False)
self.Config_Default_IncludeCompilations.configure(text='Compilations', variable=self.g_includeCompilations)
self.Config_Default_IncludeCompilations.grid(column='2', padx='37', pady='10', row='0', sticky='w')
self.Config_Default_IncludeTestPrograms = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeTestPrograms = tk.IntVar(value=False)
self.Config_Default_IncludeTestPrograms.configure(text='Misc. Programs', variable=self.g_includeTestPrograms)
self.Config_Default_IncludeTestPrograms.grid(column='0', padx='20', pady='10', row='1', sticky='w')
self.Config_Default_IncludeBIOS = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeBIOS = tk.IntVar(value=False)
self.Config_Default_IncludeBIOS.configure(text='BIOS', variable=self.g_includeBIOS)
self.Config_Default_IncludeBIOS.grid(column='1', padx='0', pady='10', row='1', sticky='w')
self.Config_Default_IncludeNESPorts = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeNESPorts = tk.IntVar(value=False)
self.Config_Default_IncludeNESPorts.configure(text='(GBA) NES Ports', variable=self.g_includeNESPorts)
self.Config_Default_IncludeNESPorts.grid(column='0', padx='20', pady='10', row='2', sticky='w')
self.Config_Default_IncludeGBAVideo = ttk.Checkbutton(self.Config_Default_Include)
self.g_includeGBAVideo = tk.IntVar(value=False)
self.Config_Default_IncludeGBAVideo.configure(text='GBA Video', variable=self.g_includeGBAVideo)
self.Config_Default_IncludeGBAVideo.grid(column='1', padx='0', pady='10', row='2', sticky='w')
self.Config_Default_Include.configure(text='Include')
self.Config_Default_Include.grid(column='0', padx='20', pady='10', row='3', sticky='w')
self.Config_Default_Separator = ttk.Separator(self.Config_Default_Frame)
self.Config_Default_Separator.configure(orient='vertical')
self.Config_Default_Separator.place(anchor='center', relheight='.95', relx='.5', rely='.5', x='0', y='0')
self.Config_Default_ExtractArchives = ttk.Checkbutton(self.Config_Default_Frame)
self.g_extractArchives = tk.IntVar(value=False)
self.Config_Default_ExtractArchives.configure(text='Extract Compressed Roms', variable=self.g_extractArchives)
self.Config_Default_ExtractArchives.place(anchor='nw', relx='.651', rely='.03', x='0', y='0')
self.Config_Default_ParentFolder = ttk.Checkbutton(self.Config_Default_Frame)
self.g_parentFolder = tk.IntVar(value=False)
self.Config_Default_ParentFolder.configure(text='Create Game Folder for Each Game', variable=self.g_parentFolder)
self.Config_Default_ParentFolder.place(anchor='nw', relx='.651', rely='.132', x='0', y='0')
self.Config_Default_SortByPrimaryRegion = ttk.Checkbutton(self.Config_Default_Frame)
self.g_sortByPrimaryRegion = tk.IntVar(value=False)
self.Config_Default_SortByPrimaryRegion.configure(text='Create Region Folders', variable=self.g_sortByPrimaryRegion)
self.Config_Default_SortByPrimaryRegion.place(anchor='nw', relx='.651', rely='.234', x='0', y='0')
self.Config_Default_PrimaryRegionInRoot = ttk.Checkbutton(self.Config_Default_Frame)
self.g_primaryRegionInRoot = tk.IntVar(value=False)
self.Config_Default_PrimaryRegionInRoot.configure(text='Do Not Create Folder for Primary Region', variable=self.g_primaryRegionInRoot)
self.Config_Default_PrimaryRegionInRoot.place(anchor='nw', relx='.651', rely='.336', x='0', y='0')
self.Config_Default_SpecialCategoryFolder = ttk.Checkbutton(self.Config_Default_Frame)
self.g_specialCategoryFolder = tk.IntVar(value=False)
self.Config_Default_SpecialCategoryFolder.configure(text='Create Folders for Special Categories', variable=self.g_specialCategoryFolder)
self.Config_Default_SpecialCategoryFolder.place(anchor='nw', relx='.651', rely='.438', x='0', y='0')
self.Config_Default_OverwriteDuplicates = ttk.Checkbutton(self.Config_Default_Frame)
self.g_overwriteDuplicates = tk.IntVar(value=False)
self.Config_Default_OverwriteDuplicates.configure(text='Overwrite Duplicate Files', variable=self.g_overwriteDuplicates)
self.Config_Default_OverwriteDuplicates.place(anchor='nw', relx='.651', rely='.540', x='0', y='0')
self.Config_Default_Frame.configure(height='200', width='200')
self.Config_Default_Frame.pack(side='top')
self.Config_Notebook.add(self.Config_Default_Frame, text='Default Settings')
self.Config_Region_Frame = ScrolledFrame(self.Config_Notebook, scrolltype='vertical')
self.Config_Region_Choice_RemoveButton_Tertiary = ttk.Button(self.Config_Region_Frame.innerframe)
self.Config_Region_Choice_RemoveButton_Tertiary.configure(state='disabled', text='X', width='2')
self.Config_Region_Choice_RemoveButton_Tertiary.grid(column='0', padx='20', pady='10', row='98', sticky='w')
self.Config_Region_Choice_Name_Label_Tertiary = ttk.Label(self.Config_Region_Frame.innerframe)
self.Config_Region_Choice_Name_Label_Tertiary.configure(text='Region Group')
self.Config_Region_Choice_Name_Label_Tertiary.grid(column='0', padx='130', pady='10', row='98', sticky='w')
self.Config_Region_Choice_Name_Entry_Tertiary = ttk.Entry(self.Config_Region_Frame.innerframe)
self.regionGroupTertiary = tk.StringVar(value='')
self.Config_Region_Choice_Name_Entry_Tertiary.configure(textvariable=self.regionGroupTertiary)
self.Config_Region_Choice_Name_Entry_Tertiary.grid(column='0', padx='220', pady='10', row='98', sticky='w')
self.Config_Region_Choice_Type_Label_Tertiary = ttk.Label(self.Config_Region_Frame.innerframe)
self.Config_Region_Choice_Type_Label_Tertiary.configure(text='Priority Type')
self.Config_Region_Choice_Type_Label_Tertiary.grid(column='0', padx='380', pady='10', row='98', sticky='w')
self.Config_Region_Choice_Type_Combobox_Tertiary = ttk.Combobox(self.Config_Region_Frame.innerframe)
self.Config_Region_Choice_Type_Combobox_Tertiary.configure(state='disabled', values='"Tertiary"', width='12')
self.Config_Region_Choice_Type_Combobox_Tertiary.grid(column='0', padx='465', pady='10', row='98', sticky='w')
self.Config_Region_Choice_Type_Combobox_Tertiary.current(0)
self.Config_Region_AddNewRegionCategory = ttk.Button(self.Config_Region_Frame.innerframe)
self.Config_Region_AddNewRegionCategory.configure(text='+ Add New Region Category')
self.Config_Region_AddNewRegionCategory.grid(column='0', padx='20', pady='10', row='99', sticky='w')
self.Config_Region_AddNewRegionCategory.configure(command=self.settings_region_addNewRegionCategory)
self.Config_Region_Help = ttk.Button(self.Config_Region_Frame.innerframe)
self.Config_Region_Help.configure(text='?', width='2')
self.Config_Region_Help.grid(column='0', padx='200', pady='10', row='99', sticky='w')
self.Config_Region_Help.configure(command=self.settings_region_help)
self.Config_Region_Template_Combobox = ttk.Combobox(self.Config_Region_Frame.innerframe)
self.templateChoice = tk.StringVar(value='')
self.Config_Region_Template_Combobox.configure(state='readonly', textvariable=self.templateChoice, values='"" "English" "English + Secondary" "English (USA Focus)" "English (Europe Focus)" "Japanese" "Japanese + Secondary"')
self.Config_Region_Template_Combobox.place(anchor='e', x=int(960*screenHeightMult), y=int(470*screenHeightMult))
self.Config_Region_Template_Apply = ttk.Button(self.Config_Region_Frame.innerframe)
self.Config_Region_Template_Apply.configure(text='Apply Template')
self.Config_Region_Template_Apply.place(anchor='e', x=int(1065*screenHeightMult), y=int(470*screenHeightMult))
self.Config_Region_Template_Apply.configure(command=self.config_region_applyTemplate)
self.Config_Region_Frame.configure(usemousewheel=True)
self.Config_Region_Frame.pack(side='top')
self.Config_Notebook.add(self.Config_Region_Frame, text='Region Settings')
self.Config_Notebook.configure(height='200', width='200')
self.Config_Notebook.place(anchor='nw', relheight='.8', relwidth='.9', relx='.05', rely='.05', x='0', y='0')
self.Config_Frame.configure(height='200', width='200')
self.Config_Frame.pack(side='top')
self.Main_Notebook.add(self.Config_Frame, text='Config')
self.Main_Notebook.bind('<<NotebookTabChanged>>', self.changeMainTab, add='')
# self.Main_Notebook.configure(height=int(675*screenHeightMult), width=int(1200*screenHeightMult))
self.Main_Notebook.grid(column='0', row='0')
self.Main_Notebook.place(relheight='1', relwidth='1')
if noSystemNamesFileFlag:
showerror("EzRO", "Valid SystemNames.py file not found. Using default system list.")
# Tooltips
tooltip.create(self.Export_ShowAdvancedSystems, 'Show systems that are difficult or uncommon to emulate, and systems that often do not make use of No-Intro DAT files.')
tooltip.create(self.Export_TestExport, 'For testing; if enabled, roms will NOT be exported. This allows you to see how many roms would be exported and how much space they would take up without actually exporting anything.\n\nIf unsure, leave this disabled.')
tooltip.create(self.Config_Default_DATDir_Label, 'The directory containing No-Intro DAT files for each system. These contain information about each rom, which is used in both exporting and auditing.\n\nIf this is provided, the \"Export\" tab will attempt to automatically match DAT files from this directory with each system so you don\'t have to input them manually.')
tooltip.create(self.Config_Default_RomsetDir_Label, 'The directory containing your rom directories for each system.\n\nIf this is provided, the \"Export\" tab will attempt to automatically match folders from this directory with each system so you don\'t have to input them manually.')
tooltip.create(self.Config_Default_ExtractArchives, 'If enabled, any roms from your input romset that are contained in zipped archives (ZIP, 7z, etc.) will be extracted during export.\n\nUseful if your output device does not support zipped roms.\n\nIf unsure, leave this disabled.')
tooltip.create(self.Config_Default_ParentFolder, 'If enabled, roms will be exported to a parent folder with the same name as the primary region release of your rom.\n\nFor example, \"Legend of Zelda, The (USA)\" and \"Zelda no Densetsu 1 - The Hyrule Fantasy (Japan)\" will both be exported to a folder titled \"Legend of Zelda, The\".\n\nIf unsure, leave this disabled.')
tooltip.create(self.Config_Default_SortByPrimaryRegion, 'If enabled, all roms will be exported to a parent folder named after the game\'s highest-priority region.\n\nFor example, Devil World (NES) has Europe and Japan releases, but not USA. If your order of region priority is USA->Europe->Japan, then all versions of Devil World (and its parent folder, if enabled) will be exported to a folder titled \"[Europe]\".\n\nIf you enable this, it is strongly recommended that you also enable \"Create Game Folder for Each Game\".\n\nIf unsure, leave this enabled.')
tooltip.create(self.Config_Default_PrimaryRegionInRoot, '(Only applies if \"Create Region Folders\" is enabled.)\n\nIf enabled, a region folder will NOT be created for your highest-priority region.\n\nFor example, if your order of region priority is USA->Europe->Japan, then games that have USA releases will not be exported to a [USA] folder (they will instead be placed directly in the output folder), but games that have Europe releases and not USA releases will be exported to a [Europe] folder.\n\nIf unsure, leave this enabled.')
tooltip.create(self.Config_Default_SpecialCategoryFolder, 'If enabled, all exported roms that are part of a special category (Unlicensed, Unreleased, etc.) will be exported to a parent folder named after that category. There will be multiple nested folders if a game belongs to multiple special categories.\n\nIf unsure, leave this enabled.')
tooltip.create(self.Config_Default_OverwriteDuplicates, 'If enabled: If a rom in the output directory with the same name as an exported rom already exists, it will be overwritten by the new export.\n\nIf disabled: The export will not overwrite matching roms in the output directory.\n\nIf unsure, leave this disabled.')
tooltip.create(self.Config_Default_IncludeOtherRegions, '(Only applies to 1G1R export.)\n\nIf enabled: In the event that a game does not contain a rom from your region (e.g. your primary region is USA but the game is a Japan-only release), a secondary region will be used according to your Region/Language Priority Order.\n\nIf disabled: In the event that a game does not contain a rom from your region, the game is skipped entirely.\n\nIf you only want to export roms from your own region, disable this.')
tooltip.create(self.Config_Default_IncludeTestPrograms, 'Include non-game programs such as test programs, SDK files, and SNES enhancement chips.\n\nIf unsure, leave this disabled.')
tooltip.create(self.Config_Default_IncludeNESPorts, '(Only applies to GBA.)\n\nInclude Classic NES Series, Famicom Mini, Hudson Best Collection, and Kunio-kun Nekketsu Collection emulated ports.')
tooltip.create(self.Config_Default_IncludeGBAVideo, '(Only applies to GBA.)')
tooltip.create(self.Config_Region_Choice_Name_Label_Tertiary, 'The name of the region group. If \"Create Region Folders\" is enabled, then games marked as one of this group\'s region tags will be exported to a folder named after this group, surround by brackets (e.g. [World], [USA], etc).')
tooltip.create(self.Config_Region_Choice_Type_Label_Tertiary, 'The type of region group.\n\nPrimary: The most significant region; 1G1R exports will prioritize this. If there are multiple Primary groups, then higher groups take priority.\n\nSecondary: \"Backup\" regions that will not be used in a 1G1R export unless no Primary-group version of a game exists, and \"Include Games from Non-Primary Regions\" is also enabled. If there are multiple Secondary groups, then higher groups take priority.\n\nTertiary: Any known region/language tag that is not part of a Primary/Secondary group is added to the Tertiary group by default. This is functionally the same as a Secondary group.')
# Main widget
self.mainwindow = self.Main_Notebook
master.protocol("WM_DELETE_WINDOW", sys.exit) # Why does this not work automatically?
# Other initialization
self.g_specificAttributes = []
self.g_generalAttributes = []
self.isExport = True
if not path.exists(defaultSettingsFile):
self.createDefaultSettings()
if not path.exists(regionsFile):
self.createRegionSettings()
self.loadConfig()
def run(self):
self.mainwindow.mainloop()
#########################
# EXPORT (GUI Handling) #
#########################
def initVars(self):
self.recentlyVerified = False
self.exportTabNum = 0
self.exportSystemNames = []
self.Export_ScrolledFrame_ = []
self.Export_DAT_Label_ = []
self.Export_DAT_PathChooser_ = []
self.datFilePathChoices = []
self.Export_Romset_Label_ = []
self.Export_Romset_PathChooser_ = []
self.romsetFolderPathChoices = []
self.Export_OutputDir_Label_ = []
self.Export_OutputDir_PathChooser_ = []
self.outputFolderDirectoryChoices = []
self.Export_Separator_ = []
self.Export_OutputType_Label_ = []
self.Export_OutputType_Combobox_ = []
self.outputTypeChoices = []
self.Export_includeOtherRegions_ = []
self.includeOtherRegionsChoices = []
self.Export_FromList_Label_ = []
self.Export_FromList_PathChooser_ = []
self.romListFileChoices = []
self.Export_IncludeFrame_ = []
self.Export_IncludeUnlicensed_ = []
self.includeUnlicensedChoices = []
self.Export_IncludeUnreleased_ = []
self.includeUnreleasedChoices = []
self.Export_IncludeCompilations_ = []
self.includeCompilationsChoices = []
self.Export_IncludeTestPrograms_ = []
self.includeTestProgramsChoices = []
self.Export_IncludeBIOS_ = []
self.includeBIOSChoices = []
self.Export_IncludeNESPorts_ = []
self.includeNESPortsChoices = []
self.Export_IncludeGBAVideo_ = []
self.includeGBAVideoChoices = []
self.Export_ExtractArchives_ = []
self.extractArchivesChoices = []
self.Export_ParentFolder_ = []
self.parentFolderChoices = []
self.Export_SortByPrimaryRegion_ = []
self.sortByPrimaryRegionChoices = []
self.Export_SpecialCategoryFolder_ = []
self.specialCategoryFolderChoices = []
self.Export_PrimaryRegionInRoot_ = []
self.primaryRegionInRootChoices = []
self.Export_OverwriteDuplicates_ = []
self.overwriteDuplicatesChoices = []
self.Export_RemoveSystem_ = []
self.regionNum = 0
self.Config_Region_Choice_RemoveButton_ = []
self.Config_Region_Choice_UpButton_ = []
self.Config_Region_Choice_DownButton_ = []
self.Config_Region_Choice_Name_Label_ = []
self.Config_Region_Choice_Name_Entry_ = []
self.regionGroupNames = []
self.Config_Region_Choice_Type_Label_ = []
self.Config_Region_Choice_Type_Combobox_ = []
self.regionPriorityTypes = []
self.Config_Region_Choice_Tags_Label_ = []
self.Config_Region_Choice_Tags_Entry_ = []
self.regionTags = []
def export_toggleAdvancedSystems(self):
global systemListStr
if self.showAdvancedSystems.get():
systemListStr = "\"\" "+" ".join(["\""+sn+"\"" for sn in systemNamesDict.keys()])
else:
systemListStr = "\"\" "+" ".join(["\""+sn+"\"" for sn in systemNamesDict.keys() if systemNamesDict[sn][0] != "Advanced"])
self.Export_System_Combobox.configure(values=systemListStr)
    def addSystemTab(self, systemName="New System", datFilePath="", romsetFolderPath="", outputFolderDirectory="",
                     outputType="All", includeOtherRegions=False, romList="",
                     includeUnlicensed=False, includeUnreleased=False, includeCompilations=False,
                     includeTestPrograms=False, includeBIOS=False, includeNESPorts=False,
                     includeGBAVideo=False, extractArchives=False, parentFolder=False, sortByPrimaryRegion=False, primaryRegionInRoot=False,
                     specialCategoryFolder=False, overwriteDuplicates=False):
        """Create and populate one per-system tab in the Export notebook.

        Every widget is appended to the parallel per-tab lists created in
        initVars, all indexed by self.exportTabNum (incremented at the end).
        The keyword arguments mirror the per-system settings stored in a
        saved system loadout; the defaults produce a blank "New System" tab.
        """
        self.exportSystemNames.append(systemName)
        self.Export_ScrolledFrame_.append(ScrolledFrame(self.Export_Systems, scrolltype='both'))
        # Left column: DAT file, input romset, and output-directory choosers.
        # setSystemDAT / setInputRomsetDir try to auto-fill the paths from the
        # configured default directories when no explicit path was given.
        self.Export_DAT_Label_.append(ttk.Label(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_DAT_Label_[self.exportTabNum].configure(text='Input No-Intro DAT')
        self.Export_DAT_Label_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='0', sticky='w')
        self.Export_DAT_PathChooser_.append(PathChooserInput(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.setSystemDAT(systemName, datFilePath)
        self.Export_DAT_PathChooser_[self.exportTabNum].configure(mustexist='true', state='normal', textvariable=self.datFilePathChoices[self.exportTabNum], type='file', filetypes=[('DAT Files', '*.dat')])
        self.Export_DAT_PathChooser_[self.exportTabNum].grid(column='0', ipadx='90', padx='150', pady='10', row='0', sticky='w')
        self.Export_Romset_Label_.append(ttk.Label(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_Romset_Label_[self.exportTabNum].configure(text='Input Romset')
        self.Export_Romset_Label_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='1', sticky='w')
        self.Export_Romset_PathChooser_.append(PathChooserInput(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.setInputRomsetDir(systemName, romsetFolderPath)
        self.Export_Romset_PathChooser_[self.exportTabNum].configure(mustexist='true', state='normal', textvariable=self.romsetFolderPathChoices[self.exportTabNum], type='directory')
        self.Export_Romset_PathChooser_[self.exportTabNum].grid(column='0', ipadx='90', padx='150', pady='10', row='1', sticky='w')
        self.Export_OutputDir_Label_.append(ttk.Label(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_OutputDir_Label_[self.exportTabNum].configure(text='Output Directory')
        self.Export_OutputDir_Label_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='2', sticky='w')
        self.Export_OutputDir_PathChooser_.append(PathChooserInput(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.outputFolderDirectoryChoices.append(tk.StringVar(value=''))
        self.outputFolderDirectoryChoices[self.exportTabNum].set(outputFolderDirectory)
        self.Export_OutputDir_PathChooser_[self.exportTabNum].configure(mustexist='true', state='normal', textvariable=self.outputFolderDirectoryChoices[self.exportTabNum], type='directory')
        self.Export_OutputDir_PathChooser_[self.exportTabNum].grid(column='0', ipadx='90', padx='150', pady='10', row='2', sticky='w')
        # Vertical separator between the left (gridded) and right (placed) columns.
        self.Export_Separator_.append(ttk.Separator(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_Separator_[self.exportTabNum].configure(orient='vertical')
        self.Export_Separator_[self.exportTabNum].place(anchor='center', relheight='.95', relx='.5', rely='.5', x='0', y='0')
        # Output type combobox ("All" / "1G1R" / "Favorites"); the .current()
        # call keeps the displayed selection in sync with the restored value.
        self.Export_OutputType_Label_.append(ttk.Label(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_OutputType_Label_[self.exportTabNum].configure(text='Output Type')
        self.Export_OutputType_Label_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='3', sticky='w')
        self.Export_OutputType_Combobox_.append(ttk.Combobox(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.outputTypeChoices.append(tk.StringVar(value=''))
        self.outputTypeChoices[self.exportTabNum].set(outputType)
        self.Export_OutputType_Combobox_[self.exportTabNum].configure(state='readonly', textvariable=self.outputTypeChoices[self.exportTabNum], values='"All" "1G1R" "Favorites"', width='10')
        self.Export_OutputType_Combobox_[self.exportTabNum].grid(column='0', padx='150', pady='10', row='3', sticky='w')
        self.Export_OutputType_Combobox_[self.exportTabNum].bind('<<ComboboxSelected>>', self.export_setOutputType, add='')
        if outputType == "1G1R":
            self.Export_OutputType_Combobox_[self.exportTabNum].current(1)
        elif outputType == "Favorites":
            self.Export_OutputType_Combobox_[self.exportTabNum].current(2)
        else:
            self.Export_OutputType_Combobox_[self.exportTabNum].current(0)
        self.Export_includeOtherRegions_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.includeOtherRegionsChoices.append(tk.IntVar(value=includeOtherRegions))
        self.Export_includeOtherRegions_[self.exportTabNum].configure(text='Include Games from Non-Primary Regions', variable=self.includeOtherRegionsChoices[self.exportTabNum])
        self.Export_includeOtherRegions_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='4', sticky='w')
        # Rom list chooser (used by the "Favorites" output type).
        self.Export_FromList_Label_.append(ttk.Label(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_FromList_Label_[self.exportTabNum].configure(text='Rom List')
        self.Export_FromList_Label_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='5', sticky='w')
        self.Export_FromList_PathChooser_.append(PathChooserInput(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.romListFileChoices.append(tk.StringVar(value=''))
        self.romListFileChoices[self.exportTabNum].set(romList)
        self.Export_FromList_PathChooser_[self.exportTabNum].configure(mustexist='true', state='normal', textvariable=self.romListFileChoices[self.exportTabNum], type='file', filetypes=[('Text Files', '*.txt')])
        self.Export_FromList_PathChooser_[self.exportTabNum].grid(column='0', ipadx='90', padx='150', pady='10', row='5', sticky='w')
        # "Include" frame: checkbuttons for special rom categories.
        self.Export_IncludeFrame_.append(ttk.Labelframe(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_IncludeUnlicensed_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeUnlicensedChoices.append(tk.IntVar(value=includeUnlicensed))
        self.Export_IncludeUnlicensed_[self.exportTabNum].configure(text='Unlicensed', variable=self.includeUnlicensedChoices[self.exportTabNum])
        self.Export_IncludeUnlicensed_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='0', sticky='w')
        self.Export_IncludeUnreleased_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeUnreleasedChoices.append(tk.IntVar(value=includeUnreleased))
        self.Export_IncludeUnreleased_[self.exportTabNum].configure(text='Unreleased', variable=self.includeUnreleasedChoices[self.exportTabNum])
        self.Export_IncludeUnreleased_[self.exportTabNum].grid(column='1', padx='0', pady='10', row='0', sticky='w')
        self.Export_IncludeCompilations_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeCompilationsChoices.append(tk.IntVar(value=includeCompilations))
        self.Export_IncludeCompilations_[self.exportTabNum].configure(text='Compilations', variable=self.includeCompilationsChoices[self.exportTabNum])
        self.Export_IncludeCompilations_[self.exportTabNum].grid(column='2', padx='37', pady='10', row='0', sticky='w')
        self.Export_IncludeTestPrograms_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeTestProgramsChoices.append(tk.IntVar(value=includeTestPrograms))
        self.Export_IncludeTestPrograms_[self.exportTabNum].configure(text='Misc. Programs', variable=self.includeTestProgramsChoices[self.exportTabNum])
        self.Export_IncludeTestPrograms_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='1', sticky='w')
        self.Export_IncludeBIOS_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeBIOSChoices.append(tk.IntVar(value=includeBIOS))
        self.Export_IncludeBIOS_[self.exportTabNum].configure(text='BIOS', variable=self.includeBIOSChoices[self.exportTabNum])
        self.Export_IncludeBIOS_[self.exportTabNum].grid(column='1', padx='0', pady='10', row='1', sticky='w')
        self.Export_IncludeNESPorts_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeNESPortsChoices.append(tk.IntVar(value=includeNESPorts))
        self.Export_IncludeNESPorts_[self.exportTabNum].configure(text='NES Ports', variable=self.includeNESPortsChoices[self.exportTabNum])
        self.Export_IncludeNESPorts_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='2', sticky='w')
        self.Export_IncludeGBAVideo_.append(ttk.Checkbutton(self.Export_IncludeFrame_[self.exportTabNum]))
        self.includeGBAVideoChoices.append(tk.IntVar(value=includeGBAVideo))
        self.Export_IncludeGBAVideo_[self.exportTabNum].configure(text='GBA Video', variable=self.includeGBAVideoChoices[self.exportTabNum])
        self.Export_IncludeGBAVideo_[self.exportTabNum].grid(column='1', padx='0', pady='10', row='2', sticky='w')
        self.Export_IncludeFrame_[self.exportTabNum].configure(text='Include')
        self.Export_IncludeFrame_[self.exportTabNum].grid(column='0', padx='20', pady='10', row='6', sticky='w')
        # Right column: export-behavior checkbuttons (placed, not gridded).
        self.Export_ExtractArchives_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.extractArchivesChoices.append(tk.IntVar(value=extractArchives))
        self.Export_ExtractArchives_[self.exportTabNum].configure(text='Extract Compressed Roms', variable=self.extractArchivesChoices[self.exportTabNum])
        self.Export_ExtractArchives_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.03', x='0', y='0')
        self.Export_ParentFolder_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.parentFolderChoices.append(tk.IntVar(value=parentFolder))
        self.Export_ParentFolder_[self.exportTabNum].configure(text='Create Game Folder for Each Game', variable=self.parentFolderChoices[self.exportTabNum])
        self.Export_ParentFolder_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.132', x='0', y='0')
        self.Export_SortByPrimaryRegion_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.sortByPrimaryRegionChoices.append(tk.IntVar(value=sortByPrimaryRegion))
        self.Export_SortByPrimaryRegion_[self.exportTabNum].configure(text='Create Region Folders', variable=self.sortByPrimaryRegionChoices[self.exportTabNum])
        self.Export_SortByPrimaryRegion_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.234', x='0', y='0')
        self.Export_SortByPrimaryRegion_[self.exportTabNum].configure(command=self.export_togglePrimaryRegionInRoot)
        self.Export_PrimaryRegionInRoot_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.primaryRegionInRootChoices.append(tk.IntVar(value=primaryRegionInRoot))
        self.Export_PrimaryRegionInRoot_[self.exportTabNum].configure(text='Do Not Create Folder for Primary Region', variable=self.primaryRegionInRootChoices[self.exportTabNum])
        self.Export_PrimaryRegionInRoot_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.336', x='0', y='0')
        self.Export_SpecialCategoryFolder_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.specialCategoryFolderChoices.append(tk.IntVar(value=specialCategoryFolder))
        self.Export_SpecialCategoryFolder_[self.exportTabNum].configure(text='Create Folders for Special Categories', variable=self.specialCategoryFolderChoices[self.exportTabNum])
        self.Export_SpecialCategoryFolder_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.438', x='0', y='0')
        self.Export_OverwriteDuplicates_.append(ttk.Checkbutton(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.overwriteDuplicatesChoices.append(tk.IntVar(value=overwriteDuplicates))
        self.Export_OverwriteDuplicates_[self.exportTabNum].configure(text='Overwrite Duplicate Files', variable=self.overwriteDuplicatesChoices[self.exportTabNum])
        self.Export_OverwriteDuplicates_[self.exportTabNum].place(anchor='nw', relx='.651', rely='.540', x='0', y='0')
        self.Export_RemoveSystem_.append(ttk.Button(self.Export_ScrolledFrame_[self.exportTabNum].innerframe))
        self.Export_RemoveSystem_[self.exportTabNum].configure(text='Remove System')
        self.Export_RemoveSystem_[self.exportTabNum].place(anchor='se', relx='1', rely='1', x='-10', y='-10')
        self.Export_RemoveSystem_[self.exportTabNum].configure(command=self.export_removeSystem)
        # self.Export_ScrolledFrame_[self.exportTabNum].innerframe.configure(relief='groove')
        self.Export_ScrolledFrame_[self.exportTabNum].configure(usemousewheel=True)
        self.Export_ScrolledFrame_[self.exportTabNum].place(anchor='nw', relheight='.9', relwidth='.9', relx='.05', rely='.05', x='0', y='0')
        self.Export_Systems.add(self.Export_ScrolledFrame_[self.exportTabNum], text=systemName)
        # Tooltips for every labelled control on this tab.
        tooltip.create(self.Export_DAT_Label_[self.exportTabNum], 'The No-Intro DAT file for the current system. This contains information about each rom, which is used in both exporting and auditing.\n\nNot needed for the \"Favorites\" output type.')
        tooltip.create(self.Export_Romset_Label_[self.exportTabNum], 'The directory containing your roms for the current system.')
        tooltip.create(self.Export_OutputDir_Label_[self.exportTabNum], 'The directory that your roms will be exported to. Ideally, this should be named after the current system.')
        tooltip.create(self.Export_OutputType_Label_[self.exportTabNum], '\"All\": All roms will be exported.\n\n\"1G1R\" (1 Game 1 Rom): Only the latest revision of the highest-priority region group of each game will be exported (e.g. USA Revision 2). See "Region Settings" in Config for more information.\n\n\"Favorites\": Only specific roms from a provided text file will be exported; good for exporting a list of only your favorite roms.')
        tooltip.create(self.Export_includeOtherRegions_[self.exportTabNum], 'If enabled: In the event that a game does not contain a rom from your region (e.g. your primary region is USA but the game is a Japan-only release), a secondary region will be used according to your Region/Language Priority Order.\n\nIf disabled: In the event that a game does not contain a rom from your region, the game is skipped entirely.\n\nIf you only want to export roms from your own region, disable this.')
        tooltip.create(self.Export_FromList_Label_[self.exportTabNum], 'The text list containing your favorite roms for the current system.')
        tooltip.create(self.Export_IncludeTestPrograms_[self.exportTabNum], 'Include non-game programs such as test programs, SDK files, and SNES enhancement chips.\n\nIf unsure, leave this disabled.')
        tooltip.create(self.Export_IncludeNESPorts_[self.exportTabNum], 'Include Classic NES Series, Famicom Mini, Hudson Best Collection, and Kunio-kun Nekketsu Collection emulated ports.\n\nIf unsure, leave this disabled.')
        tooltip.create(self.Export_ExtractArchives_[self.exportTabNum], 'If enabled, any roms from your input romset that are contained in zipped archives (ZIP, 7z, etc.) will be extracted during export.\n\nUseful if your output device does not support zipped roms.\n\nIf unsure, leave this disabled.')
        tooltip.create(self.Export_ParentFolder_[self.exportTabNum], 'If enabled, roms will be exported to a parent folder with the same name as the primary region release of your rom.\n\nFor example, \"Legend of Zelda, The (USA)\" and \"Zelda no Densetsu 1 - The Hyrule Fantasy (Japan)\" will both be exported to a folder titled \"Legend of Zelda, The\".\n\nIf unsure, leave this disabled.')
        tooltip.create(self.Export_SortByPrimaryRegion_[self.exportTabNum], 'If enabled, all roms will be exported to a parent folder named after the game\'s highest-priority region.\n\nFor example, Devil World (NES) has Europe and Japan releases, but not USA. If your order of region priority is USA->Europe->Japan, then all versions of Devil World (and its parent folder, if enabled) will be exported to a folder titled \"[Europe]\".\n\nIf you enable this, it is strongly recommended that you also enable \"Create Game Folder for Each Game\".\n\nIf unsure, leave this enabled.')
        tooltip.create(self.Export_SpecialCategoryFolder_[self.exportTabNum], 'If enabled, all exported roms that are part of a special category (Unlicensed, Unreleased, etc.) will be exported to a parent folder named after that category. There will be multiple nested folders if a game belongs to multiple special categories.\n\nIf unsure, leave this enabled.')
        tooltip.create(self.Export_PrimaryRegionInRoot_[self.exportTabNum], '(Only applies if \"Create Region Folders\" is enabled.)\n\nIf enabled, a region folder will NOT be created for your highest-priority region.\n\nFor example, if your order of region priority is USA->Europe->Japan, then games that have USA releases will not be exported to a [USA] folder (they will instead be placed directly in the output folder), but games that have Europe releases and not USA releases will be exported to a [Europe] folder.\n\nIf unsure, leave this enabled.')
        tooltip.create(self.Export_OverwriteDuplicates_[self.exportTabNum], 'If enabled: If a rom in the output directory with the same name as an exported rom already exists, it will be overwritten by the new export.\n\nIf disabled: The export will not overwrite matching roms in the output directory.\n\nIf unsure, leave this disabled.')
        self.Export_Systems.select(self.exportTabNum)
        # NES Ports / GBA Video toggles only apply to GBA — hide them otherwise.
        if systemName != "Nintendo - Game Boy Advance":
            self.Export_IncludeNESPorts_[self.exportTabNum].grid_remove()
            self.Export_IncludeGBAVideo_[self.exportTabNum].grid_remove()
        # Refresh widget states that depend on the restored choices, then
        # advance the tab counter for the next addSystemTab call.
        self.export_setOutputType()
        self.export_togglePrimaryRegionInRoot()
        self.exportTabNum += 1
def setSystemDAT(self, systemName, datFilePath):
self.datFilePathChoices.append(tk.StringVar(value=''))
if datFilePath != "":
self.datFilePathChoices[self.exportTabNum].set(datFilePath)
return
if self.ssdHelper(systemName):
return
alternateSystemNames = systemNamesDict.get(systemName)[1]
if alternateSystemNames is not None:
for name in alternateSystemNames:
if self.ssdHelper(name):
return
def ssdHelper(self, name):
currSystemDAT = path.join(self.g_datFilePath.get(), name+".dat").replace("\\", "/")
if path.isfile(currSystemDAT):
self.datFilePathChoices[self.exportTabNum].set(currSystemDAT)
return True
if " - " in name:
currSystemDAT = path.join(self.g_datFilePath.get(), name.replace(" - ", " ")+".dat").replace("\\", "/")
if path.isfile(currSystemDAT):
self.datFilePathChoices[self.exportTabNum].set(currSystemDAT)
return True
return False
def setInputRomsetDir(self, systemName, romsetFolderPath):
self.romsetFolderPathChoices.append(tk.StringVar(value=''))
if romsetFolderPath != "":
self.romsetFolderPathChoices[self.exportTabNum].set(romsetFolderPath)
return
if self.sirdHelper(systemName):
return
alternateSystemNames = systemNamesDict.get(systemName)[1]
if alternateSystemNames is not None:
for name in alternateSystemNames:
if self.sirdHelper(name):
return
def sirdHelper(self, name):
currSystemInputDir = path.join(self.g_romsetFolderPath.get(), name).replace("\\", "/")
if path.isdir(currSystemInputDir):
self.romsetFolderPathChoices[self.exportTabNum].set(currSystemInputDir)
return True
if " - " in name:
currSystemInputDir = path.join(self.g_romsetFolderPath.get(), name.replace(" - ", " ")).replace("\\", "/")
if path.isdir(currSystemInputDir):
self.romsetFolderPathChoices[self.exportTabNum].set(currSystemInputDir)
return True
return False
def export_saveSystemLoadout(self):
    """Save the current set of system tabs, with each tab's settings, to a
    user-chosen loadout file in INI format (one section per system)."""
    if self.exportTabNum == 0:
        return
    loadoutFile = asksaveasfilename(defaultextension='.txt', filetypes=[("Text Files", '*.txt')],
                                    initialdir=path.join(progFolder, "System Loadouts"),
                                    title="Save System Tabs")
    if loadoutFile == "":
        return
    loadout = configparser.ConfigParser(allow_no_value=True)
    loadout.optionxform = str  # keep option keys case-sensitive so they round-trip exactly
    for i, systemName in enumerate(self.exportSystemNames):
        loadout[systemName] = {}
        section = loadout[systemName]
        section["Input No-Intro DAT"] = self.datFilePathChoices[i].get()
        section["Input Romset"] = self.romsetFolderPathChoices[i].get()
        section["Output Directory"] = self.outputFolderDirectoryChoices[i].get()
        section["Output Type"] = self.outputTypeChoices[i].get()
        section["Include Games from Non-Primary Regions"] = str(self.includeOtherRegionsChoices[i].get())
        section["Rom List"] = self.romListFileChoices[i].get()
        section["Include Unlicensed"] = str(self.includeUnlicensedChoices[i].get())
        section["Include Unreleased"] = str(self.includeUnreleasedChoices[i].get())
        section["Include Compilations"] = str(self.includeCompilationsChoices[i].get())
        section["Include Misc. Programs"] = str(self.includeTestProgramsChoices[i].get())
        section["Include BIOS"] = str(self.includeBIOSChoices[i].get())
        section["Include NES Ports"] = str(self.includeNESPortsChoices[i].get())
        section["Include GBA Video"] = str(self.includeGBAVideoChoices[i].get())
        section["Extract Compressed Roms"] = str(self.extractArchivesChoices[i].get())
        section["Create Game Folder for Each Game"] = str(self.parentFolderChoices[i].get())
        section["Create Region Folders"] = str(self.sortByPrimaryRegionChoices[i].get())
        section["Do Not Create Folder for Primary Region"] = str(self.primaryRegionInRootChoices[i].get())
        section["Create Folders for Special Categories"] = str(self.specialCategoryFolderChoices[i].get())
        section["Overwrite Duplicate Files"] = str(self.overwriteDuplicatesChoices[i].get())
    with open(loadoutFile, 'w') as lf:
        loadout.write(lf)
def export_loadSystemLoadout(self):
    """Replace the current system tabs with those saved in a loadout file.

    Prompts for confirmation (if any tabs exist), asks for the loadout
    file, removes every existing tab, then rebuilds one tab per recognized
    system section using the settings stored in the file.
    """
    if self.exportTabNum > 0:
        if not askyesno("EzRO", "This will replace your current system loadout. Continue?"):
            return
    loadoutFile = askopenfilename(filetypes=[("System Loadouts", "*.txt")])
    if loadoutFile == "":
        return
    if self.exportTabNum > 0:
        # Remove tabs one at a time; export_removeSystem keeps all the
        # per-tab bookkeeping lists in sync.
        while self.exportTabNum > 0:
            self.export_removeSystem()
    loadout = configparser.ConfigParser(allow_no_value=True)
    loadout.optionxform = str  # keys were saved case-sensitively; read them back the same way
    loadout.read(loadoutFile)
    for key in loadout.keys():
        # Skip sections that are not known system names (e.g. "DEFAULT").
        if key in systemNamesDict.keys():
            self.addSystemTab(systemName=key, datFilePath=loadout[key]["Input No-Intro DAT"], romsetFolderPath=loadout[key]["Input Romset"], outputFolderDirectory=loadout[key]["Output Directory"],
                              outputType=loadout[key]["Output Type"], includeOtherRegions=loadout[key]["Include Games from Non-Primary Regions"], romList=loadout[key]["Rom List"],
                              includeUnlicensed=loadout[key]["Include Unlicensed"], includeUnreleased=loadout[key]["Include Unreleased"], includeCompilations=loadout[key]["Include Compilations"],
                              includeTestPrograms=loadout[key]["Include Misc. Programs"], includeBIOS=loadout[key]["Include BIOS"], includeNESPorts=loadout[key]["Include NES Ports"],
                              includeGBAVideo=loadout[key]["Include GBA Video"], extractArchives=loadout[key]["Extract Compressed Roms"], parentFolder=loadout[key]["Create Game Folder for Each Game"],
                              sortByPrimaryRegion=loadout[key]["Create Region Folders"], primaryRegionInRoot=loadout[key]["Do Not Create Folder for Primary Region"],
                              specialCategoryFolder=loadout[key]["Create Folders for Special Categories"], overwriteDuplicates=loadout[key]["Overwrite Duplicate Files"])
def export_addSystem(self):
    """Add a tab for the system currently selected in the system combobox,
    unless the selection is a separator entry or a tab for that system
    already exists. New tabs start from the global default settings."""
    currSystemChoice = self.systemChoice.get()
    # Separator entries in the combobox consist only of "-"/"=" characters.
    if currSystemChoice.replace("-", "").replace("=", "") == "":
        return
    for es in self.Export_Systems.tabs():
        if self.Export_Systems.tab(es, "text") == currSystemChoice:
            return  # a tab for this system already exists
    self.addSystemTab(systemName=currSystemChoice, datFilePath="", romsetFolderPath="", outputFolderDirectory="",
                      outputType="All", includeOtherRegions=self.g_includeOtherRegions.get(), romList="",
                      includeUnlicensed=self.g_includeUnlicensed.get(), includeUnreleased=self.g_includeUnreleased.get(), includeCompilations=self.g_includeCompilations.get(),
                      includeTestPrograms=self.g_includeTestPrograms.get(), includeBIOS=self.g_includeBIOS.get(), includeNESPorts=self.g_includeNESPorts.get(),
                      includeGBAVideo=self.g_includeGBAVideo.get(), extractArchives=False, parentFolder=self.g_parentFolder.get(),
                      sortByPrimaryRegion=self.g_sortByPrimaryRegion.get(), primaryRegionInRoot=self.g_primaryRegionInRoot.get(),
                      specialCategoryFolder=self.g_specialCategoryFolder.get(), overwriteDuplicates=self.g_overwriteDuplicates.get())
def export_setOutputType(self, event=None):
    """Show/hide the per-tab widgets that only apply to certain output types.

    "1G1R" exposes the "include other regions" checkbox; "Favorites"
    exposes the rom-list chooser. Bound to the output-type combobox, so it
    accepts (and ignores) the tkinter event argument.
    """
    currIndex = self.Export_Systems.index("current")
    currOutputType = self.outputTypeChoices[currIndex].get()
    if currOutputType == "1G1R":
        self.Export_includeOtherRegions_[currIndex].grid(column='0', padx='20', pady='10', row='4', sticky='w')
    else:
        self.Export_includeOtherRegions_[currIndex].grid_remove()
    if currOutputType == "Favorites":
        self.Export_FromList_Label_[currIndex].grid(column='0', padx='20', pady='10', row='5', sticky='w')
        self.Export_FromList_PathChooser_[currIndex].grid(column='0', padx='150', pady='10', row='5', sticky='w')
    else:
        self.Export_FromList_Label_[currIndex].grid_remove()
        self.Export_FromList_PathChooser_[currIndex].grid_remove()
def export_togglePrimaryRegionInRoot(self):
    """Enable the "primary region in root" checkbox only while region
    sorting is turned on for the currently selected system tab."""
    tabIndex = self.Export_Systems.index("current")
    newState = 'enabled' if self.sortByPrimaryRegionChoices[tabIndex].get() else 'disabled'
    self.Export_PrimaryRegionInRoot_[tabIndex].configure(state=newState)
def export_removeSystem(self):
    """Remove the currently selected system tab.

    Drops the tab itself, every per-tab widget reference, and every per-tab
    setting variable kept for it, then selects a neighboring tab if one
    remains.
    """
    currSystemIndex = self.Export_Systems.index("current")
    self.Export_Systems.forget(self.Export_Systems.tabs()[currSystemIndex])
    # Every per-tab list below is kept in lock-step with the tab order, so
    # the same index must be removed from each of them (previously 46
    # hand-written .pop() lines).
    perTabLists = (
        self.exportSystemNames,
        self.Export_ScrolledFrame_,
        self.Export_DAT_Label_, self.Export_DAT_PathChooser_, self.datFilePathChoices,
        self.Export_Romset_Label_, self.Export_Romset_PathChooser_, self.romsetFolderPathChoices,
        self.Export_OutputDir_Label_, self.Export_OutputDir_PathChooser_, self.outputFolderDirectoryChoices,
        self.Export_Separator_,
        self.Export_OutputType_Label_, self.Export_OutputType_Combobox_, self.outputTypeChoices,
        self.Export_includeOtherRegions_, self.includeOtherRegionsChoices,
        self.Export_FromList_Label_, self.Export_FromList_PathChooser_, self.romListFileChoices,
        self.Export_IncludeFrame_,
        self.Export_IncludeUnlicensed_, self.includeUnlicensedChoices,
        self.Export_IncludeUnreleased_, self.includeUnreleasedChoices,
        self.Export_IncludeCompilations_, self.includeCompilationsChoices,
        self.Export_IncludeTestPrograms_, self.includeTestProgramsChoices,
        self.Export_IncludeBIOS_, self.includeBIOSChoices,
        self.Export_IncludeNESPorts_, self.includeNESPortsChoices,
        self.Export_IncludeGBAVideo_, self.includeGBAVideoChoices,
        self.Export_ExtractArchives_, self.extractArchivesChoices,
        self.Export_ParentFolder_, self.parentFolderChoices,
        self.Export_SortByPrimaryRegion_, self.sortByPrimaryRegionChoices,
        self.Export_PrimaryRegionInRoot_, self.primaryRegionInRootChoices,
        self.Export_SpecialCategoryFolder_, self.specialCategoryFolderChoices,
        self.Export_OverwriteDuplicates_, self.overwriteDuplicatesChoices,
        self.Export_RemoveSystem_,
    )
    for perTabList in perTabLists:
        perTabList.pop(currSystemIndex)
    self.exportTabNum -= 1
    # Keep a sensible tab selected after the removal.
    if currSystemIndex < self.exportTabNum:
        self.Export_Systems.select(currSystemIndex)
    elif currSystemIndex > 0:
        self.Export_Systems.select(currSystemIndex - 1)
def export_auditSystem(self):
    """Validate and audit only the currently selected system tab."""
    if self.exportTabNum == 0:
        return
    selectedIndex = [self.Export_Systems.index("current")]
    if self.auditCheck(selectedIndex):
        self.openAuditWindow(numSystems=1, systemIndexList=selectedIndex)
def export_auditAllSystems(self):
    """Validate and audit every system tab at once."""
    if self.exportTabNum == 0:
        return
    allIndices = list(range(self.exportTabNum))
    if self.auditCheck(allIndices):
        self.openAuditWindow(numSystems=len(allIndices), systemIndexList=allIndices)
def auditCheck(self, systemIndices):
    """Validate DAT file and romset folder for each system in *systemIndices*.

    Returns True when every system is ready to be audited; otherwise shows
    a single error dialog listing all problems and returns False.
    """
    failureMessage = ""
    for ind in systemIndices:
        currSystemName = self.exportSystemNames[ind]
        currSystemDAT = self.datFilePathChoices[ind].get()
        currSystemFolder = self.romsetFolderPathChoices[ind].get()
        if currSystemDAT == "":
            failureMessage += currSystemName+":\nMissing DAT file.\n\n"
        elif not path.isfile(currSystemDAT):
            failureMessage += currSystemName+":\nInvalid DAT file (file not found).\n\n"
        else:
            # Wrap parsing like the sibling exportCheck does: a non-XML DAT
            # (or a <name> of None, which made .startswith() raise) used to
            # crash here instead of being reported.
            try:
                tree = ET.parse(currSystemDAT)
                treeRoot = tree.getroot()
                systemNameFromDAT = treeRoot.find("header").find("name").text
                if systemNameFromDAT is None or systemNameFromDAT == "":
                    failureMessage += currSystemName+":\nInvalid DAT file (Parent-Clone DAT is required).\n\n"
                if systemNameFromDAT != currSystemName+" (Parent-Clone)":
                    if systemNameFromDAT.startswith(currSystemName) and "(Parent-Clone)" in systemNameFromDAT: # the found DAT is *probably* correct (e.g N64 has "(BigEndian)" in the name, so this is needed)
                        pass
                    else:
                        failureMessage += currSystemName+":\nDAT header mismatched; this is likely the incorrect DAT file.\nExpected: \""+currSystemName+" (Parent-Clone)\" (or something similar)\nGot: \""+systemNameFromDAT+"\".\n\n"
            except:
                failureMessage += currSystemName+":\nInvalid DAT file (Parent-Clone DAT is required).\n\n"
        if currSystemFolder == "":
            failureMessage += currSystemName+":\nMissing input romset.\n\n"
        elif not path.isdir(currSystemFolder):
            failureMessage += currSystemName+":\nInvalid input romset (directory not found).\n\n"
    if failureMessage == "":
        return True
    showerror("Invalid Parameters", "Please fix the following issues before attempting an audit:\n\n"+failureMessage.strip())
    return False
def openAuditWindow(self, numSystems, systemIndexList):
    """Build and show the modal progress window for an audit run.

    Creates the main (per-system) and sub (per-file) progress bars, a
    read-only log text field, and Start/Cancel buttons. The audit itself
    only begins when the user clicks Start.

    Args:
        numSystems: Number of systems to audit (sets the main bar maximum).
        systemIndexList: Tab indices of the systems to audit.
    """
    self.systemIndexList = systemIndexList
    self.auditInProgress = False
    self.Audit_Window = Toplevel()
    self.Audit_Window.title("EzRO Audit")
    self.Audit_Frame = ttk.Frame(self.Audit_Window)
    self.Audit_MainProgress_Label = ttk.Label(self.Audit_Frame)
    if numSystems == 1:
        self.Audit_MainProgress_Label.configure(text='Preparing to audit system: '+self.exportSystemNames[systemIndexList[0]])
    else:
        self.Audit_MainProgress_Label.configure(text='Preparing to audit '+str(numSystems)+' systems')
    self.Audit_MainProgress_Label.place(anchor='center', relx='.5', rely='.05', x='0', y='0')
    self.Audit_MainProgress_Bar = ttk.Progressbar(self.Audit_Frame)
    self.Audit_MainProgress_Bar.configure(maximum=numSystems, orient='horizontal')
    self.Audit_MainProgress_Bar.place(anchor='center', relwidth='.9', relx='.5', rely='.11', x='0', y='0')
    self.Audit_SubProgress_Bar = ttk.Progressbar(self.Audit_Frame)
    self.Audit_SubProgress_Bar.configure(maximum='100', orient='horizontal')
    self.Audit_SubProgress_Bar.place(anchor='center', relwidth='.9', relx='.5', rely='.17', x='0', y='0')
    # Read-only log area: kept disabled so the user cannot type;
    # writeTextToSubProgress re-enables it briefly to append.
    self.SubProgress_TextField = tk.Text(self.Audit_Frame)
    self.SubProgress_TextField.configure(autoseparators='true', blockcursor='false', height='10', insertontime='0')
    self.SubProgress_TextField.configure(insertwidth='0', state='disabled', tabstyle='tabular', takefocus=False)
    self.SubProgress_TextField.configure(width='50', wrap='word')
    self.SubProgress_TextField.place(anchor='n', relheight='.62', relwidth='.9', relx='.5', rely='.23', x='0', y='0')
    self.Audit_StartButton = ttk.Button(self.Audit_Frame)
    self.audit_startButtonText = tk.StringVar(value='Start Audit')
    self.Audit_StartButton.configure(text='Start Audit', textvariable=self.audit_startButtonText)
    self.Audit_StartButton.place(anchor='center', relx='.4', rely='.925', x='0', y='0')
    self.Audit_StartButton.configure(command=self.audit_startAudit)
    self.Audit_CancelButton = ttk.Button(self.Audit_Frame)
    self.audit_cancelButtonText = tk.StringVar(value='Cancel')
    self.Audit_CancelButton.configure(text='Cancel', textvariable=self.audit_cancelButtonText)
    self.Audit_CancelButton.place(anchor='center', relx='.6', rely='.925', x='0', y='0')
    self.Audit_CancelButton.configure(command=self.audit_cancelAudit)
    self.Audit_Frame.configure(height='200', width='200')
    self.Audit_Frame.place(anchor='nw', relheight='1', relwidth='1', relx='0', rely='0', x='0', y='0')
    self.Audit_Window.configure(height=int(600*screenHeightMult), width=int(800*screenHeightMult))
    # Modal: grab input and route window-close through the cancel handler
    # so a running audit cannot be orphaned.
    self.Audit_Window.grab_set()
    self.Audit_Window.protocol("WM_DELETE_WINDOW", self.audit_cancelAudit)
def audit_startAudit(self):
    """Start-button handler: lock the button and run the queued audit."""
    self.Audit_StartButton.configure(state='disabled')
    self.updateAndAuditVerifiedRomsets(self.systemIndexList)
def audit_cancelAudit(self):
    """Cancel-button / window-close handler for the audit window.

    While an audit is running, ask for confirmation and set a flag that the
    audit loop polls; otherwise just close the window.
    """
    if not self.auditInProgress:
        self.Audit_Window.destroy()
        return
    if askyesno("EzRO Audit", "Cancel the audit?"):
        self.cancelledAudit = True
def export_exportSystem(self):
    """Validate and export only the currently selected system tab."""
    if self.exportTabNum == 0:
        return
    selectedIndex = [self.Export_Systems.index("current")]
    if self.exportCheck(selectedIndex):
        self.openExportWindow(numSystems=1, systemIndexList=selectedIndex)
def export_exportAllSystems(self):
    """Validate and export every system tab at once."""
    if self.exportTabNum == 0:
        return
    allIndices = list(range(self.exportTabNum))
    if self.exportCheck(allIndices):
        self.openExportWindow(numSystems=len(allIndices), systemIndexList=allIndices)
def exportCheck(self, systemIndices):
    """Validate export settings for every system in *systemIndices*.

    Collects all problems (missing/invalid DAT, romset, output directory,
    or Favorites list) into a single error dialog, and separately warns —
    without blocking the export — when two tabs share an output directory.

    Returns:
        True when all systems are ready to export, False otherwise.
    """
    failureMessage = ""
    warningMessage = ""
    for ind in systemIndices:
        currSystemName = self.exportSystemNames[ind]
        currSystemDAT = self.datFilePathChoices[ind].get()
        if currSystemDAT == "":
            failureMessage += currSystemName+":\nMissing DAT file.\n"
        elif not path.isfile(currSystemDAT):
            failureMessage += currSystemName+":\nInvalid DAT file (file not found).\n"
        else:
            # Any parse error (bad XML, missing header, None.startswith)
            # means this is not a usable Parent-Clone DAT.
            try:
                tree = ET.parse(currSystemDAT)
                treeRoot = tree.getroot()
                systemNameFromDAT = treeRoot.find("header").find("name").text
                if systemNameFromDAT is None or systemNameFromDAT == "":
                    failureMessage += currSystemName+":\nInvalid DAT file (Parent-Clone DAT is required).\n\n"
                if systemNameFromDAT != currSystemName+" (Parent-Clone)":
                    if systemNameFromDAT.startswith(currSystemName) and "(Parent-Clone)" in systemNameFromDAT: # the found DAT is *probably* correct (e.g N64 has "(BigEndian)" in the name, so this is needed)
                        pass
                    else:
                        failureMessage += currSystemName+":\nDAT header mismatched; this is likely the incorrect DAT file.\nExpected: \""+currSystemName+" (Parent-Clone)\" (or something similar)\nGot: \""+systemNameFromDAT+"\".\n\n"
            except:
                failureMessage += currSystemName+":\nInvalid DAT file (Parent-Clone DAT is required).\n\n"
        currSystemFolder = self.romsetFolderPathChoices[ind].get()
        if currSystemFolder == "":
            failureMessage += currSystemName+":\nMissing input romset.\n\n"
        elif not path.isdir(currSystemFolder):
            failureMessage += currSystemName+":\nInvalid input romset (directory not found).\n\n"
        currOutputFolder = self.outputFolderDirectoryChoices[ind].get()
        if currOutputFolder == "":
            failureMessage += currSystemName+":\nMissing output directory.\n\n"
        elif not path.isdir(currOutputFolder):
            failureMessage += currSystemName+":\nInvalid output directory (directory not found).\n\n"
        if (not (currSystemFolder == "" or currOutputFolder == "")) and (currSystemFolder == currOutputFolder):
            failureMessage += currSystemName+":\nInput and output directories are the same.\n\n"
        currOutputType = self.outputTypeChoices[ind].get()
        if currOutputType == "Favorites":
            currFavoritesList = self.romListFileChoices[ind].get()
            if currFavoritesList == "":
                failureMessage += currSystemName+":\nMissing Favorites rom list.\n\n"
            elif not path.isfile(currFavoritesList):
                failureMessage += currSystemName+":\nInvalid Favorites rom list (file not found).\n\n"
    # Warn (about the first match only) when two tabs write into the same
    # directory. BUGFIX: compare the StringVar *values* — comparing the
    # StringVar objects themselves (as before) could never be equal, so the
    # warning never fired.
    for i in range(len(self.outputFolderDirectoryChoices)):
        currOutputDir = self.outputFolderDirectoryChoices[i].get()
        if currOutputDir == "":
            continue  # empty dirs are reported as failures above, not here
        for j in range(i+1, len(self.outputFolderDirectoryChoices)):
            otherOutputDir = self.outputFolderDirectoryChoices[j].get()
            if currOutputDir == otherOutputDir:
                warningMessage += self.exportSystemNames[i]+" and "+self.exportSystemNames[j]+":\nThese two systems have the same output directory.\n\nYou may want to create a different directory for each system so their games don't get mixed up.\n\n"
                break
        if warningMessage != "":
            break
    if warningMessage != "":
        showinfo("Warning", warningMessage.strip())
    if failureMessage == "":
        return True
    showerror("Invalid Parameters", "Please fix the following issues before attempting an export:\n\n"+failureMessage.strip())
    return False
def openExportWindow(self, numSystems, systemIndexList):
    """Build and show the modal progress window for an export run.

    Mirrors openAuditWindow: main/sub progress bars, a read-only log text
    field, and Start/Cancel buttons. The window title and labels reflect
    whether this is a real export or a test export (self.isExport).

    Args:
        numSystems: Number of systems to export (sets the main bar maximum).
        systemIndexList: Tab indices of the systems to export.
    """
    self.systemIndexList = systemIndexList
    self.exportInProgress = False
    self.Export_Window = Toplevel()
    if self.isExport:
        self.Export_Window.title("EzRO Export")
    else:
        self.Export_Window.title("EzRO Test Export")
    self.Export_Frame = ttk.Frame(self.Export_Window)
    self.Export_MainProgress_Label = ttk.Label(self.Export_Frame)
    if numSystems == 1:
        if self.isExport:
            self.Export_MainProgress_Label.configure(text='Preparing to export system: '+self.exportSystemNames[systemIndexList[0]])
        else:
            self.Export_MainProgress_Label.configure(text='Preparing to test export of system: '+self.exportSystemNames[systemIndexList[0]])
    else:
        if self.isExport:
            self.Export_MainProgress_Label.configure(text='Preparing to export '+str(numSystems)+' systems')
        else:
            self.Export_MainProgress_Label.configure(text='Preparing to test export of '+str(numSystems)+' systems')
    self.Export_MainProgress_Label.place(anchor='center', relx='.5', rely='.05', x='0', y='0')
    self.Export_MainProgress_Bar = ttk.Progressbar(self.Export_Frame)
    self.Export_MainProgress_Bar.configure(maximum=numSystems, orient='horizontal')
    self.Export_MainProgress_Bar.place(anchor='center', relwidth='.9', relx='.5', rely='.11', x='0', y='0')
    self.Export_SubProgress_Bar = ttk.Progressbar(self.Export_Frame)
    self.Export_SubProgress_Bar.configure(maximum='100', orient='horizontal')
    self.Export_SubProgress_Bar.place(anchor='center', relwidth='.9', relx='.5', rely='.17', x='0', y='0')
    # Read-only log area; writeTextToSubProgress re-enables it to append.
    self.SubProgress_TextField = tk.Text(self.Export_Frame)
    self.SubProgress_TextField.configure(autoseparators='true', blockcursor='false', height='10', insertontime='0')
    self.SubProgress_TextField.configure(insertwidth='0', state='disabled', tabstyle='tabular', takefocus=False)
    self.SubProgress_TextField.configure(width='50', wrap='word')
    self.SubProgress_TextField.place(anchor='n', relheight='.62', relwidth='.9', relx='.5', rely='.23', x='0', y='0')
    self.Export_StartButton = ttk.Button(self.Export_Frame)
    self.export_startButtonText = tk.StringVar(value='Start Export')
    self.Export_StartButton.configure(text='Start Export', textvariable=self.export_startButtonText)
    self.Export_StartButton.place(anchor='center', relx='.4', rely='.925', x='0', y='0')
    self.Export_StartButton.configure(command=self.export_startExport)
    self.Export_CancelButton = ttk.Button(self.Export_Frame)
    self.export_cancelButtonText = tk.StringVar(value='Cancel')
    self.Export_CancelButton.configure(text='Cancel', textvariable=self.export_cancelButtonText)
    self.Export_CancelButton.place(anchor='center', relx='.6', rely='.925', x='0', y='0')
    self.Export_CancelButton.configure(command=self.export_cancelExport)
    self.Export_Frame.configure(height='200', width='200')
    self.Export_Frame.place(anchor='nw', relheight='1', relwidth='1', relx='0', rely='0', x='0', y='0')
    self.Export_Window.configure(height=int(600*screenHeightMult), width=int(800*screenHeightMult))
    # Modal: route window-close through the cancel handler.
    self.Export_Window.grab_set()
    self.Export_Window.protocol("WM_DELETE_WINDOW", self.export_cancelExport)
def export_startExport(self):
    """Start-button handler: lock the button and run the queued export."""
    self.Export_StartButton.configure(state='disabled')
    self.mainExport(self.systemIndexList)
def export_cancelExport(self):
    """Cancel-button / window-close handler for the export window.

    While an export is running, ask for confirmation and set a flag the
    export loop polls; otherwise just close the window. (Cancelling early
    makes tkinter report writes to destroyed widgets — progress bar, text
    field — but that doesn't actually affect anything.)
    """
    if not self.exportInProgress:
        self.Export_Window.destroy()
        return
    if askyesno("EzRO Export", "Cancel the export?"):
        self.cancelledExport = True
def export_toggleTestExport(self):
    """Sync the real-export flag with the "test export" checkbox (inverted)."""
    self.isExport = not self.isTestExport.get()
def export_auditHelp(self):
    """Show a dialog explaining what auditing a system directory does."""
    showinfo("Audit Help",
        "\"Auditing\" a system directory updates the file names of misnamed roms (and the ZIP files containing them, if applicable) to match the rom's entry in the system's No-Intro DAT. This is determined by the rom's matching checksum in the DAT, so the original name doesn't matter."
        +"\n\nThis also creates a log file indicating which roms exist in the romset, which roms are missing, and which roms are in the set that don't match anything from the DAT."
        +"\n\nIt is highly recommended that you audit a system directory whenever you update that system's No-Intro DAT.")
def writeTextToSubProgress(self, text):
    """Append *text* to the (otherwise read-only) progress text field."""
    field = self.SubProgress_TextField
    field.configure(state='normal')  # must be editable for programmatic insert
    field.insert(tk.END, text)
    field.configure(state='disabled')
#################
# AUDIT (Logic) #
#################
def updateAndAuditVerifiedRomsets(self, systemIndices):
    """Run the audit over each system tab index in *systemIndices*.

    For each system: parse its DAT into a CRC->names map, walk the romset
    folder (skipping any "[Unverified]" subfolder), rename files whose CRC
    matches a differently-named DAT entry, and write an audit log of
    present/missing/unmatched roms. Progress is reported through the audit
    window's bars and text field; the Cancel button is polled via
    tk_root.update().
    """
    global allGameNamesInDAT, romsWithoutCRCMatch, progressBar
    self.auditInProgress = True
    self.cancelledAudit = False
    self.Audit_MainProgress_Label.configure(text='Auditing...')
    self.Audit_MainProgress_Bar['value'] = 0
    isFirstSystem = True
    for currIndex in systemIndices:
        self.Audit_SubProgress_Bar['value'] = 0
        currSystemName = self.exportSystemNames[currIndex]
        currSystemFolder = self.romsetFolderPathChoices[currIndex].get()
        if not path.isdir(currSystemFolder):
            continue
        if isFirstSystem:
            isFirstSystem = False
        else:
            # Visual separator between consecutive systems in the log.
            self.writeTextToSubProgress("========================================\n\n")
        self.Audit_MainProgress_Label.configure(text="Auditing system: "+currSystemName)
        self.writeTextToSubProgress("Auditing system: "+currSystemName+"\n\n")
        isNoIntro = True
        currSystemDAT = self.datFilePathChoices[currIndex].get()
        tree = ET.parse(currSystemDAT)
        treeRoot = tree.getroot()
        # Skip the <header> element; everything after it is a game entry.
        allGameFields = treeRoot[1:]
        # Map each CRC to the (possibly several) game names sharing it.
        crcToGameName = {}
        allGameNames = []
        for game in allGameFields:
            gameName = game.get("name")
            allGameNames.append(gameName)
            try:
                gameCRC = game.find("rom").get("crc").upper()
            except:
                gameCRC = None  # entry without a <rom> element or crc attribute
            if gameCRC not in crcToGameName.keys():
                crcToGameName[gameCRC] = []
            crcToGameName[gameCRC].append(gameName)
        # NES roms carry a 16-byte header that the DAT CRCs exclude.
        if currSystemName == "Nintendo - Nintendo Entertainment System":
            headerLength = 16
        else:
            headerLength = 0
        allGameNamesInDAT = {}
        for gameName in allGameNames:
            allGameNamesInDAT[gameName] = False
        romsWithoutCRCMatch = []
        # First pass only counts files so the sub bar gets a real maximum.
        numFiles = 0
        for root, dirs, files in walk(currSystemFolder):
            for file in files:
                if path.basename(root) != "[Unverified]":
                    numFiles += 1
        self.Audit_SubProgress_Bar.configure(maximum=str(numFiles))
        for root, dirs, files in walk(currSystemFolder):
            for file in files:
                if path.basename(root) != "[Unverified]":
                    foundMatch = self.renamingProcess(root, file, isNoIntro, headerLength, crcToGameName, allGameNames)
                    self.Audit_SubProgress_Bar['value'] += 1
                    tk_root.update() # a full update() (as opposed to update_idletasks()) allows us to check if the Cancel button was clicked, allowing a safe early exit
                    if self.cancelledAudit:
                        break
            if self.cancelledAudit:
                break
        xmlRomsInSet = [key for key in allGameNamesInDAT.keys() if allGameNamesInDAT[key] == True]
        xmlRomsNotInSet = [key for key in allGameNamesInDAT.keys() if allGameNamesInDAT[key] == False]
        self.createSystemAuditLog(xmlRomsInSet, xmlRomsNotInSet, romsWithoutCRCMatch, currSystemName)
        numNoCRC = len(romsWithoutCRCMatch)
        if numNoCRC > 0:
            self.writeTextToSubProgress("NOTICE: "+str(numNoCRC)+pluralize(" file", numNoCRC)+" in this system folder "+pluralize("do", numNoCRC, "es", "")+" not have a matching entry in the provided DAT file.\n")
            self.writeTextToSubProgress(pluralize("", numNoCRC, "This file", "These files")+" may be ignored when exporting this system's romset.\n\n")
        # if moveUnverified == 1:
        #     numMoved = 0
        #     unverifiedFolder = path.join(currSystemFolder, "[Unverified]")
        #     createDir(unverifiedFolder)
        #     for fileName in romsWithoutCRCMatch:
        #         try:
        #             rename(path.join(currSystemFolder, fileName), path.join(unverifiedFolder, fileName))
        #             numMoved += 1
        #         except:
        #             pass
        #     print("Moved "+str(numMoved)+" of these file(s) to \"[Unverified]\" subfolder in system directory.")
        self.Audit_MainProgress_Bar['value'] += 1
        tk_root.update_idletasks()
    self.recentlyVerified = True
    self.writeTextToSubProgress("Done.")
    self.Audit_MainProgress_Label.configure(text="Audit complete.")
    self.audit_cancelButtonText.set("Finish")
    self.auditInProgress = False
def getCRC(self, filePath, headerLength=0):
    """Return the uppercase, zero-padded CRC32 hex string for the rom at
    *filePath*.

    Zipped roms are hashed by their single archived member; *headerLength*
    leading bytes are skipped before hashing (e.g. 16 for headered NES
    roms). Returns False for archives containing more than one file.
    """
    if zipfile.is_zipfile(filePath):
        with zipfile.ZipFile(filePath, 'r', zipfile.ZIP_DEFLATED) as archive:
            entries = archive.namelist()
            if len(entries) > 1:
                return False  # multi-file archives can't map to one rom
            if headerLength == 0:
                # The zip directory already stores the content's CRC32.
                storedCRC = archive.infolist()[0].CRC
                return format(storedCRC & 0xFFFFFFFF, '08x').zfill(8).upper()
            contents = archive.read(entries[0])
            return str(hex(binascii.crc32(contents[headerLength:])))[2:].zfill(8).upper()
    if headerLength == 0:
        return crcHasher.hash_file(filePath).zfill(8).upper()
    with open(filePath, "rb") as romFile:
        contents = romFile.read()
    return str(hex(binascii.crc32(contents[headerLength:])))[2:].zfill(8).upper()
def renamingProcess(self, root, file, isNoIntro, headerLength, crcToGameName, allGameNames):
    """Audit a single file: rename it to its DAT name when its CRC matches.

    Outcomes: the file already carries its DAT name (mark it present); its
    CRC matches a differently-named entry (rename it, displacing any
    wrongly-named occupant of that name first); every matching name is
    already correctly taken (rename as "(copy) (n)"); or no CRC match
    exists (record it in romsWithoutCRCMatch). Results are communicated
    through the module-level allGameNamesInDAT / romsWithoutCRCMatch
    globals rather than a return value.
    """
    global allGameNamesInDAT, romsWithoutCRCMatch
    currFilePath = path.join(root, file)
    currFileName, currFileExt = path.splitext(file)
    if not path.isfile(currFilePath): # this is necessary
        # The recursive call below can pass a name that no longer resolves
        # to an existing file, so this guard is load-bearing.
        romsWithoutCRCMatch.append(file)
        return
    foundMatch = False
    if isNoIntro:
        currFileCRC = self.getCRC(currFilePath, headerLength)
        if not currFileCRC:
            # getCRC returns False for multi-file archives.
            self.writeTextToSubProgress(file+" archive contains more than one file. Skipping.\n\n")
            romsWithoutCRCMatch.append(file)
            return
        matchingGameNames = crcToGameName.get(currFileCRC)
        if matchingGameNames is not None:
            if not currFileName in matchingGameNames:
                currFileIsDuplicate = True
                for name in matchingGameNames:
                    currPossibleMatchingGame = path.join(root, name+currFileExt)
                    if not path.exists(currPossibleMatchingGame):
                        self.renameGame(currFilePath, name, currFileExt)
                        allGameNamesInDAT[name] = True
                        currFileIsDuplicate = False
                        break
                    elif self.getCRC(currPossibleMatchingGame, headerLength) != currFileCRC: # If the romset started with a rom that has a name in the database, but with the wrong hash (e.g. it's called "Doom 64 (USA)", but it's actually something else)
                        # Displace the wrongly-named occupant, take its name,
                        # then re-audit the displaced file.
                        self.renameGame(currPossibleMatchingGame, name+" (no match)", currFileExt)
                        self.renameGame(currFilePath, name, currFileExt)
                        # NOTE(review): this recursive call passes the displaced
                        # name WITHOUT currFileExt, so it hits the isfile guard
                        # above and is logged as unmatched — confirm whether
                        # name+" (no match)"+currFileExt was intended.
                        self.renamingProcess(root, name+" (no match)", isNoIntro, headerLength, crcToGameName, allGameNames)
                        allGameNamesInDAT[name] = True
                        currFileIsDuplicate = False
                        break
                if currFileIsDuplicate:
                    # Every name for this CRC is already correctly taken, so
                    # this file is an extra copy; give it a unique "(copy)" name.
                    dnStart = matchingGameNames[0]+" (copy) ("
                    i = 1
                    while True:
                        duplicateName = dnStart+str(i)+")"
                        duplicatePath = path.join(root, duplicateName)
                        if not path.exists(duplicatePath):
                            break
                        i += 1
                    self.renameGame(currFilePath, duplicateName, currFileExt)
                    self.writeTextToSubProgress("Duplicate found and renamed: "+duplicateName+"\n\n")
            else:
                allGameNamesInDAT[currFileName] = True
                foundMatch = True
    else:
        # Non-No-Intro systems: match by file name only, no hashing.
        if currFileName in allGameNames:
            allGameNamesInDAT[currFileName] = True
            foundMatch = True
    if not foundMatch:
        # NOTE(review): files that were just renamed to a DAT name also fall
        # through here (foundMatch stays False) — verify this is intended.
        romsWithoutCRCMatch.append(file)
def renameGame(self, filePath, newName, fileExt):
    """Rename a rom file — or, for a zip, the archive plus its contents —
    to *newName*, logging the rename to the progress field."""
    if zipfile.is_zipfile(filePath):
        # renameArchiveAndContent emits its own "Renamed ..." log line.
        self.renameArchiveAndContent(filePath, newName)
        return
    rename(filePath, path.join(path.dirname(filePath), newName+fileExt))
    self.writeTextToSubProgress("Renamed "+path.splitext(path.basename(filePath))[0]+" to "+newName+"\n\n")
def createSystemAuditLog(self, xmlRomsInSet, xmlRomsNotInSet, romsWithoutCRCMatch, currSystemName):
    """Write a timestamped audit log for *currSystemName* into the log folder.

    The log lists the roms present ("CONTAINS"), the roms the DAT knows but
    the set lacks ("MISSING"), and any files with no database match. The
    three input lists are sorted in place.
    """
    # ":" is not allowed in Windows filenames, so sanitize the ISO timestamp.
    currTime = datetime.now().isoformat(timespec='seconds').replace(":", ".")
    xmlRomsInSet.sort()
    xmlRomsNotInSet.sort()
    romsWithoutCRCMatch.sort()
    numOverlap = len(xmlRomsInSet)
    numNotInSet = len(xmlRomsNotInSet)
    numNoCRC = len(romsWithoutCRCMatch)
    createDir(logFolder)
    logName = (currTime+" Audit ("+currSystemName+") ["+str(numOverlap)+" out of "
               +str(numOverlap+numNotInSet)+"] ["+str(numNoCRC)+" unverified].txt")
    # "with" guarantees the log is closed (and flushed) even if a write
    # fails; write() replaces the old writelines() calls, which were each
    # passed a single string rather than an iterable of lines.
    with open(path.join(logFolder, logName), "w", encoding="utf-8", errors="replace") as auditLogFile:
        auditLogFile.write("=== "+currSystemName+" ===\n")
        auditLogFile.write("=== This romset contains "+str(numOverlap)+" of "+str(numOverlap+numNotInSet)+" known ROMs ===\n\n")
        if numOverlap > 0:
            auditLogFile.write("= CONTAINS =\n")
            for rom in xmlRomsInSet:
                auditLogFile.write(rom+"\n")
        if numNotInSet > 0:
            auditLogFile.write("\n= MISSING =\n")
            for rom in xmlRomsNotInSet:
                auditLogFile.write(rom+"\n")
        if numNoCRC > 0:
            auditLogFile.write("\n=== This romset contains "+str(numNoCRC)+pluralize(" file", numNoCRC)+" with no known database match ===\n\n")
            for rom in romsWithoutCRCMatch:
                auditLogFile.write(rom+"\n")
def renameArchiveAndContent(self, archivePath, newName):
    """Rename a single-file zip archive and the rom inside it to *newName*.

    The member is extracted beside the archive, the old archive is deleted,
    a new archive is written containing the renamed rom, and the loose
    extracted copy is removed. Archives holding more than one file are
    skipped. Logs the rename to the progress text field.
    """
    with zipfile.ZipFile(archivePath, 'r', zipfile.ZIP_DEFLATED) as zippedFile:
        zippedFiles = zippedFile.namelist()
        if len(zippedFiles) > 1:
            self.writeTextToSubProgress("Archive contains more than one file. Skipping.\n")
            return
        fileExt = path.splitext(zippedFiles[0])[1]
        archiveExt = path.splitext(archivePath)[1]
        zippedFile.extract(zippedFiles[0], path.dirname(archivePath))
    # The read handle is closed at this point, so the original archive can
    # be deleted below (Windows forbids removing an open file).
    currExtractedFilePath = path.join(path.dirname(archivePath), zippedFiles[0])
    newArchivePath = path.join(path.dirname(archivePath), newName+archiveExt)
    newExtractedFilePath = path.splitext(newArchivePath)[0]+fileExt
    rename(currExtractedFilePath, newExtractedFilePath)
    remove(archivePath)
    with zipfile.ZipFile(newArchivePath, 'w', zipfile.ZIP_DEFLATED) as newZip:
        newZip.write(newExtractedFilePath, arcname=newName+fileExt)
    remove(newExtractedFilePath)
    self.writeTextToSubProgress("Renamed "+path.splitext(path.basename(archivePath))[0]+" to "+newName+"\n\n")
##################
# EXPORT (Logic) #
##################
def mainExport(self, systemIndices):
    """Run the export (or dry-run "test" export) pipeline for the selected systems.

    Args:
        systemIndices: indices into the per-system Export-tab setting arrays.

    For each system, snapshots the GUI settings into module-level globals,
    builds the game->rom dictionary from the system's DAT and source folder,
    and copies the chosen romset, updating the Export progress widgets.
    Whether files are actually written is controlled by self.isExport.
    """
    global currSystemName, currSystemSourceFolder, currSystemTargetFolder, currSystemDAT, romsetCategory
    global includeOtherRegions, includeUnlicensed, includeUnreleased, includeCompilations, includeGBAVideo, includeNESPorts
    global extractArchives, exportToGameParentFolder, sortByPrimaryRegion, primaryRegionInRoot, specialCategoryFolder, overwriteDuplicates
    global ignoredFolders, primaryRegions, favoritesList
    global export_regionGroupNames, export_regionPriorityTypes, export_regionTags
    # Encourage an audit first so rom names line up with the No-Intro DATs.
    if not self.recentlyVerified:
        if not askyesno("EzRO Export", "If you haven't done so already, it is recommended that you update/audit your romsets whenever you export (or if this is your first time running EzRO). This will make sure your rom names match those in the No-Intro DAT files.\n\nContinue with export?"):
            return
    self.exportInProgress = True
    self.cancelledExport = False
    # Flatten the region-group tk variables into plain string lists. A tertiary
    # catch-all group (priority "Secondary", empty tag list) is appended last.
    export_regionGroupNames = self.regionGroupNames + [self.regionGroupTertiary]
    for i in range(len(export_regionGroupNames)):
        export_regionGroupNames[i] = export_regionGroupNames[i].get()
    export_regionPriorityTypes = self.regionPriorityTypes + [tk.StringVar(value="Secondary")]
    for i in range(len(export_regionPriorityTypes)):
        export_regionPriorityTypes[i] = export_regionPriorityTypes[i].get()
    export_regionTags = self.regionTags[:]
    tertiaryTags = []
    # Not needed
    # for regionTag in ["World", "U", "USA", "E", "Europe", "United Kingdom", "En", "A", "Australia", "Ca", "Canada", "J", "Japan", "Ja", "F", "France", "Fr", "G", "Germany", "De", "S", "Spain", "Es", "I", "Italy", "It", "No", "Norway", "Br", "Brazil", "Sw", "Sweden", "Cn", "China", "Zh", "K", "Korea", "Ko", "As", "Asia", "Ne", "Netherlands", "Ru", "Russia", "Da", "Denmark", "Nl", "Pt", "Sv", "No", "Da", "Fi", "Pl"]:
    #     addToTertiary = True
    #     for rtGroup in self.regionTags:
    #         if regionTag in self.commaSplit(rtGroup.get()):
    #             addToTertiary = False
    #             break
    #     if addToTertiary:
    #         tertiaryTags.append(regionTag)
    export_regionTags.append(tk.StringVar(value=", ".join(tertiaryTags)))
    for i in range(len(export_regionTags)):
        export_regionTags[i] = export_regionTags[i].get()
    numCopiedBytesMain = 0
    self.Export_MainProgress_Bar['value'] = 0
    for currIndex in systemIndices:
        currSystemName = self.exportSystemNames[currIndex]
        if self.isExport:
            self.Export_MainProgress_Label.configure(text='Exporting system: '+currSystemName)
        else:
            self.Export_MainProgress_Label.configure(text='Testing export of system: '+currSystemName)
        self.writeTextToSubProgress("====================\n\n"+currSystemName+"\n")
        # Snapshot this system's GUI choices into the globals the helpers read.
        currSystemDAT = self.datFilePathChoices[currIndex].get()
        currSystemSourceFolder = self.romsetFolderPathChoices[currIndex].get()
        currSystemTargetFolder = self.outputFolderDirectoryChoices[currIndex].get()
        romsetCategory = self.outputTypeChoices[currIndex].get()
        favoritesFile = self.romListFileChoices[currIndex].get()
        includeOtherRegions = self.includeOtherRegionsChoices[currIndex].get()
        includeUnlicensed = self.includeUnlicensedChoices[currIndex].get()
        includeUnreleased = self.includeUnreleasedChoices[currIndex].get()
        includeCompilations = self.includeCompilationsChoices[currIndex].get()
        includeTestPrograms = self.includeTestProgramsChoices[currIndex].get()
        includeBIOS = self.includeBIOSChoices[currIndex].get()
        includeNESPorts = self.includeNESPortsChoices[currIndex].get()
        includeGBAVideo = self.includeGBAVideoChoices[currIndex].get()
        extractArchives = self.extractArchivesChoices[currIndex].get()
        exportToGameParentFolder = self.parentFolderChoices[currIndex].get()
        sortByPrimaryRegion = self.sortByPrimaryRegionChoices[currIndex].get()
        primaryRegionInRoot = self.primaryRegionInRootChoices[currIndex].get()
        specialCategoryFolder = self.specialCategoryFolderChoices[currIndex].get()
        overwriteDuplicates = self.overwriteDuplicatesChoices[currIndex].get()
        # Translate the include-flags into the special-category folder names to skip.
        ignoredFolders = []
        if not includeUnlicensed:
            ignoredFolders.append("Unlicensed")
        if not includeUnreleased:
            ignoredFolders.append("Unreleased")
        if not includeCompilations:
            ignoredFolders.append("Compilation")
        if not includeTestPrograms:
            ignoredFolders.append("Misc. Programs")
        if not includeBIOS:
            ignoredFolders.append("BIOS")
        if not includeNESPorts:
            ignoredFolders.append("NES & Famicom")
        if not includeGBAVideo:
            ignoredFolders.append("GBA Video")
        primaryRegions = []
        for i in range(len(export_regionGroupNames)):
            if export_regionPriorityTypes[i] == "Primary":
                primaryRegions.append(export_regionGroupNames[i])
        # For a Favorites export, read the rom list file; each entry is marked
        # True later when it is actually found and copied.
        favoritesList = {}
        if romsetCategory == "Favorites":
            with open(favoritesFile, 'r') as ff:
                for line in ff.readlines():
                    if line.strip() != "" and not line.startswith("#"):
                        favoritesList[line.strip()] = False
        self.checkSystemDATForClones()
        self.generateGameRomDict(currIndex)
        numCopiedBytesMain += self.copyMainRomset(currIndex)
        self.Export_MainProgress_Bar['value'] += 1
    self.writeTextToSubProgress("====================\n\nTotal Export Size: "+simplifyNumBytes(numCopiedBytesMain)+"\n\n")
    self.writeTextToSubProgress("Review the log files for more information on what files "+("were" if self.isExport else "would be")+" transferred.\n")
    self.writeTextToSubProgress("Log files are not created for systems that "+("do" if self.isExport else "would")+" not receive any new files.\n\n")
    self.recentlyVerified = True
    self.writeTextToSubProgress("Done.")
    if self.isExport:
        self.Export_MainProgress_Label.configure(text="Export complete.")
    else:
        self.Export_MainProgress_Label.configure(text="Test export complete.")
    self.export_cancelButtonText.set("Finish")
    self.exportInProgress = False
def checkSystemDATForClones(self):
    """Set the currSystemHasClones flag: True if any game entry in the current
    system's DAT file declares a "cloneof" parent, False otherwise."""
    global currSystemHasClones
    datRoot = ET.parse(currSystemDAT).getroot()
    # Skip the header element; scan every <game> for a cloneof attribute.
    # any() short-circuits on the first clone found, like the original early return.
    currSystemHasClones = any(
        game.get("cloneof") is not None for game in datRoot[1:]
    )
def generateGameRomDict(self, currIndex):
    """Build the module-level gameRomDict mapping game name -> list of rom files.

    Parses the current system DAT for parent/clone relationships, groups each
    file in the source folder under its parent game, then renames every
    group's key to its "best" display name, resolving or merging collisions.
    (currIndex is currently unused here.)
    """
    global gameRomDict, newGameRomDict, allGameFields
    gameRomDict = {}
    tree = ET.parse(currSystemDAT)
    treeRoot = tree.getroot()
    allGameFields = treeRoot[1:]  # skip the DAT header element
    gameNameToCloneOf = {}
    for game in allGameFields:
        gameName = game.get("name")
        try:
            gameCloneOf = game.get("cloneof")
        except:
            gameCloneOf = None
        gameNameToCloneOf[gameName] = gameCloneOf
    for file in listdir(currSystemSourceFolder):
        _, _, _, currRegionType = self.getRomsInBestRegion([path.splitext(file)[0]])
        # For a 1G1R export without non-primary regions, drop those roms up front.
        if currRegionType != "Primary" and romsetCategory == "1G1R" and not includeOtherRegions:
            continue
        romName = path.splitext(file)[0]
        if romName in gameNameToCloneOf:
            parent = gameNameToCloneOf[romName]
            if parent is None:
                # This rom IS the parent entry.
                self.addGameAndRomToDict(romName, file)
            else:
                self.addGameAndRomToDict(parent, file)
    # Rename gameRomDict keys according to best game name
    newGameRomDict = {}
    for game in gameRomDict.keys():
        bestGameName = self.getBestGameName(gameRomDict[game])
        mergeBoth = False  # NOTE(review): assigned but never read afterwards
        if bestGameName in newGameRomDict: # same name for two different games (Pokemon Stadium International vs. Japan)
            finalFirstGameName, finalSecondGameName, renameByAtts = self.fixDuplicateName(newGameRomDict[bestGameName], gameRomDict[game], bestGameName)
            if renameByAtts: # rename first game according to region
                newGameRomDict[finalFirstGameName] = newGameRomDict.pop(bestGameName)
                newGameRomDict[finalSecondGameName] = gameRomDict[game]
            else: # rename neither (merge the two together); rare, but possible, such as DS demos that have both a DS Download Station and a Nintendo Channel version
                for rom in gameRomDict[game]:  # merge the second game's roms into the first
                    newGameRomDict[bestGameName].append(rom)
        else:
            newGameRomDict[bestGameName] = gameRomDict[game]
    gameRomDict = newGameRomDict
def getRomsInBestRegion(self, roms):
    """Select the subset of roms belonging to the highest-priority region group.

    Returns a tuple (romsInBestRegion, bestRegionIndex, bestRegion,
    bestRegionType) where bestRegionType is "Primary", "Secondary", or None
    (when roms is empty / nothing matched). Primary groups always beat
    Secondary groups; within a type, a lower group index wins. The last
    region group acts as a catch-all that matches every rom.
    """
    romsInBestRegion = []
    bestRegionIndex = 99  # sentinel: lower index = higher priority
    bestRegion = None
    bestRegionType = 0  # 0 = none yet, 1 = Secondary, 2 = Primary
    for rom in roms:
        attributeSplit = self.getAttributeSplit(rom)
        for i in range(len(export_regionGroupNames)):
            region = export_regionGroupNames[i]
            currRegionAtts = self.commaSplit(export_regionTags[i])
            regionType = (2 if export_regionPriorityTypes[i]=="Primary" else 1)
            # The final group is the catch-all, so it matches unconditionally.
            if arrayOverlap(attributeSplit, currRegionAtts) or i==len(export_regionGroupNames)-1:
                if regionType >= bestRegionType:
                    if i < bestRegionIndex or regionType > bestRegionType:
                        # Strictly better region: restart the candidate list.
                        bestRegionIndex = i
                        romsInBestRegion = [rom]
                        bestRegion = region
                        bestRegionType = regionType
                    elif i == bestRegionIndex:
                        # Same best region: this rom is also a candidate.
                        romsInBestRegion.append(rom)
                if regionType == 2:
                    break  # a Primary match can't be beaten by a later group
    bestRegionType = (None, "Secondary", "Primary")[bestRegionType]
    return romsInBestRegion, bestRegionIndex, bestRegion, bestRegionType
def getAttributeSplit(self, name):
    """Split a rom filename into its base title plus its bracketed attribute tags.

    "Game (USA, Europe) (Rev 1).zip" -> ["Game", "USA", "Europe", "Rev 1"].
    The file extension is dropped; tags separated by commas or '+' inside one
    set of parentheses are split into individual entries. A leading "[BIOS]"
    marker is kept fused to the title rather than treated as a tag.

    Fix: the regex patterns are now raw strings — '\\(' etc. in a normal string
    are invalid escape sequences (SyntaxWarning in modern Python).
    """
    mna = [s.strip() for s in re.split(r'\(|\)|\[|\]', path.splitext(name)[0]) if s.strip() != ""]
    if name.startswith("[BIOS]") and len(mna) > 1:
        # Re-fuse "[BIOS]" with the title so it survives the bracket split.
        mna[:2] = ["[BIOS] "+mna[1]]
    mergeNameArray = [mna[0]]
    for att in mna[1:]:
        if not ("," in att or "+" in att):
            mergeNameArray.append(att)
        else:
            # One parenthesized group holding several tags: split them apart.
            mergeNameArray.extend(s.strip() for s in re.split(r',|\+', att) if s.strip() != "")
    return mergeNameArray
def commaSplit(self, string):
    """Split a comma-separated string into stripped parts.

    Blank or whitespace-only input yields an empty list (a plain split would
    give [""]).
    """
    if not string.strip():
        return []
    return [part.strip() for part in string.split(",")]
def barSplit(self, string):
    """Split a pipe-separated string into stripped parts.

    Blank or whitespace-only input yields an empty list (a plain split would
    give [""]).
    """
    if not string.strip():
        return []
    return [part.strip() for part in string.split("|")]
def addGameAndRomToDict(self, game, rom):
    """Append rom to the module-level gameRomDict under game, creating the
    entry if needed.

    Uses dict.setdefault instead of the `key not in d.keys()` check, which
    avoids the double lookup and the redundant .keys() call.
    """
    global gameRomDict
    gameRomDict.setdefault(game, []).append(rom)
def getBestGameName(self, roms):
    """Return the display title for a game: the base name (first attribute-split
    token) of its best rom, with any trailing periods stripped."""
    chosenRom, _, _ = self.getBestRom(roms)
    title = self.getAttributeSplit(chosenRom)[0]
    return title.rstrip(".")
def getBestRom(self, roms):
    """Pick the single best rom among roms.

    First narrows to the best region group, then breaks ties by score.
    Returns (rom, regionName, regionType). On equal scores the later rom in
    list order wins (>= comparison), matching the original behavior.
    """
    candidates, _, bestRegion, bestRegionType = self.getRomsInBestRegion(roms)
    if len(candidates) == 1:
        return candidates[0], bestRegion, bestRegionType
    topScore = -500
    topRom = ""
    for candidate in candidates:
        candidateScore = self.getScore(candidate)
        if candidateScore >= topScore:
            topScore = candidateScore
            topRom = candidate
    return topRom, bestRegion, bestRegionType
def getScore(self, rom):
    """Heuristically score a rom by its attribute tags; higher is better.

    Starts from 100 and adjusts: newer revisions/versions score up; betas,
    prototypes, samples/demos, unlicensed dumps, collections, DLC, and
    unknown tags score down. Roms tagged only with non-primary language
    codes take a small penalty so primary-language releases win ties.
    """
    # We already know that this is only called for roms that are in the best region
    attributes = self.getAttributeSplit(rom)[1:]
    score = 100
    lastVersion = 0
    # Collect every tag belonging to a Primary region group (used for the
    # language-preference tiebreak at the bottom).
    primaryRegionTags = []
    for i in range(len(export_regionGroupNames)):
        if export_regionPriorityTypes[i] == "Primary":
            primaryRegionTags += self.commaSplit(export_regionTags[i])
    while "" in primaryRegionTags:
        primaryRegionTags.remove("") # probably not needed, but just in case
    containsPrimaryLanguage = False
    containsOtherLanguage = False
    for att in attributes:
        if att.startswith("Rev") or att.startswith("Reprint"):
            # Later revisions/reprints are preferred; "Rev" alone counts as Rev 1-ish.
            try:
                score += 15 + (15 * int(att.split()[1]))
            except:
                score += 30
        elif att.startswith("v") and len(att) > 1 and att[1].isdigit():
            # Version tags like "v1.1": higher version, higher score.
            try:
                score += float(att[1:])
                lastVersion = float(att[1:])
            except:
                score += lastVersion
        elif att.startswith("b") and (len(att) == 1 or att[1].isdigit()):
            # Bad-dump markers: "b" alone or "b<digit>" are penalized.
            if len(att) == 1:
                score -= 30
            else:
                try:
                    score -= (15 - float(att[1:]))
                    lastVersion = float(att[1:])
                except:
                    score -= (15 - lastVersion)
        elif att.startswith("Beta") or att.startswith("Proto"):
            # Pre-release builds; later numbered betas are slightly less penalized.
            try:
                score -= (50 - int(att.split()[1]))
            except:
                score -= 49
        elif att.startswith("Sample") or att.startswith("Demo"):
            try:
                score -= (90 - int(att.split()[1]))
            except:
                score -= 89
        elif "Collection" in att:
            score -= 10
        elif att in self.g_specificAttributes:
            score -= 10
        elif "DLC" in att:
            score -= 10
        elif att in ["Unl", "Pirate"]:
            score -= 20
        # NOTE(review): "No" and "Da" appear twice in this language-tag list.
        elif att in ["En", "Ca", "Ja", "Fr", "De", "Es", "It", "No", "Br", "Sw", "Cn", "Zh", "Ko", "As", "Ne", "Ru", "Da", "Nl", "Pt", "Sv", "No", "Da", "Fi", "Pl"]:
            if att in primaryRegionTags:
                containsPrimaryLanguage = True
            else:
                containsOtherLanguage = True
        elif not (att in self.g_specificAttributes or any(att.startswith(starter) for starter in self.g_generalAttributes)): # a tiebreaker for any new keywords that are later added
            score -= 1
    # Prefer releases that include a primary-region language over ones that don't.
    if (not containsPrimaryLanguage) and containsOtherLanguage:
        score -= 2
    return score
def fixDuplicateName(self, firstGameRoms, secondGameRoms, sharedName):
    """Resolve two distinct games whose best names collapsed to the same string.

    Returns (newFirstName, newSecondName, renameByAtts). When renameByAtts is
    True, the caller renames both entries; when False, the caller merges the
    two rom lists under the shared name (rare — e.g. DS demos released via
    both DS Download Station and the Nintendo Channel).
    """
    global newGameRomDict  # NOTE(review): declared but not assigned in this method
    firstBestRoms, firstRegionNum, _, _ = self.getRomsInBestRegion(firstGameRoms)
    secondBestRoms, secondRegionNum, _, _ = self.getRomsInBestRegion(secondGameRoms)
    if currSystemHasClones and (firstRegionNum != secondRegionNum):
        # Different best regions (e.g. Pokemon Stadium International vs. Japan):
        # disambiguate by appending the region group name.
        newFirstGameName = sharedName+" ("+export_regionGroupNames[firstRegionNum]+")"
        newSecondGameName = sharedName+" ("+export_regionGroupNames[secondRegionNum]+")"
        return newFirstGameName, newSecondGameName, True
    else:
        # Same region: disambiguate with whichever attribute tags differ
        # between the two best roms.
        firstUniqueAtts, secondUniqueAtts = self.getUniqueAttributes(self.getBestRom(firstBestRoms)[0], self.getBestRom(secondBestRoms)[0])
        if len(firstUniqueAtts) > 0 or len(secondUniqueAtts) > 0:
            newFirstGameName = sharedName
            for att in firstUniqueAtts:
                newFirstGameName += " ("+att+")"
            newSecondGameName = sharedName
            for att in secondUniqueAtts:
                newSecondGameName += " ("+att+")"
            return newFirstGameName, newSecondGameName, True
        else:
            # Nothing distinguishes them: signal the caller to merge.
            return None, None, False
def getUniqueAttributes(self, firstRom, secondRom):
    """Return ([tags unique to firstRom], [tags unique to secondRom]).

    Region/language tags, known "specific" attributes, version ("v1.x") and
    bad-dump ("b"/"b1") markers, and tags starting with the known general
    attribute prefixes are all excluded — only genuinely distinguishing tags
    remain, suitable for disambiguating two same-named games.
    """
    firstAtts = self.getAttributeSplit(firstRom)
    firstAtts.pop(0)  # drop the title; keep only the tags
    secondAtts = self.getAttributeSplit(secondRom)
    secondAtts.pop(0)
    firstUniqueAtts = []
    # "Proto" is allowed through as a distinguishing tag (e.g. Exerion), so
    # remove it from the general-prefix exclusion list.
    tempStarters = self.g_generalAttributes[:]
    try:
        tempStarters.remove("Proto") # Exerion
    except:
        pass
    for att in firstAtts:
        if att in secondAtts or att in self.g_specificAttributes or self.attIsRegion(att):
            continue
        if att.startswith("v") and len(att) > 1 and att[1].isdigit():
            continue
        if att.startswith("b") and (len(att) == 1 or att[1].isdigit()):
            continue
        if not any(att.startswith(starter) for starter in tempStarters):
            firstUniqueAtts.append(att)
    secondUniqueAtts = []
    for att in secondAtts:
        if att in firstAtts or att in self.g_specificAttributes or self.attIsRegion(att):
            continue
        if att.startswith("v") and len(att) > 1 and att[1].isdigit():
            continue
        if att.startswith("b") and (len(att) == 1 or att[1].isdigit()):
            continue
        if not any(att.startswith(starter) for starter in tempStarters):
            secondUniqueAtts.append(att)
    # If "Proto" survived but some OTHER tag also distinguishes the games,
    # drop "Proto" — the other tag is the more meaningful differentiator.
    if ("Proto" in firstUniqueAtts + secondUniqueAtts) and (len(firstUniqueAtts) + len(secondUniqueAtts) > 1):
        if "Proto" in firstUniqueAtts:
            firstUniqueAtts.remove("Proto")
        elif "Proto" in secondUniqueAtts:
            secondUniqueAtts.remove("Proto")
    return firstUniqueAtts, secondUniqueAtts
def attIsRegion(self, att):
    """Return True if att is one of the configured region/language tags in any
    region group, False otherwise."""
    return any(att in self.commaSplit(tagGroup) for tagGroup in export_regionTags)
def copyMainRomset(self, currIndex):
    """Copy (or dry-run) the current system's selected roms into the target tree.

    Walks the module-level gameRomDict built by generateGameRomDict, builds
    each game's destination path from the region/special-category options,
    and copies the roms selected by romsetCategory ("All", "Favorites",
    "1G1R", or primary-region-only otherwise). Writes a per-system log and
    returns the number of bytes copied (or that would be copied).
    """
    global gameRomDict, currGameFolder, romsetCategory, favoritesList, missingFavorites
    numGames = len(gameRomDict.keys())
    self.romsCopied = []
    self.numRomsSkipped = 0
    self.romsFailed = []
    self.currNumCopiedBytes = 0
    self.Export_SubProgress_Bar.configure(maximum=str(numGames))
    self.Export_SubProgress_Bar['value'] = 0
    for game in gameRomDict.keys():
        self.Export_SubProgress_Bar['value'] += 1
        bestRom, bestRegion, bestRegionType = self.getBestRom(gameRomDict[game])
        bestRegionIsPrimary = (bestRegionType == "Primary")
        currSpecialFolders = self.getSpecialFolders(bestRom)
        # Skip games whose special category (BIOS, Unlicensed, ...) is excluded.
        if arrayOverlap(currSpecialFolders, ignoredFolders):
            continue
        # Start building output path according to attributes
        currGameFolder = currSystemTargetFolder
        if sortByPrimaryRegion and (not (bestRegionIsPrimary and primaryRegionInRoot)):
            currGameFolder = path.join(currGameFolder, "["+bestRegion+"]")
        if specialCategoryFolder:
            for folder in currSpecialFolders:
                currGameFolder = path.join(currGameFolder, "["+folder+"]")
        if exportToGameParentFolder:
            currGameFolder = path.join(currGameFolder, game)
        if romsetCategory in ["All", "Favorites"]:
            for rom in gameRomDict[game]:
                if romsetCategory == "All":
                    self.copyRomToTarget(rom)
                elif path.splitext(rom)[0] in favoritesList.keys():
                    self.copyRomToTarget(rom)
                    favoritesList[path.splitext(rom)[0]] = True  # mark favorite as found
        elif romsetCategory == "1G1R" or bestRegionIsPrimary:
            # 1G1R (or primary-region-only export): only the single best rom.
            self.copyRomToTarget(bestRom)
        tk_root.update()  # keep the GUI responsive; lets Cancel take effect
        if self.cancelledExport:
            break
    missingFavorites = []
    if self.isExport:
        self.writeTextToSubProgress("Copied "+str(len(self.romsCopied))+" new files.\n")
        self.writeTextToSubProgress("Skipped "+str(self.numRomsSkipped)+" files that already exist in the output directory.\n")
        self.writeTextToSubProgress("Failed to copy "+str(len(self.romsFailed))+" new files.\n")
        if romsetCategory == "Favorites":
            # Report favorites that never matched any file in the input romset.
            for rom in favoritesList.keys():
                if favoritesList[rom] == False:
                    missingFavorites.append(rom)
            if len(missingFavorites) > 0:
                self.writeTextToSubProgress(str(len(missingFavorites))+" roms from favorites list were not copied because they were not found in the input romset.\n")
    else:
        self.writeTextToSubProgress(str(len(self.romsCopied))+" new files would be copied.\n")
        self.writeTextToSubProgress(str(self.numRomsSkipped)+" old files would be skipped.\n")
    self.writeTextToSubProgress("Export Size: "+simplifyNumBytes(self.currNumCopiedBytes)+"\n\n")
    self.createMainCopiedLog(currIndex, "Export" if self.isExport else "Test")
    return self.currNumCopiedBytes
def copyRomToTarget(self, rom):
    """Copy one rom from the source folder into currGameFolder, honoring the
    overwrite/extract options, and update the copied/skipped/failed tallies.

    In test mode (self.isExport False) nothing is written; only the byte
    counts are accumulated. Fixes: createdFolder is initialized before the
    try block (previously, if the very first statement raised, the except
    handler hit a NameError), and the bare `except:` is narrowed to
    Exception so KeyboardInterrupt etc. are not swallowed.
    """
    sourceRomPath = path.join(currSystemSourceFolder, rom)
    targetRomPath = path.join(currGameFolder, rom)
    if overwriteDuplicates or (not self.targetExists(sourceRomPath, targetRomPath)):
        createdFolder = False  # defined up front so the except handler can always read it
        try:
            # Short-circuit: only create the folder when actually exporting.
            createdFolder = self.isExport and createDir(currGameFolder)
            if zipfile.is_zipfile(sourceRomPath) and extractArchives:
                with zipfile.ZipFile(sourceRomPath, 'r', zipfile.ZIP_DEFLATED) as zippedFile:
                    if self.isExport:
                        zippedFile.extract(zippedFile.namelist()[0], path.dirname(targetRomPath))
                    # Count the uncompressed size of the first member either way.
                    self.currNumCopiedBytes += zippedFile.infolist()[0].file_size
            else:
                if self.isExport:
                    shutil.copy(sourceRomPath, targetRomPath)
                    self.currNumCopiedBytes += path.getsize(targetRomPath)
                else:
                    self.currNumCopiedBytes += path.getsize(sourceRomPath)
            self.romsCopied.append(rom)
        except Exception:
            # Roll back an empty folder we just created, then record the failure.
            if createdFolder and len(listdir(currGameFolder)) == 0:
                rmdir(currGameFolder)
            self.romsFailed.append(rom)
    else:
        self.numRomsSkipped += 1
def targetExists(self, sourceRomPath, targetRomPath):
    """Report whether this rom is already present at the target location.

    When archives are being extracted, the check is for the archive's first
    member (extracted next to targetRomPath) rather than the archive itself.
    """
    if extractArchives and zipfile.is_zipfile(sourceRomPath):
        with zipfile.ZipFile(sourceRomPath, 'r', zipfile.ZIP_DEFLATED) as archive:
            innerName = archive.namelist()[0]
        return path.isfile(path.join(path.dirname(targetRomPath), innerName))
    return path.isfile(targetRomPath)
def getSpecialFolders(self, rom):
    """Return the special-category folder names this rom belongs to, based on
    keyword markers in its filename (BIOS, Unlicensed, Misc. Programs,
    Unreleased, Compilation, NES & Famicom ports, GBA Video)."""
    specialFolders = []
    if "[BIOS]" in rom:
        specialFolders.append("BIOS")
    if "(Unl" in rom or "(Pirate" in rom:
        specialFolders.append("Unlicensed")
    # NOTE(review): intentionally kept without a break, matching the original —
    # a name hitting several keywords appends "Misc. Programs" more than once.
    for marker in ["(Test Program", "(SDK Build", "Production Test Program", "Enhancement Chip", "Test Cart"]:
        if marker in rom:
            specialFolders.append("Misc. Programs")
    if "(Proto" in rom:
        specialFolders.append("Unreleased")
    # ("Sample"/"Demo" roms were once routed to a "Demo" folder; intentionally disabled.)
    if any(marker in rom for marker in self.barSplit(defaultSettings["Keywords"]["Compilation"])):
        specialFolders.append("Compilation")
    nesPortMarkers = ("Classic NES Series", "Famicom Mini", "Hudson Best Collection", "Kunio-kun Nekketsu Collection")
    if any(marker in rom for marker in nesPortMarkers):
        specialFolders.append("NES & Famicom")
    if "Game Boy Advance Video" in rom:
        specialFolders.append("GBA Video")
    return specialFolders
def createMainCopiedLog(self, currIndex, logType="Export"):
    """Write a timestamped per-system log listing copied, failed, and (for
    Favorites exports) missing roms.

    No log is written when nothing was copied and nothing failed — note that
    missing favorites alone do not trigger a log file. (currIndex is
    currently unused here.)
    """
    # ":" is not valid in Windows filenames, so it is replaced in the timestamp.
    currTime = datetime.now().isoformat(timespec='seconds').replace(":", ".")
    if len(self.romsCopied) + len(self.romsFailed) > 0:
        self.romsCopied.sort()
        self.romsFailed.sort()
        romsetLogFile = open(path.join(logFolder, currTime+" "+logType+" ("+currSystemName+") ["+str(len(self.romsCopied))+"] ["+str(len(self.romsFailed)+len(missingFavorites))+"].txt"), "w", encoding="utf-8", errors="replace")
        romsetLogFile.writelines("=== Copied "+str(len(self.romsCopied))+" new ROMs from \""+currSystemSourceFolder+"\" to \""+currSystemTargetFolder+"\" ===\n\n")
        for file in self.romsCopied:
            romsetLogFile.writelines(file+"\n")
        if len(self.romsFailed) > 0:
            romsetLogFile.writelines("\n= FAILED TO COPY =\n")
            for file in self.romsFailed:
                romsetLogFile.writelines(file+"\n")
        # missingFavorites is populated by copyMainRomset before this is called.
        if len(missingFavorites) > 0:
            romsetLogFile.writelines("\n= FAVORITES NOT FOUND IN INPUT =\n")
            for file in missingFavorites:
                romsetLogFile.writelines(file+"\n")
        romsetLogFile.close()
#############
# FAVORITES #
#############
def favorites_loadList(self):
    """Stub — presumably meant to load a favorites rom list; not implemented yet."""
    pass
def favorites_addFiles(self):
    """Stub — presumably meant to add files to the favorites list; not implemented yet."""
    pass
def favorites_saveList(self):
    """Stub — presumably meant to save the favorites list to file; not implemented yet."""
    pass
##########
# CONFIG #
##########
def addRegionGroup(self, groupName="", groupType="", groupTags=""):
    """Append one region-group row (buttons, name entry, type combobox, tags
    entry) to the Config screen's region frame.

    Each widget/variable lives in a parallel list indexed by self.regionNum,
    which is incremented at the end. The row's position within the column is
    encoded via padx offsets rather than separate grid columns.
    """
    self.Config_Region_Choice_RemoveButton_.append(None)
    self.Config_Region_Choice_UpButton_.append(None)
    self.Config_Region_Choice_DownButton_.append(None)
    self.Config_Region_Choice_Name_Label_.append(None)
    self.Config_Region_Choice_Name_Entry_.append(None)
    self.regionGroupNames.append(None)
    # NOTE(review): this second append to Name_Entry_ (and the ones for
    # Type_Combobox_ / Tags_Entry_ below) looks like a copy-paste slip — the
    # lists grow by two per call but only index self.regionNum is ever used,
    # so trailing None entries accumulate harmlessly. Confirm before changing.
    self.Config_Region_Choice_Name_Entry_.append(None)
    self.Config_Region_Choice_Type_Label_.append(None)
    self.Config_Region_Choice_Type_Combobox_.append(None)
    self.regionPriorityTypes.append(None)
    self.Config_Region_Choice_Type_Combobox_.append(None)
    self.Config_Region_Choice_Tags_Label_.append(None)
    self.Config_Region_Choice_Tags_Entry_.append(None)
    self.regionTags.append(None)
    self.Config_Region_Choice_Tags_Entry_.append(None)
    # Row controls: remove (X), move up, move down. Callbacks bind the current
    # row index as a default argument; refreshRegionChoicePlacement rebinds
    # them whenever rows move.
    self.Config_Region_Choice_RemoveButton_[self.regionNum] = ttk.Button(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_RemoveButton_[self.regionNum].configure(text='X', width='2')
    self.Config_Region_Choice_RemoveButton_[self.regionNum].grid(column='0', padx='20', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_RemoveButton_[self.regionNum].configure(command=lambda n=self.regionNum: self.removeRegionGroup(n))
    self.Config_Region_Choice_UpButton_[self.regionNum] = ttk.Button(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_UpButton_[self.regionNum].configure(text='↑', width='2')
    self.Config_Region_Choice_UpButton_[self.regionNum].grid(column='0', padx='50', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_UpButton_[self.regionNum].configure(command=lambda n=self.regionNum: self.moveRegionGroupUp(n))
    self.Config_Region_Choice_DownButton_[self.regionNum] = ttk.Button(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_DownButton_[self.regionNum].configure(text='↓', width='2')
    self.Config_Region_Choice_DownButton_[self.regionNum].grid(column='0', padx='80', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_DownButton_[self.regionNum].configure(command=lambda n=self.regionNum: self.moveRegionGroupDown(n))
    # Group name label + entry.
    self.Config_Region_Choice_Name_Label_[self.regionNum] = ttk.Label(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_Name_Label_[self.regionNum].configure(text='Region Group')
    self.Config_Region_Choice_Name_Label_[self.regionNum].grid(column='0', padx='130', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_Name_Entry_[self.regionNum] = ttk.Entry(self.Config_Region_Frame.innerframe)
    self.regionGroupNames[self.regionNum] = tk.StringVar(value=groupName)
    self.Config_Region_Choice_Name_Entry_[self.regionNum].configure(textvariable=self.regionGroupNames[self.regionNum])
    self.Config_Region_Choice_Name_Entry_[self.regionNum].grid(column='0', padx='220', pady='10', row=self.regionNum, sticky='w')
    # Priority type label + read-only combobox (Primary / Secondary).
    self.Config_Region_Choice_Type_Label_[self.regionNum] = ttk.Label(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_Type_Label_[self.regionNum].configure(text='Priority Type')
    self.Config_Region_Choice_Type_Label_[self.regionNum].grid(column='0', padx='380', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_Type_Combobox_[self.regionNum] = ttk.Combobox(self.Config_Region_Frame.innerframe)
    self.regionPriorityTypes[self.regionNum] = tk.StringVar(value=groupType)
    self.Config_Region_Choice_Type_Combobox_[self.regionNum].configure(state='readonly', textvariable=self.regionPriorityTypes[self.regionNum], values='"Primary" "Secondary"', width='12')
    self.Config_Region_Choice_Type_Combobox_[self.regionNum].grid(column='0', padx='465', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_Type_Combobox_[self.regionNum].bind('<<ComboboxSelected>>', self.settings_region_setPriorityType, add='')
    # Default selection: anything other than "Secondary" shows as "Primary".
    if groupType == "Secondary":
        self.Config_Region_Choice_Type_Combobox_[self.regionNum].current(1)
    else:
        self.Config_Region_Choice_Type_Combobox_[self.regionNum].current(0)
    # Region/language tag list label + entry.
    self.Config_Region_Choice_Tags_Label_[self.regionNum] = ttk.Label(self.Config_Region_Frame.innerframe)
    self.Config_Region_Choice_Tags_Label_[self.regionNum].configure(text='Region/Language Tags')
    self.Config_Region_Choice_Tags_Label_[self.regionNum].grid(column='0', padx='580', pady='10', row=self.regionNum, sticky='w')
    self.Config_Region_Choice_Tags_Entry_[self.regionNum] = ttk.Entry(self.Config_Region_Frame.innerframe)
    self.regionTags[self.regionNum] = tk.StringVar(value=groupTags)
    self.Config_Region_Choice_Tags_Entry_[self.regionNum].configure(textvariable=self.regionTags[self.regionNum], width='45')
    self.Config_Region_Choice_Tags_Entry_[self.regionNum].grid(column='0', padx='720', pady='10', row=self.regionNum, sticky='w')
    tooltip.create(self.Config_Region_Choice_Name_Label_[self.regionNum], 'The name of the region group. If \"Create Region Folders\" is enabled, then games marked as one of this group\'s region tags will be exported to a folder named after this group, surround by brackets (e.g. [World], [USA], etc).')
    tooltip.create(self.Config_Region_Choice_Type_Label_[self.regionNum], 'The type of region group.\n\nPrimary: The most significant region; 1G1R exports will prioritize this. If there are multiple Primary groups, then higher groups take priority.\n\nSecondary: \"Backup\" regions that will not be used in a 1G1R export unless no Primary-group version of a game exists, and \"Include Games from Non-Primary Regions\" is also enabled. If there are multiple Secondary groups, then higher groups take priority.\n\nTertiary: Any known region/language tag that is not part of a Primary/Secondary group is added to the Tertiary group by default. This is functionally the same as a Secondary group.')
    tooltip.create(self.Config_Region_Choice_Tags_Label_[self.regionNum], 'Rom tags that signify that a rom belongs to this group (region tags like USA and Europe, language tags like En and Fr, etc). If a rom contains tags from multiple region groups, then the higher group will take priority.')
    self.regionNum += 1
def settings_region_setPriorityType(self, event=None):
    """Combobox-selection handler for a region group's priority type
    (bound in addRegionGroup); currently a no-op stub."""
    pass
def config_region_applyTemplate(self, event=None):
    """Replace all region groups with one of the pre-made templates.

    Reads the chosen template name from self.templateChoice, clears every
    existing group, then re-adds the template's groups and sets the tertiary
    group label. Refactored from six near-identical elif branches into a
    data-driven table; behavior is unchanged, including clearing the groups
    (and adding none) for a non-empty choice that matches no template.
    """
    englishBase = [
        ("World", "Primary", "World"),
        ("USA", "Primary", "U, USA"),
        ("Europe", "Primary", "E, Europe, United Kingdom"),
        ("Other (English)", "Primary", "En, A, Australia, Ca, Canada"),
    ]
    japaneseBase = [
        ("World", "Primary", "World"),
        ("Japan", "Primary", "J, Japan"),
        ("Other (Japanese)", "Primary", "Ja"),
    ]
    # template name -> (list of (groupName, groupType, groupTags), tertiary label)
    templates = {
        "English": (englishBase, "Other (non-English)"),
        "English + Secondary": (englishBase + [("Japan", "Secondary", "J, Japan, Ja")], "Other (non-English)"),
        "English (USA Focus)": ([
            ("World", "Primary", "World"),
            ("USA", "Primary", "U, USA"),
            ("Europe", "Secondary", "E, Europe, United Kingdom"),
            ("Other (English)", "Secondary", "En, A, Australia, Ca, Canada"),
        ], "Other (non-English)"),
        "English (Europe Focus)": ([
            ("World", "Primary", "World"),
            ("Europe", "Primary", "E, Europe, United Kingdom"),
            ("USA", "Secondary", "U, USA"),
            ("Other (English)", "Secondary", "En, A, Australia, Ca, Canada"),
        ], "Other (non-English)"),
        "Japanese": (japaneseBase, "Other (non-Japanese)"),
        "Japanese + Secondary": (japaneseBase + [
            ("USA", "Secondary", "U, USA"),
            ("Europe", "Secondary", "E, Europe, United Kingdom"),
        ], "Other (non-Japanese)"),
    }
    choice = self.templateChoice.get()
    if choice == "":
        return
    while self.regionNum > 0:
        self.removeRegionGroup(0)
    groups, tertiaryName = templates.get(choice, ([], None))
    for groupName, groupType, groupTags in groups:
        self.addRegionGroup(groupName=groupName, groupType=groupType, groupTags=groupTags)
    if tertiaryName is not None:
        self.regionGroupTertiary.set(tertiaryName)
def settings_region_addNewRegionCategory(self, event=None):
    """Event handler: append a fresh, blank region-group row to the Config screen."""
    self.addRegionGroup()
def moveRegionGroupUp(self, num):
    """Swap region-group row num with the row above it (no-op for the top row).

    All parallel widget/variable lists must stay in lockstep, so the same
    pop/insert is applied to each of them before the grid rows and button
    callbacks are rebuilt. Refactored from twelve duplicated statements into
    a loop over the parallel lists; behavior is unchanged.
    """
    if num <= 0:
        return
    rowLists = (
        self.Config_Region_Choice_RemoveButton_,
        self.Config_Region_Choice_UpButton_,
        self.Config_Region_Choice_DownButton_,
        self.Config_Region_Choice_Name_Label_,
        self.Config_Region_Choice_Name_Entry_,
        self.regionGroupNames,
        self.Config_Region_Choice_Type_Label_,
        self.Config_Region_Choice_Type_Combobox_,
        self.regionPriorityTypes,
        self.Config_Region_Choice_Tags_Label_,
        self.Config_Region_Choice_Tags_Entry_,
        self.regionTags,
    )
    for rowList in rowLists:
        rowList.insert(num-1, rowList.pop(num))
    self.refreshRegionChoicePlacement()
def moveRegionGroupDown(self, num):
    """Move region-group row num down one slot by moving the row below it up
    (no-op for the bottom row)."""
    if num >= self.regionNum - 1:
        return
    self.moveRegionGroupUp(num + 1)
def removeRegionGroup(self, num):
    """Delete region-group row num: un-grid and drop its widgets, drop its tk
    variables, decrement the row count, and re-place the remaining rows.

    Refactored from nine duplicated grid_remove/pop pairs into loops over the
    parallel widget and variable lists; behavior is unchanged.
    """
    widgetLists = (
        self.Config_Region_Choice_RemoveButton_,
        self.Config_Region_Choice_UpButton_,
        self.Config_Region_Choice_DownButton_,
        self.Config_Region_Choice_Name_Label_,
        self.Config_Region_Choice_Name_Entry_,
        self.Config_Region_Choice_Type_Label_,
        self.Config_Region_Choice_Type_Combobox_,
        self.Config_Region_Choice_Tags_Label_,
        self.Config_Region_Choice_Tags_Entry_,
    )
    for widgetList in widgetLists:
        widgetList[num].grid_remove()
        widgetList.pop(num)
    for variableList in (self.regionGroupNames, self.regionPriorityTypes, self.regionTags):
        variableList.pop(num)
    self.regionNum -= 1
    self.refreshRegionChoicePlacement()
def refreshRegionChoicePlacement(self):
    """Re-bind button callbacks and re-grid every region-group row.

    Called after any add/remove/reorder so that each button's callback
    captures the row's *current* index and every widget sits at grid
    row == index.
    """
    for i in range(self.regionNum):
        # The ``n=i`` default argument binds the loop value at lambda
        # creation time (avoids the classic late-binding closure bug).
        self.Config_Region_Choice_RemoveButton_[i].configure(command=lambda n=i: self.removeRegionGroup(n))
        self.Config_Region_Choice_UpButton_[i].configure(command=lambda n=i: self.moveRegionGroupUp(n))
        self.Config_Region_Choice_DownButton_[i].configure(command=lambda n=i: self.moveRegionGroupDown(n))
        self.Config_Region_Choice_RemoveButton_[i].grid(row=str(i))
        self.Config_Region_Choice_UpButton_[i].grid(row=str(i))
        self.Config_Region_Choice_DownButton_[i].grid(row=str(i))
        self.Config_Region_Choice_Name_Label_[i].grid(row=str(i))
        self.Config_Region_Choice_Name_Entry_[i].grid(row=str(i))
        self.Config_Region_Choice_Type_Label_[i].grid(row=str(i))
        self.Config_Region_Choice_Type_Combobox_[i].grid(row=str(i))
        self.Config_Region_Choice_Tags_Label_[i].grid(row=str(i))
        self.Config_Region_Choice_Tags_Entry_[i].grid(row=str(i))
def settings_region_help(self, event=None):
    """Show an info dialog explaining the Region settings page.

    ``event`` is unused; it lets this method double as a tkinter event
    callback.
    """
    showinfo("Region Help", "Region settings are used in organizing roms from different regions and, in the case of 1G1R exports, determining which region of a game should be exported."
        +"\n\nHover over each setting to learn more; or you can simply use one of the pre-made templates. \"English + Secondary\" is recommended (it's the default), but use whatever you want."
        +"\n\nAny changes made on this page will be lost upon leaving the Config menu unless you click \"Save Changes\". This includes applying a template; remember to save!")
def createRegionSettings(self):
    """Write a fresh ``regions.ini`` with the default region groups.

    Builds the "English + Secondary" template (World/USA/Europe/Other
    English as Primary, Japan as Secondary, everything else Tertiary)
    and saves it to ``regionsFile``, replacing the module-level
    ``regionSettings`` parser.
    """
    global regionSettings
    regionSettings = configparser.ConfigParser(allow_no_value=True)
    # Keep option keys case-sensitive (configparser lowercases by default).
    regionSettings.optionxform = str
    regionSettings["1"] = {}
    regionSettings["1"]["Region Group"] = "World"
    regionSettings["1"]["Priority Type"] = "Primary"
    regionSettings["1"]["Region/Language Tags"] = "World"
    regionSettings["2"] = {}
    regionSettings["2"]["Region Group"] = "USA"
    regionSettings["2"]["Priority Type"] = "Primary"
    regionSettings["2"]["Region/Language Tags"] = "U, USA"
    regionSettings["3"] = {}
    regionSettings["3"]["Region Group"] = "Europe"
    regionSettings["3"]["Priority Type"] = "Primary"
    regionSettings["3"]["Region/Language Tags"] = "E, Europe, United Kingdom"
    regionSettings["4"] = {}
    regionSettings["4"]["Region Group"] = "Other (English)"
    regionSettings["4"]["Priority Type"] = "Primary"
    regionSettings["4"]["Region/Language Tags"] = "En, A, Australia, Ca, Canada"
    regionSettings["5"] = {}
    regionSettings["5"]["Region Group"] = "Japan"
    regionSettings["5"]["Priority Type"] = "Secondary"
    regionSettings["5"]["Region/Language Tags"] = "J, Japan, Ja"
    # The catch-all section has no tag list: it matches whatever is left.
    regionSettings["Other"] = {}
    regionSettings["Other"]["Region Group"] = "Other (non-English)"
    regionSettings["Other"]["Priority Type"] = "Tertiary"
    with open(regionsFile, 'w') as rf:
        regionSettings.write(rf)
def settings_saveChanges(self):
    """Validate and persist both settings files from the Config page.

    Writes the general options to ``defaultSettingsFile`` and the region
    groups to ``regionsFile`` (after validating group names/tags), then
    reports per-file success or failure in a dialog.
    """
    global defaultSettings, regionSettings
    # Make sure both files exist so the reads below have something to load.
    if not path.exists(defaultSettingsFile):
        self.createDefaultSettings()
    if not path.exists(regionsFile):
        self.createRegionSettings()
    try:
        defaultSettings = configparser.ConfigParser(allow_no_value=True)
        # Keep option keys case-sensitive.
        defaultSettings.optionxform = str
        defaultSettings.read(defaultSettingsFile)
        # Overwrite each section wholesale from the current widget state;
        # sections not rebuilt here (e.g. "Keywords") keep their read values.
        defaultSettings["General"] = {}
        defaultSettings["General"]["Input No-Intro DAT Directory"] = self.g_datFilePath.get()
        defaultSettings["General"]["Input Romset Directory"] = self.g_romsetFolderPath.get()
        defaultSettings["Organization"] = {}
        defaultSettings["Organization"]["Extract Compressed Roms"] = self.ssch(self.g_extractArchives)
        defaultSettings["Organization"]["Create Game Folder for Each Game"] = self.ssch(self.g_parentFolder)
        defaultSettings["Organization"]["Create Region Folders"] = self.ssch(self.g_sortByPrimaryRegion)
        defaultSettings["Organization"]["Do Not Create Folder for Primary Region"] = self.ssch(self.g_primaryRegionInRoot)
        defaultSettings["Organization"]["Create Folders for Special Categories"] = self.ssch(self.g_specialCategoryFolder)
        defaultSettings["Organization"]["Overwrite Duplicate Files"] = self.ssch(self.g_overwriteDuplicates)
        defaultSettings["Include"] = {}
        defaultSettings["Include"]["Unlicensed"] = self.ssch(self.g_includeUnlicensed)
        defaultSettings["Include"]["Unreleased"] = self.ssch(self.g_includeUnreleased)
        defaultSettings["Include"]["Compilations"] = self.ssch(self.g_includeCompilations)
        defaultSettings["Include"]["Misc. Programs"] = self.ssch(self.g_includeTestPrograms)
        defaultSettings["Include"]["BIOS"] = self.ssch(self.g_includeBIOS)
        defaultSettings["Include"]["(GBA) NES Ports"] = self.ssch(self.g_includeNESPorts)
        defaultSettings["Include"]["(GBA) GBA Video"] = self.ssch(self.g_includeGBAVideo)
        defaultSettings["Include"]["(1G1R) Games from Other Regions"] = self.ssch(self.g_includeOtherRegions)
        with open(defaultSettingsFile, 'w') as mcf:
            defaultSettings.write(mcf)
        savedDefaultSettings = True
    except:
        # NOTE(review): bare except hides the actual error; only the
        # boolean outcome reaches the user.
        savedDefaultSettings = False
    regionFailureReasons = ""
    try:
        for i in range(self.regionNum):
            # Name must be non-empty, slug-safe, and unique.
            if self.regionGroupNames[i].get().strip() == "":
                regionFailureReasons += "Region group "+str(i+1)+" has no name.\n"
            elif self.regionGroupNames[i].get().strip() != slugify(self.regionGroupNames[i].get().strip()):
                regionFailureReasons += "Region group "+str(i+1)+" has an invalid name.\n"
            else:
                # Compare only against later groups to report each pair once.
                for j in range(i+1, self.regionNum):
                    if self.regionGroupNames[i].get().strip() == self.regionGroupNames[j].get().strip():
                        regionFailureReasons += "Region groups "+str(i+1)+" and "+str(j+1)+" have the same name.\n"
            # At least one non-empty tag is required per group.
            tagsAreInvalid = True
            for tag in commaSplit(self.regionTags[i].get()):
                if tag != "":
                    tagsAreInvalid = False
                    break
            if tagsAreInvalid:
                regionFailureReasons += "Region group "+str(i+1)+" has invalid tag(s).\n"
        # The tertiary (catch-all) group has a name but no tag list.
        if self.regionGroupTertiary.get().strip() == "":
            regionFailureReasons += "Tertiary region group has no name.\n"
        if self.regionGroupTertiary.get().strip() != slugify(self.regionGroupTertiary.get().strip()):
            regionFailureReasons += "Tertiary region group has an invalid name.\n"
        regionFailureReasons = regionFailureReasons.strip()
        if regionFailureReasons != "":
            # Abort the region save; the reasons string is shown below.
            raise Exception("Invalid region group settings.")
        regionSettings = configparser.ConfigParser(allow_no_value=True)
        regionSettings.optionxform = str
        for i in range(self.regionNum):
            regionSettings[str(i+1)] = {}
            regionSettings[str(i+1)]["Region Group"] = self.regionGroupNames[i].get().strip()
            regionSettings[str(i+1)]["Priority Type"] = self.regionPriorityTypes[i].get()
            regionSettings[str(i+1)]["Region/Language Tags"] = self.regionTags[i].get().strip()
        regionSettings["Other"] = {}
        regionSettings["Other"]["Region Group"] = self.regionGroupTertiary.get().strip()
        regionSettings["Other"]["Priority Type"] = "Tertiary"
        with open(regionsFile, 'w') as rf:
            regionSettings.write(rf)
        savedRegionSettings = True
    except:
        savedRegionSettings = False
    # Report the four possible save outcomes.
    if savedDefaultSettings and savedRegionSettings:
        showinfo("EzRO", "Successfully saved all settings.")
    elif savedDefaultSettings and (not savedRegionSettings):
        showerror("EzRO", "An error has occurred: Failed to save region settings.\n\nReasons:\n"+regionFailureReasons)
    elif (not savedDefaultSettings) and savedRegionSettings:
        showerror("EzRO", "An error has occurred: Failed to save default settings.")
    else:
        showerror("EzRO", "An error has occurred: Failed to save default settings and region settings.\n\nReasons:\n"+regionFailureReasons)
def ssch(self, val):  # settings_saveChangesHelper
    """Serialize a BooleanVar-like object to the string form used in the INI files.

    Parameters:
        val: object exposing a ``get()`` method returning a truthy/falsy
            value (in practice a ``tk.BooleanVar``).

    Returns:
        ``"True"`` if ``val.get()`` is truthy, otherwise ``"False"``.
    """
    # Conditional expression replaces the original 4-line if/else.
    return "True" if val.get() else "False"
def changeMainTab(self, event=None):
    """Notebook tab-change handler: reload saved settings on entering Config."""
    selected_tab = self.Main_Notebook.tab(self.Main_Notebook.select(), "text")
    if selected_tab != "Config":
        return
    self.loadConfig()
def createDefaultSettings(self):
    """Write a fresh ``settings.ini`` containing the program defaults.

    Builds the General/Organization/Include/Keywords sections with their
    default values and saves them to ``defaultSettingsFile``, replacing
    the module-level ``defaultSettings`` parser.
    """
    global defaultSettings
    defaultSettings = configparser.ConfigParser(allow_no_value=True)
    # Keep option keys case-sensitive (configparser lowercases by default).
    defaultSettings.optionxform = str
    defaultSettings["General"] = {}
    defaultSettings["General"]["Input No-Intro DAT Directory"] = ""
    defaultSettings["General"]["Input Romset Directory"] = ""
    defaultSettings["Organization"] = {}
    defaultSettings["Organization"]["Extract Compressed Roms"] = "False"
    defaultSettings["Organization"]["Create Game Folder for Each Game"] = "False"
    defaultSettings["Organization"]["Create Region Folders"] = "False"
    defaultSettings["Organization"]["Do Not Create Folder for Primary Region"] = "True"
    defaultSettings["Organization"]["Create Folders for Special Categories"] = "True"
    defaultSettings["Organization"]["Overwrite Duplicate Files"] = "False"
    defaultSettings["Include"] = {}
    defaultSettings["Include"]["Unlicensed"] = "True"
    defaultSettings["Include"]["Unreleased"] = "True"
    defaultSettings["Include"]["Compilations"] = "True"
    defaultSettings["Include"]["Misc. Programs"] = "False"
    defaultSettings["Include"]["BIOS"] = "False"
    defaultSettings["Include"]["(GBA) NES Ports"] = "False"
    defaultSettings["Include"]["(GBA) GBA Video"] = "False"
    defaultSettings["Include"]["(1G1R) Games from Other Regions"] = "True"
    # Keywords
    # Pipe-delimited keyword lists used when classifying rom titles.
    defaultSettings["Keywords"] = {}
    defaultSettings["Keywords"]["Compilation"] = "|".join([
        "2 Games in 1 -", "2 Games in 1! -", "2 Disney Games -", "2-in-1 Fun Pack -",
        "2 Great Games! -", "2 in 1 -", "2 in 1 Game Pack -", "2 Jeux en 1 -",
        "3 Games in 1 -", "4 Games on One Game Pak", "Castlevania Double Pack",
        "Combo Pack - ", "Crash Superpack -", "Crash & Spyro Superpack",
        "Crash & Spyro Super Pack", "Double Game! -", "Double Pack -", "Spyro Superpack -"
    ])
    defaultSettings["Keywords"]["Specific Attributes"] = "|".join([
        "Virtual Console", "Switch Online", "GameCube", "Namcot Collection",
        "Namco Museum Archives", "Kiosk", "iQue", "Sega Channel", "WiiWare",
        "DLC", "Minis", "Promo", "Nintendo Channel", "Nintendo Channel, Alt",
        "DS Broadcast", "Wii Broadcast", "DS Download Station", "Dwnld Sttn",
        "Undumped Japanese Download Station", "WiiWare Broadcast",
        "Disk Writer", "Collection of Mana", "Namco Museum Archives Vol 1",
        "Namco Museum Archives Vol 2", "Castlevania Anniversary Collection",
        "Sega Smash Pack", "Steam Version", "Nintendo Switch", "NP",
        "Genesis Mini", "Mega Drive Mini", "Classic Mini"
    ])
    defaultSettings["Keywords"]["General Attributes"] = "|".join([
        "Rev", "Beta", "Demo", "Sample", "Proto", "Alt", "Earlier",
        "Download Station", "FW", "Reprint"
    ])
    with open(defaultSettingsFile, 'w') as dsf:
        defaultSettings.write(dsf)
def loadConfig(self):
    """Populate the Config page widgets from both INI files.

    Reads ``defaultSettingsFile`` and ``regionsFile`` into the
    module-level parsers and pushes their values into the tk variables
    and widgets. Exits the program with an error dialog when either
    file cannot be parsed.
    """
    global defaultSettings, regionSettings
    try:
        defaultSettings = configparser.ConfigParser(allow_no_value=True)
        # Keep option keys case-sensitive.
        defaultSettings.optionxform = str
        defaultSettings.read(defaultSettingsFile)
        self.g_datFilePath.set(defaultSettings["General"]["Input No-Intro DAT Directory"])
        self.g_romsetFolderPath.set(defaultSettings["General"]["Input Romset Directory"])
        self.Config_Default_DATDir_PathChooser.configure(textvariable=self.g_datFilePath, type='directory')
        self.Config_Default_RomsetDir_PathChooser.configure(textvariable=self.g_romsetFolderPath, type='directory')
        # Booleans are stored as the strings "True"/"False" (see ssch()).
        self.g_extractArchives.set(defaultSettings["Organization"]["Extract Compressed Roms"] == "True")
        self.g_parentFolder.set(defaultSettings["Organization"]["Create Game Folder for Each Game"] == "True")
        self.g_sortByPrimaryRegion.set(defaultSettings["Organization"]["Create Region Folders"] == "True")
        self.g_primaryRegionInRoot.set(defaultSettings["Organization"]["Do Not Create Folder for Primary Region"] == "True")
        self.g_specialCategoryFolder.set(defaultSettings["Organization"]["Create Folders for Special Categories"] == "True")
        self.g_overwriteDuplicates.set(defaultSettings["Organization"]["Overwrite Duplicate Files"] == "True")
        self.g_includeUnlicensed.set(defaultSettings["Include"]["Unlicensed"] == "True")
        self.g_includeUnreleased.set(defaultSettings["Include"]["Unreleased"] == "True")
        self.g_includeCompilations.set(defaultSettings["Include"]["Compilations"] == "True")
        self.g_includeTestPrograms.set(defaultSettings["Include"]["Misc. Programs"] == "True")
        self.g_includeBIOS.set(defaultSettings["Include"]["BIOS"] == "True")
        self.g_includeNESPorts.set(defaultSettings["Include"]["(GBA) NES Ports"] == "True")
        self.g_includeGBAVideo.set(defaultSettings["Include"]["(GBA) GBA Video"] == "True")
        self.g_includeOtherRegions.set(defaultSettings["Include"]["(1G1R) Games from Other Regions"] == "True")
        # Keyword strings are kept pipe-delimited as stored.
        self.g_specificAttributes = defaultSettings["Keywords"]["Specific Attributes"]
        self.g_generalAttributes = defaultSettings["Keywords"]["General Attributes"]
    except:
        # NOTE(review): bare except — any parse/lookup error ends the app.
        showerror("EzRO", "Invalid settings.ini file. Delete it and reload, then a new default file will be created.")
        sys.exit()
    try:
        regionSettings = configparser.ConfigParser(allow_no_value=True)
        regionSettings.optionxform = str
        regionSettings.read(regionsFile)
        # Tear down existing rows before rebuilding from the file.
        while self.regionNum > 0:
            self.removeRegionGroup(0)
        # Skip the implicit DEFAULT section; remaining keys are the
        # numbered groups plus the "Other" catch-all.
        keys = list(regionSettings.keys())[1:]
        for i in range(len(keys)):
            if keys[i].isdigit():
                # Assumes numbered sections are contiguous starting at "1"
                # (sections are indexed by str(i+1), not by keys[i]).
                self.addRegionGroup(groupName=regionSettings[str(i+1)]["Region Group"], groupType=regionSettings[str(i+1)]["Priority Type"], groupTags=regionSettings[str(i+1)]["Region/Language Tags"])
            elif keys[i] == "Other":
                self.regionGroupTertiary.set(regionSettings["Other"]["Region Group"])
                # NOTE(review): passes a tk Variable as ``text=``; presumably
                # ``textvariable=`` was intended — confirm against the widget.
                self.Config_Region_Choice_Name_Entry_Tertiary.configure(text=self.regionGroupTertiary)
    except:
        showerror("EzRO", "Invalid regions.ini file. Delete it and reload, then a new default file will be created.")
        sys.exit()
########
# MISC #
########
def menu_viewHelp(self):
    """Menu action: show the general help dialog."""
    showinfo("Help", "Hover over certain options for further details about them. You can also click the \"?\" button on some pages for more information.")
def menu_viewAbout(self):
    """Menu action: show the About dialog with version and project URL."""
    showinfo("About", "EzRO Rom Organizer v1.00\nhttps://github.com/Mips96/EzRO-gui\n\nQuestions? Bug reports? Feel free to leave an issue on the project GitHub!")
def menu_viewExternalLibraries(self):
    """Menu action: show third-party library attributions."""
    showinfo("External Libraries", "ttkScrollableNotebook\nhttps://github.com/muhammeteminturgut/ttkScrollableNotebook\nLicensed under GPL-3.0")
def show_error(self, *args):
    """Tk callback-exception hook: display the traceback in an error dialog.

    ``args`` is the ``(exc_type, exc_value, exc_traceback)`` triple that
    Tk passes to ``report_callback_exception``.
    """
    # BUG FIX: format_exception returns a *list* of strings; join it so the
    # dialog shows a readable traceback instead of the list's repr.
    err = ''.join(traceback.format_exception(*args))
    showerror('Exception', err)
# Route all Tk callback exceptions to the show_error dialog above.
tk.Tk.report_callback_exception = show_error
if __name__ == '__main__':
    tk_root = tk.Tk()
    # Abandoned DPI-scaling experiment kept for reference:
    # screenHeight = tk_root.winfo_screenheight()
    # screenHeightMult = screenHeight / 1440.0
    tk_root.geometry('1200x675')  # fixed window size
    # tk_root.resizable(False, False)
    tk_root.title("EzRO")
    # Scaling factor left at 1 (no scaling) while the experiment above is off.
    screenHeightMult = 1
    app = EzroApp(tk_root)
    app.run()
| StarcoderdataPython |
5124709 | <gh_stars>10-100
from network_pipeline.consts import SOURCE
from network_pipeline.consts import FORWARD_EXCHANGE
from network_pipeline.consts import FORWARD_ROUTING_KEY
from network_pipeline.consts import FORWARD_QUEUE
from spylunking.log.setup_logging import console_logger
from network_pipeline.utils import rnow
from network_pipeline.convert_pkt_to_json import convert_pkt_to_json
from network_pipeline.publisher import pub
import kamene.all as kamene
# Module-level logger for this packet processor.
log = console_logger(
    name='proc')
def handle_packets(pk):
    """handle_packets

    Kamene sniffing callback: wrap a captured Ethernet packet in a JSON
    envelope and publish it to the forwarding exchange/queue.

    :param pk: data packet that kamene sends in
    """
    log.info(("processing with pub={}")
             .format(pub))
    # get the lowest layer
    eth = pk.getlayer(kamene.Ether)
    should_forward = False
    # Message envelope; ``data`` is filled in below when the packet is usable.
    send_msg = {"data": {},
                "created": rnow(),
                "source": SOURCE}
    if eth:
        # parse all layer frames under ethernet
        send_msg["data"] = convert_pkt_to_json(eth)
        should_forward = True
    else:
        # Non-Ethernet captures are logged and dropped.
        log.error(("unsupported pk={}")
                  .format(pk))
    # end of if supported
    if should_forward:
        log.info("forwarding")
        # Publish the message:
        msg_sent = pub.publish(body=send_msg,
                               exchange=FORWARD_EXCHANGE,
                               routing_key=FORWARD_ROUTING_KEY,
                               queue=FORWARD_QUEUE,
                               serializer="json",
                               retry=True)
        log.info("done forwarding={}".format(msg_sent))
    # end of should_forward
# end of handle_packets
| StarcoderdataPython |
1756757 |
import math
import torch
import torch.nn as nn
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Compute pairwise IoU (or GIoU) between two sets of boxes.

    Boxes are (x1, y1, x2, y2); the last dim of each input is 4. With
    ``is_aligned=True`` the boxes are compared element-wise and the result
    has shape ``batch + (rows,)``; otherwise all pairs are compared and the
    result has shape ``batch + (rows, cols)``.

    NOTE(review): the assert admits 'diou' and 'ciou', but those modes fall
    through to the GIoU enclosure computation with ``enclosed_lt``/
    ``enclosed_rb`` undefined (NameError), and their ``union = area1``
    branch looks like leftover 'iof' semantics — only 'iou' and 'giou'
    actually work here; diou_loss/ciou_loss compute their terms themselves.
    """
    assert mode in ['iou', 'giou', 'diou', 'ciou'], f'Unsupported mode {mode}'
    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
    # Leading (batch) dims must match.
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]
    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols
    # Empty input: return an empty result of the right shape.
    if rows * cols == 0:
        if is_aligned:
            return bboxes1.new(batch_shape + (rows,))
        else:
            return bboxes1.new(batch_shape + (rows, cols))
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
    if is_aligned:
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]
        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1 + area2 - overlap
        else:
            # NOTE(review): 'iof'-style union; reached only by the broken
            # 'diou'/'ciou' modes — verify before relying on it.
            union = area1
        if mode == 'giou':
            # Smallest box enclosing both inputs.
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # Broadcast rows against cols to get all pairs.
        lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
        rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]
        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
            enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
    # Guard against division by zero for degenerate boxes.
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    # NOTE(review): 'iof' can never reach here (rejected by the assert above).
    if mode in ['iou', 'iof']:
        return ious
    # calculate gious
    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
@weighted_loss
def iou_loss(pred, target, eps=1e-6):
    """Element-wise IoU loss: ``-log(IoU(pred, target))``.

    ``eps`` clamps the IoU away from zero so the log stays finite.
    """
    overlaps = bbox_overlaps(pred, target, is_aligned=True)
    return -torch.log(overlaps.clamp(min=eps))
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
    """Element-wise Generalized IoU loss: ``1 - GIoU(pred, target)``."""
    return 1 - bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
    """Element-wise Distance-IoU loss for aligned (n, 4) xyxy box pairs.

    DIoU = IoU - rho^2 / c^2, where rho is the distance between box
    centers and c is the diagonal of the smallest enclosing box; the
    loss is ``1 - DIoU``.
    """
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose area
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]
    # Squared diagonal of the enclosing box.
    c2 = cw**2 + ch**2 + eps
    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]
    # Squared distance between the two box centers, split by axis.
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
    rho2 = left + right
    # DIoU
    dious = ious - rho2 / c2
    loss = 1 - dious
    return loss
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
    """Element-wise Complete-IoU loss for aligned (n, 4) xyxy box pairs.

    CIoU = IoU - rho^2 / c^2 - alpha * v, with v the aspect-ratio
    consistency term and alpha = v / (1 - IoU + v); here alpha * v is
    folded in as ``v**2 / (1 - ious + v)``. The loss is ``1 - CIoU``.
    """
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose area
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]
    # Squared diagonal of the enclosing box.
    c2 = cw**2 + ch**2 + eps
    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]
    # Widths/heights; eps keeps the atan arguments finite for zero heights.
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    # Squared center distance, split by axis.
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
    rho2 = left + right
    # Aspect-ratio consistency term v.
    factor = 4 / math.pi**2
    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
    # CIoU
    cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
    loss = 1 - cious
    return loss
class IoULoss(nn.Module):
    """IoU loss (-log IoU) wrapped as an nn.Module.

    Attributes:
        eps: numerical-stability constant forwarded to iou_loss.
        reduction: default reduction ('none' | 'mean' | 'sum').
        loss_weight: scalar multiplier on the final loss.
    """
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(IoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted IoU loss for aligned box pairs.

        ``weight`` may be per-box (n,) or per-coordinate (n, 4); the
        latter is averaged down to one weight per box.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.reduction)
        # All-zero weights: short-circuit with zero.
        # NOTE(review): GIoULoss below returns ``pred.sum() * 0.`` here
        # instead, because (pred * weight) fails when weight still has the
        # extra coordinate dim — consider the same fix.
        if (weight is not None) and (not torch.any(weight > 0)) and (reduction != 'none'):
            return (pred * weight).sum()  # 0
        if weight is not None and weight.dim() > 1:
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * iou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
        return loss
class GIoULoss(nn.Module):
    """Generalized IoU loss (1 - GIoU) wrapped as an nn.Module."""

    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(GIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted GIoU loss for aligned box pairs."""
        if weight is not None and not torch.any(weight > 0):
            # Every weight is zero: return a differentiable zero tied to
            # ``pred`` only (avoids multiplying by a mismatched weight).
            return pred.sum() * 0.  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        if weight is not None and weight.dim() > 1:
            # Per-coordinate weights: collapse to one weight per box.
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        scaled = self.loss_weight * giou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
        return scaled
class DIoULoss(nn.Module):
    """Distance-IoU loss (1 - DIoU) wrapped as an nn.Module.

    Attributes:
        eps: numerical-stability constant forwarded to diou_loss.
        reduction: default reduction ('none' | 'mean' | 'sum').
        loss_weight: scalar multiplier on the final loss.
    """
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(DIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted DIoU loss for aligned (n, 4) xyxy box pairs.

        ``weight`` may be per-box (n,) or per-coordinate (n, 4); the
        latter is averaged down to one weight per box.
        """
        if weight is not None and not torch.any(weight > 0):
            # BUG FIX: same fix GIoULoss in this file already carries —
            # (pred * weight).sum() breaks when weight still has the extra
            # coordinate dim; return a differentiable zero tied to pred only.
            return pred.sum() * 0.  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * diou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
        return loss
class CIoULoss(nn.Module):
    """Complete-IoU loss (1 - CIoU) wrapped as an nn.Module.

    Attributes:
        eps: numerical-stability constant forwarded to ciou_loss.
        reduction: default reduction ('none' | 'mean' | 'sum').
        loss_weight: scalar multiplier on the final loss.
    """
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(CIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted CIoU loss for aligned (n, 4) xyxy box pairs.

        ``weight`` may be per-box (n,) or per-coordinate (n, 4); the
        latter is averaged down to one weight per box.
        """
        if weight is not None and not torch.any(weight > 0):
            # BUG FIX: same fix GIoULoss in this file already carries —
            # (pred * weight).sum() breaks when weight still has the extra
            # coordinate dim; return a differentiable zero tied to pred only.
            return pred.sum() * 0.  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * ciou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
        return loss
| StarcoderdataPython |
3313924 | #!/usr/bin/env vpython3
# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Calls process_perf_results.py with a python 3 interpreter."""
import sys
import subprocess
# TODO(crbug.com/webrtc/13835): Delete this file and use
# process_perf_results.py instead.
def main():
    """Re-invoke the non-``_py2`` variant of this script under vpython3.

    Returns the child process's exit code.
    """
    target = sys.argv[0].replace('_py2', '')
    print('Calling "%s" with py3 in case this script was called with py2.' % target)
    return subprocess.call(['vpython3', target] + sys.argv[1:])


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
9779180 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
import shutil
import copy
import socket
import ruamel.yaml
import six
from ordereddict_backport import OrderedDict
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.hash_types as ht
import spack.repo
import spack.schema.env
import spack.spec
import spack.util.spack_json as sjson
import spack.config
from spack.filesystem_view import YamlFilesystemView
from spack.util.environment import EnvironmentModifications
import spack.architecture as architecture
from spack.spec import Spec
from spack.spec_list import SpecList, InvalidSpecConstraintError
from spack.variant import UnknownVariantError
#: environment variable used to indicate the active environment
spack_env_var = 'SPACK_ENV'
#: currently activated environment
_active_environment = None
#: path where environments are stored in the spack tree
env_path = os.path.join(spack.paths.var_path, 'environments')
#: Name of the input yaml file for an environment
manifest_name = 'spack.yaml'
#: Name of the input yaml file for an environment
lockfile_name = 'spack.lock'
#: Name of the directory where environments store repos, logs, views
env_subdir_name = '.spack-env'
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
spack:
# add package specs to the `specs` list
specs:
-
view: true
"""
#: regex for validating enviroment names
valid_environment_name_re = r'^\w[\w-]*$'
#: version of the lockfile format. Must increase monotonically.
lockfile_format_version = 2
#: legal first keys in the spack.yaml manifest file
env_schema_keys = ('spack', 'env')
# Magic names
# The name of the standalone spec list in the manifest yaml
user_speclist_name = 'specs'
# The name of the default view (the view loaded on env.activate)
default_view_name = 'default'
# Default behavior to link all packages into views (vs. only root packages)
default_view_link = 'all'
def valid_env_name(name):
    """Return whether ``name`` is a legal Spack environment name.

    Returns an explicit bool instead of the raw ``re.Match``/``None``;
    existing callers only test the result for truthiness, so the contract
    is unchanged.
    """
    return re.match(valid_environment_name_re, name) is not None
def validate_env_name(name):
    """Validate ``name`` as an environment name and return it unchanged.

    Raises:
        ValueError: if the name is not a legal environment name.
    """
    if valid_env_name(name):
        return name
    raise ValueError((
        "'%s': names must start with a letter, and only contain "
        "letters, numbers, _, and -.") % name)
def activate(
        env, use_env_repo=False, add_view=True, shell='sh', prompt=None
):
    """Activate an environment.

    To activate an environment, we add its configuration scope to the
    existing Spack configuration, and we set active to the current
    environment.

    Arguments:
        env (Environment): the environment to activate
        use_env_repo (bool): use the packages exactly as they appear in the
            environment's repository
        add_view (bool): generate commands to add view to path variables
        shell (string): One of `sh`, `csh`.
        prompt (string): string to add to the users prompt, or None

    Returns:
        cmds: Shell commands to activate environment.

    TODO: environment to use the activated spack environment.
    """
    global _active_environment
    _active_environment = env
    prepare_config_scope(_active_environment)
    if use_env_repo:
        spack.repo.path.put_first(_active_environment.repo)
    # BUG FIX: typo "environmennt" -> "environment" in the debug message.
    tty.debug("Using environment '%s'" % _active_environment.name)
    # Construct the commands to run
    cmds = ''
    if shell == 'csh':
        # TODO: figure out how to make color work for csh
        cmds += 'setenv SPACK_ENV %s;\n' % env.path
        cmds += 'alias despacktivate "spack env deactivate";\n'
        if prompt:
            # Save the original prompt once, then prefix it.
            cmds += 'if (! $?SPACK_OLD_PROMPT ) '
            cmds += 'setenv SPACK_OLD_PROMPT "${prompt}";\n'
            cmds += 'set prompt="%s ${prompt}";\n' % prompt
    else:
        if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
            prompt = colorize('@G{%s} ' % prompt, color=True)
        cmds += 'export SPACK_ENV=%s;\n' % env.path
        cmds += "alias despacktivate='spack env deactivate';\n"
        if prompt:
            # Remember the original PS1 (sentinel '$$$$' when PS1 was unset)
            # so deactivate() can restore it.
            cmds += 'if [ -z ${SPACK_OLD_PS1+x} ]; then\n'
            cmds += '    if [ -z ${PS1+x} ]; then\n'
            cmds += "        PS1='$$$$';\n"
            cmds += '    fi;\n'
            cmds += '    export SPACK_OLD_PS1="${PS1}";\n'
            cmds += 'fi;\n'
            cmds += 'export PS1="%s ${PS1}";\n' % prompt
    if add_view and default_view_name in env.views:
        cmds += env.add_default_view_to_shell(shell)
    return cmds
def deactivate(shell='sh'):
    """Undo any configuration or repo settings modified by ``activate()``.

    Arguments:
        shell (string): One of `sh`, `csh`. Shell style to use.

    Returns:
        (string): shell commands for `shell` to undo environment variables
        (or None when no environment is active)
    """
    global _active_environment
    if not _active_environment:
        return
    deactivate_config_scope(_active_environment)
    # use _repo so we only remove if a repo was actually constructed
    if _active_environment._repo:
        spack.repo.path.remove(_active_environment._repo)
    cmds = ''
    if shell == 'csh':
        cmds += 'unsetenv SPACK_ENV;\n'
        cmds += 'if ( $?SPACK_OLD_PROMPT ) '
        cmds += 'set prompt="$SPACK_OLD_PROMPT" && '
        cmds += 'unsetenv SPACK_OLD_PROMPT;\n'
        cmds += 'unalias despacktivate;\n'
    else:
        cmds += 'if [ ! -z ${SPACK_ENV+x} ]; then\n'
        cmds += 'unset SPACK_ENV; export SPACK_ENV;\n'
        cmds += 'fi;\n'
        cmds += 'unalias despacktivate;\n'
        # Restore the prompt saved by activate(); the '$$$$' sentinel means
        # PS1 was originally unset.
        cmds += 'if [ ! -z ${SPACK_OLD_PS1+x} ]; then\n'
        cmds += '    if [ "$SPACK_OLD_PS1" = \'$$$$\' ]; then\n'
        cmds += '        unset PS1; export PS1;\n'
        cmds += '    else\n'
        cmds += '        export PS1="$SPACK_OLD_PS1";\n'
        cmds += '    fi;\n'
        cmds += '    unset SPACK_OLD_PS1; export SPACK_OLD_PS1;\n'
        cmds += 'fi;\n'
    if default_view_name in _active_environment.views:
        cmds += _active_environment.rm_default_view_from_shell(shell)
    # BUG FIX: typo "environmennt" -> "environment" in the debug message.
    tty.debug("Deactivated environment '%s'" % _active_environment.name)
    _active_environment = None
    return cmds
def find_environment(args):
    """Find active environment from args, spack.yaml, or environment variable.

    This is called in ``spack.main`` to figure out which environment to
    activate.

    Check for an environment in this order:
        1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
        2. as a spack.yaml file in the current directory, or
        3. via a path in the SPACK_ENV environment variable.

    If an environment is found, read it in. If not, return None.

    Arguments:
        args (Namespace): argparse namespace with command arguments

    Returns:
        (Environment): a found environment, or ``None``
    """
    # try arguments
    env = getattr(args, 'env', None)
    # treat env as a name
    if env:
        if exists(env):
            return read(env)
    else:
        # if env was specified, see if it is a directory otherwise, look
        # at env_dir (env and env_dir are mutually exclusive)
        env = getattr(args, 'env_dir', None)
        # if no argument, look for a manifest file
        if not env:
            if os.path.exists(manifest_name):
                env = os.getcwd()
        # if no env, env_dir, or manifest try the environment
        if not env:
            env = os.environ.get(spack_env_var)
            # nothing was set; there's no active environment
            if not env:
                return None
    # if we get here, env isn't the name of a spack environment; it has
    # to be a path to an environment, or there is something wrong.
    if is_env_dir(env):
        return Environment(env)
    raise SpackEnvironmentError('no environment in %s' % env)
def get_env(args, cmd_name, required=False):
    """Used by commands to get the active environment.

    This first checks for an ``env`` argument, then looks at the
    ``active`` environment.  We check args first because Spack's
    subcommand arguments are parsed *after* the ``-e`` and ``-D``
    arguments to ``spack``.  So there may be an ``env`` argument that is
    *not* the active environment, and we give it precedence.

    This is used by a number of commands for determining whether there is
    an active environment.

    If an environment is not found *and* is required, print an error
    message that says the calling command *needs* an active environment.

    Arguments:
        args (Namespace): argparse namespace with command arguments
        cmd_name (str): name of calling command
        required (bool): if ``True``, raise an exception when no environment
            is found; if ``False``, just return ``None``

    Returns:
        (Environment): if there is an arg or active environment
    """
    # try argument first
    env = getattr(args, 'env', None)
    if env:
        # env may be a named environment or a path to an environment dir
        if exists(env):
            return read(env)
        elif is_env_dir(env):
            return Environment(env)
        else:
            raise SpackEnvironmentError('no environment in %s' % env)

    # try the active environment. This is set by find_environment() (above)
    if _active_environment:
        return _active_environment
    elif not required:
        return None
    else:
        tty.die(
            '`spack %s` requires an environment' % cmd_name,
            'activate an environment first:',
            '    spack env activate ENV',
            'or use:',
            '    spack -e ENV %s ...' % cmd_name)
def _root(name):
    """Internal, non-validating version of root().

    Joins *name* onto the environment directory without checking that the
    name is valid.
    """
    env_root = os.path.join(env_path, name)
    return env_root
def root(name):
    """Return the root directory for the named environment.

    Raises if *name* is not a valid environment name.
    """
    validate_env_name(name)
    env_root = _root(name)
    return env_root
def exists(name):
    """Whether an environment with this name exists or not."""
    if valid_env_name(name):
        # an environment exists iff its root directory is on disk
        return os.path.isdir(root(name))
    return False
def active(name):
    """True if the named environment is active.

    NOTE(review): when no environment is active this returns the falsy
    ``_active_environment`` value (``None``) rather than ``False`` — callers
    appear to rely only on truthiness; confirm before changing.
    """
    return _active_environment and name == _active_environment.name
def is_env_dir(path):
    """Whether a directory contains a spack environment."""
    if not os.path.isdir(path):
        return False
    # a directory is an environment iff it holds a manifest (spack.yaml)
    return os.path.exists(os.path.join(path, manifest_name))
def read(name):
    """Return the environment with the supplied name.

    Raises:
        SpackEnvironmentError: if no environment with that name exists.
    """
    validate_env_name(name)
    if exists(name):
        return Environment(root(name))
    raise SpackEnvironmentError("no such environment '%s'" % name)
def create(name, init_file=None, with_view=None):
    """Create a named environment in Spack.

    Raises:
        SpackEnvironmentError: if an environment with that name already
            exists.
    """
    validate_env_name(name)
    if not exists(name):
        return Environment(root(name), init_file, with_view)
    raise SpackEnvironmentError("'%s': environment already exists" % name)
def config_dict(yaml_data):
    """Return the configuration-scope section of a parsed spack.yaml."""
    return yaml_data[spack.config.first_existing(yaml_data, env_schema_keys)]
def all_environment_names():
    """List the names of environments that currently exist."""
    # just return empty if the env path does not exist. A read-only
    # operation like list should not try to create a directory.
    if not os.path.exists(env_path):
        return []

    # an entry counts only if it has a valid name and holds a manifest
    return [
        candidate for candidate in sorted(os.listdir(env_path))
        if valid_env_name(candidate) and os.path.exists(
            os.path.join(_root(candidate), manifest_name))
    ]
def all_environments():
    """Yield an ``Environment`` object for every named environment."""
    for env_name in all_environment_names():
        yield read(env_name)
def validate(data, filename=None):
    """Validate parsed YAML *data* against the spack environment schema.

    Raises a ConfigFormatError carrying the offending line number when
    validation fails.
    """
    import jsonschema
    try:
        spack.schema.Validator(spack.schema.env.schema).validate(data)
    except jsonschema.ValidationError as e:
        # round-trip YAML nodes carry line info on .lc (0-based)
        line_number = e.instance.lc.line + 1
        raise spack.config.ConfigFormatError(e, data, filename, line_number)
def _read_yaml(str_or_file):
    """Read YAML from a file for round-trip parsing."""
    # grab the filename (if any) first so error messages can reference it
    filename = getattr(str_or_file, 'name', None)
    data = ruamel.yaml.load(str_or_file, ruamel.yaml.RoundTripLoader)
    validate(data, filename)
    return data
def _write_yaml(data, str_or_file):
    """Write YAML to a file preserving comments and dict order.

    Validates against the environment schema before writing.
    """
    validate(data, getattr(str_or_file, 'name', None))
    ruamel.yaml.dump(data, str_or_file,
                     Dumper=ruamel.yaml.RoundTripDumper,
                     default_flow_style=False)
def _eval_conditional(string):
    """Evaluate conditional definitions using restricted variable scope.

    Used for ``when:`` clauses in spack.yaml definitions.  The expression
    sees only architecture info, ``re``, ``os.environ``, and the hostname.
    """
    arch = architecture.Arch(
        architecture.platform(), 'default_os', 'default_target')
    valid_variables = {
        'target': str(arch.target),
        'os': str(arch.os),
        'platform': str(arch.platform),
        'arch': str(arch),
        'architecture': str(arch),
        're': re,
        'env': os.environ,
        'hostname': socket.gethostname()
    }

    # SECURITY NOTE: this eval()s strings taken from spack.yaml.  The globals
    # dict above restricts the visible names but does not sandbox builtins;
    # manifests must be treated as trusted input.
    return eval(string, valid_variables)
class ViewDescriptor(object):
    """Describes a filesystem view of an environment: its root path,
    optional projections, select/exclude filters, and which specs to link.

    NOTE(review): this class defines no ``__eq__``; equality comparisons
    against freshly constructed ViewDescriptors elsewhere in the file fall
    back to identity — confirm whether value equality was intended there.
    """

    def __init__(self, root, projections=None, select=None, exclude=None,
                 link=default_view_link):
        """Create a view descriptor.

        Args:
            root (str): root directory of the view on the filesystem
            projections (dict): optional projection configuration
            select (list): spec constraints; only matching specs are linked
            exclude (list): spec constraints; matching specs are skipped
            link (str): which specs to link into the view ('all' or roots)
        """
        self.root = root
        # Use None sentinels instead of mutable default arguments ({} / []),
        # which would be shared across every instance built with defaults.
        self.projections = {} if projections is None else projections
        self.select = [] if select is None else select
        self.select_fn = lambda x: any(x.satisfies(s) for s in self.select)
        self.exclude = [] if exclude is None else exclude
        self.exclude_fn = lambda x: not any(x.satisfies(e)
                                            for e in self.exclude)
        self.link = link

    def to_dict(self):
        """Serialize to the dict form used in spack.yaml.

        Default/empty values are omitted from the result.
        """
        ret = {'root': self.root}
        if self.projections:
            ret['projections'] = self.projections
        if self.select:
            ret['select'] = self.select
        if self.exclude:
            ret['exclude'] = self.exclude
        if self.link != default_view_link:
            ret['link'] = self.link
        return ret

    @staticmethod
    def from_dict(d):
        """Inverse of to_dict(): build a descriptor from manifest data."""
        return ViewDescriptor(d['root'],
                              d.get('projections', {}),
                              d.get('select', []),
                              d.get('exclude', []),
                              d.get('link', default_view_link))

    def view(self):
        """Return a YamlFilesystemView rooted at this descriptor's root."""
        return YamlFilesystemView(self.root, spack.store.layout,
                                  ignore_conflicts=True,
                                  projections=self.projections)

    def regenerate(self, all_specs, roots):
        """Synchronize the on-disk view with the environment's specs.

        Args:
            all_specs: every concrete spec in the environment
            roots: just the root specs (used when ``link`` is not 'all')
        """
        specs_for_view = []
        specs = all_specs if self.link == 'all' else roots
        for spec in specs:
            # The view does not store build deps, so if we want it to
            # recognize environment specs (which do store build deps), then
            # they need to be stripped
            if spec.concrete:  # Do not link unconcretized roots
                specs_for_view.append(spec.copy(deps=('link', 'run')))

        if self.select:
            specs_for_view = list(filter(self.select_fn, specs_for_view))

        if self.exclude:
            specs_for_view = list(filter(self.exclude_fn, specs_for_view))

        # only installed specs can actually be linked into the view
        installed_specs_for_view = set(s for s in specs_for_view
                                       if s.package.installed)

        view = self.view()

        view.clean()
        specs_in_view = set(view.get_all_specs())
        tty.msg("Updating view at {0}".format(self.root))

        rm_specs = specs_in_view - installed_specs_for_view
        view.remove_specs(*rm_specs, with_dependents=False)

        add_specs = installed_specs_for_view - specs_in_view
        view.add_specs(*add_specs, with_dependencies=False)
class Environment(object):
    """A Spack environment: a set of user (abstract) specs plus their
    concretized counterparts, backed by a ``spack.yaml`` manifest and an
    optional ``spack.lock`` lockfile in a single directory.
    """

    def __init__(self, path, init_file=None, with_view=None):
        """Create a new environment.

        The environment can be optionally initialized with either a
        spack.yaml or spack.lock file.

        Arguments:
            path (str): path to the root directory of this environment
            init_file (str or file object): filename or file object to
                initialize the environment
            with_view (str or bool): whether a view should be maintained for
                the environment. If the value is a string, it specifies the
                path to the view.
        """
        self.path = os.path.abspath(path)
        self.clear()

        if init_file:
            # initialize from a given file: lockfiles pair the default
            # manifest with the locked specs; anything else is a manifest
            with fs.open_if_filename(init_file) as f:
                if hasattr(f, 'name') and f.name.endswith('.lock'):
                    self._read_manifest(default_manifest_yaml)
                    self._read_lockfile(f)
                    self._set_user_specs_from_lockfile()
                else:
                    self._read_manifest(f)
        else:
            default_manifest = not os.path.exists(self.manifest_path)
            if default_manifest:
                # No manifest, use default yaml
                self._read_manifest(default_manifest_yaml)
            else:
                with open(self.manifest_path) as f:
                    self._read_manifest(f)

            if os.path.exists(self.lock_path):
                with open(self.lock_path) as f:
                    read_lock_version = self._read_lockfile(f)
                if default_manifest:
                    # No manifest, set user specs from lockfile
                    self._set_user_specs_from_lockfile()
                if read_lock_version == 1:
                    # v1 lockfiles are upgraded in place; keep a backup
                    tty.debug(
                        "Storing backup of old lockfile {0} at {1}".format(
                            self.lock_path, self._lock_backup_v1_path))
                    shutil.copy(self.lock_path, self._lock_backup_v1_path)

        if with_view is False:
            self.views = {}
        elif with_view is True:
            self.views = {
                default_view_name: ViewDescriptor(self.view_path_default)}
        elif isinstance(with_view, six.string_types):
            self.views = {default_view_name: ViewDescriptor(with_view)}
        # If with_view is None, then defer to the view settings determined by
        # the manifest file

    def _read_manifest(self, f):
        """Read manifest file and set up user specs."""
        self.yaml = _read_yaml(f)

        self.spec_lists = OrderedDict()

        for item in config_dict(self.yaml).get('definitions', []):
            entry = copy.deepcopy(item)
            when = _eval_conditional(entry.pop('when', 'True'))
            assert len(entry) == 1
            if when:
                name, spec_list = next(iter(entry.items()))
                user_specs = SpecList(name, spec_list, self.spec_lists.copy())
                if name in self.spec_lists:
                    self.spec_lists[name].extend(user_specs)
                else:
                    self.spec_lists[name] = user_specs

        spec_list = config_dict(self.yaml).get(user_speclist_name)
        user_specs = SpecList(user_speclist_name, [s for s in spec_list if s],
                              self.spec_lists.copy())
        self.spec_lists[user_speclist_name] = user_specs

        enable_view = config_dict(self.yaml).get('view')
        # enable_view can be boolean, string, or None
        if enable_view is True or enable_view is None:
            self.views = {
                default_view_name: ViewDescriptor(self.view_path_default)}
        elif isinstance(enable_view, six.string_types):
            self.views = {default_view_name: ViewDescriptor(enable_view)}
        elif enable_view:
            self.views = dict((name, ViewDescriptor.from_dict(values))
                              for name, values in enable_view.items())
        else:
            self.views = {}

    @property
    def user_specs(self):
        """The primary SpecList of user-requested specs."""
        return self.spec_lists[user_speclist_name]

    def _set_user_specs_from_lockfile(self):
        """Copy user_specs from a read-in lockfile."""
        self.spec_lists = {
            user_speclist_name: SpecList(
                user_speclist_name,
                [str(s) for s in self.concretized_user_specs]
            )
        }

    def clear(self):
        """Reset all in-memory environment state."""
        self.spec_lists = {user_speclist_name: SpecList()}  # specs from yaml
        self.concretized_user_specs = []  # user specs from last concretize
        self.concretized_order = []  # roots of last concretize, in order
        self.specs_by_hash = {}  # concretized specs by hash
        self.new_specs = []  # write packages for these on write()
        self._repo = None  # RepoPath for this env (memoized)
        self._previous_active = None  # previously active environment

    @property
    def internal(self):
        """Whether this environment is managed by Spack."""
        return self.path.startswith(env_path)

    @property
    def name(self):
        """Human-readable representation of the environment.

        This is the path for directory environments, and just the name
        for named environments.
        """
        if self.internal:
            return os.path.basename(self.path)
        else:
            return self.path

    @property
    def active(self):
        """True if this environment is currently active."""
        return _active_environment and self.path == _active_environment.path

    @property
    def manifest_path(self):
        """Path to spack.yaml file in this environment."""
        return os.path.join(self.path, manifest_name)

    @property
    def lock_path(self):
        """Path to spack.lock file in this environment."""
        return os.path.join(self.path, lockfile_name)

    @property
    def _lock_backup_v1_path(self):
        """Path to backup of v1 lockfile before conversion to v2"""
        return self.lock_path + '.backup.v1'

    @property
    def env_subdir_path(self):
        """Path to directory where the env stores repos, logs, views."""
        return os.path.join(self.path, env_subdir_name)

    @property
    def repos_path(self):
        """Path to the environment's bundled package repositories."""
        return os.path.join(self.path, env_subdir_name, 'repos')

    @property
    def log_path(self):
        """Path to the environment's build-log directory."""
        return os.path.join(self.path, env_subdir_name, 'logs')

    @property
    def view_path_default(self):
        """Default path for this environment's view."""
        # default path for environment views
        return os.path.join(self.env_subdir_path, 'view')

    @property
    def repo(self):
        """Memoized RepoPath over the environment's bundled repos."""
        if self._repo is None:
            self._repo = make_repo_path(self.repos_path)
        return self._repo

    def included_config_scopes(self):
        """List of included configuration scopes from the environment.

        Scopes are listed in the YAML file in order from highest to
        lowest precedence, so configuration from earlier scope will take
        precedence over later ones.

        This routine returns them in the order they should be pushed onto
        the internal scope stack (so, in reverse, from lowest to highest).
        """
        scopes = []

        # load config scopes added via 'include:', in reverse so that
        # highest-precedence scopes are last.
        includes = config_dict(self.yaml).get('include', [])
        for i, config_path in enumerate(reversed(includes)):
            # allow paths to contain environment variables
            config_path = config_path.format(**os.environ)

            # treat relative paths as relative to the environment
            if not os.path.isabs(config_path):
                config_path = os.path.join(self.path, config_path)
                config_path = os.path.normpath(os.path.realpath(config_path))

            if os.path.isdir(config_path):
                # directories are treated as regular ConfigScopes
                config_name = 'env:%s:%s' % (
                    self.name, os.path.basename(config_path))
                scope = spack.config.ConfigScope(config_name, config_path)
            else:
                # files are assumed to be SingleFileScopes
                base, ext = os.path.splitext(os.path.basename(config_path))
                config_name = 'env:%s:%s' % (self.name, base)
                scope = spack.config.SingleFileScope(
                    config_name, config_path, spack.schema.merged.schema)

            scopes.append(scope)

        return scopes

    def env_file_config_scope_name(self):
        """Name of the config scope of this environment's manifest file."""
        return 'env:%s' % self.name

    def env_file_config_scope(self):
        """Get the configuration scope for the environment's manifest file."""
        config_name = self.env_file_config_scope_name()
        return spack.config.SingleFileScope(config_name,
                                            self.manifest_path,
                                            spack.schema.env.schema,
                                            [env_schema_keys])

    def config_scopes(self):
        """A list of all configuration scopes for this environment."""
        return self.included_config_scopes() + [self.env_file_config_scope()]

    def destroy(self):
        """Remove this environment from Spack entirely."""
        shutil.rmtree(self.path)

    def update_stale_references(self, from_list=None):
        """Iterate over spec lists updating references.

        After ``from_list`` is modified, every list defined after it may
        refer to it by name and must have its reference dict rebuilt.
        """
        if not from_list:
            from_list = next(iter(self.spec_lists.keys()))
        index = list(self.spec_lists.keys()).index(from_list)

        # spec_lists is an OrderedDict, all list entries after the modified
        # list may refer to the modified list. Update stale references
        for i, (name, speclist) in enumerate(
            list(self.spec_lists.items())[index + 1:], index + 1
        ):
            new_reference = dict((n, self.spec_lists[n])
                                 for n in list(self.spec_lists.keys())[:i])
            speclist.update_reference(new_reference)

    def add(self, user_spec, list_name=user_speclist_name):
        """Add a single user_spec (non-concretized) to the Environment

        Returns:
            (bool): True if the spec was added, False if it was already
                present and did not need to be added
        """
        spec = Spec(user_spec)

        if list_name not in self.spec_lists:
            raise SpackEnvironmentError(
                'No list %s exists in environment %s' % (list_name, self.name)
            )

        if list_name == user_speclist_name:
            # entries in the primary list must be named, known packages
            if not spec.name:
                raise SpackEnvironmentError(
                    'cannot add anonymous specs to an environment!')
            elif not spack.repo.path.exists(spec.name):
                raise SpackEnvironmentError('no such package: %s' % spec.name)

        list_to_change = self.spec_lists[list_name]
        existing = str(spec) in list_to_change.yaml_list
        if not existing:
            list_to_change.add(str(spec))
            self.update_stale_references(list_name)

        return bool(not existing)

    def remove(self, query_spec, list_name=user_speclist_name, force=False):
        """Remove specs from an environment that match a query_spec"""
        query_spec = Spec(query_spec)

        list_to_change = self.spec_lists[list_name]
        matches = []

        if not query_spec.concrete:
            matches = [s for s in list_to_change if s.satisfies(query_spec)]

        if not matches:
            # concrete specs match against concrete specs in the env
            specs_hashes = zip(
                self.concretized_user_specs, self.concretized_order)
            matches = [
                s for s, h in specs_hashes
                if query_spec.dag_hash() == h
            ]

        if not matches:
            raise SpackEnvironmentError(
                "Not found: {0}".format(query_spec))

        old_specs = set(self.user_specs)
        for spec in matches:
            if spec in list_to_change:
                list_to_change.remove(spec)
        self.update_stale_references(list_name)

        # If force, update stale concretized specs
        # Only check specs removed by this operation
        new_specs = set(self.user_specs)
        for spec in old_specs - new_specs:
            if force and spec in self.concretized_user_specs:
                i = self.concretized_user_specs.index(spec)
                del self.concretized_user_specs[i]

                dag_hash = self.concretized_order[i]
                del self.concretized_order[i]
                del self.specs_by_hash[dag_hash]

    def concretize(self, force=False):
        """Concretize user_specs in this environment.

        Only concretizes specs that haven't been concretized yet unless
        force is ``True``.

        This only modifies the environment in memory. ``write()`` will
        write out a lockfile containing concretized specs.

        Arguments:
            force (bool): re-concretize ALL specs, even those that were
                already concretized

        Returns:
            List of specs that have been concretized. Each entry is a tuple
            of the user spec and the corresponding concretized spec.
        """
        if force:
            # Clear previously concretized specs
            self.concretized_user_specs = []
            self.concretized_order = []
            self.specs_by_hash = {}

        # keep any concretized specs whose user specs are still in the manifest
        old_concretized_user_specs = self.concretized_user_specs
        old_concretized_order = self.concretized_order
        old_specs_by_hash = self.specs_by_hash

        self.concretized_user_specs = []
        self.concretized_order = []
        self.specs_by_hash = {}

        for s, h in zip(old_concretized_user_specs, old_concretized_order):
            if s in self.user_specs:
                concrete = old_specs_by_hash[h]
                self._add_concrete_spec(s, concrete, new=False)

        # Concretize any new user specs that we haven't concretized yet
        concretized_specs = []
        for uspec, uspec_constraints in zip(
                self.user_specs, self.user_specs.specs_as_constraints):
            if uspec not in old_concretized_user_specs:
                concrete = _concretize_from_constraints(uspec_constraints)
                self._add_concrete_spec(uspec, concrete)
                concretized_specs.append((uspec, concrete))
        return concretized_specs

    def install(self, user_spec, concrete_spec=None, **install_args):
        """Install a single spec into an environment.

        This will automatically concretize the single spec, but it won't
        affect other as-yet unconcretized specs.
        """
        spec = Spec(user_spec)

        if self.add(spec):
            concrete = concrete_spec if concrete_spec else spec.concretized()
            self._add_concrete_spec(spec, concrete)
        else:
            # spec might be in the user_specs, but not installed.
            # TODO: Redo name-based comparison for old style envs
            spec = next(s for s in self.user_specs if s.satisfies(user_spec))
            concrete = self.specs_by_hash.get(spec.build_hash())
            if not concrete:
                concrete = spec.concretized()
                self._add_concrete_spec(spec, concrete)

        self._install(concrete, **install_args)

    def _install(self, spec, **install_args):
        """Install one concrete spec and link its build log into the env."""
        spec.package.do_install(**install_args)

        # Make sure log directory exists
        log_path = self.log_path
        fs.mkdirp(log_path)

        with fs.working_dir(self.path):
            # Link the resulting log file into logs dir
            build_log_link = os.path.join(
                log_path, '%s-%s.log' % (spec.name, spec.dag_hash(7)))
            if os.path.lexists(build_log_link):
                os.remove(build_log_link)
            os.symlink(spec.package.build_log_path, build_log_link)

    @property
    def default_view(self):
        """The environment's default ViewDescriptor (raises if disabled)."""
        if not self.views:
            raise SpackEnvironmentError(
                "{0} does not have a view enabled".format(self.name))

        if default_view_name not in self.views:
            raise SpackEnvironmentError(
                "{0} does not have a default view enabled".format(self.name))

        return self.views[default_view_name]

    def update_default_view(self, viewpath):
        """Move, enable, or disable the default view.

        A falsy *viewpath* disables the default view; an existing view at a
        different path is removed from disk first.
        """
        name = default_view_name
        if name in self.views and self.default_view.root != viewpath:
            shutil.rmtree(self.default_view.root)

        if viewpath:
            if name in self.views:
                self.default_view.root = viewpath
            else:
                self.views[name] = ViewDescriptor(viewpath)
        else:
            self.views.pop(name, None)

    def regenerate_views(self):
        """Regenerate every configured view from the current specs."""
        if not self.views:
            tty.debug("Skip view update, this environment does not"
                      " maintain a view")
            return

        specs = self._get_environment_specs()
        for view in self.views.values():
            view.regenerate(specs, self.roots())

    def _shell_vars(self):
        """Return (variable, paths) pairs for existing default-view subdirs."""
        updates = [
            ('PATH', ['bin']),
            ('MANPATH', ['man', 'share/man']),
            ('ACLOCAL_PATH', ['share/aclocal']),
            ('LD_LIBRARY_PATH', ['lib', 'lib64']),
            ('LIBRARY_PATH', ['lib', 'lib64']),
            ('CPATH', ['include']),
            ('PKG_CONFIG_PATH', ['lib/pkgconfig', 'lib64/pkgconfig']),
            ('CMAKE_PREFIX_PATH', ['']),
        ]
        path_updates = list()
        if default_view_name in self.views:
            for var, subdirs in updates:
                paths = filter(lambda x: os.path.exists(x),
                               list(os.path.join(self.default_view.root, x)
                                    for x in subdirs))
                path_updates.append((var, paths))
        return path_updates

    def add_default_view_to_shell(self, shell):
        """Shell commands prepending the default view's dirs to path vars."""
        env_mod = EnvironmentModifications()
        for var, paths in self._shell_vars():
            for path in paths:
                env_mod.prepend_path(var, path)
        return env_mod.shell_modifications(shell)

    def rm_default_view_from_shell(self, shell):
        """Shell commands removing the default view's dirs from path vars."""
        env_mod = EnvironmentModifications()
        for var, paths in self._shell_vars():
            for path in paths:
                env_mod.remove_path(var, path)
        return env_mod.shell_modifications(shell)

    def _add_concrete_spec(self, spec, concrete, new=True):
        """Called when a new concretized spec is added to the environment.

        This ensures that all internal data structures are kept in sync.

        Arguments:
            spec (Spec): user spec that resulted in the concrete spec
            concrete (Spec): spec concretized within this environment
            new (bool): whether to write this spec's package to the env
                repo on write()
        """
        assert concrete.concrete

        # when a spec is newly concretized, we need to make a note so
        # that we can write its package to the env repo on write()
        if new:
            self.new_specs.append(concrete)

        # update internal lists of specs
        self.concretized_user_specs.append(spec)

        h = concrete.build_hash()
        self.concretized_order.append(h)
        self.specs_by_hash[h] = concrete

    def install_all(self, args=None):
        """Install all concretized specs in an environment."""
        for concretized_hash in self.concretized_order:
            spec = self.specs_by_hash[concretized_hash]

            # Parse cli arguments and construct a dictionary
            # that will be passed to Package.do_install API
            kwargs = dict()
            if args:
                spack.cmd.install.update_kwargs_from_args(args, kwargs)

            self._install(spec, **kwargs)

            if not spec.external:
                # Link the resulting log file into logs dir
                build_log_link = os.path.join(
                    self.log_path, '%s-%s.log' % (spec.name, spec.dag_hash(7)))
                if os.path.lexists(build_log_link):
                    os.remove(build_log_link)
                os.symlink(spec.package.build_log_path, build_log_link)

        self.regenerate_views()

    def all_specs_by_hash(self):
        """Map of hashes to spec for all specs in this environment."""
        # Note this uses dag-hashes calculated without build deps as keys,
        # whereas the environment tracks specs based on dag-hashes calculated
        # with all dependencies. This function should not be used by an
        # Environment object for management of its own data structures
        hashes = {}
        for h in self.concretized_order:
            specs = self.specs_by_hash[h].traverse(deptype=('link', 'run'))
            for spec in specs:
                hashes[spec.dag_hash()] = spec
        return hashes

    def all_specs(self):
        """Return all specs, even those a user spec would shadow."""
        return sorted(self.all_specs_by_hash().values())

    def all_hashes(self):
        """Return hashes of all specs, even those a user spec would shadow."""
        return list(self.all_specs_by_hash().keys())

    def roots(self):
        """Specs explicitly requested by the user *in this environment*.

        Yields both added and installed specs that have user specs in
        `spack.yaml`.
        """
        concretized = dict(self.concretized_specs())
        for spec in self.user_specs:
            concrete = concretized.get(spec)
            yield concrete if concrete else spec

    def added_specs(self):
        """Specs that are not yet installed.

        Yields the user spec for non-concretized specs, and the concrete
        spec for already concretized but not yet installed specs.
        """
        concretized = dict(self.concretized_specs())
        for spec in self.user_specs:
            concrete = concretized.get(spec)
            if not concrete:
                yield spec
            elif not concrete.package.installed:
                yield concrete

    def concretized_specs(self):
        """Tuples of (user spec, concrete spec) for all concrete specs."""
        for s, h in zip(self.concretized_user_specs, self.concretized_order):
            yield (s, self.specs_by_hash[h])

    def removed_specs(self):
        """Tuples of (user spec, concrete spec) for all specs that will be
        removed on next concretize."""
        needed = set()
        for s, c in self.concretized_specs():
            if s in self.user_specs:
                for d in c.traverse():
                    needed.add(d)

        for s, c in self.concretized_specs():
            for d in c.traverse():
                if d not in needed:
                    yield d

    def _get_environment_specs(self, recurse_dependencies=True):
        """Returns the specs of all the packages in an environment.

        If these specs appear under different user_specs, only one copy
        is added to the list returned.
        """
        spec_list = list()

        for spec_hash in self.concretized_order:
            spec = self.specs_by_hash[spec_hash]

            specs = (spec.traverse(deptype=('link', 'run'))
                     if recurse_dependencies else (spec,))

            spec_list.extend(specs)

        return spec_list

    def _to_lockfile_dict(self):
        """Create a dictionary to store a lockfile for this environment."""
        concrete_specs = {}
        for spec in self.specs_by_hash.values():
            for s in spec.traverse():
                dag_hash_all = s.build_hash()
                if dag_hash_all not in concrete_specs:
                    spec_dict = s.to_node_dict(hash=ht.build_hash)
                    spec_dict[s.name]['hash'] = s.dag_hash()
                    concrete_specs[dag_hash_all] = spec_dict

        hash_spec_list = zip(
            self.concretized_order, self.concretized_user_specs)

        # this is the lockfile we'll write out
        data = {
            # metadata about the format
            '_meta': {
                'file-type': 'spack-lockfile',
                'lockfile-version': lockfile_format_version,
            },

            # users specs + hashes are the 'roots' of the environment
            'roots': [{
                'hash': h,
                'spec': str(s)
            } for h, s in hash_spec_list],

            # Concrete specs by hash, including dependencies
            'concrete_specs': concrete_specs,
        }

        return data

    def _read_lockfile(self, file_or_json):
        """Read a lockfile from a file or from a raw string."""
        lockfile_dict = sjson.load(file_or_json)
        self._read_lockfile_dict(lockfile_dict)
        return lockfile_dict['_meta']['lockfile-version']

    def _read_lockfile_dict(self, d):
        """Read a lockfile dictionary into this environment."""
        roots = d['roots']
        self.concretized_user_specs = [Spec(r['spec']) for r in roots]
        self.concretized_order = [r['hash'] for r in roots]

        json_specs_by_hash = d['concrete_specs']
        root_hashes = set(self.concretized_order)

        specs_by_hash = {}
        for dag_hash, node_dict in json_specs_by_hash.items():
            specs_by_hash[dag_hash] = Spec.from_node_dict(node_dict)

        # second pass: wire up dependency edges between the read-in specs
        for dag_hash, node_dict in json_specs_by_hash.items():
            for dep_name, dep_hash, deptypes in (
                    Spec.dependencies_from_node_dict(node_dict)):
                specs_by_hash[dag_hash]._add_dependency(
                    specs_by_hash[dep_hash], deptypes)

        # If we are reading an older lockfile format (which uses dag hashes
        # that exclude build deps), we use this to convert the old
        # concretized_order to the full hashes (preserving the order)
        old_hash_to_new = {}
        self.specs_by_hash = {}
        for _, spec in specs_by_hash.items():
            dag_hash = spec.dag_hash()
            build_hash = spec.build_hash()
            if dag_hash in root_hashes:
                old_hash_to_new[dag_hash] = build_hash

            if (dag_hash in root_hashes or build_hash in root_hashes):
                self.specs_by_hash[build_hash] = spec

        if old_hash_to_new:
            # Replace any older hashes in concretized_order with hashes
            # that include build deps
            self.concretized_order = [
                old_hash_to_new.get(h, h) for h in self.concretized_order]

    def write(self):
        """Writes an in-memory environment to its location on disk.

        This will also write out package files for each newly concretized spec.
        """
        # ensure path in var/spack/environments
        fs.mkdirp(self.path)

        if self.specs_by_hash:
            # ensure the prefix/.env directory exists
            fs.mkdirp(self.env_subdir_path)

            for spec in self.new_specs:
                for dep in spec.traverse():
                    if not dep.concrete:
                        raise ValueError('specs passed to environment.write() '
                                         'must be concrete!')

                    root = os.path.join(self.repos_path, dep.namespace)
                    repo = spack.repo.create_or_construct(root, dep.namespace)
                    pkg_dir = repo.dirname_for_package_name(dep.name)

                    fs.mkdirp(pkg_dir)
                    spack.repo.path.dump_provenance(dep, pkg_dir)
            self.new_specs = []

            # write the lock file last
            with fs.write_tmp_and_move(self.lock_path) as f:
                sjson.dump(self._to_lockfile_dict(), stream=f)
        else:
            if os.path.exists(self.lock_path):
                os.unlink(self.lock_path)

        # invalidate _repo cache
        self._repo = None

        # put any changes in the definitions in the YAML
        for name, speclist in self.spec_lists.items():
            if name == user_speclist_name:
                # The primary list is handled differently
                continue

            conf = config_dict(self.yaml)
            active_yaml_lists = [l for l in conf.get('definitions', [])
                                 if name in l and
                                 _eval_conditional(l.get('when', 'True'))]

            # Remove any specs in yaml that are not in internal representation
            for ayl in active_yaml_lists:
                # If it's not a string, it's a matrix. Those can't have changed
                # If it is a string that starts with '$', it's a reference.
                # Those also can't have changed.
                ayl[name][:] = [s for s in ayl.setdefault(name, [])
                                if (not isinstance(s, six.string_types)) or
                                s.startswith('$') or Spec(s) in speclist.specs]

            # Put the new specs into the first active list from the yaml
            new_specs = [entry for entry in speclist.yaml_list
                         if isinstance(entry, six.string_types) and
                         not any(entry in ayl[name]
                                 for ayl in active_yaml_lists)]
            list_for_new_specs = active_yaml_lists[0].setdefault(name, [])
            list_for_new_specs[:] = list_for_new_specs + new_specs

        # put the new user specs in the YAML.
        # This can be done directly because there can't be multiple definitions
        # nor when clauses for `specs` list.
        yaml_spec_list = config_dict(self.yaml).setdefault(user_speclist_name,
                                                           [])
        yaml_spec_list[:] = self.user_specs.yaml_list

        default_name = default_view_name
        # NOTE(review): ViewDescriptor defines no __eq__, so the comparisons
        # below fall back to identity and are always False — the view setting
        # is therefore always serialized in dict form; confirm intent.
        if self.views and len(self.views) == 1 and default_name in self.views:
            path = self.default_view.root
            if self.default_view == ViewDescriptor(self.view_path_default):
                view = True
            elif self.default_view == ViewDescriptor(path):
                view = path
            else:
                view = dict((name, view.to_dict())
                            for name, view in self.views.items())
        elif self.views:
            view = dict((name, view.to_dict())
                        for name, view in self.views.items())
        else:
            view = False

        yaml_dict = config_dict(self.yaml)
        if view is not True:
            # The default case is to keep an active view inside of the
            # Spack environment directory. To avoid cluttering the config,
            # we omit the setting in this case.
            yaml_dict['view'] = view
        elif 'view' in yaml_dict:
            del yaml_dict['view']

        # if all that worked, write out the manifest file at the top level
        with fs.write_tmp_and_move(self.manifest_path) as f:
            _write_yaml(self.yaml, f)

        # TODO: for operations that just add to the env (install etc.) this
        # could just call update_view
        self.regenerate_views()

    def __enter__(self):
        """Activate this environment, remembering the previously active one."""
        self._previous_active = _active_environment
        activate(self)
        # NOTE(review): returns None rather than self, so
        # ``with Environment(...) as e`` binds None — confirm intended.
        return

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Deactivate this environment, restoring any previously active one."""
        deactivate()
        if self._previous_active:
            activate(self._previous_active)
def display_specs(concretized_specs):
    """Displays the list of specs returned by `Environment.concretize()`.

    Args:
        concretized_specs (list): list of (user spec, concrete spec) tuples
            returned by `Environment.concretize()`
    """
    for requested, concrete in concretized_specs:
        tty.msg('Concretized {0}'.format(requested))
        tree = concrete.tree(
            recurse_dependencies=True,
            status_fn=spack.spec.Spec.install_status,
            hashlen=7, hashes=True)
        sys.stdout.write(tree)
        print('')
def _concretize_from_constraints(spec_constraints):
    """Concretize a list of constraint Specs into one concrete spec.

    Exactly one constraint must be named; all anonymous constraints are
    attached to it.  Constraints that make concretization fail (invalid
    dependencies or unknown variants, as diagnosed from the exception
    messages) are dropped and concretization is retried.

    Arguments:
        spec_constraints (list): Spec objects; exactly one must have a name.
            NOTE(review): the named entry is removed from the caller's list
            in place — confirm callers do not reuse it.

    Returns:
        (Spec): the concretized spec.

    Raises:
        InvalidSpecConstraintError: if there is not exactly one named spec.
    """
    # Accept only valid constraints from list and concretize spec
    # Get the named spec even if out of order
    root_spec = [s for s in spec_constraints if s.name]
    if len(root_spec) != 1:
        m = 'The constraints %s are not a valid spec ' % spec_constraints
        m += 'concretization target. all specs must have a single name '
        m += 'constraint for concretization.'
        raise InvalidSpecConstraintError(m)
    spec_constraints.remove(root_spec[0])

    invalid_constraints = []
    while True:
        # Attach all anonymous constraints to one named spec
        s = root_spec[0].copy()
        for c in spec_constraints:
            if c not in invalid_constraints:
                s.constrain(c)
        try:
            return s.concretized()
        except spack.spec.InvalidDependencyError as e:
            # HACK: parses the exception message text to recover the
            # offending dependency names — fragile if the message changes
            dep_index = e.message.index('depend on ') + len('depend on ')
            invalid_msg = e.message[dep_index:]
            invalid_deps_string = ['^' + d.strip(',')
                                   for d in invalid_msg.split()
                                   if d != 'or']
            invalid_deps = [c for c in spec_constraints
                            if any(c.satisfies(invd, strict=True)
                                   for invd in invalid_deps_string)]
            if len(invalid_deps) != len(invalid_deps_string):
                # couldn't map every reported dep to a constraint; give up
                raise e
            invalid_constraints.extend(invalid_deps)
        except UnknownVariantError as e:
            # same message-parsing approach for unknown variant names
            invalid_variants = re.findall(r"'(\w+)'", e.message)
            invalid_deps = [c for c in spec_constraints
                            if any(name in c.variants
                                   for name in invalid_variants)]
            if len(invalid_deps) != len(invalid_variants):
                raise e
            invalid_constraints.extend(invalid_deps)
def make_repo_path(root):
    """Build a RepoPath from the repo subdirectories in an environment."""
    repo_path = spack.repo.RepoPath()
    if os.path.isdir(root):
        for entry in os.listdir(root):
            candidate = os.path.join(root, entry)
            # Non-directory entries are silently skipped.
            if os.path.isdir(candidate):
                repo_path.put_last(spack.repo.Repo(candidate))
    return repo_path
def prepare_config_scope(env):
    """Push each of env's configuration scopes onto the global search path."""
    for config_scope in env.config_scopes():
        spack.config.config.push_scope(config_scope)
def deactivate_config_scope(env):
    """Remove env's configuration scopes from the global config path."""
    scope_names = [s.name for s in env.config_scopes()]
    for name in scope_names:
        spack.config.config.remove_scope(name)
class SpackEnvironmentError(spack.error.SpackError):
    """Superclass for all errors to do with Spack environments.

    Catch this to handle any environment-related failure generically.
    """
| StarcoderdataPython |
def decode(to_be_decoded):
    """Decode a run-length encoded string.

    Each run is either a bare character ("a") or a decimal count followed by
    a single non-digit character ("12a").  Counts therefore cannot encode
    digit characters, and trailing digits with no following character are
    silently dropped (matching the original behavior).

    :param to_be_decoded: run-length encoded string
    :return: run-length decoded string
    """
    import re
    # Each match is (optional count, the character it repeats).
    runs = re.findall(r'(\d*)(\D)', to_be_decoded)
    # An absent count ('') means the character appears exactly once.
    return ''.join(char * int(count or 1) for count, char in runs)
def encode(to_be_encoded):
    """Run-length encode a string.

    Runs of two or more identical characters become "<count><char>"; single
    characters are emitted as-is ("aaab" -> "3ab").

    :param to_be_encoded: string to be run-length encoded
    :return: run-length encoded string
    """
    from itertools import groupby
    pieces = []
    # groupby yields one (char, iterator) pair per maximal run, replacing
    # the original hand-rolled last_seen/last_seen_count bookkeeping.
    for char, run in groupby(to_be_encoded):
        run_length = sum(1 for _ in run)
        pieces.append('{}{}'.format(run_length, char) if run_length > 1 else char)
    return ''.join(pieces)
| StarcoderdataPython |
6615017 | from rest_framework import serializers, viewsets
from .models import Article, Publication
class ArticleSerializer(serializers.ModelSerializer):
    """Serializes every field of the Article model."""
    class Meta:
        model = Article
        fields = "__all__"
class PublicationSerializer(serializers.ModelSerializer):
    """Serializes every field of the Publication model."""
    class Meta:
        model = Publication
        fields = "__all__"
class ArticleViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoints for Article."""
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
class PublicationViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoints for Publication."""
    queryset = Publication.objects.all()
    serializer_class = PublicationSerializer
| StarcoderdataPython |
8104086 | from os import pipe
import numpy as np
from sklearn import datasets, tree, model_selection, metrics, preprocessing, pipeline
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.naive_bayes import BernoulliNB
import matplotlib.pyplot as plt
# (a) load newsgroups: 4-category subset; headers/footers/quotes are stripped
# so the classifier cannot key on message metadata.
ng_train = datasets.fetch_20newsgroups(subset='train', categories=['rec.autos', 'talk.religion.misc', 'comp.graphics', 'sci.space'], remove=('headers', 'footers', 'quotes'))
ng_test = datasets.fetch_20newsgroups(subset='test', categories=['rec.autos', 'talk.religion.misc', 'comp.graphics', 'sci.space'], remove=('headers', 'footers', 'quotes'))
print(f'''
Set\t_|_ # Docs\t_|_ Attributes''')
# (b) decision tree
# NOTE(review): the four *_range lists below are never used -- they appear to
# be leftovers from an earlier, wider hyperparameter search.
max_depth_range = [None, 2, 5, 10]
min_samples_leaf_range = [1, 5, 10]
min_sample_split_range = [2, 10, 20]
min_leaf_nodes_range = [None, 5, 10, 20]
# Narrowed grid, informed by the RandomizedSearchCV results noted below.
param_grid = {"clf__criterion": ['gini'],
              "clf__max_depth": [10],
              "clf__min_samples_leaf": [1, 5, 10],
              "clf__min_samples_split": [20],
              "clf__max_leaf_nodes": [None, 5, 10, 20]
              }
# NOTE(review): TfidfVectorizer already produces tf-idf weights, so the
# following TfidfTransformer re-applies IDF on top of them -- confirm this
# double weighting is intended.
pipe_ = Pipeline([('vect', TfidfVectorizer()),
                  ('tfidf', TfidfTransformer()),
                  ('clf', tree.DecisionTreeClassifier())])
#grid = model_selection.RandomizedSearchCV(estimator=pipe_, param_distributions=param_grid, scoring='accuracy', refit=True, verbose=True)
# RandomizedSearchCV results.
# {*'clf__min_samples_split': 20, 'clf__min_samples_leaf': 1, 'clf__max_leaf_nodes': None, *'clf__max_depth': 10, 'clf__criterion': 'gini'}
# Optimized results from adjusted param_grid and GridSearchCV.
# {'clf__criterion': 'gini', 'clf__max_depth': 10, 'clf__max_leaf_nodes': None, 'clf__min_samples_leaf': 5, 'clf__min_samples_split': 20}
grid = model_selection.GridSearchCV(estimator=pipe_, param_grid=param_grid, scoring='accuracy', refit=True, verbose=True)
# Separate vectorization pass used only to report the attribute count; the
# grid search pipeline vectorizes independently.
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english',)
x_train = vectorizer.fit_transform(ng_train.data)
print(f'''Train\t | {ng_train.target.shape[0]}\t | {x_train.shape[1]}
Test\t | {ng_test.target.shape[0]}\t | N/A''')
grid.fit(ng_train.data, ng_train.target)
print(grid.best_params_)
y_pred = grid.best_estimator_.predict(ng_test.data)
print(metrics.confusion_matrix(ng_test.target, y_pred))
#tree.plot_tree(grid.best_estimator_['clf'], filled=True, class_names=ng_test.target_names)
#plt.show()
1687477 | import json
from rest_framework import serializers
from v1.accounts.models.account import Account
from v1.bank_transactions.models.bank_transaction import BankTransaction
from v1.blocks.models.block import Block
from v1.confirmation_blocks.models.confirmation_block import ConfirmationBlock
from v1.keys.models.key import Key
from v1.self_configurations.helpers.self_configuration import get_self_configuration
from v1.self_configurations.helpers.signing_key import get_signing_key
from v1.utils.encryption import symmetric_encrypt, asymmetric_encrypt, asymmetric_decrypt
def get_json_transactions(encryption_key):
    """Return all transactions holding JSON data readable by ``encryption_key``.

    TODO(review): stub -- always returns an empty list, which makes the
    re-encryption pass in ``create_bank_transactions`` a no-op.
    """
    return []
def create_bank_transactions(*, block, message):
    """Create bank transactions from given block data.

    Persists one BankTransaction per entry in ``message['txs']``.  Entries
    carrying ``json_data`` additionally trigger encryption-key bookkeeping:
    Key rows are bulk-created for granted accessors and deleted for revoked
    ones.
    """
    bank_transactions = []
    # NOTE(review): ``block`` is read with .get() here but passed as a model
    # instance to BankTransaction below -- confirm the expected type.
    sender = block.get('sender')
    encrypted_symmetric_key = None
    keys_to_add = []
    keys_to_delete = []
    for tx in message['txs']:
        json_data_for_db = None
        if 'json_data' in tx:
            json_data = tx.get('json_data')
            # Renamed from ``type`` to avoid shadowing the builtin.
            tx_type = json_data.get('type')
            encryption_key = json_data.get('account', sender)
            if tx_type not in ["register_data", "append_data", "ask_for_access", "grant_access", "revoke_access"]:
                continue
            node_private_key = get_signing_key()
            node_public_key = node_private_key.verify_key
            if tx_type == "register_data" or tx_type == "grant_access":
                keys_to_add.append({'accessor': encryption_key, 'patient_id': sender})
                # add the node as an accessor so it can manipulate the symmetric key
                keys_to_add.append({'accessor': node_public_key, 'patient_id': sender})
            elif tx_type == "revoke_access":
                keys_to_delete.append({'accessor': encryption_key, 'patient_id': sender})
                # get all transactions that contain JSON data for the patient
                transactions = get_json_transactions(sender)
                new_transaction_data = {}
                for transaction in transactions:
                    if transaction["json_data"]["type"] in ["register_data", "append_data"]:
                        decrypted_data = asymmetric_decrypt(transaction["json_data"]["data"], node_private_key)
                        new_transaction_data.update(decrypted_data)
                new_data_symmetric_result = symmetric_encrypt(json.dumps(new_transaction_data))
                # BUG FIX: the original dict literal was missing the comma
                # after the "type" entry, which is a SyntaxError.
                new_transaction_json_data_for_db = {
                    "patient_id": encryption_key,
                    "type": tx_type,
                    "data": new_data_symmetric_result,
                    # NOTE(review): at this point ``encrypted_symmetric_key``
                    # still holds the value from a *previous* tx (or None on
                    # the first one) -- confirm this is intended.
                    "access": encrypted_symmetric_key
                }
                new_data_transaction = BankTransaction(
                    amount=0,
                    block=block,
                    fee=tx.get('fee', ''),
                    memo=tx.get('memo', ''),
                    json_data=new_transaction_json_data_for_db,
                    recipient=tx['recipient']
                )
                bank_transactions.append(new_data_transaction)
            # Encrypt the payload with a fresh symmetric key, then wrap that
            # key for the accessor's public key.
            symmetric_result = symmetric_encrypt(json.dumps(json_data["data"]))
            encrypted_symmetric_key = asymmetric_encrypt(symmetric_result['key'], encryption_key)
            # BUG FIX: missing comma after the "type" entry here as well.
            json_data_for_db = {
                "patient_id": encryption_key,
                "type": tx_type,
                "data": symmetric_result['message'],
                "access": encrypted_symmetric_key
            }
        bank_transaction = BankTransaction(
            amount=tx['amount'],
            block=block,
            fee=tx.get('fee', ''),
            memo=tx.get('memo', ''),
            json_data=json_data_for_db,
            recipient=tx['recipient']
        )
        bank_transactions.append(bank_transaction)
    keys_to_add = [Key(
        patient_id=key['patient_id'],
        accessor=key['accessor'],
        encrypted_symmetric_key=encrypted_symmetric_key
    ) for key in keys_to_add]
    Key.objects.bulk_create(keys_to_add)
    keys_to_delete = [Key(
        patient_id=key['patient_id'],
        accessor=key['accessor']
    ) for key in keys_to_delete]
    for key in keys_to_delete:
        key.delete()
    BankTransaction.objects.bulk_create(bank_transactions)
def create_block_and_related_objects(block_data):
    """
    Create block, bank transactions, and account if necessary
    Returns block, block_created
    """
    account_number = block_data['account_number']
    message = block_data['message']
    signature = block_data['signature']
    balance_key = message['balance_key']

    # A balance key may only ever map to one block, so look it up first.
    block = Block.objects.filter(balance_key=balance_key).first()
    if block:
        # User is attempting to resend the same exact block
        if block.signature == signature:
            if ConfirmationBlock.objects.filter(block=block).exists():
                raise serializers.ValidationError('Block has already been confirmed')
            return block, False
        # User is using that balance key to send a new block (different Txs)
        # -- replace the old transactions wholesale.
        BankTransaction.objects.filter(block=block).delete()
        create_bank_transactions(block=block, message=message)
        return block, False

    # No existing block for this balance key: create it and its transactions.
    block = Block.objects.create(
        balance_key=balance_key,
        sender=account_number,
        signature=signature
    )
    create_bank_transactions(block=block, message=message)
    # Ensure the sender has an Account row; new accounts start at trust 0.
    Account.objects.get_or_create(
        account_number=account_number,
        defaults={'trust': 0},
    )
    return block, True
| StarcoderdataPython |
4971015 | #
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from El.core import *
from El.lapack_like.factor import *
# Enumeration of the KKT-system formulations used by the interior-point
# methods below.
(FULL_KKT,AUGMENTED_KKT,NORMAL_KKT) = (0,1,2)

# Line-search controls for the Infeasible Path-Following (IPF) solvers.
# The _s/_d suffixes denote single/double precision; defaults are filled in
# by the corresponding C routine on construction.
lib.ElIPFLineSearchCtrlDefault_s.argtypes = \
lib.ElIPFLineSearchCtrlDefault_d.argtypes = \
  [c_void_p]
class IPFLineSearchCtrl_s(ctypes.Structure):
  # Field order/types must mirror the C struct exactly (ABI contract).
  _fields_ = [("gamma",sType),("beta",sType),("psi",sType),
              ("stepRatio",sType),("progress",bType)]
  def __init__(self):
    lib.ElIPFLineSearchCtrlDefault_s(pointer(self))
class IPFLineSearchCtrl_d(ctypes.Structure):
  _fields_ = [("gamma",dType),("beta",dType),("psi",dType),
              ("stepRatio",dType),("progress",bType)]
  def __init__(self):
    lib.ElIPFLineSearchCtrlDefault_d(pointer(self))
# Linear program
# ==============
# Solver choices for LPs.
(LP_ADMM,LP_IPF,LP_IPF_SELFDUAL,LP_MEHROTRA,LP_MEHROTRA_SELFDUAL)=(0,1,2,3,4)

# Direct conic form
# -----------------
# ADMM control structure for direct-form LPs.
lib.ElLPDirectADMMCtrlDefault_s.argtypes = \
lib.ElLPDirectADMMCtrlDefault_d.argtypes = \
  [c_void_p]
class LPDirectADMMCtrl_s(ctypes.Structure):
  _fields_ = [("rho",sType),("alpha",sType),
              ("maxIter",iType),
              ("absTol",sType),("relTol",sType),
              ("inv",bType),("progress",bType)]
  def __init__(self):
    lib.ElLPDirectADMMCtrlDefault_s(pointer(self))
class LPDirectADMMCtrl_d(ctypes.Structure):
  _fields_ = [("rho",dType),("alpha",dType),
              ("maxIter",iType),
              ("absTol",dType),("relTol",dType),
              ("inv",bType),("progress",bType)]
  def __init__(self):
    lib.ElLPDirectADMMCtrlDefault_d(pointer(self))

# IPF control for direct-form LPs; the C default-initializer takes a flag
# indicating whether the problem matrix is sparse.
lib.ElLPDirectIPFCtrlDefault_s.argtypes = \
lib.ElLPDirectIPFCtrlDefault_d.argtypes = \
  [c_void_p,bType]
class LPDirectIPFCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("centering",sType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_s),
              ("lineSearchCtrl",IPFLineSearchCtrl_s),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectIPFCtrlDefault_s(pointer(self),isSparse)
class LPDirectIPFCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("centering",dType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_d),
              ("lineSearchCtrl",IPFLineSearchCtrl_d),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectIPFCtrlDefault_d(pointer(self),isSparse)

# Mehrotra predictor-corrector control for direct-form LPs.
lib.ElLPDirectMehrotraCtrlDefault_s.argtypes = \
lib.ElLPDirectMehrotraCtrlDefault_d.argtypes = \
  [c_void_p,bType]
class LPDirectMehrotraCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectMehrotraCtrlDefault_s(pointer(self),isSparse)
class LPDirectMehrotraCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectMehrotraCtrlDefault_d(pointer(self),isSparse)

# Umbrella control: selects the approach and bundles the per-method controls.
lib.ElLPDirectCtrlDefault_s.argtypes = \
lib.ElLPDirectCtrlDefault_d.argtypes = \
  [c_void_p,bType]
class LPDirectCtrl_s(ctypes.Structure):
  _fields_ = [("approach",c_uint),("admmCtrl",LPDirectADMMCtrl_s),
              ("ipfCtrl",LPDirectIPFCtrl_s),
              ("mehrotraCtrl",LPDirectMehrotraCtrl_s)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectCtrlDefault_s(pointer(self),isSparse)
class LPDirectCtrl_d(ctypes.Structure):
  _fields_ = [("approach",c_uint),("admmCtrl",LPDirectADMMCtrl_d),
              ("ipfCtrl",LPDirectIPFCtrl_d),
              ("mehrotraCtrl",LPDirectMehrotraCtrl_d)]
  def __init__(self,isSparse=True):
    lib.ElLPDirectCtrlDefault_d(pointer(self),isSparse)
# C signatures for the dense/distributed/sparse direct LP solvers.  The X
# variants additionally take a control structure by value.
lib.ElLPDirect_s.argtypes = \
lib.ElLPDirect_d.argtypes = \
lib.ElLPDirectDist_s.argtypes = \
lib.ElLPDirectDist_d.argtypes = \
lib.ElLPDirectSparse_s.argtypes = \
lib.ElLPDirectSparse_d.argtypes = \
lib.ElLPDirectDistSparse_s.argtypes = \
lib.ElLPDirectDistSparse_d.argtypes = \
  [c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p]
lib.ElLPDirectX_s.argtypes = \
lib.ElLPDirectXSparse_s.argtypes = \
lib.ElLPDirectXDist_s.argtypes = \
lib.ElLPDirectXDistSparse_s.argtypes = \
  [c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   LPDirectCtrl_s]
lib.ElLPDirectX_d.argtypes = \
lib.ElLPDirectXSparse_d.argtypes = \
lib.ElLPDirectXDist_d.argtypes = \
lib.ElLPDirectXDistSparse_d.argtypes = \
  [c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   LPDirectCtrl_d]
def LPDirect(A,b,c,x,y,z,ctrl=None):
  """Solve a direct conic-form LP via the Elemental C library.

  Dispatches on the container type of ``A`` (Matrix, DistMatrix,
  SparseMatrix, DistSparseMatrix) and its datatype tag (single/double) to
  the matching C routine; passing ``ctrl`` selects the X variant with
  explicit solver options.  Results are written into ``x``, ``y`` and ``z``
  in place.
  """
  if A.tag != b.tag or b.tag != c.tag or c.tag != x.tag or \
     x.tag != y.tag or y.tag != z.tag:
    raise Exception('Datatypes of {A,b,c,x,y,z} must match')
  if type(b) is not type(c) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z):
    raise Exception('{b,c,x,y,z} must be of the same type')
  args = [A.obj,b.obj,c.obj,x.obj,y.obj,z.obj]
  argsCtrl = [A.obj,b.obj,c.obj,x.obj,y.obj,z.obj,ctrl]
  if type(A) is Matrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPDirect_s(*args)
      else: lib.ElLPDirectX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPDirect_d(*args)
      else: lib.ElLPDirectX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix: raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPDirectDist_s(*args)
      else: lib.ElLPDirectXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPDirectDist_d(*args)
      else: lib.ElLPDirectXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPDirectSparse_s(*args)
      else: lib.ElLPDirectXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPDirectSparse_d(*args)
      else: lib.ElLPDirectXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec: raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPDirectDistSparse_s(*args)
      else: lib.ElLPDirectXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPDirectDistSparse_d(*args)
      else: lib.ElLPDirectXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
# Affine conic form
# -----------------
# IPF control for affine-form LPs (no sparse flag on the C default-init).
lib.ElLPAffineIPFCtrlDefault_s.argtypes = \
lib.ElLPAffineIPFCtrlDefault_d.argtypes = \
  [c_void_p]
class LPAffineIPFCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("centering",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("lineSearchCtrl",IPFLineSearchCtrl_s),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElLPAffineIPFCtrlDefault_s(pointer(self))
class LPAffineIPFCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("centering",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("lineSearchCtrl",IPFLineSearchCtrl_d),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElLPAffineIPFCtrlDefault_d(pointer(self))

# Mehrotra control for affine-form LPs.
lib.ElLPAffineMehrotraCtrlDefault_s.argtypes = \
lib.ElLPAffineMehrotraCtrlDefault_d.argtypes = \
  [c_void_p]
class LPAffineMehrotraCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElLPAffineMehrotraCtrlDefault_s(pointer(self))
class LPAffineMehrotraCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElLPAffineMehrotraCtrlDefault_d(pointer(self))

# Umbrella control for affine-form LPs.
lib.ElLPAffineCtrlDefault_s.argtypes = \
lib.ElLPAffineCtrlDefault_d.argtypes = \
  [c_void_p]
class LPAffineCtrl_s(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",LPAffineIPFCtrl_s),
              ("mehrotraCtrl",LPAffineMehrotraCtrl_s)]
  def __init__(self):
    lib.ElLPAffineCtrlDefault_s(pointer(self))
class LPAffineCtrl_d(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",LPAffineIPFCtrl_d),
              ("mehrotraCtrl",LPAffineMehrotraCtrl_d)]
  def __init__(self):
    lib.ElLPAffineCtrlDefault_d(pointer(self))
# C signatures for the affine-form LP solvers; the X variants take a control
# structure by value.
lib.ElLPAffine_s.argtypes = \
lib.ElLPAffine_d.argtypes = \
lib.ElLPAffineDist_s.argtypes = \
lib.ElLPAffineDist_d.argtypes = \
lib.ElLPAffineSparse_s.argtypes = \
lib.ElLPAffineSparse_d.argtypes = \
lib.ElLPAffineDistSparse_s.argtypes = \
lib.ElLPAffineDistSparse_d.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p]
lib.ElLPAffineX_s.argtypes = \
lib.ElLPAffineXDist_s.argtypes = \
lib.ElLPAffineXSparse_s.argtypes = \
lib.ElLPAffineXDistSparse_s.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p,
   LPAffineCtrl_s]
lib.ElLPAffineX_d.argtypes = \
lib.ElLPAffineXDist_d.argtypes = \
lib.ElLPAffineXSparse_d.argtypes = \
lib.ElLPAffineXDistSparse_d.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p,
   LPAffineCtrl_d]
def LPAffine(A,G,b,c,h,x,y,z,s,ctrl=None):
  """Solve an affine conic-form LP via the Elemental C library.

  Dispatches on the container type and datatype tag of ``A``; ``ctrl``
  selects the X variant with explicit solver options.  Results are written
  into ``x``, ``y``, ``z`` and ``s`` in place.
  """
  if type(A) is not type(G):
    raise Exception('A and G must be of the same type')
  # NOTE(review): ``type(b) is not type(c)`` is tested twice below; the
  # second occurrence is redundant (possibly a copy/paste slip).
  if type(b) is not type(c) or type(b) is not type(c) or \
     type(b) is not type(h) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z) or \
     type(b) is not type(s):
    raise Exception('{b,c,h,x,y,z,s} must be of the same type')
  args = [A.obj,G.obj,b.obj,c.obj,h.obj,x.obj,y.obj,z.obj,s.obj]
  argsCtrl = [A.obj,G.obj,b.obj,c.obj,h.obj,x.obj,y.obj,z.obj,s.obj,ctrl]
  if type(A) is Matrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPAffine_s(*args)
      else: lib.ElLPAffineX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPAffine_d(*args)
      else: lib.ElLPAffineX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix:
      raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPAffineDist_s(*args)
      else: lib.ElLPAffineXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPAffineDist_d(*args)
      else: lib.ElLPAffineXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPAffineSparse_s(*args)
      else: lib.ElLPAffineXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPAffineSparse_d(*args)
      else: lib.ElLPAffineXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec:
      raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl == None: lib.ElLPAffineDistSparse_s(*args)
      else: lib.ElLPAffineXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElLPAffineDistSparse_d(*args)
      else: lib.ElLPAffineXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
# Quadratic program
# =================
# Solver choices for QPs.
(QP_ADMM,QP_IPF,QP_IPF_SELFDUAL,QP_MEHROTRA,QP_MEHROTRA_SELFDUAL)=(0,1,2,3,4)

# Direct conic form
# -----------------
# IPF control for direct-form QPs.
lib.ElQPDirectIPFCtrlDefault_s.argtypes = \
lib.ElQPDirectIPFCtrlDefault_d.argtypes = \
  [c_void_p]
class QPDirectIPFCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("centering",sType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_s),
              ("lineSearchCtrl",IPFLineSearchCtrl_s),("equilibrate",bType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPDirectIPFCtrlDefault_s(pointer(self))
class QPDirectIPFCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("centering",dType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_d),
              ("lineSearchCtrl",IPFLineSearchCtrl_d),("equilibrate",bType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPDirectIPFCtrlDefault_d(pointer(self))

# Mehrotra control for direct-form QPs.
lib.ElQPDirectMehrotraCtrlDefault_s.argtypes = \
lib.ElQPDirectMehrotraCtrlDefault_d.argtypes = \
  [c_void_p]
class QPDirectMehrotraCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPDirectMehrotraCtrlDefault_s(pointer(self))
class QPDirectMehrotraCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("system",c_uint),("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPDirectMehrotraCtrlDefault_d(pointer(self))

# Umbrella control for direct-form QPs.
lib.ElQPDirectCtrlDefault_s.argtypes = \
lib.ElQPDirectCtrlDefault_d.argtypes = \
  [c_void_p]
class QPDirectCtrl_s(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",QPDirectIPFCtrl_s),
              ("mehrotraCtrl",QPDirectMehrotraCtrl_s)]
  def __init__(self):
    lib.ElQPDirectCtrlDefault_s(pointer(self))
class QPDirectCtrl_d(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",QPDirectIPFCtrl_d),
              ("mehrotraCtrl",QPDirectMehrotraCtrl_d)]
  def __init__(self):
    lib.ElQPDirectCtrlDefault_d(pointer(self))
# C signatures for the direct-form QP solvers; X variants take a control
# structure by value.
lib.ElQPDirect_s.argtypes = \
lib.ElQPDirect_d.argtypes = \
lib.ElQPDirectDist_s.argtypes = \
lib.ElQPDirectDist_d.argtypes = \
lib.ElQPDirectSparse_s.argtypes = \
lib.ElQPDirectSparse_d.argtypes = \
lib.ElQPDirectDistSparse_s.argtypes = \
lib.ElQPDirectDistSparse_d.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p]
lib.ElQPDirectX_s.argtypes = \
lib.ElQPDirectXSparse_s.argtypes = \
lib.ElQPDirectXDist_s.argtypes = \
lib.ElQPDirectXDistSparse_s.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   QPDirectCtrl_s]
lib.ElQPDirectX_d.argtypes = \
lib.ElQPDirectXSparse_d.argtypes = \
lib.ElQPDirectXDist_d.argtypes = \
lib.ElQPDirectXDistSparse_d.argtypes = \
  [c_void_p,c_void_p,
   c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   QPDirectCtrl_d]
def QPDirect(Q,A,b,c,x,y,z,ctrl=None):
  """Solve a direct conic-form QP via the Elemental C library.

  Dispatches on the container type and datatype tag of ``A``; ``ctrl``
  selects the X variant with explicit solver options.  Results are written
  into ``x``, ``y`` and ``z`` in place.
  """
  if type(Q) is not type(A):
    raise Exception('A and Q must be of the same type')
  if type(b) is not type(c) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z):
    raise Exception('{b,c,x,y,z} must be of the same type')
  args = [Q.obj,A.obj,b.obj,c.obj,x.obj,y.obj,z.obj]
  argsCtrl = [Q.obj,A.obj,b.obj,c.obj,x.obj,y.obj,z.obj,ctrl]
  if type(A) is Matrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPDirect_s(*args)
      else: lib.ElQPDirectX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPDirect_d(*args)
      else: lib.ElQPDirectX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix: raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPDirectDist_s(*args)
      else: lib.ElQPDirectXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPDirectDist_d(*args)
      else: lib.ElQPDirectXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPDirectSparse_s(*args)
      else: lib.ElQPDirectXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPDirectSparse_d(*args)
      else: lib.ElQPDirectXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec: raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPDirectDistSparse_s(*args)
      else: lib.ElQPDirectXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPDirectDistSparse_d(*args)
      else: lib.ElQPDirectXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
# Affine conic form
# -----------------
# IPF control for affine-form QPs.
lib.ElQPAffineIPFCtrlDefault_s.argtypes = \
lib.ElQPAffineIPFCtrlDefault_d.argtypes = \
  [c_void_p]
class QPAffineIPFCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("centering",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("lineSearchCtrl",IPFLineSearchCtrl_s),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPAffineIPFCtrlDefault_s(pointer(self))
class QPAffineIPFCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("centering",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("lineSearchCtrl",IPFLineSearchCtrl_d),
              ("equilibrate",bType),("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPAffineIPFCtrlDefault_d(pointer(self))

# Mehrotra control for affine-form QPs.
lib.ElQPAffineMehrotraCtrlDefault_s.argtypes = \
lib.ElQPAffineMehrotraCtrlDefault_d.argtypes = \
  [c_void_p]
class QPAffineMehrotraCtrl_s(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPAffineMehrotraCtrlDefault_s(pointer(self))
class QPAffineMehrotraCtrl_d(ctypes.Structure):
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElQPAffineMehrotraCtrlDefault_d(pointer(self))

# Umbrella control for affine-form QPs.
lib.ElQPAffineCtrlDefault_s.argtypes = \
lib.ElQPAffineCtrlDefault_d.argtypes = \
  [c_void_p]
class QPAffineCtrl_s(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",QPAffineIPFCtrl_s),
              ("mehrotraCtrl",QPAffineMehrotraCtrl_s)]
  def __init__(self):
    lib.ElQPAffineCtrlDefault_s(pointer(self))
class QPAffineCtrl_d(ctypes.Structure):
  _fields_ = [("approach",c_uint),
              ("ipfCtrl",QPAffineIPFCtrl_d),
              ("mehrotraCtrl",QPAffineMehrotraCtrl_d)]
  def __init__(self):
    lib.ElQPAffineCtrlDefault_d(pointer(self))
# C signatures for the affine-form QP solvers; X variants take a control
# structure by value.
lib.ElQPAffine_s.argtypes = \
lib.ElQPAffine_d.argtypes = \
lib.ElQPAffineDist_s.argtypes = \
lib.ElQPAffineDist_d.argtypes = \
lib.ElQPAffineSparse_s.argtypes = \
lib.ElQPAffineSparse_d.argtypes = \
lib.ElQPAffineDistSparse_s.argtypes = \
lib.ElQPAffineDistSparse_d.argtypes = \
  [c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p]
lib.ElQPAffineX_s.argtypes = \
lib.ElQPAffineXDist_s.argtypes = \
lib.ElQPAffineXSparse_s.argtypes = \
lib.ElQPAffineXDistSparse_s.argtypes = \
  [c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p,
   QPAffineCtrl_s]
lib.ElQPAffineX_d.argtypes = \
lib.ElQPAffineXDist_d.argtypes = \
lib.ElQPAffineXSparse_d.argtypes = \
lib.ElQPAffineXDistSparse_d.argtypes = \
  [c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,
   c_void_p,c_void_p,c_void_p,c_void_p,
   QPAffineCtrl_d]
def QPAffine(Q,A,G,b,c,h,x,y,z,s,ctrl=None):
  """Solve an affine conic-form QP via the Elemental C library.

  Dispatches on the container type and datatype tag of ``A``; ``ctrl``
  selects the X variant with explicit solver options.  Results are written
  into ``x``, ``y``, ``z`` and ``s`` in place.
  """
  if type(Q) is not type(A) or type(A) is not type(G):
    raise Exception('{Q,A,G} must be of the same type')
  # NOTE(review): ``type(b) is not type(c)`` is tested twice below; the
  # second occurrence is redundant (possibly a copy/paste slip).
  if type(b) is not type(c) or type(b) is not type(c) or \
     type(b) is not type(h) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z) or \
     type(b) is not type(s):
    raise Exception('{b,c,h,x,y,z,s} must be of the same type')
  args = [Q.obj,A.obj,G.obj,b.obj,c.obj,h.obj,x.obj,y.obj,z.obj,s.obj]
  argsCtrl = [Q.obj,A.obj,G.obj,b.obj,c.obj,h.obj,x.obj,y.obj,z.obj,s.obj,ctrl]
  if type(A) is Matrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPAffine_s(*args)
      else: lib.ElQPAffineX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPAffine_d(*args)
      else: lib.ElQPAffineX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix:
      raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPAffineDist_s(*args)
      else: lib.ElQPAffineXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPAffineDist_d(*args)
      else: lib.ElQPAffineXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPAffineSparse_s(*args)
      else: lib.ElQPAffineXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPAffineSparse_d(*args)
      else: lib.ElQPAffineXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec:
      raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl == None: lib.ElQPAffineDistSparse_s(*args)
      else: lib.ElQPAffineXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElQPAffineDistSparse_d(*args)
      else: lib.ElQPAffineXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
# Box form
# --------
# ADMM control for box-constrained QPs.
lib.ElQPBoxADMMCtrlDefault_s.argtypes = \
lib.ElQPBoxADMMCtrlDefault_d.argtypes = \
  [c_void_p]
class QPBoxADMMCtrl_s(ctypes.Structure):
  _fields_ = [("rho",sType),("alpha",sType),
              ("maxIter",iType),
              ("absTol",sType),("relTol",sType),
              ("inv",bType),("progress",bType)]
  def __init__(self):
    lib.ElQPBoxADMMCtrlDefault_s(pointer(self))
class QPBoxADMMCtrl_d(ctypes.Structure):
  _fields_ = [("rho",dType),("alpha",dType),
              ("maxIter",iType),
              ("absTol",dType),("relTol",dType),
              ("inv",bType),("progress",bType)]
  def __init__(self):
    lib.ElQPBoxADMMCtrlDefault_d(pointer(self))

# C signatures: the box solvers take the bounds by value (sType/dType) and
# return the iteration count through a POINTER(iType) out-parameter.
lib.ElQPBoxADMM_s.argtypes = \
lib.ElQPBoxADMMDist_s.argtypes = \
  [c_void_p,c_void_p,sType,sType,c_void_p,POINTER(iType)]
lib.ElQPBoxADMM_d.argtypes = \
lib.ElQPBoxADMMDist_d.argtypes = \
  [c_void_p,c_void_p,dType,dType,c_void_p,POINTER(iType)]
lib.ElQPBoxADMMX_s.argtypes = \
lib.ElQPBoxADMMXDist_s.argtypes = \
  [c_void_p,c_void_p,sType,sType,c_void_p,QPBoxADMMCtrl_s,POINTER(iType)]
lib.ElQPBoxADMMX_d.argtypes = \
lib.ElQPBoxADMMXDist_d.argtypes = \
  [c_void_p,c_void_p,dType,dType,c_void_p,QPBoxADMMCtrl_d,POINTER(iType)]
def QPBoxADMM(Q,C,lb,ub,ctrl=None):
  """Solve a box-constrained quadratic program via ADMM, dispatching to the
  Elemental C routine that matches the matrix class of Q (sequential Matrix
  or DistMatrix) and its datatype tag (single or double precision).

  Returns the solution matrix Z together with the iteration count; passing a
  QPBoxADMMCtrl_{s,d} structure as ``ctrl`` selects the expert-mode driver.
  """
  if type(Q) is not type(C):
    raise Exception('Types of Q and C must match')
  if Q.tag != C.tag:
    raise Exception('Datatypes of Q and C must match')
  # Iteration count is an output parameter filled in by the C routine.
  numIts = iType()
  if type(Q) is Matrix:
    Z = Matrix(Q.tag)
    args = [Q.obj,C.obj,lb,ub,Z.obj,pointer(numIts)]
    argsCtrl = [Q.obj,C.obj,lb,ub,Z.obj,ctrl,pointer(numIts)]
    if Q.tag == sTag:
      if ctrl==None: lib.ElQPBoxADMM_s(*args)
      else: lib.ElQPBoxADMMX_s(*argsCtrl)
    elif Q.tag == dTag:
      if ctrl==None: lib.ElQPBoxADMM_d(*args)
      else: lib.ElQPBoxADMMX_d(*argsCtrl)
    else: DataExcept()
    return Z, numIts
  elif type(Q) is DistMatrix:
    # Distribute the solution over the same process grid as Q.
    Z = DistMatrix(Q.tag,MC,MR,Q.Grid())
    args = [Q.obj,C.obj,lb,ub,Z.obj,pointer(numIts)]
    argsCtrl = [Q.obj,C.obj,lb,ub,Z.obj,ctrl,pointer(numIts)]
    if Q.tag == sTag:
      if ctrl==None: lib.ElQPBoxADMMDist_s(*args)
      else: lib.ElQPBoxADMMXDist_s(*argsCtrl)
    elif Q.tag == dTag:
      if ctrl==None: lib.ElQPBoxADMMDist_d(*args)
      else: lib.ElQPBoxADMMXDist_d(*argsCtrl)
    else: DataExcept()
    return Z, numIts
  else: TypeExcept()
# Second-order cone programs
# ==========================
(SOCP_ADMM,SOCP_IPF,SOCP_IPF_SELFDUAL,SOCP_MEHROTRA,SOCP_MEHROTRA_SELFDUAL)=\
(0,1,2,3,4)
# Direct conic form
# -----------------
lib.ElSOCPDirectMehrotraCtrlDefault_s.argtypes = \
lib.ElSOCPDirectMehrotraCtrlDefault_d.argtypes = \
[c_void_p,bType]
class SOCPDirectMehrotraCtrl_s(ctypes.Structure):
  """ctypes mirror of Elemental's single-precision Mehrotra control struct
  for direct-form SOCPs; field order must match the C struct exactly."""
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    # Let the C library fill in Elemental's default parameter values.
    lib.ElSOCPDirectMehrotraCtrlDefault_s(pointer(self))
class SOCPDirectMehrotraCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SOCPDirectMehrotraCtrl_s."""
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElSOCPDirectMehrotraCtrlDefault_d(pointer(self))
lib.ElSOCPDirectCtrlDefault_s.argtypes = \
lib.ElSOCPDirectCtrlDefault_d.argtypes = \
[c_void_p,bType]
class SOCPDirectCtrl_s(ctypes.Structure):
  """Top-level single-precision control for direct-form SOCPs: the solver
  approach (one of the SOCP_* constants) plus nested Mehrotra parameters."""
  _fields_ = [("approach",c_uint),
              ("mehrotraCtrl",SOCPDirectMehrotraCtrl_s)]
  def __init__(self):
    # Defaults are filled in by the C library.
    lib.ElSOCPDirectCtrlDefault_s(pointer(self))
class SOCPDirectCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SOCPDirectCtrl_s."""
  _fields_ = [("approach",c_uint),
              ("mehrotraCtrl",SOCPDirectMehrotraCtrl_d)]
  def __init__(self):
    lib.ElSOCPDirectCtrlDefault_d(pointer(self))
lib.ElSOCPDirect_s.argtypes = \
lib.ElSOCPDirect_d.argtypes = \
lib.ElSOCPDirectDist_s.argtypes = \
lib.ElSOCPDirectDist_d.argtypes = \
lib.ElSOCPDirectSparse_s.argtypes = \
lib.ElSOCPDirectSparse_d.argtypes = \
lib.ElSOCPDirectDistSparse_s.argtypes = \
lib.ElSOCPDirectDistSparse_d.argtypes = \
[c_void_p,
c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p]
lib.ElSOCPDirectX_s.argtypes = \
lib.ElSOCPDirectXSparse_s.argtypes = \
lib.ElSOCPDirectXDist_s.argtypes = \
lib.ElSOCPDirectXDistSparse_s.argtypes = \
[c_void_p,
c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
SOCPDirectCtrl_s]
lib.ElSOCPDirectX_d.argtypes = \
lib.ElSOCPDirectXSparse_d.argtypes = \
lib.ElSOCPDirectXDist_d.argtypes = \
lib.ElSOCPDirectXDistSparse_d.argtypes = \
[c_void_p,
c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
SOCPDirectCtrl_d]
def SOCPDirect(A,b,c,orders,firstInds,labels,x,y,z,ctrl=None):
  """Solve a direct-form second-order cone program, dispatching to the
  Elemental routine matching the matrix class of A (sequential, distributed,
  sparse, or distributed-sparse) and the datatype tag (single or double).

  The cone structure is described by the integer vectors ``orders``,
  ``firstInds`` and ``labels``; the iterates x, y, z are updated in place.
  Passing a SOCPDirectCtrl_{s,d} as ``ctrl`` selects the expert-mode driver.
  """
  if A.tag != b.tag or b.tag != c.tag or c.tag != x.tag or \
     x.tag != y.tag or y.tag != z.tag:
    raise Exception('Datatypes of {A,b,c,x,y,z} must match')
  if orders.tag != iTag or firstInds.tag != iTag or labels.tag != iTag:
    raise Exception('Datatypes of conic descriptions should be integers')
  if type(b) is not type(c) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z) or \
     type(b) is not type(orders) or type(b) is not type(firstInds) or \
     type(b) is not type(labels):
    raise Exception('{b,c,x,y,z,orders,firstInds,labels} must have same type')
  args = [A.obj,b.obj,c.obj,orders.obj,firstInds.obj,labels.obj,
          x.obj,y.obj,z.obj]
  argsCtrl = [A.obj,b.obj,c.obj,orders.obj,firstInds.obj,labels.obj,
              x.obj,y.obj,z.obj,ctrl]
  # Dispatch on the matrix class of A, then on its precision tag.
  if type(A) is Matrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElSOCPDirect_s(*args)
      else: lib.ElSOCPDirectX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElSOCPDirect_d(*args)
      else: lib.ElSOCPDirectX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix: raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElSOCPDirectDist_s(*args)
      else: lib.ElSOCPDirectXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElSOCPDirectDist_d(*args)
      else: lib.ElSOCPDirectXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix: raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl == None: lib.ElSOCPDirectSparse_s(*args)
      else: lib.ElSOCPDirectXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElSOCPDirectSparse_d(*args)
      else: lib.ElSOCPDirectXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec: raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl == None: lib.ElSOCPDirectDistSparse_s(*args)
      else: lib.ElSOCPDirectXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl == None: lib.ElSOCPDirectDistSparse_d(*args)
      else: lib.ElSOCPDirectXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
# Affine conic form
# -----------------
lib.ElSOCPAffineMehrotraCtrlDefault_s.argtypes = \
lib.ElSOCPAffineMehrotraCtrlDefault_d.argtypes = \
[c_void_p]
class SOCPAffineMehrotraCtrl_s(ctypes.Structure):
  """ctypes mirror of Elemental's single-precision Mehrotra control struct
  for affine-form SOCPs; field order must match the C struct exactly."""
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",sType),("targetTol",sType),
              ("maxIts",iType),("maxStepRatio",sType),
              ("qsdCtrl",RegQSDCtrl_s),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    # Let the C library fill in Elemental's default parameter values.
    lib.ElSOCPAffineMehrotraCtrlDefault_s(pointer(self))
class SOCPAffineMehrotraCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SOCPAffineMehrotraCtrl_s."""
  _fields_ = [("primalInit",bType),("dualInit",bType),
              ("minTol",dType),("targetTol",dType),
              ("maxIts",iType),("maxStepRatio",dType),
              ("qsdCtrl",RegQSDCtrl_d),
              ("outerEquil",bType),("innerEquil",bType),
              ("scaleTwoNorm",bType),("basisSize",iType),
              ("progress",bType),("time",bType)]
  def __init__(self):
    lib.ElSOCPAffineMehrotraCtrlDefault_d(pointer(self))
lib.ElSOCPAffineCtrlDefault_s.argtypes = \
lib.ElSOCPAffineCtrlDefault_d.argtypes = \
[c_void_p]
class SOCPAffineCtrl_s(ctypes.Structure):
  """Top-level single-precision control for affine-form SOCPs: the solver
  approach (one of the SOCP_* constants) plus nested Mehrotra parameters."""
  _fields_ = [("approach",c_uint),
              ("mehrotraCtrl",SOCPAffineMehrotraCtrl_s)]
  def __init__(self):
    # Defaults are filled in by the C library.
    lib.ElSOCPAffineCtrlDefault_s(pointer(self))
class SOCPAffineCtrl_d(ctypes.Structure):
  """Double-precision counterpart of SOCPAffineCtrl_s."""
  _fields_ = [("approach",c_uint),
              ("mehrotraCtrl",SOCPAffineMehrotraCtrl_d)]
  def __init__(self):
    lib.ElSOCPAffineCtrlDefault_d(pointer(self))
lib.ElSOCPAffine_s.argtypes = \
lib.ElSOCPAffine_d.argtypes = \
lib.ElSOCPAffineDist_s.argtypes = \
lib.ElSOCPAffineDist_d.argtypes = \
lib.ElSOCPAffineSparse_s.argtypes = \
lib.ElSOCPAffineSparse_d.argtypes = \
lib.ElSOCPAffineDistSparse_s.argtypes = \
lib.ElSOCPAffineDistSparse_d.argtypes = \
[c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,c_void_p]
lib.ElSOCPAffineX_s.argtypes = \
lib.ElSOCPAffineXDist_s.argtypes = \
lib.ElSOCPAffineXSparse_s.argtypes = \
lib.ElSOCPAffineXDistSparse_s.argtypes = \
[c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,c_void_p,
SOCPAffineCtrl_s]
lib.ElSOCPAffineX_d.argtypes = \
lib.ElSOCPAffineXDist_d.argtypes = \
lib.ElSOCPAffineXSparse_d.argtypes = \
lib.ElSOCPAffineXDistSparse_d.argtypes = \
[c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,
c_void_p,c_void_p,c_void_p,c_void_p,
SOCPAffineCtrl_d]
def SOCPAffine(A,G,b,c,h,orders,firstInds,labels,x,y,z,s,ctrl=None):
  """Solve an affine-form second-order cone program, dispatching to the
  Elemental routine matching the matrix class of A/G (sequential,
  distributed, sparse, or distributed-sparse) and the datatype tag
  (single or double precision).

  The cone structure is described by the integer vectors ``orders``,
  ``firstInds`` and ``labels``; the primal/dual iterates x, y, z, s are
  updated in place.  Passing a SOCPAffineCtrl_{s,d} structure as ``ctrl``
  selects the expert-mode ("X") driver.
  """
  if type(A) is not type(G):
    raise Exception('A and G must be of the same type')
  if orders.tag != iTag or firstInds.tag != iTag or labels.tag != iTag:
    raise Exception('cone descriptions must have integer datatypes')
  # BUG FIX: the previous version tested ``type(b) is not type(c)`` twice;
  # the redundant duplicate clause has been removed.
  if type(b) is not type(c) or \
     type(b) is not type(h) or type(b) is not type(x) or \
     type(b) is not type(y) or type(b) is not type(z) or \
     type(b) is not type(s) or type(b) is not type(orders) or \
     type(b) is not type(firstInds) or type(b) is not type(labels):
    raise Exception('vectors must be of the same type')
  args = [A.obj,G.obj,b.obj,c.obj,h.obj,orders.obj,firstInds.obj,labels.obj,
          x.obj,y.obj,z.obj,s.obj]
  argsCtrl = [A.obj,G.obj,b.obj,c.obj,h.obj,orders.obj,firstInds.obj,labels.obj,
              x.obj,y.obj,z.obj,s.obj,ctrl]
  # Dispatch on the matrix class of A, then on its precision tag.
  if type(A) is Matrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl is None: lib.ElSOCPAffine_s(*args)
      else: lib.ElSOCPAffineX_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSOCPAffine_d(*args)
      else: lib.ElSOCPAffineX_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if type(b) is not DistMatrix:
      raise Exception('b must be a DistMatrix')
    if A.tag == sTag:
      if ctrl is None: lib.ElSOCPAffineDist_s(*args)
      else: lib.ElSOCPAffineXDist_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSOCPAffineDist_d(*args)
      else: lib.ElSOCPAffineXDist_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is SparseMatrix:
    if type(b) is not Matrix:
      raise Exception('b must be a Matrix')
    if A.tag == sTag:
      if ctrl is None: lib.ElSOCPAffineSparse_s(*args)
      else: lib.ElSOCPAffineXSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSOCPAffineSparse_d(*args)
      else: lib.ElSOCPAffineXSparse_d(*argsCtrl)
    else: DataExcept()
  elif type(A) is DistSparseMatrix:
    if type(b) is not DistMultiVec:
      raise Exception('b must be a DistMultiVec')
    if A.tag == sTag:
      if ctrl is None: lib.ElSOCPAffineDistSparse_s(*args)
      else: lib.ElSOCPAffineXDistSparse_s(*argsCtrl)
    elif A.tag == dTag:
      if ctrl is None: lib.ElSOCPAffineDistSparse_d(*args)
      else: lib.ElSOCPAffineXDistSparse_d(*argsCtrl)
    else: DataExcept()
  else: TypeExcept()
| StarcoderdataPython |
3479279 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import importlib
from lime import LIME
import numpy as np
class Model(object):
    """Abstract interface for a scored binary classifier over test data.

    Subclasses supply the data access and scoring primitives; this base
    class provides thresholding and counting helpers built on top of them.
    """

    def test_auc(self):
        """Returns the area under ROC curve for the test data."""
        raise NotImplementedError()

    def train_auc(self):
        """Returns the area under ROC curve for the training data."""
        raise NotImplementedError()

    def shape(self):
        """Returns the shape of the test data."""
        raise NotImplementedError()

    def features(self):
        """Returns the feature names as list.

        Features that contain a '=' are interpreted as categorical
        features where the left side is the name and the right side is
        the value of the feature.
        """
        raise NotImplementedError()

    def threshold(self):
        """The threshold for prediction scores."""
        raise NotImplementedError()

    def get_label(self, rix):
        """Returns the binary (True or False) label of the test data row with the given index."""
        raise NotImplementedError()

    def get_row(self, rix):
        """Returns the given row of the test data."""
        raise NotImplementedError()

    def predict_proba(self, X):
        """Returns the prediction scores for X. For each row one prediction
        score must be returned (output shape is (X.shape[0],)).

        Parameters:
        -----------
        X : np.matrix or np.array
            The data to predict.
        """
        raise NotImplementedError()

    def predict_label(self, X):
        """Predicts binary labels for X by thresholding the raw scores."""
        scores = self.predict_proba(X)
        return self.predict_score(scores)

    def predict_score(self, scores):
        """Applies the decision threshold to raw prediction scores."""
        return scores >= self.threshold()

    def total_pos(self):
        """Counts the positively-labelled rows of the test data."""
        row_count = self.shape()[0]
        return sum(1 for rix in range(row_count) if self.get_label(rix))

    def use_csr(self):
        """Whether to use CSR instead of CSV to store the matrix."""
        return True

    def create_explainer(self):
        """Builds the explainer used to interpret individual predictions."""
        return LIME()
def load(module, name):
    """Loads the given module and expects a class name derived from Model.

    The class is created with the standard constructor.
    """
    loaded_module = importlib.import_module(module, __name__)
    model_cls = getattr(loaded_module, name)
    return model_cls()
| StarcoderdataPython |
8064317 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import numpy
class MolecularModel:
    """Representation of a non-Quantarhei molecule.

    This class handles conversion of non-Quantarhei models of molecules.
    """

    def __init__(self, model_type=None):
        self.model_type = model_type
        # Two-level system by default (ground state plus one excited state).
        self.nstate = 2
        # Per-state energies, all initialised to zero.
        self.default_energies = numpy.zeros(self.nstate, dtype=numpy.float64)
        # Transition-dipole lengths between every pair of states.
        self.default_dipole_lengths = numpy.zeros(
            (self.nstate, self.nstate), dtype=numpy.float64)
3594531 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-22 18:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates an unmanaged model plus a SQL view exposing purged visitor rows.

    Migrations are historical records; the schema operations below must not
    be edited after deployment.
    """
    dependencies = [
        ('intake', '0063_purgedapplication_purgedstatusupdate'),
    ]
    operations = [
        migrations.CreateModel(
            name='PurgedVisitor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                # NOTE(review): the embedded quotes make Django emit
                # "purged"."intake_visitor", i.e. a schema-qualified table
                # name pointing at the view created below — confirm against
                # the project's other purged.* models.
                'db_table': 'purged"."intake_visitor',
                # Unmanaged: Django never creates/drops this "table" itself.
                'managed': False,
            },
        ),
        # Forward SQL creates the view over a whitelist of columns;
        # the second string is the reverse (rollback) SQL.
        migrations.RunSQL(
            """CREATE OR REPLACE VIEW purged.intake_visitor AS
                SELECT %s From intake_visitor;
            """ %
            ', '.join([
                'id',
                'uuid',
                'first_visit',
                'source',
                'referrer',
                'locale',
            ]),
            """DROP VIEW purged.intake_visitor;
            """),
    ]
| StarcoderdataPython |
1860288 | <reponame>vincealdrin/Tutu<filename>detector/categorizer.py<gh_stars>1-10
from db import get_articles_filtered
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.metrics import confusion_matrix, classification_report, auc, roc_curve, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from nltk.stem.snowball import SnowballStemmer
from sklearn.model_selection import cross_val_score
from scipy.sparse import hstack
import pandas as pd
import numpy as np
from item_selector import ItemSelector
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
import matplotlib.pyplot as plt
import itertools
# Fetch joined article records with their first category label.
# NOTE(review): 0.045 is presumably a sampling fraction passed to the
# query helper — confirm against get_articles_filtered's signature.
articles = get_articles_filtered(lambda join: {
    'body': join['left']['body'],
    'title': join['left']['title'],
    'category': join['left']['categories'].nth(0).get_field('label')
}, 0.045)
print(len(articles))
# Drop sparsely-represented categories before training.
articles = [a for a in articles if a['category'] != 'Culture' and a['category'] != 'Nation']
print(len(articles))
# NOTE(review): title_tfidf appears unused in the visible code below —
# confirm whether it is dead or used further down the file.
title_tfidf = TfidfVectorizer(
    stop_words=ENGLISH_STOP_WORDS,
    ngram_range=(1, 3),
    max_df=0.85,
    min_df=0.01)
df = pd.DataFrame.from_records(articles)
# Earlier experiment restricting the frame to an explicit category list:
# df = pd.concat([
#     df[df['category'] == 'Business & Finance'],
#     df[df['category'] == 'Lifestyle'],
#     df[df['category'] == 'Disaster & Accident'],
#     df[df['category'] == 'Entertainment & Arts'],
#     df[df['category'] == 'Sports'],
#     df[df['category'] == 'Law & Government'],
#     df[df['category'] == 'Politics'],
#     df[df['category'] == 'Health'],
#     df[df['category'] == 'Crime'],
#     df[df['category'] == 'Culture'],
#     df[df['category'] == 'Economy'],
#     df[df['category'] == 'Weather'],
#     df[df['category'] == 'Environment'],
#     df[df['category'] == 'Science & Technology'],
# ])
# Fixed random_state keeps the train/test split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    df.body.values, df.category.values, test_size=0.15, random_state=42)
# ---- Logistic regression baseline: TF-IDF features -> L1 logistic ----
clf = Pipeline([
    ('tfidf',
     TfidfVectorizer(
         stop_words=ENGLISH_STOP_WORDS,
         ngram_range=(1, 2),
         max_df=0.85,
         min_df=0.01)),
    ('clf', LogisticRegression(penalty='l1', class_weight='balanced')),
])
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print('Logistic Regression')
print('Classification Report')
print(classification_report(y_test, pred, target_names=clf.classes_))
print('Accuracy: ' + str(accuracy_score(y_test, pred)))
# 5-fold cross-validation on the training portion only.
cv_scores = cross_val_score(clf, X_train, y_train, cv=5)
print("Cross Validation: %0.2f (+/- %0.2f)" % (cv_scores.mean(),
                                               cv_scores.std() * 2))
# NOTE(review): cnf_matrix is computed but never used in the visible code.
cnf_matrix = confusion_matrix(y_test, pred)
# ---- Multinomial naive Bayes on the same TF-IDF features and split ----
print('\n MultinomialNB')
clf = Pipeline([
    ('tfidf',
     TfidfVectorizer(
         stop_words=ENGLISH_STOP_WORDS,
         ngram_range=(1, 2),
         max_df=0.85,
         min_df=0.01)),
    ('clf', MultinomialNB(fit_prior=False)),
])
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
# BUG FIX: this section previously printed 'Logistic Regression' even
# though it evaluates the MultinomialNB model.
print('Multinomial Naive Bayes')
print('Classification Report')
print(classification_report(y_test, pred, target_names=clf.classes_))
print('Accuracy: ' + str(accuracy_score(y_test, pred)))
cv_scores = cross_val_score(clf, X_train, y_train, cv=5)
print("Cross Validation: %0.2f (+/- %0.2f)" % (cv_scores.mean(),
                                               cv_scores.std() * 2))
# NOTE(review): cnf_matrix is computed but never used in the visible code.
cnf_matrix = confusion_matrix(y_test, pred)
| StarcoderdataPython |
4934523 | <reponame>matthiask/django-historylinks
"""Adapters for registering models with django-HistoryLinks."""
from __future__ import unicode_literals
import sys
from itertools import chain
from threading import local
from functools import wraps
from django.core.signals import request_finished
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
from django.utils import six
from django.utils.encoding import force_text
from historylinks.models import HistoryLink
class HistoryLinkAdapterError(Exception):
    """Raised when an adapter cannot resolve or call a permalink method."""
class HistoryLinkAdapter(object):
    """An adapter that resolves the set of permalinks for one model."""

    # Names of the methods used to generate permalinks; when empty, only
    # ``get_absolute_url`` is consulted.
    permalink_methods = ()

    def __init__(self, model):
        """Stores the model class this adapter describes."""
        self.model = model

    def get_permalinks(self, obj):
        """Returns a mapping of permalink-method name to permalink for *obj*."""
        method_names = self.permalink_methods if self.permalink_methods else ("get_absolute_url",)
        # Sentinel distinguishing "attribute absent" from "attribute is None".
        missing = object()
        resolved = {}
        for method_name in method_names:
            candidate = getattr(obj, method_name, missing)
            if candidate is missing:
                raise HistoryLinkAdapterError("Could not find a method called {name!r} on {obj!r}".format(
                    name=method_name,
                    obj=obj,
                ))
            if not callable(candidate):
                raise HistoryLinkAdapterError("{model}.{method} is not a callable method".format(
                    model=self.model.__name__,
                    method=method_name,
                ))
            resolved[method_name] = candidate()
        return resolved
class RegistrationError(Exception):
    """Raised when (un)registering a model with a history link manager fails."""
class HistoryLinkContextError(Exception):
    """Raised when the history link context stack is used while inactive."""
def _bulk_save_history_links(history_links):
    """Persists the given unsaved HistoryLink rows as efficiently as possible."""
    if not history_links:
        return
    # Prefer a single bulk INSERT when the ORM supports it (Django >= 1.4).
    if hasattr(HistoryLink.objects, "bulk_create"):
        HistoryLink.objects.bulk_create(history_links)
        return
    for link in history_links:
        link.save()
class HistoryLinkContextManager(local):
    """A thread-local context manager used to manage saving history link data."""
    def __init__(self):
        """Initializes the history link context."""
        # Stack of frames: (set of pending (manager, obj) pairs, is_invalid).
        self._stack = []
        # Connect to the signalling framework.
        request_finished.connect(self._request_finished_receiver)
    def is_active(self):
        """Checks that this history link context is active."""
        return bool(self._stack)
    def _assert_active(self):
        """Ensures that the history link is active."""
        if not self.is_active():
            raise HistoryLinkContextError("The history link context is not active.")
    def start(self):
        """Starts a level in the history link context."""
        self._stack.append((set(), False))
    def add_to_context(self, manager, obj):
        """Adds an object to the current context, if active."""
        self._assert_active()
        objects, _ = self._stack[-1]
        objects.add((manager, obj))
    def invalidate(self):
        """Marks this history link context as broken, so should not be commited."""
        self._assert_active()
        objects, _ = self._stack[-1]
        self._stack[-1] = (objects, True)
    def is_invalid(self):
        """Checks whether this history link context is invalid."""
        self._assert_active()
        _, is_invalid = self._stack[-1]
        return is_invalid
    def end(self):
        """Ends a level in the history link context."""
        self._assert_active()
        # Save all the models.
        # Invalid (broken) frames are discarded without writing anything.
        tasks, is_invalid = self._stack.pop()
        if not is_invalid:
            _bulk_save_history_links(list(chain.from_iterable(manager._update_obj_history_links_iter(obj) for manager, obj in tasks)))
    # Context management.
    def update_history_links(self):
        """
        Marks up a block of code as requiring the history links to be updated.

        The returned context manager can also be used as a decorator.
        """
        return HistoryLinkContext(self)
    # Signalling hooks.
    def _request_finished_receiver(self, **kwargs):
        """
        Called at the end of a request, ensuring that any open contexts
        are closed. Not closing all active contexts can cause memory leaks
        and weird behaviour.
        """
        while self.is_active():
            self.end()
class HistoryLinkContext(object):
    """An individual context for a history link update.

    Usable either as a context manager (``with`` statement) or as a
    decorator; in both cases the wrapped block is bracketed by
    ``start()``/``end()`` on the shared context manager, and the context
    is invalidated when the block raises.
    """

    def __init__(self, context_manager):
        """Stores the context manager this context delegates to."""
        self._context_manager = context_manager

    def __enter__(self):
        """Enters a block of history link management."""
        self._context_manager.start()

    def __exit__(self, exc_type, exc_value, traceback):
        """Leaves a block of history link management, invalidating it on error."""
        try:
            if exc_type is not None:
                self._context_manager.invalidate()
        finally:
            # end() always runs, even if invalidate() itself raised.
            self._context_manager.end()

    def __call__(self, func):
        """Allows this history link context to be used as a decorator."""
        @wraps(func)
        def do_history_link_context(*args, **kwargs):
            self.__enter__()
            exception = False
            try:
                return func(*args, **kwargs)
            # FIX: explicit ``BaseException`` instead of a bare ``except:`` —
            # identical behaviour (KeyboardInterrupt/SystemExit included),
            # but the intent is now visible and lint-clean.
            except BaseException:
                exception = True
                if not self.__exit__(*sys.exc_info()):
                    raise
            finally:
                if not exception:
                    self.__exit__(None, None, None)
        return do_history_link_context
# The shared history link context manager; its state lives in a
# ``threading.local`` subclass, so each thread gets its own stack.
history_link_context_manager = HistoryLinkContextManager()
class HistoryLinkManager(object):
    """A history link manager: maps registered models to adapters and keeps
    their HistoryLink rows up to date via post_save signals."""
    def __init__(self, history_link_context_manager=history_link_context_manager):
        """Initializes the history link manager."""
        # Initialize the manager.
        # Maps model class -> adapter instance.
        self._registered_models = {}
        # Store the history link context.
        self._history_link_context_manager = history_link_context_manager
    def is_registered(self, model):
        """Checks whether the given model is registered with this history link manager."""
        return model in self._registered_models
    def register(self, model, adapter_cls=HistoryLinkAdapter, **field_overrides):
        """
        Registers the given model with this history link manager.

        If the given model is already registered with this history link manager, a
        RegistrationError will be raised.
        """
        # Check for existing registration.
        if self.is_registered(model):
            raise RegistrationError("{model!r} is already registered with this history link manager".format(
                model=model,
            ))
        # Perform any customization.
        # Field overrides become class attributes on a dynamically-built
        # adapter subclass.
        if field_overrides:
            adapter_cls = type(model.__name__ + adapter_cls.__name__, (adapter_cls,), field_overrides)
        # Perform the registration.
        adapter_obj = adapter_cls(model)
        self._registered_models[model] = adapter_obj
        # Connect to the signalling framework.
        post_save.connect(self._post_save_receiver, model)
        # Return the model, allowing this to be used as a class decorator.
        return model
    def unregister(self, model):
        """
        Unregisters the given model with this history link manager.

        If the given model is not registered with this history link manager, a RegistrationError
        will be raised.
        """
        # Check for registration.
        if not self.is_registered(model):
            raise RegistrationError("{model!r} is not registered with this history link manager".format(
                model=model,
            ))
        # Perform the unregistration.
        del self._registered_models[model]
        # Disconnect from the signalling framework.
        post_save.disconnect(self._post_save_receiver, model)
    def get_registered_models(self):
        """Returns a sequence of models that have been registered with this history link manager."""
        return self._registered_models.keys()
    def get_adapter(self, model):
        """Returns the adapter associated with the given model."""
        if self.is_registered(model):
            return self._registered_models[model]
        raise RegistrationError("{model!r} is not registered with this history link manager".format(
            model=model,
        ))
    def _update_obj_history_links_iter(self, obj):
        """Either updates the given object's history links, or yields one or more unsaved history links."""
        model = obj.__class__
        adapter = self.get_adapter(model)
        content_type = ContentType.objects.get_for_model(model)
        object_id = force_text(obj.pk)
        # Create the history link data.
        # Upsert pattern: try an UPDATE keyed on the permalink first, and
        # yield a new unsaved row only when nothing matched.
        for permalink_name, permalink_value in six.iteritems(adapter.get_permalinks(obj)):
            history_link_data = {
                "permalink": permalink_value,
                "permalink_name": permalink_name,
                "object_id": object_id,
                "content_type": content_type,
            }
            update_count = HistoryLink.objects.filter(
                permalink=permalink_value,
            ).update(**history_link_data)
            if update_count == 0:
                yield HistoryLink(**history_link_data)
    def update_obj_history_links(self, obj):
        """Updates the history links for the given obj."""
        _bulk_save_history_links(list(self._update_obj_history_links_iter(obj)))
    # Signalling hooks.
    def _post_save_receiver(self, instance, raw=False, **kwargs):
        """Signal handler for when a registered model has been saved."""
        # ``raw`` is True for fixture loading; skip those saves.
        if not raw:
            # Defer the write when an update context is open on this thread.
            if self._history_link_context_manager.is_active():
                self._history_link_context_manager.add_to_context(self, instance)
            else:
                self.update_obj_history_links(instance)
    # Accessing current URLs.
    def get_current_url(self, path):
        """Returns the current URL for whatever used to exist at the given path."""
        # Get the history links.
        try:
            history_link = HistoryLink.objects.get(permalink=path)
        except HistoryLink.DoesNotExist:
            return None
        # Resolve the model.
        model = ContentType.objects.get_for_id(id=history_link.content_type_id).model_class()
        # Resolve the adapter.
        try:
            adapter = self.get_adapter(model)
        except RegistrationError:
            return None
        # Resolve the object.
        try:
            obj = model._default_manager.get(pk=history_link.object_id)
        except model.DoesNotExist:
            return None
        # Resolve the permalinks.
        permalinks = adapter.get_permalinks(obj)
        # Resolve the specific permalink.
        return permalinks.get(history_link.permalink_name, None)
# The default history link manager.
default_history_link_manager = HistoryLinkManager()
| StarcoderdataPython |
4880399 | <reponame>killdary/Minicourse_malware
import socket

HOST = ''    # bind on all interfaces
PORT = 5000  # port the server listens on

# Create a TCP socket connecting the client and the server.
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind((HOST, PORT))  # attach the local address (IP and port) to the socket
tcp.listen(1)           # passive mode: wait for incoming connections
conn, addr = tcp.accept()  # accept a single client connection
# BUG FIX: the destination must be opened in *binary* mode ('wb'); socket
# data arrives as bytes, and writing bytes to a text-mode file raises
# TypeError on Python 3 (and corrupts the JPEG on Windows under Python 2).
with open('rc1.jpg', 'wb') as arq:
    while True:
        dados = conn.recv(1024)  # read up to 1024 bytes sent by the client
        if not dados:            # an empty read means the client closed the stream
            break
        arq.write(dados)         # append the received chunk to the file
conn.close()  # close the accepted connection (previously leaked)
tcp.close()   # close the listening socket
| StarcoderdataPython |
4969833 | """
Abstractions and operations, which may or may not be protocol related, specific to the voting namespace
"""
from flask import g, current_app
from computable.contracts import Voting
from core.protocol import get_voting
def filter_candidate_added(from_block, to_block, arg_filters=None):
    """
    given filter create a voting contract and execute the filter for the
    candidate added event, returning the results

    ``arg_filters``, when provided, is forwarded to web3's ``createFilter``
    as ``argument_filters``.
    """
    v = get_voting()
    # Build the keyword arguments once so both cases share a single
    # createFilter call; also compare with ``is not None`` (idiomatic and
    # safe against objects overriding ``__eq__``).
    filter_kwargs = dict(fromBlock=from_block, toBlock=to_block)
    if arg_filters is not None:
        filter_kwargs['argument_filters'] = arg_filters
    event_filter = v.deployed.events.CandidateAdded.createFilter(**filter_kwargs)
    return event_filter.get_all_entries()
def filter_candidate_removed(from_block, to_block):
    """
    given filter create a voting contract and execute the filter for the
    candidate removed event, returning the results
    """
    voting = get_voting()
    removed_filter = voting.deployed.events.CandidateRemoved.createFilter(
        fromBlock=from_block, toBlock=to_block)
    return removed_filter.get_all_entries()
| StarcoderdataPython |
9779404 | <reponame>fizista/django
from optparse import make_option
from django.core.management.commands.startproject import Command as BaseCommand
class Command(BaseCommand):
    """``startproject`` variant adding an ``--extra`` option whose value is
    passed through to the template context (appears to be a test fixture —
    confirm against the surrounding test suite)."""
    # Legacy optparse-style option declaration (pre-Django 1.8 command API).
    option_list = BaseCommand.option_list + (
        make_option('--extra',
            action='store', dest='extra',
            help='An arbitrary extra value passed to the context'),
    )
| StarcoderdataPython |
def main():
    """Read n and a, b, c from stdin, then print how many integers in
    1..n are divisible by at least one of a, b or c."""
    n = int(input())
    a, b, c = (int(tok) for tok in input().split())
    # Union of the three arithmetic progressions up to n.
    hit = set(range(a, n + 1, a)) | set(range(b, n + 1, b)) | set(range(c, n + 1, c))
    print(len(hit))
def fizzbuzz():
    """Print the classic FizzBuzz sequence for the numbers 1..100."""
    for num in range(1, 101):
        label = ("Fizz" if num % 3 == 0 else "") + ("Buzz" if num % 5 == 0 else "")
        print(label or num)


if __name__ == '__main__':
    main()
class Solution:
    """Meeting Rooms (LeetCode 252): decide whether one person can attend
    every meeting, i.e. whether no two intervals overlap."""

    def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
        """Return True iff the intervals are pairwise non-overlapping
        (meetings that merely touch at an endpoint are allowed).

        Bug fix: the previous version initialised the running end time to
        -1, which wrongly rejected a valid schedule whose first meeting
        starts before time -1; comparing adjacent sorted intervals needs
        no sentinel at all.
        """
        ordered = sorted(intervals)
        # Sorted by start time, the schedule is conflict-free exactly when
        # each meeting starts no earlier than the previous one ends.
        return all(prev[1] <= nxt[0] for prev, nxt in zip(ordered, ordered[1:]))
| StarcoderdataPython |
20542 | import os
from datetime import datetime
import sys
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from jet_bridge_base import configuration
from jet_bridge.configuration import JetBridgeConfiguration
conf = JetBridgeConfiguration()
configuration.set_configuration(conf)
from jet_bridge_base.commands.check_token import check_token_command
from jet_bridge_base.db import database_connect
from jet_bridge_base.logger import logger
from jet_bridge import settings, VERSION
from jet_bridge.settings import missing_options, required_options_without_default
def main():
    """Entry point for the Jet Bridge server: handles the ``config`` and
    ``check_token`` subcommands, then boots the Tornado HTTP server."""
    args = sys.argv[1:]
    # An ARGS environment variable overrides the command-line arguments.
    if 'ARGS' in os.environ:
        args = os.environ['ARGS'].split(' ')
    logger.info(datetime.now().strftime('%B %d, %Y - %H:%M:%S %Z'))
    logger.info('Jet Bridge version {}'.format(VERSION))
    # Run the interactive config wizard when requested explicitly or when
    # none of the required options are configured at all.
    if (len(args) >= 1 and args[0] == 'config') or missing_options == required_options_without_default:
        from jet_bridge.utils.create_config import create_config
        create_config(missing_options == required_options_without_default)
        return
    elif len(missing_options) and len(missing_options) < len(required_options_without_default):
        logger.info('Required options are not specified: {}'.format(', '.join(missing_options)))
        return
    address = 'localhost' if settings.ADDRESS == '0.0.0.0' else settings.ADDRESS
    url = 'http://{}:{}/'.format(address, settings.PORT)
    api_url = '{}api/'.format(url)
    if len(args) >= 1:
        if args[0] == 'check_token':
            check_token_command(api_url)
            return
    database_connect()
    # Imported lazily so the app is only constructed after DB connection.
    from jet_bridge.app import make_app
    app = make_app()
    server = HTTPServer(app)
    server.bind(settings.PORT, settings.ADDRESS)
    # DEBUG mode forces a single worker process.
    server.start(settings.WORKERS if not settings.DEBUG else 1)
    if settings.WORKERS > 1 and settings.DEBUG:
        logger.warning('Multiple workers are not supported in DEBUG mode')
    logger.info('Starting server at {}'.format(url))
    if settings.DEBUG:
        logger.warning('Server is running in DEBUG mode')
    logger.info('Quit the server with CONTROL-C')
    check_token_command(api_url)
    # Blocks until the IOLoop is stopped (e.g. CTRL-C).
    IOLoop.current().start()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6558764 | <filename>resources/colors.py
# RGB color constants (8-bit channels); names use the US spelling "color".
white = (255, 255, 255)
background = (10, 10, 30)  # very dark blue
backup = (125, 125, 125)  # mid grey
| StarcoderdataPython |
1885050 | from parallelm.mlapp_directory.mlapp_from_directory_builder import MLAppFromDirectoryBuilder
from parallelm.mlapp_directory.mlapp_defs import MLAppProfileKeywords
from parallelm.mcenter_cli.delete_mlapp import MLAppDeleteHelper
def _is_mlapp_exists(mclient, mlapp_name):
    """Return True if an MLApp profile with the given name is already registered."""
    profile_names = (info[MLAppProfileKeywords.NAME] for info in mclient.list_ion_profiles())
    return any(name == mlapp_name for name in profile_names)
def upload_mlapp_v2(mclient, mlapp_dir, force, ee=None):
    """Build and upload an MLApp from a directory.

    When an MLApp with the same name already exists it is deleted first if
    `force` is set; otherwise an exception is raised.
    """
    builder = MLAppFromDirectoryBuilder(mclient, mlapp_dir).set_ee(ee)
    name = builder.get_mlapp_name()
    if _is_mlapp_exists(mclient, name):
        if not force:
            raise Exception("MLApp [{}] exists, to override see help".format(name))
        MLAppDeleteHelper(mclient, name, dry_run=False).delete()
    builder.build()
| StarcoderdataPython |
3360972 | import sys
import os
import json
import bsddb
import marshal
TEMP_DATA_FILE = "old_temp_data.db"
def get_data_keys():
    """Return the set of TempFiles row keys that still hold data.

    Rows are discovered with a range scan; empty rows are "range ghosts"
    left behind by deletions and are filtered out.
    """
    data_keys = set()
    for key,val in cass.getRowRange(TEMPFILES, columns=['size']):
        # Have to check for a column since deleted rows can still be returned here
        # See: http://wiki.apache.org/cassandra/FAQ#range_ghosts
        if len(val) > 0:
            data_keys.add(key)
    return data_keys
def get_referenced_keys():
    """Return the set of TempFiles keys referenced by in-progress uploads.

    Scans the Users column family for "uploading:*" columns and collects
    both the main row key and every subfile key from each upload record.
    """
    unique_keys = set()
    for row_key, row_data in cass.getRowRange(USERS,
                                              column_start="uploading:.",
                                              column_finish="uploading:~"):
        for column_key, column_data in row_data.iteritems():
            parsed_column = json.loads(column_data)
            main_rowkey = parsed_column['main_rowkey']
            unique_keys.add(main_rowkey)
            for subfile_name, subfile_key in parsed_column['subfiles'].iteritems():
                unique_keys.add(subfile_key)
    return unique_keys
def save_unused_keys(unused_keys):
    """Back up each unused row into the local BDB file before deletion.

    Returns the total byte count reported by each row's 'size' column.
    """
    size_ct = 0
    # 'n' truncates/creates the backup file fresh for this run.
    db = bsddb.hashopen(TEMP_DATA_FILE, 'n')
    for unused_key in unused_keys:
        key_data = cass.getRecord(TEMPFILES, unused_key, column_count=10000)
        size_ct += int(key_data['size'])
        db[unused_key] = marshal.dumps(dict(key_data))
    db.close()
    return size_ct
def read_back_db():
print 'CHECKING FOR PREVIOUS DB FILE'
print '-----------------------------'
try:
db = bsddb.hashopen(TEMP_DATA_FILE, 'r')
except bsddb.db.DBNoSuchFileError:
print 'Did not find a temporary BDB file. Skipping read-back step.'
return
previous_keys = db.keys()
print 'Found', len(previous_keys), 'keys in previous data file.'
if len(previous_keys) == 0:
print 'Since no keys in previous file, continuing.'
return
referenced_keys = get_referenced_keys()
print 'Checking', len(referenced_keys), 'referenced keys for previous items'
still_referenced = referenced_keys.intersection(previous_keys)
print 'Found', len(still_referenced), 'keys previously deleted that are now referenced.'
if len(still_referenced) == 0:
erase_check = raw_input('No previously deleted keys are referenced. Okay to erase old db (y/n)? ')
if erase_check.upper().strip() == 'Y':
print 'Okay, continuing.'
return
else:
print 'Okay, nothing left to do here. Exiting.'
sys.exit(0)
else:
rewrite_check = raw_input('Would you like me to rewrite the %d keys (y/n)? ' % (len(still_referenced),))
if rewrite_check.upper().strip() == 'Y':
print 'Okay, going to write keys now.'
for previous_key in still_referenced:
previous_data = marshal.loads(db[previous_key])
print 'Writing key', previous_key, 'size', len(data)
cass.insertRecord(TEMPFILES, previous_key, previous_data)
print 'Finished writing', len(still_referenced), 'keys. Exiting'
sys.exit(0)
else:
print 'Okay, nothing left to do here. Exiting.'
sys.exit(0)
def delete_unused(unused_keys):
    """Interactively confirm, then delete the given TempFiles rows.

    Always terminates the process; the BDB backup written earlier is the
    only way to restore rows removed here.
    """
    delete_check = raw_input('Shall I delete %d unreferenced keys (y/n)? ' % len(unused_keys))
    if delete_check.upper().strip() == 'Y':
        print 'Okay, deleting keys...'
        for unused_key in unused_keys:
            print 'Deleting', unused_key
            cass.removeRecord(TEMPFILES, unused_key)
        print "Finished deleting %d keys. Backup is saved at '%s' Exiting." % (len(unused_keys), TEMP_DATA_FILE)
        sys.exit(0)
    else:
        print 'Okay, nothing to do here. Exiting.'
        sys.exit(0)
def main():
    """Full cleanup pass over TempFiles.

    Order: validate/restore any previous backup, diff stored keys against
    referenced keys, back up the unreferenced rows locally, then offer to
    delete them.
    """
    read_back_db()
    print
    print 'LOOKING FOR UNUSED KEYS TO CLEAN UP'
    print '-----------------------------------'
    data_keys = get_data_keys()
    print 'Found', len(data_keys), 'data keys in the database.'
    referenced_keys = get_referenced_keys()
    print 'Found', len(referenced_keys), 'referenced keys'
    unused_keys = data_keys.difference(referenced_keys)
    print 'Found', len(unused_keys), 'unused keys that can be deleted'
    print 'Fetching unused keys from database and writing to temp file'
    size_ct = save_unused_keys(unused_keys)
    print 'Wrote', size_ct, 'bytes to temporary db file'
    if len(unused_keys) == 0:
        print 'Since no unused keys, nothing left to do. Exiting.'
        sys.exit(0)
    else:
        delete_unused(unused_keys)
def add_dirs():
    """Point Django at the local settings module and extend sys.path.

    Adds the sibling 'sirikata-cdn' checkout and its 'celery_tasks'
    package directory so their modules can be imported.
    """
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    script_dir = os.path.dirname(os.path.realpath(__file__))
    parent_dir = os.path.dirname(script_dir)
    cdn_dir = os.path.join(parent_dir, 'sirikata-cdn')
    for extra_dir in (cdn_dir, os.path.join(cdn_dir, 'celery_tasks')):
        sys.path.append(extra_dir)
if __name__ == '__main__':
    # Imports are deferred: sys.path must be extended by add_dirs() before
    # the sirikata-cdn packages can be imported.
    add_dirs()
    import cassandra_storage.cassandra_util as cass
    from celery_tasks.import_upload import get_temp_file
    TEMPFILES = cass.getColumnFamily("TempFiles")
    USERS = cass.getColumnFamily('Users')
    main()
| StarcoderdataPython |
import sys
# Read all of stdin; the first line is a count header we ignore.
lines = []
for line in sys.stdin:
    lines.append(line.rstrip('\n'))
# For each remaining line, answer "YES" when it is an even-length string
# whose first half equals its second half (some string repeated twice),
# otherwise "NO".
for line in lines[1:]:
    if (len(line) % 2 == 0 and line[0:round(len(line)/2)] == line[round(len(line)/2):]):
        print("YES")
    else:
        print("NO")
8173642 | <reponame>chnghia/pytorch-lightning-gan<filename>models/modules.py
import os
import numpy as np
from PIL import Image
from torch.utils import data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
import torchvision.transforms as transforms
class Dataset(data.Dataset):
    """Image-file dataset: loads one image per path, paired with its label."""
    def __init__(self, filenames, labels, transform=None):
        """Store parallel lists of image paths and integer labels.

        transform: optional torchvision transform applied to each PIL image.
        """
        self.filenames = filenames
        self.labels = labels
        self.transform = transform
    def __len__(self):
        """Return the total number of samples."""
        return len(self.filenames)
    def __getitem__(self, index):
        """Load sample `index` as (transformed image, LongTensor label)."""
        # Select sample
        filename = self.filenames[index]
        X = Image.open(filename)
        if self.transform:
            X = self.transform(X)  # transform
        y = torch.LongTensor([self.labels[index]])
        return X, y
## ---------------------- end of Dataloaders ---------------------- ##
def conv2D_output_size(img_size, padding, kernel_size, stride):
    """Spatial (H, W) output size of a 2D convolution (standard formula)."""
    def _dim(size, pad, kernel, step):
        # floor((size + 2p - (k - 1) - 1) / s + 1), as an int
        return np.floor((size + 2 * pad - (kernel - 1) - 1) / step + 1).astype(int)
    return (_dim(img_size[0], padding[0], kernel_size[0], stride[0]),
            _dim(img_size[1], padding[1], kernel_size[1], stride[1]))
def convtrans2D_output_size(img_size, padding, kernel_size, stride):
    """Spatial (H, W) output size of a 2D transposed convolution."""
    height = (img_size[0] - 1) * stride[0] - 2 * padding[0] + kernel_size[0]
    width = (img_size[1] - 1) * stride[1] - 2 * padding[1] + kernel_size[1]
    return (height, width)
## ---------------------- ResNet VAE ---------------------- ##
class ResNet_VAE(nn.Module):
    """Variational autoencoder with a pretrained ResNet-152 encoder.

    Encoder: ResNet-152 (minus final FC) -> two FC layers -> (mu, logvar).
    Decoder: FC layers -> 64x4x4 feature map -> three transposed-conv
    stages -> bilinear upsample to a 224x224, 3-channel output in [0, 1].
    """
    def __init__(self, fc_hidden1=1024, fc_hidden2=768, drop_p=0.3, CNN_embed_dim=256):
        # NOTE(review): drop_p is accepted but never stored; the dropout
        # call in encode() is commented out and references self.drop_p,
        # which would fail if re-enabled -- confirm intended.
        super(ResNet_VAE, self).__init__()
        self.fc_hidden1, self.fc_hidden2, self.CNN_embed_dim = fc_hidden1, fc_hidden2, CNN_embed_dim
        # CNN architectures
        self.ch1, self.ch2, self.ch3, self.ch4 = 16, 32, 64, 128
        self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3)  # 2d kernel size
        self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2)  # 2d strides
        self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0)  # 2d padding
        # encoding components
        resnet = models.resnet152(pretrained=True)
        modules = list(resnet.children())[:-1]  # delete the last fc layer.
        self.resnet = nn.Sequential(*modules)
        self.fc1 = nn.Linear(resnet.fc.in_features, self.fc_hidden1)
        self.bn1 = nn.BatchNorm1d(self.fc_hidden1, momentum=0.01)
        self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
        self.bn2 = nn.BatchNorm1d(self.fc_hidden2, momentum=0.01)
        # Latent vectors mu and sigma
        self.fc3_mu = nn.Linear(self.fc_hidden2, self.CNN_embed_dim)  # output = CNN embedding latent variables
        self.fc3_logvar = nn.Linear(self.fc_hidden2, self.CNN_embed_dim)  # output = CNN embedding latent variables
        # Sampling vector
        self.fc4 = nn.Linear(self.CNN_embed_dim, self.fc_hidden2)
        self.fc_bn4 = nn.BatchNorm1d(self.fc_hidden2)
        self.fc5 = nn.Linear(self.fc_hidden2, 64 * 4 * 4)
        self.fc_bn5 = nn.BatchNorm1d(64 * 4 * 4)
        self.relu = nn.ReLU(inplace=True)
        # Decoder
        self.convTrans6 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=self.k4, stride=self.s4,
                               padding=self.pd4),
            nn.BatchNorm2d(32, momentum=0.01),
            nn.ReLU(inplace=True),
        )
        self.convTrans7 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=32, out_channels=8, kernel_size=self.k3, stride=self.s3,
                               padding=self.pd3),
            nn.BatchNorm2d(8, momentum=0.01),
            nn.ReLU(inplace=True),
        )
        self.convTrans8 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=8, out_channels=3, kernel_size=self.k2, stride=self.s2,
                               padding=self.pd2),
            nn.BatchNorm2d(3, momentum=0.01),
            nn.Sigmoid()  # y = (y1, y2, y3) \in [0 ,1]^3
        )
    def encode(self, x):
        """Map an input batch to the latent-Gaussian parameters (mu, logvar)."""
        x = self.resnet(x)  # ResNet
        x = x.view(x.size(0), -1)  # flatten output of conv
        # FC layers
        x = self.bn1(self.fc1(x))
        x = self.relu(x)
        x = self.bn2(self.fc2(x))
        x = self.relu(x)
        # x = F.dropout(x, p=self.drop_p, training=self.training)
        mu, logvar = self.fc3_mu(x), self.fc3_logvar(x)
        return mu, logvar
    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) during training; return mu at eval time."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:
            return mu
    def decode(self, z):
        """Reconstruct a 3x224x224 image batch from latent codes z."""
        x = self.relu(self.fc_bn4(self.fc4(z)))
        x = self.relu(self.fc_bn5(self.fc5(x))).view(-1, 64, 4, 4)
        x = self.convTrans6(x)
        x = self.convTrans7(x)
        x = self.convTrans8(x)
        # Upsample whatever the transposed convs produced to a fixed 224x224.
        x = F.interpolate(x, size=(224, 224), mode='bilinear')
        return x
    def forward(self, x):
        """Return (reconstruction, z, mu, logvar) for the input batch."""
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        x_reconst = self.decode(z)
        return x_reconst, z, mu, logvar
3409237 | <reponame>onap/optf-osdf
# -------------------------------------------------------------------------
# Copyright (c) 2020 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import json
import subprocess
import traceback
from datetime import datetime
from osdf.logging.osdf_logging import error_log, debug_log
from osdf.utils.file_utils import delete_file_folder
def py_solver(py_content, opt_info):
    """Run user-supplied optimizer code against the request's data.

    Writes the code and its input payload to temp files, executes
    ``python <code> <input> <output>`` as a subprocess, and parses the JSON
    output file. Returns ('success', solution_dict) or ('error', {}).
    Temp files are always cleaned up.
    """
    now = datetime.timestamp(datetime.now())
    py_file = '/tmp/custom_heuristics_{}.py'.format(now)
    with open(py_file, "wt") as f:
        f.write(py_content)
    opt_data = opt_info['optData']
    if opt_data.get('json'):
        data_content = json.dumps(opt_data['json'])
        input_file = '/tmp/optim_engine_{}.json'.format(now)
    elif opt_data.get('text'):
        data_content = opt_data['text']
        input_file = '/tmp/optim_engine_{}.txt'.format(now)
    else:
        # BUGFIX: previously fell through with data_content/input_file
        # undefined, raising a confusing NameError below.
        error_log.error("optData contains neither 'json' nor 'text' payload")
        cleanup((py_file,))
        return 'error', {}
    with open(input_file, "wt") as f:
        f.write(data_content)
    output_file = '/tmp/opteng_output_{}.json'.format(now)
    command = ['python', py_file, input_file, output_file]
    try:
        p = subprocess.run(command, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        debug_log.debug('Process return code {}'.format(p.returncode))
        if p.returncode > 0:
            error_log.error('Process return code {} {}'.format(p.returncode, p.stdout))
            return 'error', {}
        # Renamed from 'file' to avoid shadowing the builtin.
        with open(output_file) as result_file:
            data = result_file.read()
        return 'success', json.loads(data)
    except Exception:
        error_log.error("Error running optimizer {}".format(traceback.format_exc()))
        return 'error', {}
    finally:
        cleanup((input_file, output_file, py_file))
def cleanup(file_tup):
    """Best-effort removal of temp artifacts; failures are logged, never raised."""
    for path in file_tup:
        try:
            delete_file_folder(path)
        except Exception:
            error_log.error("Failed deleting the file {} - {}".format(path, traceback.format_exc()))
def solve(request_json, py_content):
    """Run the custom optimizer and build the (http_status, json_body) reply."""
    req_info = request_json['requestInfo']
    opt_info = request_json['optimInfo']
    # Fields common to both the success and failure responses.
    response = {
        'transactionId': req_info['transactionId'],
        'requestID': req_info['requestID'],
    }
    try:
        status, solution = py_solver(py_content, opt_info)
        response['requestStatus'] = status
        response['statusMessage'] = "completed"
        response['solutions'] = solution if solution else {}
        return 200, json.dumps(response)
    except Exception as e:
        response['requestStatus'] = 'failed'
        response['statusMessage'] = 'Failed due to {}'.format(e)
        return 400, json.dumps(response)
| StarcoderdataPython |
1818624 | import os
import unittest
from unittest.mock import patch
from kubernetes import config
from xcube_hub.k8scfg import K8sCfg
class TestK8sCfg(unittest.TestCase):
    """Tests for K8sCfg's lazy kubernetes configuration loading."""
    @patch.object(config, 'load_incluster_config')
    def test_load_config_once(self, incluster_cfg_p):
        """load_config_once loads the in-cluster config and sets the flag."""
        K8sCfg.load_config_once()
        self.assertTrue(K8sCfg._config_loaded)
        incluster_cfg_p.assert_called_once()
    @patch.object(config, 'load_incluster_config')
    @patch.object(config, 'load_kube_config')
    def test_load_config(self, cfg_p, incluster_cfg_p):
        """_load_config picks kube config when XCUBE_HUB_RUN_LOCAL=1,
        otherwise the in-cluster config."""
        os.environ['XCUBE_HUB_RUN_LOCAL'] = "1"
        K8sCfg._load_config()
        cfg_p.assert_called_once()
        os.environ['XCUBE_HUB_RUN_LOCAL'] = "0"
        K8sCfg._load_config()
        incluster_cfg_p.assert_called_once()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
4885527 | from viraal.config.config import *
| StarcoderdataPython |
3576472 | <filename>LeetCode/python/tree/populating-next-right-pointers-in-each-node[1].py
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
    def connect(self, root):
        """Wire each node's `next` pointer to its right neighbor on the
        same level of a perfect binary tree, using O(1) extra space.

        Walks level by level: the already-wired `next` pointers of the
        current level are used to link the children one level below.
        """
        if root is None:
            return
        level_head = root
        # Descend one level per outer iteration; stop at the leaf level.
        while level_head and level_head.left:
            node = level_head
            while node:
                # Link this node's two children...
                node.left.next = node.right
                # ...and bridge to the next parent's left child, if any.
                if node.next:
                    node.right.next = node.next.left
                node = node.next
            level_head = level_head.left
| StarcoderdataPython |
# Read one character from the user and report its code point via ord().
a = input("Enter a Character: ")
print("The ASCII value of " + a + " is", ord(a))
11371018 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
def retrieve_model(model, **kwargs):
    """Download (once) and unpack the shared models archive, then load `model`.

    Only '.pkl' files are supported (loaded via joblib); any other
    extension raises. kwargs are forwarded to joblib.load.
    """
    # if data not extracted, download zip and extract
    outdirname = 'models.5.15.2019'
    if not os.path.exists(outdirname):
        # Python 2/3 compatibility shim for urlretrieve.
        try:
            from urllib import urlretrieve
        except ImportError:
            from urllib.request import urlretrieve
        import zipfile
        zipfilename = outdirname + '.zip'
        urlretrieve('https://publictestdatasets.blob.core.windows.net/data/' + zipfilename, zipfilename)
        with zipfile.ZipFile(zipfilename, 'r') as unzip:
            unzip.extractall('.')
    extension = os.path.splitext(model)[1]
    filepath = os.path.join(outdirname, model)
    if extension == '.pkl':
        from joblib import load
        return load(filepath, **kwargs)
    else:
        raise Exception('Unrecognized file extension: ' + extension)
| StarcoderdataPython |
6543144 | <reponame>Remydeme/zodiac
import unittest
import pytest
from dateutil import parser
from GmailAnalyser import GmailAnalytics,GmailClient
@pytest.fixture(scope="module")
def sutime():
    """Module-scoped Gmail client shared by the tests below."""
    return GmailClient()

# BUGFIX: these module-level test functions previously declared a `self`
# parameter; pytest treats unknown parameters as fixtures, so every test
# errored with "fixture 'self' not found". They are plain functions.
def test_fetch_message_from_forum():
    # TODO: implement -- fetch a forum message via the client.
    pass

def test_fetch_2_page_from_forum():
    # TODO: implement -- fetch two result pages from the forum.
    pass

def test_hot_trend_function():
    # TODO: implement -- exercise the hot-trend computation.
    pass

def test_is_respons():
    # TODO: implement -- check response detection.
    pass
6668776 | # 792->心理科普 876->婚恋情感 660->家庭关系 2206->人际社交 2199->自我察觉 862->成长学习 823->心理健康 844->职场技能
ARTICLE_CLASS = {'792': '心理科普', '876': '婚恋情感', '660': '家庭关系',
'2206': '人际社交', '2199': '自我察觉', '862': '成长学习', '823': '心理健康', '844': '职场技能'}
PAGEREAD = 2
PAGEQUESTIONANDANSWER = 6
# mongodb数据库链接
MONGO_URL = 'localhost'
MONGO_DB = 'InnerVoice'
MONGO_TABLE_Read = "Read"
| StarcoderdataPython |
4883684 | import docker
import random
import thread
import time
import os
import psutil
from gnlpy.cgroupstats import CgroupstatsClient
client = docker.from_env()
JOB_RANDOM_PARAM = 0.7
TIME_DELAY_JOBS_MIN = 3
TIME_DELAY_JOBS_MAX = 7
def stop_all_containers():
    """Stop every currently running Docker container on this host."""
    for container in client.containers.list():
        container.stop()
def create_directories(containers):
    """Create the local output tree: data/<container-name>/ per container.

    Existing directories are tolerated. BUGFIX: the original wrapped the
    whole loop in a single try/except, so the first pre-existing container
    directory silently aborted creation of every remaining one; each mkdir
    now has its own guard, and only OSError (not all exceptions) is
    swallowed.
    """
    try:
        os.mkdir('data')
    except OSError:
        # 'data' already exists -- fine.
        pass
    for container in containers:
        try:
            os.mkdir("data/" + container.name)
        except OSError:
            pass
def collect_container(container):
    """Snapshot per-process and per-cgroup metrics for one container.

    Takes the container's `top` output, appends psutil/procfs/cgroup
    metrics to every process row, and writes the result as a timestamped
    CSV under data/<container-name>/. Assumes a Linux host with /proc and
    cgroup v1 paths under /sys/fs/cgroup.
    """
    top_output = container.top(ps_args='aux')
    # additional_data_add to top_output
    titles = top_output['Titles']
    titles.append('Timestamp')
    titles.append('OOM_Score')
    titles.append('io_read_count')
    titles.append('io_write_count')
    titles.append('io_read_bytes')
    titles.append('io_write_bytes')
    titles.append('io_read_chars')
    titles.append('io_write_chars')
    titles.append('num_fds')
    titles.append('num_ctx_switches_voluntary')
    titles.append('num_ctx_switches_involuntary')
    titles.append('mem_rss')
    titles.append('mem_vms')
    titles.append('mem_shared')
    titles.append('mem_text')
    titles.append('mem_lib')
    titles.append('mem_data')
    titles.append('mem_dirty')
    titles.append('mem_uss')
    titles.append('mem_pss')
    titles.append('mem_swap')
    titles.append('num_threads')
    titles.append('cpu_time_user')
    titles.append('cpu_time_system')
    titles.append('cpu_time_children_user')
    titles.append('cpu_time_children_system')
    # Container metrics
    titles.append('container_nr_sleeping')
    titles.append('container_nr_running')
    titles.append('container_nr_stopped')
    titles.append('container_nr_uninterruptible')
    titles.append('container_nr_iowait')
    titles.append('container_under_oom')
    top_output['Titles'] = titles
    # One timestamp for the whole snapshot; also used as the CSV filename.
    timestamp = str(int(time.time()))
    procs = top_output['Processes']
    for row in procs:
        row.append(timestamp)
        # Column 1 of `ps aux` output is the PID.
        pid = row[1]
        process_obj = psutil.Process(pid=int(pid))
        # oom_score
        with open('/proc/{}/oom_score'.format(pid), 'r') as f:
            oom_score = int(f.read())
        row.append(str(oom_score))
        # proc io_counters
        io_counters = process_obj.io_counters()
        row.append(io_counters.read_count)
        row.append(io_counters.write_count)
        row.append(io_counters.read_bytes)
        row.append(io_counters.write_bytes)
        row.append(io_counters.read_chars)
        row.append(io_counters.write_chars)
        # proc number-of-file-descriptors
        row.append(process_obj.num_fds())
        # proc number-context-switches, voluntary and involuntary
        row.append(process_obj.num_ctx_switches().voluntary)
        row.append(process_obj.num_ctx_switches().involuntary)
        # proc memory params full
        mem_obj = process_obj.memory_full_info()
        row.append(mem_obj.rss)
        row.append(mem_obj.vms)
        row.append(mem_obj.shared)
        row.append(mem_obj.text)
        row.append(mem_obj.lib)
        row.append(mem_obj.data)
        row.append(mem_obj.dirty)
        row.append(mem_obj.uss)
        row.append(mem_obj.pss)
        row.append(mem_obj.swap)
        # proc num_threads
        row.append(process_obj.num_threads())
        # proc cpu times
        cpu_time_obj = process_obj.cpu_times()
        row.append(cpu_time_obj.user)
        row.append(cpu_time_obj.system)
        row.append(cpu_time_obj.children_user)
        row.append(cpu_time_obj.children_system)
        # container level metrics (cgroup task-state counters via netlink)
        c = CgroupstatsClient()
        cgrp_metrics_obj = c.get_cgroup_stats("/sys/fs/cgroup/cpu/docker/{}".format(container.id))
        row.append(cgrp_metrics_obj.nr_sleeping)
        row.append(cgrp_metrics_obj.nr_running)
        row.append(cgrp_metrics_obj.nr_stopped)
        row.append(cgrp_metrics_obj.nr_uninterruptible)
        row.append(cgrp_metrics_obj.nr_iowait)
        # under_oom
        with open('/sys/fs/cgroup/memory/docker/{}/memory.oom_control'.format(container.id), 'r') as f:
            under_oom = f.readlines()[1].split()[1]
        row.append(str(under_oom))
    with open("data/{}/{}.csv".format(container.name, timestamp), 'wb') as f:
        f.write(",".join(top_output['Titles']))
        f.write('\n')
        for row in top_output['Processes']:
            f.write(",".join([str(item) for item in row]))
            f.write('\n')
def start_collection_all_containers(containers):
    """Poll forever: once per second, snapshot each container in its own thread."""
    while True:
        for container in containers:
            thread.start_new_thread(collect_container, (container,))
        time.sleep(1)
if __name__ == '__main__':
    # Snapshot the currently running containers and start the 1 Hz
    # collection loop (runs until interrupted).
    containers = client.containers.list()
    print containers
    create_directories(containers)
    start_collection_all_containers(containers)
    # stop_all_containers()
| StarcoderdataPython |
9654280 | <gh_stars>1-10
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 <NAME>
#
import jasy.script.api.Text as Text
from jasy.script.util import *
import jasy.core.Console as Console
from jasy import UserError
class ApiData():
"""
Container for all relevant API data.
Automatically generated, filled and cached by jasy.item.Script.getApiDocs().
"""
__slots__ = [
"main", "construct", "statics", "properties", "events", "members",
"id",
"package", "basename",
"errors", "size", "assets", "permutations",
"content", "isEmpty",
"uses", "usedBy",
"includes", "includedBy",
"implements", "implementedBy",
"highlight"
]
def __init__(self, id, highlight=True):
self.id = id
self.highlight = highlight
splits = id.split(".")
self.basename = splits.pop()
self.package = ".".join(splits)
self.isEmpty = False
self.uses = set()
self.main = {
"type" : "Unsupported",
"name" : id,
"line" : 1
}
def addSize(self, size):
"""Adds the statistics on different size aspects."""
self.size = size
def addAssets(self, assets):
"""Adds the info about used assets."""
self.assets = assets
def addUses(self, uses):
self.uses.add(uses)
def removeUses(self, uses):
self.uses.remove(uses)
def addFields(self, permutations):
self.permutations = permutations
def scanTree(self, tree):
self.uses.update(tree.scope.shared)
for package in tree.scope.packages:
splits = package.split(".")
current = splits[0]
for split in splits[1:]:
current = "%s.%s" % (current, split)
self.uses.add(current)
try:
if not self.__processTree(tree):
self.main["errornous"] = True
except UserError as myError:
raise myError
except Exception as error:
self.main["errors"] = ({
"line": 1,
"message": "%s" % error
})
self.main["errornous"] = True
self.warn("Error during processing file: %s" % error, 1)
def __processTree(self, tree):
success = False
callNode = findCall(tree, ("core.Module", "core.Interface", "core.Class", "core.Main.declareNamespace"))
if callNode:
callName = getCallName(callNode)
#
# core.Module
#
if callName == "core.Module":
self.setMain(callName, callNode.parent, self.id)
staticsMap = getParameterFromCall(callNode, 1)
if staticsMap:
success = True
self.statics = {}
for staticsEntry in staticsMap:
self.addEntry(staticsEntry[0].value, staticsEntry[1], staticsEntry, self.statics)
else:
self.warn("Invalid core.Module()", callNode.line)
#
# core.Interface
#
elif callName == "core.Interface":
self.setMain(callName, callNode.parent, self.id)
configMap = getParameterFromCall(callNode, 1)
if configMap:
success = True
for propertyInit in configMap:
sectionName = propertyInit[0].value
sectionValue = propertyInit[1]
if sectionName == "properties":
self.properties = {}
for propertyEntry in sectionValue:
self.addProperty(propertyEntry[0].value, propertyEntry[1], propertyEntry, self.properties)
elif sectionName == "events":
self.events = {}
for eventEntry in sectionValue:
self.addEvent(eventEntry[0].value, eventEntry[1], eventEntry, self.events)
elif sectionName == "members":
self.members = {}
for memberEntry in sectionValue:
self.addEntry(memberEntry[0].value, memberEntry[1], memberEntry, self.members)
else:
self.warn('Invalid core.Interface section "%s"' % sectionName, propertyInit.line)
else:
self.warn("Invalid core.Interface()", callNode.line)
#
# core.Class
#
elif callName == "core.Class":
self.setMain(callName, callNode.parent, self.id)
configMap = getParameterFromCall(callNode, 1)
if configMap:
success = True
for propertyInit in configMap:
sectionName = propertyInit[0].value
sectionValue = propertyInit[1]
if sectionName == "construct":
self.addConstructor(sectionValue, propertyInit)
elif sectionName == "properties":
self.properties = {}
for propertyEntry in sectionValue:
self.addProperty(propertyEntry[0].value, propertyEntry[1], propertyEntry, self.properties)
elif sectionName == "events":
self.events = {}
for eventEntry in sectionValue:
self.addEvent(eventEntry[0].value, eventEntry[1], eventEntry, self.events)
elif sectionName == "members":
self.members = {}
for memberEntry in sectionValue:
self.addEntry(memberEntry[0].value, memberEntry[1], memberEntry, self.members)
elif sectionName == "include":
self.includes = [valueToString(entry) for entry in sectionValue]
elif sectionName == "implement":
self.implements = [valueToString(entry) for entry in sectionValue]
elif sectionName == "pooling":
# TODO
pass
else:
self.warn('Invalid core.Class section "%s"' % sectionName, propertyInit.line)
else:
self.warn("Invalid core.Class()", callNode.line)
#
# core.Main.declareNamespace
#
elif callName == "core.Main.declareNamespace":
target = getParameterFromCall(callNode, 0)
assigned = getParameterFromCall(callNode, 1)
if target:
success = True
if assigned and assigned.type == "function":
# Use callNode call for constructor, find first doc comment for main documentation
self.setMain("core.Main", findCommentNode(tree), target.value)
self.addConstructor(assigned, callNode.parent)
else:
self.setMain("core.Main", callNode.parent, target.value)
if assigned and assigned.type == "object_init":
self.statics = {}
for staticsEntry in assigned:
self.addEntry(staticsEntry[0].value, staticsEntry[1], staticsEntry, self.statics)
#
# Handle plain JS namespace -> object assignments
#
else:
def assignMatcher(node):
if node.type == "assign" and node[0].type == "dot":
if node[1].type == "object_init":
doc = getDocComment(node.parent)
if not doc is None:
return True
elif node[1].type == "function":
doc = getDocComment(node.parent)
if not doc is None:
return True
return False
result = query(tree, assignMatcher)
if not result is None:
name = assembleDot(result[0])
self.setMain("Native", result.parent, name)
success = True
if result[1].type == "object_init":
# Ingore empty objects and do not produce namespaces for them
#
# e.g. some.namespace.foo = {};
if len(result[1]) == 0:
success = False
self.isEmpty = True
self.statics = {}
for prop in result[1]:
self.addEntry(prop[0].value, prop[1], prop, self.statics)
elif result[1].type == "function":
self.addConstructor(result[1], result.parent)
def memberMatcher(node):
if node is not result and node.type == "assign" and node[0].type == "dot":
assignName = assembleDot(node[0])
if assignName is not None and assignName != name and assignName.startswith(name) and len(assignName) > len(name):
localName = assignName[len(name):]
if localName.startswith("."):
localName = localName[1:]
# Support for MyClass.prototype.memberFoo = function() {}
if "." in localName:
splittedLocalName = localName.split(".")
if len(splittedLocalName) == 2 and splittedLocalName[0] == "prototype":
if not hasattr(self, "members"):
self.members = {}
self.addEntry(splittedLocalName[1], node[1], node.parent, self.members)
# Support for MyClass.staticFoo = function() {}
elif localName != "prototype":
if not hasattr(self, "statics"):
self.statics = {}
self.addEntry(localName, node[1], node.parent, self.statics)
else:
if not hasattr(self, "members"):
self.members = {}
# Support for MyClass.prototype = {};
if node[1].type == "object_init":
membersMap = node[1]
for membersEntry in membersMap:
self.addEntry(membersEntry[0].value, membersEntry[1], membersEntry, self.members)
# Support for MyClass.prototype = new BaseClass;
elif node[1].type == "new" or node[1].type == "new_with_args":
self.includes = [valueToString(node[1][0])]
queryAll(tree, memberMatcher)
#
# core.Main.addStatics
#
# addStatics = findCall(tree, "core.Main.addStatics")
# if addStatics:
# target = getParameterFromCall(addStatics, 0)
# staticsMap = getParameterFromCall(addStatics, 1)
#
# if target and staticsMap and target.type == "string" and staticsMap.type == "object_init":
#
# if self.main["type"] == "Unsupported":
# self.setMain("core.Main", addStatics.parent, target.value)
#
# success = True
# if not hasattr(self, "statics"):
# self.statics = {}
#
# for staticsEntry in staticsMap:
# self.addEntry(staticsEntry[0].value, staticsEntry[1], staticsEntry, self.statics)
#
# else:
# self.warn("Invalid core.Main.addStatics()")
#
# core.Main.addMembers
#
# addMembers = findCall(tree, "core.Main.addMembers")
# if addMembers:
# target = getParameterFromCall(addMembers, 0)
# membersMap = getParameterFromCall(addMembers, 1)
#
# if target and membersMap and target.type == "string" and membersMap.type == "object_init":
#
# if self.main["type"] == "Unsupported":
# self.setMain("core.Main", addMembers.parent, target.value)
#
# success = True
# if not hasattr(self, "members"):
# self.members = {}
#
# for membersEntry in membersMap:
# self.addEntry(membersEntry[0].value, membersEntry[1], membersEntry, self.members)
#
# else:
# self.warn("Invalid core.Main.addMembers()")
#
return success
def export(self):
ret = {}
for name in self.__slots__:
if hasattr(self, name):
ret[name] = getattr(self, name)
return ret
def warn(self, message, line):
Console.warn("%s at line %s in %s" % (message, line, self.id))
def setMain(self, mainType, mainNode, exportName):
callComment = getDocComment(mainNode)
entry = self.main = {
"type" : mainType,
"name" : exportName,
"line" : mainNode.line
}
if callComment:
if callComment.text:
html = callComment.getHtml(self.highlight)
entry["doc"] = html
entry["summary"] = Text.extractSummary(html)
if hasattr(callComment, "tags"):
entry["tags"] = callComment.tags
if callComment is None or not callComment.text:
entry["errornous"] = True
self.warn('Missing comment on "%s" namespace' % exportName, mainNode.line)
def addProperty(self, name, valueNode, commentNode, collection):
entry = collection[name] = {
"line": (commentNode or valueNode).line
}
comment = getDocComment(commentNode)
if comment is None or not comment.text:
entry["errornous"] = True
self.warn('Missing or empty comment on property "%s"' % name, valueNode.line)
else:
html = comment.getHtml(self.highlight)
entry["doc"] = html
entry["summary"] = Text.extractSummary(html)
if comment and comment.tags:
entry["tags"] = comment.tags
# Copy over value
ptype = getKeyValue(valueNode, "type")
if ptype and ptype.type == "string":
entry["type"] = ptype.value
pfire = getKeyValue(valueNode, "fire")
if pfire and pfire.type == "string":
entry["fire"] = pfire.value
# Produce nice output for init value
pinit = getKeyValue(valueNode, "init")
if pinit:
entry["init"] = valueToString(pinit)
# Handle nullable, default value is true when an init value is there. Otherwise false.
pnullable = getKeyValue(valueNode, "nullable")
if pnullable:
entry["nullable"] = pnullable.type == "true"
elif pinit is not None and pinit.type != "null":
entry["nullable"] = False
else:
entry["nullable"] = True
# Just store whether an apply routine was defined
papply = getKeyValue(valueNode, "apply")
if papply and papply.type == "function":
entry["apply"] = True
# Multi Properties
pthemeable = getKeyValue(valueNode, "themeable")
if pthemeable and pthemeable.type == "true":
entry["themeable"] = True
pinheritable = getKeyValue(valueNode, "inheritable")
if pinheritable and pinheritable.type == "true":
entry["inheritable"] = True
pgroup = getKeyValue(valueNode, "group")
if pgroup and len(pgroup) > 0:
entry["group"] = [child.value for child in pgroup]
pshorthand = getKeyValue(valueNode, "shorthand")
if pshorthand and pshorthand.type == "true":
entry["shorthand"] = True
def addConstructor(self, valueNode, commentNode=None):
    """Collect API documentation data for the class constructor.

    Parameters are taken from the function node (``valueNode``); the doc
    comment is optional, but undocumented parameters are flagged as
    ``errornous``.
    """
    entry = self.construct = {
        "line" : (commentNode or valueNode).line
    }

    if commentNode is None:
        commentNode = valueNode

    # Root doc comment is optional for constructors
    comment = getDocComment(commentNode)
    if comment and comment.hasContent():
        html = comment.getHtml(self.highlight)
        entry["doc"] = html
        entry["summary"] = Text.extractSummary(html)

    if comment and comment.tags:
        entry["tags"] = comment.tags

    # The constructor is always named after the class itself.
    entry["init"] = self.main["name"]

    funcParams = getParamNamesFromFunction(valueNode)
    if funcParams:
        entry["params"] = {}
        for paramPos, paramName in enumerate(funcParams):
            entry["params"][paramName] = {
                "position" : paramPos
            }

        # Use comment for enrich existing data
        # NOTE(review): getDocComment() is called a second time here; the
        # result computed above could probably be reused — confirm first.
        comment = getDocComment(commentNode)
        if comment:
            if not comment.params:
                self.warn("Documentation for parameters of constructor are missing", valueNode.line)
                for paramName in funcParams:
                    entry["params"][paramName]["errornous"] = True
            else:
                for paramName in funcParams:
                    if paramName in comment.params:
                        entry["params"][paramName].update(comment.params[paramName])
                    else:
                        entry["params"][paramName]["errornous"] = True
                        self.warn("Missing documentation for parameter %s in constructor" % paramName, valueNode.line)
        else:
            entry["errornous"] = True
def addEvent(self, name, valueNode, commentNode, collection):
    """Collect API documentation data for an event definition.

    Resolves identifier references via local assignments (recursing into
    itself) and merges the event type from the doc comment when present.
    """
    entry = collection[name] = {
        "line" : (commentNode or valueNode).line
    }

    if valueNode.type == "dot":
        entry["type"] = assembleDot(valueNode)
    elif valueNode.type == "identifier":
        entry["type"] = valueNode.value

        # Try to resolve identifier with local variable assignment
        assignments, values = findAssignments(valueNode.value, valueNode)
        if assignments:
            # We prefer the same comment node as before as in these
            # scenarios a reference might be used for different event types
            if not findCommentNode(commentNode):
                commentNode = assignments[0]

            self.addEvent(name, values[0], commentNode, collection)
            return

    comment = getDocComment(commentNode)
    if comment:
        if comment.tags:
            entry["tags"] = comment.tags

        # Prefer type but fall back to returns (if the developer has made an error here)
        if comment.type:
            entry["type"] = comment.type
        elif comment.returns:
            entry["type"] = comment.returns[0]

        if comment.hasContent():
            html = comment.getHtml(self.highlight)
            entry["doc"] = html
            entry["summary"] = Text.extractSummary(html)
        else:
            self.warn("Comment contains invalid HTML", commentNode.line)
            entry["errornous"] = True
    else:
        self.warn("Invalid doc comment", commentNode.line)
        entry["errornous"] = True
def addEntry(self, name, valueNode, commentNode, collection):
    """Collect API documentation data for a generic member entry.

    Handles type detection (including resolving identifiers, calls,
    hooks, string concatenation and object instantiation), merges the
    doc comment, and adds parameter/return information for functions.
    Recurses into itself whenever the effective value node changes.

    Fix vs. the previous revision: the ``comment.tags`` copy inside the
    ``if comment:`` block was performed twice; the duplicate is removed.
    """
    #
    # Use already existing type or get type from node info
    #
    if name in collection:
        entry = collection[name]
    else:
        entry = collection[name] = {
            "type" : nodeTypeToDocType[valueNode.type]
        }

    #
    # Store generic data like line number and visibility
    #
    entry["line"] = valueNode.line
    entry["visibility"] = getVisibility(name)
    if name.upper() == name:
        entry["constant"] = True

    #
    # Complex structured types are processed in two steps
    #
    if entry["type"] == "Call" or entry["type"] == "Hook":
        commentNode = findCommentNode(commentNode)
        if commentNode:
            comment = getDocComment(commentNode)
            if comment:
                # Static type definition
                if comment.type:
                    entry["type"] = comment.type
                    self.addEntry(name, valueNode, commentNode, collection)
                    return
                else:
                    # Maybe type function: We need to ignore returns etc. which are often
                    # the parent of the comment.
                    funcValueNode = findFunction(commentNode)
                    if funcValueNode:
                        # Switch to function type for re-analysis
                        entry["type"] = "Function"
                        self.addEntry(name, funcValueNode, commentNode, collection)
                        return

        if entry["type"] == "Call":
            callFunction = None

            if valueNode[0].type == "function":
                callFunction = valueNode[0]
            elif valueNode[0].type == "identifier":
                assignNodes, assignValues = findAssignments(valueNode[0].value, valueNode[0])
                if assignNodes:
                    callFunction = assignValues[0]

            if callFunction:
                # We try to analyze what the first return node returns
                returnNode = findReturn(callFunction)
                if returnNode and len(returnNode) > 0:
                    returnValue = returnNode[0]
                    entry["type"] = nodeTypeToDocType[returnValue.type]
                    self.addEntry(name, returnValue, returnValue, collection)

        elif entry["type"] == "Hook":
            # A hook is a ternary; analyze the "then" branch first.
            thenEntry = valueNode[1]
            thenType = nodeTypeToDocType[thenEntry.type]
            if thenType not in ("void", "null"):
                entry["type"] = thenType
                self.addEntry(name, thenEntry, thenEntry, collection)

            # Try second item for better data then null/void
            else:
                elseEntry = valueNode[2]
                elseType = nodeTypeToDocType[elseEntry.type]
                entry["type"] = elseType
                self.addEntry(name, elseEntry, elseEntry, collection)

        return

    #
    # Try to resolve identifiers
    #
    if entry["type"] == "Identifier":
        assignTypeNode, assignCommentNode = resolveIdentifierNode(valueNode)
        if assignTypeNode is not None:
            entry["type"] = nodeTypeToDocType[assignTypeNode.type]

            # Prefer comment from assignment, not from value if available
            self.addEntry(name, assignTypeNode, assignCommentNode, collection)
            return

    #
    # Processes special types:
    #
    # - Plus: Whether a string or number is created
    # - Object: Figures out the class instance which is created
    #
    if entry["type"] == "Plus":
        entry["type"] = detectPlusType(valueNode)
    elif entry["type"] == "Object":
        entry["type"] = detectObjectType(valueNode)

    #
    # Add human readable value
    #
    valueNodeHumanValue = valueToString(valueNode)
    if valueNodeHumanValue != entry["type"] and valueNodeHumanValue not in ("Other", "Call"):
        entry["value"] = valueNodeHumanValue

    #
    # Read data from comment and add documentation
    #
    comment = getDocComment(commentNode)
    if comment:
        if comment.tags:
            entry["tags"] = comment.tags

        if comment.type:
            entry["type"] = comment.type

        if comment.hasContent():
            html = comment.getHtml(self.highlight)
            entry["doc"] = html
            entry["summary"] = Text.extractSummary(html)
        else:
            entry["errornous"] = True
    else:
        entry["errornous"] = True

    #
    # Add additional data for function types (params, returns)
    #
    if entry["type"] == "Function":
        # Add basic param data
        funcParams = getParamNamesFromFunction(valueNode)
        if funcParams:
            entry["params"] = {}
            for paramPos, paramName in enumerate(funcParams):
                entry["params"][paramName] = {
                    "position" : paramPos
                }

        # Detect return type automatically
        returnNode = findReturn(valueNode)
        if returnNode and len(returnNode) > 0:
            autoReturnType = nodeTypeToDocType[returnNode[0].type]
            if autoReturnType == "Plus":
                autoReturnType = detectPlusType(returnNode[0])
            elif autoReturnType in ("Call", "Object"):
                autoReturnType = "var"

            autoReturnEntry = {
                "name" : autoReturnType,
                "auto" : True
            }

            if autoReturnType in builtinTypes:
                autoReturnEntry["builtin"] = True
            if autoReturnType in pseudoTypes:
                autoReturnEntry["pseudo"] = True

            entry["returns"] = [autoReturnEntry]

        # Use comment for enrich existing data
        if comment:
            if comment.returns:
                entry["returns"] = comment.returns

            if funcParams:
                if not comment.params:
                    for paramName in funcParams:
                        entry["params"][paramName]["errornous"] = True
                else:
                    for paramName in funcParams:
                        if paramName in comment.params:
                            entry["params"][paramName].update(comment.params[paramName])
                        else:
                            entry["params"][paramName]["errornous"] = True
| StarcoderdataPython |
6668393 | <reponame>ChaitanyaJoshiX/Pirple-Python
"""
Creating a dictionary of my favourite song.
Printing out each key and it's value through a loop.
Also, creating a function that allows the user to guess the value of any key .
If the key exists in dictionary and the value is correct, function returns true.
Or else, it returns false.
GitHub : @ChaitanyaJoshiX
"""
# Base program starts here
SongDetails = {"Artist":"OneRepublic", "Genre":"Pop", "Duration":169,
"YearofRelease":2021, "LikeDislikeRatio":95.18, "Views":6410185,
"Album":"Human", "LeadVocalist":"<NAME>"}

# Print every key/value pair as a two-column table.
print("Key\t\tValue")
for detail in SongDetails:
    print(detail +"\t\t"+ (str(SongDetails[detail])))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
"""
Identation will vary as per terminal display settings
"""

# Extra credit starts here
def Guess(key, value):
    """Return True when *key* exists in SongDetails and maps exactly to *value*.

    Bug fix: the previous version contained a no-op comparison
    (``SongDetails[detail] == str(...)`` discarded its result) and
    decided the False case from an unrelated loop counter (``if c != 1``),
    which could return None for some dictionary sizes. A direct
    membership + equality test is correct for every input.
    """
    return key in SongDetails and SongDetails[key] == value

# Testing out the function from here
print(Guess("YearofRelease", 2021))     # Will return True
print(Guess("LikeDislikeRatio", 65.6))  # Will return False
print(Guess("HitorFlop", "Hit"))        # Will return False
"""
GitHub : @ChaitanyaJoshiX
"""
| StarcoderdataPython |
378944 | import json
from django.http import HttpResponse
from swiper.common import status_code as STATUS
def render_json(data, code=STATUS.STATUS_OK):
    """Wrap *data* and a status *code* into a JSON HTTP response.

    The payload has the shape ``{"code": <code>, "data": <data>}`` and is
    serialized with stable key ordering and readable indentation.
    """
    payload = {'code': code, 'data': data}
    body = json.dumps(payload, ensure_ascii=False, indent=2, sort_keys=True)
    return HttpResponse(body)
5184669 | <filename>scripts/tools/memory/memdf/df.py
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DataFrame utilities."""
from typing import Dict
import numpy as np # type: ignore
import pandas as pd # type: ignore
class DF(pd.DataFrame):  # pylint: disable=too-many-ancestors
    """DataFrame builder with default columns and types.

    Subclasses declare ``name``, ``required`` (columns that must always
    exist) and ``dtype`` (column name -> dtype). On construction every
    missing required column is added empty and all declared columns are
    coerced to their declared dtypes.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Ensure every required column exists, even if empty.
        for c in self.required:
            if c not in self.columns:
                self[c] = pd.Series()
        # Coerce only the columns that are both present and declared.
        types = {c: self.dtype[c] for c in self.columns if c in self.dtype}
        typed_columns = list(types.keys())
        self[typed_columns] = self.astype(types, copy=False)[typed_columns]
        # Record the table's logical name on the frame itself.
        self.attrs['name'] = self.name
class SymbolSourceDF(DF):  # pylint: disable=too-many-ancestors
    """Table associating each symbol/address with its source compilation unit."""

    name: str = "symbolsource"
    required = frozenset({"symbol", "address", "cu"})
    dtype = {
        "symbol": "string",
        "address": np.int64,
        "cu": "string",
        "line": np.int64,
    }
class SegmentDF(DF):  # pylint: disable=too-many-ancestors
    """Table describing the program's segment memory map."""

    name: str = "segment"
    required = frozenset({"type", "vaddress", "paddress", "size"})
    dtype = {
        "type": "string",
        "vaddress": np.int64,
        "paddress": np.int64,
        "size": np.int64,
        "flags": np.int32,
    }
class SectionDF(DF):  # pylint: disable=too-many-ancestors
    """Table describing the section memory map."""

    name: str = "section"
    required = frozenset({"section", "type", "address", "size"})
    dtype = {
        "section": "string",
        "type": "string",
        "address": np.int64,
        "size": np.int64,
        "flags": np.int32,
        "segment": np.int32,
    }
class SymbolDF(DF):  # pylint: disable=too-many-ancestors
    """Symbol table: one row per symbol with its type, address and size."""

    name: str = "symbol"
    required = frozenset({"symbol", "type", "address", "size"})
    dtype = {
        "symbol": "string",
        "type": "string",
        "address": np.int64,
        "size": np.int64,
        "shndx": "string",
    }
class ExtentDF(DF):  # pylint: disable=too-many-ancestors
    """Table of gaps between symbols within a section."""

    name: str = "gap"
    required = frozenset({"address", "size", "section"})
    dtype = {
        "address": np.int64,
        "size": np.int64,
        "section": "string",
    }
class StackDF(DF):  # pylint: disable=too-many-ancestors
    """Stack-usage table: per-symbol stack size and source location."""

    name: str = "stack"
    required = frozenset({"symbol", "type", "size"})
    dtype = {
        "symbol": "string",
        "type": "string",
        "size": np.int64,
        "file": "string",
        "line": np.int64,
    }
def find_class(df: pd.DataFrame):
    """Find a core DF subclass matching a plain data frame.

    An actual ``DF`` instance maps to its own class. Otherwise the frame
    is usable as a symbol/section/segment table iff it carries that
    table's required columns; the first match wins, else None.
    """
    if isinstance(df, DF):
        return type(df)
    for candidate in (SymbolDF, SectionDF, SegmentDF):
        if candidate.required.issubset(df.columns):
            return candidate
    return None
DFs = Dict[str, DF]
| StarcoderdataPython |
8036000 | import requests
def compare_holiday_counts(country_code1, country_code2, year=2019):
    """Returns the difference between the holiday counts of country 1 and country 2.

    Args:
        country_code1: The ISO 3166-1 alpha-2 country code for country 1.
        country_code2: The ISO 3166-1 alpha-2 country code for country 2.
        year: The year to use for the comparison.

    Returns:
        The number of holidays of country 1 minus the number of holidays of country 2.

    Raises:
        requests.HTTPError: if the API answers with an error status.
        requests.Timeout: if the API does not answer within 30 seconds.
    """
    # build the link depending on the country that you are looking for
    link = 'https://date.nager.at/api/v2/publicholidays/{year}/{country_code}'

    def _fetch_holidays(country_code):
        # Explicit timeout so a stalled connection cannot hang forever,
        # and raise_for_status so an error response is not silently
        # fed to the JSON decoder.
        response = requests.get(
            link.format(year=year, country_code=country_code), timeout=30)
        response.raise_for_status()
        return response.json()

    # calculate the difference
    return len(_fetch_holidays(country_code1)) - len(_fetch_holidays(country_code2))
if __name__ == '__main__':
    # Demo: compare Germany (DE) against Nicaragua (NI) for the default year.
    country_code1 = "DE"
    country_code2 = "NI"
    print("{} has {} more days of holidays than {}.".format(country_code1,
          compare_holiday_counts(country_code1, country_code2), country_code2))
| StarcoderdataPython |
1775560 | from functools import wraps
from timeit import default_timer
from typing import Any, Iterable, List, Dict
'''
timer - 1.0
此版本调用起来不够优雅,以及类型注解等一些细节没做好
故重构为 1.1 版本,该版本源码留作纪念
'''
# Module-level registries shared by every @timer-decorated function:
# the wrapped callables, their recorded runtimes (in call order), and a
# function-name -> last-runtime mapping used for the printed report.
funcs: List[Any] = []
times: List[float] = []
funcs_times: Dict[str, float] = {}
def timer(flag: str = None, input: Iterable = None):
    """Decorator factory and command dispatcher for the v1.0 timing API.

    With no *flag* it returns a decorator that records the wrapped
    function's runtime into the module-level registries. Special flags:

    * ``'report'``       -- returns (runtimes, runtime fractions) as generators.
    * ``'print_report'`` -- prints a formatted runtime table.
    * ``'run'``          -- calls every registered function with input[i].
    * ``'run1arg'``      -- calls every registered function with the same args.

    NOTE(review): the parameter name ``input`` shadows the builtin, and
    ``input[index]`` indexes it even though it is annotated Iterable —
    callers presumably pass a sequence; confirm before changing.
    NOTE(review): all flags except 'report' fall through and still return
    the decorator ``inner`` — apparently intentional in this legacy API.
    """
    if flag != 'run' and flag != 'run1arg' and input is not None:
        raise AttributeError("flag != 'run' and flag != 'run1arg',不要输入 input")
    elif flag == 'report':
        return ((i for i in times), (i / sum(times) for i in times))
    elif flag == 'print_report':
        print('-' * 38)
        for key, value in funcs_times.items():
            print(f'| {key : <8} | {value : <10.5f} | {value / sum(funcs_times.values()) : <10.5%} |')
        print('-' * 38)
    elif flag == 'run':
        for index, func in enumerate(funcs):
            func(*input[index])
    elif flag == 'run1arg':
        for func in funcs:
            func(*input)

    def inner(func: Any) -> Any:
        # Wrap *func* so every call is timed with timeit.default_timer.
        @wraps(func)
        def clock(*args, **kwargs) -> Any:
            start: float = default_timer()
            result: Any = func(*args, **kwargs)
            end: float = default_timer() - start
            times.append(end)
            funcs_times[func.__name__] = end
            return result
        funcs.append(clock)
        return clock
    return inner
| StarcoderdataPython |
3212213 | <filename>Django/Cycl/migrations/0002_auto_20190804_1641.py
# Generated by Django 2.2.3 on 2019-08-04 14:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); adds default
    # orderings to the Cycl models and replaces Country.code with explicit
    # alpha-2/alpha-3/numeric ISO code fields. Edit with care.

    dependencies = [
        ('Cycl', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='lineup',
            options={'ordering': ['rider']},
        ),
        migrations.AlterModelOptions(
            name='manage',
            options={'ordering': ['staff']},
        ),
        migrations.AlterModelOptions(
            name='rider',
            options={'ordering': ['lastName', 'firstName']},
        ),
        migrations.AlterModelOptions(
            name='staff',
            options={'ordering': ['lastName', 'firstName']},
        ),
        migrations.AlterModelOptions(
            name='team',
            options={'ordering': ['name']},
        ),
        migrations.RemoveField(
            model_name='country',
            name='code',
        ),
        migrations.AddField(
            model_name='country',
            name='alpha2Code',
            field=models.CharField(default='AB', max_length=2),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='country',
            name='alpha3Code',
            field=models.CharField(default='ABC', max_length=3),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='country',
            name='numericCode',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
1933693 | # Copyright (c) 2017-2020 <NAME>. All rights reserved.
# Use of this source code is governed by an MIT license that can be
# found in the LICENSE file.
import re
import socket
import urllib.error
import urllib.request
import pynvim
__version__ = '1.1'
API_URL = 'https://www.gitignore.io/api/{}'
USER_AGENT = 'fzf-gitignore/{}'.format(__version__)
@pynvim.plugin
class FzfGitignore():
    """Remote pynvim plugin that fetches .gitignore templates from gitignore.io."""

    def __init__(self, nvim):
        self.cache = []
        self.nvim = nvim
        self.newline_re = re.compile(r'\n')

    def error(self, msg):
        """Show *msg* in Neovim using the error highlight group."""
        self.nvim.command(
            'echohl ErrorMsg | echomsg "[fzf-gitignore] {}" | echohl None'.format(msg))

    def fetch(self, params):
        """Fetch ``/api/<params>`` from gitignore.io and return the body text.

        Reports any network error in Neovim and then re-raises it.
        """
        req = urllib.request.Request(API_URL.format(params))
        req.add_header('User-Agent', USER_AGENT)
        try:
            with urllib.request.urlopen(req, timeout=30) as f:
                raw = f.read()
                # FIX: compare the *byte* count against Content-Length.
                # Previously the decoded string length was compared, which
                # is wrong whenever the body contains multibyte UTF-8.
                if 'Content-Length' in f.info():
                    if len(raw) != int(f.info()['Content-Length']):
                        raise urllib.error.URLError('Download incomplete')
                return raw.decode('utf-8')
        except (urllib.error.HTTPError, urllib.error.URLError) as err:
            self.error('{}: {}'.format(err, req.get_full_url()))
            raise
        # FIX: socket.timeout is a subclass of OSError (== socket.error),
        # so it must be handled *before* the generic socket.error clause;
        # previously this handler was unreachable.
        except socket.timeout as err:
            self.error('Connection timed out: {}: {}'.format(err, req.get_full_url()))
            raise
        except socket.error as err:
            self.error('Socket error: {}: {}'.format(err, req.get_full_url()))
            raise

    @pynvim.function('_fzf_gitignore_get_all_templates', sync=True)
    def get_all_templates(self, args):
        """Return (and memoize) the comma-separated template list."""
        if not self.cache:
            data = self.newline_re.sub(',', self.fetch('list'))
            self.cache = data.split(',')
        return self.cache

    @pynvim.function('_fzf_gitignore_create', sync=True)
    def create(self, args):
        """Return the generated .gitignore contents for the selected templates."""
        data = self.fetch(','.join(args[0]))
        return data.split('\n')
# vim: ts=4 et sw=4
| StarcoderdataPython |
3598290 | from enum import Enum
# Port identifiers for node connections: the left-hand port carries
# inputs and the right-hand port outputs (presumably mirrored in the UI
# code — confirm against the graph widget before relying on this).
LEFT_PORT = 'INPUT'
RIGHT_PORT = 'OUTPUT'
class NODE_TYPE(Enum):
    """Numeric identifiers for the supported pipeline node types.

    NOTE(review): value 11 is unused (``roto`` jumps to 12) — confirm
    whether that gap is intentional before renumbering anything.
    """
    art = 0
    mod = 1
    trk = 2
    rig = 3
    tex = 4
    shd = 5
    ani = 6
    vfx = 7
    cfx = 8
    lgt = 9
    render = 10
    roto = 12
    mat = 13
    cmp = 14
    user = 15
    file = 16
class NODE_COLOR(Enum):
    """Display color per node type as an (R, G, B, A) tuple, channels 0-255.

    NOTE(review): ``roto`` and ``mat`` share the same color — confirm
    whether that is intentional.
    """
    art = (245, 174, 21, 255)
    mod = (142, 142, 142, 255)
    trk = (220, 30, 30, 255)
    rig = (27, 27, 27, 255)
    tex = (228, 228, 228, 255)
    shd = (107, 107, 107, 255)
    ani = (150, 18, 18, 255)
    vfx = (171, 74, 9, 255)
    cfx = (122, 101, 141, 255)
    lgt = (244, 242, 112, 255)
    render = (62, 62, 154, 255)
    roto = (18, 214, 46, 255)
    mat = (18, 214, 46, 255)
    cmp = (37, 170, 186, 255)
    user = (212, 208, 169, 255)
    file = (107, 107, 107, 255)
def getNodeInitialData(_type):
if _type == NODE_TYPE.art:
return {'name': 'Art', 'color': NODE_COLOR.art}
elif _type == NODE_TYPE.mod:
return {'name': 'Modeling', 'color': NODE_COLOR.mod}
elif _type == NODE_TYPE.tex:
return {'name': 'Texture', 'color': NODE_COLOR.tex}
elif _type == NODE_TYPE.shd:
return {'name': 'Shader', 'color': NODE_COLOR.shd}
elif _type == NODE_TYPE.rig:
return {'name': 'Rigging', 'color': NODE_COLOR.rig}
elif _type == NODE_TYPE.ani:
return {'name': 'Animation', 'color': NODE_COLOR.ani}
elif _type == NODE_TYPE.trk:
return {'name': 'Tracking', 'color': NODE_COLOR.trk}
elif _type == NODE_TYPE.vfx:
return {'name': 'VFX', 'color': NODE_COLOR.vfx}
elif _type == NODE_TYPE.cfx:
return {'name': 'CFX', 'color': NODE_COLOR.cfx}
elif _type == NODE_TYPE.lgt:
return {'name': 'Lighting', 'color': NODE_COLOR.lgt}
elif _type == NODE_TYPE.render:
return {'name': 'Rendering', 'color': NODE_COLOR.render}
elif _type == NODE_TYPE.roto:
return {'name': 'Roto', 'color': NODE_COLOR.roto}
elif _type == NODE_TYPE.mat:
return {'name': 'Matte', 'color': NODE_COLOR.mat}
elif _type == NODE_TYPE.cmp:
return {'name': 'Composite', 'color': NODE_COLOR.cmp} | StarcoderdataPython |
11225594 | <reponame>ravinkohli/Auto-PyTorch
from typing import Any, Dict, Type, Union
from autoPyTorch.pipeline.components.base_component import (
ThirdPartyComponents,
)
from autoPyTorch.pipeline.components.setup.traditional_ml.traditional_learner.base_traditional_learner import \
BaseTraditionalLearner
from autoPyTorch.pipeline.components.setup.traditional_ml.traditional_learner.learners import (
CatboostModel,
ExtraTreesModel,
KNNModel,
LGBModel,
RFModel,
SVMModel)
# Registry of the built-in traditional (non-neural) learners.
_traditional_learners = {
    # Sort by more robust models
    # Depending on the allocated time budget, only the
    # top models from this dict are to be fitted.
    # LGBM is the more robust model, with
    # internal measures to prevent crashes, overfit
    # Additionally, it is one of the state of the art
    # methods for tabular prediction.
    # Then follow with catboost for categorical heavy
    # datasets. The other models are complementary and
    # their ordering is not critical
    'lgb': LGBModel,
    'catboost': CatboostModel,
    'random_forest': RFModel,
    'extra_trees': ExtraTreesModel,
    'svm': SVMModel,
    'knn': KNNModel,
}
# Registry for third-party learner components registered at runtime.
_addons = ThirdPartyComponents(BaseTraditionalLearner)
def add_traditional_learner(traditional_learner: BaseTraditionalLearner) -> None:
    """Register a third-party traditional learner component with the addon registry."""
    _addons.add_component(traditional_learner)
def get_available_traditional_learners() -> Dict[str, Union[Type[BaseTraditionalLearner], Any]]:
    """Return a fresh name -> class mapping of the built-in traditional learners."""
    # Shallow copy so callers can mutate their mapping freely.
    return dict(_traditional_learners)
| StarcoderdataPython |
282442 | <reponame>Apkawa/django-modeltranslation-rosetta<gh_stars>0
# coding=utf-8
from __future__ import unicode_literals
from django.contrib import admin
from .models import (
Article
)
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    # Registration only: Article uses the default ModelAdmin behaviour.
    pass
| StarcoderdataPython |
9682428 | <filename>LittlePerformance/module/insane_part.py
import time
import random
import sys
from naoqi import ALProxy
# Command line: <ip> <port> <repeat-count> <mode 0-4>.
argvs = sys.argv
ip = argvs[1]
port = int(argvs[2])
REPEAT = int(argvs[3])
MODE = int(argvs[4])

try:
    motion_proxy = ALProxy('ALMotion', ip, port)
except Exception:
    # FIX: was a bare ``except: quit()`` which also swallowed
    # KeyboardInterrupt/SystemExit and exited with status 0 on failure.
    sys.exit(1)

part = 'Body'

# Select which joint chains take part in the random motion.
# NOTE(review): an out-of-range MODE leaves body_names undefined and
# crashes below with NameError — presumably callers only pass 0-4.
if MODE == 0:  # only head
    body_names = ['HeadYaw', 'HeadPitch']
elif MODE == 1:  # only left hand
    body_names = ['LShoulderPitch', 'LShoulderRoll', 'LElbowYaw', 'LElbowRoll', 'LWristYaw', 'LHand']
elif MODE == 2:  # only right hand
    body_names = ['RShoulderPitch', 'RShoulderRoll', 'RElbowYaw', 'RElbowRoll', 'RWristYaw', 'RHand']
elif MODE == 3:  # both hands
    body_names = [
        'LShoulderPitch', 'LShoulderRoll', 'LElbowYaw', 'LElbowRoll', 'LWristYaw', 'LHand',
        'RShoulderPitch', 'RShoulderRoll', 'RElbowYaw', 'RElbowRoll', 'RWristYaw', 'RHand']
elif MODE == 4:  # hands and head
    body_names = [
        'HeadYaw', 'HeadPitch',
        'LShoulderPitch', 'LShoulderRoll', 'LElbowYaw', 'LElbowRoll', 'LWristYaw', 'LHand',
        'RShoulderPitch', 'RShoulderRoll', 'RElbowYaw', 'RElbowRoll', 'RWristYaw', 'RHand']

# Per-joint [min, max] angle limits (first two fields of getLimits —
# presumably min/max angle; confirm against the NAOqi ALMotion docs).
body_limits = [motion_proxy.getLimits(l)[0] for l in body_names]
body_limits_angles = [[l[0], l[1]] for l in body_limits]

# Stiffen the joints so they can be driven.
motion_proxy.setStiffnesses(part, 1.0)

for i in range(REPEAT):
    # Pick a random target inside the middle 70% of each joint's range,
    # at a random fraction of maximum speed, then pause briefly.
    target_angles = [(angles[1] - angles[0]) * random.uniform(0.15, 0.85) + angles[0]
                     for angles in body_limits_angles]
    fractionMaxSpeed = random.random() * 0.5 + 0.1
    motion_proxy.setAngles(body_names, target_angles, fractionMaxSpeed)
    time.sleep(random.random())
3393621 | <filename>project1/application.py
import os
import requests
from flask import Flask, session, render_template, request
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from datetime import datetime
# from flask.ext.heroku import Heroku
os.getcwd()  # NOTE(review): return value unused — likely leftover debugging.

# res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "0yOQWbyRCLpwoPzvLkCe9Q", "isbns": "9781632168146"})
# print(res.json())

app = Flask(__name__)

# Check for environment variable
# if not os.getenv("DATABASE_URL"):
#     raise RuntimeError("DATABASE_URL is not set")

# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Set up database
# engine manages communications between the flask app and the postgres db
engine = create_engine(os.getenv("DATABASE_URL"))
# scoped_session allows to manage different sessions for different people
db = scoped_session(sessionmaker(bind=engine))
# Set "homepage" to index.html
@app.route("/")
def index():
return render_template("index.html")
@app.route("/register",methods=["POST"])
def register():
# Form variables
username = request.form.get("username")
email = request.form.get("email")
password = request.form.get("password")
repassword = request.form.get("repassword")
dt = datetime.now()
# Print for debugging purposes
_username_exists = username_exists(username)
_email_exists = email_exists(email)
print("usernames_available:",username_exists(username))
print("email_available:",email_exists(email))
# Check if everything is ok
if not _username_exists and not _email_exists:
# Insert new user to accounts
db.execute("Insert into accounts (username,password,email,created_on) values (:username,:password,:email,:created_on)",
{"username":username,"email":email,"password":password,"created_on":dt})
# Print for debugging purposes
print(username+" "+email+" "+password+" registered")
# Commit changes to database
db.commit()
# Render registered page
text = username + "! You have been successfully registered!"
elif _username_exists:
text = "Username not available..."
elif _email_exists:
text = "Email not available..."
else:
text = "Unkown error..."
return render_template("registered.html",text=text)
@app.route("/login",methods=["POST"])
def login():
username = request.form.get("username")
password = request.form.get("password")
if user_match(username,password):
return render_template("hello.html",name=username)
elif not username_exists(username):
text = "Username " + username + " does no exist"
return render_template("index.html",text=text)
else:
text = "User and password do not match"
return render_template("index.html",text=text)
@app.route("/hello",methods=["POST"])
def hello():
name = request.form.get("name")
return render_template("hello.html",name=name)
# Method that checks if username exists
def username_exists(username):
    """Return True when an account with *username* already exists."""
    matches = db.execute("Select count(*) from accounts where username = :username",
                         {"username": username}).scalar()
    return matches != 0
# Method that checks if email exists
def email_exists(email):
    """Return True when an account with *email* already exists."""
    matches = db.execute("Select count(*) from accounts where email = :email",
                         {"email": email}).scalar()
    return matches != 0
# Method that checks if username and password match
def user_match(username, password):
    """Return True when exactly one account matches *username* and *password*."""
    matches = db.execute("Select count(*) from accounts where username = :username and password = :password",
                         {"username": username, "password": password}).scalar()
    return matches == 1
| StarcoderdataPython |
177964 | from app import __metadata__ as meta
from configparser import ConfigParser, ExtendedInterpolation
from pathlib import Path
from shutil import copy
import os
import io
import logging
log = logging.getLogger(__name__)
APP = meta.APP_NAME
'''
This component is responsible for configuration management.
If this is the first time the app is run then it will create the
config file, and set the system environment variables for the app
The logic for checking the existence of the conf file
and creating it if it's missing should be in init.py
TODO:
Add 2 more config sources
1) service - Get config from a service endpoint (JSON)
2) db - Get config from a database
'''
class MetaConf(object):
    """Builds the default configuration from the metadata defaults.

    Parses ``meta.__default_config__``, resolves the ``{{app_root}}``
    placeholder (snap-aware), and derives the log and config file paths.
    """

    def __init__(self):
        conf = ConfigParser()
        conf.read_string(meta.__default_config__)
        self.config = conf
        self.resolveMacros()
        self.log_file, self.config_file = self.getConfPaths()

    def resolveMacros(self):
        """Replace the {{app_root}} placeholder in every config value.

        Under snap confinement (SNAP_USER_COMMON set) the user's home is
        used as the application root; otherwise the repository root.
        """
        for section in self.config.sections():
            for k in self.config.options(section):
                v = self.config.get(section, k)
                if '{{app_root}}' in v:
                    if 'SNAP_USER_COMMON' in Config.envGet2('SNAP_USER_COMMON'):
                        v = v.replace('{{app_root}}', self.getSnapPath())
                    else:
                        v = v.replace('{{app_root}}', self.getConfRoot())
                self.config.set(section, k, v)

    def getConfRoot(self):
        # Repository root = two directories above this module.
        module_dir = os.path.dirname(__file__)
        app_dir = os.path.dirname(module_dir)
        return os.path.dirname(app_dir)

    def getSnapPath(self):
        """Return the snap app root (the user's home), creating conf/logs on first run."""
        if not os.path.exists(str(Path.home()) + '/conf'):
            os.makedirs(str(Path.home()) + '/conf')
            os.makedirs(str(Path.home()) + '/logs')
            Path(str(Path.home()) + '/logs/lxdui.log').touch()
            copy(self.getConfRoot() + '/conf/auth.conf', str(Path.home()) + '/conf')
            copy(self.getConfRoot() + '/conf/log.conf', str(Path.home()) + '/conf')
        return str(Path.home())

    def getConfPaths(self):
        """Return (log_file, config_file) after interpolating the config."""
        # Round-trip through a buffer so ExtendedInterpolation can resolve
        # the cross-key references in the defaults.
        f = io.StringIO()
        self.config.write(f)
        c = ConfigParser(interpolation=ExtendedInterpolation())
        f.seek(0)
        c.read_file(f)
        log_file = c.get(APP, '{}.log.file'.format(APP.lower()))
        config_file = c.get(APP, '{}.conf.file'.format(APP.lower()))
        return log_file, config_file
class Config(object):
    def __init__(self, **kwargs):
        """
        Initialises the Config object and loads the configuration into memory.
        Order of operations:
        1) if a config file has been provided then use that one
        2) check to see if we have a local config file, and if so use that
        3) no config file found so we'll create one with defaults
        :param kwargs: conf=</path/to/config/file> #External source
        """
        m = MetaConf()
        self.config = None
        self.log_file = m.log_file
        self.config_file = m.config_file

        # conf file specified by the caller
        if kwargs:
            file = kwargs.get('conf')
            log.info('Loading external config file: {}'.format(file))
            if file:
                self.config = self.load('external', file)
                self.envSet(log=self.log_file, conf=file)
            else:
                raise Exception('Unsupported parameter {}'.format(kwargs))
        # no conf parameters specified so check local conf file
        elif Path(self.config_file).exists():
            log.info('Using config file path = {}'.format(self.config_file))
            self.config = self.load('ini', self.config_file)
            self.envSet()
        # load the default config from meta
        elif meta.AUTO_LOAD_CONFIG:
            log.info('Load default config (meta)')
            self.config = m.config
            self.envSet()
            self.save()
        else:
            raise Exception('Unable to load the configuration.')

    def load(self, conf_type, *file_path):
        """
        Load the configuration into memory.
        The configuration is stored in the Config object.
        :param conf_type: one of 'external', 'ini', 'service', 'db'
                          ('service' and 'db' are not implemented yet)
        :param file_path: path components of the file to load
        :return: a ConfigParser object
        """
        if conf_type == 'external':
            external_conf_file = Path(*file_path)
            config = self.getConfig(external_conf_file)
            return config
        elif conf_type == 'ini':
            # NOTE(review): this branch reads self.config_file and
            # ignores *file_path — confirm that is intended.
            conf = self.getConfig(self.config_file)
            return conf
        elif conf_type == 'service':
            raise Exception('Not implemented.')
        elif conf_type == 'db':
            raise Exception('Not implemented.')
        else:
            raise Exception('Unable to determine configuration type.')

    def get(self, section, key):
        """
        Retrieve a configuration parameter.
        :param section: The section of the ini file to search
        :param key: The key to look up
        :return: Returns the value associated with the key
        """
        return self.config.get(section, key)

    def set(self, section, key, value):
        """
        Update a configuration parameter (in memory only; call save() to persist).
        :param section: The section of the ini config file to update
        :param key: The key that needs to be updated
        :param value: The new value associated with the key
        :return:
        """
        self.config.set(section, key, value)

    def show(self):
        """
        Prints out a listing of the config file to the console.
        :return:
        """
        for section in self.config.sections():
            for k in self.config.options(section):
                v = self.config.get(section, k)
                print('{} = {}'.format(k, v))

    def save(self):
        """
        Save the contents of the config object to the conf file.
        :return:
        """
        with open(self.config_file, 'w') as f:
            self.config.write(f)

    @staticmethod
    def envGet():
        """
        Retrieve the environment variables containing the log and conf paths.
        :return: Returns a dictionary containing the file paths
        """
        env = {}
        for k, v in os.environ.items():
            if k in ['LXDUI_LOG', 'LXDUI_CONF']:
                env.update({k: os.environ.get(k)})
        return env

    @staticmethod
    def envGet2(key):
        """
        Retrieve a single environment variable.
        :param key: the environment variable name to look up
        :return: a dict of {key: value}; empty when the variable is unset
        """
        env = {}
        for k, v in os.environ.items():
            if key == k:
                env.update({k: os.environ.get(k)})
        return env

    def envSet(self, **kwargs):
        """
        Set the environment variables for the log and the conf file
        :param kwargs: Specify log=<log_path> and conf=<conf_path>
        :return:
        """
        log_path = None
        conf_path = None
        if kwargs.get('log') and kwargs.get('conf'):
            log_path = kwargs.get('log')
            conf_path = kwargs.get('conf')
            log.debug('Setting environment variables')
        else:
            # Fall back to the paths computed from the configuration.
            log_path = self.log_file
            conf_path = self.config_file
        os.environ['{}_LOG'.format(APP)] = log_path
        os.environ['{}_CONF'.format(APP)] = conf_path

    def envShow(self):
        # Print the app's environment variables, or a hint when unset.
        if not self.envGet():
            print('Environment variables for {} have not been set'.format(APP))
        else:
            for k, v in self.envGet().items():
                print('{} = {}'.format(k, v))

    def getConfig(self, file):
        """
        Check that the file exists and retrieve the contents of the config file.
        :param file: A string representing the path to the conf file.
        :return: Returns a config object.
        """
        # if the file exists then read the contents
        if Path(file).exists():
            try:
                config = self.parseConfig(file)
                return config
            except IOError as e:
                log.info('Unable to open file.', e)
        else:
            raise FileNotFoundError

    @staticmethod
    def parseConfig(file):
        """
        Parses the config file. The file must be of ini format.
        If the file exists but is empty and exception will be generated.
        :param file: The path of the file to parse
        :return: Return a config object
        """
        # make sure the file is not empty
        size = Path(file).stat().st_size
        if size != 0:
            config = ConfigParser(interpolation=ExtendedInterpolation())
            config.read(file.__str__())
            return config
        else:
            raise Exception('File is empty.')
8150636 | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import asdict
from enum import EnumMeta
from typing import Dict, Generic, Iterable, List, Sequence, Tuple, Union
import gym
import gym.spaces as gym_spaces
import numpy as np
from skdecide import EnumerableSpace, SamplableSpace, SerializableSpace, T
class GymSpace(Generic[T], SamplableSpace[T], SerializableSpace[T]):
"""This class wraps an OpenAI Gym space (gym.spaces) as a scikit-decide space.
!!! warning
Using this class requires OpenAI Gym to be installed.
"""
def __init__(self, gym_space: gym.Space) -> None:
"""Initialize GymSpace.
# Parameters
gym_space: The Gym space (gym.spaces) to wrap.
"""
super().__init__()
self._gym_space = gym_space
self.shape = gym_space.shape # TODO: remove if unnecessary?
self.dtype = gym_space.dtype # TODO: remove if unnecessary?
def contains(self, x: T) -> bool:
return self._gym_space.contains(x)
def sample(self) -> T:
return self._gym_space.sample()
def to_jsonable(self, sample_n: Iterable[T]) -> Sequence:
return self._gym_space.to_jsonable(sample_n)
def from_jsonable(self, sample_n: Sequence) -> Iterable[T]:
return self._gym_space.from_jsonable(sample_n)
def unwrapped(self) -> gym.Space:
"""Unwrap the Gym space (gym.spaces) and return it.
# Returns
The original Gym space.
"""
return self._gym_space
def to_unwrapped(self, sample_n: Iterable[T]) -> Iterable:
return sample_n
def from_unwrapped(self, sample_n: Iterable) -> Iterable[T]:
return sample_n
class BoxSpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym Box space (gym.spaces.Box).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, low, high, shape=None, dtype=np.float32):
        """Create the underlying gym.spaces.Box and wrap it."""
        box = gym_spaces.Box(low, high, shape, dtype)
        super().__init__(gym_space=box)
class DiscreteSpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym Discrete space (gym.spaces.Discrete).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, n):
        """Create the underlying gym.spaces.Discrete(n) and wrap it."""
        discrete = gym_spaces.Discrete(n)
        super().__init__(gym_space=discrete)
class MultiDiscreteSpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym MultiDiscrete space (gym.spaces.MultiDiscrete).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, nvec):
        """Create the underlying gym.spaces.MultiDiscrete(nvec) and wrap it."""
        multi_discrete = gym_spaces.MultiDiscrete(nvec)
        super().__init__(gym_space=multi_discrete)
class MultiBinarySpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym MultiBinary space (gym.spaces.MultiBinary).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, n):
        """Create the underlying gym.spaces.MultiBinary(n) and wrap it."""
        multi_binary = gym_spaces.MultiBinary(n)
        super().__init__(gym_space=multi_binary)
class TupleSpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym Tuple space (gym.spaces.Tuple).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, spaces):
        """Create the underlying gym.spaces.Tuple and wrap it."""
        tuple_space = gym_spaces.Tuple(spaces)
        super().__init__(gym_space=tuple_space)
class DictSpace(GymSpace[T]):
    """Scikit-decide wrapper around a Gym Dict space (gym.spaces.Dict).

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, spaces=None, **spaces_kwargs):
        """Create the underlying gym.spaces.Dict and wrap it."""
        dict_space = gym_spaces.Dict(spaces, **spaces_kwargs)
        super().__init__(gym_space=dict_space)
class EnumSpace(Generic[T], GymSpace[T], EnumerableSpace[T]):
    """Enumerable scikit-decide space backed by a Gym Discrete space.

    The indices of the Discrete space map one-to-one onto the members of the
    given enumeration class, in definition order.

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, enum_class: EnumMeta) -> None:
        """Initialize EnumSpace.

        # Parameters
        enum_class: The enumeration class for creating the Gym Discrete space (gym.spaces.Discrete) to wrap.
        """
        self._enum_class = enum_class
        self._list_enum = list(enum_class)
        super().__init__(gym_spaces.Discrete(len(enum_class)))

    def contains(self, x: T) -> bool:
        """Return True iff x is a member of the wrapped enumeration."""
        return isinstance(x, self._enum_class)

    def get_elements(self) -> Iterable[T]:
        """Return every enumeration member, in definition order."""
        return self._list_enum

    def sample(self) -> T:
        """Draw a random enumeration member via the Discrete sampler."""
        idx = super().sample()
        return self._list_enum[idx]

    def to_jsonable(self, sample_n: Iterable[T]) -> Sequence:
        """Serialize members by their names."""
        return [member.name for member in sample_n]

    def from_jsonable(self, sample_n: Sequence) -> Iterable[T]:
        """Deserialize members from their names."""
        return [self._enum_class[name] for name in sample_n]

    def unwrapped(self) -> gym_spaces.Discrete:
        """Unwrap the Gym Discrete space (gym.spaces.Discrete) and return it.

        # Returns
        The original Gym Discrete space created from the enumeration.
        """
        return super().unwrapped()

    def to_unwrapped(self, sample_n: Iterable[T]) -> Iterable[int]:
        """Map members to their Discrete indices."""
        index_of = self._list_enum.index
        return [index_of(member) for member in sample_n]

    def from_unwrapped(self, sample_n: Iterable[int]) -> Iterable[T]:
        """Map Discrete indices back to enumeration members."""
        return [self._list_enum[i] for i in sample_n]
class ListSpace(Generic[T], GymSpace[T], EnumerableSpace[T]):
    """Enumerable scikit-decide space built from an explicit list of elements.

    A Gym Discrete space (gym.spaces.Discrete) over the element indices is
    created and wrapped.

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(self, elements: Iterable[T]) -> None:
        """Initialize ListSpace.

        # Parameters
        elements: The list of elements for creating the Gym Discrete space (gym.spaces.Discrete) to wrap.
        """
        self._elements = list(elements)
        super().__init__(gym_spaces.Discrete(len(self._elements)))

    def contains(self, x: T) -> bool:
        """Return True iff x is one of the listed elements."""
        return x in self._elements

    def get_elements(self) -> Iterable[T]:
        """Return the elements of this space."""
        return self._elements

    def sample(self) -> T:
        """Draw a random element via the Discrete sampler."""
        idx = super().sample()
        return self._elements[idx]

    def to_jsonable(self, sample_n: Iterable[T]) -> Sequence:
        """Elements are passed through unchanged (assumed JSON-serializable)."""
        return sample_n

    def from_jsonable(self, sample_n: Sequence) -> Iterable[T]:
        """Inverse of to_jsonable: identity."""
        return sample_n

    def unwrapped(self) -> gym_spaces.Discrete:
        """Unwrap the Gym Discrete space (gym.spaces.Discrete) and return it.

        # Returns
        The original Gym Discrete space created from the list.
        """
        return super().unwrapped()

    def to_unwrapped(self, sample_n: Iterable[T]) -> Iterable[int]:
        """Map elements to their indices in the backing list."""
        index_of = self._elements.index
        return [index_of(elem) for elem in sample_n]

    def from_unwrapped(self, sample_n: Iterable[int]) -> Iterable[T]:
        """Map indices back to elements of the backing list."""
        return [self._elements[i] for i in sample_n]
class DataSpace(GymSpace[T]):
    """Scikit-decide space for dataclass instances, backed by a Gym Dict space.

    Each dataclass field is described by one sub-space of the wrapped
    gym.spaces.Dict.

    !!! warning
        Using this class requires OpenAI Gym to be installed.
    """

    def __init__(
        self,
        data_class: type,
        spaces: Union[Dict[str, gym.Space], List[Tuple[str, gym.Space]]],
    ) -> None:
        """Initialize DataSpace.

        # Parameters
        data_class: The dataclass for creating the Gym Dict space (gym.spaces.Dict) to wrap.
        spaces: The spaces description passed to the created Dict space (see gym.spaces.Dict constructor documentation).

        # Example
        ```python
        from skdecide.wrappers.space import DataSpace

        @dataclass(frozen=True)
        class Action:
            position: int
            velocity: int

        my_action_space = DataSpace(Action, {"position": gym.spaces.Discrete(2), "velocity": gym.spaces.Discrete(3)})
        ```
        """
        self._data_class = data_class
        super().__init__(gym_spaces.Dict(spaces))

    def contains(self, x: T) -> bool:
        """Check membership after a jsonable round-trip.

        The round-trip tolerates fields that were recast on construction
        (e.g. a numpy 0-dimensional array turned into a scalar).
        """
        (normalized,) = super().from_jsonable(self.to_jsonable([x]))
        return super().contains(normalized)

    def sample(self) -> T:
        """Sample the wrapped Dict space and build a dataclass instance."""
        # TODO: convert to simple types (get rid of ndarray created by gym dict space...)?
        field_values = super().sample()
        return self._data_class(**field_values)

    def to_jsonable(self, sample_n: Iterable[T]) -> Sequence:
        """Serialize dataclass samples via their dict representation."""
        return super().to_jsonable(self.to_unwrapped(sample_n))

    def from_jsonable(self, sample_n: Sequence) -> Iterable[T]:
        """Deserialize dicts back into dataclass instances."""
        return self.from_unwrapped(super().from_jsonable(sample_n))

    def unwrapped(self) -> gym_spaces.Dict:
        """Unwrap the Gym Dict space (gym.spaces.Dict) and return it.

        # Returns
        The original Gym Dict space created from the dataclass.
        """
        return super().unwrapped()

    def to_unwrapped(self, sample_n: Iterable[T]) -> Iterable[Dict]:
        """Convert dataclass instances to plain dicts."""
        return [asdict(instance) for instance in sample_n]

    def from_unwrapped(self, sample_n: Iterable[Dict]) -> Iterable[T]:
        """Rebuild dataclass instances from plain dicts."""
        # TODO: convert to simple types (get rid of ndarray created by gym dict space...)?
        return [self._data_class(**field_dict) for field_dict in sample_n]
| StarcoderdataPython |
1944119 | <filename>RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/interface/gre/gre.py
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Gre(Base):
    """The Gre class encapsulates a required gre node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the Gre property from a parent instance.
    The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
    """

    # NOTE(review): this class follows the ixnetwork_restpy generated-binding
    # pattern — each property delegates to Base._get_attribute /
    # Base._set_attribute using the REST attribute name as the key. If it is
    # generator output, fixes are best made in the generator, not by hand.
    _SDM_NAME = 'gre'  # node name within the IxNetwork SDM hierarchy

    def __init__(self, parent):
        # Base wires up the parent so REST requests resolve the full node path.
        super(Gre, self).__init__(parent)

    @property
    def Dest(self):
        """Part of the GRE Delivery Header: The IP address of the Destination router at the remote end of the GRE tunnel.

        Returns:
            str
        """
        return self._get_attribute('dest')
    @Dest.setter
    def Dest(self, value):
        """Set the destination IP address of the GRE tunnel."""
        self._set_attribute('dest', value)

    @property
    def InKey(self):
        """This is the user-assigned GRE header authentication key value that the receiving router will check for to validate GRE packets being sent via the tunnel. All packets sent via a specific tunnel should contain the same key value (one key per GRE tunnel).

        Returns:
            number
        """
        return self._get_attribute('inKey')
    @InKey.setter
    def InKey(self, value):
        """Set the expected (inbound) GRE key value."""
        self._set_attribute('inKey', value)

    @property
    def OutKey(self):
        """This is the user-assigned GRE header authentication key value that will be included in the GRE packets being sent via the tunnel. All packets sent via a specific tunnel should contain the same key value (one key per GRE tunnel). In most cases, the In Key and Out Key will be the same.

        Returns:
            number
        """
        return self._get_attribute('outKey')
    @OutKey.setter
    def OutKey(self, value):
        """Set the outbound GRE key value."""
        self._set_attribute('outKey', value)

    @property
    def Source(self):
        """Part of the GRE Delivery Header: The IP address of the connected interface associated with the source of this GRE tunnel.

        Returns:
            str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=ipv4|/api/v1/sessions/1/ixnetwork/vport?deepchild=ipv6)
        """
        return self._get_attribute('source')
    @Source.setter
    def Source(self, value):
        """Set the source interface reference (an ipv4/ipv6 node href)."""
        self._set_attribute('source', value)

    @property
    def UseChecksum(self):
        """Enables the use of the optional GRE checksum.

        Returns:
            bool
        """
        return self._get_attribute('useChecksum')
    @UseChecksum.setter
    def UseChecksum(self, value):
        """Enable or disable the optional GRE checksum field."""
        self._set_attribute('useChecksum', value)

    @property
    def UseKey(self):
        """Enables the use of the optional GRE header key field.

        Returns:
            bool
        """
        return self._get_attribute('useKey')
    @UseKey.setter
    def UseKey(self, value):
        """Enable or disable the optional GRE key field."""
        self._set_attribute('useKey', value)

    @property
    def UseSequence(self):
        """If more than one GRE tunnel will be used, this is the amount that will be added to create each additional authentication key value to be sent in the GRE packets (one key per GRE tunnel).

        Returns:
            bool
        """
        # NOTE(review): the docstring above looks copy-pasted from a key-increment
        # field while the return type is bool; presumably this enables the optional
        # GRE sequence-number field — confirm against the IxNetwork API docs.
        return self._get_attribute('useSequence')
    @UseSequence.setter
    def UseSequence(self, value):
        """Enable or disable the optional GRE sequence handling."""
        self._set_attribute('useSequence', value)
''' Make a table to keep track of the maximum path length from each node.
Apply dfs on each node and collect the max length from all four sides.
Increment by 1 while returning and store the result in the table so that
next time it can be fetched directly from the table. '''
class Solution:
    def __init__(self):
        # Kept for backward compatibility; both fields are reset on every call
        # to longestIncreasingPath so an instance can be reused safely (the
        # original kept stale cache entries between calls).
        self.max_len = 0
        self.table = {}

    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        """Return the length of the longest strictly increasing path in matrix.

        A path may move up/down/left/right, never diagonally. Each cell's best
        path length is memoized in self.table: it does not depend on how the
        cell was entered, only on the cell itself, so the cache is valid for
        every entry point.
        """
        if not matrix or not matrix[0]:
            return 0
        # Reset per-call state so repeated calls on one instance are correct.
        self.max_len = 0
        self.table = {}
        rows, cols = len(matrix), len(matrix[0])

        def dfs(x, y, prev):
            # Out of bounds, or not strictly increasing: path ends here.
            if x < 0 or x >= rows or y < 0 or y >= cols or matrix[x][y] <= prev:
                return 0
            if (x, y) in self.table:
                return self.table[(x, y)]
            cur = matrix[x][y]
            path = 1 + max(
                dfs(x + 1, y, cur),
                dfs(x - 1, y, cur),
                dfs(x, y + 1, cur),
                dfs(x, y - 1, cur),
            )
            self.max_len = max(self.max_len, path)
            self.table[(x, y)] = path
            return path

        for i in range(rows):
            for j in range(cols):
                # -inf (not a magic sentinel like -10000) so arbitrarily
                # negative cell values are still valid path starts.
                dfs(i, j, float('-inf'))
        return self.max_len
# Time: O(MN)
# Space: O(MN)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.