text string | size int64 | token_count int64 |
|---|---|---|
from pw_manager.utils import utils, constants
from pw_manager.ui.settings.color import color
from YeetsMenu.menu import Menu
from YeetsMenu.option import Option
def show():
    """Clear the screen and run the interactive settings menu."""
    utils.clear_screen()
    settings_menu = Menu(utils.get_noice_text("Settings"), colors=constants.colors)
    settings_menu.add_selectable(Option("Color settings", color.show, skip_enter_confirmation=True))
    settings_menu.run()
| 386 | 122 |
# -*- coding: utf-8 -*-
import datetime
from difflib import SequenceMatcher
import re
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.core.urlresolvers import reverse
from django.conf import settings
import django.dispatch
from django.template.defaultfilters import slugify
from tagging.fields import TagField
from bundestagger.helper.utils import padd_zeros, invalidate_cache, invalidate_cache_all_pages, get_page
FRAKTIONEN = (u"fraktionslos", u"CDU/CSU", u"BÜNDNIS 90/DIE GRÜNEN", u"FDP", u"SPD", u"DIE LINKE")
class PartyManager(models.Manager):
    """Manager for Party with fuzzy abbreviation matching and an id cache."""
    # Process-wide id -> Party cache (class attribute, shared by all
    # manager instances for the lifetime of the process).
    cache = {}
    def get_party(self, name="", abbr=None):
        """Return the Party for a (possibly messy) faction abbreviation.

        Normalises common variants to the canonical names in FRAKTIONEN,
        creating the Party row on first use.  Returns None when the
        abbreviation cannot be matched.
        """
        if abbr is None:
            return None
        if abbr not in FRAKTIONEN:
            # Substring tests sidestep umlaut/encoding differences in
            # scraped input (e.g. "LINKE" vs "DIE LINKE").
            if u"INKE" in abbr:
                abbr = u"DIE LINKE"
            if u"CDU" in abbr or u"CSU" in abbr:
                abbr = u"CDU/CSU"
            if u"NDNIS" in abbr:
                abbr = u"BÜNDNIS 90/DIE GRÜNEN"
            if u"raktionslos" in abbr:
                abbr = u"fraktionslos"
        if abbr in FRAKTIONEN:
            try:
                party = self.get(abbr=abbr)
            except self.model.DoesNotExist:
                # First sighting of this faction: create it on the fly.
                party = self.create(name=name, abbr=abbr)
            return party
        return None
    def find_parties_in_text(self, text, force=False):
        """Return Party objects for all factions mentioned in *text*.

        A mention directly adjacent to a "]" (or "[") is treated as an
        already-annotated reference and skipped unless *force* is True.
        """
        abbrs = []
        if u"INKE" in text and (not u"INKE]" in text or force):
            abbrs.append(u"DIE LINKE")
        if u"CDU" in text and (not u"[CDU" in text or force) or \
            (u"CSU" in text and (not u"CSU]" in text or force)):
            abbrs.append(u"CDU/CSU")
        if u"NDNIS" in text and (not u"ÜNEN]" in text or force):
            abbrs.append(u"BÜNDNIS 90/DIE GRÜNEN")
        if u"raktionslos" in text and (not u"raktionslos]" in text or force):
            abbrs.append(u"fraktionslos")
        if u"SPD" in text and (not u"[SPD]" in text or force):
            abbrs.append(u"SPD")
        if u"FDP" in text and (not u"[FDP]" in text or force):
            abbrs.append(u"FDP")
        parties = []
        for abbr in abbrs:
            parties.append(self.get_party(abbr=abbr))
        return parties
    def get(self, *args, **kwargs):
        """get() with an in-memory cache in front of the ORM for id lookups."""
        if "id" in kwargs:
            if kwargs["id"] in self.__class__.cache:
                return self.__class__.cache[kwargs["id"]]
            else:
                obj = super(PartyManager, self).get_query_set().get(*args, **kwargs)
                self.__class__.cache[obj.id] = obj
                return obj
        # Non-id lookups bypass the cache entirely.
        return super(PartyManager, self).get_query_set().get(*args, **kwargs)
class Party(models.Model):
    """A parliamentary faction (Fraktion) of the Bundestag."""
    name = models.CharField(blank=True, max_length=100)
    abbr = models.CharField(blank=True, max_length=30)
    objects = PartyManager()
    # NOTE(review): colors are keyed by primary key, so this mapping only
    # holds if the six factions keep database ids 1-6 — confirm fixtures.
    color_mapping = {
        1 : "#000000",
        2 : "#F1AB00",
        3 : "#D71F1D",
        4 : "#BE3075",
        5 : "#78BC1B",
        6 : "#999999"
    }
    def __unicode__(self):
        return self.abbr
    @property
    def color(self):
        """Hex display color for this party, looked up by primary key."""
        return self.color_mapping[self.id]
class PoliticianManager(models.Manager):
    """Manager helpers for merging duplicate Politician records."""

    def replace_politician(self, real, fake):
        """Repoint all relations of the politician with id *fake* onto the
        one with id *real*, then delete the duplicate record.
        """
        keep = Politician.objects.get(id=real)
        duplicate = Politician.objects.get(id=fake)
        duplicate.replace_with(keep)
        duplicate.delete()
class Politician(models.Model):
    """A person who speaks in the Bundestag."""
    first_name = models.CharField(blank=True, max_length=100)
    last_name_prefix = models.CharField(blank=True, max_length=100)
    last_name = models.CharField(blank=True, max_length=100)
    title = models.CharField(blank=True, max_length=100)
    party = models.ForeignKey(Party, blank=True, null=True)
    # party_member = models.OneToOneField(PartyMember, blank=True, null=True)
    location = models.CharField(null=True, blank=True, max_length=255)
    born = models.DateField(null=True, blank=True)
    objects = PoliticianManager()
    def __unicode__(self):
        # Lookup via Party.objects.get(id=...) goes through PartyManager's
        # id cache instead of self.party (which would hit the DB).
        if self.party_id is not None:
            party = Party.objects.get(id=self.party_id)
            return u"%s (%s)" % (self.name, party)
        else:
            return u"%s (unbekannt)" % (self.name)
    @property
    def slug(self):
        """URL slug derived from the full display name."""
        return slugify(self.name)
    def get_absolute_url(self):
        return reverse("bundestagger-bundestag-show_politician", args=(self.slug, self.id))
    @property
    def name(self):
        """Full display name: "<title> <first> <prefix> <last>"."""
        if len(self.title):
            title = self.title + u" "
        else:
            title= u""
        if len(self.first_name):
            first_name = self.first_name + u" "
        else:
            first_name = u""
        if len(self.last_name_prefix):
            last_name_prefix = self.last_name_prefix + u" "
        else:
            last_name_prefix = u""
        return "%s%s%s%s" % (title, first_name, last_name_prefix,self.last_name)
    def all_relations(self):
        """Return every related object from all reverse "*_set" relations."""
        relations = [x for x in dir(self) if x.endswith("_set")]
        result = []
        for relation in relations:
            result.extend(getattr(self, relation).all())
        return result
    def replace_with(self, real_politician):
        """Repoint every object referencing self to *real_politician*.

        Used when merging duplicates: walks all reverse relation sets,
        finds each FK field on the related model that targets Politician,
        and rewrites it.  Does not delete self.
        """
        relations = [x for x in dir(self) if x.endswith("_set")]
        for relation in relations:
            relset = getattr(self, relation).all()
            if len(relset) > 0:
                politician_fields = []
                for obj in relset:
                    fields = obj.__class__._meta.fields
                    for field in fields:
                        try:
                            # field.rel is None for non-FK fields, which
                            # raises AttributeError on .to — skip those.
                            if field.rel.to.__name__ == self.__class__.__name__:
                                politician_fields.append(field.name)
                        except AttributeError:
                            pass
                    if len(politician_fields)>0:
                        for pf in politician_fields:
                            if getattr(obj, pf) == self:
                                setattr(obj, pf, real_politician)
                                obj.save()
class Parliament(models.Model):
    """A parliament (legislative term), identified by its running number."""
    number = models.IntegerField()
    # Callable defaults: evaluated at save time, not at class definition.
    start = models.DateField(default=datetime.datetime.today, null=True, blank=True)
    end = models.DateField(default=datetime.datetime.today, null=True, blank=True)
    def __unicode__(self):
        return u"Parliament %d" % (self.number,)
class MemberOfParliament(models.Model):
    """Membership of a politician in a specific parliament."""
    politician = models.ForeignKey(Politician)
    parliament = models.ForeignKey(Parliament)
    position = models.CharField(null=True, blank=True, max_length=255)
    location = models.CharField(blank=True, max_length=100)
    # NOTE(review): semantics of `direct` unclear from here (share of a
    # direct mandate?) — confirm against the importer.
    direct = models.FloatField(null=True, blank=True, default=None)
    class Meta:
        # One membership row per politician per parliament.
        unique_together = (("politician", "parliament"),)
    def __unicode__(self):
        return "%s (%s)" % (self.politician, self.parliament)
class ParliamentSessionManager(models.Manager):
    """Ordering helpers for ParliamentSession querysets."""
    def reversed(self):
        # Newest session first.
        return self.order_by("-number")
    def ordered(self):
        # Oldest session first.
        return self.order_by("number")
class ParliamentSession(models.Model):
    """One sitting (Sitzung) of a parliament."""
    parliament = models.ForeignKey(Parliament)
    number = models.PositiveSmallIntegerField(db_index=True)
    date = models.DateTimeField()
    until = models.DateTimeField()
    checked = models.BooleanField(default=False)
    # Comma-separated tag list, aggregated from this session's speeches.
    tags = models.TextField(blank=True)
    objects = ParliamentSessionManager()
    def __unicode__(self):
        return u"%d. Sitzung (%s) des %d. Deutschen Bundestags" % (self.number, self.date.strftime("%d.%m.%Y"), self.parliament.number)
    def get_absolute_url(self):
        return reverse("bundestagger-bundestag-show_session", args=(self.parliament.number, self.number))
    @property
    def tag_list(self):
        """Tags as a list of non-empty, stripped strings."""
        return [tag.strip() for tag in self.tags.split(",") if len(tag.strip())>0]
    def update_tags(self,new_tags):
        """Union *new_tags* (comma-separated) into this session's tags, save.

        NOTE(review): joining a set makes the stored tag order
        non-deterministic across saves.
        """
        tag_set = set([t.strip() for t in self.tags.split(",") if len(t.strip())>0])
        tag_set.update([t.strip() for t in new_tags.split(",") if len(t.strip())>0])
        self.tags = ",".join(tag_set)
        self.clear_cache()
        self.save()
    @property
    def document_url(self):
        """URL of the official stenographic protocol PDF (DIP)."""
        return "http://dip21.bundestag.de/dip21/btp/%(leg)d/%(leg)d%(id)s.pdf" % {"leg":self.parliament.number, "id": padd_zeros(self.number)}
    def clear_cache(self):
        """Invalidate the session list cache and all pages of this session."""
        invalidate_cache(reverse("bundestagger-bundestag-list_sessions"))
        # NOTE(review): raises IndexError when the session has no speeches.
        last_speech = self.speech_set.order_by("-ordernr")[0]
        url = last_speech.get_base_url()
        invalidate_cache_all_pages(url, last_speech.ordernr, settings.SPEECHES_PER_PAGE)
class SpeechManager(models.Manager):
    def merge(self):
        """Collapse consecutive speech parts by the same speaker into one.

        NOTE(review): as written, ``previous_parts`` is never appended to
        and ``ordernr`` is never advanced, so the merge branch can never
        fire — this method looks unfinished.  Confirm before relying on it.
        """
        for speech in self.all():
            ordernr = 1
            previous_parts = []
            for speech_part in speech.speechpart_set.all():
                # A run ends when the speaker changes or the previous part
                # carries interjection events.
                if len(previous_parts) > 1 and \
                    (speech_part.speaker != previous_parts[-1].speaker or previous_parts[-1].event_set.count() > 0):
                    text = u""
                    for p in previous_parts:
                        text += p.text
                    # Keep the last part; fold all accumulated text into it.
                    previous_parts[-1].text = text
                    previous_parts[-1].ordernr = ordernr
                    previous_parts[-1].save()
                    for p in previous_parts[:-1]:
                        p.delete()
                    previous_parts = []
class Speech(models.Model):
    """A single speech within a parliament session."""
    session = models.ForeignKey(ParliamentSession)
    # Position of this speech within its session (drives paging).
    ordernr = models.IntegerField()
    speaker = models.ForeignKey(Politician)
    start = models.DateTimeField(blank=True, default=None, null=True)
    end = models.DateTimeField(blank=True, default=None, null=True)
    # Clip id in the Bundestag web TV archive, if any.
    webtv = models.IntegerField(blank=True, null=True)
    tags = TagField(blank=True)
    def save(self, *args, **kwargs):
        # Propagate this speech's tags up to the session on every save.
        super(Speech,self).save(*args, **kwargs)
        self.session.update_tags(self.tags)
    def __unicode__(self):
        return u"Rede von %s in der %s" % (self.speaker, self.session)
    def text(self):
        """Full speech text: all parts concatenated in reading order."""
        return u"\n".join(map(lambda x:x.text, self.speechpart_set.order_by("ordernr")))
    @property
    def tag_list(self):
        """Tags as a list of non-empty, stripped strings."""
        return [tag.strip() for tag in self.tags.split(",") if len(tag.strip())>0]
    @property
    def webtv_url(self):
        """Direct player URL for the web TV clip, or u"" when there is none."""
        if self.webtv is not None:
            return u"http://webtv.bundestag.de/iptv/player/macros/_v_f_514_de/od_player.html?singleton=true&content=%d" % (self.webtv,)
        return u""
    @property
    def webtv_search(self):
        """Web TV archive search URL for this speaker and session."""
        url = u"http://webtv.bundestag.de/iptv/player/macros/bttv/list.html?pageOffset=0&pageLength=40&sort=2&lastName=%(lastname)s&firstName=%(firstname)s&fraction=&meetingNumber=%(session)s&period=%(period)s&startDay=&endDay=&topic=&submit=Suchen"
        return url % { "firstname": self.speaker.first_name,
                       "lastname": self.speaker.last_name,
                       "session": self.session.number,
                       "period": self.session.parliament.number
                       }
    def get_base_url(self):
        return reverse("bundestagger-bundestag-show_session", args=(self.session.parliament.number, self.session.number))
    def get_absolute_url(self):
        # Deep link: page query-string within the session plus an anchor.
        page = get_page(self.ordernr, settings.SPEECHES_PER_PAGE)
        if page == 1:
            page = ""
        else:
            page = "?page=%d" % page
        fragment = "#speech-%d" % self.id
        return self.get_base_url() + page + fragment
    def clear_cache(self):
        """Invalidate the cached session page this speech appears on."""
        url = self.get_base_url()
        invalidate_cache(url, self.ordernr, settings.SPEECHES_PER_PAGE)
class SpeechPartManager(models.Manager):
    def list(self):
        """All speech parts in reading order, with related rows prefetched."""
        return self.order_by("ordernr").select_related()
class SpeechPart(models.Model):
    """A contiguous chunk of a speech's text (the unit of annotation)."""
    speech = models.ForeignKey(Speech)
    ordernr = models.IntegerField()
    text = models.TextField(blank=True)
    objects = SpeechPartManager()
    # Fired by set_text() so listeners can react to text edits.
    changed_signal = django.dispatch.Signal(providing_args=["old_text", "new_text"])
    # Marks this model as a target for the annotation machinery.
    annotatable = True
    def __unicode__(self):
        return u"Teil %d der %s" % (self.ordernr, self.speech)
    def append_text(self, text):
        """Append *text*, joining hyphenated line breaks; save and return it."""
        text = text.strip()
        if self.text == u"":
            self.text = text
        else:
            if self.text.endswith("-"):
                # Continue a word hyphenated across lines: drop the hyphen.
                self.text = self.text[:-1] + text
            else:
                self.text = self.text+ " " + text
        self.save()
        return self.text
    def set_text(self, text):
        """Replace the text, emit changed_signal, and invalidate caches."""
        old_text = self.text
        self.text = text
        self.save()
        if hasattr(self.__class__, "changed_signal"):
            self.__class__.changed_signal.send(sender=self, old_text=old_text, new_text=self.text)
        self.clear_cache()
    def get_absolute_url(self):
        if self.ordernr == 1:
            return self.speech.get_absolute_url()
        else:
            # Parts after the first get a sub-anchor: "#speech-<id>-<part>".
            return self.speech.get_absolute_url()+"-%d" % self.ordernr
    def clear_cache(self):
        self.speech.clear_cache()
    @classmethod
    def connected_update(cls, sender, **kwargs):
        """Signal receiver: invalidate caches of an annotated content object."""
        co = kwargs["content_object"]
        co.clear_cache()
class SessionTopManager(models.Manager):
    def get_tops(self, session):
        """Agenda items of *session*, in order.

        Only entries whose title starts with "Tagesordnungspunkt" or
        "Zusatztagesordnungspunkt" count as agenda items.
        """
        return self.filter(session=session).filter(Q(title__startswith="Tagesordnungspunkt")|Q(title__startswith="Zusatztagesordnungspunkt")).order_by("ordernr")
class SessionTop(models.Model):
    """An agenda item (TOP) of a parliament session."""
    session = models.ForeignKey(ParliamentSession)
    ordernr = models.IntegerField()
    title = models.TextField(blank=True)
    about = models.TextField(blank=True)
    objects = SessionTopManager()
    def __unicode__(self):
        return u"%s: %s" % (self.session, self.title)
class TopSpeaker(models.Model):
    """Links a politician appearing as speaker to an agenda item."""
    top = models.ForeignKey(SessionTop)
    speaker = models.ForeignKey(Politician)
    def __unicode__(self):
        return u"%s: %s(%s) - %s" % (self.speaker, self.top, self.top_id, self.id)
class Attachment(models.Model):
    """An attachment (Anlage) to a session protocol."""
    session = models.ForeignKey(ParliamentSession)
    attachmentnr = models.IntegerField(blank=True, null=True)
    about = models.TextField(blank=True)
    text = models.TextField(blank=True)
    def __unicode__(self):
        return u"%s: %s" % (self.session, self.about)
class Poll(models.Model):
    """A recorded vote taken during a session, with aggregate counts."""
    session = models.ForeignKey(ParliamentSession)
    about = models.TextField(blank=True)
    infavor = models.IntegerField(blank=True, null=True)
    against = models.IntegerField(blank=True, null=True)
    abstinent = models.IntegerField(blank=True, null=True)
    def __unicode__(self):
        return u"%s: %s(%s)" % (self.session, self.about[:10], self.id)
class PollVote(models.Model):
    """A single politician's vote in a poll."""
    # Vote encoding (original note): yes = 1, no = 0,
    # abstention = -1 or 2!? — inconsistent in the source data.
    poll = models.ForeignKey(Poll)
    vote = models.SmallIntegerField()
    politician = models.ForeignKey(Politician)
    def __unicode__(self):
        return u"%s: %s (%s - %d)" % (self.poll, self.politician, self.poll_id, self.vote)
class Event(models.Model):
    """An interjection recorded in the protocol.

    Kinds seen in the data: Zwischenruf, Buhruf, Heiterkeit, Beifall.
    """
    context = models.ForeignKey(SpeechPart, null=True, blank=True)
    kind = models.CharField(blank=True, max_length=100)
    actor_politician = models.ForeignKey(Politician, null=True, blank=True)
    actor_party = models.ForeignKey(Party, null=True, blank=True)
    text = models.TextField(blank=True, null=True)
    def __unicode__(self):
        return u"%s: %s (%s - %s @ %s)" % (self.kind, self.actor, self.text, self.id, self.context_id)
    @property
    def actor(self):
        """The acting politician if known, else the acting party (may be None)."""
        if self.actor_politician is not None:
            return self.actor_politician
        else:
            return self.actor_party
    def display(self):
        """Human-readable rendering: "[kind] actor: text".

        NOTE(review): on failure this returns the exception object itself
        instead of raising or returning a string — looks unintended.
        """
        out = u""
        try:
            if self.kind != "":
                out+=u"[%s] " % self.kind
            if self.actor is not None:
                out+=u"%s" % self.actor
            if self.text is not None and len(self.text)>0:
                if self.actor is not None or self.kind !="":
                    out+=": "
                out+=self.text
        except Exception,e:
            return e
        return out
"""
https://en.wikipedia.org/wiki/Combinatorial_number_system
The code below is no longer used but am saving it for a rainy day
"""
# standard libraries
import math
from typing import List
def choose(a: int, b: int) -> int:
    """Return the binomial coefficient C(a, b), or 0 for out-of-range b.

    Uses integer floor division: the original float division lost
    precision once the result exceeded 2**53 (e.g. choose(60, 30)).

    >>> choose(23, 8)
    490314
    >>> choose(9, 6)
    84
    >>> choose(8, 5)
    56
    >>> choose(4, 4)
    1
    >>> choose(3, 4)
    0
    >>> choose(0, 1)
    0
    >>> choose(7, -1)
    0
    """
    if b < 0 or b > a:
        return 0
    if b == a:
        return 1
    # Exact integer arithmetic — no float round-trip.
    return math.factorial(a) // (math.factorial(b) * math.factorial(a - b))
def encode(perm: List[int]) -> int:
    """Return the combinadic rank of a strictly decreasing sequence.

    >>> encode([11, 10, 9, 8, 3, 2, 1, 0])
    425
    >>> encode([7, 6, 5, 4, 3, 2, 1, 0])
    0
    """
    size = len(perm)
    # Element at position i contributes C(perm[i], size - i).
    return sum(choose(value, size - index) for index, value in enumerate(perm))
def decode(n: int, k: int, start: int) -> List[int]:
    """Invert encode(): recover the k-element decreasing sequence of rank n.

    Greedily takes the largest c < start with C(c, k) <= n.

    >>> decode(0, 8, 24)
    [7, 6, 5, 4, 3, 2, 1, 0]
    >>> decode(425, 8, 24)
    [11, 10, 9, 8, 3, 2, 1, 0]
    """
    result: List[int] = []
    candidate = start - 1
    while candidate >= 0:
        contribution = choose(candidate, k)
        if contribution <= n:
            n -= contribution
            k -= 1
            result.append(candidate)
        candidate -= 1
    return result
def state_to_list(state: str) -> List[int]:
    """Return the indices of non-'x' characters, largest first.

    >>> state_to_list('xxLL')
    [3, 2]
    >>> state_to_list('LLxx')
    [1, 0]
    >>> state_to_list('LxLx')
    [2, 0]
    >>> state_to_list('xLxL')
    [3, 1]
    """
    occupied = [position for position, char in enumerate(state) if char != "x"]
    occupied.sort(reverse=True)
    return occupied
def state_to_rank(state: str) -> int:
    """Rank of a board state in the combinatorial number system.

    >>> state_to_rank('xxLL')
    5
    >>> state_to_rank('LLxx')
    0
    >>> state_to_rank('LxLx')
    1
    >>> state_to_rank('xLxL')
    4
    """
    return encode(state_to_list(state))
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    # standard libraries
    import doctest
    doctest.testmod()
| 2,181 | 912 |
# Sum the harmonic series 1/1 + 1/2 + ... + 1/50000 in ascending (s1)
# and descending (s2) order of n.  The two results differ slightly
# because floating-point addition is not associative: summing the small
# terms first (s2) loses less precision.
s1 = 0
s2 = 0
for n in range(1, 50001):
    s1 = s1 + 1 / (n * 1.0)
for m in range(50000, 0, -1):
    s2 = s2 + 1 / (m * 1.0)
print('s1=' + str(s1))
print('s2=' + str(s2))
| 149 | 107 |
"""
Leetcode(https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
)"""
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    """Rebuild a binary tree from its preorder and inorder traversals.

    Assumes both traversals describe the same tree and contain no
    duplicate values (as in the LeetCode problem statement).
    """

    def buildTree(self, preorder, inorder):
        """Return the root of the reconstructed tree, or None when empty.

        The first preorder element is the root; its position in the
        inorder sequence splits both lists into left and right subtrees.
        """
        if not preorder:
            return None
        root = TreeNode(preorder[0])
        # list.index replaces the original manual scan; on malformed
        # input it raises ValueError instead of silently mis-slicing.
        root_idx = inorder.index(root.val)
        root.left = self.buildTree(preorder[1:root_idx + 1], inorder[:root_idx])
        root.right = self.buildTree(preorder[root_idx + 1:], inorder[root_idx + 1:])
        return root
| 835 | 273 |
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import os
import sys
# swarming/ — absolute path of the directory containing this script.
APP_DIR = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
def init_symlinks(root):
    """Adds support for symlink-as-file on Windows.

    Manually resolves symlinks in path for directory and add them to sys.path.
    """
    if sys.platform != 'win32':
        return
    for entry in os.listdir(root):
        # Candidate symlink stubs have no extension; skip everything else.
        if '.' in entry:
            continue
        candidate = os.path.join(root, entry)
        if not os.path.isfile(candidate):
            continue
        # Found a file instead of a symlink to a directory — on Windows the
        # checkout stores the link target as the file's (single-line) content.
        with open(candidate) as stub:
            target = stub.read()
        if '\n' in target:
            continue
        resolved = os.path.normpath(os.path.join(root, target))
        # This is not exactly right but close enough.
        sys.path.insert(0, os.path.dirname(resolved))
def setup_test_env():
    """Sets up App Engine test environment."""
    # For application modules.
    sys.path.insert(0, APP_DIR)
    init_symlinks(APP_DIR)
    # TODO(maruel): Remove.
    sys.path.insert(0, os.path.join(APP_DIR, 'components', 'third_party'))
    # Deferred import: test_support only resolves after the path setup above.
    from test_support import test_env
    test_env.setup_test_env()
| 1,292 | 447 |
"""
Send a template with some default data.
"""
from jinja2 import TemplateNotFound
from tiddlywebplugins.virtualhosting import original_server_host_url
from tiddlyweb import control
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.store import StoreError
from tiddlywebplugins.templates import get_template
from tiddlyweb.web.util import server_base_url, encode_name
from tiddlywebplugins.tiddlyspace.web import (determine_space,
determine_space_recipe, determine_host)
CUSTOMIZABLES = ['friendlytiddler.html', 'friendlytiddlers.html',
'search.html']
def send_template(environ, template_name, template_data=None):
    """
    Set some defaults for a template and send the output.

    Looks the template up under the space's custom html-template prefix
    first (falling back to the stock template), collects per-space CSS and
    JavaScript override tiddlers, and returns the generated output.
    """
    default_css_tiddler = '/bags/common/tiddlers/profile.css'
    if template_data is None:
        template_data = {}
    # A space may configure a custom html template set; templates and the
    # default css are then looked up under that prefix first.
    html_template_prefix = environ['tiddlyweb.space_settings']['htmltemplate']
    if html_template_prefix:
        default_css_tiddler = ('/bags/common/tiddlers/%s.css' %
                html_template_prefix)
        html_template_prefix += '/'
        try:
            name = html_template_prefix + template_name
            template = get_template(environ, name)
        except TemplateNotFound:
            # No prefixed override: fall back to the stock template.
            template = get_template(environ, template_name)
    else:
        template = get_template(environ, template_name)
    store = environ['tiddlyweb.store']
    # Tiddler titles whose contents provide extra css/js for the page.
    linked_resources = {
            'HtmlCss': [],
            'HtmlJavascript': []}
    if not html_template_prefix or template_name in CUSTOMIZABLES:
        linked_resources['HtmlCss'] = [default_css_tiddler]
    # Load CSS and JavaScript overrides.
    current_space = determine_space(environ, determine_host(environ)[0])
    if current_space:
        recipe_name = determine_space_recipe(environ, current_space)
        try:
            recipe = store.get(Recipe(recipe_name))
            for title in linked_resources:
                try:
                    tiddler = Tiddler(title)
                    bag = control.determine_bag_from_recipe(recipe,
                            tiddler, environ)
                    tiddler.bag = bag.name
                    try:
                        tiddler = store.get(tiddler)
                        if 'Javascript' in title:
                            # The tiddler body lists one script URL per line.
                            url_content = tiddler.text.strip()
                            if url_content:
                                urls = url_content.split('\n')
                                linked_resources[title] = urls
                        else:
                            url = '/bags/%s/tiddlers/%s' % (encode_name(
                                tiddler.bag), title)
                            linked_resources[title] = [url]
                    except StoreError:
                        # Override tiddler missing: keep defaults for this slot.
                        continue
                except StoreError:
                    pass
        except StoreError:
            # Recipe not found: no overrides for this space.
            pass
    template_defaults = {
        'original_server_host': original_server_host_url(environ),
        'css': linked_resources['HtmlCss'],
        'js': linked_resources['HtmlJavascript'],
        'server_host': server_base_url(environ),
    }
    template_defaults.update(template_data)
    return template.generate(template_defaults)
| 3,415 | 908 |
"""
@Author: yanzx
@Date: 2021-08-10 09:27:55
@Desc:
"""
import time
# Membership-test benchmark: build a million-entry list and a set with the
# same contents, then time `in` on each (list scan is O(n), set lookup O(1)).
li = [str(i) + "闫振兴" for i in range(1000000)]
li_s = set(li)
start_time1 = time.time()
if "100000闫振兴" in li:
    print(time.time() - start_time1)
start_time2 = time.time()
if "100000闫振兴" in li_s:
    print(time.time() - start_time2)
import pandas as pd
import numpy as np
import json
# Load a sample CSV, keep rows 1..19, and dump them as a list of
# per-row dicts via a JSON round-trip.
df =pd.read_csv("./testdata.csv")
df = df.iloc[1:20, :]
res = list(json.loads(df.to_json(orient='index')).values())
print(res)
data_array = np.array(df)
# then convert the array to a plain Python list
data_list =data_array.tolist()
# print(data_list)
| 588 | 304 |
# these functions help to detect mispronunciations using editops
#
#
# standard libs
import argparse
# third-party libs
import Levenshtein as lev
# local libs
from speech.utils.data_helpers import path_to_id
from speech.utils.io import read_data_json
def main(hypo_path:str, tgt_path:str, eval_phn_path:str):
    """This function will aim to detect mispronunciations of the `target_phn`
    in the predictions in `hypo_path` when compared with the reference in `phn_path`
    Args:
        hypo_path: path to model predictions
        tgt_path: path to phones the speaker should have said
        eval_phn_path: path to one-hot encoded labels of evaluation phonemes
    Notes:
        hypo_path file is formatted as:
            ay ih t (None-0)
            ao r dh ah t ay m (None-7)
            ay l iy v d uw (None-6)
        tgt_path file is formatted as:
            ay iy t
            p r ih t iy sh
            dh ah jh ih m
        eval_phn_path:
            id  l   r   dh  p   v
            00F931A9-6EA9-4233-85B4-94015A257352    1   0   0   0   1   0
            012C1AC5-13E0-4337-B6CC-BFD58A12A8BC    1   1   0   0   0   0
            054C13A4-9499-453F-90A0-950DA50C4576    1   0   1   0   0   0
    """
    # Parse predictions keyed by their numeric id from the trailing token.
    hypo_dict = {}
    with open(hypo_path, 'r') as hypo_f:
        for line in hypo_f:
            line = line.strip().split()
            phones = line[:-1]
            # line last element has format '(None-1)'
            hypo_id = int(line[-1].split('-')[1].replace(')', ''))
            hypo_dict[hypo_id] = phones
    # create mapping from record_id to hypo numerical ordering
    tsv_path = tgt_path.replace(".phn", ".tsv")
    id_to_order = {}
    with open(tsv_path, 'r') as tsv_f:
        # Skip the tsv header line.
        _ = next(tsv_f)
        for i, line in enumerate(tsv_f):
            sub_path = line.strip().split('\t', maxsplit=1)[0]
            id_to_order[path_to_id(sub_path)] = i
    ord_to_eval_phns = read_eval_file(eval_phn_path, id_to_order)
    # Compare each reference line against the hypothesis with the same order.
    with open(tgt_path, 'r') as phn_f:
        for i, line in enumerate(phn_f):
            ref_phns = line.strip().split()
            hyp_phns = hypo_dict[i]
            edit_ops = get_editops(hyp_phns, ref_phns)
            try:
                rec_id, has_mispro, eval_phns = ord_to_eval_phns[i]
            except KeyError as e:
                print(f"Key error at index: {i} with line: {line}")
                raise e
            for eval_phn in eval_phns:
                print(f"record id: {rec_id}")
                print(f"evaluation phone: {eval_phn}")
                print(f"has mispro: {bool(has_mispro)}")
                print_editops(edit_ops, hyp_phns, ref_phns)
                mispro_detected = check_mispro(edit_ops, hyp_phns, ref_phns, eval_phn)
                print(f"mispro detected?: {mispro_detected}")
                print(f"detector is correct?: {has_mispro == mispro_detected}")
                print('\n\n')
def assess_from_json(eval_phn_path, ds_json_path):
    """Run the mispronunciation detector over predictions in a dataset json.

    Mirrors `main`, but reads hypothesis/reference phones from json records
    ('label' / 'prediction' keys) instead of separate w2v output files.
    """
    ds_preds = read_data_json(ds_json_path)
    rec_to_eval_phns = read_eval_file(eval_phn_path)
    for xmpl in ds_preds:
        ref_phns = xmpl['label']
        hyp_phns = xmpl['prediction']
        edit_ops = get_editops(hyp_phns, ref_phns)
        rec_id = path_to_id(xmpl['filename'])
        rec_id, has_mispro, eval_phns = rec_to_eval_phns[rec_id]
        for eval_phn in eval_phns:
            print(f"record id: {rec_id}")
            print(f"evaluation phone: {eval_phn}")
            print(f"has mispro: {bool(has_mispro)}")
            print_editops(edit_ops, hyp_phns, ref_phns)
            mispro_detected = check_mispro(edit_ops, hyp_phns, ref_phns, eval_phn)
            print(f"mispro detected?: {mispro_detected}")
            print(f"detector is correct?: {has_mispro == mispro_detected}")
            print('\n\n')
def read_eval_file(eval_phn_path:str, id_to_order:dict=None)->dict:
    """Read the eval-phn file describing per-record mispronunciations.

    Args:
        eval_phn_path: path to the tab-separated eval file; the first line
            is a whitespace-separated header whose columns from index 2
            onward name the evaluation phonemes
        id_to_order: optional mapping from record_id to a numeric ordering
            (used for w2v files); when given, the returned dict is keyed
            by ordering instead of record id

    Returns:
        dict: key -> (record_id, has_mispro, [evaluation phonemes])
    """
    rec_to_eval_phns = {}
    with open(eval_phn_path, 'r') as eval_f:
        # Header columns after the first two are phoneme names.
        phn_names = next(eval_f).strip().split()[2:]
        for raw_line in eval_f:
            cols = raw_line.strip().split('\t')
            rec_id = cols[0]
            has_mispro = int(cols[1])
            one_hot = [int(flag) for flag in cols[2:]]
            eval_phns = [phn_names[idx] for idx, flag in enumerate(one_hot) if flag == 1]
            key = id_to_order[rec_id] if id_to_order else rec_id
            rec_to_eval_phns[key] = (rec_id, has_mispro, eval_phns)
    return rec_to_eval_phns
def check_mispro(edit_ops, hyp_phns, ref_phns, target_phn):
    """Return True when the edit ops show *target_phn* was mispronounced."""
    hyp_phns, ref_phns = balance_phn_lengths(edit_ops, hyp_phns, ref_phns)
    for operation, src_idx, dst_idx in edit_ops:
        if target_phn not in ref_phns[dst_idx]:
            continue
        # Don't include delete operations when assessing mispronunciation.
        if operation == 'delete':
            continue
        # If the target phone also appears on the hypothesis side (e.g.
        # `r` replaced by `er`), it was not mispronounced.
        if target_phn in hyp_phns[src_idx]:
            continue
        return True
    return False
def balance_phn_lengths(edit_ops, s_phns, d_phns):
    """Pad phone lists with 'blank' so every editops index is addressable.

    Extends the lists in place and returns both (possibly padded) lists.
    """
    for _, src_idx, dst_idx in edit_ops:
        missing_src = src_idx - (len(s_phns) - 1)
        if missing_src > 0:
            s_phns.extend(['blank'] * missing_src)
        missing_dst = dst_idx - (len(d_phns) - 1)
        if missing_dst > 0:
            d_phns.extend(['blank'] * missing_dst)
    return s_phns, d_phns
def get_editops(hyp_phns, ref_phns):
    """Compute Levenshtein edit operations between two phone sequences.

    Each distinct phone is mapped to a single character so the sequences
    can be compared with the C-level string editops routine.  (The
    original also built an unused reverse char->phone map; removed.)
    """
    alphabet = sorted(set(hyp_phns + ref_phns))
    p2c = {ph: chr(65 + i) for i, ph in enumerate(alphabet)}
    hyp_chars = "".join([p2c[ph] for ph in hyp_phns])
    ref_chars = "".join([p2c[ph] for ph in ref_phns])
    return lev.editops(hyp_chars, ref_chars)
def print_editops(edit_ops, hyp_phns, ref_phns):
    """Pretty-print edit operations between hypothesis and reference phones."""
    print(f"hypos: {hyp_phns}")
    print(f"tgts: {ref_phns}")
    hyp_phns, ref_phns = balance_phn_lengths(edit_ops, hyp_phns, ref_phns)
    for operation, src_idx, dst_idx in edit_ops:
        try:
            rendered = '{:7} s[{}] --> d[{}] {!r:>8} --> {!r}'.format(
                operation, src_idx, dst_idx, hyp_phns[src_idx], ref_phns[dst_idx]
            )
            print(rendered)
        except IndexError as err:
            print("Index Error")
            print(operation, src_idx, dst_idx, hyp_phns, ref_phns)
            raise err
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=""
    )
    parser.add_argument(
        "--action", help="determines what function to call"
    )
    parser.add_argument(
        "--hypo-path", help="path to w2v predictions"
    )
    parser.add_argument(
        "--json-path", help="path to json prediction for deepspeech model"
    )
    parser.add_argument(
        "--phn-path", help="path to w2v predictions"
    )
    parser.add_argument(
        "--eval-phn-path", type=str, help="path to one-hot encoding for evaluation phonemes by utterance id"
    )
    args = parser.parse_args()
    # Empty action runs the w2v pipeline; "assess-from-json" evaluates
    # deepspeech predictions from a dataset json instead.
    if args.action == "":
        main(args.hypo_path, args.phn_path, args.eval_phn_path)
    elif args.action == "assess-from-json":
        assess_from_json(args.eval_phn_path, args.json_path)
| 7,953 | 2,833 |
#!/usr/bin/env python
"""
<Program Name>
runtests.py
<Author>
Santiago Torres <santiago@nyu.edu>
Lukas Puehringer <lukas.puehringer@nyu.edu>
<Started>
May 23, 2016
<Copyright>
See LICENSE for licensing information.
<Purpose>
Script to search, load and run in-toto tests using the Python `unittest`
framework.
"""
from unittest import defaultTestLoader, TextTestRunner
import sys
import os
import subprocess
def check_usable_gpg():
  """Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is
  available.
  """
  os.environ["TEST_SKIP_GPG"] = "1"
  for binary in ("gpg2", "gpg"):
    try:
      subprocess.check_call([binary, "--version"])
    except OSError:
      continue
    # Found a working gpg: clear the skip flag and stop probing.
    os.environ.pop("TEST_SKIP_GPG", None)
    break
# Probe for gpg before discovery so gpg-dependent tests can self-skip.
check_usable_gpg()
suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2, buffer=True).run(suite)
# Exit non-zero when any test failed so CI sees the failure.
sys.exit(0 if result.wasSuccessful() else 1)
| 1,057 | 387 |
from tqdm import tqdm
import h5py
from os.path import join
import os
import numpy as np
from pathlib import Path
import json
import numpy as np
MAXDEPTH = 10
def conv_to_json(dataset_root, path_to_npz_folder, output_dir):
    """Convert per-pair .npz keypoint-match files into JSON files.

    For every query/map pair, matched keypoints are looked up in the
    corresponding HDF5 depth maps and written out as (x, y, depth) triples.

    NOTE(review): json.dump(str(...)) stores the dict's repr as a single
    JSON string, not structured JSON — confirm downstream readers expect
    this before changing it.
    """
    root_datasets = Path(dataset_root).parent
    dataset_path = join(root_datasets, 'HPointLoc_dataset')
    pairs_npz = os.listdir(path_to_npz_folder)
    os.makedirs(output_dir, exist_ok = True)
    for pair_npz in tqdm(pairs_npz):
        npz = np.load(join(path_to_npz_folder, pair_npz))
        # Filename layout: <qfold>_<qcloud>_query_<qname>_<mfold>_<mcloud>_mapping_<mname>...
        q_fold, q_cloud, query, q_name = pair_npz.split('_')[:4]
        m_fold, m_cloud, mapping, m_name = pair_npz.split('_')[4:8]
        q = '_'.join(pair_npz.split('_')[:4])
        m = '_'.join(pair_npz.split('_')[4:8])
        q_cloud = q_fold + '_point' + q_cloud + '.hdf5'
        m_cloud = m_fold + '_point' + m_cloud + '.hdf5'
        hdf5_q_path = join(dataset_path, q_fold, q_cloud)
        hdf5_m_path = join(dataset_path, m_fold, m_cloud)
        q_file = h5py.File(hdf5_q_path, 'r')
        m_file = h5py.File(hdf5_m_path, 'r')
        depth_base = m_file['depth_base']
        depth = q_file['depth']
        # Depths are rescaled by MAXDEPTH here — presumably stored
        # normalised on disk; confirm against the dataset spec.
        q_depth = np.squeeze(depth[int(q_name)])*MAXDEPTH
        m_depth = np.squeeze(depth_base[int(m_name)])*MAXDEPTH
        q_coord_frame = []
        m_coord_frame = []
        # matches[kpt] == -1 marks an unmatched keypoint.
        for kpt in range(min(npz['keypoints1'].shape[0], npz['matches'].shape[0])):
            if npz['matches'][kpt] != -1:
                x_q, y_q = map(int, npz['keypoints0'][kpt])
                x_m, y_m = map(int, npz['keypoints1'][npz['matches'][kpt]])
                q_coord_frame.append((x_q, y_q, float(q_depth[y_q, x_q])))
                m_coord_frame.append((x_m, y_m, float(m_depth[y_m, x_m])))
        dictionary_kpt = {q: q_coord_frame, m:m_coord_frame}
        outpath = join(output_dir, q + '_' + m + '.json')
        with open(outpath, 'w') as outfile:
            json.dump(str(dictionary_kpt), outfile)
| 1,980 | 801 |
import speedtest
# Quick connection benchmark against the best nearby speedtest.net server.
# Lets test Zuku.
# Its getting frustrating now....
test = speedtest.Speedtest()
print("Loading server list...")
test.get_servers() # Get list of servers
print("Getting best server...")
best = test.get_best_server()
print(f"Found: {best['host']} located in : {best['country']}")
print("Performing download test..")
download_result = test.download()
print("Performing upload test..")
upload_result = test.upload()
print("Getting ping")
ping_result = test.results.ping
# Print all information.
print(f"Download speed: {download_result / 1024 / 1024:.2f} mb/s ") # Changing bits to mbs
print(f"Upload speed: {upload_result / 1024 / 1024:.2f} mb/s ")
print(f"Ping: {ping_result:.2f} ms")
| 709 | 257 |
from django_unicorn.settings import get_cache_alias, get_serial_enabled, get_settings
def test_settings_cache_alias(settings):
    """CACHE_ALIAS set under the UNICORN settings dict is returned."""
    settings.UNICORN["CACHE_ALIAS"] = "unicorn_cache"

    assert get_cache_alias() == "unicorn_cache"
def test_settings_legacy(settings):
    """The legacy DJANGO_UNICORN settings dict is still honoured."""
    settings.DJANGO_UNICORN = {"CACHE_ALIAS": "unicorn_cache"}

    assert get_cache_alias() == "unicorn_cache"
def test_get_serial_enabled(settings):
    """Serial mode requires ENABLED plus a non-dummy cache backend."""
    settings.UNICORN["SERIAL"]["ENABLED"] = False
    assert get_serial_enabled() is False

    settings.UNICORN["SERIAL"]["ENABLED"] = True
    assert get_serial_enabled() is True

    # A dummy cache backend disables serial mode even when ENABLED is True.
    settings.UNICORN["SERIAL"]["ENABLED"] = True
    settings.CACHES["unicorn_cache"] = {
        "BACKEND": "django.core.cache.backends.dummy.DummyCache"
    }
    settings.UNICORN["CACHE_ALIAS"] = "unicorn_cache"
    assert get_serial_enabled() is False
| 1,020 | 363 |
import pygame
# Play a single mp3 via pygame's mixer.  play() is asynchronous, so the
# script exits (and playback stops) as soon as this returns.
pygame.mixer.init()
pygame.mixer.music.load('anzenchitai.mp3')
pygame.mixer.music.play()
| 104 | 44 |
# -*- coding: utf-8 -*-
import logging
from operator import itemgetter
from typing import List, Tuple, Callable, Dict
# TODO(davidr) is it faster to do this here or to save the function as an attribute of the
# class instance?
from timeit import default_timer
from collections import defaultdict
_logger = logging.getLogger(__name__)
class TimeMarker(object):
"""Time Marker class
Todo:
* how should this behave as a context manager vs. an instantiated object? right now, it
only works as the former
* we should support recurring tags (i.e. a timer.tag("foo") inside of a for loop and average
the time taken for all the calls together, perhaps with stats?
"""
__slots__ = [
'_start', '_end', '_cumulative_time', '_last_tag', '_last_ts',
'_timing'
]
_reserved_tags = ['start', 'stop']
_stats_formats = ['percentage', 'raw']
    def __init__(self):
        # All timing state starts unset; real values arrive on __enter__.
        self._start: float = None
        self._end: float = None
        # Accumulated seconds per tag name (missing tags default to 0.0).
        self._timing: Dict[str, float] = defaultdict(lambda: 0.0)
        self._cumulative_time: float = None
        self._last_tag: str = None
        self._last_ts: float = None
def __enter__(self):
self._start = default_timer()
# Set up the initial values for our last_* tags
self._last_ts = self._start
self._last_tag = 'start'
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.tag("stop", _override=True)
self._end = self._last_ts
self._cumulative_time = self._end - self._start
def __repr__(self) -> str:
return "TimeMarker(start={}, last=({}, {}))".format(
self._start, self._last_tag, self._last_ts)
def tag(self, tag: str, _override: bool = False) -> None:
"""Mark a tagged event
Args:
tag: name of timer tag
_override: override checking for reserved tags *Warning: don't use this!*
Returns:
None
Raises:
ValueError: if given invalid tag name
"""
if tag in TimeMarker._reserved_tags and _override is False:
raise ValueError('tag name {} is reserved'.format(tag))
_ts = default_timer()
self._cumulative_time = _ts - self._start
self._timing[self._last_tag] += _ts - self._last_ts
self._last_tag = tag
self._last_ts = _ts
_logger.debug("tag took {}".format(default_timer() - _ts))
def stats(self,
sort: bool = True,
fmt: str = "percentage",
oneline: bool = True,
print_function: Callable = print,
pctg_cap: float = 1.):
"""Print out TimeMarker stats
Args:
sort: if True, results are sorted by time taken, else in the order in which they were
tagged
fmt: format of stats. "raw" is time in seconds, "percentage" is the percentage of the
total measured time
oneline: if True, print stats on one line
print_function: callable with string as only parameter
pctg_cap: do not print results accounting for more than pctg_cap percentage of the total
measured time
Returns:
None
"""
if fmt not in ['percentage', 'raw']:
raise ValueError(f'Invalid format: {fmt}')
if not 0. < pctg_cap <= 1.:
raise ValueError('pctg must be < 0., <= 1.')
elif fmt not in TimeMarker._stats_formats:
raise ValueError('"{:s}" not a valid stats formatter')
elif pctg_cap < 1. and fmt != "percentage":
raise ValueError('pctg_cap < 1. requires fmt="percentage"')
elif pctg_cap < 1. and not sort:
raise ValueError('pctg_cap < 1. requires sort=True')
tag_separator = " " if oneline else "\n - "
tag_suffix = ""
stats = list()
timing_results = list(self._timing.items())
stats.append("TIME:{:.6f}s".format(self._cumulative_time))
if sort:
# Sort by the time value of the tag
timing_results = sorted(
timing_results, key=itemgetter(1), reverse=True)
if fmt == "raw":
tag_suffix = "s"
elif fmt == "percentage":
timing_results = self._calculate_tag_percentage(
self._cumulative_time, pctg_cap, timing_results)
for tag, duration in timing_results:
stats.append("{:s}:{:02.3f}{:s}".format(tag, duration, tag_suffix))
stats_string = tag_separator.join(stats)
print_function(stats_string)
@staticmethod
def _calculate_tag_percentage(cumulative_time, pctg_cap, timing_results):
running_pctg = 0.
tags_pctg: List[Tuple[str, float]] = list()
for tag, ts in timing_results:
tag_pctg = ts / cumulative_time
# if we have a cap of the percentage of time we want accounted for, see if we've
# gone over
running_pctg += tag_pctg
if running_pctg > pctg_cap:
break
tags_pctg.append((tag, tag_pctg))
return tags_pctg
| 5,208 | 1,553 |
import inspect
from collections import UserDict
class RendererRegistry(UserDict):
    """Registry mapping form-field classes to renderer callables.

    A renderer is a function that takes a context dict and returns the
    html representation of the form field, for example the render method
    of a Template instance.
    """

    def __getitem__(self, key):
        """Return the best renderer function for the field.

        Walks ``key``'s method-resolution order, so a renderer registered
        on a base class applies to all of its subclasses.
        """
        registered = self.data  # UserDict's backing mapping
        for candidate in inspect.getmro(key):
            if candidate in registered:
                return registered[candidate]
        # No class anywhere in the inheritance chain had a renderer.
        raise ValueError('No renderer found for field %s' % key)

    def register(self, renderer):
        """Form Field class decorator to register a renderer into the registry.

        Example:

            from django.template.loader import get_template
            from my_registry import renderer_registry

            @renderer_registry.register(
                get_template('/path/to/my_field.html').render)
            class MyField(forms.Field):
                pass
        """
        def decorator(form_field_class):
            self[form_field_class] = renderer
            return form_field_class
        return decorator
| 1,151 | 378 |
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from ..models import DjangoAdminLog
from ..forms import DjangoAdminLogForm
from django.urls import reverse_lazy
from django.urls import reverse
from django.http import Http404
class DjangoAdminLogListView(ListView):
    """Paginated list of admin log entries.

    All behavior comes from Django's ``ListView``.  The previous revision
    overrode every hook just to call ``super()``; those no-op overrides were
    removed.  That also fixes a real bug: ``get_paginator`` hard-coded
    ``orphans=0, allow_empty_first_page=True`` instead of forwarding its
    arguments.
    """

    model = DjangoAdminLog
    template_name = "salika/django_admin_log_list.html"
    paginate_by = 20
    context_object_name = "django_admin_log_list"
    allow_empty = True
    page_kwarg = 'page'
    paginate_orphans = 0
class DjangoAdminLogDetailView(DetailView):
    """Detail page for a single admin log entry.

    Behavior is entirely Django's ``DetailView``; the previous pure
    ``super()`` pass-through overrides added nothing and were removed.
    """

    model = DjangoAdminLog
    template_name = "salika/django_admin_log_detail.html"
    context_object_name = "django_admin_log"
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
class DjangoAdminLogCreateView(CreateView):
    """Create form for an admin log entry.

    Standard ``CreateView`` behavior.  The previous ``form_valid`` saved the
    object manually and then let ``super().form_valid`` save it a second
    time; that redundant override was removed so the object is written once.
    """

    model = DjangoAdminLog
    form_class = DjangoAdminLogForm
    # fields = ['action_time', 'object_id', 'object_repr', 'action_flag', 'change_message', 'content_type', 'user']
    template_name = "salika/django_admin_log_create.html"
    # Fallback target only; get_success_url() below takes precedence.
    success_url = reverse_lazy("django_admin_log_list")

    def get_success_url(self):
        """Redirect to the detail page of the newly created entry."""
        return reverse("salika:django_admin_log_detail", args=(self.object.pk,))
class DjangoAdminLogUpdateView(UpdateView):
    """Edit form for an admin log entry.

    Standard ``UpdateView`` behavior.  The former pure ``super()``
    pass-through overrides — and a ``form_valid`` that saved the object
    twice — were removed.
    """

    model = DjangoAdminLog
    form_class = DjangoAdminLogForm
    # fields = ['action_time', 'object_id', 'object_repr', 'action_flag', 'change_message', 'content_type', 'user']
    template_name = "salika/django_admin_log_update.html"
    # Class-level dict mirrors Django's own FormMixin default; get_initial()
    # copies it per request, so sharing it here is safe.
    initial = {}
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    context_object_name = "django_admin_log"

    def get_success_url(self):
        """Redirect to the detail page of the edited entry."""
        return reverse("salika:django_admin_log_detail", args=(self.object.pk,))
class DjangoAdminLogDeleteView(DeleteView):
    """Deletion endpoint for an admin log entry.

    GET requests are rejected with 404, so there is no confirmation-page
    round-trip: entries can only be deleted via POST.  All other behavior is
    Django's ``DeleteView``; the pure ``super()`` pass-through overrides
    were removed.
    """

    model = DjangoAdminLog
    template_name = "salika/django_admin_log_delete.html"
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    context_object_name = "django_admin_log"

    def get(self, request, *args, **kwargs):
        # Deletion must be an explicit POST; direct GETs 404.
        raise Http404

    def get_success_url(self):
        """Return to the list view after a successful delete."""
        return reverse("salika:django_admin_log_list")
| 10,068 | 3,117 |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import os
from datetime import timedelta
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Flask application configuration.

    NOTE(review): the class may continue past this excerpt; only the
    attributes visible here are documented.
    """

    # WARNING: hard-coded secret; should come from the environment in
    # production.
    SECRET_KEY = 'private key'
    # Browser cache lifetime for static files.
    SEND_FILE_MAX_AGE_DEFAULT = timedelta(hours=1)
    # Flask-Caching backend settings.
    CACHE_TYPE = 'redis'
    CACHE_DEFAULT_TIMEOUT = 240 * 60 * 60  # 240 hours, expressed in seconds
    CACHE_KEY_PREFIX = 'cilichong'
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Algolia DocManager."""
import base64
import sys
import time
sys.path[0:0] = [""]
from tests import elastic_pair, unittest, TESTARGS
from tests.test_algolia import AlgoliaTestCase
from tests.test_gridfs_file import MockGridFSFile
from mongo_connector.command_helper import CommandHelper
from mongo_connector.doc_managers.algolia_doc_manager import DocManager
class AlgoliaDocManagerTester(AlgoliaTestCase):
    """Unit tests for the Algolia DocManager."""

    def test_update(self):
        """Test the update method: $set / $unset specs are synced to Algolia."""
        doc = {"_id": '1', "a": 1, "b": 2}
        self.algolia_doc.upsert(doc)
        self.algolia_doc.commit(True)
        # $set only
        update_spec = {"$set": {"a": 1, "b": 2}}
        self.algolia_doc.update(doc, update_spec)
        self.algolia_doc.commit(True)
        doc = self.algolia_index.getObject('1')
        self.assertEqual(doc, {"_id": '1', "objectID": '1', "a": 1, "b": 2})
        # $unset only
        update_spec = {"$unset": {"a": True}}
        self.algolia_doc.update(doc, update_spec)
        self.algolia_doc.commit(True)
        doc = self.algolia_index.getObject('1')
        self.assertEqual(doc, {"_id": '1', "objectID": '1', "b": 2, "a": None})
        # mixed $set/$unset
        update_spec = {"$unset": {"b": True}, "$set": {"c": 3}}
        self.algolia_doc.update(doc, update_spec)
        self.algolia_doc.commit(True)
        doc = self.algolia_index.getObject('1')
        self.assertEqual(doc, {"_id": '1', "objectID": '1', "c": 3, "a": None, "b": None})

    def test_upsert(self):
        """Test the upsert method: a single document becomes searchable."""
        docc = {'_id': '1', 'name': 'John'}
        self.algolia_doc.upsert(docc)
        self.algolia_doc.commit(True)
        res = self.algolia_index.search('')["hits"]
        for doc in res:
            self.assertEqual(doc['_id'], '1')
            self.assertEqual(doc['name'], 'John')

    def test_bulk_upsert(self):
        """Test the bulk_upsert method with an empty batch, inserts and updates."""
        self.algolia_doc.bulk_upsert([], *TESTARGS)
        self.algolia_doc.commit(True)
        docs = ({"_id": i} for i in range(100))
        self.algolia_doc.bulk_upsert(docs, *TESTARGS)
        self.algolia_doc.commit(True)
        res = self.algolia_index.search('', { 'hitsPerPage': 101 })["hits"]
        returned_ids = sorted(int(doc["_id"]) for doc in res)
        self.assertEqual(len(returned_ids), 100)
        for i, r in enumerate(returned_ids):
            self.assertEqual(r, i)
        # Re-upserting the same ids with a new field updates in place.
        docs = ({"_id": i, "weight": 2*i} for i in range(100))
        self.algolia_doc.bulk_upsert(docs, *TESTARGS)
        self.algolia_doc.commit(True)
        res = self.algolia_index.search('', { 'hitsPerPage': 101 })["hits"]
        returned_ids = sorted(int(doc["weight"]) for doc in res)
        self.assertEqual(len(returned_ids), 100)
        for i, r in enumerate(returned_ids):
            self.assertEqual(r, 2*i)

    def test_remove(self):
        """Test the remove method: deleted documents disappear from the index."""
        docc = {'_id': '1', 'name': 'John'}
        self.algolia_doc.upsert(docc)
        self.algolia_doc.commit(True)
        res = self.algolia_index.search('')["hits"]
        self.assertEqual(len(res), 1)
        self.algolia_doc.remove(docc)
        self.algolia_doc.commit(True)
        res = self.algolia_index.search('')["hits"]
        self.assertEqual(len(res), 0)

    @unittest.skip("WIP")
    def test_get_last_doc(self):
        """Test the get_last_doc method.

        Make sure we can retrieve the document most recently modified from
        Algolia.  BUGFIX: the original body referenced ``self.elastic_doc``
        (left over from the Elasticsearch test suite) — every call now goes
        through ``self.algolia_doc``.
        """
        base = self.algolia_doc.get_last_doc()
        ts = base.get("_ts", 0) if base else 0
        docc = {'_id': '4', 'name': 'Hare', '_ts': ts+3, 'ns': 'test.test'}
        self.algolia_doc.upsert(docc)
        docc = {'_id': '5', 'name': 'Tortoise', '_ts': ts+2, 'ns': 'test.test'}
        self.algolia_doc.upsert(docc)
        docc = {'_id': '6', 'name': 'Mr T.', '_ts': ts+1, 'ns': 'test.test'}
        self.algolia_doc.upsert(docc)
        self.algolia_doc.commit(True)
        self.assertEqual(self.algolia_index.search('')['nbHits'], 3)
        doc = self.algolia_doc.get_last_doc()
        self.assertEqual(doc['_id'], '4')
        docc = {'_id': '6', 'name': 'HareTwin', '_ts': ts+4, 'ns': 'test.test'}
        self.algolia_doc.upsert(docc)
        self.algolia_doc.commit(True)
        doc = self.algolia_doc.get_last_doc()
        self.assertEqual(doc['_id'], '6')
        self.assertEqual(self.algolia_index.search('')['nbHits'], 3)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 5,132 | 1,825 |
import os
import helpers as H
# Display the main menu (original comment: "Cette function va afficher le menu").
def menu():
    """Print the numbered list of available actions, one per line."""
    options = (
        "1- Ajouter un Etudiant",
        "2- Ajouter un Matieres",
        "3- Ajouter un Note",
        "4- afficher la Moyenne d'un Etudiant",
        "5- Modifie le numIns d'un Etudiant",
        "6- suprimer une Note",
        "7- quiter",
    )
    print("\n".join(options))
# Student record (original comment: "Une classs qui definie les propreiter d'Etudiant").
class Etudiant:
    """Student: enrollment number, last name, first name and group."""

    # Class-level defaults, kept for backward compatibility with code that
    # reads attributes straight off the class.
    numInsc = 0
    nom = ""
    prenom = ""
    group = 0

    def __init__(self, numInsc, nom, prenom, group):
        """Store the constructor arguments as instance attributes."""
        self.numInsc = numInsc
        self.nom = nom
        self.prenom = prenom
        self.group = group
# Subject record (original comment: "Une classs qui definie les propreiter d'Matieres").
class Matieres:
    """Subject: code, label and coefficient (weight)."""

    # Class-level defaults, kept for backward compatibility.
    codeMat = ""
    libelle = ""
    coef = 0

    def __init__(self, codeMat, libelle, coef):
        """Store the constructor arguments as instance attributes."""
        self.codeMat = codeMat
        self.libelle = libelle
        self.coef = coef
# Grade record (original comment: "Une classs qui definie les propreiter d'Note").
class Note:
    """Grade: student enrollment number, subject code and mark."""

    # Class-level defaults, kept for backward compatibility.
    numInsc = 0
    codeMat = ""
    note = 0.0

    def __init__(self, numInsc, codeMat, note):
        """Store the constructor arguments as instance attributes."""
        self.numInsc = numInsc
        self.codeMat = codeMat
        self.note = note
def ajoutEtudiant(Etudiant):
    """Append a student record to ``Etudiants.dat``.

    Args:
        Etudiant: object exposing ``numInsc``, ``nom``, ``prenom`` and
            ``group``.  NOTE(review): the parameter name shadows the
            ``Etudiant`` class; kept unchanged for backward compatibility.
    """
    # Create the file with its header on first use (now written with an
    # explicit utf-8 encoding, matching the append below).
    if not os.path.isfile("Etudiants.dat"):
        with open("Etudiants.dat", 'w', encoding='utf-8') as file:
            file.write("Gestion des Etudiants\n")
            file.write("----------------------\n")
    # Append one record, one field per line.  (The previous version opened
    # in 'a+' and called file.tell() for no effect; both removed.)
    with open("Etudiants.dat", 'a', encoding='utf-8') as file:
        file.write("\n")
        file.write(str(Etudiant.numInsc) + "\n")
        file.write(Etudiant.nom + "\n")
        file.write(Etudiant.prenom + "\n")
        file.write(str(Etudiant.group) + "\n")
        file.write("------------------------")
def ajoutMatiere(Matieres):
    """Append a subject record to ``Matieres.dat``.

    Args:
        Matieres: object exposing ``codeMat``, ``libelle`` and ``coef``.
            NOTE(review): the parameter name shadows the ``Matieres``
            class; kept unchanged for backward compatibility.
    """
    # Create the file with its header on first use (explicit utf-8).
    if not os.path.isfile("Matieres.dat"):
        with open("Matieres.dat", 'w', encoding='utf-8') as file:
            file.write("Gestion des Matieres\n")
            file.write("---------------\n")
    # Append one record: libelle, codeMat, coef (field order preserved).
    # The pointless 'a+' mode and file.tell() no-op were removed.
    with open("Matieres.dat", 'a', encoding='utf-8') as file:
        file.write("\n")
        file.write(Matieres.libelle + "\n")
        file.write(str(Matieres.codeMat) + "\n")
        file.write(str(Matieres.coef) + "\n")
        file.write("------------------------")
def ajoutNote(Note):
    """Append a grade record to ``Notes.dat``.

    Args:
        Note: object exposing ``numInsc``, ``codeMat`` and ``note``.
            NOTE(review): the parameter name shadows the ``Note`` class;
            kept unchanged for backward compatibility.
    """
    # Create the file with its header on first use (explicit utf-8).
    if not os.path.isfile("Notes.dat"):
        with open("Notes.dat", 'w', encoding='utf-8') as file:
            file.write("Gestion des Notes\n")
            file.write("---------------\n")
    # Append one record: numInsc, codeMat, note.  The pointless 'a+' mode
    # and file.tell() no-op were removed.
    with open("Notes.dat", 'a', encoding='utf-8') as file:
        file.write("\n")
        file.write(str(Note.numInsc) + "\n")
        file.write(Note.codeMat + "\n")
        file.write(str(Note.note) + "\n")
        file.write("------------------------")
def MoyenneEtudiant(NumInsc):
    """Compute and print a student's coefficient-weighted grade average.

    Reads "Notes.dat" and "Matieres.dat" through the ``H`` helpers and
    prints ``sum(coef * note) / sum(coef)``.  Always returns None (the
    ``return print(...)`` pattern just stops early while printing).

    NOTE(review): the stride-3 indexing assumes H.fileTolist yields flat
    lists with 3 fields per note record -- confirm against helpers module.
    """
    # Bail out early if either data file is missing.
    if os.path.isfile("Notes.dat"):
        n = H.fileTolist("Notes.dat")
    else:
        return print("Pas de fichier Notes")
    if os.path.isfile("Matieres.dat"):
        m = H.fileTolist("Matieres.dat")
    else:
        return print("Pas de fichier Matieres")
    # Narrow to this student's note records (see H.focusNote).
    n = (H.focusNote(NumInsc, n))
    if not n:
        return print("Etudiant indisponible")
    # Gather the subject records for each distinct subject code found in
    # the notes (fields repeat every 3 entries: numInsc, codeMat, note).
    k = []
    for i in range(1, len(n), 3):
        if n[i] not in k:
            cm = m.copy()
            k.append(H.focusMat(n[i], cm))
    # Flatten the list of subject records back into m.
    m = []
    for i in k:
        for j in i:
            m.append(j)
    if not m:
        return print("Matieres indisponible")
    # H.extraTrimer presumably strips non-numeric fields in place -- TODO confirm.
    H.extraTrimer(m)
    H.extraTrimer(n)
    # Weighted mean: sum(coef * note) / sum(coef).
    s = 0
    for i in range(len(m)):
        s += float(m[i]) * float(n[i])
    return print("la moyen est: ", s / sum(m))
def ModifEtudiant(NumInsc, newId):
    """Replace a student's enrollment number in both data files.

    Rewrites "Etudiants.dat" and "Notes.dat" from scratch, substituting
    ``newId`` for ``NumInsc`` in every matching record.

    NOTE(review): when the student exists but has no note records, the
    function returns before touching "Etudiants.dat" -- confirm intended.
    """
    # Check that both data files exist (original: "Virifie si le ficher existe").
    if os.path.isfile("Notes.dat"):
        n = H.fileTolist("Notes.dat")
    else:
        return print("Pas de fichier Notes")
    if os.path.isfile("Etudiants.dat"):
        e = H.fileTolist("Etudiants.dat")
    else:
        return print("Pas de fichier Etudiants")
    if str(NumInsc) not in e:
        return print("Etudiants indisponible")
    # Silently stop when the student has no note records.
    if str(NumInsc) not in n:
        return
    print(n)
    print(e)
    note, std = [], []  # NOTE(review): unused locals, kept as in original.
    # Rebuild Etudiants.dat record by record (4 fields per student),
    # swapping in the new id where the old one matches.
    os.remove("Etudiants.dat")
    i = 0
    while i < len(e):
        if e[i] == str(NumInsc):
            e[i] = str(newId)
        ajoutEtudiant(Etudiant(e[i], e[i+1], e[i+2], e[i+3]))
        i += 4
    # Same for Notes.dat (3 fields per note record).
    os.remove("Notes.dat")
    i = 0
    while i < len(n):
        if n[i] == str(NumInsc):
            n[i] = str(newId)
        ajoutNote(Note(n[i], n[i+1], n[i+2]))
        i += 3
def SupprimerNote(NumInsc, CodeMat):
    """Delete one student's grade for one subject from "Notes.dat".

    Rewrites the file, copying every record except those whose
    (enrollment number, subject code) pair matches the arguments.
    """
    if os.path.isfile("Notes.dat"):
        n = H.fileTolist("Notes.dat")
    else:
        return print("Pas de fichier Notes")
    # Rebuild the file from the in-memory list (3 fields per record).
    os.remove("Notes.dat")
    i = 0
    while i < len(n):
        # Matching record: skip it (i.e. drop it from the rewritten file).
        if n[i] == str(NumInsc) and n[i+1] == str(CodeMat):
            i += 3
            continue
        ajoutNote(Note(n[i], n[i+1], n[i+2]))
        i += 3
# Interactive entry point: show the menu and dispatch on the chosen option.
while True:
    menu()
    try:
        choice = int(input("Option: "))
    except ValueError:
        # BUGFIX: non-numeric input used to crash with an unhandled
        # ValueError; now we re-prompt with the usual message instead.
        print("Donner une bonne option")
        continue
    if choice == 1:
        obj = Etudiant(input("Numero d'inscription: "), input("Nom: "), input("Prenom: "), input("Group: "))
        ajoutEtudiant(obj)
    elif choice == 2:
        obj = Matieres(input("Code du matieres: "), input("Libelle: "), input("Coef: "))
        ajoutMatiere(obj)
    elif choice == 3:
        obj = Note(input("Numero d'inscription: "), input("Code du matieres: "), input("Note: "))
        ajoutNote(obj)
    elif choice == 4:
        MoyenneEtudiant(input("donner NumInsc d'Etudiant"))
    elif choice == 5:
        ModifEtudiant(input("donner NumInsc d'Etudiant"), input("donner le nouveau NumInsc"))
    elif choice == 6:
        SupprimerNote(input("donner NumInsc d'Etudiant"), input("donner le Code du matiere"))
    elif choice == 7:
        # BUGFIX: quitting used os._exit(1), which skips interpreter cleanup
        # and reports failure to the shell; a plain break ends the program
        # normally with exit status 0.
        break
    else:
        print("Donner une bonne option")
| 6,281 | 2,463 |
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# baa99: Annotated with location of stochastic rhs entries
# for use with pysp2smps conversion tool.
from pyomo.core import *
# Two-stage stochastic program "baa99".  The module-level `model` object is
# what PySP / the pysp2smps conversion tool picks up from this file.
model = ConcreteModel()

# use mutable parameters so that the constraint
# right-hand-sides can be updated for each scenario
model.d1_rhs = Param(mutable=True)
model.d2_rhs = Param(mutable=True)

# first-stage variables
model.x1 = Var(bounds=(0,217))
model.x2 = Var(bounds=(0,217))

# second-stage variables
model.v1 = Var(within=NonNegativeReals)
model.v2 = Var(within=NonNegativeReals)
model.u1 = Var(within=NonNegativeReals)
model.u2 = Var(within=NonNegativeReals)
model.w11 = Var(within=NonNegativeReals)
model.w12 = Var(within=NonNegativeReals)
model.w22 = Var(within=NonNegativeReals)

# stage-cost expressions
model.FirstStageCost = \
    Expression(initialize=(4*model.x1 + 2*model.x2))
model.SecondStageCost = \
    Expression(initialize=(-8*model.w11 - 4*model.w12 - 4*model.w22 +\
               0.2*model.v1 + 0.2*model.v2 + 10*model.u1 + 10*model.u2))

# always define the objective as the sum of the stage costs
model.obj = Objective(expr=model.FirstStageCost + model.SecondStageCost)

#
# this model only has second-stage constraints
#
model.s1 = Constraint(expr=-model.x1 + model.w11 + model.w12 + model.v1 == 0)
model.s2 = Constraint(expr=-model.x2 + model.w22 + model.v2 == 0)

#
# these two constraints have stochastic right-hand-sides
#
model.d1 = Constraint(expr=model.w11 + model.u1 == model.d1_rhs)
model.d2 = Constraint(expr=model.w12 + model.w22 + model.u2 == model.d2_rhs)

#
# Store the possible table values for the stochastic parameters
# on the model. These will be used to either generate an explicit
# list of scenarios or to represent the SP implicitly.
#
# 25 realizations for the d1 constraint right-hand side, sorted ascending.
model.d1_rhs_table = \
    [17.75731865,
     32.96224832,
     43.68044355,
     52.29173734,
     59.67893765,
     66.27551249,
     72.33076402,
     78.00434172,
     83.40733268,
     88.62275117,
     93.71693266,
     98.74655459,
     103.7634931,
     108.8187082,
     113.9659517,
     119.2660233,
     124.7925174,
     130.6406496,
     136.9423425,
     143.8948148,
     151.8216695,
     161.326406,
     173.7895514,
     194.0396804,
     216.3173937]
# 25 realizations for the d2 constraint right-hand side, sorted ascending.
model.d2_rhs_table = \
    [5.960319592,
     26.21044859,
     38.673594,
     48.17833053,
     56.10518525,
     63.05765754,
     69.35935045,
     75.20748263,
     80.73397668,
     86.03404828,
     91.18129176,
     96.2365069,
     101.2534454,
     106.2830673,
     111.3772488,
     116.5926673,
     121.9956583,
     127.669236,
     133.7244875,
     140.3210624,
     147.7082627,
     156.3195565,
     167.0377517,
     182.2426813,
     216.3173937]
| 3,214 | 1,505 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# QDoubleValidator needs QValidator in qgis 3.4!
from PyQt5.QtCore import Qt, QLocale, pyqtSignal
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QHBoxLayout, QToolButton, QToolBar, QComboBox, QDoubleSpinBox
from PyQt5 import uic
from qgis.core import QgsProject, QgsCoordinateReferenceSystem, QgsPointXY, QgsCoordinateTransform, QgsVectorLayerUtils, \
QgsWkbTypes, QgsGeometry
from qgis.gui import QgsProjectionSelectionWidget, QgsVertexMarker
import os
from .gto_point_tool import GTOPointTool
class GTOPointWidget(QWidget):
isActive = pyqtSignal(bool)
def __init__(self, gtoObj, parent=None):
    """Build the point widget.

    Loads the .ui layout, installs double validators on the coordinate
    line edits, creates the canvas vertex marker and the point map tool,
    and wires all signals.  Any failure during setup is routed to
    ``self.info.err`` instead of propagating.

    Args:
        gtoObj: object exposing the ``gtomain`` plugin hub (helper, iface,
            info logger, debug flag, ...).
        parent: optional Qt parent widget.
    """
    super(GTOPointWidget, self).__init__(parent)
    self.gtomain = gtoObj.gtomain
    self.info = self.gtomain.info
    self.debug = self.gtomain.debug
    try:
        # references
        self.helper = self.gtomain.helper
        self.iface = self.gtomain.iface
        self.prj = QgsProject.instance()
        self.canvas = self.iface.mapCanvas()
        # references
        # Current coordinate state: (x, y) in the project CRS, (xt, yt) in
        # the display/transform CRS.
        self.x = 0
        self.y = 0
        self.xt = 0
        self.yt = 0
        self.snaped = False
        self.crs_transform = None
        self.crs_layer = None
        self.parent_widget = None  # e.g toolbar
        self.userEditX = False
        self.userEditY = False
        # config (populated later by setConfig())
        self.tools = []
        self.coordinatereferences = None
        self.scale = 0
        self.center = True
        self.enable_save = False
        self.precision = -1
        self.cboCoordSystems = None
        self.is_widgetaction = False
        self.show_tool_button = False
        self.addpoint_attributes = {}
        self.tools_after_addpoint = []
        # widgets:
        uic.loadUi(os.path.join(os.path.dirname(__file__), 'gto_point.ui'), self)
        # point tool
        self.btnPointTool = self.btnPoint
        # x
        # NOTE(review): self-assignments like the next line are no-ops; the
        # attributes already exist after uic.loadUi (kept as documentation
        # of the widgets defined in the .ui file).
        self.coordX = self.coordX
        # self.validX = QDoubleValidator(sys.float_info.min, sys.float_info.max, 16, self.coordX) # no negative numbers possible?
        # self.validX = QDoubleValidator(-999999999, 999999999, 16, self.coordX) # working but no range limit
        self.validX = QDoubleValidator(self.coordX)  # so we use the standard:
        self.validX.setNotation(QDoubleValidator.StandardNotation)  # By default, this property is set to ScientificNotation: i.e. 1.5E-2 is possible
        self.coordX.setValidator(self.validX)
        self.btnCopyXt = self.btnCopyXt
        self.lblX = self.lblX
        # y
        self.coordY = self.coordY
        self.validY = QDoubleValidator(self.coordY)
        self.validY.setNotation(QDoubleValidator.StandardNotation)
        self.coordY.setValidator(self.validY)
        self.btnCopyYt = self.btnCopyYt
        self.lblY = self.lblY
        # show
        self.btnShow = self.btnShow
        self.btnShow.setIcon(self.helper.getIcon('mActionZoomPoint.png'))
        # add point
        self.btnAddPoint = self.btnAddPoint
        self.btnAddPoint.setIcon(self.helper.getIcon('mActionAddPoint.png'))
        self.btnAddPoint.setToolTip("Punkt erstellen")
        # marker shown on the canvas at the current coordinate
        self.marker = QgsVertexMarker(self.canvas)
        self.marker.setColor(Qt.yellow)
        self.marker.setIconType(QgsVertexMarker.ICON_CROSS)
        self.marker.setIconSize(10)
        self.marker.setPenWidth(3)
        # See the enum IconType from http://www.qgis.org/api/classQgsVertexMarker.html
        # maptool
        self.mapTool = GTOPointTool(self.iface, self.canvas)
        self.mapTool.isActive.connect(self.setToolStatus)
        self.mapTool.canvasReleased.connect(self.setCoords)
        # signals
        # QToolButton.toggled()
        self.btnPoint.clicked.connect(self.setMapTool)
        # self.coordX.textChanged.connect(self.set_user_editX)
        # self.coordY.textChanged.connect(self.set_user_editY)
        self.coordX.textEdited.connect(self.set_user_editX)
        self.coordY.textEdited.connect(self.set_user_editY)
        # self.coordX.editingFinished.connect(self.check_coords)
        # self.coordY.editingFinished.connect(self.check_coords)
        self.btnShow.clicked.connect(self.showCoordinate)
        self.btnCopyXt.clicked.connect(self.copyXt)
        self.btnCopyYt.clicked.connect(self.copyYt)
        self.btnAddPoint.clicked.connect(self.add_point)
        self.prj.crsChanged.connect(self.prj_crs_changed)
        self.iface.mapCanvas().currentLayerChanged.connect(self.layer_changed)
    except Exception as e:
        self.info.err(e)
def set_user_editX(self, *args):
    """Mark the X field as hand-edited; drop any snap and hide the marker."""
    try:
        if self.debug:
            self.info.log("set_user_editX")
        self.userEditX = True
        self.snaped = False
        self.marker.hide()
        self.marker.setColor(Qt.blue)
    except Exception as e:
        self.info.err(e)
def set_user_editY(self, *args):
    """Mark the Y field as hand-edited; drop any snap and hide the marker."""
    try:
        if self.debug:
            self.info.log("set_user_editY")
        self.userEditY = True
        self.snaped = False
        self.marker.hide()
        self.marker.setColor(Qt.blue)
    except Exception as e:
        self.info.err(e)
def reset_user_edit(self):
    """Clear both user-edited flags."""
    if self.debug:
        self.info.log("reset_user_edit")
    self.userEditX = self.userEditY = False
def check_coords(self):
    """Validate the coordinate line edits and re-project manual input.

    Empty fields are reset to '0'.  When either field was edited by hand,
    the text (',' accepted as decimal separator) is parsed into
    (xt, yt) and transformed from the display CRS (self.crs_transform)
    into the project CRS, updating self.x / self.y.
    """
    try:
        self.marker.hide()
        if self.debug: self.info.log("useredit: X:", self.userEditX, "userEditY:", self.userEditY)
        # Treat empty input as the origin.
        if self.coordX.text() == '':
            self.coordX.setText('0')
            self.x = 0
        if self.coordY.text() == '':
            self.coordY.setText('0')
            self.y = 0
        if self.userEditX or self.userEditY:
            # Manual input invalidates any snap and consumes the edit flags.
            self.snaped = False
            self.userEditX = False
            self.userEditY = False
            # Accept ',' as decimal separator.
            self.xt = float(self.coordX.text().replace(",", "."))
            self.yt = float(self.coordY.text().replace(",", "."))
            tr = QgsCoordinateTransform(self.crs_transform, self.prj.crs(), self.prj)
            trPoint = tr.transform(QgsPointXY(self.xt, self.yt))
            self.x = trPoint.x()
            self.y = trPoint.y()
        if self.debug: self.info.log("check_coords:", self.x, "/", self.y, "/snaped:", self.snaped)
    except Exception as e:
        self.info.err(e)
def setMapTool(self):
    """Activate this widget's point map tool on the canvas."""
    try:
        self.canvas.setMapTool(self.mapTool)
    except Exception as e:
        self.info.err(e)
def set_parent_widget(self, widget):
    """Remember the hosting widget (e.g. a toolbar) and sync the tool state."""
    try:
        self.parent_widget = widget
        # If the host's action is already checked, activate the tool now.
        if widget.action.isChecked():
            self.setMapTool()
    except Exception as e:
        self.info.err(e)
def setToolStatus(self, isActive):
    """Mirror the map tool's active state onto the button, marker and host."""
    try:
        self.btnPoint.setChecked(isActive)
        self.marker.hide()
        self.isActive.emit(isActive)
        parent = self.parent_widget
        if parent is not None:
            parent.set_status(isActive)
    except Exception as e:
        self.info.err(e)
    def setConfig(self, config):
        """Apply the tool configuration dict to the widget.

        Reads tool list, CRS list, scale/center behaviour, save options and
        labels; sets up the show-button tooltip/icon and toggles the
        add-point and point-tool buttons. UI strings are German.
        """
        try:
            self.tools = config.get("tools", [])
            self.coordinatereferences = config.get("coordinatereferences", None)
            self.scale = config.get("scale", 0)
            self.center = config.get("center", True)
            self.enable_save = config.get('enable_save', False)
            self.precision = config.get('precision', -1)
            self.is_widgetaction = config.get('is_widgetaction', False)
            self.show_tool_button = config.get('show_tool_button', not self.is_widgetaction)
            self.addpoint_attributes = config.get("addpoint_attributes", {})
            self.tools_after_addpoint = config.get("tools_after_addpoint", [])
            if self.precision < 0:
                # no explicit precision configured: fall back to the
                # project-wide decimal-places setting (default 3)
                self.precision, type_conversion_ok = self.prj.readNumEntry("PositionPrecision", "DecimalPlaces", 3)
            # labels:
            self.lblX.setText(config.get('label_x', 'X:'))
            self.lblY.setText(config.get('label_y', 'Y:'))
            # tooltip text for the show button, depending on configuration
            text = ''
            if self.scale > 0 and self.center:
                text = "Auf Koordinate zentrieren, Maßstab: " + str(self.scale)
            elif self.center:
                text = "Auf Koordinate zentrieren"
            elif self.scale > 0:
                text = "Maßstab: " + str(self.scale)
            elif len(self.tools) > 0:
                text = self.tools[0]
                act = self.gtomain.findAction(self.tools[0])
                if act is not None:
                    text = act.toolTip()
                    if act.icon() is not None:
                        self.btnShow.setIcon(act.icon())
            if self.debug: self.info.log(text)
            self.btnShow.setToolTip(text)
            if self.btnShow.toolTip() == '':
                self.btnShow.setHidden(True)
            # add point
            self.btnAddPoint.setHidden(not self.enable_save)
            # point tool
            self.btnPointTool.setHidden(not self.show_tool_button)
        except Exception as e:
            self.info.err(e)
    def added(self): # widget was added to parent
        """Initialize the CRS selector once the widget is attached.

        Uses the stock QGIS projection selection widget when no custom CRS
        list is configured; otherwise fills the plain combo box with the
        project CRS plus every configured CRS id.
        """
        try:
            self.crs_transform = self.prj.crs()
            self.crs_layer = self.iface.activeLayer().crs()
            # set crs widget
            if self.coordinatereferences is None:
                # qgis transform
                self.cboCoordSys.setHidden(True)
                self.cboCoordSystems = self.mQgsProjectionSelectionWidget
                self.cboCoordSystems.setMinimumWidth(460)
                self.cboCoordSystems.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, True)
                self.cboCoordSystems.setCrs(self.prj.crs())
                self.setCrs(self.cboCoordSystems.crs())
                self.cboCoordSystems.crsChanged.connect(self.setCrs)
            else:
                # custom transform
                self.mQgsProjectionSelectionWidget.setHidden(True)
                self.cboCoordSystems = self.cboCoordSys
                self.cboCoordSystems.setMinimumWidth(400)
                self.cboCoordSystems.currentIndexChanged.connect(
                    lambda: self.setCrs(self.cboCoordSystems.currentData()))
                # first entry is always the current project CRS
                self.cboCoordSystems.addItem(
                    "Projekt CRS: " + self.crs_transform.authid() + " - " + self.crs_transform.description(),
                    self.crs_transform)
                for crsID in self.coordinatereferences:
                    try:
                        crs = QgsCoordinateReferenceSystem(crsID)
                        self.cboCoordSystems.addItem(crs.authid() + " - " + crs.description(), crs)
                    except Exception as e:
                        self.info.err(e)
                self.cboCoordSystems.setCurrentIndex(0)
            # here we know which type is cboCoordSystems!
            self.setIconSizes()
        except Exception as e:
            self.info.err(e)
def setIconSizes(self):
try:
if self.parentWidget() is not None:
btns = self.findChildren(QToolButton)
for btn in btns:
try:
btn.setIconSize(self.iface.iconSize(False))
except:
pass
# help for the QGIS widget :S
self.cboCoordSystems.setMaximumHeight(self.cboCoordSys.height())
btns = self.cboCoordSystems.findChildren(QToolButton)
for btn in btns:
btn.setIconSize(self.iface.iconSize(False))
except Exception as e:
self.info.err(e)
def layer_changed(self, layer):
try:
if layer.geometryType() == QgsWkbTypes.GeometryType.PointGeometry:
self.btnAddPoint.setEnabled(True)
else:
self.btnAddPoint.setEnabled(False)
except Exception as e:
self.info.err(e)
    def prj_crs_changed(self):
        """Slot: the project CRS changed.

        Refreshes the first combo entry (custom-CRS mode only) and resets
        the stored coordinates and their display fields.
        """
        try:
            self.reset_user_edit()
            if self.coordinatereferences is not None: # my combo
                self.crs_transform = self.prj.crs()
                self.cboCoordSystems.setItemText(0,
                                                 "Projekt CRS: " + self.crs_transform.authid() + " - " + self.crs_transform.description())
                self.cboCoordSystems.setItemData(0, self.crs_transform)
            self.x = 0
            self.y = 0
            self.xt = 0
            self.yt = 0
            self.coordX.setText("---")
            self.coordY.setText("---")
        except Exception as e:
            self.info.err(e)
    def add_point(self):
        """Create a new point feature at the current coordinate on the active layer.

        Validates the coordinate, transforms it from the project CRS into the
        layer CRS, fills the configured attributes (x, y, snapped flag),
        saves the feature inside an edit command and runs the configured
        follow-up tools. Warns (German messages) when the active layer is
        not a point layer or the coordinate is (0, 0).
        """
        try:
            self.check_coords()
            layer = self.iface.activeLayer()
            if layer.geometryType() == QgsWkbTypes.GeometryType.PointGeometry:
                # make sure the target layer is visible in the layer tree
                self.prj.layerTreeRoot().findLayer(layer.id()).setItemVisibilityCheckedParentRecursive(True)
                if self.x != 0 and self.y != 0:
                    feat = QgsVectorLayerUtils.createFeature(layer)
                    tr = QgsCoordinateTransform(self.prj.crs(), self.crs_layer, self.prj)
                    trPoint = tr.transform(QgsPointXY(self.x, self.y))
                    feat.setGeometry(QgsGeometry.fromPointXY(trPoint))
                    # direct save
                    # (res, features) = layer.dataProvider().addFeatures([feat])
                    # if self.debug: self.info.log("new point:", res, features[0])
                    # set attributes from the configured mapping (attribute
                    # name -> one of the dic_info keys)
                    dic_info = {"x": self.x, "y": self.y, "snaped": self.snaped}
                    # self.info.err(None,"mapping:",dic_info)
                    # self.info.err(None, "addpoint_attributes:", self.addpoint_attributes)
                    for k, v in self.addpoint_attributes.items():
                        # self.info.err(None,"attribute:",k,"value:",dic_info[v])
                        feat[k] = layer.fields().field(k).convertCompatible(dic_info[v])
                    features = [feat]
                    # connect only for the duration of the save so the new
                    # feature gets selected exactly once
                    layer.featureAdded.connect(self.select_new_feature)
                    self.save_features(layer, features)
                    layer.featureAdded.disconnect(self.select_new_feature)
                    self.marker.hide()
                    self.helper.refreshLayer(layer)
                    self.gtomain.runcmd(self.tools_after_addpoint)
                else:
                    self.info.gtoWarning('Ungültige Koordinaten! x:', self.x, "y:", self.y)
            else:
                self.info.gtoWarning('Kein Punktlayer ausgewählt!')
        except Exception as e:
            self.info.err(e)
    def select_new_feature(self, featId):
        """Slot for layer.featureAdded: select the new feature and reset markers."""
        try:
            if self.debug: self.info.log("new featue:", self.iface.activeLayer().name(), "/ fid:", featId)
            self.iface.activeLayer().selectByIds([featId])
            self.mapTool.reset_marker()
            self.marker.hide()
            self.helper.refreshLayer(self.iface.activeLayer())
        except Exception as e:
            self.info.err(e)
def save_features(self, layer, features):
if not layer.isEditable():
layer.startEditing()
layer.beginEditCommand("layer {0} edit".format(layer.name()))
try:
layer.addFeatures(features)
layer.endEditCommand()
except Exception as e:
layer.destroyEditCommand()
raise e
def copyXt(self):
self.check_coords()
dsp = QDoubleSpinBox()
dsp.setDecimals(16)
self.helper.copyToClipboard(dsp.textFromValue(self.xt))
def copyYt(self):
self.check_coords()
dsp = QDoubleSpinBox()
dsp.setDecimals(16)
self.helper.copyToClipboard(dsp.textFromValue(self.yt))
    def reset(self):
        """Hide the marker when the widget is reset by the framework."""
        if self.debug: self.info.log("widget reset")
        self.marker.hide()
def setCoords(self, point, snaped):
try:
self.reset_user_edit()
self.snaped = snaped
self.x = point.x()
self.y = point.y()
if self.debug: self.info.log("setCoords", self.x, "/", self.y)
self.setCrs(self.crs_transform)
# marker
self.marker.setCenter(QgsPointXY(self.x, self.y))
if snaped:
self.marker.setColor(Qt.red)
else:
self.marker.setColor(Qt.blue)
self.marker.show()
except Exception as e:
self.info.err(e)
    def showCoordinate(self):
        """Show the marker at the entered coordinate; optionally center/zoom.

        Rejects the (0, 0) coordinate as invalid (German warning), then
        centers and/or rescales the canvas per configuration and runs the
        configured tools.
        """
        try:
            self.check_coords()
            self.marker.hide()
            if self.x != 0 and self.y != 0:
                pt_center = QgsPointXY(self.x, self.y)
                self.marker.setCenter(pt_center)
                self.marker.show()
                # center map
                if self.center:
                    self.canvas.setCenter(pt_center)
                # scale map
                if self.scale is not None and self.scale > 0:
                    self.canvas.zoomScale(self.scale)
                self.canvas.refresh()
                # run tools
                self.gtomain.runcmd(self.tools)
            else:
                self.info.gtoWarning('Ungültige Koordinate! x:', self.x, "y:", self.y)
        except Exception as e:
            self.info.err(e)
def setCrs(self, crs):
try:
if self.debug: self.info.log("setCrs")
self.crs_transform = crs
tr = QgsCoordinateTransform(self.prj.crs(), self.crs_transform, self.prj)
trPoint = tr.transform(QgsPointXY(self.x, self.y))
self.xt = trPoint.x()
self.yt = trPoint.y()
d = round(trPoint.x(), self.precision)
display = str(d).replace(".", QLocale().decimalPoint())
self.coordX.setText(display)
d = round(trPoint.y(), self.precision)
display = str(d).replace(".", QLocale().decimalPoint())
self.coordY.setText(display)
except Exception as e:
self.info.err(e)
| 18,560 | 5,613 |
from .admin import UserView, TweetView
def configure(app):
    """Register the user and tweet admin views on the application admin site.

    Args:
        app: application object exposing ``admin`` (with a ``register``
             method) and ``db`` (with a ``users`` collection and a
             ``get_collection`` accessor).
    """
    app.admin.register(
        app.db.users,
        UserView,
        # category='User',
        name='User'
    )
    app.admin.register(
        app.db.get_collection('tweets'),
        TweetView,
        # category='User',
        name='Tweets'
    )
| 318 | 104 |
from numpy.lib.function_base import diff
import torch
from torch import nn
from torch.nn import functional as F
from itertools import accumulate
import numpy as np
import os
import importlib
from utils.my_utils import carving_t, carving_t2, FeatExt, get_in_range, idx_cam2img, idx_world2cam, normalize_for_grid_sample
import model.conf as conf
# Optional config override: set IDR_USE_ENV=1 and IDR_CONF=<module path> in
# the environment to replace the default `model.conf` module at import time.
if os.environ.get('IDR_USE_ENV', '0') == '1' and os.environ.get('IDR_CONF', '') != '':
    print('override conf: ', os.environ.get('IDR_CONF'))
    conf = importlib.import_module(os.environ.get('IDR_CONF'))
class IDRLoss(nn.Module):
    """Composite training loss for IDR-style implicit surface reconstruction.

    Terms: masked L1 RGB loss, eikonal gradient regularizer, signed-distance
    supervision carved from depth maps, multi-view feature consistency and a
    surface-indicator BCE. All weights and schedules come from `conf`.
    """
    def __init__(self):
        super().__init__()
        # summed L1; get_rgb_loss normalizes by the pixel count itself
        self.l1_loss = nn.L1Loss(reduction='sum')
    def get_rgb_loss(self,rgb_values, rgb_gt, network_object_mask, object_mask):
        """Masked L1 RGB loss over pixels hit by both predicted and GT masks,
        normalized by the total pixel count (not the hit count)."""
        if (network_object_mask & object_mask).sum() == 0:
            return torch.tensor(0.0).cuda().float()
        rgb_values = rgb_values[network_object_mask & object_mask]
        rgb_gt = rgb_gt.reshape(-1, 3)[network_object_mask & object_mask]
        rgb_loss = self.l1_loss(rgb_values, rgb_gt) / float(object_mask.shape[0])
        return rgb_loss
    def get_eikonal_loss(self, grad_theta):
        """Eikonal regularizer: pushes SDF gradient norms toward 1."""
        if grad_theta.shape[0] == 0:
            return torch.tensor(0.0).cuda().float()
        eikonal_loss = ((grad_theta.norm(2, dim=1) - 1) ** 2).mean()
        return eikonal_loss
    def get_depth_loss(self, eikonal_points_hom, eikonal_output, depths, cams, size, center, far_thresh, far_att, near_thresh, near_att, smooth):
        """Supervise the SDF at the eikonal sample points using distances
        carved from the input depth maps.

        Sample points are de-normalized from the unit cube back to world
        space, carved against the depth maps, and the resulting signed
        distance (re-normalized, clamped to [-1.25, 1.25]) is compared to
        the network output. Very far (`far_att`) and very near (`near_att`)
        samples are down-weighted; out-of-range samples are masked out.
        `smooth`, when given, switches to a scaled SmoothL1.
        """
        eikonal_points_hom = eikonal_points_hom.detach()
        depths = depths.permute(1,0,2,3,4)
        cams = cams.permute(1,0,2,3,4)
        # de-normalize: cube coordinates -> world coordinates
        eikonal_points_hom[:,:,:3,0] = eikonal_points_hom[:,:,:3,0] / 2 * size.view(1,1,1) + center.view(1,1,3)
        if conf.use_invalid: # treat out-of-mask depth as inf
            dist, occ, in_range = carving_t(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc)
        else: # ignore out-of-mask depth
            dist, occ, in_range = carving_t2(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc) # scale is applied in cams NOTE: hard code
        # out-of-range points are pushed to the clamp boundary (-1.25)
        dist_r = (dist / size.view(1,1) * 2 + (-1.25) * (~in_range).to(torch.float32)).clamp(-1.25,1.25)
        # loss = nn.SmoothL1Loss()(eikonal_output, -dist_r)
        # single depth
        # not_inside = (dist_r < int_thresh)
        # inside_weight = not_inside + (~not_inside) * int_att
        far_mask = dist_r.abs() > far_thresh
        far_weight = far_mask * far_att + (~far_mask)
        near_mask = dist_r.abs() < near_thresh
        near_weight = near_mask * near_att + (~near_mask)
        if smooth is not None:
            # scaling in/out by `smooth` widens the quadratic region
            loss = nn.SmoothL1Loss(reduction='none')(eikonal_output / smooth, -dist_r / smooth) * smooth
        else:
            loss = nn.L1Loss(reduction='none')(eikonal_output, -dist_r)
        loss = (loss * far_weight * near_weight * in_range).mean()
        return loss
    def get_feat_loss2(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
        """Multi-view feature consistency (L1 difference variant).

        For each image in the minibatch, the differentiable surface points
        are projected into the reference view and its source views; features
        are bilinearly sampled and their per-point difference is penalized,
        either gated by a relative-magnitude mask (no uncertainty) or
        weighted aleatoric-style: |diff| * exp(-u) + 0.01 * u.
        """
        mask = network_object_mask & object_mask
        if (mask).sum() == 0:
            return torch.tensor(0.0).float().cuda()
        # surface points are concatenated over the batch; recover the
        # per-image slices from the per-image hit counts
        sample_mask = mask.view(feat.size()[0], -1)
        hit_nums = sample_mask.sum(-1)
        accu_nums = [0] + hit_nums.cumsum(0).tolist()
        slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
        loss = []
        ## for each image in minibatch
        for view_i, slice_ in enumerate(slices):
            if slice_.start < slice_.stop:
                ## projection
                diff_surf_pts_slice = diff_surf_pts[slice_]
                pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
                pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
                # rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
                cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
                pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
                ## gathering
                grid = pts_img[...,:2,0] # vm12
                # feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
                feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
                # grid/2: feature maps are half resolution — TODO confirm
                grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
                grid_in_range = get_in_range(grid_n)
                valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
                gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
                ## calculation
                diff = gathered_feat[:1] - gathered_feat[1:]
                if uncerts is None:
                    gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # vcm1
                    # only penalize differences smaller than the mean
                    # feature magnitude (robustness to occlusion)
                    diff_mask = diff.norm(dim=1, keepdim=True) < ((gathered_norm[:1,...] + gathered_norm[1:,...])/2*1)
                    print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
                    sample_loss = (diff * valid_mask * diff_mask).abs().mean()
                else:
                    uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
                    print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
                    sample_loss = ((diff.abs() * (-uncert).exp() + 0.01 * uncert)*valid_mask).mean()
            else:
                sample_loss = torch.zeros(1).float().cuda()
            loss.append(sample_loss)
        loss = sum(loss) / len(loss)
        return loss
    def get_feat_loss_corr(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
        """Multi-view feature consistency (cosine-correlation variant).

        Same projection/gathering pipeline as get_feat_loss2 but penalizes
        |1 - cosine(feat_ref, feat_src)| per point.
        NOTE(review): most of this body is duplicated from get_feat_loss2 —
        keep the two in sync if either changes.
        """
        mask = network_object_mask & object_mask
        if (mask).sum() == 0:
            return torch.tensor(0.0).float().cuda()
        sample_mask = mask.view(feat.size()[0], -1)
        hit_nums = sample_mask.sum(-1)
        accu_nums = [0] + hit_nums.cumsum(0).tolist()
        slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
        loss = []
        ## for each image in minibatch
        for view_i, slice_ in enumerate(slices):
            if slice_.start < slice_.stop:
                ## projection
                diff_surf_pts_slice = diff_surf_pts[slice_]
                pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
                pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
                # rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
                cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
                pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
                ## gathering
                grid = pts_img[...,:2,0] # vm12
                # feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
                feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
                grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
                grid_in_range = get_in_range(grid_n)
                valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
                gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
                ## calculation
                gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # v1m1
                corr = (gathered_feat[:1] * gathered_feat[1:]).sum(dim=1, keepdim=True) \
                    / gathered_norm[:1].clamp(min=1e-9) / gathered_norm[1:].clamp(min=1e-9) # (v-1)1m1
                corr_loss = (1 - corr).abs()
                if uncerts is None:
                    # discard badly-correlated samples (likely occlusions)
                    diff_mask = corr_loss < 0.5
                    print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
                    sample_loss = (corr_loss * valid_mask * diff_mask).mean()
                else:
                    uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
                    print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
                    sample_loss = ((corr_loss * (-uncert).exp() + uncert)*valid_mask).mean()
            else:
                sample_loss = torch.zeros(1).float().cuda()
            loss.append(sample_loss)
        loss = sum(loss) / len(loss)
        return loss
    def get_surf_loss(self, surf_indicator_output, network_object_mask, object_mask_true):
        """BCE on the surface indicator logits: 1 for the first N samples
        (where prediction and true mask agree), 0 for the rest.

        NOTE(review): assumes surf_indicator_output is ordered with the
        mask-hit samples first — confirm against the model's sampling code.
        """
        mask = network_object_mask & object_mask_true
        N = mask.sum()
        gt1 = torch.ones(N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
        gt0 = torch.zeros(surf_indicator_output.size()[0]-N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
        gt = torch.cat([gt1, gt0], dim=0)
        loss = nn.BCEWithLogitsLoss(reduction='mean')(surf_indicator_output, gt)
        return loss
    def forward(self, model_outputs, ground_truth, train_progress, n_img):
        """Combine all loss terms with their (schedule-dependent) weights.

        Args:
            model_outputs: dict of network outputs (masks, rgb, gradients, ...)
            ground_truth: dict with rgb, depths, cams, features, size/center
            train_progress: scalar controlling the conf weight schedules
            n_img: unused in this method; kept for caller compatibility

        Returns:
            dict with the weighted total 'loss' plus each individual term.
        """
        rgb_gt = ground_truth['rgb'].cuda()
        network_object_mask = model_outputs['network_object_mask']
        object_mask = model_outputs['object_mask']
        # size/center are shared across the batch; keep only the first entry
        ground_truth['size'] = ground_truth['size'][:1]
        ground_truth['center'] = ground_truth['center'][:1]
        if conf.enable_rgb:
            rgb_loss = self.get_rgb_loss(model_outputs['rgb_values'], rgb_gt, network_object_mask, object_mask)
        else:
            rgb_loss = torch.zeros(1).float().cuda()
        eikonal_loss = self.get_eikonal_loss(model_outputs['grad_theta'])
        depth_loss = self.get_depth_loss(model_outputs['eikonal_points_hom'], model_outputs['eikonal_output'], ground_truth['depths'], ground_truth['depth_cams'], ground_truth['size'], ground_truth['center'],
                                         far_thresh=conf.far_thresh, far_att=conf.far_att(train_progress),
                                         near_thresh=conf.near_thresh, near_att=conf.near_att(train_progress),
                                         smooth=conf.smooth(train_progress))
        # feature and surface terms only activate after conf.phase[0]
        if conf.phase[0] <= train_progress and conf.enable_feat:
            feat_loss = self.get_feat_loss_corr(model_outputs['diff_surf_pts'], model_outputs.get('uncerts'), *[ground_truth[attr] for attr in ['feat', 'cam', 'feat_src', 'src_cams', 'size', 'center']], network_object_mask, object_mask)
        else:
            feat_loss = torch.zeros(1).float().cuda()
        if conf.phase[0] <= train_progress:
            surf_loss = self.get_surf_loss(model_outputs['surf_indicator_output'], network_object_mask, model_outputs['object_mask_true'])
        else:
            surf_loss = torch.zeros(1).float().cuda()
        loss = rgb_loss * conf.rgb_weight(train_progress) + \
            eikonal_loss * conf.eikonal_weight + \
            surf_loss * conf.surf_weight + \
            feat_loss * conf.feat_weight(train_progress) + \
            depth_loss * conf.depth_weight(train_progress)
        return {
            'loss': loss,
            'rgb_loss': rgb_loss,
            'eikonal_loss': eikonal_loss,
            'depth_loss': depth_loss,
            'feat_loss': feat_loss,
            'surf_loss': surf_loss
        }
| 11,909 | 4,424 |
from .matrix import Matrix
| 28 | 8 |
from sqlalchemy import Column, Integer, String, Date
from run import db
class Transaction(db.Model):
    """ORM model of one stock transfer of an item to/from a location."""
    __tablename__ = "transaction"
    # surrogate auto-incrementing primary key
    id = Column(Integer, primary_key=True, autoincrement=True)
    # identifier of the transferred item
    itemid = Column(String)
    # identifier of the location involved in the transfer
    # NOTE(review): direction (in/out) is not encoded here — confirm
    # semantics against callers
    locationid = Column(String)
    # date the transfer happened
    transactiondate = Column(Date)
    # number of units moved
    transferquantity = Column(Integer)
    def __init__(self, itemid, locationid, transactiondate, transferquantity):
        """Create a transaction; `id` is assigned by the database."""
        self.itemid = itemid
        self.locationid = locationid
        self.transactiondate = transactiondate
        self.transferquantity = transferquantity
    def __repr__(self):
        return "<Transaction %s>" % self.id
| 645 | 177 |
### Pickle the data
# Use a context manager so the file handle is closed even if dump raises
# (the original bare open() call leaked the handle).
with open("data.p", "wb") as data_file:
    pickle.dump(data, data_file)
import argparse
from bs4 import BeautifulSoup
def list_users(file_name):
    """Print name and role of each user in a saved Power BI access-list HTML file.

    Expects ``li.accessListData`` items each containing a ``.username``
    element and a ``.performAction`` span holding the role text.
    """
    with open(file_name, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
        for user in soup.find_all('li', class_='accessListData'):
            user_name = user.find(class_='username').string
            user_role = user.find(class_='performAction').span.string
            print(f'{user_name} ({user_role})')
def handler():
    """CLI entry point: parse arguments and list users from the given file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('file', help='Power BI service url with user list')
    parsed = arg_parser.parse_args()
    if parsed.file:
        list_users(parsed.file)


if __name__ == "__main__":
    handler()
| 682 | 217 |
import tensorflow as tf
def conv2d_layer(
    inputs,
    filters,
    kernel_size = [4, 4],
    strides = [2, 2],
    padding = 'same',
    activation = None,
    kernel_initializer = tf.truncated_normal_initializer(stddev = 0.02),
    name = None):
    '''Thin wrapper around tf.layers.conv2d with the project-wide defaults
    (4x4 kernel, stride 2, truncated-normal init).'''
    conv_kwargs = dict(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        activation=activation,
        kernel_initializer=kernel_initializer,
        name=name)
    return tf.layers.conv2d(**conv_kwargs)
def conv2d_transpose_layer(
    inputs,
    filters,
    kernel_size,
    strides,
    padding = 'same',
    activation = None,
    kernel_initializer = tf.truncated_normal_initializer(stddev = 0.02),
    name = None):
    '''Thin wrapper around tf.layers.conv2d_transpose with shared defaults.'''
    deconv_kwargs = dict(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        activation=activation,
        kernel_initializer=kernel_initializer,
        name=name)
    return tf.layers.conv2d_transpose(**deconv_kwargs)
def instance_norm_layer(
    inputs,
    epsilon = 1e-06,
    activation_fn = None,
    name = None):
    '''Wrapper around tf.contrib.layers.instance_norm.

    NOTE(review): `name` is accepted for call-site symmetry with the other
    layer helpers but is not forwarded to the underlying op.
    '''
    return tf.contrib.layers.instance_norm(
        inputs=inputs,
        epsilon=epsilon,
        activation_fn=activation_fn)
def residual_block(
    inputs,
    filters,
    kernel_size = [3, 3],
    strides = [1, 1],
    name_prefix = 'residule_block_'):
    '''Two-conv residual block with reflection padding and instance norm.

    Reflection-pads so the two 'valid' convolutions keep the spatial size,
    allowing the identity skip connection (inputs + conv output).
    '''
    p1 = (kernel_size[0] - 1) // 2
    p2 = (kernel_size[1] - 1) // 2
    paddings = [[0, 0], [p1, p1], [p2, p2], [0, 0]]
    # NOTE(review): both pads use fixed names 'pad0'/'pad1' regardless of
    # name_prefix — presumably fine since TF uniquifies op names; confirm.
    h0_pad = tf.pad(tensor = inputs, paddings = paddings, mode = 'REFLECT', name = 'pad0')
    h1 = conv2d_layer(inputs = h0_pad, filters = filters, kernel_size = kernel_size, strides = strides, padding = 'valid', activation = None, name = name_prefix + 'conv1')
    h1_norm = instance_norm_layer(inputs = h1, activation_fn = tf.nn.relu, name = name_prefix + 'norm1')
    h1_pad = tf.pad(tensor = h1_norm, paddings = paddings, mode = 'REFLECT', name = 'pad1')
    h2 = conv2d_layer(inputs = h1_pad, filters = filters, kernel_size = kernel_size, strides = strides, padding = 'valid', activation = None, name = name_prefix + 'conv2')
    h2_norm = instance_norm_layer(inputs = h2, activation_fn = None, name = name_prefix + 'norm2')
    return inputs + h2_norm
def discriminator(inputs, num_filters = 64, reuse = False, scope_name = 'discriminator'):
    '''PatchGAN-style discriminator: 4 strided convs (leaky ReLU + instance
    norm) followed by a 1-channel score map.

    Args:
        inputs: image batch tensor
        num_filters: channel count of the first conv (doubles per layer)
        reuse: reuse existing variables (needed for CycleGAN's second pass)
        scope_name: variable scope name
    '''
    with tf.variable_scope(scope_name) as scope:
        # Discriminator would be reused in CycleGAN
        if reuse:
            scope.reuse_variables()
        else:
            assert scope.reuse is False
        h0 = conv2d_layer(inputs = inputs, filters = num_filters, activation = tf.nn.leaky_relu, name = 'h0_conv')
        h1 = conv2d_layer(inputs = h0, filters = num_filters * 2, activation = None, name = 'h1_conv')
        h1_norm = instance_norm_layer(inputs = h1, activation_fn = tf.nn.leaky_relu, name = 'h1_norm')
        h2 = conv2d_layer(inputs = h1_norm, filters = num_filters * 4, activation = None, name = 'h2_conv')
        h2_norm = instance_norm_layer(inputs = h2, activation_fn = tf.nn.leaky_relu, name = 'h2_norm')
        h3 = conv2d_layer(inputs = h2_norm, filters = num_filters * 8, strides = [1, 1], activation = None, name = 'h3_conv')
        h3_norm = instance_norm_layer(inputs = h3, activation_fn = tf.nn.leaky_relu, name = 'h3_norm')
        # 1-channel patch score map (no activation; loss applies sigmoid)
        h4 = conv2d_layer(inputs = h3_norm, filters = 1, strides = [1, 1], activation = None, name = 'h4_conv')
        return h4
def generator_resnet(inputs, num_filters = 64, output_channels = 3, reuse = False, scope_name = 'generator_resnet'):
    '''ResNet generator (CycleGAN style): c7s1 stem, two stride-2 downsample
    convs, nine residual blocks, two transposed-conv upsamples and a final
    7x7 tanh projection to `output_channels`.

    Args:
        inputs: image batch tensor
        num_filters: base channel count
        output_channels: channels of the generated image
        reuse: reuse existing variables (CycleGAN shares generators)
        scope_name: variable scope name
    '''
    with tf.variable_scope(scope_name) as scope:
        # Discriminator would be reused in CycleGAN
        if reuse:
            scope.reuse_variables()
        else:
            assert scope.reuse is False
        #output_channels = inputs.shape[-1]
        # Check tf.pad using 'REFLECT' mode
        # https://www.tensorflow.org/api_docs/python/tf/pad
        c0 = tf.pad(tensor = inputs, paddings = [[0, 0], [3, 3], [3, 3], [0, 0]], mode = 'REFLECT', name = 'c0_pad')
        c1 = conv2d_layer(inputs = c0, filters = num_filters, kernel_size = [7, 7], strides = [1, 1], padding = 'valid', activation = None, name = 'c1_conv')
        c1_norm = instance_norm_layer(inputs = c1, activation_fn = tf.nn.relu, name = 'c1_norm')
        c2 = conv2d_layer(inputs = c1_norm, filters = num_filters * 2, kernel_size = [3, 3], strides = [2, 2], activation = None, name = 'c2_conv')
        c2_norm = instance_norm_layer(inputs = c2, activation_fn = tf.nn.relu, name = 'c2_norm')
        c3 = conv2d_layer(inputs = c2_norm, filters = num_filters * 4, kernel_size = [3, 3], strides = [2, 2], activation = None, name = 'c3_conv')
        c3_norm = instance_norm_layer(inputs = c3, activation_fn = tf.nn.relu, name = 'c3_norm')
        # nine residual blocks at the bottleneck resolution
        r1 = residual_block(inputs = c3_norm, filters = num_filters * 4, name_prefix = 'residual1_')
        r2 = residual_block(inputs = r1, filters = num_filters * 4, name_prefix = 'residual2_')
        r3 = residual_block(inputs = r2, filters = num_filters * 4, name_prefix = 'residual3_')
        r4 = residual_block(inputs = r3, filters = num_filters * 4, name_prefix = 'residual4_')
        r5 = residual_block(inputs = r4, filters = num_filters * 4, name_prefix = 'residual5_')
        r6 = residual_block(inputs = r5, filters = num_filters * 4, name_prefix = 'residual6_')
        r7 = residual_block(inputs = r6, filters = num_filters * 4, name_prefix = 'residual7_')
        r8 = residual_block(inputs = r7, filters = num_filters * 4, name_prefix = 'residual8_')
        r9 = residual_block(inputs = r8, filters = num_filters * 4, name_prefix = 'residual9_')
        d1 = conv2d_transpose_layer(inputs = r9, filters = num_filters * 2, kernel_size = [3, 3], strides = [2, 2], name = 'd1_deconv')
        d1_norm = instance_norm_layer(inputs = d1, activation_fn = tf.nn.relu, name = 'd1_norm')
        d2 = conv2d_transpose_layer(inputs = d1_norm, filters = num_filters, kernel_size = [3, 3], strides = [2, 2], name = 'd2_deconv')
        d2_norm = instance_norm_layer(inputs = d2, activation_fn = tf.nn.relu, name = 'd2_norm')
        d2_pad = tf.pad(tensor = d2_norm, paddings = [[0, 0], [3, 3], [3, 3], [0, 0]], mode = 'REFLECT', name = 'd2_pad')
        d3 = conv2d_layer(inputs = d2_pad, filters = output_channels, kernel_size = [7, 7], strides = [1, 1], padding = 'valid', activation = tf.nn.tanh, name = 'd3_conv')
        return d3
| 6,614 | 2,441 |
from django.utils.module_loading import import_string
from .settings import setting_gpg_path
# Dotted path of the GPG backend implementation to instantiate.
SETTING_GPG_BACKEND = 'mayan.apps.django_gpg.classes.PythonGNUPGBackend'

# Module-level backend instance, configured with the GPG binary path from
# settings; imported and used by the rest of the app.
gpg_backend = import_string(SETTING_GPG_BACKEND)(
    binary_path=setting_gpg_path.value
)
| 260 | 97 |
# Students enrolled in each class.
math_students = {"Matthew", "Helen", "Prashant", "James", "Asparna"}
biology_students = {"Jane", "Matthew", "Charlotte", "Mesut", "Oliver", "James"}

# Union: every student enrolled in at least one of the two classes.
students_union = math_students.union(biology_students)

# Intersection: students enrolled in both classes.
students_intersection = math_students.intersection(biology_students)
| 335 | 119 |
'''
Copyright <2019> <COPYRIGHT Pingcheng Zhang>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Training methods defined here.
A part of GSPNet project.
'''
import numpy as np
import pandas as pd
import pickle as pkl
import matplotlib.pyplot as plt
import os
import re
import time
import torch
from torch import nn, optim
from torch.utils import data
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import SubsetRandomSampler, SequentialSampler
from glob import iglob, glob
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from tqdm import tqdm
from pathlib import Path
# import models, loss functions and datasets
import models
from models import *
from losses import *
from datasets import *
# Environment global variable
# NOTE(review): multi-GPU training is hard-disabled; the original
# device-count check is kept in the trailing comment for reference.
TRAIN_ON_MULTI_GPUS = False # (torch.cuda.device_count() >= 2)
# helper functions
def create_dir(directory: str):
    '''
    Helper function to create a directory if it does not already exist.

    Args:
        directory: path of the directory to create (intermediate
                   directories are created as needed)

    Raises:
        OSError: if the directory cannot be created (the original error is
                 re-raised instead of a fresh, information-free OSError)
    '''
    try:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() guard
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print('Error: Creating directory. ' + directory)
        raise
def save_model(model, hyps: dict):
    '''
    Save the model state_dict under output/trained_models/weight.

    The file name encodes the model name (`mn`) plus every hyperparameter
    relevant to that architecture.
    NOTE(review): this naming logic largely mirrors get_curve_name() but the
    architecture lists differ slightly (Sparse*/GAN entries) — keep in sync.

    Args:
        model: trained model
        hyps: hyperparameters of the trained model
    '''
    name = ''
    mn = hyps["mn"]
    name += f'mn{hyps["mn"]}'
    # output size
    if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
              'ConvAutoEncoder', 'ConvAutoEncoderShallow']:
        name += f'-os{hyps["os"]}'
    # input size
    if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder']:
        name += f'-is{hyps["is"]}'
    # hidden dimension
    if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
              'SparseAutoEncoder', 'SparseConvAutoEncoder']:
        name += f'-hd{hyps["hd"]}'
    # recurrent-net specifics: layers, dropout, sequence length
    if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN']:
        name += f'-nl{hyps["nl"]}-dp{hyps["dp"]}-sl{hyps["sl"]}'
    # number of classes
    if mn in ['ConvClassifier', 'MLPClassifier']:
        name += f'-nc{hyps["nc"]}'
    # mode
    if mn in ['ConvAutoEncoder', 'ConvAutoEncoderShallow', 'VAE',
              'SparseAutoEncoder', 'SparseConvAutoEncoder']:
        name += f'-md{hyps["md"]}'
    # latent dimension
    if mn in ['VAE']:
        name += f'-zd{hyps["z_dim"]}'
    # GAN specifics
    if mn in ['GAN']:
        name += f'-zs{hyps["zs"]}'
        name += f'-ss{hyps["ss"]}'
        name += f'-cd{hyps["cd"]}'
        name += f'-vs{hyps["vs"]}'
        name += f'-md{hyps["md"]}'
    name += f'-bs{hyps["bs"]}-lr{hyps["lr"]}.pt'
    # resolve <project root>/output/trained_models/weight relative to this file
    model_path = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve(
    ).parents[1].joinpath(f'output/trained_models/weight'))
    create_dir(model_path)
    model_path = model_path + '/' + name
    torch.save(model.state_dict(), model_path)
def get_curve_name(dest: str, hyps: dict):
    '''
    Generate the training-loss-curve image path.

    The file name encodes the model name (`mn`) plus every hyperparameter
    relevant to that architecture, ending in batch size and learning rate.

    Args:
        dest: folder to save the curve image in
        hyps: hyperparameters of the trained model

    Returns:
        str: ``dest + '/' + <encoded name>.png``
    '''
    mn = hyps['mn']
    rnn_like = ('VanillaLSTM', 'VanillaGRU', 'EmbedRNN')
    parts = [f'mn{mn}']
    # output size
    if mn in rnn_like + ('AutoEncoder', 'ConvAutoEncoder', 'ConvAutoEncoderShallow'):
        parts.append(f'-os{hyps["os"]}')
    # input size
    if mn in rnn_like + ('AutoEncoder',):
        parts.append(f'-is{hyps["is"]}')
    # hidden dimension
    if mn in rnn_like + ('AutoEncoder', 'SparseAutoEncoder'):
        parts.append(f'-hd{hyps["hd"]}')
    # recurrent-net specifics: layers, dropout, sequence length
    if mn in rnn_like:
        parts.append(f'-nl{hyps["nl"]}-dp{hyps["dp"]}-sl{hyps["sl"]}')
    # number of classes
    if mn in ('ConvClassifier', 'MLPClassifier'):
        parts.append(f'-nc{hyps["nc"]}')
    # mode
    if mn in ('ConvAutoEncoder', 'ConvAutoEncoderShallow', 'VAE', 'SparseAutoEncoder'):
        parts.append(f'-md{hyps["md"]}')
    # latent dimension
    if mn in ('VAE',):
        parts.append(f'-zd{hyps["z_dim"]}')
    parts.append(f'-bs{hyps["bs"]}-lr{hyps["lr"]}.png')
    return dest + '/' + ''.join(parts)
# data feeder, type 2, deprecated
def batch_dataset(datadir, seq_len, batch_size=32):
    '''
    Batch the neural network data using DataLoader.

    Args:
        datadir:    Directory storing tensor data
        seq_len:    The sequence length of each batch
        batch_size: Samples per DataLoader batch. Previously this was read
                    from an undefined global and raised NameError.
    Return:
        DataLoader with batched (sequence, next-state) pairs
    '''
    # WARNING: this function is deprecated, will remove after 2019 May 1st
    data_iter = iglob(datadir + '/*')
    states = []
    print('Loading dataset...')
    print('Loading training set...')
    for state in tqdm(data_iter, ascii=True):
        state = torch.load(state).numpy()
        states.append(state)
    # flatten each state tensor into one row
    states = np.array(states)
    states = states.reshape((len(states), -1))
    states = states.astype('float32')
    num_batches = len(states) // seq_len
    # only full batches
    states = states[: num_batches * seq_len]
    # sliding window: seq_len consecutive states predict the next state
    features, targets = [], []
    for idx in range(0, (len(states) - seq_len)):
        features.append(states[idx: idx + seq_len])
        targets.append(states[idx + seq_len])
    data = TensorDataset(torch.from_numpy(np.array(features)),
                         torch.from_numpy(np.array(targets)))
    data_loader = torch.utils.data.DataLoader(
        data, shuffle=False, batch_size=batch_size, num_workers=0)
    return data_loader
def check_encoder_dim(mode: str, model, dataset):
    '''
    Check whether the convolutional autoencoder architecture matches data dimension.

    Args:
        mode: `pnf` (expects 3-channel X) or `od` (expects 1-channel X)
        model: convencoder model instance (currently unused; kept for API
               compatibility)
        dataset: dataset object yielding (X, y) pairs

    Raises:
        AssertionError: if X's channel dimension does not match `mode`
        (the original docstring promised a bool return that never existed)
    '''
    loader = DataLoader(dataset,
                        batch_size=1, num_workers=0, drop_last=True)
    # next() works on both old and new PyTorch loader iterators; the
    # removed `.next()` method raised AttributeError on modern versions
    X, y = next(iter(loader))
    if mode == 'od':
        assert X.size(1) == 1, f'Mode `od`: X expect channel size 1 but get {X.size(1)}.'
    elif mode == 'pnf':
        assert X.size(1) == 3, f'Mode `pnf`: X expect channel size 3 but get {X.size(1)}'
# training function of CNN classification
def train_classifier(model, optimizer, criterion, n_epochs,
                     train_loader, valid_loader, hyps,
                     stop_criterion=20, device='cuda:0',
                     show_every_n_batches=100):
    '''
    Train a CNN classifier with the given hyperparameters.
    Args:
        model: The PyTorch Module that holds the neural network
        optimizer: The PyTorch optimizer for the neural network
        criterion: The PyTorch loss function
        n_epochs: Total go through of entire dataset
        train_loader: Training data loader
        valid_loader: Validation data loader
        hyps: A dict containing hyperparameters (used by save_model for
            the checkpoint file name)
        stop_criterion: Stop after this many epochs without validation
            improvement
        device: Training device
        show_every_n_batches: unused in this function; kept for interface
            compatibility
    Returns:
        (model, (tl, vl)): the trained model plus per-epoch average
        training and validation loss histories.  The best model (lowest
        validation loss) is also saved locally via save_model.
    '''
    # clear cache
    torch.cuda.empty_cache()
    # start timing
    start = time.time()
    print(f'Training on device {device} started at {time.ctime()}')
    # validation constants
    early_stop_count = 0
    valid_loss_min = np.inf
    train_losses = []
    valid_losses = []
    # for plot training loss and validation loss
    tl = []
    vl = []
    model.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # early stop mechanism:
        if early_stop_count >= stop_criterion:
            print(f'Early stop triggered after {stop_criterion} epochs.')
            break
        for data, label in train_loader:
            # forward, back prop
            if TRAIN_ON_MULTI_GPUS:
                data, label = data.cuda(), label.cuda()
            elif torch.cuda.is_available():
                data, label = data.to(device), label.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            # record loss
            train_losses.append(loss.item())
        # full validation pass at the end of every epoch
        model.eval()
        for v_data, v_label in valid_loader:
            v_data, v_label = v_data.to(device), v_label.to(device)
            v_output = model(v_data)
            val_loss = criterion(v_output, v_label)
            valid_losses.append(val_loss.item())
        model.train()
        avg_val_loss = np.mean(valid_losses)
        avg_tra_loss = np.mean(train_losses)
        tl.append(avg_tra_loss)
        vl.append(avg_val_loss)
        # printing loss stats
        print(
            f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_tra_loss:.6f} | Val Loss: {avg_val_loss:.6f} | Min Val: {valid_loss_min:.6f}',
            flush=True)
        # decide whether to save model or not:
        if avg_val_loss < valid_loss_min:
            # single-line message (the original used a backslash
            # continuation that embedded literal indentation spaces into
            # the printed string); matches train_recurrent's wording
            print(f'Valid Loss {valid_loss_min:.6f} -> {avg_val_loss:.6f}. Saving...', flush=True)
            save_model(model, hyps)
            valid_loss_min = avg_val_loss
            early_stop_count = 0
        else:
            early_stop_count += 1
        # reset per-epoch accumulators
        train_losses = []
        valid_losses = []
    # returns a trained model
    end = time.time()
    # ':.2f' (the original ':2f' meant "min field width 2", not 2 decimals)
    print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
    return model, (tl, vl)
def run_classifier_training(model_name, data_dir, epochs, bs, vs, lr, nc,
                            dp=0.5, device='cuda:0'):
    '''
    Main function of cnn classifier training.
    Args:
        model_name: model name (key into models.__dict__)
        data_dir: data source location
        epochs: number of epochs to train
        bs: batch_size
        vs: validation size, proportion of validation data set
        lr: learning_rate
        nc: number of classes
        dp: drop_prob
        device: GPU or CPU
    '''
    # Training parameters.  The original hard-coded learning_rate=0.001 and
    # drop_prob=0.5, silently ignoring the `lr` and `dp` arguments; honor
    # the caller's values instead.
    learning_rate = lr
    batch_size = bs
    drop_prob = dp
    # wrap essential info into dictionary (used for checkpoint/curve names):
    hyps = {
        'mn': model_name,
        'bs': batch_size,
        'lr': learning_rate,
        'nc': nc,
        'dp': drop_prob
    }
    # classification dataset, fully loaded into RAM
    data_set = SnapshotClassificationDatasetRAM(data_dir)
    # split data for training and validation
    num_train = len(data_set)
    indices = list(range(num_train))
    split = int(np.floor(vs * num_train))
    # shuffle before splitting so both subsets are i.i.d.
    np.random.shuffle(indices)
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    train_loader = DataLoader(data_set, sampler=train_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    valid_loader = DataLoader(data_set, sampler=valid_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    # initialize model.
    # NOTE(review): `dp` is recorded in hyps but not forwarded to the model
    # constructor -- confirm whether the model should receive drop_prob.
    model = models.__dict__[model_name](n_classes=nc)
    # model training device
    if TRAIN_ON_MULTI_GPUS:
        model = nn.DataParallel(model).cuda()
    elif torch.cuda.is_available():
        model = model.to(device)
    else:
        print('Training on CPU, very long training time is expectable.')
    # optimizer and criterion(loss function)
    # NOTE(review): the multi-GPU branch uses SGD while the single-device
    # branch uses Adam; the recurrent trainer uses Adam in both branches.
    # Kept as-is pending confirmation of intent.
    if TRAIN_ON_MULTI_GPUS:
        optimizer = optim.SGD(model.module.parameters(), lr=learning_rate)
    else:
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    # start training
    trained_model, tlvl = train_classifier(model, optimizer, criterion, epochs,
                                           train_loader, valid_loader, hyps, device=device)
    # loss plot
    tl, vl = tlvl
    x = np.arange(len(tl))
    # drop the first (warm-up) point; for model 3 of classification only
    x, tl, vl = x[1:], tl[1:], vl[1:]
    train_curve, = plt.plot(x, tl, 'r-', label='train loss')
    valid_curve, = plt.plot(x, vl, 'b-', label='valid loss')
    plt.legend(handler_map={train_curve: HandlerLine2D(numpoints=1)})
    curve_name = get_curve_name('trained_models', hyps)  # BUG: see get_curve_name
    plt.savefig(curve_name)
    plt.show()
def forward_back_prop(model, optimizer, criterion, inp, target, hidden, clip):
    """
    Forward and backward propagation on the neural network.
    Args:
        model: The PyTorch Module that holds the neural network
        optimizer: The PyTorch optimizer for the neural network
        criterion: The PyTorch loss function
        inp: A batch of input to the neural network
        target: The target output for the batch of input
        hidden: Hidden state
        clip: Clip the overly large gradient
    Returns:
        The loss and the latest hidden state Tensor
    """
    # Detach the hidden state from its history; otherwise backprop would
    # run through the entire training sequence.  A tuple means an LSTM
    # (h, c) pair; anything else is a plain hidden tensor.
    if isinstance(hidden, tuple):
        detached = tuple(state.data for state in hidden)
    else:
        detached = hidden.data
    # With DataParallel the real network lives under .module.
    net = model.module if TRAIN_ON_MULTI_GPUS else model
    # zero accumulated gradients
    net.zero_grad()
    # forward pass
    output, detached = model(inp, detached)
    # loss + backprop
    loss = criterion(output, target)
    loss.backward()
    # clip_grad_norm_ guards against exploding gradients in RNNs
    nn.utils.clip_grad_norm_(net.parameters(), clip)
    optimizer.step()
    # loss over this batch plus the hidden state the model produced
    return loss.item(), detached
# training function for sequential prediction
def train_recurrent(model, batch_size, optimizer, criterion,
                    n_epochs, train_loader, valid_loader, hyps, clip=5,
                    stop_criterion=20, show_every_n_batches=1, multi_gpus=True,
                    device='cuda:0'):
    '''
    Train a LSTM model with the given hyperparameters.
    Args:
        model: The PyTorch Module that holds the neural network
        batch_size: batch size, integer
        optimizer: The PyTorch optimizer for the neural network
        criterion: The PyTorch loss function
        n_epochs: Total go through of entire dataset
        train_loader: Training data loader
        valid_loader: Validation data loader
        hyps: A dict containing model parameters (used by save_model)
        clip: Clip the overly large gradient
        stop_criterion: stop after this many validation checks without
            improvement
        show_every_n_batches: run validation + print loss every this many
            training batches
        multi_gpus: Whether have multiple GPUs
        device: location to put tensor/model
    Returns:
        (model, (tl, vl)): the trained model plus average training and
        validation loss histories (one entry per validation check).  The
        best model is also saved locally via save_model.
    '''
    # clear cache
    torch.cuda.empty_cache()
    # start timing
    start = time.time()
    print(f'Training on device {device} started at {time.ctime()}')
    # validation constants
    early_stop_count = 0
    valid_loss_min = np.inf
    train_losses = []
    # for plot training loss and validation loss
    tl = []
    vl = []
    model.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # fresh hidden state at the start of every epoch; DataParallel
        # wraps the real model under .module
        if TRAIN_ON_MULTI_GPUS and multi_gpus:
            hidden = model.module.init_hidden(batch_size)
        else:
            hidden = model.init_hidden(batch_size)
        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # early stop mechanism:
            # NOTE(review): this `break` only exits the batch loop; the
            # epoch loop keeps iterating (and re-triggering the message)
            # until n_epochs is exhausted -- confirm whether an outer
            # break was intended (train_classifier breaks the epoch loop).
            if early_stop_count >= stop_criterion:
                print(f'Early stop triggered after {stop_criterion} epochs.')
                break
            # make sure you iterate over completely full batches, only
            n_batches = len(train_loader.dataset) // batch_size
            if batch_i > n_batches:
                break
            # move the batch to the training device
            if TRAIN_ON_MULTI_GPUS and multi_gpus:
                inputs, labels = inputs.cuda(), labels.cuda()
            elif torch.cuda.is_available():
                inputs, labels = inputs.to(device), labels.to(device)
            # forward + backward pass; hidden state is threaded through
            # consecutive batches (detached inside forward_back_prop)
            loss, hidden = forward_back_prop(
                model, optimizer, criterion, inputs, labels, hidden, clip
            )
            # record loss
            train_losses.append(loss)
            # print loss every show_every_n_batches batches
            # including validation loss
            if batch_i % show_every_n_batches == 0:
                # fresh hidden state for the validation pass
                if TRAIN_ON_MULTI_GPUS:
                    val_h = model.module.init_hidden(batch_size)
                else:
                    val_h = model.init_hidden(batch_size)
                valid_losses = []
                # switch to validation mode
                model.eval()
                for v_inputs, v_labels in valid_loader:
                    if TRAIN_ON_MULTI_GPUS and multi_gpus:
                        v_inputs, v_labels = v_inputs.cuda(), v_labels.cuda()
                    elif torch.cuda.is_available():
                        v_inputs, v_labels = v_inputs.to(device), v_labels.to(device)
                    # Creating new variables for the hidden state, otherwise
                    # we'd backprop through the entire training history
                    # if type is tuple, then the model is LSTM
                    if type(val_h) == tuple:
                        val_h = tuple([each.data for each in val_h])
                    else:
                        val_h = val_h.data
                    v_output, val_h = model(v_inputs, val_h)
                    val_loss = criterion(v_output, v_labels)
                    valid_losses.append(val_loss.item())
                model.train()
                avg_val_loss = np.mean(valid_losses)
                avg_tra_loss = np.mean(train_losses)
                tl.append(avg_tra_loss)
                vl.append(avg_val_loss)
                # printing loss stats
                print(
                    f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_tra_loss:.6f} ' +
                    f'| Val Loss: {avg_val_loss:.6f} | Min Val: {valid_loss_min:.6f}',
                    flush=True)
                # decide whether to save model or not:
                if avg_val_loss < valid_loss_min:
                    print(f'Valid Loss {valid_loss_min:.6f} -> {avg_val_loss:.6f}. Saving...', flush=True)
                    # saving state_dict of model
                    save_model(model, hyps)
                    valid_loss_min = avg_val_loss
                    early_stop_count = 0
                else:
                    early_stop_count += 1
                # reset accumulators between validation checks
                train_losses = []
                valid_losses = []
    # returns a trained model
    end = time.time()
    print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
    return model, (tl, vl)
# run functions of this module
def run_recursive_training(model_name, data_dir, epochs, bs, vs, lr, sl=12,
                           hd=256, nl=2, dp=0.5, device='cuda:0'):
    '''
    Main function of RNNs training.
    Args:
        model_name: model name (key into models.__dict__)
        data_dir: data source location
        epochs: number of epochs to train
        bs: batch_size
        vs: validation proportion
        lr: learning_rate
        sl: sequence_length
        hd: hidden_dim
        nl: n_layers
        dp: drop_prob
        device: training hardware, GPU or CPU
    '''
    # LSTM Model Data params
    sequence_length = sl # number of time slices in a sequence
    # NOTE(review): `clip` and `senb` below are assigned but never used;
    # train_recurrent's own clip default (also 5) is what actually applies.
    clip = 5
    # Training parameters
    epochs = epochs
    learning_rate = lr
    batch_size = bs
    # Model parameters
    input_size = 69 * 69 * 3 # <- don't change this value
    output_size = input_size
    hidden_dim = hd
    # Number of RNN Layers
    n_layers = nl
    drop_prob = dp
    # Show stats for every n number of batches
    senb = 5000
    # wrap essential info into dictionary:
    hyps = {
        'mn': model_name,
        'is': input_size,
        'os': output_size,
        'sl': sequence_length,
        'bs': batch_size,
        'lr': learning_rate,
        'hd': hidden_dim,
        'nl': n_layers,
        'dp': drop_prob
    }
    data_set = S2FDatasetRAM(data_dir, sequence_length)
    # split dataset for training and validation
    num_train = len(data_set)
    indices = list(range(num_train))
    split = int(np.floor(vs * num_train)) # split index from `vs`
    # sequential (unshuffled) split: first `split` indices are validation,
    # the remainder training -- order is preserved for sequence data
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SequentialSampler(train_idx)
    valid_sampler = SequentialSampler(valid_idx)
    train_loader = DataLoader(data_set, sampler=train_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    valid_loader = DataLoader(data_set, sampler=valid_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    # initialize model
    model = models.__dict__[model_name](input_size, output_size, hidden_dim,
                                        n_layers=n_layers, drop_prob=drop_prob,
                                        device=device)
    # model training device
    if TRAIN_ON_MULTI_GPUS:
        model = nn.DataParallel(model).cuda()
    elif torch.cuda.is_available():
        model = model.to(device)
    else:
        print('Training on CPU, very long training time is expectable.')
    # optimizer and criterion(loss function)
    if TRAIN_ON_MULTI_GPUS:
        optimizer = optim.Adam(model.module.parameters(), lr=learning_rate)
    else:
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # loss:
    # criterion = nn.MSELoss()
    criterion = dich_mse_loss
    # start training
    trained_model, tlvl = train_recurrent(model, batch_size, optimizer,
                                          criterion, epochs, train_loader,
                                          valid_loader, hyps, device=device)
    # loss plot: training vs. validation curves, saved then shown
    tl, vl = tlvl
    x = np.arange(len(tl))
    train_curve, = plt.plot(x, tl, 'r-', label='train loss')
    valid_curve, = plt.plot(x, vl, 'b-', label='valid loss')
    plt.legend(handler_map={train_curve: HandlerLine2D(numpoints=1)})
    curve_name = get_curve_name('trained_models', hyps)
    plt.savefig(curve_name)
    plt.show()
def train_encoder(model, optimizer, criterion, n_epochs, loader, hyps,
                  early_stop_count=20, device='cuda:0', show_every_n_epochs=10):
    '''
    Train an auto encoder with the given hyperparameters.
    Args:
        model: The PyTorch Module that holds the neural network
        optimizer: The PyTorch optimizer for the neural network
        criterion: The PyTorch loss function
        n_epochs: Total go through of entire dataset
        loader: Training data loader
        hyps: A dict containing hyperparameters (used by save_model)
        early_stop_count: stop after this many loss checks without
            improvement
        device: Training device
        show_every_n_epochs: check/print the averaged loss every this many
            epochs
    Returns:
        A trained model. The best model will also be saved locally.
    '''
    torch.cuda.empty_cache()
    start = time.time()
    print(f'Training on device {device} started at {time.ctime()}')
    loss_min = np.inf
    losses = []
    # consecutive loss checks without improvement
    stop = 0
    model.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        if stop >= early_stop_count:
            print(f'Stop converging for {stop} epochs, early stop triggered.')
            break
        for data, _ in loader:
            if TRAIN_ON_MULTI_GPUS:
                data = data.cuda()
            elif torch.cuda.is_available():
                data = data.to(device)
            optimizer.zero_grad()
            output = model(data)
            # reconstruction loss: the input is its own target
            loss = criterion(output, data)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
        if epoch_i % show_every_n_epochs == 0:
            avg_loss = np.mean(losses)
            print(f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_loss:.6f}')
            if avg_loss < loss_min:
                print(f'Valid Loss {loss_min:.6f} -> {avg_loss:.6f}. Saving...')
                # saving state_dict of model
                save_model(model, hyps)
                loss_min = avg_loss
                stop = 0
            else:
                stop += 1
            losses = []
    end = time.time()
    # ':.2f' fixes the original ':2f', which meant field width 2, not
    # 2 decimal places
    print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
    return model
def train_vae(model, optimizer, criterion, n_epochs,
              loader, hyps, device='cuda:0', show_every_n_batches=100):
    '''
    Train an VAE with the given hyperparameters.
    Args:
        model: The PyTorch Module that holds the neural network
        optimizer: The PyTorch optimizer for the neural network
        criterion: The PyTorch loss function; must return (loss, bce, kld)
        n_epochs: Total go through of entire dataset
        loader: Training data loader
        hyps: A dict containing hyperparameters (used by save_model)
        device: Training device
        show_every_n_batches: unused in this function; kept for interface
            compatibility
    Returns:
        A trained model. The model is also saved locally after every epoch
        (unconditionally -- there is no best-model tracking here).
    '''
    # clear cache
    torch.cuda.empty_cache()
    # start timing
    start = time.time()
    print(f'Training on device {device} started at {time.ctime()}')
    # per-epoch loss accumulators
    losses = []
    bces = []
    klds = []
    model.train()
    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        for data, _ in loader:
            # forward, back prop
            if TRAIN_ON_MULTI_GPUS:
                data = data.cuda()
            elif torch.cuda.is_available():
                data = data.to(device)
            recon_images, mu, logvar = model(data)
            loss, bce, kld = criterion(recon_images, data, mu, logvar)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # record loss components
            losses.append(loss.item())
            bces.append(bce.item())
            klds.append(kld.item())
        al = np.mean(losses)
        ab = np.mean(bces)
        ak = np.mean(klds)
        # printing loss stats
        print(
            f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {al:.6f} | BCE: {ab:.6f} | KLD: {ak:.6f}', flush=True)
        # reset ALL three accumulators so every printed value is a
        # per-epoch average (the original cleared only `losses`, so BCE
        # and KLD silently became running averages over all epochs)
        losses = []
        bces = []
        klds = []
        save_model(model, hyps)
    # returns a trained model
    end = time.time()
    # ':.2f' fixes the original ':2f' (field width 2, not 2 decimals)
    print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
    return model
def run_encoder_training(model_name, data_dir, epochs, bs, vs, lr, mode='od',
                         hd=512, device='cuda:0'):
    '''
    Main function of auto encoder.
    Args:
        model_name: model name (key into models.__dict__)
        data_dir: location of training data
        epochs: number of epochs to train
        bs: batch_size
        vs: validation size (proportion of the dataset)
        lr: learning rate
        mode: pnf (3-channel) or od (1-channel)
        hd: hidden dim
        device: where to train the model
    Returns:
        the trained model
    Raises:
        ValueError: if `mode` is neither `od` nor `pnf`
    '''
    # Training parameters
    learning_rate = lr
    batch_size = bs
    # Model parameters: flattened input size depends on channel count
    if mode == 'od':
        input_size = 69 * 69 * 1
    elif mode == 'pnf':
        input_size = 69 * 69 * 3
    else:
        raise ValueError('Only `od` and `pnf` are supported.')
    output_size = input_size
    hidden_dim = hd
    # wrap essential info into dictionary:
    hyps = {
        'is': input_size,
        'os': output_size,
        'mn': model_name,
        'hd': hidden_dim,
        'bs': batch_size,
        'lr': learning_rate,
        'md': mode
    }
    # Initialize data loaders: convolutional models keep the 2D image
    # layout, the rest use flattened vectors
    if model_name in ['ConvAutoEncoder',
                      'ConvAutoEncoderShallow',
                      'VAE',
                      'SparseAutoEncoder',
                      'SparseConvAutoEncoder']:
        data_set = ConvEncoderDatasetRAM(data_dir)
    else:
        data_set = EncoderDatasetRAM(data_dir)
    # split dataset for training and validation.  The original hard-coded
    # 0.8 here, ignoring `vs`; existing callers pass vs=0.8 so honoring the
    # parameter is behavior-preserving for them.
    num_train = len(data_set)
    indices = list(range(num_train))
    split = int(np.floor(vs * num_train))
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SequentialSampler(train_idx)
    valid_sampler = SequentialSampler(valid_idx)
    loader = DataLoader(data_set, sampler=train_sampler,
                        batch_size=batch_size, num_workers=0, drop_last=True)
    # NOTE(review): valid_loader is built but never consumed --
    # train_encoder/train_vae only take the training loader.
    valid_loader = DataLoader(data_set, sampler=valid_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    # initialize model
    # This part is currently very mixed. Change in the future.
    if model_name in ['ConvAutoEncoder', 'ConvAutoEncoderShallow']:
        model = models.__dict__[model_name](hyps['os'], mode=hyps['md'])
    elif model_name == 'VAE':
        model = models.__dict__[model_name](hyps['md'], hidden_dim=hyps['hd'], z_dim=32)
    elif model_name in ['SparseConvAutoEncoder', 'SparseAutoEncoder']:
        model = models.__dict__[model_name](hyps['md'], hidden_dim=hyps['hd'])
    else:
        model = models.__dict__[model_name](hyps['is'], hyps['os'], hidden_dim=hyps['hd'])
    print(model)
    # model training device
    if TRAIN_ON_MULTI_GPUS:
        model = nn.DataParallel(model).cuda()
    elif torch.cuda.is_available():
        model = model.to(device)
    else:
        print('Training on CPU, very long training time is expectable.')
    # fail fast if the architecture does not match the data channels
    check_encoder_dim(mode, model, data_set)
    # optimizer and criterion(loss function)
    # NOTE(review): multi-GPU uses Adam while single-device uses SGD --
    # confirm intent; kept as-is to preserve behavior.
    if TRAIN_ON_MULTI_GPUS:
        optimizer = optim.Adam(model.module.parameters(), lr=learning_rate)
    else:
        optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    if model_name == 'VAE':
        criterion = vae_loss
        trained_model = train_vae(model, optimizer, criterion, epochs, loader,
                                  hyps, device=device)
    else:
        criterion = nn.MSELoss()
        # criterion = nn.L1Loss()
        # criterion = dich_mse_loss
        # start training
        trained_model = train_encoder(
            model, optimizer, criterion, epochs, loader, hyps, device=device
        )
    return trained_model
def train_GAN(D, G, d_optimizer, g_optimizer, n_epochs, z_size,
              train_loader, valid_loader, sample_size, hyps, device='cuda:0',
              print_every=100):
    '''
    GAN training function.
    Args:
        D: discriminator network
        G: generator network
        d_optimizer: optimizer for D
        g_optimizer: optimizer for G
        n_epochs: number of passes over the training set
        z_size: latent vector size
        train_loader: training data loader
        valid_loader: validation data loader (currently unused)
        sample_size: number of fixed latent vectors used for progress
            samples (the original overwrote this with 16, ignoring the
            caller's value; it is honored now -- default callers pass 16,
            so their behavior is unchanged)
        hyps: dict of hyperparameters; 'md' selects the sample view mode,
            the rest name the saved checkpoints
        device: training device
        print_every: record/print loss stats every this many batches
    Returns:
        trained model: G and D
    '''
    # clear cache
    torch.cuda.empty_cache()
    # start timing
    start = time.time()
    print(f'Training on device {device} started at {time.ctime()}')
    # keep track of loss and generated, "fake" samples
    samples = []
    losses = []
    # Fixed latent vectors, held constant throughout training so the
    # generator's progress can be inspected on identical inputs.
    fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
    fixed_z = torch.from_numpy(fixed_z).float()
    # train the network
    for epoch in range(n_epochs):
        for batch_i, (real_images, _) in enumerate(train_loader):
            batch_size = real_images.size(0)
            # important rescaling step (to the generator's output range)
            real_images = scale(real_images)
            # ============================================ #
            #            TRAIN THE DISCRIMINATOR           #
            # ============================================ #
            d_optimizer.zero_grad()
            # 1. Train with real images
            # Compute the discriminator losses on real images
            if TRAIN_ON_MULTI_GPUS:
                real_images = real_images.cuda()
            elif torch.cuda.is_available():
                real_images = real_images.to(device)
            D_real = D(real_images)
            d_real_loss = real_loss(D_real)
            # 2. Train with fake images
            # Generate fake images
            z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            z = torch.from_numpy(z).float()
            # move z to GPU, if available
            if TRAIN_ON_MULTI_GPUS:
                z = z.cuda()
            elif torch.cuda.is_available():
                z = z.to(device)
            fake_images = G(z)
            # Compute the discriminator losses on fake images
            D_fake = D(fake_images)
            d_fake_loss = fake_loss(D_fake)
            # add up loss and perform backprop
            d_loss = d_real_loss + d_fake_loss
            d_loss.backward()
            d_optimizer.step()
            # ========================================= #
            #            TRAIN THE GENERATOR            #
            # ========================================= #
            g_optimizer.zero_grad()
            # 1. Train with fake images and flipped labels
            # Generate fake images
            z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            z = torch.from_numpy(z).float()
            if TRAIN_ON_MULTI_GPUS:
                z = z.cuda()
            elif torch.cuda.is_available():
                z = z.to(device)
            fake_images = G(z)
            # Compute the discriminator losses on fake images
            # using flipped labels!
            D_fake = D(fake_images)
            g_loss = real_loss(D_fake)  # use real loss to flip labels
            # perform backprop
            g_loss.backward()
            g_optimizer.step()
            # Print some loss stats
            if batch_i % print_every == 0:
                # append discriminator loss and generator loss
                losses.append((d_loss.item(), g_loss.item()))
                # print discriminator and generator loss
                print(f'Epoch [{epoch+1:5d}/{n_epochs:5d}] |' +
                      f' d_loss: {d_loss.item():6.4f} | g_loss: {g_loss.item():6.4f}')
        # AFTER EACH EPOCH
        # generate and save sample, fake images
        G.eval()  # for generating samples
        if TRAIN_ON_MULTI_GPUS:
            fixed_z = fixed_z.cuda()
        elif torch.cuda.is_available():
            fixed_z = fixed_z.to(device)
        samples_z = G(fixed_z)
        samples.append(samples_z)
        G.train()  # back to training mode
        # Save training generator samples
        with open('train_samples.pkl', 'wb') as f:
            pkl.dump(samples, f)
        save_model(D, hyps)
        save_model(G, hyps)
    end = time.time()
    # ':.2f' fixes the original ':2f' (field width 2, not 2 decimals)
    print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
    with open('train_samples.pkl', 'rb') as f:
        samples = pkl.load(f)
    # visualize the latest batch of generated samples
    _ = view_samples(-1, samples, mode=hyps['md'])
    return G, D
def run_GAN_training(data_dir, epochs, bs, vs, lr=0.0002, z_size=128, sample_size=16,
                     conv_dim=64, beta1=0.5, beta2=0.999, mode='od', device='cuda:0'):
    '''
    Main function of GAN.
    Args:
        data_dir: location of training data
        epochs: number of epochs to train
        bs: batch_size
        vs: validation size (proportion of the dataset)
        lr: learning rate for both Adam optimizers
        z_size: latent vector size
        sample_size: number of fixed progress samples
        conv_dim: convolutional layer dimension
        beta1: Adam first-moment decay
        beta2: Adam second-moment decay
        mode: pnf or od
        device: where to train the model
    Returns:
        the trained (G, D) pair
    '''
    # Training parameters.  The original re-hardcoded lr/beta1/beta2 to
    # their default values right before building the optimizers (and built
    # each optimizer twice); the parameters are honored now -- defaults are
    # identical, so existing callers see the same behavior.
    batch_size = bs
    valid_size = vs
    # wrap essential info into dictionary:
    hyps = {
        'mn': 'GAN',
        'bs': batch_size,
        'vs': valid_size,
        'lr': lr,
        'zs': z_size,
        'ss': sample_size,
        'cd': conv_dim,
        'md': mode
    }
    data_set = ConvEncoderDatasetRAM(data_dir)
    # split dataset for training and validation (sequential, unshuffled)
    num_train = len(data_set)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SequentialSampler(train_idx)
    valid_sampler = SequentialSampler(valid_idx)
    train_loader = DataLoader(data_set, sampler=train_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    valid_loader = DataLoader(data_set, sampler=valid_sampler,
                              batch_size=batch_size, num_workers=0, drop_last=True)
    # define discriminator and generator
    D = Discriminator(conv_dim, mode=hyps['md'])
    G = Generator(z_size=z_size, conv_dim=conv_dim, mode=hyps['md'])
    # initialize model weights
    D.apply(weights_init_normal)
    G.apply(weights_init_normal)
    print(D)
    print()
    print(G)
    # model training device
    if TRAIN_ON_MULTI_GPUS:
        D = nn.DataParallel(D).cuda()
        G = nn.DataParallel(G).cuda()
    elif torch.cuda.is_available():
        D = D.to(device)
        G = G.to(device)
    else:
        print('Training on CPU, very long training time is expectable.')
    # Create optimizers for the discriminator and generator (built once;
    # DataParallel wraps the real parameters under .module)
    if TRAIN_ON_MULTI_GPUS:
        d_optimizer = optim.Adam(D.module.parameters(), lr, [beta1, beta2])
        g_optimizer = optim.Adam(G.module.parameters(), lr, [beta1, beta2])
    else:
        d_optimizer = optim.Adam(D.parameters(), lr, [beta1, beta2])
        g_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2])
    G, D = train_GAN(D, G, d_optimizer, g_optimizer, epochs, z_size,
                     train_loader, valid_loader, sample_size, hyps,
                     device=device, print_every=100)
    return G, D
if __name__ == '__main__':
    # dataset folders selecting factors: channel mode, year and sampling
    # frequency pick the tensor directory under data/processed/
    mode = 'pnf'
    year = '2018'
    freq = '15min'
    # resolve .../<repo root>/data/processed/<mode>/<year>/<freq>/tensors
    # relative to this file's location
    data_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve(
    ).parents[1].joinpath(f'data/processed/{mode}/{year}/{freq}/tensors'))
    # alternative entry points, kept for convenience:
    # run_recursive_training('VanillaLSTM', data_dir, 500, 512, 0.9, 0.01, sl=12, hd=512)
    # run_classifier_training('ConvClassifier', data_dir, 50, 128, 0.8, 0.001, 2, device='cuda:1')
    run_encoder_training('ConvAutoEncoderShallow', data_dir, 2000, 128, 0.8, 0.1,
                         mode=mode, hd=113, device='cuda:1')
    # run_GAN_training(data_dir, 100, 64, 0.8, z_size=100, conv_dim=256, mode='pnf')
| 40,735 | 13,139 |
import sys
import time
import random
sys.path.append("..")
from pympler import asizeof
from test_automata import *
# For computing average runtimes
def avg_tests(runs, test, string, test_num):
    """Run `test` on `string` `runs` times and print mean runtimes in ms.

    `test4`/`test5` return 4-tuples (monolithic, serial, maximal-prefix
    parallel, parallel); every other test returns a 2-tuple (monolithic,
    parallel).  The branch is selected by the test function's __name__.
    """
    header = "Test %s (%s)\n------------------- " % (test_num, len(string))
    if test.__name__ in ["test4", "test5"]:
        # four composition strategies: accumulate each column separately
        totals = [0, 0, 0, 0]
        for _ in range(runs):
            sample = test(string)
            for pos in range(4):
                totals[pos] += sample[pos]
        print(header)
        print("Computation time for Monolithic Enforcer : %f ms" % (totals[0] / runs))
        print("Computation time for Serial Composition : %f ms" % (totals[1] / runs))
        print("Computation time for Maximal Prefix Parallel Composition : %f ms" % (totals[2] / runs))
        print("Computation time for Parallel Composition : %f ms\n" % (totals[3] / runs))
    else:
        # two strategies only: monolithic vs. parallel
        mono_total = 0
        par_total = 0
        for _ in range(runs):
            sample = test(string)
            mono_total += sample[0]
            par_total += sample[1]
        print(header)
        print("Computation time for Monolithic Enforcer : %f ms" % (mono_total / runs))
        print("Computation time for Parallel Composition : %f ms\n" % (par_total / runs))
# Generating random strings from given alphabet
def generate_strings(alphabet):
    """Return 10 random strings over `alphabet`.

    Lengths are 10^i and 5*10^i for i = 1..5, in ascending-exponent order
    (10, 50, 100, 500, ..., 100000, 500000).
    """
    result = []
    for exp in range(1, 6):
        for scale in (1, 5):
            length = scale * 10 ** exp
            result.append("".join(random.choices(alphabet, k=length)))
    return result
# Tests for compositions of EM1 and EM2
# Monolithic Composition with 3*5 = 15 states
# Parallel Composition with 3+5 = 8 states
def test1(Input):
    """Time checkAccept() of EM1+EM2 composed monolithically vs. in parallel.

    Construction time is excluded: the clock is (re)started only after each
    enforcer has been built.  Returns (monolithic_ms, parallel_ms).
    """
    # Monolithic enforcer over plain DFAs
    mono = monolithic_enforcer('A_B', EM1(), EM2())
    mono_start = time.time()
    mono.checkAccept(Input)
    mono_end = time.time()
    # Parallel composition over pDFAs
    par = parallel_enforcer(EM1("pDFA"), EM2("pDFA"))
    par_start = time.time()
    par.checkAccept(Input)
    par_end = time.time()
    return (mono_end - mono_start) * 1000, (par_end - par_start) * 1000
# Tests for compositions of EM1, EM2 and EM3
# Monolithic Composition with 3*5*7 = 105 states
# Parallel Composition with 3+5+7 = 15 states
def test2(Input):
    '''
    Benchmark EM1/EM2/EM3 composed monolithically vs. in parallel on
    `Input`.  Returns (monolithic_ms, parallel_ms) for checkAccept() only;
    the start timestamp is deliberately reassigned after construction so
    enforcer build time is excluded.  When the module-level SIZEOF flag is
    set, also prints the memory footprint of the monolithic enforcer.
    '''
    # Monolithic Test
    tsP = time.time()
    A, B, C = EM1(), EM2(), EM3()
    A_B_C = monolithic_enforcer('A_B_C', A, B, C)
    # restart the clock: only checkAccept() is measured
    tsP = time.time()
    accept = A_B_C.checkAccept(Input)
    teP = time.time()
    if (SIZEOF):
        print(asizeof.asized(A_B_C, detail=1).format())
    # Parallel Composition Test
    tsC = time.time()
    A, B, C = EM1("pDFA"), EM2("pDFA"), EM3("pDFA")
    A_B_C = parallel_enforcer(A, B, C)
    # restart the clock: only checkAccept() is measured
    tsC = time.time()
    accept = A_B_C.checkAccept(Input)
    teC = time.time()
    return (teP - tsP)*1000, (teC - tsC)*1000
# Tests for compositions of EM4, EM5, EM6, EM7, EM8 and EM9
# Monolithic Composition with 2*3*4*5*6*7 = 5040 states
# Parallel Composition with 2+3+4+5+6+7 = 27 states
def test3(Input):
    '''
    Benchmark the six enforcers EM4..EM9 composed monolithically vs. in
    parallel on `Input`.  Returns (monolithic_ms, parallel_ms) for
    checkAccept() only; construction time is excluded by reassigning the
    start timestamp after the enforcer is built.  With SIZEOF set, prints
    the monolithic enforcer's memory footprint.
    '''
    # Monolithic Test
    tsP = time.time()
    R1, R2, R3, R4, R5, R6 = EM4(), EM5(), EM6(), EM7(), EM8(), EM9()
    R = monolithic_enforcer('R', R1, R2, R3, R4, R5, R6)
    # restart the clock: only checkAccept() is measured
    tsP = time.time()
    accept = R.checkAccept(Input)
    teP = time.time()
    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())
    # Parallel Composition Test
    tsC = time.time()
    R1, R2, R3, R4, R5, R6 = EM4("pDFA"), EM5("pDFA"), EM6("pDFA"), EM7("pDFA"), EM8("pDFA"), EM9("pDFA")
    R = parallel_enforcer(R1, R2, R3, R4, R5, R6)
    # restart the clock: only checkAccept() is measured
    tsC = time.time()
    accept = R.checkAccept(Input)
    teC = time.time()
    return (teP - tsP)*1000, (teC - tsC)*1000
# Tests for compositions of EM10, EM11, EM12 (Safety properties)
# Monolithic Composition with 3*4*3 = 36 states
# Serial and (both) Parallel Composition with 3+4+3 = 10 states
def test4(Input):
    '''
    Benchmark four composition strategies of the safety enforcers
    EM10/EM11/EM12 on `Input`.  Returns elapsed milliseconds of
    checkAccept() as a 4-tuple: (monolithic, serial, maximal-prefix
    parallel, parallel).  Construction time is excluded by reassigning each
    start timestamp after the enforcer is built.  With SIZEOF set, each
    enforcer's memory footprint is printed.
    '''
    # Monolithic Test
    tsP = time.time()
    RS, RT, RU = EM10(), EM11(), EM12()
    R = monolithic_enforcer('R', RS, RT, RU)
    # restart the clock: only checkAccept() is measured
    tsP = time.time()
    accept = R.checkAccept(Input)
    teP = time.time()
    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())
    # Serial Composition Test
    tsS = time.time()
    RS, RT, RU = EM10("DFA"), EM11("DFA"), EM12("DFA")
    R = serial_composition_enforcer(RS, RT, RU)
    tsS = time.time()
    accept = R.checkAccept(Input)
    teS = time.time()
    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())
    # Maximal Prefix Parallel Composition Test
    tsM = time.time()
    RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
    R = maximal_prefix_parallel_enforcer(RS, RT, RU)
    tsM = time.time()
    accept = R.checkAccept(Input)
    teM = time.time()
    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())
    # Parallel Composition Test
    tsC = time.time()
    RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
    R = parallel_enforcer(RS, RT, RU)
    tsC = time.time()
    accept = R.checkAccept(Input)
    teC = time.time()
    if (SIZEOF):
        print(asizeof.asized(R, detail=1).format())
    return (teP - tsP)*1000, (teS - tsS)*1000, (teM - tsM)*1000, (teC - tsC)*1000
# Tests for compositions of EM13, EM14, EM15 (Co-safety properties)
# Monolithic Composition with 4*5*3 = 60 states
# Serial and (both) Parallel Composition with 4+5+3 = 12 states
def test5(Input):
    '''
    Benchmark four composition strategies of the co-safety enforcers
    EM13/EM14/EM15 on `Input`.  Returns elapsed milliseconds of
    checkAccept() as a 4-tuple: (monolithic, serial, maximal-prefix
    parallel, parallel).  Construction time is excluded by reassigning each
    start timestamp after the enforcer is built.  With SIZEOF set, each
    enforcer's memory footprint is printed.
    '''
    # Monolithic Test
    tsP = time.time()
    RCS, RCT, RCU = EM13(), EM14(), EM15()
    RC = monolithic_enforcer('RC', RCS, RCT, RCU)
    # restart the clock: only checkAccept() is measured
    tsP = time.time()
    accept = RC.checkAccept(Input)
    teP = time.time()
    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())
    # Serial Composition Test
    tsS = time.time()
    RCS, RCT, RCU = EM13("DFA"), EM14("DFA"), EM15("DFA")
    RC = serial_composition_enforcer(RCS, RCT, RCU)
    tsS = time.time()
    accept = RC.checkAccept(Input)
    teS = time.time()
    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())
    # Maximal Prefix Parallel Composition Test
    tsM = time.time()
    RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
    RC = maximal_prefix_parallel_enforcer(RCS, RCT, RCU)
    tsM = time.time()
    accept = RC.checkAccept(Input)
    teM = time.time()
    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())
    # Parallel Composition Test
    tsC = time.time()
    RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
    RC = parallel_enforcer(RCS, RCT, RCU)
    tsC = time.time()
    accept = RC.checkAccept(Input)
    teC = time.time()
    if (SIZEOF):
        print(asizeof.asized(RC, detail=1).format())
    return (teP - tsP)*1000, (teS - tsS)*1000, (teM - tsM)*1000, (teC - tsC)*1000
if __name__ == '__main__':
    # Fixed benchmark inputs for the three property families.
    Input1 = str(bin(15*1859))[2:]
    Input2 = "33322555556666661111444422"
    Input3 = "bbbbbbbbbbbbbbabbbbbbbbbbbbbb"
    # One averaged run per test with its canonical input.
    for runs, test_fn, data, test_id in (
            (1000, test1, Input1, 1),
            (1000, test2, Input1, 2),
            (1000, test3, Input2, 3),
            (1000, test4, Input3, 4),
            (1000, test5, Input3, 5)):
        avg_tests(runs, test_fn, data, test_id)
    # Sweeps over generated inputs drawn from each test's alphabet.
    strings1 = generate_strings('01')
    strings2 = generate_strings('123456')
    strings3 = generate_strings('abc')
    for string in strings1:
        avg_tests(1000, test2, string, 2)
    for string in strings2:
        avg_tests(1000, test3, string, 3)
    for string in strings3:
        avg_tests(100, test4, string, 4)
        avg_tests(100, test5, string, 5)
    if SIZEOF:
        print(EM_size)
| 7,727 | 3,217 |
'''start = 0
end = int ( input(' insert an value for conting'))
# criando uma variavel indice que recebe o valor inteiro digitado#
while start <= end :
if end % 2 == 0:
print (start)
start = start +1
print('this number is par')
else:
print('this number is impar')
'''
# Read the inclusive upper bound, then count from 0 up to it in steps of three.
limit = int(input(' insert an value for conting'))
for value in range(0, limit + 1, 3):
    print(value)
| 501 | 162 |
#!/usr/local/bin/python3
# This is a script to compile Move source code into artifacts that can be used for testing.
# Copy this to the root of your hardhat project to use it.
#
# Note: this is a temporary solution that will be phased out once we implement the Move plugin.
#
# Move code should be stored within the `contracts` directory, along with an ABI file.
# - contracts
# - MyContract.move
# - MyContract.abi.json
#
# The ABI file should look something like this:
# [
# {
# "inputs": [],
# "name": "foo",
# "outputs": [
# {
# "internalType": "uint256",
# "name": "",
# "type": "uint256"
# }
# ],
# "stateMutability": "view",
# "type": "function"
# }
# ]
# Move dependency source directories, resolved relative to this script;
# passed to move-to-yul via its -d flag.
dependencies = [
    "../stdlib/sources",
    "../../move-stdlib/sources"
]
# Named-address bindings passed to move-to-yul via its -n flag (name=addr).
named_address_mapping = {
    "Std": "0x1",
    "Evm": "0x2"
}
import os
import shutil
import tempfile
import subprocess
import json
import sys
from os import path
# Directory containing this script; all other paths are derived from it.
path_root = path.dirname(__file__)
# Move sources (and their *.abi.json files) live under ./contracts.
path_contracts = path.join(path_root, "contracts")
# Hardhat-style artifact output directory (./artifacts/contracts).
path_artifacts = path.join(path_root, "artifacts", "contracts")
# Home directory, used below to look for a user-local solc (~/bin/solc).
path_home = path.expanduser("~")
def eprint(*args, **kwargs):
    """Print to stderr; accepts the same arguments as print()."""
    print(*args, file=sys.stderr, **kwargs)
def locate_solc():
    """Return the path to a solc binary, preferring ~/bin/solc over $PATH.

    Exits the process with status 1 if no solc executable can be found.
    """
    local_candidate = path.join(path_home, "bin", "solc")
    if path.isfile(local_candidate):
        return local_candidate
    from_path = shutil.which("solc")
    if from_path is None:
        eprint("Failed to locate solc.")
        exit(1)
    return from_path
def locate_move_to_yul():
    """Return the path to the move-to-yul binary found on $PATH.

    Exits the process with status 1 (after printing install advice) when
    the tool is not installed.
    """
    found = shutil.which("move-to-yul")
    if found is None:
        eprint("Failed to locate move-to-yul -- you can install it by running `cargo install --path <path to the move-to-yul crate>.`")
        exit(1)
    return found
# Resolve both tool paths eagerly at import time; either helper exits the
# process with status 1 if its tool cannot be found.
path_solc = locate_solc()
path_move_to_yul = locate_move_to_yul()
def list_move_sources():
    """Return paths of all .move files directly inside the contracts dir."""
    candidates = (path.join(path_contracts, entry)
                  for entry in os.listdir(path_contracts))
    return [p for p in candidates
            if path.isfile(p) and path.splitext(p)[1] == ".move"]
def load_abi(path_source):
    """Parse and return the ABI JSON stored next to *path_source*.

    The ABI file is expected at <source-without-extension>.abi.json.
    Exits the process with status 1 if that file does not exist.
    """
    path_abi = path.splitext(path_source)[0] + ".abi.json"
    if not path.isfile(path_abi):
        eprint()
        eprint("Missing ABI definition: {}.".format(path_abi))
        exit(1)
    with open(path_abi, "r") as f:
        return json.loads(f.read())
def move_to_yul(path_source):
    """Compile one Move source file to Yul using the move-to-yul tool.

    Returns the generated Yul code as bytes; exits with status 1 if the
    compiler reports an error.  Also asks the compiler to write an ABI
    file next to the source (<name>.abi.json).
    """
    # The subprocess writes to the temp file by *name*; we read the result
    # back through the still-open handle below.  This relies on the same
    # file being openable twice, which works on POSIX but not on Windows.
    with tempfile.NamedTemporaryFile() as output_file:
        args = [path_move_to_yul, "--output", output_file.name]
        if len(dependencies) > 0:
            args.append("-d")
            args.extend(dependencies)
        if len(named_address_mapping) > 0:
            args.append("-n")
            for (name, addr) in named_address_mapping.items():
                args.append("{}={}".format(name, addr))
        # NOTE(review): this writes to the same <name>.abi.json path that
        # load_abi() reads -- presumably regenerating the checked-in ABI is
        # intended; confirm.
        path_abi = path.splitext(path_source)[0] + ".abi.json"
        args.append("--abi-output")
        args.append(path_abi)
        args.extend(["--", path_source])
        move_to_yul_res = subprocess.run(args, capture_output = True)
        if move_to_yul_res.returncode != 0:
            eprint()
            eprint(move_to_yul_res.stderr.decode("utf-8"))
            exit(1)
        # The handle's position is still 0 (nothing was written through it),
        # so this reads the subprocess output from the start of the file.
        return output_file.read()
def solc(path_source, yul_code):
    """Compile Yul code (bytes) to EVM bytecode via solc --strict-assembly.

    Returns the bytecode as a 0x-prefixed hex string, taken from the text
    after solc's "Binary representation:" marker.  Exits with status 1 on
    compiler error.  path_source is unused here; it is kept for signature
    symmetry with the other build steps.
    """
    result = subprocess.run(
        [path_solc, "--optimize", "--strict-assembly", "--bin", "-"],
        input = yul_code, capture_output = True)
    if result.returncode != 0:
        eprint()
        eprint(result.stderr.decode("utf-8"))
        exit(1)
    stdout_text = result.stdout.decode("utf-8")
    binary_part = stdout_text.split("Binary representation:")[1]
    return "0x{}".format(binary_part.replace("\n", ""))
def gen_artifact(path_source, abi, bytecode):
    """Write a Hardhat-style artifact JSON for one compiled contract.

    The artifact lands at <path_artifacts>/<source basename>/<name>.json
    (note the directory keeps the ".move" suffix).  Exits with status 1 if
    that path exists but is not a directory.
    """
    basename = path.basename(path_source)
    contract_name = path.splitext(basename)[0]
    path_artifact = path.join(path_artifacts, basename)
    if not path.isdir(path_artifact):
        if path.exists(path_artifact):
            eprint("Failed to generate artifact. Path {} already exists, but it's not a directory.".format(path_artifact))
            exit(1)
        os.makedirs(path_artifact)
    # "bytecode" and "deployedBytecode" both receive the same compiled blob;
    # link references are left empty.
    artifact = {
        "_format": "hh-sol-artifact-1",
        "contractName": contract_name,
        "sourceName": path_source,
        "abi": abi,
        "bytecode": bytecode,
        "deployedBytecode": bytecode,
        "linkReferences": {},
        "deployedLinkReferences": {}
    }
    with open(path.join(path_artifact, contract_name + ".json"), "w") as f:
        json.dump(artifact, f, indent = 4)
def run(path_source):
    """Compile one Move source end-to-end and emit its Hardhat artifact."""
    print("Compiling {}...".format(path_source))
    # Order matters: move_to_yul() writes the ABI file that load_abi() reads.
    yul = move_to_yul(path_source)
    contract_abi = load_abi(path_source)
    evm_bytecode = solc(path_source, yul)
    gen_artifact(path_source, contract_abi, evm_bytecode)
# Script entry point: compile every Move source found under ./contracts.
# NOTE(review): this runs at import time -- there is no __main__ guard.
for path_source in list_move_sources():
    run(path_source)
print("Success.")
| 4,815 | 1,657 |
# Filename : check_internet_con.py
# Author : Abhijit Kumar
# Created : 29 Dec 2017
# Description : Just checks internet connection is works or not.
#!/usr/bin/python3
# NOTE(review): shebang is not on line 1, so it is inert as a shebang.
# Fix: `urllib2` is Python 2 only and fails to import under the python3
# shebang; ported to urllib.request / urllib.error.
import urllib.request
import urllib.error


def checkInternetConnectivity():
    """Print whether http://google.com is reachable within 2 seconds."""
    try:
        urllib.request.urlopen("http://google.com", timeout=2)
        print("Working connection")
    except urllib.error.URLError as E:
        print("Connection error:%s" % E.reason)


checkInternetConnectivity()
| 443 | 150 |
from rest_framework.status import HTTP_404_NOT_FOUND
# (error code, HTTP status, human-readable message) triple.
# NOTE(review): presumably unpacked by a shared API error-response handler
# elsewhere in the project -- confirm against its consumers.
ERROR_FIELD_DOES_NOT_EXIST = (
    'ERROR_FIELD_DOES_NOT_EXIST',
    HTTP_404_NOT_FOUND,
    'The requested field does not exist.'
)
# Bare error-code constants with no status/message attached here.
ERROR_CANNOT_DELETE_PRIMARY_FIELD = 'ERROR_CANNOT_DELETE_PRIMARY_FIELD'
ERROR_CANNOT_CHANGE_FIELD_TYPE = 'ERROR_CANNOT_CHANGE_FIELD_TYPE'
| 326 | 139 |
# coding: utf-8
# In[325]:
__version__ = "0.0.1"
__all__ = []
# In[8]:
from time import sleep
# In[22]:
get_ipython().magic('ls /Users/tfast/Desktop/SLAC_MG_0716/')
# In[23]:
get_ipython().magic('ls /Users/tfast/Desktop/SLAC_MG_0716/Sample1/')
# In[14]:
get_ipython().magic(
'cat /Users/tfast/Desktop/SLAC_MG_0716/Sample1/Sample1_24x24_t30_0001.txt*')
# * Turn metadata into a dataframe
#
# /Users/tfast/Desktop/SLAC_MG_0716/Sample1/Sample1_24x24_t30_0001.txt*
# In[15]:
from IPython import display
# In[19]:
import skimage.io
# In[ ]:
# In[27]:
# In[33]:
im = skimage.io.imread(
'/Users/tfast/Desktop/SLAC_MG_0716/Sample1/Sample1_24x24_t30_0010.tif')
skimage.io.imshow(im)
# In[29]:
get_ipython().magic('matplotlib inline')
# In[34]:
import dask.dataframe as dd
# In[38]:
from whatever import *
# In[40]:
import glob
# In[42]:
import pandas
# In[ ]:
pandas.read_csv
# In[49]:
from toolz.curried import *
# In[ ]:
# In[ ]:
# NOTE(review): the result of this loop is discarded -- each frame is parsed
# and immediately dropped; presumably leftover exploration code.
for the_file in glob.glob(
        "/Users/tfast/Desktop/SLAC_MG_0716/Sample1/Processed/*_1D.csv"
):
    pandas.read_csv(
        the_file, names=['Q', 'I'], header=None
    ).iloc[:1000].set_index('Q')
# In[102]:
# In[228]:
# Load the master metadata CSV and index it by scan number.
metadata = pandas.read_csv(
    "/Users/tfast/Desktop/SLAC_MG_0716/Sample1/Processed/Sample1_24x24_t30_14715979master_metadata.csv")
metadata = metadata.rename(columns={'scan#.1': 'scan'}).set_index('scan')
# Keep only the part of each plate coordinate before the first 'e' and
# convert it to float.
for c in 'xy':
    metadata['plate_' + c] = metadata['plate_' + c].apply(compose(
        first, lambda x: x.split('e', 1)
    )).astype(float)
# In[106]:
baselines = [209, 233]
# In[346]:
# Load every 1D reduction across samples 1-5 into one wide DataFrame:
# one column per file (named "<index>_<file token>"), first 1000 Q values.
dfs = []
for i, the_file in enumerate(glob.glob(
    "/Users/tfast/Desktop/SLAC_MG_0716/Sample[1-5]/Processed/*_1D.csv"
)):
    s = pandas.read_csv(
        the_file, header=None,
        names=['Q', str(i) + '_' + str(the_file.split('_')[-2])]
    ).set_index('Q').iloc[:1000]
    dfs.append(s)
signals = pandas.concat(dfs, axis=1)
signals.plot(legend=None)
# In[319]:
get_ipython().magic('matplotlib notebook')
# > _hypothesis_ - The rolling _standard deviation_. It will exentuate the
# crystalline patterns because they have larger information entropy. The
# rolling deviations are then used to identify non-crystalline patterns, by
# some thresholding technique, that can be automated later.
# In[333]:
get_ipython().magic('matplotlib notebook')
# In[353]:
# Per-column total of the centered 31-point rolling standard deviation;
# per the hypothesis above, crystalline patterns accumulate a larger value.
roll = signals.rolling(31, center=True).std().fillna(0).sum(axis=0)
# > Identify non-crystalline states using the thresholding below.
# In[350]:
from magical import register_jinja2_magic
env = register_jinja2_magic()
# In[351]:
# Empirical cutoff separating crystalline from non-crystalline scans
# (chosen from the figure rendered by the jinja2 cell above).
threshold = 13e3
# In[352]:
get_ipython().run_cell_magic('jinja2', '', '\n> The threshold we using is `{{threshold}}`. This value was identified using the image below.\n\n<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYQAAAEACAYAAACznAEdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAGT9JREFUeJzt3X+sZHd53/H347W5xga8NpV3FEx8iVAMCW02jmKcuhFXgRpCJBuprQNNUy+0+aNNCoaq9Tr9w80fUSBSFFOlqYRCWZeGgk1+2JViMJZ3+kslhtpbu/gHbqkX22Qvpv7RppVs4336x5zZOzs7c2f2zvfMnO/e90u62nPOnXPO587uznPneWbORGYiSdJZqw4gSeoGC4IkCbAgSJIaFgRJEmBBkCQ1LAiSJGCOghARn46IzYh4cGTbhRFxd0Q8FhFfjogLRr53U0Q8HhGPRMTVbQWXJJU1zzOEzwDvHtt2ELgnMy8D7gVuAoiIHwGuA94K/CzwuxER5eJKktoysyBk5n8EnhvbfC1wa7N8K/C+Zvka4POZ+f3MfAJ4HLiiTFRJUpt2OkO4ODM3ATLzGHBxs/0NwJMjt3u62SZJ6rhSQ2WvfyFJlTt7h/ttRsS+zNyMiB7w3Wb708AbR253SbPtFBFhEZGkHcjMVmaz8z5DiOZr6E7gQLN8PXDHyPb3R8SrIuJNwJuB+6YdNDM7/3XzzTevPIM5zVlzzhoy1pSzTTOfIUTE54AN4PUR8W3gZuDjwO0R8SHgKINXFpGZD0fEbcDDwMvA38+2f4KWPfHEE6uOMBdzlmXOcmrICPXkbNPMgpCZf3PKt9415fa/AfzGIqEkScvnO5VnOHDgwKojzMWcZZmznBoyQj052xSr6uhERO3dJElauoggVzxU3rX6/f6qI8zFnGWZs5waMkI9OdtkQZAkAbaMJKkqtowkSa2zIMxQS1/RnGWZs5waMkI9OdtkQeiAXm+dXm991TEk7XLOEDpg+JER3h+SZnGGIElqnQVhhlr6iuYsy5zl1JAR6snZJgtCB/V660SEcwVJS+UMoQPGZwiD9QTCuYKkkzhDkCS1zoIwQy19RXOWZc5yasgI9eRskwVBkgQ4Q+gEZwiS5uUMQZLUOgvCDLX0Fc1ZljnLqSEj1JOzTRYESRLgDKETnCFImpczBElS6ywIM9TSVzRnWeYsp4aMUE/ONlkQJEmAM4ROcIYgaV7OECRJrbMgzFBLX9GcZZmznBoyQj0522RBkCQBzhA6wRmCpHk5Q5Aktc6CMEMtfUVzlmXOcmrICPXkbJMFQZIEOEPoBGcIkublDEGS1DoLwgwl+4q93jp79pxPRNDrrRc7LtTT/zRnWTXkrCEj1JOzTWevOsBusrl5tFlKNjdbecYnSTu20AwhIj4K/B3gOPAQ8EHgfOALwKXAE8B1mfnChH133QxhOCsYnw84Q5A0r07OECLiB4B/AFyemX+JwbONDwAHgXsy8zLgXuCmEkElSe1adIawBzg/Is4GXg08DVwL3Np8/1bgfQueY6Vq6SuasyxzllNDRqgnZ5t2XBAy8zvAbwHfZlAIXsjMe4B9mbnZ3OYYcHGJoJKkdu14qBwRexk8G7gUeAG4PSJ+gUHze9TUJviBAwdYX18HYO/evezfv5+NjQ1gq1qfaeuj+v3+ie+fut4/5bbbHX98/678vLWuD7d1JU/N6xsbG53Ks936UFfyDO+7Q4cOAZx4vGzLjofKEfHXgXdn5i81678IXAn8DLCRmZsR0QMOZ+ZbJ+zvUNmhsqTT1MmhMoNW0ZURcW4MHsHeCTwM3AkcaG5zPXDHQglXbPw3h64yZ1nmLKeGjFBPzjbtuGWUmfdFxBeBB4CXmz8/BbwWuC0iPgQcBa4rEVSS1C6vZbREt
owkLaqrLSNJ0hnEgjBDLX1Fc5ZlznJqyAj15GyTBWFl1k5c6E6SusAZwhKNzxDGl50hSJrFGYIkqXUWhBlq6SuasyxzllNDRqgnZ5ssCJ22VvyDdCRpGmcIS7STGcLodklyhiBJap0FYYZa+ormLMuc5dSQEerJ2SYLgiQJcIawVM4QJC3KGYIkqXUWhBm60lfs9da3fQlqV3LOYs6yashZQ0aoJ2ebdvx5CFquzc2jq44g6QznDGGJFpkhjH9mgqTdyRmCJKl1FoQZaukrmrMsc5ZTQ0aoJ2ebLAiSJMAZwlI5Q5C0KGcIkqTWWRBmqKWvaM6yzFlODRmhnpxtsiBIkgBnCEvlDEHSopwhSJJaZ0GYoZa+ojnLMmc5NWSEenK2yYKwBL3e+ki7aPZtT7Y2976StAhnCEswPhPYboYw75xB0u7kDEGS1DoLwgy19BXNWZY5y6khI9STs00WBEkS4AxhKZwhSCrFGYIkqXUWhBlq6SuasyxzllNDRqgnZ5ssCJIkYMEZQkRcAPwe8DbgOPAh4JvAF4BLgSeA6zLzhQn7OkMYW3aGIGmWLs8QPgn8SWa+Ffgx4FHgIHBPZl4G3AvctOA5JElLsOOCEBGvA346Mz8DkJnfb54JXAvc2tzsVuB9C6dcoVr6iuYsy5zl1JAR6snZpkWeIbwJ+F5EfCYi7o+IT0XEecC+zNwEyMxjwMUlgkqS2rXjGUJE/ATwVeCnMvPrEfHbwP8BfiUzLxq53f/KzNdP2N8ZwtiyMwRJs7Q5Qzh7gX2fAp7MzK8363/AYH6wGRH7MnMzInrAd6cd4MCBA6yvrwOwd+9e9u/fz8bGBrD19O1MWYc+2ztn7Kqm299+1T+P6667vpz1fr/PoUOHAE48XrZl0VcZ/TvglzLzmxFxM3Be861nM/MTEXEjcGFmHpywbxXPEPr9/siD+s7M+wxh3mcRbeVcBnOWVUPOGjJCPTm7+gwB4MPA70fEOcC3gA8Ce4DbIuJDwFHgugXPIUlaAq9ltATLeIYgaXfo8vsQJElnCAvCDMPhTteZsyxzllNDRqgnZ5ssCJIkwBnCUjhDkFSKMwRJUussCDPU0lc0Z1nmLKeGjFBPzjZZECRJgDOEpXCGIKkUZwiSpNZZEGaopa9ozrLMWU4NGaGenG2yIEiSAGcIS+EMQVIpzhAkSa2zIMxQS1/RnGWZs5waMkI9OdtkQZAkAc4QlsIZgqRSnCFIklpnQZihlr6iOcsyZzk1ZIR6crbJgiBJApwhLIUzBEmlOEOQJLXOgjBDLX1Fc5ZlznJqyAj15GyTBUGSBDhDWApnCJJKcYYgSWqdBWGGbvUV14gIer31U77TrZzTmbOsGnLWkBHqydmms1cdQKfjRSDZ3Gzl2aKkXc4ZwhKUnCEMl3fLfSfpZM4QJEmtsyDMUEtf0ZxlmbOcGjJCPTnbZEGQJAHOEJbCGYKkUpwhSJJaZ0GYoZa+ojnLMmc5NWSEenK2yYIgSQKcISyFMwRJpXR6hhARZ0XE/RFxZ7N+YUTcHRGPRcSXI+KCxWNKktpWomX0EeDhkfWDwD2ZeRlwL3BTgXOsTC19RXOWZc5yasgI9eRs00IFISIuAd4L/N7I5muBW5vlW4H3LXIOTbI28QJ3krSIhWYIEXE78OvABcA/zMxrIuK5zLxw5DbPZuZFE/Z1hrDg8m65/yRt6eQMISJ+DtjMzCNsPVpN4qOWJFVgkctfXwVcExHvBV4NvDYiPgsci4h9mbkZET3gu9MOcODAAdbX1wHYu3cv+/fvZ2NjA9jq5616fbht0ePB1vGm609ZnnLrfv/E8W+55ZZO3n9t3Z9tr3t/llsfz7rqPNPWjxw5wg033NCZPMP1fr/PoUOHAE48XrYmMxf+At4B3Nks/yZwY7N8I/DxKftkDQ4fPrzwMYCE4Z/llkvnXAZzllVDzhoyZtaTs/m/X+Sxe/yryPsQIuIdbM0QLgJuA94IHAWuy8znJ+yTJc5dA2cIk
kppc4bgG9OWwIIgqZRODpV3i9H+Z5eZsyxzllNDRqgnZ5ssCJIkwJbRUtgyklSKLSNJUussCDPM21fs9dZXejmJWvqf5iyrhpw1ZIR6crZpkTemacTm5tFVR5CkhThDKGQwJ5jc13eGIKkUZwgVa6+NtEZEeNVTScVYEGZYtK/YXivpRSBPHL+W/qc5y6ohZw0ZoZ6cbbIgSJIAZwjFTJshDLe3MUPw85Wl3ccZgiSpdRaEGWrpK5qzLHOWU0NGqCdnmywIkiTAGUIxzhAkLYMzBElS6ywIM9TSVzRnWeYsp4aMUE/ONlkQJEmAM4RiVjdDOJd9+3ocO/ZEgZ9CUtf5mcoVWOVQedJ5JZ2ZHCqvUC19RXOWZc5yasgI9eRskwVBkgTYMirGlpGkZbBlJElqnQVhhlr6iuYsy5zl1JAR6snZJguCJAlwhlCMMwRJy+AMQZLUOgvCDLX0Fc1ZljnLqSEj1JOzTRYESRLgDKEYZwiSlsEZgiSpdRaEGWrpK5qzLHOWU0NGqCdnmywIkiTAGUIxzhAkLYMzBElS63ZcECLikoi4NyK+EREPRcSHm+0XRsTdEfFYRHw5Ii4oF3f5hn3FXm+diKDXW19pnmn6/T693npn8w3V0qc1Zzk1ZIR6crZpkWcI3wc+lpk/CvwU8MsR8RbgIHBPZl4G3AvctHjM1dvcPApk82c3bW4e7XQ+Sd1WbIYQEX8M/E7z9Y7M3IyIHtDPzLdMuH01M4Reb/1EQYCY2K9f7QxhDXjxxDlruV8lnb7Of6ZyRKwDfeBtwJOZeeHI957NzIsm7FNNQRh/UO9eQTh5uZb7VdLpa7MgnL3oASLiNcAXgY9k5p9HxPij0dRHpwMHDrC+vg7A3r172b9/PxsbG8BWP2/V6yfb2jZ++3EXXdTjuec2J+47XX/K8jy3v+Xk73Tk/pt0f25sbHQmz7T1W265pZP/Hmu8P8ezrjrPtPUjR45www03dCbPcL3f73Po0CGAE4+XrcnMHX8xKChfYlAMhtseAfY1yz3gkSn7Zg0OHz6cDIpaQua03MPbnLyep+zb3vLhUzJ00eHDh1cdYS7mLKeGjJn15Gz+jy/02D3ta6GWUUT8K+B7mfmxkW2fAJ7NzE9ExI3AhZl5cMK+uci5l2knLaPB+vLaRLaMpN2hkzOEiLgK+PfAQ5z4LZVfBe4DbgPeCBwFrsvM5yfsb0FoabmW+1XS6evkG9My8z9l5p7M3J+ZP56Zl2fmlzLz2cx8V2ZelplXTyoGNZk8S+ii/qoDzKWW+9Oc5dSQEerJ2SbfqXzGWev0G+gkdZfXMppDbS2j7XJKqlsnW0a72ehlLIbLklQ7C8IMk/qKo5ex2HoH86r1Vx1gLrX0ac1ZTg0ZoZ6cbbIgTDHfheLWlhFFkpbCGcIUozOB07n8hDMESW1yhrAyaxPmA7OfFfgKH0k1siBs60Xg8IRt21vNJaj7Kzjn6aulT2vOcmrICPXkbJMFQZIEOEM4xdZnHwydTv/+5M8lcIYgqbROXsto4RN3tCB0YRhcarmL96+kxThUXqn+qgPMqX/KlvGXznbhc6Fr6dOas5waMkI9Odu08AfkqLvGh9tbb6jzndWSTmXLaMyZ0zI6l+E8Y9J7Iybd98NnDseOPXHK9yR1gzOEJTpzCsLWv5d5C8K0z4WW1B3OEFaqv+oAc+rvaK/5LtFRTi19WnOWU0NGqCdnm5wh7HKreROdpC6yZTRmt7SM9u279KRiMHrNpi7+vUgacIawRLulIIzfzoIg1cEZwkr1Vx1gTv1VB5hLLX1ac5ZTQ0aoJ2ebnCGc8QZXbN2379JVB5HUcbaMxpyJLaN5bmfLSKqDLSMVsPNPd1v2S1MlrYYFYab+qgPMqT/j+7M/x2Garc+OXlwtfVpzllNDRqgnZ5ssCCP8LXhLr7fOnj3nT/jEuO33WfXF8yTtnDOEEafz2cn1LM93u/EZwqT7Ytbf16xLY0hanDMEFVZunuCzAunMY
UGYqb/qAHPqn8Zty80Tti6pPd+MoZY+rTnLqSEj1JOzTRYESRLgDOEku3eGMPpZ0NM/F3rSexWmvW9jeK2k4RvihsuTPmth2ucwbPf5DKPfm7Z8uhb9PIjh53FP+zmlEryW0ZLs3oIw3/LpFIRpy6fzOQzbvVFu0gB80TfXLfrGPIfqWgaHyivVX3WAOfWXeK61CYPkaYPqk7cP+7Snvqx10jFPVnKAPetYF13Um3me0QH7IpkWeeNfDX3vGjJCPTnb5LWMgLvuuounnnpq1TEq8iKDQXKMbZt221NtDaGHzyImHXPSPmU+E3rWsZ57bnPOY5y6vLMs0urt+pbRyy+/zNraGuec8zZeeumhZmsXWj3dbhltf/vh5zlPny1sl+3kdtTgvMP+/PjPtl3LaLs5w+ixdvrZENu9b2N0hjKcJ/R66zzzzDMcP/7/Ttpe8hpSJT8Xu8RMxM/pLs8ZQotefvllzj33PCI+xiuv/GaztQsP5DUXhMWWJxWEyefdviDMWj6dWckk8x5z/OfZ7udcVPljLTYT8YKJ5VU5Q4iI90TEoxHxzYi4sa3ztK+/6gBz6rd8/LUJl7HY+RvctjvP7EtmrJ34c3wGUTpLRLBnz/knMo0uz2s4rxg/9jxzg2nzheH2ZfW9J81cunrRw2HWPXvOP618/X5/17/RspUZQkScBfwO8E7gO8DXIuKOzHy0jfO168iqA8yp7ZzjLaDhtvLnOX6cCecazzKeaVK+xbNAcvz41m/2o8vznmtSqwtenGt2MO02w+1HjhxhY2NjrhyLmDRz6ersY5j1+PE4rYxHjhwpOqeqUVvPEK4AHs/Mo5n5MvB54NqWztWy51cdYE615FRJzz/v33sp3pftFYQ3AE+OrD/VbNOutpMW005aQafTShrP1Ebrafq5J53r1LbF9i/JPbV1s3ZKy2T0Zb7jbZ/R7aPtltHjLWbPXC8pHs06aXmefXdym+HP/Gu/9uvbHuN0jj/vNb9Gt09rde20BbYjmVn8C/hrwKdG1v8W8M/GbpNd8NJLL+VZZ52da2s/lAye1yfkyPL1U7Z3bXlazq7ka2P59PfJLJ+j1DGHtvs5T74Nef3118/MMWn7+HFOJ+v4vtuZtu+02223PM++k+6/WceZ9XPOY3KGyZnmOfesfw/ZwuN2ZrbzKqOIuBL4p5n5nmb9YPNDfGLkNuVPLEm7QNb0stOI2AM8xmCo/GfAfcAHMvOR4ieTJBXRyquMMvOViPgV4G4Gc4pPWwwkqdtW9sY0SVK3rOTidst+01pEfDoiNiPiwZFtF0bE3RHxWER8OSIuGPneTRHxeEQ8EhFXj2y/PCIebHLfMrL9VRHx+Waf/xwRP7jDnJdExL0R8Y2IeCgiPtzFrBGxFhF/GhEPNDlv7mLO5jhnRcT9EXFnhzM+ERH/tbk/7+twzgsi4vbmvN+IiLd3LWdE/HBzP97f/PlCRHy4azmb43w0Iv5bc47fb4672pxtTaunfTEoQv8duBQ4h8E7qt7S8jn/CrAfeHBk2yeAf9ws3wh8vFn+EeABBu209Sbr8JnUnwI/2Sz/CfDuZvnvAb/bLP888Pkd5uwB+5vl1zCYw7ylo1nPa/7cA3yVwXtPupjzo8C/Bu7s8N/7t4ALx7Z1Mech4IPN8tnABV3MOZL3LAZvjH1j13ICP9D8vb+qWf8CcP2qc7b2ILzNHXElcNfI+kHgxiWc91JOLgiPAvua5R7w6KQ8wF3A25vbPDyy/f3Av2iWvwS8vVneAzxTKPMfA+/qclbgPODrwE92LSdwCfAVYIOtgtCpjM2+/xN4/di2TuUEXgf8jwnbO5VzLNvVwH/oYk4GBeEocCGDB/k76cD/9VW0jLryprWLM3MTIDOPARc328fzPd1sewODrEOjuU/sk5mvAM9HxEWLhIuIdQbPar7K4B9Ip7I2rZgHgGPAVzLzax3M+dvAP2Lw2u2hrmWkyfeViPhaRPzdjuZ8E/C9iPhM0475VESc1
8Gco34e+Fyz3Kmcmfkd4LeAbzfnfCEz71l1Tj8gZ0vOvsncFnqNcES8Bvgi8JHM/HNOzbbyrJl5PDN/nMFv4VdExI9OyLWynBHxc8BmZh6Zse/K70vgqsy8HHgv8MsR8dN06L5snA1cDvzzJuv/ZfBba9dyDnaMOAe4Bri92dSpnBGxl8HlfC5l8Gzh/Ij4hQm5lppzFQXhaWB0uHFJs23ZNiNiH0BE9IDvNtufZtBzHBrmm7b9pH1i8B6M12XmszsJFRFnMygGn83MO7qcFSAz/zeDS62+p2M5rwKuiYhvAf8G+JmI+CxwrEMZAcjMP2v+fIZBm/AKunVfwuA3zycz8+vN+h8wKBBdyzn0s8B/yczvNetdy/ku4FuZ+Wzz2/sfAX951TlXURC+Brw5Ii6NiFcx6HnduYTzBidXyDuBA83y9cAdI9vf30zo3wS8Gbivefr2QkRcEREB/O2xfa5vlv8GcO8COf8lg57gJ7uaNSL+wvDVDxHxauCvAo90KWdm/mpm/mBm/hCDf2P3ZuYvAv+2KxkBIuK85hkhEXE+g773Q3TovgRo2hhPRsQPN5veCXyjazlHfIDBLwJDXcv5beDKiDi3Of47gYdXnnORoc1Ovxj8NvkY8DhwcAnn+xyDVxu82PxFfJDBMOeeJsfdwN6R29/EYIr/CHD1yPafYPCf9XHgkyPb14Dbmu1fBdZ3mPMq4BUGr7x6ALi/ua8u6lJW4C822Y4ADwL/pNneqZwjx3oHW0PlTmVk0Jsf/n0/NPz/0LWczXF+jMEvdEeAP2TwKqMu5jwPeAZ47ci2Lua8uTnng8CtDF51udKcvjFNkgQ4VJYkNSwIkiTAgiBJalgQJEmABUGS1LAgSJIAC4IkqWFBkCQB8P8B7V9InsPpsHoAAAAASUVORK5CYII="/>')
# In[360]:
# Columns whose rolling-std total falls below the threshold:
# the non-crystalline scans.
noncrystalline = signals[roll[roll < threshold].index].columns
# In[376]:
get_ipython().magic('matplotlib notebook')
# In[390]:
# Rolling sum of each signal, used below to pick clearly crystalline scans.
xtal = signals.rolling(25).sum()
xtal.plot(legend=False)
# In[380]:
# Empirical cutoff on the rolling sum for "crystalline" scans.
xtal_threshold = 31e3
# In[381]:
get_ipython().run_cell_magic('jinja2', '', '\nThe rolling sum of the signals to find some crystalling examples. {{xtal_threshold}}\n\n')
# In[402]:
# Columns whose rolling sum ever exceeds the threshold: crystalline scans.
xtal_features = xtal.columns[(xtal > xtal_threshold).any(axis=0)]
# In[474]:
# Build a labeled training set: rows labeled 0 are crystalline columns,
# rows labeled 1 are non-crystalline columns.
trainer = pandas.concat(
    [
        signals[xtal_features].transpose().reset_index(drop=True).set_index(
            np.array([0] * len(xtal_features))),
        signals[noncrystalline].transpose().reset_index(drop=True).set_index(
            np.array([1] * len(noncrystalline))),
    ], axis=0)
# In[479]:
# Rolling standard deviation of every signal (prediction features below).
h = signals.rolling(25).std().fillna(0)
# In[485]:
# The same rolling-std featurization applied to the training rows.
train = trainer.transpose().rolling(25).std().fillna(0).transpose()
# In[491]:
# Robust scaling + random forest fit on the rolling-std features.
X = pipeline.make_pipeline(
    preprocessing.RobustScaler(),
    ensemble.RandomForestClassifier(),
).fit(train.values, train.index)
# In[493]:
# Plot the signals predicted as class 0 (crystalline) ...
signals.transpose()[X.predict(h.transpose().values)
                    == 0].transpose().plot(legend=False)
# In[494]:
# ... and those predicted as class 1 (non-crystalline).
signals.transpose()[X.predict(h.transpose().values)
                    == 1].transpose().plot(legend=False)
# In[448]:
# Side experiment: a decision tree fit on the raw (un-featurized) values;
# `clf` is not used afterwards in this chunk.
clf = tree.DecisionTreeClassifier().fit(trainer.values, trainer.index)
# In[ ]:
# In[303]:
roll.iloc[roll.rolling(25, center=True).std().fillna(0).sum(axis=0) < 3e5]
# In[199]:
# Fix: the original had a stray undefined name `a` after decomposition.PCA(),
# which made this call raise NameError.
# NOTE(review): `roll` was last defined as a Series (sum over axis=0), for
# which `.all(axis=1)` would fail -- this cell presumably ran against an
# earlier DataFrame-valued `roll`; confirm.
model = pipeline.make_pipeline(
    preprocessing.MaxAbsScaler(),
    decomposition.PCA(),
).fit(roll[(roll != 0).all(axis=1)].transpose().values)
# Scatter the first two principal components.
pandas.DataFrame(
    model.transform(roll[(roll != 0).all(axis=1)].transpose().values)
).plot(x=0, y=1, kind='scatter')
# In[146]:
r = signals.rolling(20, center=True)
# In[161]:
(signals.rolling(50, center=True).std().fillna(0) * signals).plot(legend=False)
# In[142]:
get_ipython().magic('matplotlib notebook')
# In[134]:
signals[baselines].plot()
# In[89]:
pandas.concat(l, axis=1, join='inner').plot(legend=None)
# In[36]:
df = dd.read_csv(
"/Users/tfast/Desktop/SLAC_MG_0716/Sample1/Processed/*_1D.csv")
| 253,493 | 205,887 |
import os, random
def get_user_agents(path='./data/user-agents.txt'):
    """Return the user-agent strings stored in *path*, one per line.

    Each line is stripped of surrounding whitespace.  The file path is now
    a parameter (the default preserves the original hard-coded location)
    so callers and tests can supply their own list.
    """
    with open(path) as file:
        return [line.strip() for line in file]
class UserAgent():
    """Holds a user-agent string and can re-pick one at random."""

    def __init__(self, user_agent):
        # Currently selected user-agent string.
        self.user_agent = user_agent

    def select_agent(self, pool=None):
        """Pick a random user-agent, store it on the instance, and return it.

        Args:
            pool: optional sequence to choose from.  Defaults to the
                module-level ``user_agents`` list (the original behaviour,
                which raises NameError if that global was never created by
                the __main__ block).
        """
        if pool is None:
            pool = user_agents
        self.user_agent = random.choice(pool)
        return self.user_agent
if __name__ == "__main__":
user_agents = get_user_agents()
print (UserAgent.select_agent())
| 577 | 181 |
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from tuiuiu.utils.pagination import paginate
from tuiuiu.tuiuiuadmin import messages
from tuiuiu.tuiuiuadmin.forms import SearchForm
from tuiuiu.tuiuiuadmin.utils import PermissionPolicyChecker, permission_denied
from tuiuiu.tuiuiuredirects import models
from tuiuiu.tuiuiuredirects.forms import RedirectForm
from tuiuiu.tuiuiuredirects.permissions import permission_policy
permission_checker = PermissionPolicyChecker(permission_policy)
@permission_checker.require_any('add', 'change', 'delete')
@vary_on_headers('X-Requested-With')
def index(request):
    """List redirects with optional search, ordering and pagination.

    AJAX requests receive only the results partial; regular requests get
    the full index page (hence the Vary on X-Requested-With).
    """
    query_string = request.GET.get('q', "")
    ordering = request.GET.get('ordering', 'old_path')
    redirects = models.Redirect.objects.prefetch_related('redirect_page', 'site')
    # Search
    if query_string:
        redirects = redirects.filter(old_path__icontains=query_string)
    # Ordering (A bit useless at the moment as only 'old_path' is allowed)
    if ordering not in ['old_path']:
        ordering = 'old_path'
    redirects = redirects.order_by(ordering)
    # Pagination
    paginator, redirects = paginate(request, redirects)
    # Render template
    if request.is_ajax():
        return render(request, "tuiuiuredirects/results.html", {
            'ordering': ordering,
            'redirects': redirects,
            'query_string': query_string,
        })
    else:
        return render(request, "tuiuiuredirects/index.html", {
            'ordering': ordering,
            'redirects': redirects,
            'query_string': query_string,
            'search_form': SearchForm(
                data=dict(q=query_string) if query_string else None, placeholder=_("Search redirects")
            ),
            'user_can_add': permission_policy.user_has_permission(request.user, 'add'),
        })
@permission_checker.require('change')
def edit(request, redirect_id):
    """Edit an existing redirect; re-renders the form with errors on invalid POST."""
    theredirect = get_object_or_404(models.Redirect, id=redirect_id)
    # Object-level permission check on top of the decorator's model-level one.
    if not permission_policy.user_has_permission_for_instance(
        request.user, 'change', theredirect
    ):
        return permission_denied(request)
    if request.method == 'POST':
        form = RedirectForm(request.POST, request.FILES, instance=theredirect)
        if form.is_valid():
            form.save()
            # NOTE(review): relies on Redirect exposing a `title` attribute --
            # confirm against the model definition.
            messages.success(request, _("Redirect '{0}' updated.").format(theredirect.title), buttons=[
                messages.button(reverse('tuiuiuredirects:edit', args=(theredirect.id,)), _('Edit'))
            ])
            return redirect('tuiuiuredirects:index')
        else:
            messages.error(request, _("The redirect could not be saved due to errors."))
    else:
        form = RedirectForm(instance=theredirect)
    return render(request, "tuiuiuredirects/edit.html", {
        'redirect': theredirect,
        'form': form,
        'user_can_delete': permission_policy.user_has_permission(request.user, 'delete'),
    })
@permission_checker.require('delete')
def delete(request, redirect_id):
    """Show a confirmation page on GET; delete the redirect on POST."""
    theredirect = get_object_or_404(models.Redirect, id=redirect_id)
    # Object-level permission check on top of the decorator's model-level one.
    allowed = permission_policy.user_has_permission_for_instance(
        request.user, 'delete', theredirect
    )
    if not allowed:
        return permission_denied(request)
    if request.method != 'POST':
        return render(request, "tuiuiuredirects/confirm_delete.html", {
            'redirect': theredirect,
        })
    theredirect.delete()
    messages.success(request, _("Redirect '{0}' deleted.").format(theredirect.title))
    return redirect('tuiuiuredirects:index')
@permission_checker.require('add')
def add(request):
    """Display the 'create redirect' form and process its submission."""
    if request.method != 'POST':
        return render(request, "tuiuiuredirects/add.html", {
            'form': RedirectForm(),
        })
    form = RedirectForm(request.POST, request.FILES)
    if not form.is_valid():
        messages.error(request, _("The redirect could not be created due to errors."))
        return render(request, "tuiuiuredirects/add.html", {
            'form': form,
        })
    theredirect = form.save()
    messages.success(request, _("Redirect '{0}' added.").format(theredirect.title), buttons=[
        messages.button(reverse('tuiuiuredirects:edit', args=(theredirect.id,)), _('Edit'))
    ])
    return redirect('tuiuiuredirects:index')
| 4,497 | 1,335 |
from itertools import combinations
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
class BestPair:
    ''' Find out the feature pairs with the highest correlation (positive and negative). '''

    def __init__(self, X, y, how='product'):
        self.X = X
        self.y = y
        self.how = how
        self._fit()

    def _fit(self):
        # Score every pair of numeric columns with the selected combiner(s),
        # then sort by descending correlation with y.
        self.corr_list = []
        combiners = {'product': self.product, 'distance': self.distance}
        if self.how == 'all':
            selected = ['product', 'distance']
        elif self.how in combiners:
            selected = [self.how]
        else:
            selected = []
        for left, right in combinations(self.X._get_numeric_data(), r=2):
            for method_name in selected:
                combined = combiners[method_name](left, right)
                self._corr_append(method_name, combined, left, right)
        self.corr_list.sort(key=lambda entry: -entry[3])

    def product(self, row, col):
        # Elementwise product of the two columns.
        return self.X[row] * self.X[col]

    def distance(self, row, col):
        # Euclidean distance from the origin in the (row, col) plane.
        return (self.X[row] ** 2 + self.X[col] ** 2) ** 0.5

    def _corr_append(self, how, new_col, row, col):
        # Record (method, left column, right column, correlation with y).
        self.corr_list.append((how, row, col, self.y.corr(new_col)))

    def get_list(self, top=1):
        # Best `top` (head) and worst `top` (tail) correlated pairs.
        return self.corr_list[:top] + self.corr_list[-top:]

    def mold(self, top=1):
        # Pretty-print the top/bottom pairs in aligned columns.
        top_corr = self.get_list(top)
        how_space = max(len(entry[0]) for entry in top_corr)
        row_space = max(len(entry[1]) for entry in top_corr)
        col_space = max(len(entry[2]) for entry in top_corr)
        template = '[{:>%d}] {:>%d} & {:<%d} | {:.3f}' % (how_space, row_space, col_space)
        for how, row, col, score in top_corr:
            print(template.format(how.title(), row, col, score))
class CustomFeature(BaseEstimator, TransformerMixin):
    ''' Select custom features to add to the feature matrix. '''

    def __init__(self, columns):
        # Each entry is a (how, row, col) triple; how is 'product' or 'distance'.
        self.columns = columns

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Work on a copy so the caller's frame is never mutated.
        augmented = X.copy()
        for how, row, col in self.columns:
            if how == 'product':
                combined = augmented[row] * augmented[col]
            elif how == 'distance':
                combined = (augmented[row] ** 2 + augmented[col] ** 2) ** 0.5
            # NOTE: entries sharing the same (row, col) overwrite one column.
            augmented['{}_{}'.format(row, col)] = combined
        return augmented
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark of cocompiled models.
Benchmark are measured with CPU 'performance' mode. To enable it, you need to
install 'cpupower' and run:
sudo cpupower frequency-set --governor performance
The reference number is measured on:
- 'x86_64': Intel Xeon W-2135(4.50GHz) + Edge TPU accelarator + USB 3.0
- 'rp3b': Raspberry Pi 3 B (version1.2)+ Edge TPU accelarator + USB 2.0
- 'rp3b+': Raspberry Pi 3 B+ (version1.3)+ Edge TPU accelarator + USB 2.0
- 'aarch64': Edge TPU dev board.
"""
import time
import timeit
from edgetpu.basic import edgetpu_utils
from edgetpu.basic.basic_engine import BasicEngine
import numpy as np
import test_utils
def _run_inferences(engines, input_data_list):
"""Runs an iteration of inferences for each engine with a random inpt.
Args:
engines: list of basic engines.
input_data_list: list of random input data.
"""
for engine, input_data in zip(engines, input_data_list):
engine.run_inference(input_data)
def _run_benchmark_for_cocompiled_models(model_names):
  """Runs benchmark for a given model set with random inputs. Models run
  inferences alternately with random inputs. It benchmarks the total time
  running each model once.

  Args:
    model_names: list of string, file names of the models.

  Returns:
    float, average sum of inferences times.
  """
  iterations = 200
  print('Benchmark for ', model_names)
  engines = []
  input_data_list = []
  edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
      edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
  for model_name in model_names:
    # Run models on a single edgetpu to achieve accurate benchmark results.
    engine = BasicEngine(test_utils.test_data_path(model_name), edge_tpus[0])
    # Prepare a random generated input.
    input_size = engine.required_input_array_size()
    random_input = test_utils.generate_random_input(1, input_size)
    # Convert it to a numpy.array.
    input_data = np.array(random_input, dtype=np.uint8)
    engines.append(engine)
    input_data_list.append(input_data)
  # Each timed iteration runs every model in the set exactly once.
  benchmark_time = timeit.timeit(
      lambda: _run_inferences(engines, input_data_list),
      number=iterations)
  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference
if __name__ == '__main__':
  args = test_utils.parse_args()
  machine = test_utils.machine_info()
  # Warn if the CPU governor is not in 'performance' mode (see module docstring).
  test_utils.check_cpu_scaling_governor_status()
  # Read references from csv file.
  modelsets_list, reference = test_utils.read_reference(
      'cocompilation_reference_%s.csv' % machine)
  total_modelsets = len(modelsets_list)
  # Put column names in first row.
  results = [('MODELS', 'INFERENCE_TIME')]
  # Each row: comma-separated model set -> measured time per iteration (ms).
  for cnt, modelsets in enumerate(modelsets_list, start=1):
    print('-------------- Models ', cnt, '/', total_modelsets, ' ---------------')
    results.append((modelsets, _run_benchmark_for_cocompiled_models(modelsets.split(','))))
  # Timestamped output filename so successive runs don't overwrite each other.
  test_utils.save_as_csv(
      'cocompilation_benchmarks_%s_%s.csv' % (
          machine, time.strftime('%Y%m%d-%H%M%S')),
      results)
  # Compare against the reference numbers; assert only when requested.
  test_utils.check_result(reference, results, args.enable_assertion)
| 3,778 | 1,260 |
#Import OpenGL and GLU. Don't import GLUT because it is ancient, broken, inflexible, and poorly
#designed--and we aren't using it.
from OpenGL.GL import *
from OpenGL.GLU import *
#Import PyGame. We'll mostly just use this to make a window. Also import all the local
#declarations (e.g. pygame.KEYDOWN, etc.), so that we don't have to keep typing "pygame." in front
#of everything. E.g., now we can do "KEYDOWN" instead of "pygame.KEYDOWN".
import pygame
from pygame.locals import *
#Import some other useful modules
import sys, os, traceback
#Center the window on the screen, if we're on Windows, which supports it.
if sys.platform in ["win32","win64"]: os.environ["SDL_VIDEO_CENTERED"]="1"
#Import sin, cos, radians, degrees, etc.
from math import *
#Initialize PyGame. You could also call "pygame.init()", but in my experience this can be faster
#(since you aren't initializing *everything*) and more portable (since some modules may require
#extra dependencies).
#Initialize only the PyGame subsystems we need (display + font); this is
#faster and more portable than pygame.init(), which initializes everything.
pygame.display.init()
pygame.font.init()
#Screen configuration
screen_size = [800,600]
#Multisample count for antialiased edges; 0 disables multisampling entirely.
multisample = 0
#Set the window's icon, as applicable, to be just a transparent square.
icon = pygame.Surface((1,1)); icon.set_alpha(0); pygame.display.set_icon(icon)
#Set the title of the window.
pygame.display.set_caption("PyOpenGL Example - Ian Mallett - v.1.0.0 - 2013")
#Request a multisampled framebuffer (must be set before the window is created).
#Most computers support at least multisample=4; many support more.
if multisample:
    pygame.display.gl_set_attribute(GL_MULTISAMPLEBUFFERS,1)
    pygame.display.gl_set_attribute(GL_MULTISAMPLESAMPLES,multisample)
#Create the window.  pygame.OPENGL lets GL render directly into it;
#pygame.DOUBLEBUF shows only completed frames (we flip buffers in draw()).
pygame.display.set_mode(screen_size,OPENGL|DOUBLEBUF)
#Optional blending setup (transparency); unused here since everything is opaque.
##glEnable(GL_BLEND)
##glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
#Optional texturing setup; unused here since nothing is textured.
##glEnable(GL_TEXTURE_2D)
##glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE)
##glTexEnvi(GL_POINT_SPRITE,GL_COORD_REPLACE,GL_TRUE)
#Request (not require) the nicest perspective-correct interpolation.
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST)
#Depth testing so nearer geometry always occludes farther geometry.
glEnable(GL_DEPTH_TEST)
#The scene is a white triangle plus red/green/blue axis lines (see draw()).
#Camera model: the viewer sits on a sphere around the scene and orbits it,
#so the camera is stored in (a kind of) spherical coordinates.
camera_rot = [30.0,20.0] #The spherical coordinates' angles (degrees).
camera_radius = 3.0 #The sphere's radius
camera_center = [0.0,0.0,0.0] #The sphere's center
def get_input():
    """Poll PyGame input and update the camera; return False to quit."""
    global camera_rot, camera_radius
    # Snapshot the current device state.  keys/position are unused here but
    # kept for parity with the original tutorial structure.
    keys_pressed = pygame.key.get_pressed()
    mouse_buttons = pygame.mouse.get_pressed()
    mouse_position = pygame.mouse.get_pos()
    # Relative mouse motion since the previous call to this function.
    mouse_rel = pygame.mouse.get_rel()
    # Drain the event queue and react to the events we care about.
    for event in pygame.event.get():
        if event.type == QUIT:
            # Window close button clicked.
            return False
        if event.type == KEYDOWN and event.key == K_ESCAPE:
            # Escape key also quits.
            return False
        if event.type == MOUSEBUTTONDOWN:
            if event.button == 4:
                # Scroll wheel forward: zoom in.
                camera_radius *= 0.9
            elif event.button == 5:
                # Scroll wheel backward: zoom out.
                camera_radius /= 0.9
    # Left-button drag orbits the camera in spherical coordinates.
    if mouse_buttons[0]:
        camera_rot[0] += mouse_rel[0]
        camera_rot[1] += mouse_rel[1]
    return True
def draw():
    """Render one frame: set up the camera matrices, then draw the geometry.

    Draws a white triangle and red/green/blue axis line segments, then flips
    the double buffer so the completed frame appears on screen.
    """
    #Clear the color and depth buffers so we have a fresh frame to draw into.
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    #Draw into the whole window.
    glViewport(0,0,screen_size[0],screen_size[1])
    #Projection matrix: perspective with a 45-degree vertical field of view.
    #The near plane must never be 0.0; keeping near as large as possible and
    #far reasonably small preserves depth-buffer precision.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45, float(screen_size[0])/float(screen_size[1]), 0.1,100.0)
    #Modelview matrix: positions the world so it looks like the camera moved.
    #Reset it every frame; advanced techniques change these matrices per frame.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    #Camera position from spherical coordinates.  Note camera_rot[1] is taken
    #as the elevation above the horizon (0.0 at the horizon), not from the pole.
    camera_pos = [
        camera_center[0] + camera_radius*cos(radians(camera_rot[0]))*cos(radians(camera_rot[1])),
        camera_center[1] + camera_radius *sin(radians(camera_rot[1])),
        camera_center[2] + camera_radius*sin(radians(camera_rot[0]))*cos(radians(camera_rot[1]))
    ]
    #Look from camera_pos toward camera_center, with +Y as the up direction.
    gluLookAt(
        camera_pos[0],camera_pos[1],camera_pos[2],
        camera_center[0],camera_center[1],camera_center[2],
        0,1,0
    )
    #Immediate-mode drawing (obsoleted by VBOs, but clearest for a tutorial).
    #White is the default color, but set it explicitly for clarity.
    glColor3f(1,1,1)
    #One triplet of vertices -> one white triangle.
    glBegin(GL_TRIANGLES)
    glVertex3f(0.0,0.0,0.0)
    glVertex3f(0.8,0.0,0.0)
    glVertex3f(0.0,0.0,0.4)
    glEnd()
    #Axis line segments.  Fix: use glVertex3f consistently here; the original
    #mixed the untyped glVertex alias with glVertex3f on the same lines.
    glBegin(GL_LINES)
    #Red X axis.
    glColor3f(1,0,0)
    glVertex3f(0,0,0); glVertex3f(1,0,0)
    #Green Y axis.
    glColor3f(0,1,0)
    glVertex3f(0,0,0); glVertex3f(0,1,0)
    #Blue Z axis.
    glColor3f(0,0,1)
    glVertex3f(0,0,0); glVertex3f(0,0,1)
    #Reset the color to white so later draw calls aren't accidentally tinted --
    #forgetting this is a classic hard-to-track-down bug (e.g. with texturing).
    glColor3f(1,1,1)
    glEnd()
    #Flip the double buffer: show the frame we just rendered.
    pygame.display.flip()
def main():
    """Run the input/draw loop at ~60 FPS until the user quits."""
    clock = pygame.time.Clock()
    running = True
    while running:
        running = get_input()
        if running:
            draw()
            # Cap the framerate as close to 60 Hz as possible.
            clock.tick(60)
    pygame.quit()
if __name__ == "__main__":
    try:
        main()
    # Bare except is deliberate in this tutorial: catch *everything* (including
    # SystemExit/KeyboardInterrupt) so the traceback is printed and the console
    # window stays open via input() instead of vanishing immediately.
    except:
        traceback.print_exc()
        pygame.quit()
        input()
| 11,849 | 3,686 |
from typing import Any, Union, Type, Optional
from cogdl.models import register_model
from cogdl.models.supervised_model import (
SupervisedHomogeneousNodeClassificationModel,
SupervisedHeterogeneousNodeClassificationModel,
)
from cogdl.trainers.gpt_gnn_trainer import (
GPT_GNNHomogeneousTrainer,
GPT_GNNHeterogeneousTrainer,
)
#
# @register_model("gpt_gnn")
# class GPT_GNN(BaseModel):
# def __init__(
# self,
# in_dim,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# n_layers,
# dropout=0.2,
# conv_name="hgt",
# prev_norm=False,
# last_norm=False,
# use_RTE=True,
# ):
# super(GPT_GNN, self).__init__()
# self.gcs = nn.ModuleList()
# self.num_types = num_types
# self.in_dim = in_dim
# self.n_hid = n_hid
# self.adapt_ws = nn.ModuleList()
# self.drop = nn.Dropout(dropout)
# for t in range(num_types):
# self.adapt_ws.append(nn.Linear(in_dim, n_hid))
# for l in range(n_layers - 1):
# self.gcs.append(
# GeneralConv(
# conv_name,
# n_hid,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# dropout,
# use_norm=prev_norm,
# use_RTE=use_RTE,
# )
# )
# self.gcs.append(
# GeneralConv(
# conv_name,
# n_hid,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# dropout,
# use_norm=last_norm,
# use_RTE=use_RTE,
# )
# )
#
# def forward(self, node_feature, node_type, edge_time, edge_index, edge_type):
# res = torch.zeros(node_feature.size(0), self.n_hid).to(node_feature.device)
# for t_id in range(self.num_types):
# idx = node_type == int(t_id)
# if idx.sum() == 0:
# continue
# res[idx] = torch.tanh(self.adapt_ws[t_id](node_feature[idx]))
# meta_xs = self.drop(res)
# del res
# for gc in self.gcs:
# meta_xs = gc(meta_xs, node_type, edge_index, edge_type, edge_time)
# return meta_xs
@register_model("gpt_gnn")
class GPT_GNN(
    SupervisedHomogeneousNodeClassificationModel,
    SupervisedHeterogeneousNodeClassificationModel,
):
    """Registration shell for GPT-GNN.

    The actual training/inference logic lives in the GPT-GNN trainers (see
    ``get_trainer``); this class only declares CLI arguments and selects the
    trainer, which is why ``loss``/``predict``/``evaluate`` are stubs.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        """
        Dataset arguments
        """
        parser.add_argument(
            "--use_pretrain", help="Whether to use pre-trained model", action="store_true"
        )
        parser.add_argument(
            "--pretrain_model_dir",
            type=str,
            default="/datadrive/models/gpt_all_cs",
            help="The address for pretrained model.",
        )
        # parser.add_argument(
        #     "--model_dir",
        #     type=str,
        #     default="/datadrive/models/gpt_all_reddit",
        #     help="The address for storing the models and optimization results.",
        # )
        parser.add_argument(
            "--task_name",
            type=str,
            default="reddit",
            help="The name of the stored models and optimization results.",
        )
        parser.add_argument(
            "--sample_depth", type=int, default=6, help="How many numbers to sample the graph"
        )
        parser.add_argument(
            "--sample_width",
            type=int,
            default=128,
            help="How many nodes to be sampled per layer per type",
        )
        """
        Model arguments
        """
        parser.add_argument(
            "--conv_name",
            type=str,
            default="hgt",
            choices=["hgt", "gcn", "gat", "rgcn", "han", "hetgnn"],
            help="The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)",
        )
        parser.add_argument("--n_hid", type=int, default=400, help="Number of hidden dimension")
        parser.add_argument("--n_heads", type=int, default=8, help="Number of attention head")
        parser.add_argument("--n_layers", type=int, default=3, help="Number of GNN layers")
        parser.add_argument(
            "--prev_norm",
            help="Whether to add layer-norm on the previous layers",
            action="store_true",
        )
        parser.add_argument(
            "--last_norm",
            help="Whether to add layer-norm on the last layers",
            action="store_true",
        )
        # Fix: dropout is a ratio in [0, 1]; the original declared type=int,
        # which makes argparse raise ValueError for inputs like "--dropout 0.3".
        parser.add_argument("--dropout", type=float, default=0.2, help="Dropout ratio")
        """
        Optimization arguments
        """
        parser.add_argument(
            "--optimizer",
            type=str,
            default="adamw",
            choices=["adamw", "adam", "sgd", "adagrad"],
            help="optimizer to use.",
        )
        parser.add_argument(
            "--scheduler",
            type=str,
            default="cosine",
            help="Name of learning rate scheduler.",
            choices=["cycle", "cosine"],
        )
        parser.add_argument(
            "--data_percentage",
            # Fix: a percentage/fraction; type=int would reject "0.1".
            type=float,
            default=0.1,
            help="Percentage of training and validation data to use",
        )
        parser.add_argument("--n_epoch", type=int, default=50, help="Number of epoch to run")
        parser.add_argument(
            "--n_pool", type=int, default=8, help="Number of process to sample subgraph"
        )
        parser.add_argument(
            "--n_batch",
            type=int,
            default=10,
            help="Number of batch (sampled graphs) for each epoch",
        )
        parser.add_argument(
            "--batch_size", type=int, default=64, help="Number of output nodes for training"
        )
        # Fix: gradient clipping threshold is a float (default 0.5), not an int.
        parser.add_argument("--clip", type=float, default=0.5, help="Gradient Norm Clipping")
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Construct the (stateless) model shell; args are consumed by the trainer."""
        return GPT_GNN()

    def loss(self, data: Any) -> Any:
        # Intentionally a stub: the GPT-GNN trainer computes the loss itself.
        pass

    def predict(self, data: Any) -> Any:
        # Intentionally a stub: prediction is handled by the trainer.
        pass

    def evaluate(self, data: Any, nodes: Any, targets: Any) -> Any:
        # Intentionally a stub: evaluation is handled by the trainer.
        pass

    @staticmethod
    def get_trainer(args) -> Optional[Type[Union[GPT_GNNHomogeneousTrainer, GPT_GNNHeterogeneousTrainer]]]:
        """Select the trainer; currently always the homogeneous variant."""
        # if taskType == NodeClassification:
        return GPT_GNNHomogeneousTrainer
        # elif taskType == HeterogeneousNodeClassification:
        #     return GPT_GNNHeterogeneousTrainer
        # else:
        #     return None
| 6,827 | 2,071 |
'''Rewrite the leiaInt() function from DESAFIO 104 so it now also handles the
user typing a value of an invalid type.  Also create a leiaFloat() function
("leiaFloot" in the original assignment text) with the same behaviour.
(Exercise statement translated from Portuguese.)'''
from utilidadesCeV import moeda, texto
# The \033[..m sequences are ANSI colour codes; the prompts themselves stay in
# Portuguese because they are user-facing program output.
n1 = moeda.leiainteiro('\033[34mDigite um Inteiro:\033[m ')
n2 = moeda.leiafloat('\033[35mDigite um Real:\033[m ')
texto.linha()
print(f'\033[36mO valor inteiro digitado foi {n1} e o valor real foi {n2}\033[m')
texto.fim()
| 476 | 202 |
"""Test QtWidgets."""
import pytest
from qtpy import PYQT5, PYQT_VERSION, QtWidgets
def test_qtextedit_functions():
    """Test functions mapping for QtWidgets.QTextEdit."""
    # qtpy is expected to alias these Qt5-style names onto whichever Qt
    # binding is active; a missing attribute raises AttributeError and fails.
    assert QtWidgets.QTextEdit.setTabStopWidth
    assert QtWidgets.QTextEdit.tabStopWidth
    assert QtWidgets.QTextEdit.print_
def test_qplaintextedit_functions():
    """Test functions mapping for QtWidgets.QPlainTextEdit."""
    # Same Qt5-style alias checks as for QTextEdit above.
    assert QtWidgets.QPlainTextEdit.setTabStopWidth
    assert QtWidgets.QPlainTextEdit.tabStopWidth
    assert QtWidgets.QPlainTextEdit.print_
def test_qapplication_functions():
    """Test functions mapping for QtWidgets.QApplication."""
    assert QtWidgets.QApplication.exec_
def test_qdialog_functions():
    """Test functions mapping for QtWidgets.QDialog."""
    assert QtWidgets.QDialog.exec_
def test_qmenu_functions():
    """Test functions mapping for QtWidgets.QMenu."""
    # Docstring fixed: it previously said "QDialog" (copy-paste error).
    assert QtWidgets.QMenu.exec_
@pytest.mark.skipif(PYQT5 and PYQT_VERSION.startswith('5.9'),
                    reason="A specific setup with at least sip 4.9.9 is needed for PyQt5 5.9.*"
                           "to work with scoped enum access")
def test_enum_access():
    """Test scoped and unscoped enum access for qtpy.QtWidgets.*."""
    # Each pair compares the unscoped (Qt5-style) spelling with the scoped
    # (Qt6-style) spelling; qtpy must make both resolve to the same value.
    assert QtWidgets.QFileDialog.AcceptOpen == QtWidgets.QFileDialog.AcceptMode.AcceptOpen
    assert QtWidgets.QMessageBox.InvalidRole == QtWidgets.QMessageBox.ButtonRole.InvalidRole
    assert QtWidgets.QStyle.State_None == QtWidgets.QStyle.StateFlag.State_None
import functools
import sys
from event import EventData, EventCancelled, EventDeferred
class Emitter(object):
    """Minimal publish/subscribe event mixin (Python 2 codebase).

    Handlers live in ``self.event_handlers`` (a dict mapping event name to a
    list of callbacks), created lazily on the first ``on()`` call so the class
    can be mixed in without an ``__init__`` of its own.
    """

    def on(self, event, callback=None):
        """Hook to an event

        Parameters
        ----------
        event : str
            Event to attach to
        callback : func(EventData)
            Callback to call when event is fired. If not specified, this is
            used as a decorator

        Returns
        -------
        wrapper : func
            A decorator if callback is not set

        Examples
        --------
        >>> emitter = Emitter()
        >>> def my_func1(evt):
        ...     print('Event called me!')
        >>> emitter.on('some_event', my_func1)
        >>> @emitter.on('some_event')
        ... def my_func2(evt):
        ...     print('Event called me too!')
        >>> emitter.fire('some_event')
        Event called me!
        Event called me too!
        """
        # EAFP lazy initialization of the handler registry.
        try:
            self.event_handlers[event]
        except AttributeError:
            # First ever subscription on this instance: create the registry.
            self.event_handlers = {event: []}
        except KeyError:
            # Registry exists but this event has no handler list yet.
            self.event_handlers[event] = []
        if callback is None:
            # Decorator form: @emitter.on('some_event')
            def wrapper(func):
                self.event_handlers[event].append(func)
                return func
            return wrapper
        else:
            self.event_handlers[event].append(callback)

    def off(self, event, callback):
        """Remove callback from an event

        Parameters
        ----------
        event : str
            Event to remove from
        callback : func(EventData)
            Callback to be removed from this event. Only the first instance
            is removed

        Raises
        ------
        ValueError
            If the callback is not attached to this event
        """
        # AttributeError: no registry at all; KeyError: unknown event.  Both
        # mean the callback cannot be attached, so normalize to ValueError
        # (list.remove itself raises ValueError when the callback is absent).
        try:
            self.event_handlers[event].remove(callback)
        except (AttributeError, KeyError):
            raise ValueError('Callback not found')

    def once(self, event, callback=None):
        """Hook to an event once.

        Parameters
        ----------
        event : str
            Event to attach to
        callback : func(EventData)
            Callback to call when event is fired. If not specified, this is
            used as a decorator

        Returns
        -------
        wrapper : func
            A decorator if callback is not set
        """
        if callback is None:
            def wrapper(func):
                # Register a self-removing wrapper, but return the original
                # function so the decorated name stays directly callable.
                @functools.wraps(func)
                def call_once(*args, **kwargs):
                    ret = func(*args, **kwargs)
                    self.off(event, call_once)
                    return ret
                self.on(event, call_once)
                return func
            return wrapper
        else:
            @functools.wraps(callback)
            def call_once(*args, **kwargs):
                ret = callback(*args, **kwargs)
                # Detach after the first invocation.
                self.off(event, call_once)
                return ret
            self.on(event, call_once)

    def all_off(self):
        """Remove all events"""
        # Dropping the whole registry resets the lazy-init state in on().
        try:
            del self.event_handlers
        except AttributeError:
            pass

    def fire(self, event, data=None, cancellable=True, catch_errors=True,
             late_throw=True):
        """Fires an event

        Parameters
        ----------
        event : str
            Event to fire
        data : object
            Data passed to the event
        cancellable : bool
            If True (default), callbacks can be stopped by calling evt.cancel()
        catch_errors : bool
            If True (default), callbacks will all be called even if there
            is an exception thrown
        late_throw : bool
            If True (default), this function will raise the first exception
            thrown

        Returns
        -------
        evt : EventData
            The passed event.

        Raises
        ------
        err : Exception
            If catch_errors is False, this raises the error that is
            generated by callback()

        See Also
        --------
        event.EventData
        """
        evt = EventData(event, self, data, cancellable)
        try:
            callbacks = self.event_handlers[event]
        except (KeyError, AttributeError):
            # No handlers registered for this event (or no registry at all).
            pass
        else:
            deferred_callbacks = []
            def process_callbacks(callbacks):
                # Iterate a copy so handlers may add/remove handlers
                # (e.g. once()) without disturbing this pass.
                for callback in callbacks[:]:
                    if evt.cancelled:
                        return evt
                    try:
                        callback(evt)
                    except EventCancelled as cancel_exc:
                        # Check to see if nested and actually called for this
                        if cancel_exc.evt == evt:
                            return evt
                        else:
                            # Raise until this event is caught
                            raise
                    except EventDeferred:
                        # Handler asked to run again after the others finish.
                        deferred_callbacks.append(callback)
                    except Exception as err:
                        if catch_errors:
                            evt.add_error(sys.exc_info())
                        else:
                            raise err
            process_callbacks(callbacks)
            if deferred_callbacks:
                # Second pass for handlers that raised EventDeferred.
                evt.deferred = True
                process_callbacks(deferred_callbacks)
        if late_throw and evt.errors:
            # Python 2 three-argument raise: re-raise the first recorded
            # exception with its original traceback preserved.
            raise evt.errors[0][1], None, evt.errors[0][2]
        return evt
| 5,519 | 1,356 |
from output.models.ms_data.regex.re_t61_xsd.re_t61 import Doc
__all__ = [
"Doc",
]
| 88 | 42 |
#!/usr/bin/python3
from jinja2 import Environment, FileSystemLoader
import subprocess
import cgi
# Minimal CGI response header; the empty print() ends the header section.
print("content-type: text/html")
print()
# Values submitted by the HTML form.  NOTE(review): these are untrusted user
# inputs that are later interpolated into shell commands and templates.
mydata = cgi.FieldStorage()
namenode_ip = mydata.getvalue("namenode_ip")
namenode_port = mydata.getvalue("namenode_port")
namenode_directory = mydata.getvalue("namenode_directory")
datanode_ip = mydata.getvalue("datanode_ip")
datanode_directory = mydata.getvalue("datanode_directory")
def installtionScript(nodeType,directoryPath):
    """Render the Hadoop installation shell script into ./temp.

    Parameters: nodeType is 'name' or 'data'; directoryPath is the HDFS
    storage directory to substitute into the Jinja2 template.
    """
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('installationScript.sh.j2')
    output = template.render(nodeType = nodeType , directoryPath = directoryPath)
    # Fix: use a context manager so the file handle is closed (and the data
    # flushed) even if the write raises; the original leaked on error.
    with open("./temp/installationScript.sh", "w") as file:
        file.write("%s" %(output))
def hdfsSite(nodeType,directoryPath):
    """Render hdfs-site.xml into ./temp from its Jinja2 template.

    nodeType selects the namenode/datanode variant; directoryPath is the
    dfs storage directory written into the config.
    """
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('hdfs-site.xml.j2')
    output = template.render(nodeType = nodeType , directoryPath = directoryPath)
    # Fix: context manager guarantees the handle is closed/flushed on error.
    with open("./temp/hdfs-site.xml", "w") as file:
        file.write("%s" %(output))
def coreSite(nodeIp,nodePort):
    """Render core-site.xml into ./temp, pointing fs at the given namenode.

    nodeIp/nodePort identify the namenode endpoint substituted into the
    Jinja2 template.
    """
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('core-site.xml.j2')
    output = template.render(IP = nodeIp , port = nodePort)
    # Fix: context manager guarantees the handle is closed/flushed on error.
    with open("./temp/core-site.xml", "w") as file:
        file.write("%s" %(output))
def copyTemplate(nodeIP):
    """Copy the rendered config files to the target node via scp.

    Security fix: nodeIP comes straight from the CGI form, so the command is
    passed as an argument list (no shell) instead of an f-string with
    shell=True, which was injectable through the IP field.
    """
    subprocess.run(['scp', './temp/hdfs-site.xml', f'root@{nodeIP}:/root/hdfs-site.xml'])
    subprocess.run(['scp', './temp/core-site.xml', f'root@{nodeIP}:/root/core-site.xml'])
def nameNode(nameNodeIP):
    """Render configs for the namenode, copy them over, and run the installer.

    Returns the namenode port so the datanode setup can point at it.
    Reads namenode_directory/namenode_port from the CGI form (module globals).
    """
    nameNodeDirectory = namenode_directory
    nameNodePort = namenode_port
    hdfsSite('name',f'/root/{nameNodeDirectory}')
    coreSite(nameNodeIP,nameNodePort)
    copyTemplate(nameNodeIP)
    installtionScript('name',nameNodeDirectory)
    # SECURITY NOTE(review): nameNodeIP comes from the form and is interpolated
    # into a shell command with shell=True -- this is shell-injectable.
    subprocess.run(f"ssh root@{nameNodeIP} 'bash -s' < ./temp/installationScript.sh",shell=True)
    return nameNodePort
def dataNode(dataNodeIP,nameNodeIP,nameNodePort):
    """Render configs for a datanode (pointing at the namenode) and install it.

    Reads datanode_directory from the CGI form (module global).
    """
    dataNodeDirectory = datanode_directory
    hdfsSite('data',f'/root/{dataNodeDirectory}')
    coreSite(nameNodeIP,nameNodePort)
    copyTemplate(dataNodeIP)
    installtionScript('data',dataNodeDirectory)
    # SECURITY NOTE(review): dataNodeIP is form input interpolated into a
    # shell=True command -- shell-injectable.
    subprocess.run(f"ssh root@{dataNodeIP} 'bash -s' < ./temp/installationScript.sh",shell=True)
def configure():
    """Orchestrate the full cluster setup: namenode first, then the datanode."""
    nameNodeIP = namenode_ip
    dataNodeIP = datanode_ip
    nameNodePort = nameNode(nameNodeIP)
    dataNode(dataNodeIP,nameNodeIP,nameNodePort)
    # Emitted into the CGI response body on success.
    print("HADOOP CLUSTER SUCCESS")

# Kick off the deployment as soon as the CGI script is invoked.
configure()
| 2,663 | 890 |
from module import kelas
from lib import numbers, wa, reply, message
import subprocess, config, os
def getMateriFromJadwalID(jadwalid):
    """Return the (MP,) material rows for a schedule, ordered by meeting number.

    Security fix: jadwalid ultimately derives from chat input, so it is passed
    as a query parameter instead of being interpolated into the SQL string.
    Assumes a %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db = kelas.dbConnectSiap()
    sql = "select MP from simak_trn_presensi_dosen WHERE `JadwalID` = %s ORDER BY Pertemuan ASC"
    with db:
        cur = db.cursor()
        cur.execute(sql, (jadwalid,))
        rows=cur.fetchall()
        # fetchall() returns an empty sequence rather than None when there are
        # no rows; the None check is kept for backward compatibility.
        if rows is not None:
            return rows
        else:
            return None
def materiToList(materiTuple):
    """Flatten DB rows of shape ((value,), ...) into a plain list of first columns."""
    # Comprehension replaces the original manual append loop (same result).
    return [row[0] for row in materiTuple]
def getListJadwalIDfromKaprodi(prodiID):
    """Return all (JadwalID,) rows for a kaprodi's prodi in the current term.

    Security fix: values are bound as query parameters instead of f-string
    interpolation (prodiID derives from user identity/message data).
    Assumes a %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql="select JadwalID from simak_trn_jadwal where TahunID=%s and ProdiID=%s"
    with db:
        cur = db.cursor()
        cur.execute(sql, (kelas.getTahunID(), prodiID))
        rows=cur.fetchall()
        # fetchall() never returns None; check kept for backward compatibility.
        if rows is not None:
            return rows
        else:
            return None
def getListJadwalIDfromDeputi(status, prodiid):
    """Return (JadwalID,) rows for the current term.

    When status is truthy, restrict to the given prodi; otherwise return all
    prodi.  Security fix: parameterized queries instead of f-string SQL
    (prodiid derives from a chat message).  Assumes a %s-paramstyle driver
    (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    if status:
        sql="select JadwalID from simak_trn_jadwal where TahunID=%s and ProdiID=%s"
        params=(kelas.getTahunID(), prodiid)
    else:
        sql="select JadwalID from simak_trn_jadwal where TahunID=%s"
        params=(kelas.getTahunID(),)
    with db:
        cur = db.cursor()
        cur.execute(sql, params)
        rows=cur.fetchall()
        # fetchall() never returns None; check kept for backward compatibility.
        if rows is not None:
            return rows
        else:
            return None
def cekMateriPerkuliahan(jadwalid):
    """True when every meeting of the schedule has its material (MP) filled in."""
    materi = materiToList(getMateriFromJadwalID(jadwalid))
    # A single missing (None) or empty-string material means the BAP is not
    # ready to be signed.
    return not (None in materi or '' in materi)
def cekStatusBKDKaprodi(jadwalid):
    """True when the kaprodi has already signed (BKD_Prodi == 'true').

    Security fix: jadwalid is bound as a query parameter.  Assumes a
    %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql='select BKD_Prodi from simak_trn_jadwal where JadwalID=%s'
    with db:
        cur=db.cursor()
        cur.execute(sql, (jadwalid,))
        row=cur.fetchone()
        # Direct comparison replaces the original if/else returning True/False.
        return row[0] == 'true'
def cekStatusBKDDeputi(jadwalid):
    """True when the deputy has already signed (BKD_Deputi == 'true').

    Security fix: jadwalid is bound as a query parameter.  Assumes a
    %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql='select BKD_Deputi from simak_trn_jadwal where JadwalID=%s'
    with db:
        cur=db.cursor()
        cur.execute(sql, (jadwalid,))
        row=cur.fetchone()
        # Direct comparison replaces the original if/else returning True/False.
        return row[0] == 'true'
def infoBAPKaprodi(prodiid):
    """Build the BAP status report for a kaprodi's prodi.

    Classifies every schedule into:
      sudah -- already signed by the kaprodi,
      siap  -- materials complete, ready to be signed,
      belum -- materials incomplete, not ready.
    Returns (reply message, sudah, siap, belum).
    """
    JadwalIDDataProdi=getListJadwalIDfromKaprodi(prodiid)
    sudah=[]
    siap=[]
    belum=[]
    for jadwalid in JadwalIDDataProdi:
        statusmateri=cekMateriPerkuliahan(jadwalid[0])
        statusttd=cekStatusBKDKaprodi(jadwalid[0])
        if statusmateri == False and statusttd == False:
            belum.append(jadwalid[0])
        elif statusmateri == True and statusttd == False:
            siap.append(jadwalid[0])
        else:
            # Signed (or signed despite incomplete materials).
            sudah.append(jadwalid[0])
    # Each section lists "code | prodi | class | lecturer" per schedule,
    # joined with the WhatsApp API's line-break token.
    msgsudah = ''
    for i in sudah:
        kelas_info = kelas.getMatakuliahInfowithJadwalID(i)
        msgsudah += f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgsudah += f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgsiap = ''
    for i in siap:
        kelas_info = kelas.getMatakuliahInfowithJadwalID(i)
        msgsiap += f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgsiap += f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgbelum = ''
    for i in belum:
        kelas_info = kelas.getMatakuliahInfowithJadwalID(i)
        msgbelum += f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgbelum += f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgreply = f"BAP yang sudah ditandatangani ada: {len(sudah)} berkas{msgsudah}BAP yang siap ditandatangani ada: {len(siap)} berkas{msgsiap}BAP yang belum siap ditandatangani ada: {len(belum)} berkas{msgbelum}"
    return msgreply, sudah, siap, belum
def infoBAPDeputi(msg):
    """Build the BAP status report for the deputy.

    The last word of the incoming message selects scope: 'all' covers every
    prodi, anything else is treated as a prodi abbreviation.  Returns
    (reply message, sudah, siap, belum) -- see infoBAPKaprodi for categories.
    """
    msgs=msg.split(' ')[-1]
    if msgs == 'all':
        JadwalIDDataDeputi=getListJadwalIDfromDeputi(False, '')
    else:
        JadwalIDDataDeputi=getListJadwalIDfromDeputi(True, getProdiIDfromSingkatan(msgs))
    sudah=[]
    siap=[]
    belum=[]
    for jadwalid in JadwalIDDataDeputi:
        statusmateri=cekMateriPerkuliahan(jadwalid[0])
        statusttd=cekStatusBKDDeputi(jadwalid[0])
        if statusmateri == False and statusttd == False:
            belum.append(jadwalid[0])
        elif statusmateri == True and statusttd == False:
            siap.append(jadwalid[0])
        else:
            sudah.append(jadwalid[0])
    # Each section lists "code | prodi | class | lecturer" per schedule.
    msgsudah=''
    for i in sudah:
        kelas_info=kelas.getMatakuliahInfowithJadwalID(i)
        msgsudah+=f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgsudah+=f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgsiap=''
    for i in siap:
        kelas_info=kelas.getMatakuliahInfowithJadwalID(i)
        msgsiap+=f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgsiap += f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgbelum=''
    for i in belum:
        kelas_info=kelas.getMatakuliahInfowithJadwalID(i)
        msgbelum+=f'{config.whatsapp_api_lineBreak}{kelas_info[0]} | {getNamaProdiFromProdiID(kelas_info[5].split(".")[1])} | {kelas_info[12]} | {kelas.getNamaDosen(kelas_info[21])}'
    msgbelum += f'{config.whatsapp_api_lineBreak}{config.whatsapp_api_lineBreak}'
    msgreply=f"BAP yang sudah ditandatangani ada: {len(sudah)} berkas{msgsudah}BAP yang siap ditandatangani ada: {len(siap)} berkas{msgsiap}BAP yang belum siap ditandatangani ada: {len(belum)} berkas{msgbelum}"
    return msgreply, sudah, siap, belum
def approveBAPDeputi(msg):
    """Summarize BAP counts for the deputy (same scoping rules as infoBAPDeputi).

    Returns (summary message, sudah, siap, belum); only counts are reported,
    with %0A as the URL-encoded newline in the message.
    """
    msgs=msg.split(' ')[-1]
    if msgs == 'all':
        JadwalIDDataDeputi=getListJadwalIDfromDeputi(False, '')
    else:
        JadwalIDDataDeputi=getListJadwalIDfromDeputi(True, getProdiIDfromSingkatan(msgs))
    sudah=[]
    siap=[]
    belum=[]
    for jadwalid in JadwalIDDataDeputi:
        statusmateri=cekMateriPerkuliahan(jadwalid[0])
        statusttd=cekStatusBKDDeputi(jadwalid[0])
        if statusmateri == False and statusttd == False:
            belum.append(jadwalid[0])
        elif statusmateri == True and statusttd == False:
            siap.append(jadwalid[0])
        else:
            sudah.append(jadwalid[0])
    msgreply = f"BAP yang sudah ditandatangani ada: {len(sudah)} berkas%0ABAP yang siap ditandatangani ada: {len(siap)} berkas%0ABAP yang belum siap ditandatangani ada: {len(belum)} berkas"
    return msgreply, sudah, siap, belum
def approveBAPKaprodi(prodiid):
    """Summarize BAP counts for a kaprodi's prodi.

    Returns (summary message, sudah, siap, belum); only counts are reported,
    with %0A as the URL-encoded newline in the message.
    """
    JadwalIDDataProdi=getListJadwalIDfromKaprodi(prodiid)
    sudah=[]
    siap=[]
    belum=[]
    for jadwalid in JadwalIDDataProdi:
        statusmateri=cekMateriPerkuliahan(jadwalid[0])
        statusttd=cekStatusBKDKaprodi(jadwalid[0])
        if statusmateri == False and statusttd == False:
            belum.append(jadwalid[0])
        elif statusmateri == True and statusttd == False:
            siap.append(jadwalid[0])
        else:
            sudah.append(jadwalid[0])
    msgreply = f"BAP yang sudah ditandatangani ada: {len(sudah)} berkas%0ABAP yang siap ditandatangani ada: {len(siap)} berkas%0ABAP yang belum siap ditandatangani ada: {len(belum)} berkas"
    return msgreply, sudah, siap, belum
def getNIPYfromHandphone(num):
    """Look up a lecturer's NIPY by (normalized) phone number; None if unknown.

    Security fix: the phone number comes from the chat sender and is bound as
    a query parameter instead of being interpolated into the SQL string.
    Assumes a %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    num=numbers.normalize(num)
    db=kelas.dbConnectSiap()
    sql='select NIPY from simak_mst_dosen where Handphone=%s'
    with db:
        cur=db.cursor()
        cur.execute(sql, (num,))
        row=cur.fetchone()
        if row is not None:
            return row[0]
        else:
            return None
def getNamaProdiFromProdiID(prodiid):
    """Return a prodi's display name by ID, or None when the ID is unknown.

    Security fix: prodiid is bound as a query parameter (it derives from
    message data).  Assumes a %s-paramstyle driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql='select Nama from simak_mst_prodi where ProdiID=%s'
    with db:
        cur=db.cursor()
        cur.execute(sql, (prodiid,))
        row=cur.fetchone()
        if row is not None:
            return row[0]
        else:
            return None
def isDeputiAkademik(NIPY):
    """True when the NIPY holds the academic deputy role (JenisJabatanID=9).

    Security fix: NIPY is bound as a query parameter.  Assumes a
    %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql='select * from simak_mst_pejabat where NIPY=%s and JenisJabatanID=9'
    with db:
        cur=db.cursor()
        cur.execute(sql, (NIPY,))
        row=cur.fetchone()
        # Any matching row means the role is held.
        return row is not None
def isKaprodi(NIPY):
    """True when the NIPY holds the kaprodi role (JenisJabatanID=5).

    Security fix: NIPY is bound as a query parameter.  Assumes a
    %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db = kelas.dbConnectSiap()
    sql = 'select * from simak_mst_pejabat where NIPY=%s and JenisJabatanID=5'
    with db:
        cur = db.cursor()
        cur.execute(sql, (NIPY,))
        row = cur.fetchone()
        # Any matching row means the role is held.
        return row is not None
def auth(data):
    """True when the sender (phone in data[0]) is a kaprodi or academic deputy."""
    # Look up the NIPY once instead of twice (the original hit the DB twice).
    nipy = getNIPYfromHandphone(data[0])
    return isKaprodi(nipy) or isDeputiAkademik(nipy)
def replymsg(driver, data):
    """Acknowledge the request immediately; process it in a background worker.

    Returns the "please wait" message; the heavy lifting happens in run(),
    re-entered via run.py in a detached subprocess.
    """
    num=numbers.normalize(data[0])
    msg=message.normalize(data[3])
    # Worker payload: "<number>;<message>" (parsed back apart in run()).
    data=f'{num};{msg}'
    # Waiting message is keyed by this module's filename without extension.
    wmsg = reply.getWaitingMessage(os.path.basename(__file__).split('.')[0])
    wmsg = wmsg.replace('#BOTNAME#', config.bot_name)
    # Fire-and-forget: run.py imports this module and calls run(data).
    subprocess.Popen(["python", "run.py", os.path.basename(__file__).split('.')[0], data], cwd=config.cwd)
    return wmsg
def run(data):
    """Background entry point: build the BAP report and send it to the sender.

    data is the "<number>;<message>" payload produced by replymsg().
    """
    num=data.split(';')[0]
    msg=data.split(';')[1]
    # Kaprodi gets a report scoped to their own prodi; anyone else reaching
    # this point is assumed (via auth()) to be the academic deputy.
    if isKaprodi(getNIPYfromHandphone(num)):
        status='kaprodi'
    else:
        status='deputi'
    if status == 'kaprodi':
        msgreply=infoBAPKaprodi(f'.{kelas.getAllDataDosens(kelas.getKodeDosen(num))[20]}.')
    else:
        msgreply=infoBAPDeputi(msg)
    # info* functions return a tuple; element 0 is the message text.
    wa.setOutbox(numbers.normalize(num), msgreply[0])
def getProdiIDfromSingkatan(singkatan):
    """Map a prodi abbreviation to the '.<ProdiID>.' token used in queries.

    Returns None when the abbreviation is unknown.  Security fix: singkatan
    comes from a chat message and is bound as a query parameter.  Assumes a
    %s-paramstyle DB-API driver (e.g. PyMySQL) -- TODO confirm.
    """
    db=kelas.dbConnectSiap()
    sql="select ProdiID from simak_mst_prodi where Singkatan = %s"
    with db:
        cur=db.cursor()
        cur.execute(sql, (singkatan,))
        row = cur.fetchone()
        if row:
            return f".{row[0]}."
        else:
            return None
import os
import shutil
"""
Remaining things to test:
find_plasmid_kmer_scores
find_score
filter_similar_plasmids
generate_consensus
"""
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from plasmidextractor.PlasmidExtractor import *
def test_mash_paired_gzipped():
    """Paired gzipped reads: mash screening must produce a results TSV."""
    mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                                reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
                                plasmid_db='tests/test_fasta/dummy_db.fasta',
                                output_dir='tests/mash',
                                identity_cutoff=-1)
    try:
        assert os.path.isfile('tests/mash/screen_results.tsv')
    finally:
        # Clean up even when the assertion fails, so a stale output dir
        # cannot mask failures in subsequent tests.
        shutil.rmtree('tests/mash')
def test_mash_unpaired_gzipped():
    """Unpaired gzipped reads: mash screening must produce a results TSV."""
    mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                                plasmid_db='tests/test_fasta/dummy_db.fasta',
                                output_dir='tests/mash',
                                identity_cutoff=-1)
    try:
        assert os.path.isfile('tests/mash/screen_results.tsv')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/mash')
def test_mash_paired_uncompressed():
    """Paired uncompressed reads: mash screening must produce a results TSV."""
    mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq',
                                reverse_reads='tests/test_fastqs/paired_R2.fastq',
                                plasmid_db='tests/test_fasta/dummy_db.fasta',
                                output_dir='tests/mash',
                                identity_cutoff=-1)
    try:
        assert os.path.isfile('tests/mash/screen_results.tsv')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/mash')
def test_mash_unpaired_uncompressed():
    """Unpaired uncompressed reads: mash screening must produce a results TSV."""
    mash_for_potential_plasmids(forward_reads='tests/test_fastqs/paired_R1.fastq',
                                plasmid_db='tests/test_fasta/dummy_db.fasta',
                                output_dir='tests/mash',
                                identity_cutoff=-1)
    try:
        assert os.path.isfile('tests/mash/screen_results.tsv')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/mash')
def test_bait_and_trim_paired_gzipped():
    """Paired gzipped reads: baiting must emit both trimmed read files."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                  reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out')
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_bait_and_trim_paired_uncompressed():
    """Paired uncompressed reads: baiting must emit both trimmed read files."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq',
                  reverse_reads='tests/test_fastqs/paired_R2.fastq',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out')
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_gzipped():
    """Unpaired gzipped reads: baiting must emit the forward trimmed file."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out')
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_uncompressed():
    """Unpaired uncompressed reads: baiting must emit the forward trimmed file."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out')
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_bait_and_trim_paired_gzipped_lowmem():
    """Paired gzipped reads, low-memory mode: both trimmed files must exist."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                  reverse_reads='tests/test_fastqs/paired_R2.fastq.gz',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out',
                  low_memory=True)
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz') and os.path.isfile('tests/out/plasmid_reads_R2.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_bait_and_trim_unpaired_gzipped_lowmem():
    """Unpaired gzipped reads, low-memory mode: forward trimmed file must exist."""
    bait_and_trim(forward_reads='tests/test_fastqs/paired_R1.fastq.gz',
                  plasmid_db='tests/test_fasta/dummy_db.fasta',
                  output_dir='tests/out',
                  low_memory=True)
    try:
        assert os.path.isfile('tests/out/plasmid_reads_R1.fastq.gz')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/out')
def test_fasta_write():
    """Only the requested sequence (seq1) should be written out as a FASTA."""
    create_individual_fastas(plasmid_db='tests/test_fasta/dummy_db.fasta',
                             potential_plasmid_list=['seq1'],
                             output_dir='tests/fasta/')
    try:
        assert os.path.isfile('tests/fasta/seq1') and not os.path.isfile('tests/fasta/seq2')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/fasta')
def test_fasta_kmerization():
    """Kmerizing a FASTA must produce both KMC database files (.kmc_pre/.kmc_suf)."""
    kmerize_individual_fastas(potential_plasmid_list=['dummy_db.fasta'],
                              fasta_dir='tests/test_fasta',
                              output_dir='tests/kmerization')
    try:
        assert os.path.isfile('tests/kmerization/dummy_db.fasta.kmc_pre') and os.path.isfile('tests/kmerization/dummy_db.fasta.kmc_suf')
    finally:
        # Guaranteed cleanup: no stale dirs left behind on failure.
        shutil.rmtree('tests/kmerization')
| 5,184 | 1,915 |
#!/usr/bin/env python
#Examples of irreductible polynomes 16 degree
#x^16 + x^9 + x^8 + x^7 + x^6 + x^4 + x^3 + x^2 + 1
#x^16 + x^12 + x^3 + x^1 + 1
#x^16 + x^12 + x^7 + x^2 + 1
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_gcdex, gf_strip
def gf_inv(a):
    """Invert *a* in GF(2^16) modulo the fixed irreducible polynomial
    0x18f57 = x^16 + x^15 + x^11 + x^10 + x^9 + x^8 + x^6 + x^4 + x^2 + x + 1.

    Uses sympy's extended Euclidean algorithm over GF(2); input and output
    are plain integers whose bits are the polynomial coefficients.
    """
    modulus = [1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    coeffs = hextolist(a)
    s, t, g = gf_gcdex(ZZ.map(gf_strip(coeffs)), ZZ.map(modulus), 2, ZZ)
    # s satisfies s*a + t*mod = g, so s is the inverse of a (mod the polynomial).
    return listtohex(s)
def gf_degree(a):
    """Degree of the GF(2) polynomial whose coefficient bits are *a*.

    Matches the original shift-count loop: both 0 and 1 map to degree 0.
    """
    return max(a.bit_length() - 1, 0)
def gf_invert(a, mod):
    """Multiplicative inverse of *a* in GF(2^8) with reduction polynomial
    *mod*, via a binary extended-Euclidean loop.

    Note: the field width 8 is hard-coded (initial shift and the %= 256
    reductions), so this routine only works for 8-bit fields.
    """
    divisor = mod
    inv, scratch = 1, 0
    shift = gf_degree(a) - 8
    while a != 1:
        if shift < 0:
            # Keep the larger-degree operand in `a` by swapping pairs.
            a, divisor = divisor, a
            inv, scratch = scratch, inv
            shift = -shift
        a ^= divisor << shift
        inv ^= scratch << shift
        a %= 256  # Emulating 8-bit overflow
        inv %= 256  # Emulating 8-bit overflow
        shift = gf_degree(a) - gf_degree(divisor)
    return inv
def hextolist(num):
    """Expand the low 16 bits of *num* into a coefficient list, most
    significant bit first."""
    return [(num >> shift) & 1 for shift in range(15, -1, -1)]
def listtohex(bitlist):
    """Pack a coefficient list (most significant bit first) back into an
    integer; inverse of hextolist for 16-bit values."""
    value = 0
    for position, bit in enumerate(reversed(bitlist)):
        value |= bit << position
    return value
if __name__ == '__main__':
    # Demo: invert two sample GF(2^16) elements and print their inverses.
    a = 0xf48
    s = gf_inv(a)
    print(hex(s))
    b = 0x4ccd
    s = gf_inv(b)
    print(hex(s))
| 1,418 | 766 |
# -*- coding: utf-8 -*-
from bot.adapters.ccxt_adapter import CcxtAdapter
from secrets import BITFINEX_KEY, BITFINEX_SECRET
class BitfinexAdapter(CcxtAdapter):
    """Bitfinex exchange adapter: binds the ccxt 'bitfinex' module to the
    API credentials imported from the project's local ``secrets`` module.
    """
    ccxt_module_name = 'bitfinex'
    # NOTE(review): credentials are class attributes, shared by all
    # instances -- presumably consumed by CcxtAdapter; confirm there.
    apiKey = BITFINEX_KEY
    secret = BITFINEX_SECRET
| 252 | 109 |
import math
import numpy as np
from scipy import interpolate
class Polyline(list):
    """A polyline in the plane: a list of 2-D points stored as numpy
    float64 arrays of shape (2,).

    Provides length measurement and (closed) B-spline construction via
    scipy.interpolate.BSpline.
    """

    @staticmethod
    def _2Dcheck(value):
        """Raise ValueError unless *value* has exactly two components."""
        if len(value) != 2:
            raise ValueError("Value must be 2-D.")

    def __init__(self):
        super().__init__()

    def __setitem__(self, key, value):
        """Replace the point at *key*, validating and converting to float64."""
        self._2Dcheck(value)
        value = np.array(value, np.float64)
        super().__setitem__(key, value)

    def __str__(self):
        # join() instead of repeated `+=`: the old loop was quadratic in the
        # number of points; the rendered text is identical.
        inner = ", ".join(
            "[{}]".format(", ".join(str(v) for v in point)) for point in self
        )
        return "[{}]".format(inner)

    def append(self, value):
        """Append a 2-D point (validated, converted to float64)."""
        self._2Dcheck(value)
        value = np.array(value, np.float64)
        super().append(value)

    def extend(self, sequence):
        """Append every point of *sequence*, validating each one."""
        for value in sequence:
            self.append(value)

    def length(self):
        """Total Euclidean length of the polyline (0 for fewer than 2 points)."""
        res = 0
        n = len(self)
        for i in range(n - 1):
            e = self[i + 1] - self[i]
            res += math.sqrt(np.dot(e, e))
        return res

    def get_x_arr(self):
        """X coordinates of all points as a 1-D ndarray."""
        return np.array([p[0] for p in self])

    def get_y_arr(self):
        """Y coordinates of all points as a 1-D ndarray."""
        return np.array([p[1] for p in self])

    def bspline(self, k=2):
        """Clamped degree-*k* B-spline through the control points.

        Raises ValueError unless there are more than *k* points.
        """
        c = np.array(self)
        n = c.shape[0]
        if n <= k:
            msg = "The number of points must be more than {}."
            raise ValueError(msg.format(k))
        # Clamped knot vector: k+1 zeros, evenly spaced interior knots,
        # then k+1 ones, so the curve interpolates both endpoints.
        t = np.zeros(n + k + 1, dtype=np.float64)
        t[n + 1:] = 1
        t[k:n + 1] = np.linspace(0, 1, n - k + 1)
        return interpolate.BSpline(t, c, k, axis=0)

    def closed_bspline(self, epsilon=2, k=2):
        """Degree-*k* B-spline over the closed polyline.

        The polyline is closed first (see _close_polyline); control points
        wrap around by k-1 points and the uniform knot vector extends k
        steps beyond [0, 1] on both sides for periodic-style continuity.
        """
        pl = self._close_polyline(epsilon=epsilon)
        c = np.array(pl)
        if np.any(c[0, :] != c[-1, :]):
            c = np.vstack((c, c[0, :]))
        c = np.vstack((c, c[1:k, :]))
        n = c.shape[0]
        dt = 1 / (n - k)
        t0 = - k * dt
        tm = 1 + k * dt
        t = np.linspace(t0, tm, n + k + 1)
        return interpolate.BSpline(t, c, k, axis=0)

    def _close_polyline(self, epsilon):
        """Close the polyline and return it as a plain list of points.

        If the endpoints are within *epsilon* of each other, the first
        point is appended to close the gap; otherwise the interior points
        are appended in reverse so the path retraces itself to the start.
        """
        res = self.copy()
        r = self[-1] - self[0]
        delta = math.sqrt(r[0] ** 2 + r[1] ** 2)
        if delta < epsilon:
            res.append(self[0])
        else:
            tmp = res[:-1]
            tmp.reverse()
            res.extend(tmp)
        return res
| 2,469 | 941 |
from main import db
from main import Visit
from main import sqlalchemy
if __name__ == '__main__':
    # One-shot utility: create every table declared on the SQLAlchemy
    # metadata bound to `db`.
    print('Creating all database tables...')
    # db.create_all() returns None; the previous `x =` binding was dead code.
    db.create_all()
    print('Done!')
import os
# Directory containing this file, with symlinks resolved.
_THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Repository root: two directory levels above this file.
REPO_DIR = os.path.dirname(os.path.dirname(_THIS_DIR))
| 122 | 51 |
#!/usr/bin/env python
import sys
import subprocess
import urllib2
import base64
import re
import os.path
from bs4 import BeautifulSoup, NavigableString
# Placeholder HTTP Basic Auth credentials -- replace before use.
# NOTE(review): consider loading these from the environment instead of
# hard-coding them in the script.
USERNAME = "YOUR_USERNAME"
PASSWORD = "YOUR_PASSWORD"
def startAndWaitForAria(chdir, url):
if os.path.exists(chdir) is not True:
print "Path {0} does not exist, attempting to create".format(chdir)
try:
os.makedirs(chdir)
except:
print "Failed to make directory, exiting"
return
os.chdir(chdir)
print "Downloading: {0} to {1}".format(url, chdir)
p = subprocess.Popen(['/usr/bin/aria2c', '-s16', '-x16', '-k1M', '--check-certificate=false', '--http-user={0}'.format(USERNAME), '--http-passwd={0}'.format(PASSWORD), url])
p.wait()
def findFilesWithPattern(cwd, baseurl, pattern):
    """Scan an HTTP directory-index page and collect files to download.

    Returns a list of [local_dir, remote_url] pairs.  *pattern* is a
    compiled regex or None; when None, subdirectories are recursed into.
    NOTE(review): assumes the server renders a table whose rows are
    Name / Last Modified / Size / Type -- confirm against the target server.
    """
    downloadList = []
    data = downloadAuthFile(baseurl)
    if data is None:
        return downloadList
    data = data.read()
    soup = BeautifulSoup(data)
    table = soup.select('table')
    if len(table):
        for tr in table[0].find_all('tr'):
            if len(tr.contents) < 4:
                print "Incompatible HTTP list type"
                continue
            # Name Last Modified Size Type
            dlname = tr.contents[0]
            dltype = tr.contents[3]
            if dlname is None or dltype is None:
                print "Parse error #1"
                continue
            # Plain-text name cells (no anchor) are header/filler rows.
            if type(dlname.next_element) is NavigableString:
                continue
            dlurl = dlname.next_element
            if dlurl is None:
                print "Parse error #2"
                continue
            # I added pattern check because if we're pattern matching we probably only want things from one directory
            # Recursion here could end up causing weird problems, especially if we're using it to download files from a root folder for example
            # It would traverse all the directories and end up downloading every file on the entire box that matched. Not good.
            # I will probably add a -r switch or something for this specific purpose
            if dltype.text.startswith('Directory') and dlurl['href'].startswith('.') is not True and pattern is None:
                newcwd = cwd + urllib2.unquote(dlurl['href'])
                print "Directory: " + newcwd
                downloadList = downloadList + findFilesWithPattern(newcwd, "{0}{1}".format(baseurl, dlurl['href']), pattern)
            else:
                filename = dlurl.contents[0]
                href = dlurl['href']
                if pattern is not None:
                    # Regex mode: keep only matching filenames.
                    if pattern.findall(filename):
                        p = [cwd, "{0}{1}".format(baseurl, href)]
                        downloadList.append(p)
                else:
                    # No pattern: take every file except '.'-relative links.
                    if href.startswith('.') is not True:
                        p = [cwd, "{0}{1}".format(baseurl, href)]
                        downloadList.append(p)
    return downloadList
def getBasicAuthString():
    """Return the base64 'user:pass' token for an HTTP Basic Auth header
    (newlines stripped because encodestring wraps long output)."""
    return base64.encodestring('%s:%s' % (USERNAME, PASSWORD)).replace('\n', '')
def downloadFileList(downloads):
    """Sequentially download every [local_dir, url] pair via aria2c."""
    if len(downloads) > 0:
        for f in downloads:
            startAndWaitForAria(f[0], f[1])
    else:
        print "No files found in directory!"
def singleFileDownload(url):
    """Download *url*: recurse the listing when it is a directory
    (trailing slash), otherwise fetch the single file into the CWD."""
    if url.endswith('/'):
        downloadFileList(findFilesWithPattern(os.getcwd() + '/', url, None))
    else:
        startAndWaitForAria(os.getcwd() + '/', url)
def multiRegexDownload(url, reg):
    """Download every file in directory *url* whose name matches regex *reg*."""
    if url.endswith('/') is not True:
        print "This mode only supports directories!"
    else:
        downloadFileList(findFilesWithPattern(os.getcwd() + '/', url, re.compile(reg)))
def downloadAuthFile(url):
request = urllib2.Request(url)
request.add_header("Authorization", "Basic %s" % getBasicAuthString())
try:
return urllib2.urlopen(request)
except urllib2.URLError, e:
print "URL Error ({0}): {1}".format(e.errno, e.strerror)
except urllib2.HTTPError, e:
print "HTTP Error ({0}): {1}".format(e.errno, e.strerror)
except:
print "Unknown Exception: ", sys.exc_info()[0]
return None
# CLI entry: one argument = single file or whole directory; two arguments =
# directory plus a regex filter.
if len(sys.argv) == 2:
    singleFileDownload(sys.argv[1])
elif len(sys.argv) == 3:
    multiRegexDownload(sys.argv[1], sys.argv[2])
else:
    print "Parameter mismatch: Please enter URL to either a file, or a directory with an optional regex pattern"
# Should be a context manager: __del__ runs at unpredictable times and its
# exceptions are suppressed. This class looks like a lint/analysis fixture
# exercising "heavy work in __del__".
class MegaDel(object):
    """Class with a deliberately heavyweight __del__ (apparent lint fixture)."""
    def __del__(self):
        # NOTE(review): self.x / self.y are never assigned in this chunk and
        # `sys` is not imported here -- presumably provided elsewhere, or the
        # code is intentionally broken for the analyzer; confirm.
        a = self.x + self.y
        if a:
            print(a)
        if sys._getframe().f_lineno > 100:
            print("Hello")
            sum = 0
            for a in range(100):
                sum += a
            print(sum)
class MiniDel(object):
    """Class whose __del__ merely delegates to close() -- the benign
    counterpart to MegaDel above (apparent lint fixture)."""
    def close(self):
        # No-op resource release hook.
        pass
    def __del__(self):
        self.close()
import requests
from bs4 import BeautifulSoup
import lxml
import re
import ast
import urllib.parse as urllib
class season_crawler():
    """Scrape a motorsport season page: fetch the page once at construction
    time, then parse the calendar and entry list out of its HTML tables.

    NOTE(review): parsing assumes <h2> headings titled "Calendar" and
    "Entry List" whose parent element contains the relevant <table>.
    """
    def __init__(self, url):
        # Single GET at construction; all parsing works on this snapshot.
        self.url = url
        self.hostname = urllib.urlparse(url).hostname
        self.source = requests.get(url).text
        self.soup = BeautifulSoup(self.source, 'html.parser')
    def get_calendar(self):
        """Return {event name: [session-facts URL, date string]} parsed from
        the table under the "Calendar" heading."""
        calendar = {}
        # Find out where the calendar is
        h2s = self.soup.select('h2')
        for h2 in h2s:
            if h2.text == "Calendar":
                div = h2.parent
                # Find out where the event and date are
                table = div.find('table')
                table_head = table.find('thead')
                date_idx = 0
                gp_idx = 0
                idx = 0
                rows = table_head.find_all('tr')
                for row in rows:
                    cols = row.find_all('th')
                    for col in cols:
                        if col.text == "Date":
                            date_idx = idx
                        if col.text == "Event":
                            gp_idx = idx
                        idx = idx + 1
                tables_body = table.find_all('tbody')
                for table in tables_body:
                    rows = table.find_all('tr')
                    for row in rows:
                        cols = row.find_all('td')
                        links = cols[gp_idx].select('a')
                        # Only the first link of the event cell is used.
                        # NOTE(review): if a row has no link, `link` keeps the
                        # previous row's value (or raises NameError on the
                        # first row) -- confirm the source markup guarantees one.
                        for link in links:
                            link = link.get("href")
                            break
                        link = link.replace("/classification", "/session-facts")
                        calendar[cols[gp_idx].text] = ["http://" + self.hostname + link, cols[date_idx].text]
        return calendar
    def get_drivers(self):
        """Return the driver names listed under the "Entry List" heading."""
        drivers = []
        # Find out where the Drivers are
        h2s = self.soup.select('h2')
        for h2 in h2s:
            if h2.text == "Entry List":
                div = h2.parent
                # Find out where the drivers are
                table = div.find('table')
                table_head = table.find('thead')
                driver_idx = 0
                idx = 0
                rows = table_head.find_all('tr')
                for row in rows:
                    cols = row.find_all('th')
                    for col in cols:
                        if col.text == "Drivers":
                            driver_idx = idx
                        idx = idx + 1
                tables_body = table.find_all('tbody')
                for table in tables_body:
                    rows = table.find_all('tr')
                    for row in rows:
                        cols = row.find_all('td')
                        drivers.append(cols[driver_idx].text)
        return drivers
| 2,592 | 742 |
import csv
# Stream the sudoku CSV and echo each parsed row (Python 2 print statement).
with open("../sudoku.csv", "r") as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        print row
import config
import discord
from discord.errors import HTTPException
from discord.ext import commands
from discord.ext.tasks import loop
from .clock import (
clock_embed,
get_or_create_message,
new_channel_name,
get_embed_title,
)
class Clock(commands.Cog, name="🕒 Clock"):
    """Cog that maintains a live clock: a pinned embed message edited every
    second, and a channel whose name tracks the current time."""
    def __init__(self, bot: commands.Bot):
        self.__bot = bot
    @commands.Cog.listener()
    async def on_ready(self):
        """When discord is connected"""
        # Start clock
        self.clock.start()
        print("Starting clock...")
    @loop(seconds=1)
    async def clock(self):
        """Loop to check and update clock"""
        # update the clock message
        try:
            embed = clock_embed()
            # update only if the time is different
            if embed.title != get_embed_title(self.__message):
                # edit the message
                await self.__message.edit(embed=embed)
        except HTTPException:
            # if message doesn't exist, create a new one
            self.__message = await get_or_create_message(self.__bot, self.__channel)
        # update channel name if it has changed
        channel_name = new_channel_name()
        if self.__channel.name != channel_name:
            await self.__channel.edit(name=channel_name)
    @clock.before_loop
    async def clock_init(self) -> None:
        """Resolve the clock channel and message before the loop starts."""
        # get clock channel object
        self.__channel = self.__bot.get_channel(config.CLOCK_CHANNEL_ID)
        # check that channel exists
        if not isinstance(self.__channel, discord.TextChannel):
            print("Couldn't find that channel.")
            return self.clock.cancel()
        # if channel exists, get the last message from the bot or create one
        self.__message = await get_or_create_message(self.__bot, self.__channel)
def setup(bot):
    # Standard discord.py extension entry point: register the Clock cog.
    bot.add_cog(Clock(bot))
| 1,921 | 537 |
### Required Libraries ###
from datetime import datetime
from dateutil.relativedelta import relativedelta
from botocore.vendored import requests
### Functionality Helper Functions ###
def parse_float(n):
    """
    Securely converts a value to float, returning NaN when it cannot be
    converted.
    """
    try:
        return float(n)
    except (ValueError, TypeError):
        # TypeError covers None (e.g. an unfilled Lex slot); the original
        # only caught ValueError and would crash on it.
        return float("nan")
def build_validation_result(is_valid, violated_slot, message_content):
    """
    Build the internal validation payload (a plain dict); the "message"
    entry is only present when message_content is provided.
    """
    result = {"isValid": is_valid, "violatedSlot": violated_slot}
    if message_content is not None:
        result["message"] = {
            "contentType": "PlainText",
            "content": message_content,
        }
    return result
def validate_data(birthdate, term, intent_request):
    """
    Validates the data provided by the user.

    Note: the passed-in `birthdate`/`term` arguments are immediately
    re-read from `intent_request` (behaviour kept from the original), so
    the slot values in the request are authoritative.  The original
    docstring sat after the first statements, making it a dead string
    literal; it has been moved here.

    Returns a validation-result dict from build_validation_result().
    """
    birthdate = get_slots(intent_request)["birthdate"]
    term = get_slots(intent_request)["term"]
    # Validate that the user is over 18 years old
    if birthdate is not None:
        birth_date = datetime.strptime(birthdate, "%Y-%m-%d")
        age = relativedelta(datetime.now(), birth_date).years
        if age < 18:
            return build_validation_result(
                False,
                "birthdate",
                "You should be at least 18 years old to use this service, "
                "please provide a different date of birth.",
            )
    # Validate term length (short or long)
    if term is not None:
        if term.lower() not in {"short", "long"}:
            return build_validation_result(
                False,
                "term",
                "I don't understand. Please enter short or long."
            )
    # All checks passed.
    return build_validation_result(True, True, None)
### Dialog Actions Helper Functions ###
def get_slots(intent_request):
    """
    Return the slot mapping of the request's current intent.
    """
    current_intent = intent_request["currentIntent"]
    return current_intent["slots"]
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """
    Build an ElicitSlot dialog-action response (re-prompts the user for
    the named slot).
    """
    dialog_action = {
        "type": "ElicitSlot",
        "intentName": intent_name,
        "slots": slots,
        "slotToElicit": slot_to_elicit,
        "message": message,
    }
    return {"sessionAttributes": session_attributes, "dialogAction": dialog_action}
def delegate(session_attributes, slots):
    """
    Build a Delegate dialog-action response (lets Lex pick the next step).
    """
    dialog_action = {"type": "Delegate", "slots": slots}
    return {
        "sessionAttributes": session_attributes,
        "dialogAction": dialog_action,
    }
def close(session_attributes, fulfillment_state, message):
    """
    Build a Close dialog-action response (ends the conversation with the
    given fulfillment state and message).
    """
    dialog_action = {
        "type": "Close",
        "fulfillmentState": fulfillment_state,
        "message": message,
    }
    return {"sessionAttributes": session_attributes, "dialogAction": dialog_action}
### Intents Handlers ###
def ellie_conversation(intent_request):
    """
    Performs dialog management and fulfillment for the EllieIntro intent.

    During Lex's "DialogCodeHook" phase the slots are validated (invalid
    input triggers a re-prompt); on the fulfillment phase the term/risk
    slots are scored and a portfolio recommendation is returned.
    """
    # Gets slots' values
    birthdate = get_slots(intent_request)["birthdate"]
    term_length = get_slots(intent_request)["term"]
    risk_level = get_slots(intent_request)["risk"]
    # Scores derived from the term/risk slots; 0 means missing/unrecognised.
    # BUGFIX: the original never initialised risk_lev_num, so a missing or
    # unexpected risk slot raised NameError at the allocation step below.
    term_len_num = 0
    risk_lev_num = 0
    # Gets the invocation source, for Lex dialogs "DialogCodeHook" is expected.
    source = intent_request["invocationSource"]
    if source == "DialogCodeHook":
        # Validation phase: check the supplied slots.
        slots = get_slots(intent_request)
        validation_result = validate_data(birthdate, term_length, intent_request)
        # If the data provided by the user is not valid, the elicitSlot
        # dialog action re-prompts for the first violation detected.
        if not validation_result["isValid"]:
            slots[validation_result["violatedSlot"]] = None  # Cleans invalid slot
            return elicit_slot(
                intent_request["sessionAttributes"],
                intent_request["currentIntent"]["name"],
                slots,
                validation_result["violatedSlot"],
                validation_result["message"],
            )
        # Fetch current session attributes
        output_session_attributes = intent_request["sessionAttributes"]
        # All slots valid: delegate the next step back to Lex.
        return delegate(output_session_attributes, get_slots(intent_request))
    # Fulfillment phase: map the slots to numeric scores.
    if term_length is not None:
        if term_length.lower() == "short":
            term_len_num = 10
        elif term_length.lower() == "long":
            term_len_num = 20
    if risk_level is not None:
        if risk_level.lower() == "none":
            risk_lev_num = 1
        elif risk_level.lower() == "low":
            risk_lev_num = 2
        elif risk_level.lower() == "medium":
            risk_lev_num = 3
        elif risk_level.lower() == "high":
            risk_lev_num = 4
    # term score (tens) + risk score (units) encodes both answers at once.
    allocation = term_len_num + risk_lev_num
    if allocation in {11, 12, 21, 22}:
        return close(
            intent_request["sessionAttributes"],
            "Fulfilled",
            {
                "contentType": "PlainText",
                "content" : "Your ideal portfolio should be 40% stocks and 60% bonds. Which would you like to explore first?"
            })
    elif allocation in {13, 14, 23, 24}:
        return close(
            intent_request["sessionAttributes"],
            "Fulfilled",
            {
                "contentType": "PlainText",
                "content": "Your ideal portfolio should be 80% stocks and 20% bonds. Which would you like to explore first?"
            })
    # Unrecognised combination: fall through (returns None, as before).
### Intents Dispatcher ###
def dispatch(intent_request):
    """
    Route the incoming request to its intent handler; unknown intents
    raise an Exception.
    """
    # Name of the intent the user triggered.
    intent_name = intent_request["currentIntent"]["name"]
    if intent_name == "EllieIntro":
        return ellie_conversation(intent_request)
    raise Exception(f"Intent with name {intent_name} not supported")
### Main Handler ###
def lambda_handler(event, context):
    """
    Route the incoming request based on intent.
    The JSON body of the request is provided in the event slot.
    `context` is the standard AWS Lambda context object (unused here).
    """
    return dispatch(event)
| 6,989 | 2,075 |
# Sample data for the radix-sort demo at the bottom of the file.
arr=[123,321,487,908,123,465,987,46,762,12389]
def findMax(arr):
    """Return the largest element of *arr*, or None when *arr* is empty."""
    Max = None
    for num in arr:
        # `is None` instead of `== None`: identity is the correct (PEP 8)
        # way to test for the sentinel.
        if Max is None or num > Max:
            Max = num
    return Max
def checkDigit(num):
    """Count the decimal digits of *num* (0 maps to 0, matching the
    original loop's behaviour)."""
    digits = 0
    # Grow the modulus until it captures the whole number.
    while num % (10 ** digits) != num:
        digits += 1
    return digits
def digit(i, num):
    """Return the i-th decimal digit of *num* (1-based from the least
    significant digit).

    Uses integer floor division throughout; the original routed the result
    through float division, which loses precision for numbers larger than
    2**53.
    """
    return (num % (10 ** i)) // (10 ** (i - 1))
def radix(arr):
    """LSD radix sort (base 10) for a list of non-negative integers.

    Returns a new sorted list; the input list is not modified.  The
    debug `print(lst)` from the original has been removed.
    """
    digitMax = checkDigit(findMax(arr))
    currentDigit = 0
    while currentDigit <= digitMax:
        currentDigit += 1
        # One bucket per possible digit value 0-9.
        buckets = [[] for _ in range(10)]
        for num in arr:
            # Numbers shorter than the current position have an implicit
            # zero digit there, so they belong in bucket 0.
            if checkDigit(num) < currentDigit:
                buckets[0].append(num)
            else:
                buckets[digit(currentDigit, num)].append(num)
        # Stable concatenation of the buckets for the next pass.
        arr = [num for bucket in buckets for num in bucket]
    return arr
# Demo: sort the sample data and print the result.
print(radix(arr))
| 876 | 338 |
from django.urls import path
from .views import (
UserLoginView,
UserLogoutView,
UserSignupView,
UserUpdateView,
UserDeleteView,
UserProfileView,
)
# URL namespace: reverse these routes as "user:<name>", e.g. "user:login".
app_name = "user"
urlpatterns = [
    # Account lifecycle
    path("signup", UserSignupView.as_view(), name="signup"),
    path("update", UserUpdateView.as_view(), name="update"),
    path("delete", UserDeleteView.as_view(), name="delete"),
    # Session management
    path("login", UserLoginView.as_view(), name="login"),
    path("logout", UserLogoutView.as_view(), name="logout"),
    # Profile display
    path("profile", UserProfileView.as_view(), name="profile"),
]
| 577 | 187 |
from django.views import View
class PostOnlyView(View):
    """Base view that only implements POST: binds `form_class` to the
    submitted data and dispatches to the form_valid/form_invalid hooks."""

    # Subclasses must set this to a Django form class.
    form_class = None

    def post(self, request, *args, **kwargs):
        """Bind the form to request.POST and dispatch on its validity."""
        bound_form = self.form_class(request.POST)
        if not bound_form.is_valid():
            return self.form_invalid(bound_form)
        return self.form_valid(bound_form)

    def form_valid(self, form):
        """Hook: called with a valid form; override in subclasses."""
        pass

    def form_invalid(self, form):
        """Hook: called with an invalid form; override in subclasses."""
        pass
import io
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from keras.models import model_from_json
import dlib
class Model:
    """This class represents the base model of the whole project. The model
    predicts if the face on the picture is smiling or not. The model and
    its weights are being loaded from data/model/. The main method of the
    class is predict_labels(), which takes an input image, finds faces in
    this image, draws their boundary boxes and labels these faces by adding
    corresponding emojis on the picture.
    """
    def __init__(self):
        # Emoji stickers; OpenCV loads BGR, so convert to RGB once up front.
        self.smiley = cv2.imread('data/pics/smiling.png')
        self.neutral = cv2.imread('data/pics/neutral.png')
        self.smiley = cv2.cvtColor(self.smiley, cv2.COLOR_BGR2RGB)
        self.neutral = cv2.cvtColor(self.neutral, cv2.COLOR_BGR2RGB)
        # Keras model: architecture from JSON, weights loaded separately.
        self.model = model_from_json(open('data/model/model.json').read())
        self.model.load_weights('data/model/weights.h5')
        # NOTE(review): tf.get_default_graph() is TensorFlow 1.x API; the
        # graph is captured here so worker threads can predict (see
        # get_smile_label) -- this will not run under TF 2.x.
        self.graph = tf.get_default_graph()
    @staticmethod
    def convert2rgb(img):
        """Convert BGR image into RGB
        Parameters: img: ndarray
        :rtype: ndarray
        """
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    @staticmethod
    def rect_to_bb(rect):
        """Returns rectangle parameters (x, y, width, height) from a dlib
        rectangle object
        Parameters: rect: object
        :rtype: tuple
        """
        rect_x = rect.left()
        rect_y = rect.top()
        rect_w = rect.right() - rect_x
        rect_h = rect.bottom() - rect_y
        return rect_x, rect_y, rect_w, rect_h
    def get_faces(self, img):
        """Find faces in the picture and return list of boundary boxes
        of found faces
        Parameters: img: ndarray
        :rtype: list
        """
        image = img
        # NOTE(review): the detector is rebuilt on every call; hoisting it
        # to __init__ would avoid repeated construction cost.
        detector = dlib.get_frontal_face_detector()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Second argument 1 = upsample the image once before detecting.
        rects = detector(gray, 1)
        faces = []
        for rect in rects:
            faces.append(list(self.rect_to_bb(rect)))
        return faces
    def get_smile_label(self, img_face):
        """Predict smile on a face and return label
        (0 for neutral and 1 for smiling face)
        Parameters: img_face: ndarray
        :rtype: int
        """
        # Network input: 32x32 grayscale, scaled to [0, 1].
        gray_cr_res = cv2.cvtColor(cv2.resize(img_face, (32, 32)),
                                   cv2.COLOR_BGR2GRAY)
        gray_cr_res = np.reshape(gray_cr_res, (32, 32, 1)) / 255
        # Predict under the captured TF1 graph (thread-safety for servers).
        with self.graph.as_default():
            score = self.model.predict(np.array([gray_cr_res]))[0][1]
        # Decision threshold on the "smiling" class probability.
        threshold = 0.12
        if score > threshold:
            label = 1
        else:
            label = 0
        return label
    @staticmethod
    def get_sticker_backgr(backgr, sticker):
        """Merge a sticker and its background and return merged image
        Parameters: backgr: ndarray
                    sticker: ndarray
        :rtype: ndarray
        """
        # Binary mask of the sticker's non-black pixels; its inverse keeps
        # the background where the sticker is transparent (black).
        sticker_gray = cv2.cvtColor(sticker, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(sticker_gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        backgr_bg = cv2.bitwise_and(backgr, backgr, mask=mask_inv)
        sticker_fg = cv2.bitwise_and(sticker, sticker, mask=mask)
        merged = cv2.add(backgr_bg, sticker_fg)
        return merged
    @staticmethod
    def crop_face(bound_box, img):
        """Crop face from the input image according to its boundary box,
        clamping the box to the image and replicating border pixels for
        any part that fell outside
        Parameters: bound_box: list
                    img: ndarray
        :rtype: ndarray
        """
        f_x, f_y, f_w, f_h = bound_box
        top, bottom, left, right = 0, 0, 0, 0
        # Clamp each side, remembering how much was cut off.
        if f_y < 0:
            top = - f_y
            f_y = 0
        if f_x < 0:
            left = - f_x
            f_x = 0
        if f_x + f_w > img.shape[1]:
            right = f_x + f_w - img.shape[1]
            f_w -= right
        if f_y + f_h > img.shape[0]:
            bottom = f_y + f_h - img.shape[0]
            f_h -= bottom
        img_cropped = img[f_y:f_y + f_h, f_x:f_x + f_w]
        # Pad back to the requested size by replicating edge pixels.
        img_cropped = cv2.copyMakeBorder(img_cropped,
                                         top, bottom, left, right,
                                         cv2.BORDER_REPLICATE)
        return img_cropped
    def add_stickers(self, img, faces, labels):
        """Add emoji sticker in the input picture according to predicted
        smile labels
        Parameters: img: ndarray of input image
                    faces: list of boundary boxes
                    labels: list of smile labels
        :rtype: ndarray
        """
        image = np.array(img)
        for i, label in enumerate(labels):
            # Clamp the face box to the image bounds (mutates `faces`).
            if faces[i][0] < 0:
                faces[i][2] += faces[i][0]
                faces[i][0] = 0
            if faces[i][1] < 0:
                faces[i][3] += faces[i][1]
                faces[i][1] = 0
            if faces[i][0] + faces[i][2] > image.shape[1]:
                faces[i][2] = image.shape[1] - faces[i][0]
            if faces[i][1] + faces[i][3] > image.shape[0]:
                faces[i][3] = image.shape[0] - faces[i][1]
            # Sticker edge scaled to roughly half the smaller face dimension.
            st_size = int(min(faces[i][2], faces[i][3]) // 2.2)
            smiley = cv2.resize(self.smiley, (st_size, st_size))
            neutral = cv2.resize(self.neutral, (st_size, st_size))
            # Bottom-right corner of the face box hosts the sticker.
            y_1 = faces[i][1] + faces[i][3] - st_size
            y_2 = faces[i][1] + faces[i][3]
            x_1 = faces[i][0] + faces[i][2] - st_size
            x_2 = faces[i][0] + faces[i][2]
            if label == 1:
                image[y_1:y_2, x_1:x_2] = \
                    self.get_sticker_backgr(image[y_1:y_2, x_1:x_2], smiley)
            else:
                image[y_1:y_2, x_1:x_2] = \
                    self.get_sticker_backgr(image[y_1:y_2, x_1:x_2], neutral)
        return image
    def predict_labels(self, img):
        """Predict if there smiles on picture and label faces with corresponding emoji
        Parameters: img: io.BytesIO of input image
        :rtype: tuple of (face count, io.BytesIO) -- or (0, ndarray) when
                no face is found (NOTE(review): inconsistent return type
                in the zero-face branch; confirm callers handle it)
        """
        image = self.bytes2ndarray(img)
        faces = self.get_faces(image)
        num_faces = len(faces)
        labels = []
        if num_faces == 0:
            return num_faces, image
        for bound_box in faces:
            img_cropped = self.crop_face(bound_box, image)
            label = self.get_smile_label(img_cropped)
            labels.append(label)
        # Draw green bounding boxes, then overlay the emoji stickers.
        color = (0, 255, 0)
        for (f_x, f_y, f_w, f_h) in faces:
            cv2.rectangle(image, (f_x, f_y), (f_x + f_w, f_y + f_h), color, 2)
        image = self.add_stickers(image, faces, labels)
        return num_faces, self.ndarray2bytes(image)
    @staticmethod
    def ndarray2bytes(array):
        """Convert image ndarray into a JPEG bytes buffer
        Parameters: array: ndarray
        :rtype: io.BytesIO
        """
        buf = io.BytesIO()
        Image.fromarray(array).save(buf, format="jpeg")
        return buf
    @staticmethod
    def bytes2ndarray(buf):
        """Convert a bytes buffer into an image ndarray
        Parameters: buf: io.BytesIO
        :rtype: ndarray
        """
        image = np.array(Image.open(buf))
        return image
| 7,232 | 2,416 |
import sys
import signal
from functools import partial
def handler(counters, signum, *args):
    """Signal callback: log the delivery, bump the shared counter, and
    terminate the process once ten signals have been received."""
    print(f"C1 received signal {signum}")
    new_count = counters["i"] + 1
    counters["i"] = new_count
    if new_count == 10:
        sys.exit()
def main():
    """Install the platform-appropriate signal handler and wait forever."""
    import time  # local import keeps this fix self-contained

    counters = {"i": 0}
    if sys.platform.startswith("linux"):
        signal.signal(signal.SIGALRM, partial(handler, counters))
    else:
        # SIGBREAK only exists on Windows (Ctrl+Break).
        signal.signal(signal.SIGBREAK, partial(handler, counters))
    while True:
        # Sleep instead of a busy `pass` loop: signals still interrupt the
        # sleep, but the process no longer pins a CPU core at 100%.
        time.sleep(1)
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main()
| 498 | 173 |
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import asyncio
from io import BytesIO
from pathlib import Path
from datetime import datetime
from tempfile import TemporaryDirectory
# * Third Party Imports --------------------------------------------------------------------------------->
import discord
from PIL import Image, ImageEnhance, ImageDraw, ImageFont, ImageFilter
from pytz import timezone
from discord.ext import commands, flags
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# * Local Imports --------------------------------------------------------------------------------------->
from antipetros_discordbot.utility.misc import alt_seconds_to_pretty
from antipetros_discordbot.utility.checks import allowed_channel_and_allowed_role, has_attachments, log_invoker, owner_or_admin
from antipetros_discordbot.utility.embed_helpers import make_basic_embed
from antipetros_discordbot.utility.gidtools_functions import pathmaker
from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper
from antipetros_discordbot.utility.exceptions import ParameterError
from antipetros_discordbot.utility.image_manipulation import find_min_fontsize, make_perfect_fontsize
from typing import TYPE_CHECKING
from antipetros_discordbot.utility.enums import CogMetaStatus, UpdateTypus, WatermarkPosition
from antipetros_discordbot.engine.replacements import AntiPetrosBaseCog, AntiPetrosBaseGroup, AntiPetrosFlagCommand, CommandCategory, auto_meta_info_command, auto_meta_info_group
if TYPE_CHECKING:
from antipetros_discordbot.engine.antipetros_bot import AntiPetrosBot
# endregion[Imports]
# region [TODO]
# TODO: create regions for this file
# TODO: Document and Docstrings
# endregion [TODO]
# region [Logging]
log = glog.aux_logger(__name__)
glog.import_notification(log, __name__)
# endregion[Logging]
# region [Constants]
# Per-user data locations and configuration handles, resolved through the
# shared ParaStorageKeeper.
APPDATA = ParaStorageKeeper.get_appdata()
BASE_CONFIG = ParaStorageKeeper.get_config('base_config')
COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config')
THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__))  # location of this file, does not work if app gets compiled to exe with pyinstaller
# endregion [Constants]
class ImageManipulationCog(AntiPetrosBaseCog, command_attrs={'hidden': False, "categories": CommandCategory.GENERAL}):
    """
    Commands that manipulate or generate images.

    Stamps (watermark images) are loaded from the app-data stamp folder and can
    be pasted onto user-supplied attachments or member avatars at one of nine
    positions (3x3 grid). Fonts for text-on-image commands come from the
    app-data font folder.
    """
    # region [ClassAttributes]
    public = True
    meta_status = CogMetaStatus.WORKING | CogMetaStatus.OPEN_TODOS | CogMetaStatus.FEATURE_MISSING | CogMetaStatus.NEEDS_REFRACTORING
    long_description = ""
    extra_info = ""
    # config sections and fallback values this cog reads via COGS_CONFIG.retrieve
    required_config_data = {'base_config': {},
                            'cogs_config': {"avatar_stamp": "ASLOGO1",
                                            "avatar_stamp_fraction": "0.2",
                                            "stamps_margin": "5",
                                            "stamp_fraction": "0.3"}}
    required_folder = []
    required_files = []
    # attachment extensions accepted by the stamp_image command
    allowed_stamp_formats = {".jpg", ".jpeg", ".png", ".tga", ".tiff", ".ico", ".icns", ".gif"}
    # canonical position word -> WatermarkPosition flag
    stamp_positions = {'top': WatermarkPosition.Top, 'bottom': WatermarkPosition.Bottom, 'left': WatermarkPosition.Left, 'right': WatermarkPosition.Right, 'center': WatermarkPosition.Center}
    # endregion[ClassAttributes]
    # region [Init]
    def __init__(self, bot: "AntiPetrosBot"):
        super().__init__(bot)
        self.stamp_location = APPDATA['stamps']
        self.stamps = {}  # stamp name (upper-case) -> file path, filled by _get_stamps
        self.nato_symbol_parts_location = APPDATA['nato_symbol_parts']
        self.nato_symbol_parts_images = {}
        # combined WatermarkPosition flags -> paste helper realizing that placement
        self.stamp_pos_functions = {WatermarkPosition.Right | WatermarkPosition.Bottom: self._to_bottom_right,
                                    WatermarkPosition.Right | WatermarkPosition.Top: self._to_top_right,
                                    WatermarkPosition.Right | WatermarkPosition.Center: self._to_center_right,
                                    WatermarkPosition.Left | WatermarkPosition.Bottom: self._to_bottom_left,
                                    WatermarkPosition.Left | WatermarkPosition.Top: self._to_top_left,
                                    WatermarkPosition.Left | WatermarkPosition.Center: self._to_center_left,
                                    WatermarkPosition.Center | WatermarkPosition.Center: self._to_center_center,
                                    WatermarkPosition.Center | WatermarkPosition.Bottom: self._to_bottom_center,
                                    WatermarkPosition.Center | WatermarkPosition.Top: self._to_top_center}
        # same placement helpers keyed by numberpad digit (phone-keypad layout: 1=bottom-left .. 9=top-right)
        self.stamp_pos_functions_by_num = {'3': self._to_bottom_right,
                                           '9': self._to_top_right,
                                           '6': self._to_center_right,
                                           '1': self._to_bottom_left,
                                           '7': self._to_top_left,
                                           '4': self._to_center_left,
                                           '5': self._to_center_center,
                                           '2': self._to_bottom_center,
                                           '8': self._to_top_center}
        # accepted aliases for the canonical position words used by _normalize_pos
        self.position_normalization_table = {'top': ['upper', 'above', 'up', 't', 'u'],
                                             'bottom': ['down', 'lower', 'b', 'base'],
                                             'center': ['middle', 'c', 'm'],
                                             'left': ['l'],
                                             'right': ['r']}
        # self.base_map_image = Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v3_2000_w_outposts.png")
        # self.outpost_overlay = {'city': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_city_marker.png"),
        #                         'volcano': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_volcano_marker.png"),
        #                         'airport': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_airport_marker.png")}
        self.old_map_message = None
        self.color = "blue"
    # endregion[Init]
    # region [Setup]
    async def on_ready_setup(self):
        """Load stamp and NATO-symbol image indexes once the bot is ready."""
        await super().on_ready_setup()
        self._get_stamps()
        self._get_nato_symbol_parts()
        self.ready = True
        log.debug('setup for cog "%s" finished', str(self))
    async def update(self, typus: UpdateTypus):
        """Re-scan the stamp folder when the bot signals an update."""
        await super().update(typus=typus)
        self._get_stamps()
        log.debug('cog "%s" was updated', str(self))
    # endregion[Setup]
    # region [Properties]
    @property
    def target_stamp_fraction(self):
        # fraction of the target image area the stamp should roughly cover
        return COGS_CONFIG.retrieve(self.config_name, 'stamp_fraction', typus=float, direct_fallback=0.2)
    @property
    def stamp_margin(self):
        # pixel distance kept between the stamp and the image edge
        return COGS_CONFIG.retrieve(self.config_name, 'stamps_margin', typus=int, direct_fallback=5)
    @property
    def avatar_stamp_fraction(self):
        return COGS_CONFIG.retrieve(self.config_name, 'avatar_stamp_fraction', typus=float, direct_fallback=0.3)
    @property
    def avatar_stamp(self):
        # stamp image (full opacity) configured for avatar stamping
        stamp_name = COGS_CONFIG.retrieve(self.config_name, 'avatar_stamp', typus=str, direct_fallback='ASLOGO1').upper()
        return self._get_stamp_image(stamp_name, 1)
    @property
    def fonts(self):
        # lower-cased font name -> font file path; rebuilt from disk on every access
        fonts = {}
        for file in os.scandir(APPDATA['fonts']):
            if file.is_file() and file.name.endswith('ttf'):
                fonts[file.name.split('.')[0].casefold()] = pathmaker(file.path)
        return fonts
    # endregion[Properties]
    # region [Listener]
    # endregion[Listener]
    # region [Commands]
    @flags.add_flag("--stamp-image", "-si", type=str, default='ASLOGO')
    @flags.add_flag("--first-pos", '-fp', type=str, default="bottom")
    @flags.add_flag("--second-pos", '-sp', type=str, default="right")
    @flags.add_flag("--stamp-opacity", '-so', type=float, default=1.0)
    @flags.add_flag('--factor', '-f', type=float, default=None)
    @auto_meta_info_command(cls=AntiPetrosFlagCommand)
    @allowed_channel_and_allowed_role(in_dm_allowed=False)
    @commands.max_concurrency(1, per=commands.BucketType.guild, wait=True)
    async def stamp_image(self, ctx, **flags):
        """
        Stamps an image with a small image from the available stamps.
        Needs to have the to stamp image as an attachment on the invoking message.
        Usefull for watermarking images.
        Get all available stamps with '@AntiPetros available_stamps'
        Example:
        @AntiPetros stamp_image -si ASLOGO -fp bottom -sp right -so 0.5 -f 0.25
        """
        async with ctx.channel.typing():
            if len(ctx.message.attachments) == 0:
                # TODO: make as embed
                await ctx.send('! **there is NO image to antistasify** !')
                return
            if flags.get('stamp_image') not in self.stamps:
                # TODO: make as embed
                await ctx.send("! **There is NO stamp with that name** !")
                return
            first_pos = self.stamp_positions.get(flags.get("first_pos").casefold(), None)
            second_pos = self.stamp_positions.get(flags.get("second_pos").casefold(), None)
            # `any(... is None)` short-circuits before `first_pos | second_pos` is evaluated
            if any(_pos is None for _pos in [first_pos, second_pos]) or first_pos | second_pos not in self.stamp_pos_functions:
                # TODO: make as embed
                await ctx.send("! **Those are NOT valid position combinations** !")
                return
            for _file in ctx.message.attachments:
                # TODO: maybe make extra attribute for input format, check what is possible and working. else make a generic format list
                if any(_file.filename.endswith(allowed_ext) for allowed_ext in self.allowed_stamp_formats):
                    _stamp = self._get_stamp_image(flags.get('stamp_image'), flags.get('stamp_opacity'))
                    _stamp = _stamp.copy()
                    with TemporaryDirectory(prefix='temp') as temp_dir:
                        temp_file = Path(pathmaker(temp_dir, 'temp_file.png'))
                        log.debug("Tempfile '%s' created", temp_file)
                        await _file.save(temp_file)
                        in_image = await asyncio.to_thread(Image.open, temp_file)
                        in_image = await asyncio.to_thread(in_image.copy)
                        factor = self.target_stamp_fraction if flags.get('factor') is None else flags.get('factor')
                        pos_function = self.stamp_pos_functions.get(first_pos | second_pos)
                        in_image = await asyncio.to_thread(pos_function, in_image, _stamp, factor)
                        name = 'antistasified_' + os.path.splitext(_file.filename)[0]
                        # NOTE(review): with multiple attachments this delete is attempted
                        # once per attachment and will fail after the first — confirm intended.
                        await ctx.message.delete()
                        # TODO: make as embed
                        await self._send_image(ctx, in_image, name, f"__**{name}**__")
    @auto_meta_info_command()
    @allowed_channel_and_allowed_role(in_dm_allowed=False)
    @commands.cooldown(1, 120, commands.BucketType.channel)
    async def available_stamps(self, ctx):
        """
        Posts all available stamps.
        Removes them after 2min to keep channel clean.
        Example:
        @AntiPetros available_stamps
        """
        await ctx.message.delete()
        await ctx.send(embed=await make_basic_embed(title="__**Currently available Stamps are:**__", footer="These messages will be deleted in 120 seconds", symbol='photo'), delete_after=120)
        for name, image_path in self.stamps.items():
            thumb_image = Image.open(image_path)
            thumb_image.thumbnail((128, 128))
            with BytesIO() as image_binary:
                await asyncio.sleep(0)  # yield to the event loop between (blocking) image saves
                thumb_image.save(image_binary, 'PNG', optimize=True)
                image_binary.seek(0)
                _file = discord.File(image_binary, filename=name + '.png')
                embed = discord.Embed(title="Available Stamp")
                embed.add_field(name='Stamp Name:', value=name)
                embed.set_image(url=f"attachment://{name}.png")
                await ctx.send(embed=embed, file=_file, delete_after=120)
    @auto_meta_info_group(case_insensitive=True, cls=AntiPetrosBaseGroup)
    async def member_avatar(self, ctx):
        """
        Stamps the avatar of a Member with the Antistasi Crest.
        Returns the new stamped avatar as a .PNG image that the Member can save and replace his orginal avatar with.
        Example:
        @AntiPetros member_avatar
        """
        # group body intentionally empty: the subcommands below do the work
    @member_avatar.command()
    @allowed_channel_and_allowed_role()
    async def for_discord(self, ctx):
        # avatar stamped dead-center at 66% opacity
        modified_avatar = await self._member_avatar_helper(ctx.author, self._to_center_center, 0.66)
        name = f"{ctx.author.name}_Member_avatar"
        await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300)  # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
        await ctx.message.delete()
    @member_avatar.command()
    async def for_github(self, ctx):
        # avatar stamped bottom-center at full opacity
        modified_avatar = await self._member_avatar_helper(ctx.author, self._to_bottom_center, 1)
        name = f"{ctx.author.name}_Member_avatar"
        await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300)  # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
        await ctx.message.delete()
    @member_avatar.command()
    async def by_num(self, ctx, numberpad: str):
        # position chosen via a single numberpad digit (1-9), see stamp_pos_functions_by_num
        if len(numberpad) > 1:
            await ctx.send('please only enter a single digit for numberpad position, please retry!')
            return
        if numberpad == '0':
            await ctx.send('0 is not a valid position, please try again!')
            return
        func = self.stamp_pos_functions_by_num.get(numberpad)
        # FIX: non-digit input (e.g. 'a') previously fell through with func=None and crashed
        if func is None:
            await ctx.send(f'{numberpad} is not a valid position, please try again!')
            return
        modified_avatar = await self._member_avatar_helper(ctx.author, func, 1)
        name = f"{ctx.author.name}_Member_avatar"
        await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300)  # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
        await ctx.message.delete()
    @member_avatar.command()
    async def by_place(self, ctx, first_pos: str, second_pos: str):
        # position given as two words (e.g. "top right"); aliases are normalized first
        first_pos = await self._normalize_pos(first_pos)
        second_pos = await self._normalize_pos(second_pos)
        func = self.stamp_pos_functions.get(self.stamp_positions.get(first_pos) | self.stamp_positions.get(second_pos))
        modified_avatar = await self._member_avatar_helper(ctx.author, func, 1)
        name = f"{ctx.author.name}_Member_avatar"
        await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300)  # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
        await ctx.message.delete()
    @auto_meta_info_command(aliases=["get_image"])
    @allowed_channel_and_allowed_role()
    async def get_stamp_image(self, ctx: commands.Context, image_name: str):
        # posts a saved stamp by name (extension and case are ignored)
        image_name = image_name.split('.')[0].upper()
        if image_name not in self.stamps:
            await ctx.send(f"Don't have an image named `{image_name}` saved!", delete_after=120)
            return
        image = self.stamps.get(image_name)
        embed_data = await self.bot.make_generic_embed(title=image_name, description="Your requested image", thumbnail=None, image=image)
        await ctx.reply(**embed_data, allowed_mentions=discord.AllowedMentions.none())
    @auto_meta_info_command(aliases=["add_image"])
    @allowed_channel_and_allowed_role()
    @has_attachments(1)
    @log_invoker(log, "critical")
    async def add_stamp(self, ctx: commands.Context):
        """
        Adds a new stamp image to the available stamps.
        This command needs to have the image as an attachment.
        Example:
        @AntiPetros add_stamp
        """
        attachment = ctx.message.attachments[0]
        file_name = attachment.filename
        # case-insensitive duplicate check against files already on disk
        if file_name.casefold() in {file.casefold() for file in os.listdir(self.stamp_location)}:
            await ctx.reply(f"A Stamp file with the name `{file_name}` already exists, aborting!")
            return
        path = pathmaker(self.stamp_location, file_name)
        await attachment.save(path)
        stamp_name = file_name.split('.')[0].replace(' ', '_').strip().upper()
        await ctx.reply(f"successfully, saved new stamp. The stamp name to use is `{stamp_name}`")
        await self.bot.creator.send(f"New stamp was added by `{ctx.author.name}`", file=await attachment.to_file())
        self._get_stamps()  # refresh the in-memory index so the new stamp is usable immediately
    @auto_meta_info_command()
    @allowed_channel_and_allowed_role(in_dm_allowed=False)
    @has_attachments(1)
    async def text_to_image(self, ctx: commands.Context, font: str, *, text: str):
        # pastes `text` line by line onto the attached image using the named font
        mod_font_name = font.split('.')[0].casefold()
        if mod_font_name not in self.fonts:
            embed_data = await self.bot.make_generic_embed(title='Unkown Font', description=f"No font available with the name `{font}`.\nYou may have to add it via `@AntiPetros add_font`",
                                                           thumbnail="cancelled")
            await ctx.send(**embed_data, delete_after=120)
            return
        image_attachment = ctx.message.attachments[0]
        if image_attachment.filename.split('.')[-1].casefold() not in ['jpeg', 'png', 'jpg', 'tga']:
            embed_data = await self.bot.make_generic_embed(title="Wrong Image Format", description=f"Image need to be either `jpeg`, `png` or `tga` and not `{image_attachment.filename.split('.')[-1]}`",
                                                           thumbnail="cancelled")
            await ctx.send(**embed_data, delete_after=120)
            return
        with TemporaryDirectory() as tempdir:
            imagefilepath = pathmaker(tempdir, image_attachment.filename)
            await image_attachment.save(imagefilepath)
            base_image = Image.open(imagefilepath)
            base_image.load()
            width, height = base_image.size
            # font size chosen so the longest non-empty line fits the image width
            image_font = await asyncio.to_thread(find_min_fontsize, self.fonts.get(mod_font_name), [line for line in text.splitlines() if line != ''], width, height)
            top_space = 0
            for line in text.splitlines():
                if line == '':
                    # blank lines just advance the vertical cursor
                    top_space += ((height // 20) * 2)
                else:
                    base_image, top_space = await asyncio.to_thread(self.draw_text_line, base_image, line, top_space, image_font)
            await self._send_image(ctx, base_image, image_attachment.filename.split('.')[0] + '_with_text.png', "Modified Image", message_text="Here is your image with pasted Text", image_format='png')
    @auto_meta_info_command()
    @owner_or_admin()
    @has_attachments(1)
    async def add_font(self, ctx: commands.Context):
        # saves an attached .ttf file into the font folder
        font_attachment = ctx.message.attachments[0]
        if font_attachment.filename.split('.')[-1] != 'ttf':
            embed_data = await self.bot.make_generic_embed(title='Wrong input filetype', description=f"Attachment has to be a Truetype Font (extension: `.ttf`) and not `.{font_attachment.filename.split('.')[-1]}`",
                                                           thumbnail="not_possible")
            await ctx.send(**embed_data, delete_after=120)
            return
        new_path = pathmaker(APPDATA['fonts'], font_attachment.filename)
        await font_attachment.save(new_path)
        embed_data = await self.bot.make_generic_embed(title="Added new Font", description=f"Font `{font_attachment.filename}` was successfully saved!",
                                                       thumbnail="save")
        await ctx.send(**embed_data, delete_after=300)
    async def _make_font_preview(self, font_name, font_path):
        """Render the font's own name onto a transparent 512x512 canvas as a preview image."""
        b_image = Image.new('RGBA', (512, 512), color=(256, 256, 256, 0))
        image_font = await asyncio.to_thread(make_perfect_fontsize, font_path, font_name, 512, 512)
        preview_image = await asyncio.to_thread(self.draw_text_center, b_image, font_name, image_font)
        return preview_image
    @auto_meta_info_command()
    @allowed_channel_and_allowed_role()
    async def list_fonts(self, ctx: commands.Context):
        # posts a preview embed per installed font; everything self-deletes after 60s
        embed = discord.Embed(title="Available Fonts")
        await ctx.send(embed=embed, delete_after=60)
        for font_name, font_path in self.fonts.items():
            embed_data = await self.bot.make_generic_embed(title=font_name, image=await self._make_font_preview(font_name, font_path), thumbnail=None)
            await ctx.send(**embed_data, delete_after=60)
        await asyncio.sleep(60)
        await ctx.message.delete()
    # endregion[Commands]
    # region [HelperMethods]
    def _get_nato_symbol_parts(self):
        """Rebuild the NATO-symbol-part index (UPPER_CASE name -> file path) from disk."""
        self.nato_symbol_parts_images = {}
        for file in os.scandir(self.nato_symbol_parts_location):
            if os.path.isfile(file.path) is True:
                name = file.name.split('.')[0].replace(' ', '_').strip().upper()
                self.nato_symbol_parts_images[name] = file.path
    def _get_stamps(self):
        """Rebuild the stamp index (UPPER_CASE name -> file path) from disk."""
        self.stamps = {}
        for file in os.scandir(self.stamp_location):
            if os.path.isfile(file.path) is True:
                name = file.name.split('.')[0].replace(' ', '_').strip().upper()
                self.stamps[name] = file.path
    def _get_stamp_image(self, stamp_name, stamp_opacity):
        """Load a stamp by name and scale its alpha channel by `stamp_opacity` (0..1)."""
        stamp_name = stamp_name.upper()
        image = Image.open(self.stamps.get(stamp_name))
        # NOTE(review): band index 3 assumes the stamp file has an alpha channel
        # (RGBA) — a plain RGB stamp would raise IndexError here; confirm all
        # stamp files ship with alpha.
        alpha = image.split()[3]
        alpha = ImageEnhance.Brightness(alpha).enhance(stamp_opacity)
        image.putalpha(alpha)
        return image.copy()
    @staticmethod
    def _stamp_resize(input_image, stamp_image, factor):
        """Resize the stamp so its area is roughly `factor` of the input image, keeping aspect ratio."""
        input_image_width, input_image_height = input_image.size
        input_image_width_fractioned = input_image_width * factor
        input_image_height_fractioned = input_image_height * factor
        transform_factor_width = input_image_width_fractioned / stamp_image.size[0]
        transform_factor_height = input_image_height_fractioned / stamp_image.size[1]
        # average of width- and height-derived scale factors
        transform_factor = (transform_factor_width + transform_factor_height) / 2
        return stamp_image.resize((round(stamp_image.size[0] * transform_factor), round(stamp_image.size[1] * transform_factor)), resample=Image.LANCZOS)
    def _to_bottom_right(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp into the bottom-right corner, honoring stamp_margin."""
        log.debug('pasting image to bottom_right')
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image
    def _to_top_right(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp into the top-right corner."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image
    def _to_center_right(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp at the vertical center of the right edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image
    def _to_bottom_left(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp into the bottom-left corner."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image
    def _to_top_left(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp into the top-left corner."""
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image
    def _to_center_left(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp at the vertical center of the left edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image
    def _to_center_center(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp dead-center."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image
    def _to_top_center(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp at the horizontal center of the top edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image
    def _to_bottom_center(self, input_image, stamp_image, factor):
        """Paste the (resized) stamp at the horizontal center of the bottom edge."""
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image
    async def _send_image(self, ctx, image, name, message_title, message_text=None, image_format=None, delete_after=None):
        """Serialize `image` in-memory and send it to `ctx` wrapped in a styled embed."""
        image_format = 'png' if image_format is None else image_format
        with BytesIO() as image_binary:
            image.save(image_binary, image_format.upper(), optimize=True)
            image_binary.seek(0)
            file = discord.File(fp=image_binary, filename=name.replace('_', '') + '.' + image_format)
            embed = discord.Embed(title=message_title, description=message_text, color=self.support.cyan.discord_color, timestamp=datetime.now(tz=timezone("Europe/Berlin")), type='image')
            embed.set_author(name='AntiPetros', icon_url="https://www.der-buntspecht-shop.de/wp-content/uploads/Baumwollstoff-Camouflage-olivegruen-2.jpg")
            embed.set_image(url=f"attachment://{name.replace('_','')}.{image_format}")
            if delete_after is not None:
                embed.add_field(name='This Message will self destruct', value=f"in {alt_seconds_to_pretty(delete_after)}")
            await ctx.send(embed=embed, file=file, delete_after=delete_after)
    async def _member_avatar_helper(self, user: discord.Member, placement: callable, opacity: float):
        """Fetch `user`'s avatar and stamp the ASLOGO onto it with the given placement function."""
        avatar_image = await self.get_avatar_from_user(user)
        stamp = self._get_stamp_image('ASLOGO', opacity)
        modified_avatar = await asyncio.to_thread(placement, avatar_image, stamp, self.avatar_stamp_fraction)
        return modified_avatar
    async def _normalize_pos(self, pos: str):
        """
        Map a user-supplied position word or alias to its canonical form.

        Raises ParameterError when the word matches neither a canonical
        position nor any known alias.
        """
        pos = pos.casefold()
        # FIX: a word that is already canonical ('top', 'left', ...) previously fell
        # through the `not in` branch and implicitly returned None, which made
        # by_place crash on valid input like "top right".
        if pos in self.position_normalization_table:
            return pos
        for key, value in self.position_normalization_table.items():
            if pos in value:
                return key
        raise ParameterError('image_position', pos)
    async def get_avatar_from_user(self, user):
        """Download `user`'s avatar to a temp file and return it as an RGB PIL image."""
        avatar = user.avatar_url
        temp_dir = TemporaryDirectory()
        temp_file = pathmaker(temp_dir.name, 'user_avatar.png')
        log.debug("Tempfile '%s' created", temp_file)
        await avatar.save(temp_file)
        avatar_image = Image.open(temp_file)
        avatar_image = avatar_image.copy()  # detach from the file before the temp dir is removed
        avatar_image = avatar_image.convert('RGB')
        temp_dir.cleanup()
        return avatar_image
    def map_image_handling(self, base_image, marker_name, color, bytes_out):
        """Recolor the named outpost marker and composite it onto the map image."""
        # NOTE(review): self.outpost_overlay is only assigned in commented-out
        # __init__ code above — calling this method as-is raises AttributeError;
        # confirm whether the map feature is meant to be disabled.
        log.debug("creating changed map, changed_location: '%s', changed_color: '%s'", marker_name, color)
        marker_image = self.outpost_overlay.get(marker_name)
        marker_alpha = marker_image.getchannel('A')
        marker_image = Image.new('RGBA', marker_image.size, color=color)
        marker_image.putalpha(marker_alpha)
        base_image.paste(marker_image, mask=marker_alpha)
        base_image.save(bytes_out, 'PNG', optimize=True)
        bytes_out.seek(0)
        return base_image, bytes_out
    def draw_text_line(self, image: Image, text_line: str, top_space: int, in_font: ImageFont.FreeTypeFont):
        """Draw one horizontally-centered text line at vertical offset `top_space`; return (image, next offset)."""
        width, height = image.size
        pfont = in_font
        draw = ImageDraw.Draw(image)
        w, h = draw.textsize(text_line, font=pfont)
        draw.text(((width - w) / 2, h + top_space), text_line, fill=(0, 0, 0), stroke_width=width // 150, stroke_fill=(50, 200, 25), font=pfont)
        return image, top_space + h + (height // 20)
    def draw_text_center(self, image: Image, text: str, in_font: ImageFont.FreeTypeFont):
        """Draw `text` centered both horizontally and vertically on `image`."""
        width, height = image.size
        pfont = in_font
        draw = ImageDraw.Draw(image)
        w, h = draw.textsize(text, font=pfont)
        draw.text(((width - w) / 2, (height - h) / 2), text, fill=(0, 0, 0), stroke_width=width // 150, stroke_fill=(204, 255, 204), font=pfont)
        return image
    # endregion[HelperMethods]
    # region [SpecialMethods]
    def __repr__(self):
        return f"{self.__class__.__name__}({self.bot.__class__.__name__})"
    def __str__(self):
        return self.qualified_name
    # def cog_unload(self):
    #     log.debug("Cog '%s' UNLOADED!", str(self))
    # endregion[SpecialMethods]
def setup(bot):
    """
    Mandatory function to add the Cog to the bot.
    """
    cog = ImageManipulationCog(bot)
    bot.add_cog(cog)
| 30,898 | 9,595 |
# Registers people's names and weights until the user stops, then reports how
# many people were registered and who had the highest and lowest weight.
reserva = list()    # temporary [name, weight] record for the current entry
principal = list()  # all registered [name, weight] records
maior = menor = 0
while True:
    reserva.append(str(input('Nome: ')))
    reserva.append(float(input('Peso: ')))
    if len(principal) == 0:
        # first entry initialises both extremes
        maior = menor = reserva[1]
    else:
        if reserva[1] > maior:
            maior = reserva[1]
        if reserva[1] < menor:
            menor = reserva[1]
    principal.append(reserva[:])  # append a copy, since reserva is cleared next
    reserva.clear()
    d = ''
    # FIX: slicing with [:1] instead of indexing [0] avoids the IndexError the
    # original raised when the user just pressed Enter; tuple membership keeps
    # re-prompting until a real 'S' or 'N' arrives.
    while d not in ('S', 'N'):
        d = str(input('Deseja Continuar [S/N]: ')).strip().upper()[:1]
    if d == 'N':
        break
print('-='*20)
print(f'Ao todo {len(principal)} pessoas foram inscrito')
print(f'O maior peso foi {maior}. Peso de ', end='')
for c in principal:
    if c[1] == maior:
        print(f'{c[0]}' , end='')
print()
print(f'O menor peso foi {menor}. Peso de ', end='')
for c in principal:
    if c[1] == menor:
        print(f'{c[0]}' , end='')
print()
| 885 | 336 |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
def derive_end_activities_from_log(log, activity_key):
    """
    Derive end activities from log

    Parameters
    -----------
    log
        Log object
    activity_key
        Activity key

    Returns
    -----------
    e
        End activities
    """
    # the end activity of a trace is the activity of its last event
    return {trace[-1][activity_key] for trace in log if len(trace) > 0 and activity_key in trace[-1]}
def derive_start_activities_from_log(log, activity_key):
    """
    Derive start activities from log

    Parameters
    -----------
    log
        Log object
    activity_key
        Activity key

    Returns
    -----------
    s
        Start activities
    """
    # the start activity of a trace is the activity of its first event
    return {trace[0][activity_key] for trace in log if len(trace) > 0 and activity_key in trace[0]}
| 1,560 | 483 |
from .colors import Colorize
from .banners import banner
def inputc(text: str, foreground_color: str = 'default', background_color: str = 'default') -> str:
    """Prompt the user with `text` rendered in the given colors and return the typed reply."""
    colored_prompt = Colorize(text, foreground_color, background_color)
    return input(colored_prompt)
def input_banner(text: str, simbol: str = '-', size: int = 50, text_foreground_color: str = 'default',
                 text_background_color: str = 'default', line_foreground_color: str = 'default',
                 line_background_color: str = 'default') -> str:
    """Print a colored banner above a `>>> ` prompt and return the user's reply."""
    banner(text, simbol, size, text_foreground_color, text_background_color,
           line_foreground_color, line_background_color)
    prompt_reply = inputc('>>> ', text_foreground_color, text_background_color)
    return prompt_reply
| 691 | 211 |
import boto3
from Bootstrap.add_record_id import lambda_handler
from moto import mock_s3
@mock_s3
def test_add_record_id():
    """lambda_handler rewrites the manifest in place, assigning a sequential `id` to every record."""
    original_manifest = b'{"source":"Fed revises guidelines sending stocks up."}\n{"source": "Review Guardians of the Galaxy"}'
    s3_resource = boto3.resource("s3", region_name="us-east-1")
    s3_resource.create_bucket(Bucket="source_bucket")
    s3_resource.Object("source_bucket", "input.manifest").put(Body=original_manifest)
    result = lambda_handler({"ManifestS3Uri": "s3://source_bucket/input.manifest"}, {})
    expected_manifest = b'{"source": "Fed revises guidelines sending stocks up.", "id": 0}\n{"source": "Review Guardians of the Galaxy", "id": 1}\n'
    stored_body = s3_resource.Object("source_bucket", "input.manifest").get()["Body"].read()
    assert stored_body == expected_manifest
    assert result["ManifestS3Uri"] == "s3://source_bucket/input.manifest"
| 926 | 321 |
import redis
# Connection settings for the shared Redis cache.
# NOTE(review): host/port are hard-coded and the password is empty — presumably
# a development setup; confirm before using in production.
REDIS_CACHE = {
    "host": '192.168.196.129',
    "port": '6379',
    "db": 0,
    "password": ''
}
class RedisClient(redis.StrictRedis):
    """
    StrictRedis subclass that hands out one shared instance per distinct
    constructor-argument combination (singleton pattern).
    """
    # cache of already-created instances, keyed by str() of the positional args
    _instance = {}

    def __init__(self, server):
        redis.StrictRedis.__init__(self, **server)

    def __new__(cls, *args):
        cache_key = str(args)
        if cache_key not in cls._instance:
            cls._instance[cache_key] = super(RedisClient, cls).__new__(cls)
        return cls._instance[cache_key]
# NOTE(review): rebinding the name `redis` shadows the imported redis module
# for the rest of this file — consider renaming (e.g. `redis_client`); kept
# as-is because other modules may already do `from <this module> import redis`.
redis = RedisClient(REDIS_CACHE)
| 523 | 207 |
from fastapi.testclient import TestClient
from app.core.config import settings
def test_get_transactions(prep_fixtures, client: TestClient) -> None:
    """The transactions listing honours its default and an explicit `limit`."""
    for query, expected_count in (("", 1), ("?limit=6", 6)):
        resp = client.get(f"{settings.PREFIX}/dex/transactions{query}")
        payload = resp.json()
        assert resp.status_code == 200
        assert payload
        assert len(payload) == expected_count
def test_get_transactions_by_method(prep_fixtures, client: TestClient) -> None:
    """Filtering transactions by method works for both methods, with and without `limit`."""
    cases = (
        ("add", "", 1),
        ("add", "?limit=4", 4),
        ("remove", "", 1),
        ("remove", "?limit=2", 2),
    )
    for method, query, expected_count in cases:
        resp = client.get(f"{settings.PREFIX}/dex/transactions/{method}{query}")
        payload = resp.json()
        assert resp.status_code == 200
        assert payload
        assert len(payload) == expected_count
def test_get_logs(prep_fixtures, client: TestClient) -> None:
    """The logs listing honours its default and an explicit `limit`."""
    for query, expected_count in (("", 1), ("?limit=3", 3)):
        resp = client.get(f"{settings.PREFIX}/dex/logs{query}")
        payload = resp.json()
        assert resp.status_code == 200
        assert payload
        assert len(payload) == expected_count
def test_get_logs_by_method(prep_fixtures, client: TestClient) -> None:
    """Filtering logs by event method works for both events, with and without `limit`."""
    cases = (
        ("TransferSingle", "", 1),
        ("TransferSingle", "?limit=1", 1),
        ("Swap", "", 1),
        ("Swap", "?limit=1", 1),
    )
    for method, query, expected_count in cases:
        resp = client.get(f"{settings.PREFIX}/dex/logs/{method}{query}")
        payload = resp.json()
        assert resp.status_code == 200
        assert payload
        assert len(payload) == expected_count
def test_get_stats(prep_fixtures, client: TestClient) -> None:
    """Stats for a valid market id return 200 with a non-empty payload."""
    resp = client.get(f"{settings.PREFIX}/dex/stats/1")
    payload = resp.json()
    assert resp.status_code == 200
    assert payload
def test_get_stats_invalid_market_id(prep_fixtures, client: TestClient) -> None:
    """A non-numeric market id is rejected with 400 and an error payload."""
    resp = client.get(f"{settings.PREFIX}/dex/stats/bad-market-id")
    payload = resp.json()
    assert resp.status_code == 400  # Bad request
    assert payload
def test_get_balance_of(prep_fixtures, client: TestClient) -> None:
    """A valid address/token-id pair returns 200 with a non-empty payload."""
    # Fine to leave request as constant since the blockchain is immutable
    resp = client.get(f"{settings.PREFIX}/dex/balance-of/hxe7af5fcfd8dfc67530a01a0e403882687528dfcb/2")
    payload = resp.json()
    assert resp.status_code == 200
    assert payload
def test_get_balance_of_invalid_address(prep_fixtures, client: TestClient) -> None:
    """A malformed address is rejected with 400 and an error payload."""
    resp = client.get(f"{settings.PREFIX}/dex/balance-of/0xbadaddress/2")
    payload = resp.json()
    assert resp.status_code == 400  # Bad request
    assert payload
def test_get_swap_chart_5m(prep_fixtures, client: TestClient) -> None:
    """5m buckets: each candle is (timestamp, open, close, high, low, volume)."""
    resp = client.get(f"{settings.PREFIX}/dex/swap-chart/0/5m/0/1000000000")
    candles = resp.json()
    assert resp.status_code == 200
    assert len(candles) == 4
    expected = [
        (0, 1, 3, 3, 1, 3),
        (300000000, 3, 6, 6, 3, 3),
        (600000000, 6, 6, 6, 6, 0),
        (900000000, 6, 9, 9, 6, 3),
    ]
    for row, values in enumerate(expected):
        for col, value in enumerate(values):
            assert candles[row][col] == value
def test_get_swap_chart_15m(prep_fixtures, client: TestClient) -> None:
    """15m buckets: each candle is (timestamp, open, close, high, low, volume)."""
    resp = client.get(f"{settings.PREFIX}/dex/swap-chart/1/15m/0/3000000000")
    candles = resp.json()
    assert resp.status_code == 200
    assert len(candles) == 4
    expected = [
        (0, 1, 3, 3, 1, 3),
        (900000000, 3, 6, 6, 3, 3),
        (1800000000, 6, 6, 6, 6, 0),
        (2700000000, 6, 9, 9, 6, 3),
    ]
    for row, values in enumerate(expected):
        for col, value in enumerate(values):
            assert candles[row][col] == value
def test_get_swap_chart_1h(prep_fixtures, client: TestClient) -> None:
    """1h buckets: each candle is (timestamp, open, close, high, low, volume)."""
    resp = client.get(f"{settings.PREFIX}/dex/swap-chart/2/1h/0/20000000000")
    candles = resp.json()
    assert resp.status_code == 200
    assert len(candles) == 6
    # NOTE(review): six candles are returned but only the first four are
    # verified, matching the original test — confirm whether rows 4-5 should
    # also be checked.
    expected = [
        (0, 1, 3, 3, 1, 3),
        (3600000000, 3, 6, 6, 3, 3),
        (7200000000, 6, 6, 6, 6, 0),
        (10800000000, 6, 9, 9, 6, 3),
    ]
    for row, values in enumerate(expected):
        for col, value in enumerate(values):
            assert candles[row][col] == value
def test_get_swap_chart_4h(prep_fixtures, client: TestClient) -> None:
    """Swap chart for pair 3 at 4h resolution returns the expected rows."""
    r = client.get(f"{settings.PREFIX}/dex/swap-chart/3/4h/0/50000000000")
    response = r.json()
    assert r.status_code == 200
    assert len(response) == 4
    # Cell-by-cell check: column 0 is the bucket start (buckets are
    # 14400000000 apart here), columns 1-5 are the remaining expected values.
    expected_rows = [
        [0, 1, 3, 3, 1, 3],
        [14400000000, 3, 6, 6, 3, 3],
        [28800000000, 6, 6, 6, 6, 0],
        [43200000000, 6, 9, 9, 6, 3],
    ]
    for row_idx, expected in enumerate(expected_rows):
        for col_idx, value in enumerate(expected):
            assert response[row_idx][col_idx] == value
def test_get_swap_chart_1d(prep_fixtures, client: TestClient) -> None:
    """Swap chart for pair 4 at 1d resolution returns the expected rows."""
    r = client.get(f"{settings.PREFIX}/dex/swap-chart/4/1d/0/300000000000")
    response = r.json()
    assert r.status_code == 200
    assert len(response) == 4
    # Cell-by-cell check: column 0 is the bucket start (buckets are
    # 86400000000 apart here), columns 1-5 are the remaining expected values.
    expected_rows = [
        [0, 1, 3, 3, 1, 3],
        [86400000000, 3, 6, 6, 3, 3],
        [172800000000, 6, 6, 6, 6, 0],
        [259200000000, 6, 9, 9, 6, 3],
    ]
    for row_idx, expected in enumerate(expected_rows):
        for col_idx, value in enumerate(expected):
            assert response[row_idx][col_idx] == value
| 8,517 | 3,347 |
#!/usr/bin/python2.7
import json
import sys
class DecodeMouseData(object):
    """Parse mouse-tracking session JSON and summarise clicks and duration.

    Each session file is a JSON object whose values carry a ``"t"``
    millisecond timestamp (one entry per recorded click).
    """

    def decode(self, jsonString):
        """Return the parsed JSON object for jsonString."""
        return json.loads(jsonString)

    def getNumberOfClicks(self, jsonString):
        """Return the number of recorded clicks (top-level JSON entries)."""
        return len(self.decode(jsonString))

    def getSessionDuration(self, jsonString):
        """Return the session length in minutes.

        Takes the span between the smallest and largest "t" timestamp
        (milliseconds) and converts it to minutes.
        """
        decoded = self.decode(jsonString)
        times = [x["t"] for x in decoded.values()]
        return (max(times) - min(times)) / 60000.0

    def getFilesTimeClickDict(self, files):
        """Map each filename to a (duration_minutes, click_count) tuple.

        Prints each filename as it is processed (progress feedback).
        """
        timeAndClicks = dict()
        for jsonFile in files:
            with open(jsonFile) as f:
                jsonString = f.read()
                filename = f.name
            # print() call form works on both Python 2 and 3
            # (original used a Python-2-only print statement).
            print(filename)
            timeAndClicks[filename] = (self.getSessionDuration(jsonString),
                                       self.getNumberOfClicks(jsonString))
        return timeAndClicks

    def getDictPrettyPrint(self, timeAndClicks):
        """Render timeAndClicks as a fixed-width text table (no trailing newline)."""
        tablelines = "-" * 11 + " " + "-" * 10
        rows = [tablelines, "Time (min) Clicks", tablelines]
        for filename in timeAndClicks:
            duration, clicks = timeAndClicks[filename]
            rows.append("%10f %10d" % (duration, clicks))
        # join avoids the quadratic string concatenation of the original;
        # the closing rule line intentionally has no trailing newline.
        return "\n".join(rows) + "\n" + tablelines
if __name__ == "__main__":
    dmd = DecodeMouseData()
    # Compute the per-file stats ONCE and reuse them; the original called
    # getFilesTimeClickDict twice, re-reading every file and printing every
    # filename a second time.
    timeAndClicks = dmd.getFilesTimeClickDict(sys.argv[1:])
    print(dmd.getDictPrettyPrint(timeAndClicks))
    total = sum(clicks for _, clicks in timeAndClicks.values())
    print("TOTAL: ", total)
| 1,785 | 546 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from telemetry.core import platform as telemetry_platform
from telemetry.internal.platform import cast_device
from telemetry.internal.platform import platform_backend
class CastPlatformBackend(platform_backend.PlatformBackend):
  """Telemetry platform backend for Cast devices.

  Most capability queries are stubbed: unsupported operations raise
  NotImplementedError or return a fixed value.
  """
  def __init__(self, device):
    super(CastPlatformBackend, self).__init__(device)
    # Cached from the CastDevice at construction time.
    self._output_dir = device.output_dir
    self._runtime_exe = device.runtime_exe
  @classmethod
  def SupportsDevice(cls, device):
    """Return True iff |device| is a cast_device.CastDevice."""
    return isinstance(device, cast_device.CastDevice)
  @classmethod
  def CreatePlatformForDevice(cls, device, finder_options):
    """Wrap a new backend for |device| in a telemetry Platform."""
    assert cls.SupportsDevice(device)
    return telemetry_platform.Platform(CastPlatformBackend(device))
  @property
  def output_dir(self):
    # Directory recorded from the device; see __init__.
    return self._output_dir
  @property
  def runtime_exe(self):
    # Runtime executable path recorded from the device; see __init__.
    return self._runtime_exe
  def IsRemoteDevice(self):
    return False
  def GetArchName(self):
    # Placeholder string: architecture detection is not implemented for Cast.
    return 'Arch type of device not yet supported in Cast'
  def GetOSName(self):
    return 'castos'
  def GetDeviceTypeName(self):
    return 'Cast Device'
  def GetOSVersionName(self):
    return ''
  def GetOSVersionDetailString(self):
    return 'CastOS'
  def GetSystemTotalPhysicalMemory(self):
    raise NotImplementedError()
  def HasBeenThermallyThrottled(self):
    return False
  def IsThermallyThrottled(self):
    return False
  def InstallApplication(self, application):
    raise NotImplementedError()
  def LaunchApplication(self, application, parameters=None,
                        elevate_privilege=False):
    raise NotImplementedError()
  def PathExists(self, path, timeout=None, retries=None):
    raise NotImplementedError()
  def CanFlushIndividualFilesFromSystemCache(self):
    return False
  def FlushEntireSystemCache(self):
    # No-op: cache flushing is not supported on Cast.
    return None
  def FlushSystemCacheForDirectory(self, directory):
    # No-op: cache flushing is not supported on Cast.
    return None
  def StartActivity(self, intent, blocking):
    raise NotImplementedError()
  def CooperativelyShutdown(self, proc, app_name):
    return False
  def SupportFlushEntireSystemCache(self):
    return False
  def StartDisplayTracing(self):
    raise NotImplementedError()
  def StopDisplayTracing(self):
    raise NotImplementedError()
  def TakeScreenshot(self, file_path):
    # Screenshots unsupported; returns None without writing |file_path|.
    return None
  def GetTypExpectationsTags(self):
    """Return the base tags plus this backend's device-type tag."""
    tags = super(CastPlatformBackend, self).GetTypExpectationsTags()
    tags.append(self.GetDeviceTypeName())
    return tags
| 2,604 | 785 |
from selenium.webdriver.common.by import By
from utilities import find_all_contains_text
from utilities import find_one_present
from utilities import wait
class Homepage:
    """Page object for the site's landing page (search box interactions)."""

    def __init__(self, driver, base_url, timeout=10):
        self.driver = driver
        self.base_url = base_url
        # Shared explicit-wait helper, bounded by `timeout` seconds.
        self.wait = wait(driver, timeout)

    def open(self):
        """Navigate the browser to the base URL."""
        self.driver.get(f"{self.base_url}")

    def type_search(self, term):
        """Type `term` into the search box and return matching suggestions."""
        box = find_one_present(self.driver, "//input[@name='q']", "xpath")
        box.send_keys(term)
        suggestions = find_all_contains_text(
            term, self.driver, "//ul[@role='listbox']//li", "xpath")
        return suggestions

    def clear_search(self):
        """Empty the search input field."""
        field = self.driver.find_element(By.XPATH, "//input[@name='q']")
        field.clear()
| 828 | 260 |
"""Test the annotation of CNVs"""
import io
import sys
import unittest
import pickle
import pathlib
import yaml
import tadacnv.lib.utils as utils
import tadacnv.lib.preprocessing as preprocessing
from tadacnv.annotate_cnvs import annotate
class CnvAnnotationTest(unittest.TestCase):
    """Test class for the annotation of CNVs"""
    def test_annotation(self):
        """Run the annotation pipeline end-to-end on the test config."""
        # Capture stdout so the pipeline's prints don't pollute test output.
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        # read config file
        with pathlib.Path("tests/test_config_cnvs.yml").open() as ymlfile:
            cfg = yaml.load(ymlfile, Loader=yaml.Loader)
        output_dir = "tests/test_data"
        annotated_cnvs = annotate(cfg)
        #save annotated cnvs
        output_path = pathlib.Path(output_dir)
        for label, cnvs in annotated_cnvs.items():
            with open(output_path / f'Annotated_{label}.p', "wb") as output:
                pickle.dump(cnvs, output)
        # NOTE(review): 'label' and 'cnvs' below leak out of the loop above,
        # so only the LAST label's CNVs are exported to CSV — confirm intended.
        feature_labels = ['Number of affected Genes','Number of affected Enhancers','Boundary Distance', 'Boundary Stability', 'Gene Distance', 'Enhancer Distance', 'DDG2P Distance', 'Gene LOEUF','Enhancer conservation', 'Gene HI', 'CTCF Distance', 'HI LogOdds Score', 'Exon Overlap', 'MPOI']
        feature_df = preprocessing.create_feature_df(cnvs,cfg['FEATURES'],feature_labels,csv=True)
        feature_df.to_csv(output_path / f'Annotated_{label}.csv',sep='\t',header=True,index=False)
        # Restore stdout before asserting.
        sys.stdout = sys.__stdout__
        self.assertEqual(len(annotated_cnvs['TEST_PATHOGENIC']['chr2'][0].tads),1,'Annotation of TADs does not work!')
        self.assertEqual(len(annotated_cnvs['TEST_PATHOGENIC']['chr2'][0].tads[0].annotations['GENES']),17,'Genes are not transferred to the CNV object!')
| 1,731 | 580 |
#
# PySNMP MIB module BTI7800-INVENTORY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BTI7800-INVENTORY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:24:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, IpAddress, MibIdentifier, Unsigned32, Gauge32, NotificationType, Bits, Counter32, Counter64, iso, TimeTicks, ModuleIdentity, ObjectIdentity, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "IpAddress", "MibIdentifier", "Unsigned32", "Gauge32", "NotificationType", "Bits", "Counter32", "Counter64", "iso", "TimeTicks", "ModuleIdentity", "ObjectIdentity", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DateAndTime, RowStatus, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DateAndTime", "RowStatus", "DisplayString", "TextualConvention", "TruthValue")
# Module identity for BTI7800-INVENTORY-MIB (pysmi-generated; enterprise
# subtree 1.3.6.1.4.1.18070.2.9.3.2).
bTI7800_INVENTORY_MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2)).setLabel("bTI7800-INVENTORY-MIB")
bTI7800_INVENTORY_MIB.setRevisions(('2014-12-23 00:00',))
if mibBuilder.loadTexts: bTI7800_INVENTORY_MIB.setLastUpdated('201412230000Z')
if mibBuilder.loadTexts: bTI7800_INVENTORY_MIB.setOrganization('@ORGANIZATION')
# Textual convention: unsigned 8-bit integer (0..255), displayed in decimal.
class UnsignedByte(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 255)
# Textual convention: unsigned 16-bit integer (0..65535), displayed in decimal.
class UnsignedShort(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 65535)
# Textual convention: free-form octet string rendered as text ('1t').
class ConfdString(TextualConvention, OctetString):
    status = 'current'
    displayHint = '1t'
# Textual convention: IP address as raw octets — 4 bytes (IPv4) or 16 (IPv6).
class InetAddressIP(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), )
# Textual convention: generic text string rendered as text ('1t').
class String(TextualConvention, OctetString):
    status = 'current'
    displayHint = '1t'
# Index type for BIC subslots: 1..2.
class BicIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 2)
# Index type for fan slots: 1..6.
class FanIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 6)
# Index type for PEM (power entry module) slots: 1..4.
class PemIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4)
# Index type for CMM slots: 1..2.
class CmmIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 2)
# Index type for ports: 1..12.
class PortIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 12)
# Index type for module slots: 1..14.
class ModuleIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 14)
# Index type for chassis numbers: 1..16.
class ChassisIndexT(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 16)
# --- Chassis inventory table (indexed by chassis number) ---
inventory_chassisTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1), ).setLabel("inventory-chassisTable")
if mibBuilder.loadTexts: inventory_chassisTable.setStatus('current')
inventory_chassisEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1), ).setLabel("inventory-chassisEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-chassisChassisNum"))
if mibBuilder.loadTexts: inventory_chassisEntry.setStatus('current')
inventory_chassisChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 1), ChassisIndexT()).setLabel("inventory-chassisChassisNum")
if mibBuilder.loadTexts: inventory_chassisChassisNum.setStatus('current')
inventory_chassisName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 2), String()).setLabel("inventory-chassisName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisName.setStatus('current')
inventory_chassisPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 3), String()).setLabel("inventory-chassisPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisPEC.setStatus('current')
inventory_chassisRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 4), UnsignedShort()).setLabel("inventory-chassisRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisRevision.setStatus('current')
inventory_chassisSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 5), String()).setLabel("inventory-chassisSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisSerialNumber.setStatus('current')
inventory_chassisManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 6), DateAndTime()).setLabel("inventory-chassisManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisManufactureDate.setStatus('current')
inventory_chassisVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 1, 1, 7), String()).setLabel("inventory-chassisVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_chassisVendor.setStatus('current')
# --- Fan inventory table (indexed by chassis number, fan slot) ---
inventory_fanTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2), ).setLabel("inventory-fanTable")
if mibBuilder.loadTexts: inventory_fanTable.setStatus('current')
inventory_fanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1), ).setLabel("inventory-fanEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-fanChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-fanSlotNum"))
if mibBuilder.loadTexts: inventory_fanEntry.setStatus('current')
inventory_fanChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 1), ChassisIndexT()).setLabel("inventory-fanChassisNum")
if mibBuilder.loadTexts: inventory_fanChassisNum.setStatus('current')
inventory_fanSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 2), FanIndexT()).setLabel("inventory-fanSlotNum")
if mibBuilder.loadTexts: inventory_fanSlotNum.setStatus('current')
inventory_fanName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 3), String()).setLabel("inventory-fanName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanName.setStatus('current')
inventory_fanPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 4), String()).setLabel("inventory-fanPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanPEC.setStatus('current')
inventory_fanRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 5), UnsignedShort()).setLabel("inventory-fanRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanRevision.setStatus('current')
inventory_fanSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 6), String()).setLabel("inventory-fanSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanSerialNumber.setStatus('current')
inventory_fanManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 7), DateAndTime()).setLabel("inventory-fanManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanManufactureDate.setStatus('current')
inventory_fanVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 2, 1, 8), String()).setLabel("inventory-fanVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_fanVendor.setStatus('current')
# --- PEM (power entry module) inventory table (indexed by chassis, PEM slot) ---
inventory_pemTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3), ).setLabel("inventory-pemTable")
if mibBuilder.loadTexts: inventory_pemTable.setStatus('current')
inventory_pemEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1), ).setLabel("inventory-pemEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-pemChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-pemSlotNum"))
if mibBuilder.loadTexts: inventory_pemEntry.setStatus('current')
inventory_pemChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 1), ChassisIndexT()).setLabel("inventory-pemChassisNum")
if mibBuilder.loadTexts: inventory_pemChassisNum.setStatus('current')
inventory_pemSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 2), PemIndexT()).setLabel("inventory-pemSlotNum")
if mibBuilder.loadTexts: inventory_pemSlotNum.setStatus('current')
inventory_pemName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 3), String()).setLabel("inventory-pemName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemName.setStatus('current')
inventory_pemPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 4), String()).setLabel("inventory-pemPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemPEC.setStatus('current')
inventory_pemRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 5), UnsignedShort()).setLabel("inventory-pemRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemRevision.setStatus('current')
inventory_pemSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 6), String()).setLabel("inventory-pemSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemSerialNumber.setStatus('current')
inventory_pemManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 7), DateAndTime()).setLabel("inventory-pemManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemManufactureDate.setStatus('current')
inventory_pemVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 3, 1, 8), String()).setLabel("inventory-pemVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_pemVendor.setStatus('current')
# --- CMM inventory table (indexed by chassis, CMM slot) ---
inventory_cmmTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4), ).setLabel("inventory-cmmTable")
if mibBuilder.loadTexts: inventory_cmmTable.setStatus('current')
inventory_cmmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1), ).setLabel("inventory-cmmEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-cmmChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-cmmSlotNum"))
if mibBuilder.loadTexts: inventory_cmmEntry.setStatus('current')
inventory_cmmChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 1), ChassisIndexT()).setLabel("inventory-cmmChassisNum")
if mibBuilder.loadTexts: inventory_cmmChassisNum.setStatus('current')
inventory_cmmSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 2), CmmIndexT()).setLabel("inventory-cmmSlotNum")
if mibBuilder.loadTexts: inventory_cmmSlotNum.setStatus('current')
inventory_cmmName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 3), String()).setLabel("inventory-cmmName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmName.setStatus('current')
inventory_cmmPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 4), String()).setLabel("inventory-cmmPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmPEC.setStatus('current')
inventory_cmmRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 5), UnsignedShort()).setLabel("inventory-cmmRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmRevision.setStatus('current')
inventory_cmmSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 6), String()).setLabel("inventory-cmmSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmSerialNumber.setStatus('current')
inventory_cmmManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 7), DateAndTime()).setLabel("inventory-cmmManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmManufactureDate.setStatus('current')
inventory_cmmVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 4, 1, 8), String()).setLabel("inventory-cmmVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_cmmVendor.setStatus('current')
# --- Module inventory table (indexed by chassis, module slot) ---
inventory_moduleTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5), ).setLabel("inventory-moduleTable")
if mibBuilder.loadTexts: inventory_moduleTable.setStatus('current')
inventory_moduleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1), ).setLabel("inventory-moduleEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-moduleChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-moduleSlotNum"))
if mibBuilder.loadTexts: inventory_moduleEntry.setStatus('current')
inventory_moduleChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 1), ChassisIndexT()).setLabel("inventory-moduleChassisNum")
if mibBuilder.loadTexts: inventory_moduleChassisNum.setStatus('current')
inventory_moduleSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 2), ModuleIndexT()).setLabel("inventory-moduleSlotNum")
if mibBuilder.loadTexts: inventory_moduleSlotNum.setStatus('current')
inventory_moduleName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 3), String()).setLabel("inventory-moduleName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_moduleName.setStatus('current')
inventory_modulePEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 4), String()).setLabel("inventory-modulePEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_modulePEC.setStatus('current')
inventory_moduleRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 5), UnsignedShort()).setLabel("inventory-moduleRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_moduleRevision.setStatus('current')
inventory_moduleSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 6), String()).setLabel("inventory-moduleSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_moduleSerialNumber.setStatus('current')
inventory_moduleManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 7), DateAndTime()).setLabel("inventory-moduleManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_moduleManufactureDate.setStatus('current')
inventory_moduleVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 5, 1, 8), String()).setLabel("inventory-moduleVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_moduleVendor.setStatus('current')
# --- BIC inventory table (indexed by chassis, module slot, BIC subslot) ---
inventory_bicTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6), ).setLabel("inventory-bicTable")
if mibBuilder.loadTexts: inventory_bicTable.setStatus('current')
inventory_bicEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1), ).setLabel("inventory-bicEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-bicChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-bicSlotNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-bicSubslotNum"))
if mibBuilder.loadTexts: inventory_bicEntry.setStatus('current')
inventory_bicChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 1), ChassisIndexT()).setLabel("inventory-bicChassisNum")
if mibBuilder.loadTexts: inventory_bicChassisNum.setStatus('current')
inventory_bicSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 2), ModuleIndexT()).setLabel("inventory-bicSlotNum")
if mibBuilder.loadTexts: inventory_bicSlotNum.setStatus('current')
inventory_bicSubslotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 3), BicIndexT()).setLabel("inventory-bicSubslotNum")
if mibBuilder.loadTexts: inventory_bicSubslotNum.setStatus('current')
inventory_bicName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 4), String()).setLabel("inventory-bicName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicName.setStatus('current')
inventory_bicPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 5), String()).setLabel("inventory-bicPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicPEC.setStatus('current')
inventory_bicRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 6), UnsignedShort()).setLabel("inventory-bicRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicRevision.setStatus('current')
inventory_bicSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 7), String()).setLabel("inventory-bicSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicSerialNumber.setStatus('current')
inventory_bicManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 8), DateAndTime()).setLabel("inventory-bicManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicManufactureDate.setStatus('current')
inventory_bicVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 6, 1, 9), String()).setLabel("inventory-bicVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_bicVendor.setStatus('current')
# --- Transceiver (xcvr) inventory table (indexed by chassis, slot, subslot, port) ---
inventory_xcvrTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7), ).setLabel("inventory-xcvrTable")
if mibBuilder.loadTexts: inventory_xcvrTable.setStatus('current')
inventory_xcvrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1), ).setLabel("inventory-xcvrEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-xcvrChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-xcvrSlotNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-xcvrSubslotNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-xcvrPortNum"))
if mibBuilder.loadTexts: inventory_xcvrEntry.setStatus('current')
inventory_xcvrChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 1), ChassisIndexT()).setLabel("inventory-xcvrChassisNum")
if mibBuilder.loadTexts: inventory_xcvrChassisNum.setStatus('current')
inventory_xcvrSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 2), ModuleIndexT()).setLabel("inventory-xcvrSlotNum")
if mibBuilder.loadTexts: inventory_xcvrSlotNum.setStatus('current')
inventory_xcvrSubslotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 3), BicIndexT()).setLabel("inventory-xcvrSubslotNum")
if mibBuilder.loadTexts: inventory_xcvrSubslotNum.setStatus('current')
inventory_xcvrPortNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 4), PortIndexT()).setLabel("inventory-xcvrPortNum")
if mibBuilder.loadTexts: inventory_xcvrPortNum.setStatus('current')
inventory_xcvrName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 5), String()).setLabel("inventory-xcvrName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrName.setStatus('current')
inventory_xcvrPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 6), String()).setLabel("inventory-xcvrPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrPEC.setStatus('current')
inventory_xcvrRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 7), UnsignedShort()).setLabel("inventory-xcvrRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrRevision.setStatus('current')
inventory_xcvrSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 8), String()).setLabel("inventory-xcvrSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrSerialNumber.setStatus('current')
inventory_xcvrManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 9), DateAndTime()).setLabel("inventory-xcvrManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrManufactureDate.setStatus('current')
inventory_xcvrVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 10), String()).setLabel("inventory-xcvrVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrVendor.setStatus('current')
inventory_xcvrVendorPartNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 11), String()).setLabel("inventory-xcvrVendorPartNum").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrVendorPartNum.setStatus('current')
# Enumerated transceiver form factor (sfp/qsfp/cfp/msa/...).
inventory_xcvrType = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 7, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 0), ("sfp", 1), ("sfpPlus", 2), ("cfp", 3), ("msa", 4), ("qsfp", 5), ("qsfp28", 6), ("msa400", 7)))).setLabel("inventory-xcvrType").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_xcvrType.setStatus('current')
# --- Preamp inventory table (indexed by chassis, slot, subslot) ---
inventory_preampTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8), ).setLabel("inventory-preampTable")
if mibBuilder.loadTexts: inventory_preampTable.setStatus('current')
inventory_preampEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1), ).setLabel("inventory-preampEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-preampChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-preampSlotNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-preampSubslotNum"))
if mibBuilder.loadTexts: inventory_preampEntry.setStatus('current')
inventory_preampChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 1), ChassisIndexT()).setLabel("inventory-preampChassisNum")
if mibBuilder.loadTexts: inventory_preampChassisNum.setStatus('current')
inventory_preampSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 2), ModuleIndexT()).setLabel("inventory-preampSlotNum")
if mibBuilder.loadTexts: inventory_preampSlotNum.setStatus('current')
inventory_preampSubslotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 3), BicIndexT()).setLabel("inventory-preampSubslotNum")
if mibBuilder.loadTexts: inventory_preampSubslotNum.setStatus('current')
inventory_preampName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 4), String()).setLabel("inventory-preampName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampName.setStatus('current')
inventory_preampPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 5), String()).setLabel("inventory-preampPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampPEC.setStatus('current')
inventory_preampRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 6), UnsignedShort()).setLabel("inventory-preampRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampRevision.setStatus('current')
inventory_preampSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 7), String()).setLabel("inventory-preampSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampSerialNumber.setStatus('current')
inventory_preampManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 8), DateAndTime()).setLabel("inventory-preampManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampManufactureDate.setStatus('current')
inventory_preampVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 8, 1, 9), String()).setLabel("inventory-preampVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_preampVendor.setStatus('current')
# --- ESL inventory table (indexed by chassis, slot; slot is an UnsignedByte) ---
inventory_eslTable = MibTable((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9), ).setLabel("inventory-eslTable")
if mibBuilder.loadTexts: inventory_eslTable.setStatus('current')
inventory_eslEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1), ).setLabel("inventory-eslEntry").setIndexNames((0, "BTI7800-INVENTORY-MIB", "inventory-eslChassisNum"), (0, "BTI7800-INVENTORY-MIB", "inventory-eslSlotNum"))
if mibBuilder.loadTexts: inventory_eslEntry.setStatus('current')
inventory_eslChassisNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 1), ChassisIndexT()).setLabel("inventory-eslChassisNum")
if mibBuilder.loadTexts: inventory_eslChassisNum.setStatus('current')
inventory_eslSlotNum = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 2), UnsignedByte()).setLabel("inventory-eslSlotNum")
if mibBuilder.loadTexts: inventory_eslSlotNum.setStatus('current')
inventory_eslName = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 3), String()).setLabel("inventory-eslName").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslName.setStatus('current')
inventory_eslPEC = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 4), String()).setLabel("inventory-eslPEC").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslPEC.setStatus('current')
inventory_eslRevision = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 5), UnsignedShort()).setLabel("inventory-eslRevision").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslRevision.setStatus('current')
inventory_eslSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 6), String()).setLabel("inventory-eslSerialNumber").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslSerialNumber.setStatus('current')
inventory_eslManufactureDate = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 7), DateAndTime()).setLabel("inventory-eslManufactureDate").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslManufactureDate.setStatus('current')
inventory_eslVendor = MibScalar((1, 3, 6, 1, 4, 1, 18070, 2, 9, 3, 2, 9, 1, 8), String()).setLabel("inventory-eslVendor").setMaxAccess("readonly")
if mibBuilder.loadTexts: inventory_eslVendor.setStatus('current')
mibBuilder.exportSymbols("BTI7800-INVENTORY-MIB", inventory_cmmRevision=inventory_cmmRevision, inventory_eslTable=inventory_eslTable, inventory_xcvrSerialNumber=inventory_xcvrSerialNumber, inventory_pemPEC=inventory_pemPEC, inventory_xcvrSlotNum=inventory_xcvrSlotNum, inventory_cmmPEC=inventory_cmmPEC, inventory_pemName=inventory_pemName, inventory_eslSlotNum=inventory_eslSlotNum, FanIndexT=FanIndexT, inventory_moduleSerialNumber=inventory_moduleSerialNumber, inventory_bicName=inventory_bicName, inventory_fanChassisNum=inventory_fanChassisNum, inventory_eslRevision=inventory_eslRevision, inventory_xcvrTable=inventory_xcvrTable, String=String, inventory_chassisName=inventory_chassisName, inventory_fanManufactureDate=inventory_fanManufactureDate, inventory_xcvrEntry=inventory_xcvrEntry, inventory_eslChassisNum=inventory_eslChassisNum, inventory_moduleTable=inventory_moduleTable, inventory_preampVendor=inventory_preampVendor, inventory_pemTable=inventory_pemTable, inventory_xcvrPortNum=inventory_xcvrPortNum, inventory_preampRevision=inventory_preampRevision, inventory_preampSlotNum=inventory_preampSlotNum, inventory_fanName=inventory_fanName, inventory_preampSerialNumber=inventory_preampSerialNumber, inventory_moduleVendor=inventory_moduleVendor, inventory_eslName=inventory_eslName, inventory_bicRevision=inventory_bicRevision, inventory_cmmManufactureDate=inventory_cmmManufactureDate, inventory_preampManufactureDate=inventory_preampManufactureDate, inventory_moduleSlotNum=inventory_moduleSlotNum, inventory_cmmEntry=inventory_cmmEntry, inventory_bicSubslotNum=inventory_bicSubslotNum, inventory_pemManufactureDate=inventory_pemManufactureDate, inventory_xcvrSubslotNum=inventory_xcvrSubslotNum, inventory_bicManufactureDate=inventory_bicManufactureDate, ConfdString=ConfdString, inventory_pemVendor=inventory_pemVendor, inventory_fanSlotNum=inventory_fanSlotNum, inventory_pemSlotNum=inventory_pemSlotNum, inventory_pemChassisNum=inventory_pemChassisNum, 
inventory_fanEntry=inventory_fanEntry, inventory_cmmChassisNum=inventory_cmmChassisNum, inventory_cmmTable=inventory_cmmTable, inventory_cmmName=inventory_cmmName, UnsignedByte=UnsignedByte, inventory_eslVendor=inventory_eslVendor, PortIndexT=PortIndexT, inventory_cmmSlotNum=inventory_cmmSlotNum, inventory_eslPEC=inventory_eslPEC, ChassisIndexT=ChassisIndexT, inventory_bicEntry=inventory_bicEntry, inventory_preampEntry=inventory_preampEntry, inventory_bicPEC=inventory_bicPEC, inventory_chassisSerialNumber=inventory_chassisSerialNumber, inventory_preampName=inventory_preampName, inventory_xcvrVendorPartNum=inventory_xcvrVendorPartNum, inventory_xcvrType=inventory_xcvrType, inventory_bicChassisNum=inventory_bicChassisNum, inventory_moduleRevision=inventory_moduleRevision, inventory_preampSubslotNum=inventory_preampSubslotNum, inventory_pemEntry=inventory_pemEntry, inventory_xcvrPEC=inventory_xcvrPEC, inventory_xcvrManufactureDate=inventory_xcvrManufactureDate, CmmIndexT=CmmIndexT, inventory_modulePEC=inventory_modulePEC, inventory_moduleManufactureDate=inventory_moduleManufactureDate, inventory_xcvrChassisNum=inventory_xcvrChassisNum, inventory_chassisEntry=inventory_chassisEntry, inventory_chassisManufactureDate=inventory_chassisManufactureDate, inventory_moduleEntry=inventory_moduleEntry, inventory_fanSerialNumber=inventory_fanSerialNumber, inventory_cmmVendor=inventory_cmmVendor, inventory_eslManufactureDate=inventory_eslManufactureDate, InetAddressIP=InetAddressIP, inventory_pemSerialNumber=inventory_pemSerialNumber, inventory_preampChassisNum=inventory_preampChassisNum, inventory_fanRevision=inventory_fanRevision, inventory_moduleChassisNum=inventory_moduleChassisNum, inventory_xcvrVendor=inventory_xcvrVendor, inventory_bicTable=inventory_bicTable, inventory_preampTable=inventory_preampTable, inventory_eslEntry=inventory_eslEntry, PYSNMP_MODULE_ID=bTI7800_INVENTORY_MIB, inventory_chassisPEC=inventory_chassisPEC, inventory_xcvrRevision=inventory_xcvrRevision, 
inventory_chassisChassisNum=inventory_chassisChassisNum, inventory_preampPEC=inventory_preampPEC, inventory_xcvrName=inventory_xcvrName, inventory_bicVendor=inventory_bicVendor, inventory_bicSerialNumber=inventory_bicSerialNumber, inventory_cmmSerialNumber=inventory_cmmSerialNumber, inventory_chassisTable=inventory_chassisTable, inventory_chassisRevision=inventory_chassisRevision, PemIndexT=PemIndexT, inventory_bicSlotNum=inventory_bicSlotNum, inventory_eslSerialNumber=inventory_eslSerialNumber, UnsignedShort=UnsignedShort, inventory_pemRevision=inventory_pemRevision, inventory_fanTable=inventory_fanTable, inventory_moduleName=inventory_moduleName, BicIndexT=BicIndexT, bTI7800_INVENTORY_MIB=bTI7800_INVENTORY_MIB, inventory_fanPEC=inventory_fanPEC, inventory_chassisVendor=inventory_chassisVendor, inventory_fanVendor=inventory_fanVendor, ModuleIndexT=ModuleIndexT)
| 30,311 | 13,440 |
#_*_ coding: UTF-8 _*_
from flask import request, redirect, render_template
from application import app
import wtforms
import db
import data_models
import mailer
import renderers
import properties
import views
from role_types import RoleType
import urls
from . import grants
from . import purchases
# ForeignTransfer workflow states; state_index 0 is the closed state
# (see state_labels below and the db_filter in view_foreigntransfer_list).
STATE_REQUESTED = 1
STATE_TRANSFERRED = 2
# Display labels indexed by state_index (0 -> 'Closed').
state_labels = ['Closed', 'Requested', 'Transferred']
class TransferModel(data_models.Model):
    """Model wrapper around a ForeignTransfer entity plus the grant and
    purchase-payment entities the transfer covers."""
    def __init__(self, entity, grant_list, payment_list):
        super(TransferModel, self).__init__(entity, None)
        self.grant_list = grant_list
        self.payment_list = payment_list
    def perform_transferred(self, action_name):
        """Handle the 'transferred' action.

        Validates the exchange-rate form, moves the transfer to
        STATE_TRANSFERRED, marks each covered payment as paid (closing the
        parent purchase for invoice payments), writes audit records, and
        emails entity creators plus the supplier.

        Returns True on success, False if the form fails validation.
        """
        form = self.get_form(action_name)
        if not form.validate():
            return False
        transfer = self.entity
        # Copies validated form fields (the exchange rate) onto the entity.
        form.populate_obj(transfer)
        transfer.state_index = STATE_TRANSFERRED
        transfer.put()
        parent_audit = self.audit(action_name, 'Transfer performed')
        for payment in self.payment_list:
            payment.paid = True
            payment.put()
            purchase = data_models.get_parent(payment)
            # An invoice payment appears to be the final payment on a
            # purchase, so the purchase is closed -- TODO confirm against
            # the purchases module.
            if payment.payment_type == 'invoice':
                purchase.state_index = data_models.STATE_CLOSED
                purchase.put()
            self.audit(action_name, 'Payment transferred', purchase, parent_audit.key)
            data_models.email_entity_creator(purchase, self.user, 'Payment transferred')
        for grant in self.grant_list:
            data_models.email_entity_creator(grant, self.user, 'Transfer performed')
        self.send_supplier_email()
        return True
    def send_supplier_email(self):
        """Email the supplier a summary of the transfer: the email_properties
        fields plus the purchase-payment and grant-payment tables."""
        transfer = self.entity
        supplier = data_models.get_parent(transfer)
        column = views.view_entity_single_column(transfer, email_properties)
        purchase_payments = render_purchase_payments_list(self.payment_list)
        grant_payments = render_grants_due_list(self.grant_list, selectable=False, no_links=True)
        content = renderers.render_div(column, purchase_payments, grant_payments)
        mailer.send_email('PONT Transfer %s' % transfer.ref_id, content, supplier.contact_emails)
    def perform_ack(self, action_name):
        """Handle the 'ack' (received) action.

        Closes the transfer via perform_close (inherited -- presumably from
        data_models.Model; verify) and closes every grant whose project has
        no implementing partner, auditing and emailing per grant.
        Always returns True.
        """
        parent_audit = self.perform_close(action_name)
        transfer = self.entity  # NOTE(review): unused local
        for grant in self.grant_list:
            project = grant.project.get()
            # Grants to partner-run projects seem to stay open for a later
            # acknowledgement step -- TODO confirm.
            if project.partner is None:
                grant.state_index = data_models.STATE_CLOSED
                grant.put()
                self.audit(action_name, 'Transfer acknowledged', grant, parent_audit.key)
                data_models.email_entity_creator(grant, self.user, 'Transfer acknowledged')
        return True
# Workflow actions: (internal name, button label, required role, handler,
# list of states from which the action is permitted).
ACTION_TRANSFERRED = views.StateAction('transferred', 'Transferred', RoleType.PAYMENT_ADMIN,
                                       TransferModel.perform_transferred, [STATE_REQUESTED])
ACTION_ACKNOWLEDGED = views.StateAction('ack', 'Received', RoleType.PAYMENT_ADMIN,
                                        TransferModel.perform_ack, [STATE_TRANSFERRED])
action_list = [ACTION_TRANSFERRED, ACTION_ACKNOWLEDGED]
def show_totals(transfer):
    """Render the transfer's request totals as '£<gbp> + <ugx> Ush'."""
    totals = transfer.totals
    return u"£{:,} + {:,} Ush".format(totals[0], totals[1])
def show_shillings(transfer):
    """Render the whole transfer converted to shillings, or '' when no
    exchange rate has been entered yet."""
    rate = transfer.exchange_rate
    if not rate:
        return ""
    gbp, ugx = transfer.totals
    return u"{:,} Ush".format(int(gbp * rate) + ugx)
# Property descriptors used to render ForeignTransfer fields in grids/tables.
ref_field = properties.StringProperty('ref_id')
state_field = properties.SelectProperty('state_index', 'State', enumerate(state_labels))
creator_field = properties.KeyProperty('creator')
creation_date_field = properties.DateProperty('creation_date', format='%Y-%m-%d')
rate_field = properties.StringProperty('exchange_rate')
# Computed fields: these invoke the show_* helpers with the entity.
request_totals_field = properties.StringProperty(show_totals, 'Request Totals')
shillings_total_field = properties.StringProperty(show_shillings, 'Total Amount')
def get_partner(grant):
    """Name of the implementing partner for the grant's project, or ''."""
    partner_key = grant.project.get().partner
    if not partner_key:
        return ""
    return partner_key.get().name
# Columns shown in the grant-payments table attached to a transfer.
grant_field_list = [
    grants.state_field, grants.creator_field, grants.project_field, grants.amount_field,
    grants.transferred_amount_field,
    properties.StringProperty(get_partner, 'Implementing Partner'),
    grants.source_field,
    properties.StringProperty(lambda e: e.project.get().fund.get().name, 'Destination Fund')
]
# Purchase-payment columns; these reach through the payment's ancestors
# (key.parent() is the purchase, its parent the fund) for PO number,
# requestor and source fund.
po_number_field = properties.StringProperty(lambda e: e.key.parent().get().po_number, 'PO Number')
requestor_field = properties.KeyProperty(lambda e: e.key.parent().get().creator, 'Requestor')
source_field = properties.StringProperty(lambda e: e.key.parent().parent().get().code, 'Source Fund')
payment_field_list = [purchases.payment_type_field, po_number_field, requestor_field, source_field,
                      purchases.payment_amount_field]
class ExchangeRateForm(wtforms.Form):
    """Form for entering the exchange rate when marking a transfer as
    transferred (bound to ACTION_TRANSFERRED in view_foreigntransfer)."""
    exchange_rate = wtforms.IntegerField('Exchange Rate', validators=[wtforms.validators.InputRequired()])
@app.route('/foreigntransfer_list/<db_id>')
def view_foreigntransfer_list(db_id):
    """Render the list of foreign transfers belonging to one supplier.

    :param db_id: datastore id of the supplier entity
    Shows open transfers by default; `?show_closed` switches to closed ones.
    """
    supplier = data_models.lookup_entity(db_id)
    # A dummy child entity gives the generic Model the right kind/parent for
    # permission checks and action rendering without touching the datastore.
    dummy_transfer = db.ForeignTransfer(parent=supplier.key)
    model = data_models.Model(dummy_transfer, None)
    breadcrumbs = views.view_breadcrumbs(supplier)
    transfer_query = db.ForeignTransfer.query(ancestor=supplier.key).order(-db.ForeignTransfer.state_index,
                                                                          db.ForeignTransfer.ref_id)
    transfer_fields = [state_field, ref_field, creation_date_field, rate_field]
    # BUG FIX: `has_key` does not exist on Python 3 mappings (and was removed
    # from werkzeug's MultiDict); the `in` membership test works everywhere.
    model.show_closed = 'show_closed' in request.args
    # state_index == 0 is the closed state; anything greater is open.
    db_filter = db.ForeignTransfer.state_index == 0 if model.show_closed else db.ForeignTransfer.state_index > 0
    transfer_query = transfer_query.filter(db_filter)
    entity_table = views.view_entity_list(transfer_query.fetch(), transfer_fields)
    buttons = views.view_actions([views.ACTION_FILTER], model)
    user_controls = views.view_user_controls(model)
    return render_template('layout.html', title='Foreign Transfer List', breadcrumbs=breadcrumbs,
                           user=user_controls, buttons=buttons, content=entity_table)
def render_grants_due_list(grant_list, selectable=True, no_links=True):
    """Build the 'Grant Payments' sub-section: (sub-heading, entity table)."""
    heading = renderers.sub_heading('Grant Payments')
    grant_table = views.view_entity_list(grant_list, grant_field_list, selectable, no_links)
    return (heading, grant_table)
def render_purchase_payments_list(payment_list):
    """Build the 'Purchase Payments' sub-section: (sub-heading, table).

    Each table row links to the payment's parent purchase entity.
    """
    column_headers = properties.get_labels(payment_field_list)
    payment_grid = properties.display_entity_list(payment_list, payment_field_list, no_links=True)
    purchase_list = [data_models.get_parent(e) for e in payment_list]
    # A list comprehension instead of map(): on Python 3 map() returns a
    # lazy iterator, which the renderer may consume more than once.
    payment_url_list = [urls.url_for_entity(purchase) for purchase in purchase_list]
    sub_heading = renderers.sub_heading('Purchase Payments')
    table = renderers.render_table(column_headers, payment_grid,
                                   payment_url_list)
    return (sub_heading, table)
def calculate_totals(payments):
    """Sum payment amounts per currency.

    Any currency other than 'sterling' is counted as shillings.
    Returns (total_sterling, total_shillings).
    """
    sterling = 0
    shillings = 0
    for payment in payments:
        amount = payment.amount
        if amount.currency == 'sterling':
            sterling += amount.value
        else:
            shillings += amount.value
    return (sterling, shillings)
@app.route('/foreigntransfer/<db_id>', methods=['GET', 'POST'])
def view_foreigntransfer(db_id):
    """Render a single foreign transfer, handling its workflow actions.

    :param db_id: datastore id of the ForeignTransfer entity
    POST requests are dispatched to the matching action; on success the
    client is redirected back to this page (post/redirect/get).
    """
    transfer = data_models.lookup_entity(db_id)
    grant_list = db.Grant.query(db.Grant.transfer == transfer.key).fetch()
    payment_list = db.PurchasePayment.query(db.PurchasePayment.transfer == transfer.key).fetch()
    # Both entity kinds expose .amount -- calculate_totals treats them alike.
    transfer.totals = calculate_totals(grant_list + payment_list)
    form = ExchangeRateForm(request.form)
    model = TransferModel(transfer, grant_list, payment_list)
    model.add_form(ACTION_TRANSFERRED.name, form)
    # (fixed the missing space in the original `'POST'and`)
    if request.method == 'POST' and views.handle_post(model, action_list):
        return redirect(request.base_url)
    transfer_fields = (creation_date_field, ref_field, state_field, rate_field, request_totals_field,
                       shillings_total_field, creator_field)
    breadcrumbs = views.view_breadcrumbs_list(transfer)
    grid = views.view_entity(transfer, transfer_fields)
    grant_payments = render_grants_due_list(grant_list)
    purchase_payments = render_purchase_payments_list(payment_list)
    history = views.view_entity_history(transfer.key)
    content = renderers.render_div(grid, purchase_payments, grant_payments, history)
    buttons = views.view_actions(action_list, model)
    user_controls = views.view_user_controls(model)
    return render_template('layout.html', title='Foreign Transfer', breadcrumbs=breadcrumbs, user=user_controls,
                           buttons=buttons, content=content)
email_properties = (ref_field, shillings_total_field)
| 8,897 | 2,728 |
# Generated by Django 3.1 on 2020-09-09 19:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): the datetime defaults below were frozen at the moment
    # `makemigrations` ran (2020-09-09 19:21) -- this happens when a model
    # field uses a *called* default such as datetime.datetime.now().
    # Presumably the intent was "now at row creation"; if so, fix the model
    # (use django.utils.timezone.now, uncalled) and generate a new
    # migration -- do not edit this historical one.
    dependencies = [
        ('patient_app', '0005_auto_20200817_1713'),
    ]
    operations = [
        migrations.AlterField(
            model_name='booking_patient',
            name='date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 9, 19, 21, 45, 74281)),
        ),
        migrations.AlterField(
            model_name='patient_register',
            name='date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 9, 19, 21, 45, 73284)),
        ),
        migrations.AlterField(
            model_name='patient_register',
            name='image',
            field=models.ImageField(default='log.png', upload_to='patient_app/images'),
        ),
    ]
| 889 | 316 |
from dataclasses import dataclass
from typing import Callable, Optional
import numpy as np
from scipy.spatial import Delaunay
from ...data import TimeEncoder
from ...domain import (
BoundaryAnglePredicate,
CellPredicate,
Domain,
select_boundary_mesh_cells,
)
@dataclass(frozen=True)
class MeshConfig:
    """Configuration for the generation of a sparse mesh from a larger set of points.

    Attributes
    ----------
    k
        Number of nodes to select
    epsilon
        Maximum angle of boundary cells to filter out in degrees
    seed
        Random seed for reproducibility
    """

    k: int
    epsilon: float
    seed: int

    def random_state(self) -> np.random.RandomState:
        """Seeded RandomState; the seed is reduced mod 2**32 to fit the
        32-bit range RandomState accepts."""
        return np.random.RandomState(int(self.seed) % 2**32)

    def epsilon_radians(self) -> float:
        """The epsilon angle threshold converted from degrees to radians."""
        return self.epsilon * np.pi / 180

    def angle_predicate(self, tri: Delaunay):
        """Boundary-angle predicate over the triangulation's points using
        this config's epsilon threshold."""
        return BoundaryAnglePredicate(tri.points, self.epsilon_radians())
def sample_mesh(
    config: MeshConfig,
    points: np.ndarray,
    predicate_factory: Optional[Callable[[Delaunay], CellPredicate]] = None,
) -> tuple[np.ndarray, Domain]:
    """Create a domain from a subset of points, optionally filtering out some cells.

    Parameters
    ----------
    config
        Sampling configuration: node count ``k``, angle threshold, seed
    points
        Candidate points from which the ``k`` mesh nodes are drawn
    predicate_factory
        Optional factory producing the predicate used to drop boundary cells

    Returns
    -------
    Indices of the points that were selected as mesh nodes and the domain
    """
    import skfem
    from sklearn_extra.cluster import KMedoids

    # Select k sparse observation points uniformly-ish
    km = KMedoids(
        n_clusters=config.k, init="k-medoids++", random_state=config.random_state()
    )
    km.fit(points)
    node_indices = km.medoid_indices_
    # Mesh the points with Delaunay triangulation
    tri = Delaunay(points[node_indices])
    # Filter out mesh boundary cells that are too acute or contain mostly land
    if predicate_factory is not None:
        predicate = predicate_factory(tri)
        # renamed from `filter`, which shadowed the builtin
        drop_mask = select_boundary_mesh_cells(tri, predicate)
        tri.simplices = tri.simplices[~drop_mask]
    # Ensure every node is still referenced by at least one cell; bincount
    # over the flattened connectivity counts cell memberships per node.
    cell_counts = np.bincount(tri.simplices.ravel(), minlength=config.k)
    assert (cell_counts >= 1).all(), "cell filtering left an orphan mesh node"
    mesh = skfem.MeshTri(
        np.ascontiguousarray(tri.points.T), np.ascontiguousarray(tri.simplices.T)
    )
    domain = Domain(tri.points, mesh=mesh)
    return node_indices, domain
| 2,332 | 739 |
import os
import community
import pandas as pd
import networkx as nx
# Detect Louvain communities in the edge-list graph and export them to CSV.
graphdf = pd.read_csv(os.path.join("..", "NetworkData", "fetchcontent.csv"))
G = nx.from_pandas_edgelist(graphdf)
# partition maps node -> community number
partition = community.best_partition(G)
# adapted from https://medium.com/@adityagandhi.7/network-analysis-and-community-structure-for-market-surveillance-using-python-networkx-65413e7b7fee
# Build {community_number: 'node1 | node2 | ...'}.  The original also built
# two unused lists (`values`, `list_com`) and did a get()+update() dance
# with a `.keys()` membership test; plain indexing does the same work.
dict_nodes = {}
for node, community_num in partition.items():
    community_node = str(node)
    if community_num in dict_nodes:
        dict_nodes[community_num] += ' | ' + community_node
    else:
        dict_nodes[community_num] = community_node
# One row per community, members joined by ' | '.
community_df = pd.DataFrame.from_dict(dict_nodes, orient='index', columns=['Members'])
community_df.index.rename('Community Num', inplace=True)
community_df.to_csv('community.csv')
print("Number of communities: {}".format(len(community_df.index)))
import os
import cv2
def ListFilesToTxt(dir, file, wildcard, recursion):
    """Write consecutive filename pairs from `dir` into the open `file`.

    Files whose name ends with any extension in the space-separated
    `wildcard` are collected, sorted, and written one pair per line as
    "<name_i> <name_i+1>".  When `recursion` is truthy, each subdirectory
    is processed the same way, appending its own pairs to `file`.
    """
    file_list = []
    exts = wildcard.split(" ")
    files = os.listdir(dir)
    for name in files:
        fullname = os.path.join(dir, name)
        # BUG FIX: the original used bitwise `&`; logical `and` expresses the
        # intent and short-circuits on non-directories.
        if os.path.isdir(fullname) and recursion:
            ListFilesToTxt(fullname, file, wildcard, recursion)
        else:
            for ext in exts:
                if name.endswith(ext):
                    file_list.append(name)
                    break
    file_list.sort()
    resize = False  # disabled branch kept for reference; enable to downscale images
    factor = 0.5
    if resize:
        for i in range(0, len(file_list)):
            fullname = os.path.join(dir, file_list[i])
            img = cv2.imread(fullname)
            height, width = img.shape[:2]
            size = (int(width * factor), int(height * factor))
            img = cv2.resize(img, size)
            # NOTE(review): hard-coded output directory -- parameterize if reused
            fullname = os.path.join("/media/doing/C8BA5288BA5272C4/LINUX/pot", file_list[i])
            cv2.imwrite(fullname, img)
    # Write sliding pairs: (f0 f1), (f1 f2), ...
    for i in range(0, len(file_list) - 1):
        file.write(file_list[i] + " ")
        file.write(file_list[i + 1] + "\n")
def getFileList():
    """Write consecutive .JPG filename pairs from the flowerpot directory
    into flowerpot.txt (see ListFilesToTxt)."""
    dir = "/media/doing/Samsung USB/flowerpot"  # source directory
    outfile = "flowerpot.txt"                   # output text file
    wildcard = ".JPG"                           # extensions to match
    # BUG FIX: open() raises OSError on failure and never returns a falsy
    # handle, so the old `if not file: print(...)` check was dead code.
    # `with` also guarantees the file is closed even if listing fails.
    with open(outfile, "w") as file:
        ListFilesToTxt(dir, file, wildcard, 1)
getFileList()
| 1,404 | 521 |
#
# transform.py -- coordinate transforms for Ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from ginga import trcalc
__all__ = ['TransformError', 'BaseTransform', 'ComposedTransform',
'CanvasWindowTransform', 'CartesianWindowTransform',
'RotationTransform', 'ScaleTransform',
'DataCartesianTransform', 'OffsetDataTransform',
'WCSDataTransform',
]
class TransformError(Exception):
    """Raised when a coordinate transform cannot be computed (e.g. missing
    subclass override, or no image/WCS available)."""
    pass
class BaseTransform(object):
    """Abstract base for viewer coordinate transforms.

    Subclasses override `to_` (forward) and `from_` (inverse).  Transforms
    compose with `+`: (t1 + t2).to_(x, y) applies t1 then t2.
    """
    def __init__(self):
        super(BaseTransform, self).__init__()
    def to_(self, x, y):
        """Map (x, y) forward through this transform; subclasses override."""
        raise TransformError("subclass should override this method")
    def from_(self, tx, ty):
        """Map (tx, ty) backward through this transform; subclasses override."""
        raise TransformError("subclass should override this method")
    def __add__(self, trans):
        """Compose: self is applied first, then `trans`."""
        return ComposedTransform(self, trans)
class ComposedTransform(BaseTransform):
    """
    A transform that composes two other transforms to make a new one.

    Forward applies tform1 then tform2; backward applies tform2's inverse
    then tform1's inverse.  Note the deliberate asymmetry: in both
    directions **kwargs are forwarded to `tform1` only.
    """
    def __init__(self, tform1, tform2):
        super(ComposedTransform, self).__init__()
        self.tform1 = tform1
        self.tform2 = tform2
    def to_(self, x, y, **kwargs):
        return self.tform2.to_(*self.tform1.to_(x, y, **kwargs))
    def from_(self, tx, ty, **kwargs):
        return self.tform1.from_(*self.tform2.from_(tx, ty), **kwargs)
class CanvasWindowTransform(BaseTransform):
    """
    Maps canvas pixel space (possibly Y-flipped) to window pixel space.

    For backends whose origin is the upper-left corner this is the
    identity; otherwise Y is flipped about the window height.
    """
    def __init__(self, viewer):
        super(CanvasWindowTransform, self).__init__()
        self.viewer = viewer

    def to_(self, cvs_x, cvs_y):
        if self.viewer._originUpper:
            return (cvs_x, cvs_y)
        # Backend origin is lower-left: flip Y within the window height.
        _, win_ht = self.viewer.get_window_size()
        return (cvs_x, win_ht - cvs_y)

    def from_(self, win_x, win_y):
        # The Y flip is its own inverse, so delegate to to_().
        return self.to_(win_x, win_y)
class CartesianWindowTransform(BaseTransform):
    """
    A transform from cartesian coordinates to the window pixel coordinates
    of a viewer.

    Parameters
    ----------
    viewer
        Viewer whose center pixel and origin convention define the mapping.
    as_int : bool
        If True (default), round forward results to integer pixel units.
    """
    def __init__(self, viewer, as_int=True):
        super(CartesianWindowTransform, self).__init__()
        self.viewer = viewer
        self.as_int = as_int

    def to_(self, off_x, off_y):
        """Map cartesian offsets (about the window center) to window pixels."""
        # add center pixel to convert from X/Y coordinate space to
        # canvas graphics space
        ctr_x, ctr_y = self.viewer.get_center()
        win_x = off_x + ctr_x
        if self.viewer._originUpper:
            win_y = ctr_y - off_y
        else:
            win_y = off_y + ctr_y
        # round to pixel units, if asked
        if self.as_int:
            # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the documented replacement (it is
            # exactly what the alias referred to).
            win_x = np.rint(win_x).astype(int)
            win_y = np.rint(win_y).astype(int)
        return (win_x, win_y)

    def from_(self, win_x, win_y):
        """Reverse of :meth:`to_` (without the integer rounding)."""
        # make relative to center pixel to convert from canvas
        # graphics space to standard X/Y coordinate space
        ctr_x, ctr_y = self.viewer.get_center()
        off_x = win_x - ctr_x
        if self.viewer._originUpper:
            off_y = ctr_y - win_y
        else:
            off_y = win_y - ctr_y
        return (off_x, off_y)
class RotationTransform(BaseTransform):
    """
    Applies the viewer's flip/swap and rotation settings in cartesian space.

    Forward order: flip X, flip Y, swap axes, rotate; the inverse applies
    the same steps in reverse.
    """
    def __init__(self, viewer):
        super(RotationTransform, self).__init__()
        self.viewer = viewer

    def to_(self, off_x, off_y):
        settings = self.viewer.t_
        if settings['flip_x']:
            off_x = -off_x
        if settings['flip_y']:
            off_y = -off_y
        if settings['swap_xy']:
            off_x, off_y = off_y, off_x
        rot = settings['rot_deg']
        if rot != 0:
            off_x, off_y = trcalc.rotate_pt(off_x, off_y, rot)
        return (off_x, off_y)

    def from_(self, off_x, off_y):
        """Inverse of :meth:`to_`: undo rotation, then swap, then flips."""
        settings = self.viewer.t_
        rot = settings['rot_deg']
        if rot != 0:
            off_x, off_y = trcalc.rotate_pt(off_x, off_y, -rot)
        if settings['swap_xy']:
            off_x, off_y = off_y, off_x
        if settings['flip_y']:
            off_y = -off_y
        if settings['flip_x']:
            off_x = -off_x
        return (off_x, off_y)
class ScaleTransform(BaseTransform):
    """
    Scales cartesian offsets by the viewer's current X/Y scale factors.
    """
    def __init__(self, viewer):
        super(ScaleTransform, self).__init__()
        self.viewer = viewer

    def to_(self, off_x, off_y):
        """Apply the viewer's scale factors to the offsets."""
        return (off_x * self.viewer._org_scale_x,
                off_y * self.viewer._org_scale_y)

    def from_(self, off_x, off_y):
        """Undo the viewer's scaling (multiply by the reciprocals, matching
        the original arithmetic exactly)."""
        inv_x = 1.0 / self.viewer._org_scale_x
        inv_y = 1.0 / self.viewer._org_scale_y
        return (off_x * inv_x, off_y * inv_y)
class DataCartesianTransform(BaseTransform):
    """
    Maps data coordinates to cartesian offsets about the viewer's pan
    position.

    When `use_center` is True, coordinates are shifted so a pixel is
    centered on its integer row/column when zoomed past 1X (the FITS image
    standard convention).
    """
    def __init__(self, viewer, use_center=True):
        super(DataCartesianTransform, self).__init__()
        self.viewer = viewer
        self.use_center = use_center

    def to_(self, data_x, data_y):
        """Data coordinates -> offsets from the center reference pixel."""
        if self.use_center:
            data_x -= self.viewer.data_off
            data_y -= self.viewer.data_off
        # subtract the data indexes at the center reference pixel
        return (data_x - self.viewer._org_x, data_y - self.viewer._org_y)

    def from_(self, off_x, off_y):
        """Offsets from the center reference pixel -> data coordinates."""
        data_x = self.viewer._org_x + off_x
        data_y = self.viewer._org_y + off_y
        if self.use_center:
            data_x += self.viewer.data_off
            data_y += self.viewer.data_off
        return (data_x, data_y)
class OffsetDataTransform(BaseTransform):
    """
    A transform whose coordinate space is offsets from a reference point
    in data space.
    """
    def __init__(self, pt):
        super(OffsetDataTransform, self).__init__()
        self.pt = pt

    def to_(self, delta_x, delta_y):
        """Offsets -> absolute data coordinates."""
        base_x, base_y = self.pt[:2]
        return (base_x + delta_x, base_y + delta_y)

    def from_(self, data_x, data_y):
        """Absolute data coordinates -> offsets from the reference point."""
        base_x, base_y = self.pt[:2]
        return (data_x - base_x, data_y - base_y)
class WCSDataTransform(BaseTransform):
    """
    A transform whose coordinate space is based on the WCS of the primary
    image loaded in a viewer.
    """
    def __init__(self, viewer):
        super(WCSDataTransform, self).__init__()
        self.viewer = viewer
    def to_(self, lon, lat):
        """Sky coordinates (lon, lat) -> data pixel coordinates.

        Raises TransformError if the viewer has no image loaded.
        """
        image = self.viewer.get_image()
        if image is None:
            raise TransformError("No image, no WCS")
        data_x, data_y = image.radectopix(lon, lat)
        return (data_x, data_y)
    def from_(self, data_x, data_y):
        """Data pixel coordinates -> sky coordinates (lon, lat).

        Raises TransformError if the viewer has no image loaded.
        """
        image = self.viewer.get_image()
        if image is None:
            raise TransformError("No image, no WCS")
        lon, lat = image.pixtoradec(data_x, data_y)
        return (lon, lat)
#END
| 7,647 | 2,558 |
# coding=utf-8
# Author: Jianghan LI
# Question: 098.Validate_Binary_Search_Tree
# Date: 2017-04-17
class Solution:
    """Validates a BST via in-order traversal, recording visited values;
    a BST's in-order sequence must be strictly increasing."""
    def isValidBST(self, root):
        """Return True iff `root` is a valid binary search tree."""
        self.res = []
        return self.inOrder(root)
    def inOrder(self, root):
        """In-order traversal that short-circuits on the first violation."""
        if not root:
            return True
        if not self.inOrder(root.left):
            return False
        # BUG FIX: the original tested `len(self)`, which raises TypeError
        # (Solution defines no __len__); the intent is "have we visited any
        # value yet", i.e. len(self.res).
        if len(self.res) and self.res[-1] >= root.val:
            return False
        self.res.append(root.val)
        return self.inOrder(root.right)
# Simple inorder traversal is quite slow..
# Yes, O(n) guaranteed, but do we have chance to "short circuit" as soon as
# we find the incorrect ordering while doing the inOrder().
# So I modified the solution.
# https://discuss.leetcode.com/topic/10455/python-version-based-on-inorder-traversal
class Solution:
    """Validates a BST via in-order traversal, tracking only the most
    recently visited value instead of the whole visited sequence."""
    def isValidBST(self, root):
        """Return True iff `root` is a valid binary search tree."""
        self.prev = -float('inf')
        return self.inOrder(root)
    def inOrder(self, root):
        """Visit left subtree, check strict increase vs. self.prev, then
        visit right; short-circuits on the first ordering violation."""
        if not root:
            return True
        if not self.inOrder(root.left):
            return False
        if root.val <= self.prev:
            return False
        self.prev = root.val
        return self.inOrder(root.right)
| 1,179 | 374 |
'''A cron wrapper that tells you more about your cron jobs
'''
from __future__ import absolute_import
from __future__ import print_function
from .config import config
from .logger import logger
def main():
    """Entry point: currently a stub that dumps the module-level logger and
    config objects.  The argparse wiring below is parked until implemented."""
    print("Here is our logger: " + str(logger))
    print("Here is our config: " + str(config))
    # parser = argparse.ArgumentParser(description='Tells you more about your cron jobs')
    # parser.add_argument('-s', '--statsd', required=False, help='Send metrics to statsd')
    # parser.add_argument('-g', '--graphite', required=False, help='Send metrics directly to Graphite')
    # parser.add_argument('-f', '--logfile', required=False, help='Write to a logfile')
    # parser.add_argument('-l', '--logformat', required=False, help='What kind of logformat you like')
    # # TODO: somehow clearly distinguish between the cronsense logs vs.
    # # how logs of the child process will be directed
    # # TODO: logstash version 1 and version 0
    # # TODO: what if logrotate moves the file out from underneath us
    # # maybe later I will relent and add email options?
    # args = parser.parse_args()
    # print(args)
if __name__ == '__main__':
    main()
| 1,179 | 346 |
from abc import ABC
from typing import Any, Type
from alibi.api.interfaces import Explainer
from mlserver import ModelSettings
from mlserver_alibi_explain.common import AlibiExplainSettings
from mlserver_alibi_explain.runtime import AlibiExplainRuntimeBase
class AlibiExplainWhiteBoxRuntime(ABC, AlibiExplainRuntimeBase):
    """
    White box alibi explain requires access to the full inference model
    to compute gradients etc. usually in the same
    domain as the explainer itself. e.g. `IntegratedGradients`
    """
    def __init__(self, settings: ModelSettings, explainer_class: Type[Explainer]):
        # Populated lazily in load() via the subclass's _get_inference_model.
        self._inference_model = None
        self._explainer_class = explainer_class
        # if we are here we are sure that settings.parameters is set,
        # just helping mypy
        assert settings.parameters is not None
        extra = settings.parameters.extra
        explainer_settings = AlibiExplainSettings(**extra)  # type: ignore
        # TODO: validate the settings are ok with this specific explainer
        super().__init__(settings, explainer_settings)
    async def load(self) -> bool:
        """Build the explainer and mark the runtime ready.

        Either constructs the explainer fresh from `init_parameters`
        (injecting the inference model as the `model` argument) or, when no
        init parameters are given, loads a saved explainer via
        _load_from_uri (presumably provided by the base runtime -- verify).
        """
        self._inference_model = await self._get_inference_model()
        if self.alibi_explain_settings.init_parameters is not None:
            init_parameters = self.alibi_explain_settings.init_parameters
            # white box explainers requires access to the inference model
            init_parameters["model"] = self._inference_model
            self._model = self._explainer_class(**init_parameters)  # type: ignore
        else:
            self._model = self._load_from_uri(self._inference_model)
        self.ready = True
        return self.ready
    async def _get_inference_model(self) -> Any:
        """Subclasses return the in-process inference model the explainer
        needs direct access to."""
        raise NotImplementedError
| 1,771 | 490 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import logging
import os.path
import requests
import shutil
import xml.etree.ElementTree as ET
try:
# Python 3
from urllib.request import Request, urlopen
except ImportError:
# Python 2
from urllib2 import Request, urlopen
from .configuration import get_repository, get_stagedir, get_repository_shortname
from mvnfeed.cli.common.config import AUTHORIZATION, URL, load_config
def transfer_artifact(name, from_repo, to_repo, transfer_deps=False):
    """
    Transfers a single artifact.

    :param name: name of the artifact to download, following the group_id:artifact_id:version format
    :param from_repo: name of the source repository
    :param to_repo: name of the destination repository
    :param transfer_deps: True if the dependencies must be transferred
    """
    logging.info('transferring %s', name)
    config = load_config()
    source = get_repository(config, from_repo)
    destination = get_repository(config, to_repo)
    staging = get_stagedir(config)
    _transfer_single_artifact(name, source, destination, staging, transfer_deps)
def transfer_bulk(filename, from_repo, to_repo, transfer_deps=False):
    """
    Transfers artifacts from a file, one artifact per line.

    :param filename: name of the file containing the mvnfeed to upload
    :param from_repo: name of the source repository
    :param to_repo: name of the destination repository
    :param transfer_deps: True if the dependencies must be transferred
    """
    logging.info('transferring from file %s', filename)
    config = load_config()
    from_repository = get_repository(config, from_repo)
    to_repository = get_repository(config, to_repo)
    stage_dir = get_stagedir(config)
    # Stream line by line instead of readlines() (no need to hold the whole
    # file in memory); strip() already removes whitespace from both ends, so
    # the original strip().rstrip() chain was redundant.  `bulk_file` also
    # avoids shadowing the Python 2 builtin `file`.
    with open(filename, 'r') as bulk_file:
        for line in bulk_file:
            line = line.strip()
            if line:
                _transfer_single_artifact(line, from_repository, to_repository, stage_dir, transfer_deps)
def _transfer_single_artifact(name, from_repository, to_repository, stage_dir, transfer_deps):
    """Download one artifact (and companions) from the source repository and
    upload it to the destination repository, staging files in ``stage_dir``.

    ``name`` accepts three colon-separated formats:
      * ``group:artifact:version``
      * ``group:artifact:type:version``
      * ``group:artifact:type:classifier:version``

    :param from_repository: source repository definition (url/authorization)
    :param to_repository: destination repository definition
    :param stage_dir: existing local directory used to stage downloads
    :param transfer_deps: True if POM dependencies must be transferred too
    :raises ValueError: if ``stage_dir`` does not exist
    """
    logging.debug('download url: %s', from_repository[URL])
    logging.debug('upload url: %s', to_repository[URL])
    logging.debug('stage directory: %s', stage_dir)
    if not os.path.exists(stage_dir):
        raise ValueError('Output directory doesn\'t exist: ' + stage_dir)
    # Parse the coordinate string into its components.
    values = name.split(':')
    if len(values) == 3:
        group_id = values[0]
        artifact_name = values[1]
        # try to guess if we have a bom file
        if '-bom' in artifact_name:
            artifact_type = 'pom'
        else:
            artifact_type = 'jar'
        version = values[2]
        artifact_fullname = artifact_name + '-' + version
    elif len(values) == 4:
        group_id = values[0]
        artifact_name = values[1]
        artifact_type = values[2]
        version = values[3]
        artifact_fullname = artifact_name + '-' + version
    elif len(values) == 5:
        # group:artifact:type:classifier:version — classifier is values[3]
        group_id = values[0]
        artifact_name = values[1]
        artifact_type = values[2]
        version = values[4]
        artifact_fullname = artifact_name + '-' + version + '-' + values[3]
    else:
        logging.warning('Artifact doesn\'t have correct format. Skipping ' + name)
        return
    # Standard Maven repository layout: group/…/artifact/version
    artifact_path = group_id.replace('.', '/') + '/' + artifact_name + '/' + version
    if artifact_type in ['jar', 'war']:
        files2transfer = _java_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps)
    else:
        files2transfer = _untyped_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps)
    # NOTE: files2transfer is intentionally appended to while being iterated —
    # parent POMs and dependencies discovered below are picked up by this
    # same loop on later iterations.
    for file2transfer in files2transfer:
        artifact_relativepath = file2transfer['path'] + '/' + file2transfer['name']
        already_uploaded = _already_uploaded(to_repository, artifact_relativepath)
        if already_uploaded and not file2transfer['name'].endswith('.pom'):
            logging.info('%s already uploaded. Skipping', file2transfer['name'])
            continue
        # let's always download POM files in case we need to process the parent POM
        # once again or upload the children dependencies.
        outfile = os.path.join(stage_dir, file2transfer['name'])
        _download_file(from_repository, artifact_relativepath, outfile)
        if not os.path.exists(outfile):
            logging.info('%s was not downloaded. Skipping', outfile)
            if file2transfer['target']:
                # Only warn for the explicitly requested artifact; missing
                # companions (sources, javadoc, ...) are expected.
                logging.warning('%s was not found in the repository', file2transfer['name'])
            continue
        if not already_uploaded:
            _upload_file(to_repository, artifact_relativepath, outfile)
        if file2transfer['name'].endswith('.pom'):
            # a library will not be installed if it's parent pom.xml file
            # is not present in the repository, so let's transfer the
            # parent POM file but without transferring its dependencies.
            tree = ET.parse(outfile)
            parentNode = tree.getroot().find('{http://maven.apache.org/POM/4.0.0}parent')
            if parentNode is not None:
                parent_group_id = _findNodeValue(parentNode, 'groupId')
                parent_artifact_id = _findNodeValue(parentNode, 'artifactId')
                parent_version = _findNodeValue(parentNode, 'version')
                parent_path = parent_group_id.replace('.', '/') + '/' + parent_artifact_id + '/' + parent_version
                files2transfer.append(_pom_artifact(parent_artifact_id + '-' + parent_version, parent_path))
            if 'transfer_deps' not in file2transfer or not file2transfer['transfer_deps']:
                logging.info('not transferring dependencies from %s', file2transfer['name'])
                continue
            # try to download the dependencies
            dependenciesNode = tree.getroot().find('{http://maven.apache.org/POM/4.0.0}dependencies')
            if dependenciesNode is None:
                continue
            logging.debug("Downloading children")
            for dependencyNode in dependenciesNode.getchildren():
                dep_group_id = _findNodeValue(dependencyNode, 'groupId')
                dep_artifact_id = _findNodeValue(dependencyNode, 'artifactId')
                dep_version = _findNodeValue(dependencyNode, 'version')
                # we're only downloading `compile` versions. The user can
                # easily download other dependencies if needed.
                dep_scope = _findNodeValue(dependencyNode, 'scope')
                if dep_scope is not None and dep_scope != 'compile':
                    logging.info('not downloading %s:%s with scope %s', dep_group_id, dep_artifact_id, dep_scope)
                    continue
                # if no version has been defined, than it's getting potentially
                # tricky so let's just give up and let the user deal with it
                if dep_version is None:
                    logging.error('missing explicit version for %s:%s in %s. Skipping',
                                  dep_group_id, dep_artifact_id, file2transfer['name'])
                    continue
                # let's download the dependency
                artifact_fullname = dep_artifact_id + '-' + dep_version
                artifact_path = dep_group_id.replace('.', '/') + '/' + dep_artifact_id + '/' + dep_version
                files2transfer.extend(_java_artifacts(artifact_fullname, 'jar', artifact_path, transfer_deps))
# Definitions of the artifacts to download:
# - name: name of the artifact
# - path: full path of the artifact, will be prepended to the urls
# - transfer_deps: true if the dependencies defined in the pom file must be tranferred
# - target: true if definition was created for an artifact that was
# explicitely requested. Used for logging purpose.
def _pom_artifact(artifact_fullname, artifact_path):
    """Build the transfer definition for a standalone POM file.

    Parent POMs are transferred without following their own dependencies,
    and are never the explicitly requested artifact.
    """
    definition = {'name': artifact_fullname + '.pom', 'path': artifact_path}
    definition['transfer_deps'] = False
    definition['target'] = False
    return definition
def _java_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps):
    """Transfer definitions for a jar/war artifact and its companion files.

    Returns, in order: the main artifact (the only 'target' entry), its POM
    (which may carry the dependency-transfer flag), then the optional tests,
    sources and javadoc jars.
    """
    main_file = {
        'name': artifact_fullname + '.' + artifact_type,
        'path': artifact_path,
        'target': True
    }
    pom_file = {
        'name': artifact_fullname + '.pom',
        'path': artifact_path,
        'transfer_deps': transfer_deps,
        'target': False
    }
    # Companion jars are best-effort: missing ones are silently skipped.
    companions = [
        {'name': artifact_fullname + suffix, 'path': artifact_path, 'target': False}
        for suffix in ('-tests.jar', '-sources.jar', '-javadoc.jar')
    ]
    return [main_file, pom_file] + companions
def _untyped_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps):
    """Transfer definitions for an artifact of arbitrary packaging type.

    Returns the main artifact (the 'target') followed by its POM; both may
    carry the dependency-transfer flag.
    """
    def entry(extension, is_target):
        # One definition per file; only the main artifact is the target.
        return {
            'name': artifact_fullname + '.' + extension,
            'path': artifact_path,
            'transfer_deps': transfer_deps,
            'target': is_target
        }
    return [entry(artifact_type, True), entry('pom', False)]
def _findNodeValue(node, name):
    """Return the text of the named POM child element, or None if absent."""
    child = node.find('{http://maven.apache.org/POM/4.0.0}' + name)
    return None if child is None else child.text
def _download_file(from_repository, path, filename, length=16*1024):
    """
    Stores the path into the given filename.

    :param from_repository: repository definition dict (must contain a url)
    :param path: relative path of the artifact inside the repository
    :param filename: local file the downloaded content is written to
    :param length: copy buffer size in bytes
    :raises ValueError: if the repository has no url configured
    """
    if os.path.exists(filename):
        # Bug fix: previously this only logged and the file was downloaded
        # again anyway; skip the download when a staged copy already exists.
        logging.debug('%s already downloaded', filename)
        return
    if URL not in from_repository or not from_repository[URL]:
        raise ValueError('Repository missing url: ' + get_repository_shortname(from_repository))
    url = _append_url(from_repository[URL], path)
    logging.debug('downloading from %s', url)
    try:
        request = Request(url)
        if AUTHORIZATION in from_repository and from_repository[AUTHORIZATION]:
            logging.debug('authorization header added')
            request.add_header('Authorization', from_repository[AUTHORIZATION])
        else:
            logging.debug('no authorization configured')
        response = urlopen(request)
        with open(filename, 'wb') as file:
            shutil.copyfileobj(response, file, length)
    except Exception as ex:
        # Best effort by design: missing companion artifacts (sources,
        # javadoc, ...) are expected; the caller checks for the staged
        # file's existence afterwards. (Removed a stray `None` statement.)
        logging.debug('exception while downloading (expected): %s', ex)
def _already_uploaded(to_repository, path):
    """
    Return True if the file was already uploaded.
    """
    if URL not in to_repository or not to_repository[URL]:
        raise ValueError('Repository missing upload url: ' + get_repository_shortname(to_repository))
    url = _append_url(to_repository[URL], path)
    # Build the optional authorization header for the HEAD probe.
    headers = {}
    if AUTHORIZATION in to_repository and to_repository[AUTHORIZATION]:
        logging.debug('authorization header added')
        headers['Authorization'] = to_repository[AUTHORIZATION]
    else:
        logging.debug('no authorization configured')
    try:
        return requests.head(url, headers=headers).ok
    except Exception as ex:
        # A network failure is treated as "not uploaded" so the transfer
        # proceeds and surfaces the real error on upload.
        logging.debug('exception while checking existence %s', ex)
        return False
def _upload_file(to_repository, path, filename):
    """
    Returns True if the file was uploaded

    :param to_repository: destination repository definition dict
    :param path: relative path of the artifact inside the repository
    :param filename: local staged file to upload
    :raises ValueError: if the repository has no url configured
    """
    if not os.path.exists(filename):
        # we try to upload a file that was not downloaded (for example an artifact without
        # sources.) This is expected to happen and is not an error.
        logging.debug('missing file to upload, skipping %s', filename)
        return False
    if URL not in to_repository or not to_repository[URL]:
        raise ValueError('Repository missing upload url: ' + get_repository_shortname(to_repository))
    url = _append_url(to_repository[URL], path)
    logging.debug('uploading to ' + url)
    if AUTHORIZATION in to_repository and to_repository[AUTHORIZATION]:
        logging.debug('authorization header added')
        headers = {'Authorization': to_repository[AUTHORIZATION]}
    else:
        logging.debug('no authorization configured')
        headers = {}
    try:
        with open(filename, 'rb') as file:
            response = requests.put(url, files={filename: file}, headers=headers)
            if not response.ok:
                logging.error('error while uploading of %s: %s', path, response.text)
            # Bug fix: previously returned True unconditionally, even when
            # the PUT failed; honor the documented contract instead.
            return response.ok
    except Exception as ex:
        # logging.warn is deprecated; use logging.warning
        logging.warning('exception while uploading %s', ex)
    return False
def _append_url(base_url, fragment):
    """Join *base_url* and *fragment* with exactly one '/' between them."""
    if base_url.endswith('/'):
        return base_url + fragment
    return base_url + '/' + fragment
| 13,178 | 3,594 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: h03.py
Author: Scott Yang(Scott)
Email: yangyingfa@skybility.com
Copyright: Copyright (c) 2021, Skybility Software Co.,Ltd. All rights reserved.
Description:
"""
import threading
import time
semaphore = threading.Semaphore(10)
def run(n):
    """Worker: hold one semaphore slot for two seconds, then report."""
    # The semaphore caps how many workers sleep concurrently (10 at a time).
    with semaphore:
        time.sleep(2)
        print(f'run the thread: {n}')
def main():
    """Start 50 worker threads and wait for all of them to finish."""
    threads = []
    for i in range(50):
        t = threading.Thread(target=run, args=(i,))
        t.start()
        threads.append(t)
    # Join the workers instead of busy-waiting on threading.active_count(),
    # which burned a full CPU core while the threads slept.
    for t in threads:
        t.join()
    print('all thread done')
if __name__ == '__main__':
    # Entry point: only run the demo when executed directly as a script.
    main()
| 676 | 256 |
"""Intervention Module"""
from flask import current_app
from sqlalchemy import and_
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import BadRequest
from ..database import db
from ..dict_tools import strip_empties
from .lazy import query_by_name
from .role import ROLE
LOGOUT_EVENT = 0b001
USER_DOC_EVENT = 0b010
class DisplayDetails(object):
    """Aggregated display details handed to the front end.

    Intervention access can be customized at several levels.  For a user,
    access is either available or not, and when available, the link
    controls may be intentionally disabled for a reason the intervention
    should note in the status_text field.

    Attributes::

        access: {True, False}
        card_html: Text to display on the card
        link_label: Text used to label the button or hyperlink
        link_url: URL for the button or link - link to be disabled when null
        status_text: Text to inform user of status, or why it's disabled

    """

    def __init__(self, access, intervention, user_intervention):
        """Collect the best available values, preferring user overrides."""
        def preferred(attr):
            # A truthy user-level value wins; otherwise fall back to the
            # intervention default (matches the old ``a and b or c`` idiom:
            # falsy overrides also fall through).
            override = getattr(user_intervention, attr) if user_intervention else None
            return override or getattr(intervention, attr)

        self.access = access
        self.card_html = preferred('card_html')
        self.link_label = preferred('link_label')
        self.link_url = preferred('link_url')
        self.status_text = preferred('status_text')
class Intervention(db.Model):
    """An intervention (sub-application) users may be granted access to.

    ``subscribed_events`` is a bitmask combining LOGOUT_EVENT and
    USER_DOC_EVENT; the hybrid properties below expose the individual bits.
    """
    __tablename__ = 'interventions'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    description = db.Column(db.Text, nullable=False)
    # nullable as interventions may not have a valid client
    client_id = db.Column(db.ForeignKey('clients.client_id'))
    card_html = db.Column(db.Text)
    link_label = db.Column(db.Text)
    link_url = db.Column(db.Text)
    status_text = db.Column(db.Text)
    public_access = db.Column(db.Boolean, default=True)
    display_rank = db.Column(db.Integer)
    subscribed_events = db.Column(db.Integer, nullable=False, default=0)
    client = db.relationship(
        'Client',
        primaryjoin="Client.client_id==Intervention.client_id",
        uselist=False, backref='Client')
    # Ordered list of strategies consulted by display_for_user /
    # quick_access_check, lowest rank first.
    access_strategies = db.relationship(
        'AccessStrategy', order_by="AccessStrategy.rank")
    @hybrid_property
    def subscribed_to_logout_event(self):
        # Non-zero (truthy) when the LOGOUT_EVENT bit is set.
        return self.subscribed_events & LOGOUT_EVENT
    @subscribed_to_logout_event.setter
    def subscribed_to_logout_event(self, value):
        # Set or clear only the LOGOUT_EVENT bit, leaving others intact.
        if value:
            self.subscribed_events = self.subscribed_events | LOGOUT_EVENT
        else:
            self.subscribed_events = self.subscribed_events & ~LOGOUT_EVENT
    @hybrid_property
    def subscribed_to_user_doc_event(self):
        # Non-zero (truthy) when the USER_DOC_EVENT bit is set.
        return self.subscribed_events & USER_DOC_EVENT
    @subscribed_to_user_doc_event.setter
    def subscribed_to_user_doc_event(self, value):
        # Set or clear only the USER_DOC_EVENT bit, leaving others intact.
        if value:
            self.subscribed_events = self.subscribed_events | USER_DOC_EVENT
        else:
            self.subscribed_events = self.subscribed_events & ~USER_DOC_EVENT
    def as_json(self):
        """Returns the 'safe to export' portions of an intervention
        The client_id and link_url are non-portable between systems.
        The id is also independent - return the rest of the not null
        fields as a simple json dict.
        NB for staging exclusions to function, link_url and client_id
        are now included.  Take care to remove it from persistence files
        where it is NOT portable, for example, when generating persistence
        files programmatically.
        """
        d = {'resourceType': 'Intervention'}
        for attr in ('name', 'description', 'card_html', 'link_label',
                     'status_text', 'public_access', 'display_rank',
                     'subscribed_events', 'link_url', 'client_id'):
            # Only include populated fields; None values are dropped.
            if getattr(self, attr, None) is not None:
                d[attr] = getattr(self, attr)
        return d
    @staticmethod
    def rct_ids():
        """returns list of RCT (randomized control trial) intervention ids"""
        names = current_app.config.get('RCT_INTERVENTIONS')
        if not names:
            # No RCT interventions configured on this deployment.
            return None
        ids = [i.id for i in Intervention.query.filter(
            Intervention.name.in_(names))]
        if len(ids) != len(names):
            raise ValueError(
                "can't locate all interventions named in config "
                "'RCT_INTERVENTIONS': {}".format(names))
        return ids
    @classmethod
    def from_json(cls, data):
        # Alternate constructor: build a fresh instance from exported JSON.
        intervention = cls()
        return intervention.update_from_json(data)
    def update_from_json(self, data):
        """Apply exported JSON to this instance; returns self.

        :raises ValueError: if the required 'name' field is missing
        """
        if 'name' not in data:
            raise ValueError("required 'name' field not found")
        for attr in ('name', 'description', 'card_html', 'link_label',
                     'status_text', 'public_access', 'display_rank',
                     'subscribed_events'):
            if attr in data:
                setattr(self, attr, data.get(attr))
        # link_url and client_id are special - generally we don't pull
        # from persisted format as each instance is configured to
        # communicate with distinct interventions.  As it is used
        # for prod -> staging, warn if seen on any other system
        # NOTE(review): "SYSTEMT_TYPE" looks like a typo for "SYSTEM_TYPE" —
        # confirm against the app configuration before changing.
        if 'link_url' in data and self.link_url != data['link_url']:
            if current_app.config.get("SYSTEMT_TYPE", '').lower() != 'staging':
                current_app.logger.warning(
                    "IMPORTING non-portable intervention({}) link_url: '{}'"
                    "".format(self.name, data['link_url']))
            self.link_url = data['link_url']
        if 'client_id' in data and self.client_id != data['client_id']:
            if current_app.config.get("SYSTEMT_TYPE", '').lower() != 'staging':
                current_app.logger.warning(
                    "IMPORTING non-portable intervention({}) client_id: '{}'"
                    "".format(self.name, data['client_id']))
            self.client_id = data['client_id']
        return self
    def fetch_strategies(self):
        """Generator to return each registered strategy

        Strategies need to be brought to life from their persisted
        state.  This generator does so, and returns them in a call
        ready fashion, ordered by the strategy's rank.

        """
        for strat in self.access_strategies:
            func = strat.instantiate()
            yield func
    def display_for_user(self, user):
        """Return the intervention display details for the given user

        Somewhat complicated method, depending on intervention configuration.
        The following ordered steps are used to determine if a user
        should have access to an intervention.  The first 'true' found
        provides access, otherwise the intervention will not be displayed.

        1. call each strategy_function in intervention.access_strategies.
           Note, on rare occasions, a strategy may alter the UserIntervention
           attributes given the circumstances.
        2. check for a UserIntervention row defining access for the given
           user on this intervention.
        3. check if the intervention has `public_access` set

        @return DisplayDetails object defining 'access' and other details
        for how to render the intervention.

        """
        access = False
        # 1. check strategies for access
        for func in self.fetch_strategies():
            if func(intervention=self, user=user):
                access = True
                break
        # 2. check user_intervention for access
        ui = UserIntervention.query.filter_by(
            user_id=user.id, intervention_id=self.id).first()
        if ui and ui.access == 'granted':
            access = True
        # 3. check intervention scope for access
        # (NB - tempting to shortcut by testing this first, but we
        # need to allow all the strategies to run in case they alter settings)
        if self.public_access:
            access = True
        return DisplayDetails(
            access=access, intervention=self, user_intervention=ui)
    def quick_access_check(self, user):
        """Return boolean representing given user's access to intervention

        Somewhat complicated method, depending on intervention configuration.
        The following ordered steps are used to determine if a user
        should have access to an intervention.  The first 'true' found
        is returned (as to make the check as quick as possible).

        1. check if the intervention has `public_access` set
        2. check for a UserIntervention row defining access for the given
           user on this intervention.
        3. call each strategy_function in intervention.access_strategies.

        @return boolean representing 'access'.

        """
        # 1. check intervention scope for access
        if self.public_access:
            return True
        # 2. check user_intervention for access
        ui = UserIntervention.query.filter_by(
            user_id=user.id, intervention_id=self.id).first()
        if ui and ui.access == 'granted':
            return True
        # 3. check strategies for access
        for func in self.fetch_strategies():
            # NOTE(review): this strategy is granted access by *name* without
            # being invoked — presumably because it mutates card_html rather
            # than gating access; confirm the intent.
            if func.__name__ == 'update_user_card_html':
                return True
            if func(intervention=self, user=user):
                return True
        return False
    def __str__(self):
        """print details needed in audit logs"""
        # The DEFAULT intervention is a catch-all; keep audit logs quiet.
        if self.name == INTERVENTION.DEFAULT.name:
            return ""
        return ("Intervention: {0.description}, "
                "public_access: {0.public_access}, "
                "card_html: {0.card_html}, "
                "link_label: {0.link_label}, "
                "link_url: {0.link_url}, "
                "status_text: {0.status_text},"
                "subscribed_events: {0.subscribed_events}".format(self))
access_types = ('forbidden', 'granted', 'subscribed')
access_types_enum = ENUM(*access_types, name='access', create_type=False)
class UserIntervention(db.Model):
    """Per-user intervention association and display overrides.

    A row's ``access`` value ('forbidden'/'granted'/'subscribed') controls
    the user's access; the text/link columns override the intervention's
    defaults when set (see DisplayDetails).
    """
    __tablename__ = 'user_interventions'
    id = db.Column(db.Integer, primary_key=True)
    access = db.Column('access', access_types_enum, default='forbidden')
    card_html = db.Column(db.Text)
    staff_html = db.Column(db.Text)
    link_label = db.Column(db.Text)
    link_url = db.Column(db.Text)
    status_text = db.Column(db.Text)
    user_id = db.Column(db.ForeignKey('users.id'), nullable=False)
    intervention_id = db.Column(
        db.ForeignKey('interventions.id'), nullable=False)
    def as_json(self, include_empties=True):
        """Serialize to a dict; drop empty values when requested."""
        d = {'user_id': self.user_id}
        for field in ('access', 'card_html', 'staff_html',
                      'link_label', 'link_url', 'status_text'):
            d[field] = getattr(self, field)
        if not include_empties:
            return strip_empties(d)
        return d
    def update_from_json(self, data):
        # Apply every provided key verbatim; no validation is performed here.
        for attr in data:
            setattr(self, attr, data[attr])
    @classmethod
    def user_access_granted(cls, intervention_id, user_id):
        """Shortcut to query for specific (intervention, user) access"""
        q = cls.query.filter(and_(
            cls.user_id == user_id,
            cls.intervention_id == intervention_id,
            cls.access == 'granted'))
        return q.count() > 0
def intervention_restrictions(user):
    """returns tuple of lists for interventions: (disallow, require)

    Users may not have access to some interventions (such as randomized
    control trials).  In such a case, the first of the tuple items
    will name intervention ids which should not be included.

    Other users get access to all patients with one or more
    interventions.  In this case, a list of interventions for which
    the user should be granted access is in the second position.

    :returns disallow, require::
      disallow: list of intervention IDs to exclude associated patients,
        such as the randomized control trial interventions.
      require: list of intervention IDs if patients must also have the
        respective UserIntervention association.

    """
    if user.has_role(ROLE.ADMIN.value):
        # Admins see everything - no restrictions in either direction.
        return None, None

    disallowed, required = None, None
    is_staff = user.has_role(ROLE.STAFF.value)
    is_intervention_staff = user.has_role(ROLE.INTERVENTION_STAFF.value)

    if is_staff:
        if is_intervention_staff:
            raise BadRequest(
                "Patients list for staff and intervention-staff are "
                "mutually exclusive - user shouldn't have both roles")
        # staff users aren't to see patients from RCT interventions
        disallowed = Intervention.rct_ids()

    if is_intervention_staff:
        # Restrict to patients on this staff member's interventions.
        associations = UserIntervention.query.filter(
            UserIntervention.user_id == user.id)
        if not associations.count():
            raise BadRequest("User is not associated with any intervention.")
        required = [ui.intervention_id for ui in associations]

    return disallowed, required
STATIC_INTERVENTIONS = {
'analytics': 'Analytics',
'assessment_engine': 'Assessment Engine',
'care_plan': 'Care Plan',
'community_of_wellness': 'Community of Wellness',
'decision_support_p3p': 'Decision Support P3P',
'decision_support_wisercare': 'Decision Support WiserCare',
'music': 'MUSIC Integration',
'psa_tracker': 'PSA Tracker',
'self_management': 'Self Management',
'sexual_recovery': 'Sexual Recovery',
'social_support': 'Social Support Network',
'default': 'OTHER: not yet officially supported',
}
def add_static_interventions():
    """Seed database with default static interventions

    Idempotent - run anytime to push any new interventions into existing dbs
    """
    for name, description in STATIC_INTERVENTIONS.items():
        # Only create rows that don't already exist.
        if Intervention.query.filter_by(name=name).first():
            continue
        db.session.add(Intervention(
            name=name, description=description, card_html=description,
            subscribed_events=LOGOUT_EVENT))
class _NamedInterventions(object):
    """Bunch pattern class to house references to interventions

    Don't use this class directly - make reference to its user,
    the INTERVENTION instance.

    Specialized to handle only Interventions.  Attributes
    (all without a leading '_') assumed to be interventions and may
    be referenced in upper or lower case.

    """
    def __init__(self, **kwargs):
        # Store a lazy query callable per name; the db lookup is deferred
        # until the attribute is first accessed (see __getattribute__).
        for k, v in kwargs.items():
            setattr(self, k, query_by_name(Intervention, k))
    def __getattribute__(self, attr):
        # Internal attributes pass straight through.
        if attr.startswith('_'):
            return object.__getattribute__(self, attr)
        # Catch KeyError in case it's a dynamically added intervention
        # (i.e. not from static list)
        try:
            # Invoke the stored lazy-query callable (case-insensitive name).
            value = self.__dict__[attr.lower()].__call__(self)
        except NoResultFound:
            raise AttributeError("Intervention {} not found".format(attr))
        except KeyError:
            # Fall back to a direct, case-sensitive db query.
            query = Intervention.query.filter_by(name=attr)
            if not query.count():
                raise AttributeError(
                    "Intervention {} not found".format(attr))
            value = query.one()
        return value
    def __iter__(self):
        # Yield each resolvable intervention instance.
        for attr in dir(self):
            if attr.startswith('_'):
                continue
            try:
                yield getattr(self, attr)
            except AttributeError:
                # Intervention from static list not found in db, skip
                continue
    def __contains__(self, item):
        # Membership means the named intervention resolves to a db row.
        try:
            self.__getattribute__(item)
            return True
        except AttributeError:
            return False
"""INTERVENTION behaves like a static accessor for all interventions.
Obtain intervention of choice by name in upper or lower case or by string:
sr = INTERVENTION.SEXUAL_RECOVERY
sr = INTERVENTION.sexual_recovery
sr = getattr(INTERVENTION, 'sexual_recovery')
"""
INTERVENTION = _NamedInterventions(**STATIC_INTERVENTIONS)
| 16,523 | 4,640 |
# DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
#
# This material is based upon work supported by the Assistant Secretary of Defense for Research and
# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,
# findings, conclusions or recommendations expressed in this material are those of the author(s) and
# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and
# Engineering.
#
# © 2017 Massachusetts Institute of Technology.
#
# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or
# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are
# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than
# as specifically authorized by the U.S. Government may violate any copyrights that exist in this
# work.
import torch
import numpy as np
from scipy.misc import imread, imresize
from torchvision.models import resnet101
def load_feature_extractor(model_stage=2):
    """ Load the appropriate parts of ResNet-101 for feature extraction.

    Parameters
    ----------
    model_stage : Integral
        The stage of ResNet-101 from which to extract features.
        For 28x28 feature maps, this should be 2. For 14x14 feature maps, 3.

    Returns
    -------
    torch.nn.Sequential
        The feature extractor (ResNet-101 at `model_stage`)

    Notes
    -----
    This function will download ResNet-101 if it is not already present through torchvision.
    """
    resnet = resnet101(pretrained=True)
    # Stem layers, followed by residual stages layer1 .. layer<model_stage>.
    layers = [resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool]
    layers.extend(getattr(resnet, 'layer{}'.format(stage))
                  for stage in range(1, model_stage + 1))
    extractor = torch.nn.Sequential(*layers)
    if torch.cuda.is_available():
        extractor.cuda()
    # eval() disables dropout/batch-norm updates for inference.
    return extractor.eval()
def extract_image_feats(img_path, model):
    """ Extract image features from the image at `img_path` using `model`.

    Parameters
    ----------
    img_path : Union[pathlib.Path, str]
        The path to the image file.
    model : torch.nn.Module
        The feature extractor to use.

    Returns
    -------
    Tuple[numpy.ndarray, torch.Tensor]
        The image and image features extracted from `model`
    """
    # read in the image and transform it to shape (1, 3, 224, 224)
    path = str(img_path)  # to handle pathlib
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # this module requires an older SciPy (or a port to imageio/Pillow).
    img = imread(path, mode='RGB')
    img = imresize(img, (224, 224), interp='bicubic')
    img = img.transpose(2, 0, 1)[None]
    # use ImageNet statistics to transform the data
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    # bug fix: the blue-channel std is 0.225 (was mistyped as 0.224)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
    img_tensor = torch.FloatTensor((img / 255 - mean) / std)
    # push to the GPU if possible
    if torch.cuda.is_available():
        img_tensor = img_tensor.cuda()
    return (img.squeeze().transpose(1, 2, 0), model(img_tensor))
| 3,156 | 1,108 |
from .models import VirusData
from app import ma
class DataSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow schema (de)serializing VirusData model rows."""
    class Meta:
        # Bind the auto-schema to the VirusData SQLAlchemy model.
        model = VirusData
    # Explicitly expose the model columns as schema fields.
    id = ma.auto_field()
    name = ma.auto_field()
    case_total = ma.auto_field()
    case_today = ma.auto_field()
    recovered_total = ma.auto_field()
    death_today = ma.auto_field()
    death_total = ma.auto_field()
    date = ma.auto_field()
# init marshmallow schema
# Module-level singletons: one schema for single records, one for lists.
data_schema = DataSchema()
datas_schema = DataSchema(many=True)
| 491 | 168 |
import sys
import collections
from queue import PriorityQueue
from itertools import count
from ai_solutions.graph_node import GraphNode
class MazeSolver:
'''
this class is for solving the maze question
using three algorithms:
- BFS
- IDS
- A*
'''
def __init__(self, source, destination, black_cells, size=20):
'''
Initial the source and destination cells
@param source: source cell, two element (row, col)
@type source: tuple
@param destination: dst, two elemets (row, col)
@type destication: tuple
@param BLACKED: list of black cells in the maze like [(row, col), (row, col), etc]
@type type: tuple
@param size: the maze dimention size, this will create size*size maze
@type size: integer
'''
# variables
self.FIRST = (0, 0)
self.LAST = (size - 1, size - 1)
self.BLACKED = set([(x[0], x[1]) for x in black_cells])
self.SRC = GraphNode(None, (source[0], source[1]))
self.DST = GraphNode(None, (destination[0], destination[1]))
self.size = size
print(20*'#' + '\n' + "MazeSolver creation" + '\n' + 20*'#' + '\n')
def create_path(self, node: GraphNode):
"""creates the solution path based on the parents till it visits SRC
@param node: dst node wchich we wanna find path to it from source
@type node: GraphNode
@returns: list of the coordinates to go in correct order
@rtype: list
"""
path = []
cost = 0
while node:
path.insert(0, node.coordinate)
node = node.parent
cost = cost + 1
print(20*'#' + '\n' + "path creation" + '\n' + 20*'#' + '\n')
return path, cost
def is_child_valid(self, node: GraphNode):
'''
checks if the created node is valid or not
@param node: input node to check
@type node: GraphNode
@returns: boolean value which shows if the created node is valid or not
@rtype: bool
'''
if node.coordinate[0] < 0 or node.coordinate[1] < 0 or \
node.coordinate[0] >= self.size or node.coordinate[1] >= self.size or \
node.coordinate == node.parent.coordinate or node.coordinate in self.BLACKED :
return False
return True
def get_children(self, node: GraphNode):
'''
this will create the child nodes then return them as a list
@param node: the node we wanna search for the children
@type node: GraphNode
@returns: list of children
@rtype: list
'''
children = []
try:
b_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] - 1))
t_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] + 1))
l_child = GraphNode(
node, (node.coordinate[0] - 1, node.coordinate[1]))
r_child = GraphNode(
node, (node.coordinate[0] + 1, node.coordinate[1]))
if self.is_child_valid(b_child):
children.append(b_child)
if self.is_child_valid(t_child):
children.append(t_child)
if self.is_child_valid(r_child):
children.append(r_child)
if self.is_child_valid(l_child):
children.append(l_child)
print(20*'#' + '\n' + "get children" + '\n' + 20*'#' + '\n')
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return children
def get_specific_child(self, node: GraphNode, which_child):
'''
this will create specific child of the given node
@param node: the node we wanna search for the children
@type node: GraphNode
@param which_child: L-eft, R-ight, B-ottom, T-op
@type which_child: char
@returns: list of children
@rtype: list
'''
try:
if which_child == 'B':
b_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] - 1))
if self.is_child_valid(b_child):
return b_child
if which_child == 'T':
t_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] + 1))
if self.is_child_valid(t_child):
return t_child
if which_child == 'L':
l_child = GraphNode(
node, (node.coordinate[0] - 1, node.coordinate[1]))
if self.is_child_valid(l_child):
return l_child
if which_child == 'R':
r_child = GraphNode(
node, (node.coordinate[0] + 1, node.coordinate[1]))
if self.is_child_valid(r_child):
return r_child
return None
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return None
def bfs_graph_search(self):
'''
solve the maze by bfs graph search
@returns : solution path, cost, count of explored set
'''
try:
queue = collections.deque([self.SRC])
explored_set = set()
while queue:
curr = queue.popleft()
if curr == self.DST:
# returning path, cost and explored_set count
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
explored_set.add(curr.coordinate)
# add current cell's child to queue to visit
children = self.get_children(curr)
for child in children:
if child.coordinate not in explored_set:
queue.append(child)
return [], 'Inf', list(explored_set) # no answer found
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
    def dls_graph_search(self, cut_off):
        '''This is the graph search implementation of dls Alg
        it is specificly implemented for solving Maze
        @returns : solution path, cost, count of explored set or False
        '''
        if cut_off < 1:
            return False
        # Upper bound on explorable cells for this depth: sum of 4**i for
        # i in 0..cut_off (each cell has at most 4 neighbours).
        set_limit = 0
        for i in range(0, cut_off+1):
            set_limit = set_limit + 4 ** i
        explored_set = set()
        level = 0
        try:
            level = level+1
            curr = self.SRC
            explored_set.add(curr.coordinate)
            if curr.coordinate == self.DST.coordinate:
                # trivial case: source equals destination
                path, cost = self.create_path(self.SRC)
                return path, cost-1, list(explored_set)
            while True:
                print(len(explored_set), "\n", set_limit, "\n")
                if(len(explored_set) == set_limit):
                    # every reachable cell within the cut-off was explored
                    break
                if level == cut_off:
                    # depth limit reached - backtrack to the parent
                    level = level - 1
                    curr = curr.parent
                    continue
                # Try each direction (T, R, B, L) and descend into the first
                # unexplored valid child.
                # NOTE(review): the trace below prints "TOP:" for every
                # direction, not only 'T' - looks like a copy/paste slip.
                tmp = self.get_specific_child(curr, 'T')
                if tmp and tmp.coordinate not in explored_set:
                    curr = tmp
                    print(f"TOP:\t{curr.coordinate}")
                    level = level + 1
                    explored_set.add(curr.coordinate)
                    if curr == self.DST:
                        path, cost = self.create_path(curr)
                        print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
                        return path, cost-1, list(explored_set)
                    continue
                tmp = self.get_specific_child(curr, 'R')
                if tmp and tmp.coordinate not in explored_set:
                    curr = tmp
                    print(f"TOP:\t{curr.coordinate}")
                    level = level + 1
                    explored_set.add(curr.coordinate)
                    if curr == self.DST:
                        path, cost = self.create_path(curr)
                        print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
                        return path, cost-1, list(explored_set)
                    continue
                tmp = self.get_specific_child(curr, 'B')
                if tmp and tmp.coordinate not in explored_set:
                    curr = tmp
                    print(f"TOP:\t{curr.coordinate}")
                    level = level + 1
                    explored_set.add(curr.coordinate)
                    if curr == self.DST:
                        path, cost = self.create_path(curr)
                        print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
                        return path, cost-1, list(explored_set)
                    continue
                tmp = self.get_specific_child(curr, 'L')
                if tmp and tmp.coordinate not in explored_set:
                    curr = tmp
                    print(f"TOP:\t{curr.coordinate}")
                    level = level + 1
                    explored_set.add(curr.coordinate)
                    if curr == self.DST:
                        path, cost = self.create_path(curr)
                        print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
                        return path, cost-1, list(explored_set)
                    continue
                # no unexplored child in any direction - backtrack
                level = level - 1
                curr = curr.parent
            # NOTE(review): returns a 2-tuple here but a 3-tuple on success -
            # callers must handle both shapes; confirm whether this is intended.
            return False, list(explored_set)
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
def ids_graph_search(self):
'''
solve the maze by ids graph search
@returns : solution path, cost, count of explored set or False
'''
try:
explored_set_tmp = []
for cut_off in count(start=1):
print(20*"!@#$")
print(f"new round\t{cut_off}")
print(20*"!@#$")
result = self.dls_graph_search(cut_off)
if result != False and isinstance(result[1], int):
return result
if result == False:
continue
if result[0] == False:
return [], 'Inf', result[1] # no answer found
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
def aStar_graph_search(self):
'''
solve the maze by a* graph search
@returns : solution path, cost, count of explored set
'''
try:
to_check = []
explored_set = []
to_check.append(self.SRC)
while to_check:
current_cell = to_check.pop(0)
if current_cell.coordinate not in explored_set:
explored_set.append(current_cell.coordinate)
if current_cell.coordinate == self.DST.coordinate:
path, cost = self.create_path(current_cell)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
children = self.get_children(current_cell)
for child in children:
child.payed = abs(child.coordinate[0] - self.SRC.coordinate[0]) + abs(
child.coordinate[1] - self.SRC.coordinate[1])
child.hurestic = abs(child.coordinate[0] - self.DST.coordinate[0]) + abs(
child.coordinate[1] - self.DST.coordinate[1])
child.total = child.payed + child.hurestic
flag = True
for tmp in to_check:
if (child == tmp and child.total >= tmp.total):
flag = False
break
if flag:
if child.coordinate not in explored_set:
to_check.append(child)
to_check = sorted(to_check, key=lambda GraphNode_ob: GraphNode_ob.total)
return [], 'Inf', list(explored_set)
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
return False
"""
points to pay attention
- set is not serializable so use list at last step
- checking advance debugging in python
"""
# source: https://towardsdatascience.com/a-star-a-search-algorithm-eb495fb156bb
# source: https://www.annytab.com/a-star-search-algorithm-in-python/
# we have to check something important
'''
what if they set the start at 1,1 and the walls arround it?
'''
| 14,080 | 3,872 |
import argparse
from distutils.util import strtobool
import json
import os
import pickle
import numpy as np
import tensorflow as tf
import pdb
from softlearning.environments.utils import get_environment_from_params
from softlearning.policies.utils import get_policy_from_variant
# from softlearning.samplers import rollouts
from softlearning import replay_pools
from softlearning.samplers import (
dummy_sampler,
extra_policy_info_sampler,
remote_sampler,
base_sampler,
simple_sampler)
def parse_args():
    """Build and parse the command-line arguments for policy evaluation.

    Returns
    -------
    argparse.Namespace
        Holds ``checkpoint_path``, ``max_path_length``, ``num_rollouts``,
        ``render_mode`` and ``deterministic``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'checkpoint_path', type=str, help='Path to the checkpoint.')
    parser.add_argument(
        '--max-path-length', '-l', type=int, default=1000)
    parser.add_argument(
        '--num-rollouts', '-n', type=int, default=10)
    parser.add_argument(
        '--render-mode', '-r',
        type=str,
        default=None,
        choices=('human', 'rgb_array', None),
        help="Mode to render the rollouts in.")
    parser.add_argument(
        '--deterministic', '-d',
        # strtobool returns 0/1; wrap in bool() for a real boolean.
        type=lambda x: bool(strtobool(x)),
        nargs='?',
        const=True,
        default=True,
        help="Evaluate policy deterministically.")
    return parser.parse_args()
def rollout(env,
            policy,
            path_length,
            callback=None,
            render_mode=None,
            break_on_terminal=True):
    """Step ``policy`` in ``env`` for up to ``path_length`` steps and return
    the collected path as a dict.

    @param env: environment exposing ``observation_space``/``action_space``
        and, via ``unwrapped``, a mujoco-style ``state_vector()``.
    @param policy: policy object; ``policy.reset()`` is called on terminal.
    @param path_length: maximum number of environment steps.
    @param callback: optional callable invoked with each observation.
    @param render_mode: ``None``, ``'human'`` or ``'rgb_array'``; rgb frames
        are stacked into ``path['images']``.
    @param break_on_terminal: if True, stop at the first terminal step.
    @returns: batch dict from the replay pool, extended with ``'infos'``,
        ``'state_vectors'`` and optionally ``'images'``.
    """
    observation_space = env.observation_space
    action_space = env.action_space
    # Throwaway pool sized for exactly one path; the sampler writes every
    # transition into it as it steps the environment.
    pool = replay_pools.SimpleReplayPool(
        observation_space, action_space, max_size=path_length)
    sampler = simple_sampler.SimpleSampler(
        max_path_length=path_length,
        min_pool_size=None,
        batch_size=None)
    sampler.initialize(env, policy, pool)
    images = []
    infos = []
    state_vectors = []
    t = 0
    for t in range(path_length):
        observation, reward, terminal, info = sampler.sample()
        # Raw simulator state recorded alongside the pooled observations.
        state_vector = sampler.env.unwrapped.state_vector()
        infos.append(info)
        state_vectors.append(state_vector)
        if callback is not None:
            callback(observation)
        if render_mode is not None:
            if render_mode == 'rgb_array':
                image = env.render(mode=render_mode)
                images.append(image)
            else:
                env.render()
        if terminal:
            policy.reset()
            if break_on_terminal: break
    # NOTE(review): fails when path_length == 0 (t stays 0 but the pool is
    # empty); also relies on the sampler storing every sampled step.
    assert pool._size == t + 1
    path = pool.batch_by_indices(
        np.arange(pool._size),
        observation_keys=getattr(env, 'observation_keys', None))
    path['infos'] = infos
    # Shift by one so entry i holds the state *before* step i; the first
    # entry is the sampler's private reset state -- TODO confirm attribute.
    path['state_vectors'] = np.array([sampler._reset_state_vector] + state_vectors[:-1])
    if render_mode == 'rgb_array':
        path['images'] = np.stack(images, axis=0)
    return path
def rollouts(n_paths, *args, **kwargs):
    """Collect ``n_paths`` independent rollouts and return them as a list."""
    return [rollout(*args, **kwargs) for _ in range(n_paths)]
def simulate_policy(args):
    """Restore a checkpointed policy and roll it out in its evaluation env.

    Reads ``params.json`` from the experiment directory (the checkpoint's
    parent), restores policy weights from ``checkpoint.pkl``, runs
    ``args.num_rollouts`` rollouts and prints summary rewards.

    @param args: namespace produced by ``parse_args()``.
    @returns: list of path dicts from ``rollouts``.
    """
    session = tf.keras.backend.get_session()
    checkpoint_path = args.checkpoint_path.rstrip('/')
    experiment_path = os.path.dirname(checkpoint_path)
    variant_path = os.path.join(experiment_path, 'params.json')
    with open(variant_path, 'r') as f:
        variant = json.load(f)
    with session.as_default():
        pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
        with open(pickle_path, 'rb') as f:
            # NOTE(review): unpickling trusts the checkpoint file; only load
            # checkpoints you produced yourself.
            picklable = pickle.load(f)
    # Prefer the dedicated evaluation environment when the variant has one.
    environment_params = (
        variant['environment_params']['evaluation']
        if 'evaluation' in variant['environment_params']
        else variant['environment_params']['training'])
    evaluation_environment = get_environment_from_params(environment_params)
    policy = (
        get_policy_from_variant(variant, evaluation_environment, Qs=[None]))
    policy.set_weights(picklable['policy_weights'])
    with policy.set_deterministic(args.deterministic):
        paths = rollouts(args.num_rollouts,
                         evaluation_environment,
                         policy,
                         path_length=args.max_path_length,
                         render_mode=args.render_mode)
    #### print rewards
    rewards = [path['rewards'].sum() for path in paths]
    print('Rewards: {}'.format(rewards))
    print('Mean: {}'.format(np.mean(rewards)))
    ####
    if args.render_mode != 'human':
        # NOTE(review): interactive debugger breakpoint left in -- every
        # non-interactive run drops into pdb here; remove for batch use.
        from pprint import pprint; import pdb; pdb.set_trace()
        pass
    return paths
if __name__ == '__main__':
    args = parse_args()
    paths = simulate_policy(args)
    # Flatten the per-rollout path dicts into a single dict whose values are
    # the per-key arrays concatenated across all rollouts.
    keys = paths[0].keys()
    paths = {key: np.concatenate([path[key] for path in paths]) for key in keys}
    print(paths.keys())
    print(paths['observations'].shape, paths['state_vectors'].shape)
    # pickle.dump(paths, open('data/hopper_state_vectors.pkl', 'wb'))
| 4,998 | 1,499 |
from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils
import mxnet as mx
import numpy as np
from PIL import Image
import json
from tqdm import tqdm
# epoch = 1
# Output root for the submission; the 'seg/' subdir receives one
# drivable-area PNG per test image, det4.json collects the boxes.
save_path = '/data1/datasets/bdd100k/testB_result/'
test_path = '/data1/datasets/bdd100k/images/100k/test2018/'
# Accumulates one record per kept detection; dumped to JSON at the end.
test_json = []
CLASSES = ['traffic light', 'traffic sign', 'person', 'rider', 'bike', 'bus', 'car', 'motor', 'train', 'truck']
# Minimum detection score for a box to be written to the result JSON.
_score_thresh = 0.5
ctx = mx.gpu(3)
# Resizes the predicted drivable map back to the full 1280x720 frame.
resize_map = mx.image.ForceResizeAug((1280,720), interp=2)
net = model_zoo.get_model('mask_rcnn_resnet50_v1b_bdd', pretrained=False, pretrained_base=False)
net.load_parameters('bddv4_continuemask_rcnn_resnet50_v1b_bdd_0024.params')
net.collect_params().reset_ctx(ctx)
def save_drivable_map(pred_map, file_id):
    """Turn a per-pixel class-score map into a drivable-area id PNG.

    Softmaxes over the class axis, thresholds at 0.5 (softmax guarantees at
    most one class exceeds it) and writes the resulting uint8 label image
    (0/1/2) under ``save_path + 'seg/'``.
    """
    drivable_name = file_id + '_drivable_id' + '.png'
    probs = mx.nd.softmax(pred_map, axis=2)
    winners = probs > 0.5
    class_ids = np.array([0,1,2])
    # Collapse the one-hot winner mask into a single-channel label map.
    label_map = np.sum(winners.asnumpy() * class_ids, axis=2).astype('uint8')
    Image.fromarray(label_map).save(save_path + 'seg/' + drivable_name, 'png')
# ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
# Run detection + drivable-area segmentation over every test image listed in
# the id file, writing one segmentation PNG per image and one aggregate JSON.
with open('ai_challenger_adp2018_testb_20180917_t4.txt','r') as f:
    for file_id in tqdm(f.readlines()):
        file_id = file_id.replace('\n','')
        filename = file_id+'.jpg'
        x, orig_img = data.transforms.presets.rcnn.load_test(test_path+filename, max_size=1280)
        ids, scores, bboxes, drivable_maps = net(x.as_in_context(ctx))
        # Pull the predictions for the first (only) image in the batch to CPU.
        det_id, det_score, det_bbox = [xx[0].asnumpy() for xx in [ids, scores, bboxes]]
        mask = drivable_maps[0].transpose((1,2,0)).as_in_context(mx.cpu())
        mask = resize_map(mask)
        # Save the drivable-area segmentation image.
        save_drivable_map(mask, file_id)
        # ids, scores, bboxes
        # Keep only detections with a valid class id and a score >= threshold.
        valid = np.where(((det_id >= 0) & (det_score >= _score_thresh)))[0]
        det_id = det_id[valid]
        det_score = det_score[valid]
        det_bbox = det_bbox[valid]
        # print(det_score.shape)
        for cid, score, bbox in zip(det_id, det_score, det_bbox):
            # print(cid)
            test_json.append({
                "name": filename,
                "timestamp": 1000,
                "category": CLASSES[int(cid[0])],
                "bbox": bbox.tolist(),
                "score": float(score[0])
            })
print(save_path + 'det4.json')
with open( save_path +'det4.json', 'w') as jsonf:
    json.dump(test_json, jsonf)
| 2,566 | 1,026 |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
input = int(input())
output = 0
temp = input
while True:
temp = temp % 10 * 10 + (int(temp / 10) + temp % 10) % 10
output += 1
if input == temp:
break
print(output)
| 300 | 112 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import scipy.misc
import glob
import scipy
import utils
import tensorflow as tf
""" param """
epoch = 50
batch_size = 64
lr = 0.0002
z_dim = 100
n_critic = 5
gpu_id = 3
''' data '''
# you should prepare your own data in ./data/img_align_celeba
# celeba original size is [218, 178, 3]
def preprocess_fn(img):
    """Center-crop a 218x178 CelebA image to 108x108, resize it to 64x64
    with bicubic interpolation and scale pixel values to [-1, 1]."""
    crop_size = 108
    re_size = 64
    top = (218 - crop_size) // 2
    left = (178 - crop_size) // 2
    cropped = tf.image.crop_to_bounding_box(img, top, left, crop_size, crop_size)
    resized = tf.image.resize_images(cropped, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)
    return tf.to_float(resized) / 127.5 - 1
sess = utils.session()
# iteration counter
it_cnt, update_cnt = utils.counter()
sess.run(tf.global_variables_initializer())
sess.run(it_cnt)
sess.run(update_cnt)
# Build the input pipeline from the CelebA jpgs on disk.
img_paths = glob.glob('/Users/idan.a/data/celeba/*.jpg')
data_pool = utils.DiskImageData(img_paths, batch_size, shape=[218, 178, 3], preprocess_fn=preprocess_fn)
# Number of generator updates per epoch (each uses n_critic critic batches).
batch_epoch = len(data_pool) // (batch_size * n_critic)
real_ipt = data_pool.batch()
sess.run(it_cnt)
it_epoch=1
# save_dir="tmp/"
# Save a 10x10 grid of real samples as a sanity check.
# NOTE(review): scipy.misc.imsave was removed in scipy 1.2 -- pin scipy or
# switch to imageio when upgrading.
scipy.misc.imsave('sss.png', utils.immerge(real_ipt, 10, 10))
| 1,222 | 533 |
from django.contrib import admin
from django.urls import path
from gifts import views
# Route the app root URL to the gifts list view.
urlpatterns = [
    path('', views.gifts, name='gifts')
]
| 153 | 51 |
import datetime
from django.db import models, connection
from seymour.feeds.models import Feed, Item, AccountFeed
class Account(models.Model):
    """A user account, identified either by e-mail + password or by OpenID.

    Exactly one of ``email`` (with a salted password hash) or ``openid``
    must be set; ``save()`` enforces the invariant.
    """
    openid = models.CharField('openid', max_length=255, null=True)
    firstname = models.CharField('first name', max_length=100, null=True)
    lastname = models.CharField('last name', max_length=100, null=True)
    email = models.EmailField('e-mail address', null=True)
    date_joined = models.DateTimeField('date joined', default=datetime.datetime.now, db_index=True)
    last_login = models.DateTimeField('last login', null=True, db_index=True)
    password_salt = models.CharField('password salt', max_length=100, null=True)
    password_hash = models.CharField('password hash', max_length=100, null=True)
    confirmation_code = models.CharField('confirmation code', max_length=50, null=True, db_index=True)
    is_active = models.BooleanField('active', default=True, db_index=True)

    class Meta:
        db_table = 'accounts'
        unique_together = (('email', 'openid'),)
        ordering = ['email', 'openid']

    def get_fullname(self):
        """Return "firstname lastname" when either part is set, else None."""
        if self.firstname or self.lastname:
            return u"%s %s" % (self.firstname, self.lastname)
        else:
            return None
    fullname = property(get_fullname)

    def set_password(self, raw_password):
        """Store a salted SHA-1 hash of ``raw_password`` on the instance.

        Uses ``hashlib.sha1`` instead of the long-deprecated ``sha`` module
        (removed in Python 3); the digests are identical.  Does not save.
        """
        import random
        import hashlib
        salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
        self.password_salt = salt
        self.password_hash = hashlib.sha1(salt + raw_password).hexdigest()

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct.
        """
        import hashlib
        hsh = hashlib.sha1(self.password_salt + raw_password).hexdigest()
        return self.password_hash == hsh

    def get_all_feeds(self):
        """Return all feeds associated with this account"""
        return self.accountfeed_set.select_related().filter().order_by('feeds.title')

    def get_unread_feeds(self):
        """Return a list of unread feeds for this account. Each feed has
        unread_total filled in."""
        # Parameterized query (was %-interpolation of self.id into the SQL
        # string) -- safe against injection and lets the backend quote it.
        sql = """
        SELECT items.feed_id, count(*)
        FROM items
        INNER JOIN accounts_feeds ON accounts_feeds.feed_id = items.feed_id
        WHERE accounts_feeds.account_id = %s AND
        (items.date_added > accounts_feeds.latest_read OR accounts_feeds.latest_read IS NULL)
        GROUP BY items.feed_id
        """
        cursor = connection.cursor()
        cursor.execute(sql, [self.id])
        # Map feed_id -> number of unread items.
        feed_totals = {}
        for feed_id, total_items in cursor.fetchall():
            feed_totals[feed_id] = total_items
        feed_ids = feed_totals.keys()
        if feed_ids:
            accountfeeds = AccountFeed.objects.select_related().filter(account=self, feed__id__in=feed_ids).order_by('feeds.title')
            for af in accountfeeds:
                af.feed.unread_total = feed_totals[af.feed.id]
            return accountfeeds
        else:
            return []

    def save(self, *args, **kwargs):
        """Validate the email/openid invariant, then save.

        Accepts and forwards Django's ``save`` arguments (``force_insert``
        etc.), which the original signature silently dropped.  Raises
        ``ValueError`` (a subclass of the previously raised bare
        ``Exception``) so existing broad handlers still work.
        """
        if self.email and self.openid:
            raise ValueError('account cannot have both an email and an openid')
        if not self.email and not self.openid:
            raise ValueError('account needs either an email or an openid')
        if self.openid and (self.password_salt or self.password_hash):
            raise ValueError('openid accounts must not carry a password')
        if self.email and not (self.password_salt and self.password_hash):
            raise ValueError('email accounts require a password')
        super(Account, self).save(*args, **kwargs)
| 3,824 | 1,102 |