text stringlengths 38 1.54M |
|---|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from . import views as mainpgviews
# URL routes for the main app: questions, likes, profiles, follows and auth.
# Fix: removed a stray trailing '|' after the DEBUG static() line, which
# was a syntax error.
urlpatterns = [
    path('', mainpgviews.home, name='home'),
    path('ask/', mainpgviews.ask.as_view(), name='askQ'),
    path('like/question/<int:q_id>/', mainpgviews.like, name='like'),
    path('dislike/question/<int:q_id>/', mainpgviews.dislike, name='dislike'),
    # Third-party auth (django-allauth) routes.
    path('accounts/', include('allauth.urls')),
    path('view/Question/<int:pk>/', mainpgviews.viewans, name='viewans'),
    path('answer/Question/<int:pk>/', mainpgviews.answer, name='answer'),
    path('profile/<int:pk>/', mainpgviews.seeprofile, name='seeprofile'),
    path('profile/edit/<int:pk>/', mainpgviews.edit, name='edit'),
    path('profile/create/<int:pk>/', mainpgviews.create, name='create'),
    path('profile/pic/edit/<int:pk>/', mainpgviews.editpic, name='editpic'),
    path('profile/<int:pk>/follow/', mainpgviews.follow, name='FOLLOW'),
    path('followinglist/', mainpgviews.followinglist, name='FOLLOWINGLIST'),
    path('profile/<int:pk>/unfollow/', mainpgviews.unfollow, name='UNFOLLOW'),
    path('feed/', mainpgviews.feed, name='feed'),
    path('profile/<int:pk>/twitter/', mainpgviews.twitter, name='twitter'),
    path('profile/<int:pk>/insta/', mainpgviews.insta, name='insta'),
    path('profile/<int:pk>/github/', mainpgviews.github, name='github'),
    path('profile/<int:pk>/linkedin/', mainpgviews.linkedin, name='linkedin'),
    path('register/User/', mainpgviews.register, name='register'),
    path('account_login/', mainpgviews.login, name='login'),
    path('account_logout/', mainpgviews.logout, name='logout'),
]

# Serve uploaded media through Django only in development; in production
# MEDIA_ROOT should be served by the web server.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import json
import csv
import os
def open_file_json(file):
    """Read every JSON file in *file* (an iterable of paths) and return the
    concatenation of their top-level lists as one list.

    Fixes: the original called op_f.close() after the `with` block had
    already closed the file (harmless but misleading), and used a manual
    append loop where extend() suffices.
    """
    output_data = []
    for path in file:
        # The context manager closes the file; no explicit close() needed.
        with open(path, "r", encoding="utf-8") as fp:
            output_data.extend(json.load(fp))
    return output_data
def open_file_csv(file):
    """Read every CSV file in *file* (an iterable of paths), skipping the
    header row of each, and return all remaining rows as one list of lists.

    Fixes: the original called op_f.close() after the `with` block had
    already closed the file, and skipped the header with a manual counter
    instead of next().
    """
    output_data = []
    for path in file:
        with open(path, "r", encoding="utf-8") as fp:
            reader = csv.reader(fp)
            next(reader, None)  # skip header row; default guards empty files
            output_data.extend(reader)
    return output_data
# test
# j_file=["./in_put_json/actor_trends.json","./in_put_json/actor_trends_10000-12959.json"]
# actor_count=open_file_json(j_file)
# print(actor_count[12595])
#
# movie_list_csv=[]
# movie_csv_file=os.listdir("./input_moviedata")
# for movie in movie_csv_file:
# movie="./input_moviedata/"+movie
# movie_list_csv.append(movie)
#
# movie_list=open_file_csv(movie_list_csv)
# print(os.listdir("./input_moviedata"))
# print(movie_list[0])
# print(movie_list[-1])
#
# movie2act_list_csv=[]
# movie2act_csv_file=os.listdir("./Movie_Actor_Name")
# for movie in movie2act_csv_file:
# movie="./Movie_Actor_Name/"+movie
# movie2act_list_csv.append(movie)
#
# movie2act_list=open_file_csv(movie2act_list_csv)
# print(os.listdir("./Movie_Actor_Name"))
# print(movie2act_list[0])
# print(movie2act_list[-1])
|
# Generated by Django 2.0.3 on 2018-03-27 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering ProjectStatus.status choices.
    # NOTE(review): the choices list contains ('closed', 'Closed') twice,
    # and the first entry's display value is a nested tuple
    # ('Active', 'ax') rather than a plain string — both look like
    # authoring mistakes.  Left byte-identical because this migration has
    # presumably already been applied; fix in a follow-up migration.
    dependencies = [
        ('dashboard', '0006_auto_20180327_2037'),
    ]
    operations = [
        migrations.AlterField(
            model_name='projectstatus',
            name='status',
            field=models.CharField(choices=[('active', ('Active', 'ax')), ('defect', 'Defect Liability Period'), ('closed', 'Closed'), ('suspended', 'Suspended'), ('closed', 'Closed')], default='active', max_length=60, verbose_name='Project Status'),
        ),
    ]
|
"""
import tkinter as tk
window=tk.Tk()
def open():
pass
def exit():
window.quit()
menubar = tk.Menu(window)
filemenu=tk.Menu(menubar)
filemenu.add_command(label="열기",command=open)
filemenu.add_command(label="종료",command=exit) #quit
menubar.add_cascade(label="파일", menu=filemenu)
window.config(menu=menubar)
window.mainloop()
"""
from tkinter import filedialog
from tkinter import *
from tkinter import messagebox
def open():
    # Prompt for a file and insert its contents at the top of the text widget.
    chosen = filedialog.askopenfile(parent=window, mode="r")
    if chosen is not None:
        contents = chosen.read()
        text.insert("1.0", contents)
        chosen.close()
def save():
    # Ask for a destination file and write the text widget's contents to it.
    target = filedialog.asksaveasfile(parent=window, mode="w")
    if target is not None:
        contents = text.get("1.0", END + "-1c")
        target.write(contents)
        target.close()
def info():
    # Show an "about" dialog.
    # Bug fix: the original passed the single string "about, 메모장프로그램",
    # which showinfo treats as the title only, leaving the message empty;
    # title and message are separate arguments.
    messagebox.showinfo("about", "메모장프로그램")
def exit():
    # Bug fix: this handler was a no-op (pass), so the "종료" (Exit) menu
    # entry did nothing.  Stop the Tk main loop, matching the commented-out
    # prototype at the top of this file which used window.quit().
    window.quit()
# --- GUI construction for a minimal notepad ---
window = Tk()
text=Text(window, height=30, width=80)  # main editing area
text.pack()
menu=Menu(window)
window.config(menu=menu)
filemenu=Menu(menu)
menu.add_cascade(label="파일", menu=filemenu)  # "File" menu
filemenu.add_command(label="열기", command=open)  # "Open"
filemenu.add_command(label="저장", command=save)  # "Save"
filemenu.add_command(label="종료", command=exit)  # "Exit"
helpmenu=Menu(menu)
menu.add_cascade(label="도움말",menu=helpmenu)  # "Help" menu
helpmenu.add_command(label="도움말 정보",command=info)  # "About"
window.mainloop()
|
import os
# Environment flags selecting which binding backend is under test; the
# tests below branch on these to mark known backend limitations (FIXMEs).
is_pyd = os.environ.get('PYD')
is_pynih = os.environ.get('PYNIH')
# Regression tests against the compiled `issues` extension module.  Each
# test imports lazily from `issues`; is_pyd / is_pynih gate expectations
# around documented backend limitations.
def test_issue_39():
    # Static member functions callable on both instance and class.
    from issues import StaticMemberFunctions
    s = StaticMemberFunctions()
    assert s.add(1, 2) == 3
    assert s.add(2, 3) == 5
    assert StaticMemberFunctions.add(3, 4) == 7
def test_issue_40_c():
    from issues import c_add
    assert c_add(2, 3) == 5
def test_issue_40_cpp():
    from issues import cpp_add
    assert cpp_add(2, 3) == 5
def test_issue_42_takes_in():
    from issues import IssueString, takes_in_string
    import pytest
    if is_pyd:
        # pyd can't convert to const
        with pytest.raises(RuntimeError):
            takes_in_string(IssueString())
def test_issue_42_takes_scope():
    from issues import IssueString, takes_scope_string
    takes_scope_string(IssueString())
def test_issue_42_takes_ref():
    from issues import IssueString, takes_ref_string
    takes_ref_string(IssueString())
def test_issue_42_takes_ref_const():
    from issues import IssueString, takes_ref_const_string
    import pytest
    if is_pyd:
        # pyd can't convert to const
        with pytest.raises(RuntimeError):
            takes_ref_const_string(IssueString())
def test_issue_42_returns_ref_const():
    from issues import returns_ref_const_string
    import pytest
    if is_pyd:
        # pyd can't convert from const(issues.IssueString*)
        with pytest.raises(RuntimeError):
            s = returns_ref_const_string()
            assert s.value == 'quux'
def test_issue_42_returns_const():
    from issues import returns_const_string
    import pytest
    if is_pyd:
        # pyd can't convert from const(issues.IssueString*)
        with pytest.raises(RuntimeError):
            returns_const_string()
    else:
        assert returns_const_string().value == 'toto'
def test_issue_44():
    import pytest
    # pynih can't even import the symbol; other backends exercise it.
    if is_pynih:
        with pytest.raises(ImportError):
            from issues import string_ptr
    else:
        from issues import string_ptr
        assert string_ptr('foo').value == 'foo'
        assert string_ptr('bar').value == 'bar'
def test_issue_47():
    from issues import uncopiable_ptr
    import pytest
    if is_pyd:
        with pytest.raises(RuntimeError):
            assert uncopiable_ptr(33.3).x == 33.3
            assert uncopiable_ptr(44.4).x == 44.4
    else:
        assert uncopiable_ptr(33.3).x == 33.3
        assert uncopiable_ptr(44.4).x == 44.4
def test_issue_50():
    from issues import String, takes_string
    takes_string(String())
def test_issue_54():
    from issues import Issue54
    import pytest
    c = Issue54(10)
    if is_pyd:
        # FIXME
        with pytest.raises(AttributeError):
            assert c.i == 10
    else:
        assert c.i == 10
def test_issue_153():
    # toString with a D sink delegate; import fails entirely under pyd.
    if is_pyd: # FIXME
        import pytest
        with pytest.raises(ImportError):
            from issues import Issue153
    else:
        from issues import Issue153
        txt = ""
        def sink(chars):
            nonlocal txt
            txt += chars
        c = Issue153(42)
        c.toString(sink)
        assert txt == "Issue153(42)"
def test_issue_159():
    if is_pyd: # FIXME
        import pytest
        with pytest.raises(ImportError):
            from issues import Socket
    else:
        from issues import Socket
        s = Socket()
        assert s.send([1, 2, 3]) == 3
        assert s.send([0, 1, 2, 3]) == 4
def test_issue_161():
    import pytest
    if is_pyd: # FIXME
        with pytest.raises(ImportError):
            from issues import Issue161
    else:
        from issues import Issue161
        e0 = Issue161()
        assert e0.msg == ""
        line = 42
        next = None
        err = -1
        def errorFormatter(i):
            return str(i) + 'oops'
        # This fails because conversion to D function pointers isn't
        # implemented
        with pytest.raises(RuntimeError):
            # FIXME - test the fields as well
            Issue161("msg", "file", line, next, err, errorFormatter)
def test_issue_163():
    import pytest
    from issues import issue163
    ints = [1, 2, 3]
    issue163(ints)
    with pytest.raises(AssertionError):
        assert ints == [1, 2, 3, 42]
    issue163(ints)
    assert ints == [1, 2, 3, 42, 42]
def test_issues_164():
    from issues import Issue164, MethodParamString
    i = Issue164()
    assert i.strlen(MethodParamString("foo")) == 3
    assert i.strlen(MethodParamString("quux")) == 4
# FIXME
def test_issues_166():
    # timedelta conversion not implemented; expected error type differs by backend.
    import pytest
    from issues import issue166_days, issue166_secs, issue166_usecs
    from datetime import timedelta
    delta = timedelta(days=42, seconds=77, microseconds=99)
    if is_pynih:
        exc = AssertionError
    else:
        exc = RuntimeError
    with(pytest.raises(exc)):
        assert(issue166_days(delta)) == 42
        assert(issue166_secs(delta)) == 77
        assert(issue166_usecs(delta)) == 99
|
"""
Source: Stack Abuse
Jump Search is similar to binary search in that it works on a sorted array, and uses a similar
divide and conquer approach to search through it. It can be classified as an improvement of
the linear search algorithm since it depends on linear search to perform the actual comparison
when searching for a value.
"""
import math
def JumpSearch(lys, val):
    """Return the index of *val* in the sorted list *lys*, or -1 if absent.

    Jump search: advance in blocks of ~sqrt(n) until a block that could
    contain *val* is bracketed, then scan that block linearly.
    Fixes: removed a stray trailing '|' token (syntax error) and a
    superfluous semicolon.
    """
    length = len(lys)
    jump = int(math.sqrt(length))
    left, right = 0, 0
    # Jump block by block until lys[left] <= val <= lys[right].
    while left < length and lys[left] <= val:
        right = min(length - 1, left + jump)
        if lys[left] <= val and lys[right] >= val:
            break
        left += jump
    # Ran off the end, or the bracketed block starts above val: not present.
    if left >= length or lys[left] > val:
        return -1
    right = min(length - 1, right)
    # Linear scan of the bracketed block.
    i = left
    while i <= right and lys[i] <= val:
        if lys[i] == val:
            return i
        i += 1
    return -1
def partition(arr, left, right):
    """Hoare-style partition of arr[left:right+1] around the middle element.

    Rearranges the slice in place so everything before the returned index
    is <= pivot and everything from it on is >= pivot; returns the first
    index of the right-hand part.
    """
    pivot = arr[(left + right) // 2]
    while left <= right:
        while arr[left] < pivot:
            left += 1
        while arr[right] > pivot:
            right -= 1
        if left <= right:
            arr[left], arr[right] = arr[right], arr[left]
            left += 1
            right -= 1
    return left

def quicksort(arr, left, right):
    """Sort arr[left:right+1] in place by recursive partitioning."""
    index = partition(arr, left, right)
    if index - 1 > left:
        quicksort(arr, left, index - 1)
    if index < right:
        quicksort(arr, index, right)

arr = [1, 5, 4, 34, 7, 3, 5, 3, 7, 3, 5, 3, 7, 3]
quicksort(arr, 0, len(arr) - 1)
# Bug fix: the original used the Python 2 print statement ("print arr")
# with a stray trailing '|', both syntax errors under Python 3.
print(arr)
"""Various car classes."""
from interact_drive.car.car import Car
from interact_drive.car.fixed_velocity_car import FixedVelocityCar
from interact_drive.car.planner_car import PlannerCar
from interact_drive.car.linear_reward_car import LinearRewardCar
from interact_drive.car.base_rational_car import BaseRationalCar
from interact_drive.car.left_lane_car import LeftLaneCar
from interact_drive.car.merger_car import MergerCar
from interact_drive.car.give_way_car import GiveWayCar |
__author__ = 'kummef'
import Prisoner
class SatanPrisoner(Prisoner.Prisoner):
    """Prisoner's-dilemma strategy that always plays its 'mean' move."""
    def chooseNextMove(self):
        # Unconditionally returns self.mean — presumably the defect/betray
        # move inherited from the Prisoner base class; confirm against
        # Prisoner's definition of `mean`.
        return self.mean
|
from django.contrib import admin
from . import models
# Register your models here.
# Branding for the Django admin site (header, browser title, index title).
admin.site.site_header = 'AmisCake Admin'
admin.site.site_title = 'AmisCake Admin'
admin.site.index_title = 'AmisCake Admin'
@admin.register(models.Producto)
class ProductoAdmin(admin.ModelAdmin):
    """Admin change-list for products: id, name, price, image URL, slug, active flag."""
    list_display = ['id', 'nombre', 'precio', 'url_imagen', 'slug', 'activo']
@admin.register(models.Cliente)
class ClienteAdmin(admin.ModelAdmin):
    """Admin change-list for customers; every displayed column is also searchable."""
    list_display = ['id', 'cedula', 'nombre', 'telefono', 'correo', 'direccion']
    search_fields = list_display
@admin.register(models.Pedido)
class PedidoAdmin(admin.ModelAdmin):
    """Admin change-list for orders.

    Fix: removed a stray trailing '|' after the list_display literal,
    which was a syntax error.
    """
    list_display = [
        'id',
        'creacion',
        'actualizado',
        'cliente',
        'confirmado',
        'despachado',
        'detalle',
    ]
import pickle
from schafkopf.game_modes import SOLO, WENZ, PARTNER_MODE
from schafkopf.suits import HEARTS, ACORNS
from schafkopf.players.data.data_processing import switch_suits_player_hands, switch_card_suit, \
switch_suits_played_cards
# Input pickle of logged games, and the per-game-mode output pickles that
# preprocess_game() appends to.
infilename = 'train_data.p'
solo_filename = 'train_data_solo.p'
wenz_filename = 'train_data_wenz.p'
partner_filename = 'train_data_partner.p'
def create_data_trickplay(infile_name):
    """Stream every pickled game dict from *infile_name* through
    preprocess_game() until the file is exhausted.

    Improvement: only pickle.load() sits inside the try block, so an
    EOFError raised by downstream processing can no longer be mistaken
    for end-of-file.
    """
    with open(infile_name, 'rb') as infile:
        while True:
            try:
                data_dic = pickle.load(infile)
            except EOFError:
                return
            preprocess_game(data_dic)
def preprocess_game(data_dic):
    """Transform one logged game and append it to the pickle for its game mode."""
    mode = data_dic['game_mode'][0]
    if mode == SOLO:
        out_name, transformed = solo_filename, transform_solo(data_dic)
    elif mode == WENZ:
        out_name, transformed = wenz_filename, transform_wenz(data_dic)
    elif mode == PARTNER_MODE:
        out_name, transformed = partner_filename, transform_partner(data_dic)
    else:
        # Other game modes are not collected.
        return
    with open(out_name, 'ab') as outfile:
        pickle.dump(transformed, outfile)
def transform_solo(data_dic):
    """Normalize a solo game: trump suit -> HEARTS, declarer -> player 0."""
    declarer = data_dic['declaring_player']
    suit = data_dic['game_mode'][1]
    played = data_dic['played_cards']
    hands = data_dic['player_hands']
    # Rotate non-hearts solos onto hearts so one canonical trump suit remains.
    if suit != HEARTS:
        played = switch_suits_played_cards(played, suit, HEARTS)
        hands = switch_suits_player_hands(hands, suit, HEARTS)
    # Re-index every player relative to the declaring player (declarer = 0).
    played = [(card, (player - declarer) % 4) for card, player in played]
    hands = [hands[(seat + declarer) % 4] for seat in range(4)]
    return {'player_hands': hands, 'played_cards': played}
def transform_wenz(data_dic):
    """Normalize a wenz game: re-index players so the declarer is player 0."""
    declarer = data_dic['declaring_player']
    # Relative seat of each card's player with respect to the declarer.
    played = [(card, (player - declarer) % 4)
              for card, player in data_dic['played_cards']]
    # Rotate the hands by the same offset.
    hands = [data_dic['player_hands'][(seat + declarer) % 4] for seat in range(4)]
    return {'player_hands': hands, 'played_cards': played}
def transform_partner(data_dic):
    """Normalize a partner game: called suit -> ACORNS, declarer -> player 0."""
    declarer = data_dic['declaring_player']
    suit = data_dic['game_mode'][1]
    played = data_dic['played_cards']
    hands = data_dic['player_hands']
    # Rotate the called suit onto ACORNS so one canonical suit remains.
    if suit != ACORNS:
        played = switch_suits_played_cards(played, suit, ACORNS)
        hands = switch_suits_player_hands(hands, suit, ACORNS)
    # Re-index every player relative to the declaring player (declarer = 0).
    played = [(card, (player - declarer) % 4) for card, player in played]
    hands = [hands[(seat + declarer) % 4] for seat in range(4)]
    return {'player_hands': hands, 'played_cards': played}
def main():
    # Entry point: preprocess the full training-data pickle into the
    # per-game-mode output files.
    create_data_trickplay(infilename)
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
# Create your views here.
# import viewsets
from rest_framework import viewsets
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
# import local data
from .serializers import GeeksSerializer
from .models import A1, A2, A3, A4, A5, A6
# create a viewset
#class GeeksViewSet(viewsets.ModelViewSet):
class GeeksViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A1.

    Fix: the original used the @permission_classes decorator, which
    rest_framework.decorators documents for @api_view function-based
    views; on a ViewSet the supported spelling is the permission_classes
    class attribute.
    """
    permission_classes = (IsAuthenticated,)
    # define queryset
    queryset = A1.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
# NOTE(review): the viewsets below mirror GeeksViewSet for models A2..A6
# but declare no explicit permissions, so they fall back to the
# project-wide DRF defaults — confirm that is intended.  They also all
# share GeeksSerializer; verify its fields match each model.
class GeeksViewSet2(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A2."""
    # define queryset
    queryset = A2.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
class GeeksViewSet3(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A3."""
    # define queryset
    queryset = A3.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
class GeeksViewSet4(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A4."""
    # define queryset
    queryset = A4.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
class GeeksViewSet5(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A5."""
    # define queryset
    queryset = A5.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
class GeeksViewSet6(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve endpoints for model A6."""
    # define queryset
    queryset = A6.objects.all()
    # specify serializer to be used
    serializer_class = GeeksSerializer
|
from grid import Grid
import os
import unittest
class TestStringMethods(unittest.TestCase):
    """Unit tests for the 2048 Grid class: construction, sliding/merging,
    game-over and win detection, arbitrary board sizes, and undo."""
    def testCreateGrid(self):
        # Note: `true` is a tuple of four row lists (no outer brackets).
        true = [0, 2, 4, 2], [0, 2, 8, 16], [0, 0, 0, 0], [2048, 0, 0, 1]
        test = Grid(true)
        for r in range(4):
            for c in range(4):
                self.assertEqual(test.grid[r][c].val, true[r][c])
    def testArbitrarySize(self):
        # Grid should accept non-4x4 boards.
        true = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
        test = Grid(true)
        for r in range(3):
            for c in range(3):
                self.assertEqual(test.grid[r][c].val, true[r][c])
    def testSlide(self):
        # Each case checks tile movement plus single-merge-per-move rules.
        test = Grid([[2, 2, 0, 0], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        true = Grid([[0, 0, 0, 4], [0, 0, 0, 2], [2, 4, 8, 16], [0, 0, 8, 8]])
        test.slide('RIGHT')
        self.assertEqual(true, test)
        test = Grid([[2, 2, 0, 0], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        true = Grid([[4, 0, 0, 0], [2, 0, 0, 0], [2, 4, 8, 16], [8, 8, 0, 0]])
        test.slide('LEFT')
        self.assertEqual(test, true)
        test = Grid([[2, 2, 0, 16], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        true = Grid([[4, 2, 8, 32], [2, 8, 4, 4], [4, 0, 0, 0], [0, 0, 0, 0]])
        test.slide('UP')
        self.assertEqual(test, true)
        test = Grid([[2, 2, 0, 16], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        true = Grid([[0, 0, 0, 0], [2, 0, 0, 0], [4, 2, 8, 32], [4, 8, 4, 4]])
        test.slide('DOWN')
        self.assertEqual(test, true)
        # Merge priority: the pair nearest the slide direction merges first.
        test = Grid([[4, 2, 2, 4], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        true = Grid([[4, 4, 4, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        test.slide('LEFT')
        self.assertEqual(true, test)
        test = Grid([[4, 2, 2, 4], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        true = Grid([[0, 4, 4, 4], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        test.slide('RIGHT')
        self.assertEqual(true, test)
    def testGameOver(self):
        # A full board with no adjacent equal tiles is game over.
        test = Grid([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
        self.assertTrue(test.checkGameOver())
    def testWin(self):
        test = Grid([[2048, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        self.assertTrue(test.checkWin())
        # A merge that produces 2048 also counts as a win.
        test = Grid([[1024, 1024, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        test.slide('LEFT')
        self.assertTrue(test.checkWin())
    def testDimensions(self):
        # Rectangular boards: dimensions and slides must both work.
        test = Grid([[2, 0, 0, 0, 0, 0], [4, 0, 0, 0, 0, 0]])
        self.assertEqual(2, test.num_rows)
        self.assertEqual(6, test.num_cols)
        true = Grid([[0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 4]])
        test.slide('RIGHT')
        self.assertEqual(true, test)
    # def testBadParams(self):
    #     self.failUnlessRaises(AssertionError, Grid([1, 2, 3, 4]))
    def testUndo(self):
        # undo() must restore the exact pre-slide board.
        test = Grid([[2, 2, 0, 0], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        true = Grid([[2, 2, 0, 0], [2, 0, 0, 0], [2, 4, 8, 16], [4, 4, 4, 4]])
        test.slide('RIGHT')
        test.undo()
        self.assertEqual(test, true)
if __name__ == '__main__':
    # Fixes: removed a stray trailing '|' after unittest.main() (syntax
    # error) and made the screen clear cross-platform — 'cls' exists only
    # on Windows; POSIX shells use 'clear'.
    os.system('cls' if os.name == 'nt' else 'clear')
    unittest.main()
# For each of T input words, build the lexicographically-largest string by
# placing each character at the front when it is >= the current first
# character, otherwise at the back; print one "Case #k: result" per word.
# Fix: ported from Python 2 (print statement, raw_input, int-returning
# input) to Python 3.
n = int(input())
count = 0
for i in range(n):
    count = count + 1
    wrd = input()
    p = []
    for ch in wrd:  # strings iterate per character; list(wrd) was redundant
        if len(p) == 0:
            p.append(ch)
        elif ch >= p[0]:
            p.insert(0, ch)
        else:
            p.append(ch)
    str1 = ''.join(p)
    print("Case #" + str(count) + ": " + str1)
|
from rest_framework import serializers
from payments.models import StripeConnect
from payments.utils import get_connect_url
class StripeConnectSerializer(serializers.ModelSerializer):
    """Serializer for StripeConnect records.

    Exposes whether a Stripe account is connected and, while it is not,
    the OAuth authorization URL the client should visit to connect one.
    """
    connected = serializers.SerializerMethodField()
    authorization_url = serializers.SerializerMethodField()
    created_at = serializers.DateTimeField(required=False, read_only=True)
    updated_at = serializers.DateTimeField(required=False, read_only=True)
    def get_connected(self, obj):
        # Connected iff a Stripe connected-account id has been stored.
        return obj.connected_account_id is not None
    def get_authorization_url(self, obj):
        # Only offer an authorization URL while no account is connected yet.
        if obj.connected_account_id is None:
            return get_connect_url(
                obj.redirect_state,
                obj.scope,
                obj.store.owner.email
            )
        return None
    class Meta:
        model = StripeConnect
        fields = [
            "redirect_state",
            "scope",
            "connected",
            "authorization_url",
            "created_at",
            "updated_at"
        ]
|
import logging
import flask
import flask_config
from wsgiref.util import FileWrapper
from pywkher import generate_pdf
app = flask.Flask(__name__)
app.static_folder = "public"  # serve static assets from ./public
# Fix: SEND_FILE_MAX_AGE_DEFAULT must be set in app.config — Flask never
# reads it as a plain attribute on the app object, so the original
# assignment silently had no effect on static-file caching.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route('/')
def home():
    """Returns html that is useful for understanding, debugging and extending
    the charting API"""
    # Fix: the Python 2 file() builtin no longer exists; use open() in a
    # context manager so the handle is closed deterministically.
    with open('public/index.html') as f:
        return f.read()
@app.route('/html-to-pdf', methods=['POST'])
def html_to_pdf():
    """Takes an HTTP POST of html data and returns a pdf.

    Example use with jQuery:
        $.post('/html-to-pdf', {'html': HTML_DATA})

    Responds 400 when the 'html' form field is missing or empty.
    (Fixes: removed a large block of dead, commented-out Django code and a
    missing closing quote in the docstring example.)
    """
    raw_html = flask.request.form.get('html', '')
    if not raw_html:
        # abort() raises, so this acts as a guard clause.
        flask.abort(400)
    pdf_file = generate_pdf(html=raw_html.encode(encoding='UTF-8'))
    # NOTE(review): Content-Disposition / Content-Length headers were
    # sketched here but never enabled; add them if downloads need a
    # filename or a progress bar.
    return flask.Response(response=pdf_file,
                          status=200,
                          mimetype='application/pdf')
if __name__ == '__main__':
    # Set up logging to stdout, which ends up in Heroku logs
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.WARNING)
    app.logger.addHandler(stream_handler)
    # Debug flag and port come from the deployment config module.
    app.debug = flask_config.debug
    app.run(host='0.0.0.0', port=flask_config.port)
|
import sys
sys.path.insert(0, "..")
import argparse
import numpy as np
from metric import score, human_score
from utils import summary_level_correlation, system_level_correlation, get_realsumm_data
def realsumm_by_examples(version=2):
    """
    version=2 expected output:
    ================ System Level =================
    l3c 0.8417933546081342 0.8010769230769231
    p3c 0.8367324204640783 0.7655384615384616
    l2c 0.873318115166315 0.8461538461538461
    p2c 0.88526218266187 0.8573846153846155
    ================ Summary Level =================
    l3c 0.5649634208524078 0.5384053984871865
    p3c 0.6379487042335673 0.5964372547536069
    l2c 0.5686841213142527 0.542580658037932
    p2c 0.6421714799110585 0.6005035335930164
    version=3 expected output:
    ================ System Level =================
    l3c 0.8125769965944079 0.7795384615384615
    p3c 0.8841078645351625 0.8683076923076924
    l2c 0.8502911457458968 0.8118461538461539
    p2c 0.8857783322303019 0.8681538461538463
    ================ Summary Level =================
    l3c 0.4625040999441442 0.4387146552055542
    p3c 0.5802680664600168 0.5409421497592383
    l2c 0.4796741509889696 0.4585034329414799
    p2c 0.5746644786840002 0.5324422041594156
    """
    # Map each REALSumm doc id line to its index into units / system_data.
    with open(f"../data/REALSumm/ids.txt", 'r') as f:
        dids = {line.strip(): i for i, line in enumerate(f.readlines())}
    units, system_data = get_realsumm_data(version=version)
    system_level, summary_level = {}, {}
    # 5-fold cross-validation over examples; each fold is scored with its
    # own fine-tuned model checkpoint.
    for fold in range(1, 6):
        with open(f"../data/REALSumm/by_examples/fold{fold}.id", 'r') as f:
            fold_dids = [dids[line.strip()] for line in f.readlines()]
        fold_units = [units[i] for i in fold_dids]
        # Restrict each system's (summaries, labels) pair to this fold's examples.
        fold_system_data = {system_name: [[system_data[system_name][0][i] for i in fold_dids],
                                          [system_data[system_name][1][i] for i in fold_dids]]
                            for system_name in system_data}
        l3c, p3c, l2c, p2c, human = {}, {}, {}, {}, {}
        for system_name in system_data:
            summaries, labels = fold_system_data[system_name]
            # Gold labels are only fed to score() for version 2; version 3
            # derives them from the model instead.
            res = score(summaries, fold_units, labels=labels if version == 2 else None,
                        model_type=f"shiyue/roberta-large-realsumm-by-examples-fold{fold}", detail=True)
            l3c[system_name] = {i: v for i, v in enumerate(res["l3c"][1])}
            p3c[system_name] = {i: v for i, v in enumerate(res["p3c"][1])}
            l2c[system_name] = {i: v for i, v in enumerate(res["l2c"][1])}
            p2c[system_name] = {i: v for i, v in enumerate(res["p2c"][1])}
            gold = res["human"][1] if version == 2 else human_score(labels, detail=True)[1]
            human[system_name] = {i: v for i, v in enumerate(gold)}
        # Accumulate per-fold correlations; averaged across folds below.
        for metric, prediction in [("l3c", l3c), ("p3c", p3c), ("l2c", l2c), ("p2c", p2c)]:
            sys_pear, sys_spear = system_level_correlation(human, prediction)
            summ_pear, summ_spear = summary_level_correlation(human, prediction)
            if fold == 1:
                system_level[metric] = {"pear": [sys_pear], "spear": [sys_spear]}
                summary_level[metric] = {"pear": [summ_pear], "spear": [summ_spear]}
            else:
                system_level[metric]["pear"].append(sys_pear)
                system_level[metric]["spear"].append(sys_spear)
                summary_level[metric]["pear"].append(summ_pear)
                summary_level[metric]["spear"].append(summ_spear)
    print(f"================ System Level =================")
    for metric in system_level:
        print(metric, np.mean(system_level[metric]["pear"]), np.mean(system_level[metric]["spear"]))
    print(f"================ Summary Level =================")
    # (iterates system_level's keys; summary_level has the identical key set)
    for metric in system_level:
        print(metric, np.mean(summary_level[metric]["pear"]), np.mean(summary_level[metric]["spear"]))
def realsumm_by_systems(version=2):
    """
    version=2 expected output:
    ================ System Level =================
    l3c 0.7502881735928073 0.6799999999999999
    p3c 0.7514473158330084 0.7
    l2c 0.8136240787614536 0.7799999999999999
    p2c 0.7389623756730259 0.7199999999999999
    ================ Summary Level =================
    l3c 0.5099953721863157 0.49821640182533294
    p3c 0.5477781945069251 0.5175346450835481
    l2c 0.5339576668182527 0.5171383697651195
    p2c 0.5529397909320518 0.5206720621937355
    version=3 expected output:
    ================ System Level =================
    l3c 0.7715564782870926 0.74
    p3c 0.7774456941910397 0.76
    l2c 0.8235133749974825 0.8399999999999999
    p2c 0.7804174278938876 0.7599999999999999
    ================ Summary Level =================
    l3c 0.43228962704258034 0.42105945909959974
    p3c 0.4879742684037576 0.4595304316927783
    l2c 0.45217790754278975 0.43911306320979093
    p2c 0.48765488022472103 0.46381178796934996
    """
    units, system_data = get_realsumm_data(version=version)
    system_level, summary_level = {}, {}
    # 5-fold cross-validation where folds partition the *systems* rather
    # than the examples; all examples are scored for each fold's systems.
    for fold in range(1, 6):
        with open(f"../data/REALSumm/by_systems/fold{fold}.sys", 'r') as f:
            fold_systems = [line.strip() for line in f.readlines()]
        fold_system_data = {system_name: system_data[system_name]
                            for system_name in system_data if system_name in fold_systems}
        l3c, p3c, l2c, p2c, human = {}, {}, {}, {}, {}
        for system_name in fold_system_data:
            summaries, labels = fold_system_data[system_name]
            # Gold labels only go into score() for version 2 (cf. by_examples).
            res = score(summaries, units, labels=labels if version == 2 else None,
                        model_type=f"shiyue/roberta-large-realsumm-by-systems-fold{fold}", detail=True)
            l3c[system_name] = {i: v for i, v in enumerate(res["l3c"][1])}
            p3c[system_name] = {i: v for i, v in enumerate(res["p3c"][1])}
            l2c[system_name] = {i: v for i, v in enumerate(res["l2c"][1])}
            p2c[system_name] = {i: v for i, v in enumerate(res["p2c"][1])}
            gold = res["human"][1] if version == 2 else human_score(labels, detail=True)[1]
            human[system_name] = {i: v for i, v in enumerate(gold)}
        # Accumulate per-fold correlations; averaged across folds below.
        for metric, prediction in [("l3c", l3c), ("p3c", p3c), ("l2c", l2c), ("p2c", p2c)]:
            sys_pear, sys_spear = system_level_correlation(human, prediction)
            summ_pear, summ_spear = summary_level_correlation(human, prediction)
            if fold == 1:
                system_level[metric] = {"pear": [sys_pear], "spear": [sys_spear]}
                summary_level[metric] = {"pear": [summ_pear], "spear": [summ_spear]}
            else:
                system_level[metric]["pear"].append(sys_pear)
                system_level[metric]["spear"].append(sys_spear)
                summary_level[metric]["pear"].append(summ_pear)
                summary_level[metric]["spear"].append(summ_spear)
    print(f"================ System Level =================")
    for metric in system_level:
        print(metric, np.mean(system_level[metric]["pear"]), np.mean(system_level[metric]["spear"]))
    print(f"================ Summary Level =================")
    # (iterates system_level's keys; summary_level has the identical key set)
    for metric in system_level:
        print(metric, np.mean(summary_level[metric]["pear"]), np.mean(summary_level[metric]["spear"]))
if __name__ == '__main__':
    # CLI entry point.  Fix: removed a stray trailing '|' after the final
    # print (syntax error).
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", default="realsumm",
                        type=str, help="Data name: choose from [nli, tac08, tac09, realsumm, pyrxsum]")
    parser.add_argument("--split", default="examples",
                        type=str, help="Split by: examples or systems")
    parser.add_argument("--version", default=2, type=int, help="Lite[version]Pyramid")
    args = parser.parse_args()
    if args.data == "realsumm":
        if args.split == "examples":
            realsumm_by_examples(args.version)
        elif args.split == "systems":
            realsumm_by_systems(args.version)
        else:
            print("invalid split!")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 18:26:56 2018
@author: loey
"""
import time
import sys
import csv
import re
csv.field_size_limit(sys.maxsize)
filenames = dict()  # filename -> [filename]; effectively an "already seen" set
def main():
    # Convert each split CSV's 'text' column into a plain-text file,
    # one line per row, timing the whole run.
    start_time = time.time()
    for i in ["train", "valid", "test"]:
        with open('split/training_'+i+'_allFiles.csv', 'r') as csv_file_r:
            training_file = open('redditTrain/'+i+'.txt', 'w')
            reader = csv.DictReader(csv_file_r)
            for r in reader:
                if r['filename'] not in filenames:
                    filenames[r['filename']] = [r['filename']]
                    print(r['filename'])
                # NOTE(review): r['text'] is a str, so each c is a 1-char
                # string and `c != ord('\n')` (str vs int) is always True —
                # the '\n' branch is dead.  Likely intent was `c != '\n'`,
                # or the column was expected to hold integer char codes;
                # confirm against the CSV producer before changing.
                newSentAsString = ' '.join([str(c) if c != ord('\n') else '\n' for c in r['text']])
                training_file.write(newSentAsString + "\n")
            training_file.close()
            # Redundant: the `with` block closes csv_file_r on exit.
            csv_file_r.close()
    print("writeTrainingTxt_lstm.py Run Time: " + str(time.time()-start_time) + " seconds")
main() |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import shutil
from tensorboard.plugins.hparams import api as hp
import matplotlib.pyplot as plt
import os
import io
import utils.birds_dataset_utils as dataset_utils
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
class LogSegmentationPredCallback(tf.keras.callbacks.Callback):
    """Keras callback that renders (input, true mask, predicted mask)
    figures for one batch of *dataset* to TensorBoard every other epoch."""
    def __init__(self, logdir, dataset, img_title="model_pred", rate=1):
        super(LogSegmentationPredCallback, self).__init__()
        # Dedicated writer so prediction images land under <logdir>/pred_imgs.
        self.file_writer = tf.summary.create_file_writer(os.path.join(logdir, "pred_imgs"))
        self.dataset = dataset
        self.img_title = img_title
        # NOTE(review): rate is stored but never used — on_epoch_end
        # hardcodes `epoch % 2`, i.e. "every other epoch"; presumably
        # rate was meant to drive that interval.  Confirm before relying
        # on the parameter.
        self.rate = rate
    def on_epoch_end(self, epoch, logs=None):
        # Only log on even-numbered epochs to keep event files small.
        if epoch % 2 != 0:
            return
        for img, mask in self.dataset.take(1):
            pred_mask = self.model.predict(img)
            display_list = [img, mask, pred_mask]
            figure = self._display_predictions(display_list, show=False, max_rows=4)
            with self.file_writer.as_default():
                tf.summary.image(self.img_title, self._plot_to_image(figure), step=epoch)
        print("Logging predictions...")
    def _create_mask(self, pred_mask):
        # Multi-channel logits: take the per-pixel argmax class and keep a
        # trailing channel axis so array_to_img accepts the result.
        if pred_mask.shape[2] > 1:
            pred_mask = tf.argmax(pred_mask, axis=-1)
            pred_mask = pred_mask[..., tf.newaxis]
        # else:
        #     pred_mask = tf.cast(pred_mask, dtype=tf.int8)
        return pred_mask
    def _display_predictions(self, display_list, show=True, max_rows=4):
        # Build a num_rows x 3 grid: input | ground-truth mask | prediction.
        imgs, masks, predictions = display_list
        batch_size = imgs.shape[0] if len(imgs.shape) == 4 else 1
        num_rows = max_rows if batch_size > max_rows else batch_size
        # (title list is currently unused — subplots carry no captions)
        title = ['Input Image', 'True Mask', 'Predicted Mask']
        img_size = 2.5
        fig = plt.figure(figsize=(img_size * 3, img_size * num_rows))
        for row in range(num_rows):
            plt.subplot(num_rows, 3, (row * 3) + 1)
            plt.imshow(dataset_utils.denormalize_img(imgs[row]))
            plt.subplot(num_rows, 3, (row * 3) + 2)
            plt.imshow(tf.keras.preprocessing.image.array_to_img(masks[row]),
                       cmap='gray')
            plt.subplot(num_rows, 3, (row * 3) + 3)
            pred_mask = self._create_mask(predictions[row])
            plt.imshow(tf.keras.preprocessing.image.array_to_img(pred_mask),
                       cmap='gray')
        plt.axis('on')
        plt.tight_layout()
        if show:
            plt.show()
        else:
            return fig
    def _plot_to_image(self, figure):
        """Converts the matplotlib plot specified by 'figure' to a PNG image and
        returns it. The supplied figure is closed and inaccessible after this call."""
        # Save the plot to a PNG in memory.
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        # Closing the figure prevents it from being displayed directly inside
        # the notebook.
        plt.close(figure)
        buf.seek(0)
        # Convert PNG buffer to TF image
        image = tf.image.decode_png(buf.getvalue(), channels=4)
        # Add the batch dimension
        image = tf.expand_dims(image, 0)
        return image
class WeightedBinaryCrossentropy(tf.keras.losses.Loss):
    """Binary cross-entropy whose positive-class term is scaled by pos_weight."""
    def __init__(self, pos_weight, name='WeightedBinaryCrossentropy'):
        super().__init__(name=name)
        self.pos_weight = pos_weight  # multiplier on the y_true == 1 term
    def call(self, y_true, y_pred):
        # For numerical stability clip predictions to log stable numbers
        y_pred = tf.keras.backend.clip(y_pred,
                                       tf.keras.backend.epsilon(),
                                       1 - tf.keras.backend.epsilon())
        # Compute weighted binary cross entropy
        wbce = y_true * -tf.math.log(y_pred) * self.pos_weight + (1 - y_true) * -tf.math.log(1 - y_pred)
        # Reduce by mean
        return tf.reduce_mean(wbce)
def downsample(filters, size, stride=2, apply_batchnorm=True, use_bias=False,
               name=None):
    """Encoder block: Conv2D -> (BatchNorm) -> ReLU, pooling when stride == 1.

    Args:
        filters: number of convolution filters.
        size: square kernel size.
        stride: convolution stride; when 1, spatial reduction is performed by
            a 2x2 max-pool instead of the strided convolution.
        apply_batchnorm: insert BatchNormalization after the convolution.
        use_bias: whether the convolution uses a bias term.
        name: optional base name, expanded with kernel/stride information.

    Returns:
        A tf.keras.Sequential implementing the block.
    """
    if name is not None:  # idiomatic form of `not name is None`
        name = "%s_conv2d_k%d_s%d" % (name, size, stride)
    result = tf.keras.Sequential(name=name)
    result.add(tf.keras.layers.Conv2D(filters, size, strides=stride,
                                      padding='same', use_bias=use_bias))
    if apply_batchnorm:
        result.add(tf.keras.layers.BatchNormalization())
    result.add(tf.keras.layers.ReLU())
    if stride == 1:
        # Stride-1 convs keep resolution, so halve it explicitly via pooling.
        result.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same'))
    return result
def upsample(filters, size, stride=2, apply_dropout=False, use_bias=False, name=None):
    """Decoder block: Conv2DTranspose -> BatchNorm -> (Dropout) -> ReLU.

    Args:
        filters: number of transposed-convolution filters.
        size: square kernel size.
        stride: upsampling stride of the transposed convolution.
        apply_dropout: insert Dropout(0.5) between BatchNorm and ReLU.
        use_bias: whether the transposed convolution uses a bias term.
        name: optional base name, expanded with kernel/stride/dropout info.

    Returns:
        A tf.keras.Sequential implementing the block.
    """
    if name is not None:  # idiomatic form of `not name is None`
        name = "%s_conv2d_trans_k%d_s%d%s" % (name, size, stride,
                                              "_D" if apply_dropout else "")
    result = tf.keras.Sequential(name=name)
    result.add(tf.keras.layers.Conv2DTranspose(filters, size, strides=stride,
                                               padding='same', use_bias=use_bias))
    result.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))
    result.add(tf.keras.layers.ReLU())
    return result
def SegmentationModel(kernel_size=3, strides=1, depth=8, dropout=True,
                      skip_connections=True, output_channels=2):
    """Builds a U-Net-style encoder/decoder for 128x128 RGB inputs.

    Args:
        kernel_size: kernel size used by every down/upsample block.
        strides: stride for the encoder convolutions (1 => pooling instead).
        depth: number of encoder levels actually used (truncates the stacks).
        dropout: apply dropout on the deepest decoder blocks.
        skip_connections: concatenate encoder features into the decoder.
        output_channels: number of per-pixel output channels.

    Returns:
        A tf.keras.Model mapping (128, 128, 3) images to per-pixel outputs.
    """
    inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    down_stack = [
        downsample(64, size=kernel_size, stride=strides, apply_batchnorm=False, name="1"),  # (bs, 128, 128, 64)
        downsample(128, size=kernel_size, stride=strides, name="2"),  # (bs, 64, 64, 128)
        downsample(256, size=kernel_size, stride=strides, name="3"),  # (bs, 32, 32, 256)
        downsample(512, size=kernel_size, stride=strides, name="4"),  # (bs, 16, 16, 512)
        downsample(512, size=kernel_size, stride=strides, name="5"),  # (bs, 8, 8, 512)
        downsample(512, size=kernel_size, stride=strides, name="6"),  # (bs, 4, 4, 512)
        downsample(512, size=kernel_size, stride=strides, name="7"),  # (bs, 2, 2, 512)
        downsample(512, size=kernel_size, stride=strides, name="8"),  # (bs, 1, 1, 512)
    ]
    up_stack = [
        upsample(512, size=kernel_size, name="8", apply_dropout=dropout),  # (bs, 2, 2, 1024)
        upsample(512, size=kernel_size, name="7", apply_dropout=dropout),  # (bs, 4, 4, 1024)
        upsample(512, size=kernel_size, name="6", apply_dropout=dropout),  # (bs, 8, 8, 1024)
        upsample(512, size=kernel_size, name="5", apply_dropout=dropout),  # (bs, 16, 16, 1024)
        upsample(256, size=kernel_size, name="4", ),  # (bs, 32, 32, 512)
        upsample(128, size=kernel_size, name="3", ),  # (bs, 64, 64, 256)
        upsample(64, size=kernel_size, name="2", ),  # (bs, 128, 128, 128)
    ]
    # Limit model by the provided depth (decoder needs one level fewer than
    # the encoder because the last layer below performs the final upsample).
    down_stack = down_stack[:depth]
    up_stack = up_stack[-depth + 1:]
    # Final transposed conv produces per-pixel probabilities via sigmoid, so
    # the loss must expect probabilities (NOT logits — the earlier comment
    # claiming a `from_logits` loss was stale).
    last = tf.keras.layers.Conv2DTranspose(output_channels, kernel_size,
                                           strides=2,
                                           activation='sigmoid',
                                           padding='same')
    x = inputs
    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    # Deepest feature map feeds the decoder directly; the rest become skips.
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        if skip_connections:
            x = tf.keras.layers.Concatenate()([x, skip])
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
if __name__ == "__main__":
    # Load and pre-process the three dataset splits (masks produced later).
    train_dataset, val_dataset, test_dataset, classes_df = dataset_utils.load_dataset(shuffle=True)
    train_dataset = dataset_utils.pre_process_dataset(train_dataset, augmentation=True, with_mask=False)
    test_dataset = dataset_utils.pre_process_dataset(test_dataset, with_mask=False)
    val_dataset = dataset_utils.pre_process_dataset(val_dataset, with_mask=False)
    # Drop class label as we are approaching a segmentation task
    train_dataset = train_dataset.map(lambda img, mask, label: (img, mask))
    val_dataset = val_dataset.map(lambda img, mask, label: (img, mask))
    test_dataset = test_dataset.map(lambda img, mask, label: (img, mask))
    # Get datasets ready and optimize input pipeline by
    # 1. Shuffling (only training set)
    # 2. Batching
    # 3. Prefetching
    BATCH_SIZE = 10  # 32 # 64
    # NOTE(review): .take(1000) caps training to 1000 samples — confirm this
    # is intentional and not a leftover debugging limit.
    train_dataset = train_dataset.take(1000).shuffle(10000).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
    val_dataset = val_dataset.batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
    test_dataset = test_dataset.batch(BATCH_SIZE)
    # Define metrics to watch
    METRICS = [
        tf.keras.metrics.BinaryAccuracy(name='accuracy'),
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc'),
    ]
    # Set hyper parameter search
    HP_DEPTH = hp.HParam('net_depth', hp.IntInterval(3, 8))
    HP_DROPOUT = hp.HParam('dropout', hp.Discrete([False, True]))
    HP_KERNEL_SIZE = hp.HParam('kernel_size', hp.Discrete([3, 4]))
    HP_DOWN_STRIDE = hp.HParam('down_stride', hp.Discrete([1, 2]))
    HP_POS_CLASS_WEIGHT = hp.HParam('pos_class_weight', hp.Discrete([1., 1.5, 2., 10., 50.]))
    hparams_log_dir = os.path.join("segmentation", "logs")
    # Wipe previous runs so TensorBoard shows only this sweep.
    shutil.rmtree(hparams_log_dir, ignore_errors=True)
    hparams_writer = tf.summary.create_file_writer(hparams_log_dir)
    with hparams_writer.as_default():
        # NOTE(review): HP_POS_CLASS_WEIGHT is varied in the sweep below but
        # is missing from this hparams_config registration — confirm.
        hp.hparams_config(
            hparams=[HP_KERNEL_SIZE, HP_DROPOUT, HP_DEPTH, HP_DOWN_STRIDE],
            metrics=[
                hp.Metric('epoch_loss', group="train", display_name='epoch_loss'),
                hp.Metric('epoch_loss', group="validation", display_name='val_loss'),
                hp.Metric('auc', group="train", display_name='auc'),
                hp.Metric('auc', group="validation", display_name='val_auc'),
                hp.Metric('precision', group="train", display_name='precision'),
                hp.Metric('precision', group="validation", display_name='precision_val'),
                hp.Metric('recall', group="train", display_name='recall'),
                hp.Metric('recall', group="validation", display_name='recall_val'),
            ])
    EPOCHS = 2
    # Grid search over the hyper-parameter domains (dropout held fixed).
    for depth in range(HP_DEPTH.domain.min_value, HP_DEPTH.domain.max_value + 1):
        for kernel_size in HP_KERNEL_SIZE.domain.values:
            for down_stride in HP_DOWN_STRIDE.domain.values:
                # for dropout in HP_DROPOUT.domain.values:
                for pos_weight in HP_POS_CLASS_WEIGHT.domain.values:
                    dropout = False
                    hparams = {
                        HP_DEPTH: depth,
                        HP_DROPOUT: dropout,
                        HP_KERNEL_SIZE: kernel_size,
                        HP_DOWN_STRIDE: down_stride,
                        HP_POS_CLASS_WEIGHT: pos_weight,
                    }
                    # Run log dir
                    # NOTE(review): "pw=%d" truncates float weights, so
                    # pos_weight 1.0 and 1.5 map to the same directory and
                    # the second run is skipped by the exists() check below
                    # — confirm and consider "pw=%g".
                    logdir = os.path.join(hparams_log_dir, "depth=%d-k=%d-s=%d-pw=%d%s" %
                                          (depth, kernel_size, down_stride, pos_weight,
                                           "_Drop" if dropout else ""))
                    # Skip configurations that already have logged results.
                    if os.path.exists(logdir):
                        continue
                    model = SegmentationModel(kernel_size=kernel_size, strides=down_stride,
                                              depth=depth, dropout=dropout,
                                              skip_connections=True, output_channels=1)
                    # NOTE(review): `lr=` is a deprecated alias of
                    # `learning_rate=` in TF2 optimizers.
                    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3),
                                  loss=WeightedBinaryCrossentropy(pos_weight=pos_weight),
                                  metrics=METRICS)
                    model.summary()
                    model.reset_states()
                    model.fit(train_dataset,
                              validation_data=val_dataset,
                              epochs=EPOCHS,
                              callbacks=[tf.keras.callbacks.TerminateOnNaN(),
                                         tf.keras.callbacks.TensorBoard(logdir,
                                                                        update_freq='batch',
                                                                        write_graph=False,
                                                                        histogram_freq=5),
                                         tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                                          patience=6),
                                         hp.KerasCallback(logdir, hparams, trial_id=logdir),
                                         # LogSegmentationPredCallback(logdir, test_dataset,
                                         #                             img_title="test_set",
                                         #                             rate=2),
                                         tf.keras.callbacks.ModelCheckpoint(
                                             filepath=os.path.join(logdir, "checkpoints", "cp.ckpt"),
                                             save_best_only=True,
                                             monitor='val_loss',
                                             verbose=1)
                                         ]
                              )
                    # Free graph/session memory between trials.
                    tf.keras.backend.clear_session()
|
from tweepy import OAuthHandler
import tweepy
import asyncio
from discord.ext import commands
import json
class TwitInfo:
    """Attribute container for the four Twitter API credentials.

    The values are populated at import time from twitter_keys.json.
    """
    # Consumer key / consumer secret / access token / access token secret.
    A = None
    B = None
    C = None
    D = None
# Shared credentials object, filled in from the key file at import time.
TI = TwitInfo()
with open("twitter_keys.json", "r") as f:
    twitinfo = json.load(f)
for k, v in twitinfo.items():
    # Plain setattr is the idiomatic spelling of TI.__setattr__(k, v).
    setattr(TI, k, v)
class TwitterCheck:
    """Object for a vtuber twitter user. The consoom_tweets function will go through all recent tweets until
    it sees the newest tweet the last time it checked. If a tweet mentioning 'schedule' is found,
    consoom_tweets returns the URL of that tweet."""

    def __init__(self, user, myclient):
        new = myclient.user_timeline(screen_name=user, count=1, exclude_replies=True)
        # need to get tweets in exactly the same way as the consoom function, otherwise we might get
        # a reply ID here that would never be encountered when filtering out replies later
        # NOTE(review): new[0] raises IndexError for an account with no
        # non-reply tweets — confirm callers only register active accounts.
        self.last = new[0].id
        self.user = user
        self.myclient = myclient

    def consoom_tweets(self):
        """Scan tweets newer than self.last; return the URL of a 'schedule'
        tweet that carries media, or None if no such tweet was found."""
        print("---")
        print(f"Checking for a schedule from {self.user}")
        tweets = self.myclient.user_timeline(screen_name=self.user, count=10, exclude_replies=True)
        # get 10 at a time and stop when we find the most recent one from last time we looked
        t = iter(tweets)
        tw = next(t)
        current_id = tw.id
        total = 0
        found = False
        print(f"self.last is {self.last}, this new id is {tw.id}")
        while not current_id == self.last:
            total += 1
            print(tw.text[:50] + "...")
            if "schedule" in tw.text.lower():
                print("found schedule mention")
                if "media" in tw.entities.keys():  # check if the tweet has an image
                    print("found schedule media tweet")
                    found = True
                    self.last = current_id
                    break
            try:
                tw = next(t)
            except StopIteration:
                print("getting new batch")
                # time to get a new batch. We can't just count the index because if we ask for 10
                # but exclude replies, we might actually get less than 10 back.
                # Fix: max_id is inclusive, so request from current_id - 1 (as the
                # original comment intended) to avoid re-fetching the same tweet,
                # and keep exclude_replies consistent with the first request.
                tweets = self.myclient.user_timeline(screen_name=self.user, count=10,
                                                     max_id=current_id - 1,
                                                     exclude_replies=True)
                t = iter(tweets)
                # Fix: an exhausted timeline previously raised an uncaught
                # StopIteration here; stop scanning instead.
                tw = next(t, None)
                if tw is None:
                    break
            # Fix: track the id of the tweet being examined. The original only
            # updated current_id on batch refills, so the loop's stop condition
            # could never match self.last in the middle of a batch.
            current_id = tw.id
            if total > 100:
                print("Broke loop after 100 iterations")
                break  # safeguard, something has gone wrong
        self.last = current_id
        if found:
            return f"https://twitter.com/{self.user}/status/{str(tw.id)}"
        print("---")
class TwitterListener(commands.Cog):
    """Discord cog that relays tweets and vtuber schedule posts to channels."""

    def __init__(self, bot):
        self.bot = bot
        auth = OAuthHandler(TI.A, TI.B)
        auth.set_access_token(TI.C, TI.D)
        self.client = tweepy.API(auth)
        self.channel_list = []
        self.latest_tweet = None
        self.chuubas = []
        self.thread = None  # where we post the vtuber schedules
        with open("vtubechannel.txt", "r") as f:
            self.thread = self.bot.get_channel(int(f.read().rstrip("\n")))
        self.bot.loop.call_soon(lambda: asyncio.ensure_future(self.thread.send("I'll post vtuber schedules here.")))
        try:
            with open("twitter_channels.txt", "r") as f:
                for line in f.readlines():
                    cid = int(line.rstrip("\n"))
                    # bot.get_channel will just return None if you give it a string
                    self.channel_list.append(cid)
        except FileNotFoundError:
            pass
        with open("vtubers.txt", "r") as f:
            for line in f.readlines():
                # set up objects to monitor each vtuber twitter account
                self.chuubas.append(TwitterCheck(line.rstrip("\n"), self.client))
        # wait 10 seconds until bot is logged in
        self.bot.loop.call_later(10, lambda: asyncio.ensure_future(self.get_tweet()))
        self.bot.loop.call_later(5, lambda: asyncio.ensure_future(self.monitor_chuubas()))

    async def get_tweet(self):
        # for Niko only
        tweet = self.client.user_timeline(user_id="3096462845", count=1)[0]
        if tweet.in_reply_to_status_id is None and tweet.id != self.latest_tweet:
            for q in self.channel_list:
                i = self.bot.get_channel(q)  # get_channel expects an INT
                # Fix: status URLs need user.screen_name (the @handle);
                # user.name is the display name and produces broken links.
                await i.send("https://twitter.com/"+tweet.user.screen_name+"/status/"+str(tweet.id))
                await i.send("https://tenor.com/view/niko-gif-18543948")
            self.latest_tweet = tweet.id
        # schedule to check again in 5 mins
        self.bot.loop.call_later(300, lambda: asyncio.ensure_future(self.get_tweet()))

    async def monitor_chuubas(self):
        for q in self.chuubas:
            tw = q.consoom_tweets()
            if tw:
                await self.thread.send(tw)
                # TODO: delay for like 5 days once a schedule tweet has been found
            await asyncio.sleep(500)  # space out the checking
        print("checked all vtube schedules, re-scheduling check function")
        self.bot.loop.call_later(500, lambda: asyncio.ensure_future(self.monitor_chuubas()))

    @commands.command()
    async def add_this(self, ctx):
        """Register the current channel for Niko tweet relays."""
        if ctx.message.channel.id not in self.channel_list:
            self.channel_list.append(ctx.message.channel.id)
            with open("twitter_channels.txt", "a") as f:
                f.write(str(ctx.message.channel.id))
                f.write("\n")
            await ctx.message.channel.send("Added this channel to the list!")
            tweet = self.client.user_timeline(user_id="3096462845", count=1)[0]
            # Fix: screen_name, not display name, belongs in the URL.
            await ctx.message.channel.send(
                "The latest tweet is: https://twitter.com/" + tweet.user.screen_name + "/status/" + str(tweet.id))
            await ctx.message.channel.send("https://tenor.com/view/niko-gif-18543948")
        else:
            await ctx.message.channel.send("This channel is already in the list!")

    @commands.command()
    async def niko_moment(self, ctx):
        """Post the latest Niko tweet on demand."""
        tweet = self.client.user_timeline(user_id="3096462845", count=1)[0]
        # Fix: screen_name, not display name, belongs in the URL.
        await ctx.message.channel.send(
            "The latest niko tweet is: https://twitter.com/" + tweet.user.screen_name + "/status/" + str(tweet.id))
        await ctx.message.channel.send("https://tenor.com/view/niko-gif-18543948")

    @commands.command()
    async def smonitor(self, ctx, chuuba):
        """Start monitoring `chuuba`'s timeline for schedule tweets."""
        try:
            # Result unused — the call is only a probe that the handle exists.
            self.client.user_timeline(screen_name=chuuba, count=1)[0]
        except Exception:  # Fix: narrowed from a bare except clause.
            await ctx.message.channel.send("Doesn't look like that's a valid twitter handle.")
            return
        with open("vtubers.txt", "a") as f:
            f.write(chuuba)
            f.write("\n")
        self.chuubas.append(TwitterCheck(chuuba, self.client))
        await ctx.message.channel.send(f"I'll monitor for schedules from {chuuba}.")

    @commands.command()
    async def monitoreds(self, ctx):
        """List every handle currently being monitored."""
        with open("vtubers.txt", "r") as f:
            ms = f.read()
        await ctx.message.channel.send("I am currently monitoring:\n" + ms)
async def setup(bot):
    """Extension entry point: attach the TwitterListener cog to the bot."""
    cog = TwitterListener(bot)
    await bot.add_cog(cog)
|
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for the real-world environments."""
import collections
import copy
import inspect
import numpy as np
from realworldrl_suite.utils import multiobj_objectives
# Names of the supported perturbation schedulers; Base._generate_parameter
# dispatches on these strings.
PERTURB_SCHEDULERS = [
    'constant', 'random_walk', 'drift_pos', 'drift_neg', 'cyclic_pos',
    'cyclic_neg', 'uniform', 'saw_wave'
]
def action_roc_constraint(env, safety_vars):
    """Limits the rate of change of the input action.

    This is imported directly by each task environment and is inactive by default.
    To use, set it as one of the entries to safety_spec['constraints'].

    Args:
        env: The RWRL Env.
        safety_vars: The safety_vars from calling env.safety_vars(physics).

    Returns:
        A boolean, where True represents the constraint was not violated.
    """
    previous = env._last_action  # pylint:disable=protected-access
    if previous is None:
        # May happen at the very first step.
        return True
    # Satisfied only when every component changed strictly less than its limit.
    change = np.abs(previous - safety_vars['actions'])
    return np.all(change < env.limits['action_roc_constraint'])
# Utility functions.
def delay_buffer_spec(delay_spec, delay_name):
    """Returns the length (time steps) of a specified delay."""
    # A delay of k steps needs k + 1 buffer slots (the current item plus k
    # delayed ones); the second element is the initially-empty buffer.
    configured_delay = delay_spec.get(delay_name, 0)
    return configured_delay + 1, None
def delayed_buffer_item(buffer_item, buffer_item_len, item):
    """Maintains delays using lists."""
    latest = copy.copy(item)
    if buffer_item is None:
        # First call: prime the entire buffer with the first item.
        buffer_item = [latest] * buffer_item_len
    else:
        buffer_item.append(latest)
    # The oldest entry leaves the buffer and becomes the delayed value.
    item_cur = copy.copy(buffer_item.pop(0))
    return buffer_item, item_cur
def noise_value(noise_spec, noise_name, default_value=0.0, int_val=False):
    """Returns the value of a specified noise."""
    value = noise_spec.get(noise_name, default_value)
    # Step counts must be integral; magnitudes are left as-is.
    return int(value) if int_val else value
def get_combined_challenge(combined_challenge, delay_spec, noise_spec,
                           perturb_spec, dimensionality_spec):
    """Returns the specs that define the combined challenge (if applicable).

    Args:
        combined_challenge: None, or one of 'easy'/'medium'/'hard'. When set,
            the four individual specs must all be empty and are replaced
            wholesale by the preset for that difficulty.
        delay_spec: individual delay spec; passed through when no combined
            challenge is requested.
        noise_spec: individual noise spec (same pass-through rule).
        perturb_spec: individual perturbation spec (same pass-through rule).
        dimensionality_spec: individual dimensionality spec (same rule).

    Returns:
        Tuple (delay_spec, noise_spec, perturb_spec, dimensionality_spec).

    Raises:
        ValueError: on an unknown combined_challenge value, or when a combined
            challenge is requested together with any individual spec.
    """
    # Verify combined_challenge value is legal.
    if (combined_challenge is not None) and (
            combined_challenge not in ['easy', 'medium', 'hard']):
        raise ValueError('combined_challenge must be easy, medium, or hard.')
    if combined_challenge is not None:
        # Verify no other spec is defined if combined_challenge is specified.
        if (bool(delay_spec)) or (bool(noise_spec)) or (bool(perturb_spec)) or (
                bool(dimensionality_spec)):
            # Fix: added the missing space after the first sentence.
            raise ValueError('combined_challenge is specified. '
                             'delay_spec, noise_spec, perturb_spec, or '
                             'dimensionality_spec may not be specified.')
        # Difficulty-scaled parameters. The perturbation spec is identical for
        # all three levels, so only these knobs vary:
        #   delay: (actions, observations, rewards) delays in steps.
        #   gaussian: std of white noise on actions/observations.
        #   prob/steps: dropped & stuck observation probability and duration.
        #   repeat_steps: action-repetition length (probability is always 1).
        #   dims: number of dummy random state observations.
        params = {
            'easy': dict(delay=(3, 3, 10), gaussian=0.1, prob=0.01, steps=1,
                         repeat_steps=1, dims=10),
            'medium': dict(delay=(6, 6, 20), gaussian=0.3, prob=0.05, steps=5,
                           repeat_steps=2, dims=20),
            'hard': dict(delay=(9, 9, 40), gaussian=1.0, prob=0.1, steps=10,
                         repeat_steps=3, dims=50),
        }[combined_challenge]
        actions_delay, observations_delay, rewards_delay = params['delay']
        delay_spec = {
            'enable': True,
            'actions': actions_delay,
            'observations': observations_delay,
            'rewards': rewards_delay
        }
        noise_spec = {
            'gaussian': {
                'enable': True,
                'actions': params['gaussian'],
                'observations': params['gaussian']
            },
            'dropped': {
                'enable': True,
                'observations_prob': params['prob'],
                'observations_steps': params['steps'],
            },
            'stuck': {
                'enable': True,
                'observations_prob': params['prob'],
                'observations_steps': params['steps'],
            },
            'repetition': {
                'enable': True,
                'actions_prob': 1.0,
                'actions_steps': params['repeat_steps']
            }
        }
        perturb_spec = {
            'enable': True,
            'period': 1,
            'scheduler': 'uniform'
        }
        dimensionality_spec = {
            'enable': True,
            'num_random_state_observations': params['dims']
        }
    # Return the updated specs.
    return delay_spec, noise_spec, perturb_spec, dimensionality_spec
class Base(object):
"""Base class for the different real-world environments.
This class is used for common code sharing between the different environments.
"""
def __init__(self):
"""Initalizes the base class for realworld environments.
The following attributes must be set explicitly by any subclass.
"""
# Safety related.
# If subclass sets self._safety_enabled to True, also must set
# self.constraints and self._constraints_obs. self.constraints takes two
# arguments, the 'self' object and the safety_vars.
self._safety_enabled = False
self.constraints = None
self._constraints_obs = None
# Delay related.
self._delay_enabled = False
self._buffer_observations_len = None
self._buffer_observations = None
self._buffer_actions_len = None
self._buffer_actions = None
self._buffer_rewards_len = None
self._buffer_rewards = None
# Noise Gaussian related.
self._noise_guassian_enabled = False
self._noise_gaussian_observations = None
self._noise_gaussian_actions = None
# Noise dropped related.
self._noise_dropped_enabled = False
self._noise_dropped_obs_prob = None
self._noise_dropped_obs_steps = None
self._noise_dropped_obs_dict = None
self._noise_dropped_action_prob = None
self._noise_dropped_action_steps = None
self._noise_dropped_action_arr = None
# Noise stuck related.
self._noise_stuck_enabled = False
self._noise_stuck_obs_prob = None
self._noise_stuck_obs_steps = None
self._noise_stuck_obs_dict = None
self._noise_stuck_obs = None
self._noise_stuck_action_prob = None
self._noise_stuck_action_steps = None
self._noise_stuck_action_arr = None
self._noise_stuck_action = None
# Noise repetition related.
self._noise_repetition_enabled = None
self._noise_repetition_actions_prob = None
self._noise_repetition_actions_steps = None
self._noise_repetition_action = None
self._noise_repetition_action_counter = None
# Perturbation related.
# If subclass sets self._perturb_enabled to True, also must set
# self._perturb_param, self._perturb_scheduler, self._perturb_cur,
# self._perturb_start, self._perturb_min, self._perturb_max,
# self._perturb_std, and self._perturb_period
self._perturb_enabled = False
self._perturb_period = None
self._perturb_param = None
self._perturb_scheduler = None
self._perturb_saw_wave_sign = 1. # initial direction - for saw_wave.
self._perturb_cur = None
self._perturb_start = None
self._perturb_min = None
self._perturb_max = None
self._perturb_std = None
# State and action dimensions related.
self._dimensionality_enabled = False
self._num_random_state_observations = 0
# Multi-objective related.
self._multiobj_enabled = False
self._multiobj_objective = None
self._multiobj_reward = False
self._multiobj_coeff = 0
self._multiobj_observed = False
# Constraint related.
self._last_action = None
def _setup_delay(self, delay_spec):
"""Setup for the delay specifications of the task."""
self._delay_enabled = delay_spec.get('enable', False)
if self._delay_enabled:
# Add delay specifications.
(self._buffer_actions_len,
self._buffer_actions) = delay_buffer_spec(delay_spec, 'actions')
(self._buffer_observations_len,
self._buffer_observations) = delay_buffer_spec(delay_spec,
'observations')
(self._buffer_rewards_len,
self._buffer_rewards) = delay_buffer_spec(delay_spec, 'rewards')
def _setup_noise(self, noise_spec):
"""Setup for the noise specifications of the task."""
# White Gaussian noise.
self._noise_guassian_enabled = noise_spec.get('gaussian',
{}).get('enable', False)
if self._noise_guassian_enabled:
self._noise_gaussian_observations = noise_value(noise_spec['gaussian'],
'observations')
self._noise_gaussian_actions = noise_value(noise_spec['gaussian'],
'actions')
# Dropped noise.
self._noise_dropped_enabled = noise_spec.get('dropped',
{}).get('enable', False)
if self._noise_dropped_enabled:
self._noise_dropped_obs_prob = noise_value(noise_spec['dropped'],
'observations_prob')
self._noise_dropped_obs_steps = noise_value(
noise_spec['dropped'],
'observations_steps',
default_value=1,
int_val=True)
self._noise_dropped_action_prob = noise_value(noise_spec['dropped'],
'actions_prob')
self._noise_dropped_action_steps = noise_value(
noise_spec['dropped'], 'actions_steps', default_value=1, int_val=True)
# Stuck noise.
self._noise_stuck_enabled = noise_spec.get(
'stuck', {}).get('enable', False)
if self._noise_stuck_enabled:
self._noise_stuck_obs_prob = noise_value(noise_spec['stuck'],
'observations_prob')
self._noise_stuck_obs_steps = noise_value(
noise_spec['stuck'],
'observations_steps',
default_value=1,
int_val=True)
self._noise_stuck_action_prob = noise_value(noise_spec['stuck'],
'actions_prob')
self._noise_stuck_action_steps = noise_value(
noise_spec['stuck'], 'actions_steps', default_value=1, int_val=True)
# Repetition noise.
self._noise_repetition_enabled = noise_spec.get('repetition',
{}).get('enable', False)
if self._noise_repetition_enabled:
self._noise_repetition_actions_prob = noise_value(
noise_spec['repetition'], 'actions_prob')
self._noise_repetition_actions_steps = noise_value(
noise_spec['repetition'],
'actions_steps',
default_value=1,
int_val=True)
self._noise_repetition_action_counter = 0
def _setup_dimensionality(self, dimensionality_spec):
"""Setup for the noise specifications of the task."""
# Dummy variables of white Gaussian noise.
self._dimensionality_enabled = dimensionality_spec.get('enable', False)
if self._dimensionality_enabled:
self._num_random_state_observations = dimensionality_spec.get(
'num_random_state_observations', 0)
def _setup_multiobj(self, multiobj_spec):
"""Setup for the multi-objective reward task."""
self._multiobj_enabled = multiobj_spec.get('enable', False)
if self._multiobj_enabled:
self._multiobj_reward = multiobj_spec.get('reward', False)
self._multiobj_coeff = multiobj_spec.get('coeff', 0.0)
self._multiobj_observed = multiobj_spec.get('observed', False)
# Load either from internal library or accept passing in class.
multiobj_objective = multiobj_spec['objective']
if isinstance(multiobj_objective, str):
self._multiobj_objective = multiobj_objectives.OBJECTIVES[
multiobj_objective]()
elif inspect.isclass(multiobj_objective) or callable(multiobj_objective):
self._multiobj_objective = multiobj_objective()
def _generate_parameter(self):
"""Generates a new value for the physics perturbed parameter."""
delta = np.random.normal(scale=self._perturb_std)
if self._perturb_scheduler == 'constant':
pass
elif self._perturb_scheduler == 'random_walk':
self._perturb_cur += delta
elif self._perturb_scheduler == 'drift_pos':
self._perturb_cur += abs(delta)
elif self._perturb_scheduler == 'drift_neg':
self._perturb_cur -= abs(delta)
elif self._perturb_scheduler == 'cyclic_pos':
self._perturb_cur += abs(delta)
if self._perturb_cur >= self._perturb_max:
self._perturb_cur = self._perturb_start
elif self._perturb_scheduler == 'cyclic_neg':
self._perturb_cur -= abs(delta)
if self._perturb_cur <= self._perturb_min:
self._perturb_cur = self._perturb_start
elif self._perturb_scheduler == 'uniform':
self._perturb_cur = np.random.uniform(
low=self._perturb_min, high=self._perturb_max)
elif self._perturb_scheduler == 'saw_wave':
self._perturb_cur = self._perturb_saw_wave_sign * abs(delta)
if ((self._perturb_cur >= self._perturb_max) or
(self._perturb_cur <= self._perturb_min)):
self._perturb_saw_wave_sign *= -1.
# Clip the value to be in the defined support
self._perturb_cur = np.clip(self._perturb_cur, self._perturb_min,
self._perturb_max)
def get_observation(self, physics, obs=None):
"""Augments the observation based on the different specifications."""
# This will get the task-specific observation.
if not obs:
obs = super(Base, self).get_observation(
physics) # pytype: disable=attribute-error
if self._noise_guassian_enabled:
# Add white Gaussian noise to observations.
for k, v in obs.items():
obs[k] = np.random.normal(v, self._noise_gaussian_observations)
if not isinstance(v, np.ndarray):
obs[k] = np.float64(obs[k]).astype(v.dtype)
if self._noise_dropped_enabled:
# Drop observation values with some probability.
if not self._noise_dropped_obs_dict:
# First observation - need to initialize dictionary.
self._noise_dropped_obs_dict = collections.OrderedDict([
(k, np.zeros(v.shape)) for k, v in obs.items()
])
for k, v in self._noise_dropped_obs_dict.items():
# Updating identities and length of dropped values.
identities = np.random.binomial(
n=1, p=self._noise_dropped_obs_prob, size=v.shape)
self._noise_dropped_obs_dict[k][
(v == 0) & (identities == 1)] = self._noise_dropped_obs_steps
# Dropping values.
if isinstance(obs[k], np.ndarray):
obs[k][self._noise_dropped_obs_dict[k] > 0] = 0.
else:
obs[k] = np.float64(0.).astype(
obs[k].dtype) if self._noise_dropped_obs_dict[k] > 0 else obs[k]
update_indices = self._noise_dropped_obs_dict[k] > 0
self._noise_dropped_obs_dict[k][update_indices] -= 1.
if self._noise_stuck_enabled:
# Stuck observation values with some probability.
if not self._noise_stuck_obs_dict:
# First observation - need to initialize dictionary and previous obs.
self._noise_stuck_obs_dict = collections.OrderedDict([
(k, np.zeros(v.shape)) for k, v in obs.items()
])
self._noise_stuck_obs = copy.deepcopy(obs)
for k, v in self._noise_stuck_obs_dict.items():
# Updating identities and length of stuck values.
identities = np.random.binomial(
n=1, p=self._noise_stuck_obs_prob, size=v.shape)
self._noise_stuck_obs_dict[k][
(v == 0) & (identities == 1)] = self._noise_stuck_obs_steps
# Stick values.
if isinstance(obs[k], np.ndarray):
stuck_indices = self._noise_stuck_obs_dict[k] > 0
obs[k][stuck_indices] = self._noise_stuck_obs[k][stuck_indices]
else:
obs[k] = (
self._noise_stuck_obs[k]
if self._noise_stuck_obs_dict[k] > 0 else obs[k])
update_indices = self._noise_stuck_obs_dict[k] > 0
self._noise_stuck_obs_dict[k][update_indices] -= 1.
# Storing observation as previous observation for next step.
self._noise_stuck_obs = copy.deepcopy(obs)
if self._safety_enabled:
if self._safety_observed:
obs['constraints'] = self._constraints_obs
if self._delay_enabled and self._buffer_observations_len > 1:
# Delay the observations.
self._buffer_observations, obs = delayed_buffer_item(
self._buffer_observations, self._buffer_observations_len, obs)
if self._dimensionality_enabled and self._num_random_state_observations > 0:
for i in range(self._num_random_state_observations):
obs['dummy-{}'.format(i)] = np.array(np.random.normal())
if self._multiobj_enabled and self._multiobj_observed:
obs['multiobj'] = self.get_multiobj_obs(physics)
return obs
def get_reward(self, physics):
# This will call the 2nd element of the mixin's `get_reward` method.
# e.g. for a mixin with cartpole.Balance, this effectively calls
# cartpole.Balance.get_reward
reward = super(Base, self).get_reward(
physics) # pytype: disable=attribute-error
reward = self.get_multiobj_reward(physics, reward)
reward = self.delay_reward(reward)
return reward
def delay_reward(self, reward):
"""Augments the reward based on the different specifications."""
if self._delay_enabled and self._buffer_rewards_len > 1:
# Delay the reward.
self._buffer_rewards, reward = delayed_buffer_item(
self._buffer_rewards, self._buffer_rewards_len, reward)
return reward
def get_multiobj_obs(self, physics):
base_reward = super(Base, self).get_reward(
physics) # pytype: disable=attribute-error
objectives = self._multiobj_objective.get_objectives(self)
return np.append(base_reward, objectives)
def get_multiobj_reward(self, physics, reward):
"""Adds a multi-objective reward to the current reward."""
if self._multiobj_enabled and self._multiobj_reward:
return self._multiobj_objective.merge_reward(self, physics, reward,
self._multiobj_coeff)
else:
return reward
def before_step(self, action, action_min, action_max):
"""Returns an actions according to the different specifications."""
if self._delay_enabled and self._buffer_actions_len > 1:
# Delay the actions.
self._buffer_actions, action = delayed_buffer_item(
self._buffer_actions, self._buffer_actions_len, action)
if self._noise_guassian_enabled:
# Add white Gaussian noise to actions.
action = np.random.normal(action, self._noise_gaussian_actions)
action = np.clip(action, action_min, action_max)
if self._noise_dropped_enabled:
# Drop action values with some probability.
if self._noise_dropped_action_arr is None:
# First action - need to initialize array.
self._noise_dropped_action_arr = np.zeros(action.shape)
# Updating identities and length of dropped values.
identities = np.random.binomial(
n=1, p=self._noise_dropped_action_prob, size=action.shape)
dropped_indices = ((self._noise_dropped_action_arr == 0) &
(identities == 1))
self._noise_dropped_action_arr[dropped_indices] = (
self._noise_dropped_action_steps)
# Dropping values.
action[self._noise_dropped_action_arr > 0] = 0.
update_indices = self._noise_dropped_action_arr > 0
self._noise_dropped_action_arr[update_indices] -= 1.
if self._noise_stuck_enabled:
# Stuck action values with some probability.
if self._noise_stuck_action_arr is None:
# First action - need to initialize array.
self._noise_stuck_action_arr = np.zeros(action.shape)
self._noise_stuck_action = copy.deepcopy(action)
# Updating identities and length of stuck values.
identities = np.random.binomial(
n=1, p=self._noise_stuck_action_prob, size=action.shape)
stuck_indices = (
(self._noise_stuck_action_arr == 0) & (identities == 1))
self._noise_stuck_action_arr[stuck_indices] = (
self._noise_stuck_action_steps)
# Stick values.
if isinstance(action, np.ndarray):
stuck_indices = self._noise_stuck_action_arr > 0
action[stuck_indices] = self._noise_stuck_action[stuck_indices]
else:
action = (
self._noise_stuck_action
if self._noise_stuck_action_arr > 0 else action)
update_indices = self._noise_stuck_action_arr > 0
self._noise_stuck_action_arr[update_indices] -= 1.
# Storing action as previous action for next step.
self._noise_stuck_action = copy.deepcopy(action)
if self._noise_repetition_enabled:
# Repeat previous actions if relevant.
if self._noise_repetition_action is None:
# First action - need to store reference.
self._noise_repetition_action = copy.deepcopy(action)
if self._noise_repetition_action_counter == 0:
# Finished previous repetition.
if np.random.uniform() < self._noise_repetition_actions_prob:
# Action is to be repeated.
self._noise_repetition_action_counter = (
self._noise_repetition_actions_steps)
# Setting the action to be the previous one.
action = copy.deepcopy(action)
# Decreasing the repetition counter by one.
self._noise_repetition_action_counter -= 1
else:
# Still repeating previous action.
action = copy.deepcopy(self._noise_repetition_action)
self._noise_repetition_action_counter -= 1
# Storing the action to serve as the next step's reference action.
self._noise_repetition_action = copy.deepcopy(action)
return action
def safety_vars(self, physics):
    """Returns the quantities used to evaluate the safety constraints.

    Abstract hook: subclasses must override this and return the values
    consumed by the functions in ``self.constraints`` (see
    ``_populate_constraints_obs``).

    Args:
      physics: The environment's physics object.

    Raises:
      NotImplementedError: always, on this base class.
    """
    raise NotImplementedError
def _populate_constraints_obs(self, physics):
    """Evaluates every constraint against the current safety vars.

    Results are written into ``self._constraints_obs`` in the iteration
    order of ``self.constraints``.
    """
    safety_vars = self.safety_vars(physics)
    for idx, constraint_fn in enumerate(self.constraints.values()):
        self._constraints_obs[idx] = constraint_fn(self, safety_vars)
def after_step(self, physics):
    """Refreshes cached constraint observations after each physics step."""
    # Safety observations are populated here so a multi-objective reward
    # function (which is called before get_observation) can read them.
    if not self._safety_enabled:
        return
    self._populate_constraints_obs(physics)
@property
def constraints_obs(self):
    # The constraint observation cached by _populate_constraints_obs.
    return self._constraints_obs
@property
def safety_enabled(self):
    # True when safety constraints are evaluated after each step.
    return self._safety_enabled
@property
def delay_enabled(self):
    # Read-only access to the _delay_enabled flag.
    return self._delay_enabled
@property
def perturb_enabled(self):
    # Read-only access to the _perturb_enabled flag.
    return self._perturb_enabled
@property
def perturb_period(self):
    # The configured perturbation period (read-only).
    return self._perturb_period
@property
def multiobj_enabled(self):
    # Read-only access to the _multiobj_enabled flag.
    return self._multiobj_enabled
|
#! /usr/bin/env python3
# contains class encapsulating data structure that holds reminders
__copyright__ = 'Yovary'
__author__ = 'karepker@gmail.com (Kar Epker)'
import datetime
import logging
import sortedcontainers
import threading
class Reminders:
    """Holds reminders and controls insertion and sending of them.

    All public methods take ``reminders_lock`` so the collection can be
    shared safely between the sending thread and request handlers. Locks
    are held via ``with`` so they are released even if a reminder raises.
    """

    def __init__(self):
        # Kept sorted by the reminders' natural ordering.
        self.reminders = sortedcontainers.SortedList()
        self.reminders_lock = threading.Lock()
        self.temporary_reminders = sortedcontainers.SortedList()

    def send_reminders(self):
        """Sends every reminder that is currently due.

        A sent reminder is removed and re-added so it re-sorts into the
        position of its next notification time.
        """
        with self.reminders_lock:
            # Send reminders until the earliest one does not need sending.
            while self.reminders and self.reminders[0].send_reminder():
                first_reminder = self.reminders.pop(0)
                self.reminders.add(first_reminder)

    def add_reminder(self, reminder):
        """Adds ``reminder``, dropping any existing reminders with the same
        username."""
        pruned_reminders = sortedcontainers.SortedList()
        with self.reminders_lock:
            for candidate in self.reminders:
                # Keep only reminders belonging to other users.
                if candidate.username != reminder.username:
                    # Bug fix: SortedList has no usable append(); add() is
                    # required to keep the list sorted.
                    pruned_reminders.add(candidate)
            pruned_reminders.add(reminder)
            self.reminders = pruned_reminders

    def add_temporary_reminder(self, reminder):
        """Adds a temporary reminder without de-duplicating by username."""
        reminder.set_temporary()  # should already be set, but just in case
        with self.reminders_lock:
            self.reminders.add(reminder)

    def get_seconds_until_next_reminder(self):
        """Returns seconds until the earliest reminder's next notification.

        Returns an arbitrarily high sleep value (600) when no reminders
        exist.
        """
        with self.reminders_lock:
            if not self.reminders:
                return 600
            time_until_next_notification = (
                self.reminders[0].next_notification -
                datetime.datetime.utcnow())
        return time_until_next_notification.total_seconds()

    def serialize_to_file(self, filename):
        """Writes every reminder to ``filename``, one JSON object per line."""
        with self.reminders_lock, open(filename, 'w') as out_file:
            # Lazy %-args keep the logging call cheap when INFO is disabled.
            logging.info('Saving %d reminders', len(self.reminders))
            for reminder in self.reminders:
                out_file.write(reminder.json_serialize())
                out_file.write('\n')
reminders = Reminders()
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import Axes3D
# cmap=cm.coolwarm
cmap=cm.rainbow
def Print2DFunction(V, range_x, range_y, title='', path=None):
    """Plots V(x, y) as a 3D surface.

    Args:
        V: 2D array with shape (len(range_x), len(range_y)).
        range_x, range_y: index ranges passed to np.mgrid.
        title: figure title.
        path: optional file path; when given, the figure is also saved.
    """
    assert V.shape == (len(range_x), len(range_y))
    x, y = np.mgrid[range_x, range_y]
    fig = plt.figure(figsize=(12, 12))
    # Fix: fig.gca(projection=...) was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get 3D axes.
    ax = fig.add_subplot(projection='3d')
    surf = ax.plot_surface(x, y, V, rstride=1, cstride=1, cmap=cmap,
                           linewidth=1, antialiased=True)
    fig.colorbar(surf, shrink=0.5)
    plt.title(title)
    plt.ylabel('player sum', size=18)
    plt.xlabel('dealer', size=18)
    if path is not None:
        plt.savefig(path)
    plt.show()
def PrintLambdaMSE(lmbda, mse, title='', path=None):
    """Plots MSE against lambda: red markers overlaid on a connecting line."""
    # Markers first, then the line through the same points.
    plt.plot(lmbda, mse, 'ro')
    plt.plot(lmbda, mse)
    plt.title(title)
    plt.xlabel('lambda', size=18)
    plt.ylabel('mse', size=18)
    if path is not None:
        plt.savefig(path)
    plt.show()
def PrintLoss(losses, tags, title='', path=None):
    """Plots several equally-long loss curves on a shared x-axis."""
    assert len(losses) == len(tags)
    length = len(losses[0])
    xs = range(length)
    for curve, label in zip(losses, tags):
        assert len(curve) == length
        plt.plot(xs, curve, label=label)
    plt.legend(loc='best')
    plt.title(title)
    plt.xlabel('episodes', size=18)
    plt.ylabel('mse', size=18)
    if path is not None:
        plt.savefig(path)
    plt.show()
class RouterTrieNode:
    """A single node of a routing trie.

    Attributes:
        handler: the handler attached to this node, or None.
        children: mapping from path segment to child RouterTrieNode.
    """

    def __init__(self):
        self.handler = None
        self.children = {}

    def insert(self, path):
        """Stores a fresh child node under ``path`` (overwriting any
        existing child for that segment)."""
        child = RouterTrieNode()
        self.children[path] = child
|
def solution(array, commands):
    """For each command [i, j, k], returns the k-th smallest value of
    array[i-1:j] (1-based indices, inclusive range)."""
    return [sorted(array[i - 1:j])[k - 1] for i, j, k in commands]
# Best Solution
# def solution(array, commands):
# return list(map(lambda x:sorted(array[x[0]-1:x[1]])[x[2]-1], commands))
def main():
    """Runs the sample query from the problem statement and prints the
    result (expected: [5, 6, 3])."""
    array = [1, 5, 2, 6, 3, 7, 4]
    commands = [[2, 5, 3], [4, 4, 1], [1, 7, 3]]
    print(solution(array, commands))


if __name__ == '__main__':
    # Guard the entry point so importing this module has no side effects.
    main()
from time import time
from functools import lru_cache
# 题目链接: https://leetcode-cn.com/problems/climbing-stairs/
# 递推公式: f(n) = f(n-1) + f(n-2) (f(1)=1,f(2)=2)
# 当n较大时,用递归会存在大量重复的存储与计算,效率低
# 自定义装饰器
# 参考链接: https://blog.csdn.net/mp624183768/article/details/79522231
def memo(func):
    """Memoizing decorator: caches results keyed by positional arguments.

    Note: only hashable positional arguments are supported, and keyword
    arguments are not part of the cache key.
    """
    from functools import wraps

    cache = {}

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrap(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrap
# @memo
# def climbStairs(n):
# if n == 1:
# return 1
# elif n == 2:
# return 2
#
# return climbStairs(n - 1) + climbStairs(n - 2)
# 系统级装饰器
@lru_cache()
def climbStairs(n):
    """Number of distinct ways to climb ``n`` stairs taking 1 or 2 steps.

    Recurrence: f(n) = f(n-1) + f(n-2), with f(1) = 1 and f(2) = 2.

    Args:
        n: positive number of stairs.

    Raises:
        ValueError: if n < 1 (the original code recursed without bound
            for such inputs).
    """
    if n < 1:
        raise ValueError('n must be a positive integer, got %r' % (n,))
    if n <= 2:
        # f(1) == 1 and f(2) == 2, i.e. f(n) == n here.
        return n
    return climbStairs(n - 1) + climbStairs(n - 2)
# 带缓存的递归
# def climbStairs(n, cache=None):
# if cache is None:
# # if not cache: # 这样写有问题
# cache = {}
# if n in cache:
# return cache[n]
# if n == 1:
# return 1
# elif n == 2:
# return 2
# cache[n] = climbStairs(n-1, cache) + climbStairs(n-2, cache)
#
# return cache[n]
# 动态规划
# def climbStairs(n):
# if n == 1:
# return 1
# elif n == 2:
# return 2
# else:
# i = 1
# j = 2
# for _ in range(2, n):
# i, j = j, i+j
# return j
# Time the call itself. The original evaluated time()-start *before*
# climbStairs(35) ran (print arguments are evaluated left to right), so it
# always reported ~0 seconds regardless of the computation's cost.
start = time()
result = climbStairs(35)
print('[use time]', time() - start, '[result]', result)  # time s
|
"""Base implementations of the :mod:`pymap.interfaces.message` interfaces."""
from __future__ import annotations
import re
from collections.abc import Collection, Iterable, Mapping, Sequence
from datetime import datetime
from typing import Any, Final
from .bytes import Writeable
from .flags import SessionFlags
from .interfaces.message import FlagsKey, CachedMessage, MessageInterface, \
LoadedMessageInterface
from .mime import MessageContent
from .mime.cte import MessageDecoder
from .parsing.response.fetch import EnvelopeStructure, BodyStructure, \
MultipartBodyStructure, ContentBodyStructure, TextBodyStructure, \
MessageBodyStructure
from .parsing.specials import Flag, ObjectId, FetchRequirement
__all__ = ['BaseMessage', 'BaseLoadedMessage']
class _NoContent(ValueError):
    # Thrown when message contents were requested, but the backend did not
    # or could not load them. Callers in this module catch it to fall back
    # to empty results.

    def __init__(self) -> None:
        super().__init__('Message content not available.')
class BaseMessage(MessageInterface, CachedMessage):
    """Message metadata such as UID, permanent flags, and when the message
    was added to the system.

    Args:
        uid: The UID of the message.
        internal_date: The internal date of the message.
        permanent_flags: Permanent flags for the message.
        email_id: The message content identifier for the message.
        thread_id: The thread identifier for the message.
        expunged: True if this message has been expunged from the mailbox.
    """

    __slots__ = ['uid', 'internal_date', 'expunged', '_permanent_flags',
                 '_email_id', '_thread_id', '_flags_key']

    def __init__(self, uid: int, internal_date: datetime,
                 permanent_flags: Iterable[Flag], *,
                 email_id: ObjectId | None = None,
                 thread_id: ObjectId | None = None,
                 expunged: bool = False) -> None:
        super().__init__()
        self.uid: Final = uid
        self.internal_date: Final = internal_date
        self.expunged: Final = expunged
        # ObjectId(None) serves as a null object when no ID was given.
        self._email_id = email_id or ObjectId(None)
        self._thread_id = thread_id or ObjectId(None)
        self._permanent_flags = frozenset(permanent_flags or ())
        # Cached (uid, flags) pair; kept in sync by the permanent_flags
        # setter below.
        self._flags_key = (uid, self._permanent_flags)

    @property
    def email_id(self) -> ObjectId:
        """The message content identifier for the message."""
        return self._email_id

    @property
    def thread_id(self) -> ObjectId:
        """The thread identifier for the message."""
        return self._thread_id

    @property
    def permanent_flags(self) -> frozenset[Flag]:
        """The permanent flags for the message."""
        return self._permanent_flags

    @permanent_flags.setter
    def permanent_flags(self, permanent_flags: frozenset[Flag]) -> None:
        self._permanent_flags = permanent_flags
        self._flags_key = (self.uid, permanent_flags)

    def get_flags(self, session_flags: SessionFlags) -> frozenset[Flag]:
        """Returns the permanent flags merged with this UID's session
        flags."""
        msg_sflags = session_flags.get(self.uid)
        if msg_sflags:
            return self._permanent_flags | msg_sflags
        else:
            return self._permanent_flags

    @property
    def flags_key(self) -> FlagsKey:
        """The cached (uid, permanent flags) pair."""
        return self._flags_key

    def __repr__(self) -> str:
        type_name = type(self).__name__
        return f'<{type_name} uid={self.uid} flags={self.permanent_flags}>'
class BaseLoadedMessage(LoadedMessageInterface):
    """The loaded message content, implemented using an instance of
    :class:`~pymap.mime.MessageContent`.

    Args:
        message: The message object.
        requirement: The fetch requirement of the loaded content.
        content: The MIME-parsed message content, if available.
    """

    __slots__ = ['_message', '_requirement', '_content']

    def __init__(self, message: MessageInterface,
                 requirement: FetchRequirement,
                 content: MessageContent | None) -> None:
        super().__init__()
        self._message = message
        self._requirement = requirement
        self._content = content

    @property
    def message(self) -> MessageInterface:
        """The message object this content belongs to."""
        return self._message

    @property
    def requirement(self) -> FetchRequirement:
        """The fetch requirement of the loaded content."""
        return self._requirement

    @property
    def content(self) -> MessageContent:
        """The MIME-parsed message content.

        Raises:
            _NoContent: if the backend did not load the content.
        """
        if self._content is None:
            raise _NoContent()
        return self._content

    def __bytes__(self) -> bytes:
        return bytes(self.content)

    def _get_subpart(self, section: Sequence[int] | None) -> MessageContent:
        # Resolves a numeric section path (e.g. [1, 2]) to a MIME sub-part;
        # section index 1 on a non-multipart part resolves to the part
        # itself. An empty/None section means the whole message.
        if section:
            subpart = self.content
            for i in section:
                if subpart.body.has_nested:
                    subpart = subpart.body.nested[i - 1]
                elif i == 1:
                    pass
                else:
                    raise IndexError(i)
            return subpart
        else:
            return self.content

    def get_header(self, name: bytes) -> Sequence[str]:
        """Returns the parsed values of header ``name``, or [] when the
        header is absent or the content is unavailable."""
        try:
            return self.content.header.parsed[name]
        except (KeyError, _NoContent):
            return []

    def get_headers(self, section: Sequence[int]) -> Writeable:
        """Returns the raw header block of the given section, or empty."""
        try:
            msg = self._get_subpart(section)
        except (IndexError, _NoContent):
            return Writeable.empty()
        else:
            return msg.header

    def get_body(self, section: Sequence[int] | None = None,
                 binary: bool = False) -> Writeable:
        """Returns the section's content, optionally decoding its
        content-transfer-encoding when ``binary`` is set. The full message
        (headers + body) is returned when no section is given."""
        try:
            msg = self._get_subpart(section)
        except (IndexError, _NoContent):
            return Writeable.empty()
        if binary:
            decoded = MessageDecoder.of(msg.header).decode(msg.body)
            if not section:
                return Writeable.concat((msg.header, decoded))
            else:
                return decoded
        else:
            if not section:
                return msg
            else:
                return msg.body

    def get_message_headers(self, section: Sequence[int] | None = None,
                            subset: Collection[bytes] | None = None,
                            inverse: bool = False) -> Writeable:
        """Returns the headers of the message (or of an embedded
        message/rfc822 part when a section is given), optionally filtered
        to ``subset`` — or its complement when ``inverse`` is set."""
        try:
            msg = self._get_subpart(section)
        except (IndexError, _NoContent):
            return Writeable.empty()
        if section:
            if msg.is_rfc822:
                msg = msg.body.nested[0]
            else:
                return Writeable.empty()
        if subset is None:
            return msg.header
        headers = Writeable.concat(value for key, value in msg.header.folded
                                   if inverse != (key.upper() in subset))
        return Writeable.concat((headers, Writeable.wrap(b'\r\n')))

    def get_message_text(self, section: Sequence[int] | None = None) \
            -> Writeable:
        """Returns the body of the message (or of an embedded
        message/rfc822 part when a section is given)."""
        try:
            msg = self._get_subpart(section)
        except (IndexError, _NoContent):
            return Writeable.empty()
        if section:
            if msg.is_rfc822:
                msg = msg.body.nested[0]
            else:
                return Writeable.empty()
        return msg.body

    @classmethod
    def _get_size_with_lines(cls, msg: MessageContent) -> tuple[int, int]:
        # Octet size plus line count, as needed by TEXT/MESSAGE structures.
        return len(msg), msg.lines

    def get_size(self, section: Sequence[int] | None = None) -> int:
        """Returns the size in octets of the section, or 0 when the
        section or content is unavailable."""
        try:
            msg = self._get_subpart(section)
        except (IndexError, _NoContent):
            return 0
        return len(msg)

    def get_envelope_structure(self) -> EnvelopeStructure:
        """Returns the envelope structure, or an empty one when the
        content is unavailable."""
        try:
            return self._get_envelope_structure(self.content)
        except _NoContent:
            return EnvelopeStructure.empty()

    def get_body_structure(self) -> BodyStructure:
        """Returns the body structure, or an empty one when the content is
        unavailable."""
        try:
            return self._get_body_structure(self.content)
        except _NoContent:
            return BodyStructure.empty()

    @classmethod
    def _get_envelope_structure(cls, msg: MessageContent) -> EnvelopeStructure:
        # Builds the envelope from the standard parsed header fields.
        parsed = msg.header.parsed
        return EnvelopeStructure(
            parsed.date, parsed.subject, parsed.from_, parsed.sender,
            parsed.reply_to, parsed.to, parsed.cc, parsed.bcc,
            parsed.in_reply_to, parsed.message_id)

    @classmethod
    def _get_params(cls, msg: MessageContent) -> Mapping[str, Any]:
        # Content-Type parameters of the part's body.
        return msg.body.content_type.params

    @classmethod
    def _get_body_structure(cls, msg: MessageContent) -> BodyStructure:
        # Dispatches on maintype/subtype to the matching structure class:
        # multipart, message/rfc822, text, or generic content.
        parsed = msg.header.parsed
        maintype = msg.body.content_type.maintype
        subtype = msg.body.content_type.subtype
        params = cls._get_params(msg)
        disposition = parsed.content_disposition
        language = parsed.content_language
        location = parsed.content_location
        if maintype == 'multipart':
            sub_body_structs = [cls._get_body_structure(part)
                                for part in msg.body.nested]
            return MultipartBodyStructure(
                subtype, params, disposition, language, location,
                sub_body_structs)
        content_id = parsed.content_id
        content_desc = parsed.content_description
        content_encoding = parsed.content_transfer_encoding
        if maintype == 'message' and subtype == 'rfc822':
            sub_msg = msg.body.nested[0]
            sub_env_struct = cls._get_envelope_structure(sub_msg)
            sub_body_struct = cls._get_body_structure(sub_msg)
            size, lines = cls._get_size_with_lines(msg)
            return MessageBodyStructure(
                params, disposition, language, location, content_id,
                content_desc, content_encoding, None, size, lines,
                sub_env_struct, sub_body_struct)
        elif maintype == 'text':
            size, lines = cls._get_size_with_lines(msg)
            return TextBodyStructure(
                subtype, params, disposition, language, location,
                content_id, content_desc, content_encoding, None, size, lines)
        size = len(msg)
        return ContentBodyStructure(
            maintype, subtype, params, disposition, language, location,
            content_id, content_desc, content_encoding, None, size)

    def contains(self, value: bytes) -> bool:
        """True if ``value`` occurs (case-insensitively) in any part's
        headers, or in the body of any text part."""
        try:
            content = self.content
        except _NoContent:
            return False
        pattern = re.compile(re.escape(value), re.I)
        for part in content.walk():
            if pattern.search(bytes(part.header)) is not None:
                return True
            elif part.body.content_type.maintype == 'text':
                if pattern.search(bytes(part.body)) is not None:
                    return True
        return False
|
# Generated by Django 3.2.4 on 2021-06-24 04:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Questionnaire.image and makes Score.score
    nullable with a default of 0.

    NOTE(review): generated migrations should not be edited by hand.
    """

    dependencies = [
        ('trivia', '0003_score'),
    ]

    operations = [
        migrations.AddField(
            model_name='questionnaire',
            name='image',
            field=models.ImageField(null=True, upload_to='uploads/'),
        ),
        migrations.AlterField(
            model_name='score',
            name='score',
            field=models.IntegerField(default=0, null=True),
        ),
    ]
|
import ee
# Function to join two image collections based on images with the nearest dates.
def data_join(left, right):
    """Joins two image collections on nearest acquisition time.

    Each image in ``left`` is matched to its closest-in-time image from
    ``right`` within 24 hours, and that match's bands are added to it.
    """
    one_day_ms = 24 * 60 * 60 * 1000
    time_filter = ee.Filter.maxDifference(
        difference=one_day_ms,
        leftField='system:time_start',
        rightField='system:time_start')
    best_join = ee.Join.saveBest(matchKey='match', measureKey='delta_t')
    joined = ee.ImageCollection(best_join.apply(left, right, time_filter))
    return joined.map(lambda img: img.addBands(img.get('match')))
# Function to reproject and resample resolution of an image collection based on an input projection.
def apply_match_proj(coll, proj):
    """Resamples every image in ``coll`` (mean-reduced) onto projection
    ``proj``."""
    def _reproject(img):
        reduced = img.reduceResolution(
            reducer=ee.Reducer.mean(),
            maxPixels=20000)
        return reduced.reproject(crs=proj)
    return coll.map(_reproject)
import base64
import json
import pickle
import zlib
from typing import Any, ClassVar, Dict, Protocol, Type
DEFAULT_CONTENT_TYPE = "pickle_compat"
# Highest version of the protocol, understood by Python2.
PICKLE_PY2_COMPAT_PROTO = 2
class Codec(Protocol):
    """Structural interface for message (de)serializers."""

    @classmethod
    def serialize(cls, message: Any) -> str:
        """Encodes ``message`` into a string."""
        ...

    @classmethod
    def deserialize(cls, serialized: str) -> Any:
        """Decodes a string produced by :meth:`serialize`."""
        ...
class JSONCodec:
    """Codec that stores messages as JSON text."""

    @classmethod
    def serialize(cls, message: Any) -> str:
        """Encodes ``message`` as a JSON string."""
        encoded = json.dumps(message)
        return encoded

    @classmethod
    def deserialize(cls, serialized: str) -> Any:
        """Parses a JSON string back into Python objects."""
        decoded = json.loads(serialized)
        return decoded
class PickleCodec:
    """Codec that pickles, zlib-compresses, and urlsafe-base64-encodes
    messages."""

    # Pickle protocol used when serializing; subclasses may pin an older
    # protocol for cross-version compatibility.
    protocol: ClassVar[int] = pickle.DEFAULT_PROTOCOL

    @classmethod
    def serialize(cls, message: Any) -> str:
        """Pickles ``message``, compresses it, and returns base64 text."""
        pickled = pickle.dumps(message, protocol=cls.protocol)
        encoded = base64.urlsafe_b64encode(zlib.compress(pickled))
        return encoded.decode("latin1")

    @classmethod
    def deserialize(cls, serialized: str) -> Any:
        """Inverts :meth:`serialize`: decodes, decompresses, unpickles."""
        compressed = base64.urlsafe_b64decode(serialized.encode("latin1"))
        return pickle.loads(zlib.decompress(compressed))
class PickleCompatCodec(PickleCodec):
    """PickleCodec pinned to protocol 2, the highest understood by
    Python 2."""

    protocol = PICKLE_PY2_COMPAT_PROTO
def get_codec(content_type: str) -> Type[Codec]:
    """Looks up the codec registered for ``content_type``.

    Raises:
        KeyError: if ``content_type`` is not in CONTENT_TYPES_CODECS.
    """
    return CONTENT_TYPES_CODECS[content_type]
# Registry mapping content-type names to codec classes (see get_codec).
CONTENT_TYPES_CODECS: Dict[str, Type[Codec]] = {
    "json": JSONCodec,
    "pickle": PickleCodec,
    "pickle_compat": PickleCompatCodec,
}
|
########################################
# create by :ding-PC
# create time :2018-03-08 12:02:25.983533
########################################
'''
初始化权限资料
'''
from seeds.models_rm import *
from app.models import *
from app import create_app_swagger
def init_rm_data(config):
    """Initializes rights-management (permission) seed data.

    Pushes an application context for the given config and creates all
    database tables. The actual seed rows are still TODO — the commented
    sample code below shows the intended calls.
    """
    app = create_app_swagger(config).app
    app_context = app.app_context()
    app_context.push()
    current_app.logger.info('start add right seeds ')
    db.create_all()
    # todo add right data
    # sample code
    # # HR system
    # empsys = add_sys1('emp0','empsys')
    # # Employee management
    # employeemng = add_sys2(empsys,'employeemng','员工管理')
    # # Employee record with view(1), insert(2), update(3), delete(4) ops
    # employee = add_sys3_and_op(empsys,employeemng,'employee','员工资料')
    # Add a 5th operation
    # add_sys3_op(employee, 'aud', '审核',5)
    # employee_pic = add_sys3(empsys,employeemng,'employee_pic','员工图片')
    # add_sys3_op(employee_pic, 'view', '浏览图片', 1)
    # add_sys3_op(employee_pic, 'insert', '上传图片',2)
    # add_sys3_op(employee_pic, 'delete', '删除图片', 4)
    current_app.logger.info('add right seeds success!')
|
from selenium import webdriver
# For using sleep function because selenium
# works only when the all the elemets of the
# page is loaded.
import time
from selenium.webdriver.common.keys import Keys
# Creating an instance webdriver
browser = webdriver.Chrome('C:/Users/ITPeople/code/Python/Automation/chromedriver')
browser.get('https://www.twitter.com')
# Let the page load before touching its elements.
time.sleep(2)
login = browser.find_elements_by_xpath('//*[@id="doc"]/div[1]/div/div[1]/div[2]/a[3]')
# Click the first matching "Log in" link. (A stray bare `login` expression
# statement that did nothing was removed here.)
login[0].click()
print("Loggin in Twitter")
user = browser.find_elements_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/div[1]/input')
# Enter User Name
user[0].send_keys('USER-NAME')
user = browser.find_element_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/div[2]/input')
# Reads password from a text file because
# saving the password in a script is just silly.
with open('test.txt', 'r') as myfile:
    Password = myfile.read().replace('\n', '')
user.send_keys(Password)
LOG = browser.find_elements_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/input[1]')
LOG[0].click()
print("Login Sucessfull")
time.sleep(5)
elem = browser.find_element_by_name("q")
elem.click()
elem.clear()
elem.send_keys("Geeks for geeks ")
# using keys to send special KEYS
elem.send_keys(Keys.RETURN)
print("Search Sucessfull")
# closing the browser
browser.close()
from django.conf.urls import include, url
from lost_and_found.views.sign_up import member_signup
from lost_and_found.views.log_in import member_login
from lost_and_found.views.update_user_info import update_user_info
from lost_and_found.views.get_user_info import get_user_info
from lost_and_found.views.check import check
from lost_and_found.views.get_user_uploads import get_user_uploads
from lost_and_found.views.get_uploads import get_uploads
from lost_and_found.views.member_info import member_info
# URL routes for the lost_and_found app; every view is function-based and
# imported individually above.
urlpatterns = [
    url(r'^signup/$', member_signup, name="signup"),
    url(r'^login/$', member_login, name="login"),
    url(r'^profile/$', get_user_info, name='user_info'),
    url(r'^member/(?P<username>\w+)/profile/update/$', update_user_info, name='profile_update'),
    url(r'^check/(?P<username>\w+)/(?P<item_name>\w+)/(?P<item_category>\w+)/upload/', check, name="check"),
    url(r'^(?P<username>\w+)/uploads/$', get_user_uploads, name='user_uploads'),
    url(r'^uploads/$', get_uploads, name='uploads'),
    url(r'^member/profile/info/$', member_info, name='member_info')
]
# Some basic PyTorch exercises
import numpy as np
import torch
print(torch.eye(3))
points = 100
points = np.arange(points)
print(points)
np.random.shuffle(points)
print(points)
#print()
result = torch.zeros(1, 9517)
print(result)
a = np.ones(3)
x = torch.from_numpy(a)
print(x)
x = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x)
x = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x.size())
print(x.view(x.size(0), -1))
# Permute the tensor's dimensions
#print(x.permute(1, 0))
# Exponentiation
print(x.pow(-1.0))
print(x.pow(2.0))
# Sum along columns (dim 0)
print(x.sum(dim = 0))
print(torch.sum(x, 0))
# Sum along rows (dim 1)
print(x.sum(dim = 1))
print(torch.sum(x, 1))
# Row sums, then their reciprocals via pow(-1)
temp = x.sum(dim = 1).pow(-1.0)
print(temp)
# Diagonal matrix built from the vector
print(temp.diag_embed())
t = torch.rand(2, 4, 3, 5)
a = np.random.rand(2, 4, 3, 5)
print(t.size())
print(t.stride())
print(t.stride(0))
print(t.stride(1))
print(t.stride(2))
print(t.stride(3))
#print(a.shape)
#print(a)
#print(t.size())
#print(t)
x = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x.stride())
print(x.narrow(0,0,3))
print(x.narrow(0,1,2))
print(x.narrow(1,1,2))
print(x.narrow(-1,2,1))
x=torch.zeros(3)
print(x)
a=torch.Tensor([[[1,2,3],[4,5,6]]])
b=torch.Tensor([1,2,3,4,5,6])
print(type(a))
print(a.view(1,6))
print(b.view(1,6))
print(a.view(3,2))
# Reshape tensor a into 3 rows x 2 columns
temp = a.view(3,2)
#print(temp.size(-1))
# Size of tensor temp
print(temp.size())
# Flatten into a single row
print(temp.view(1, -1))
# Flatten into a single column
print(temp.view(-1, 1))
#https://www.thepythoncode.com/article/get-youtube-data-python
from requests_html import HTMLSession
from bs4 import BeautifulSoup as bs # importing BeautifulSoup
# Video URLs to inspect.
videos = ["https://www.youtube.com/watch?v=tkvJQ5x-eEY&list=PLq1I6GEIwH6fdea1RoWbPwME0Bzjt8pnH",
          "https://www.youtube.com/watch?v=OKMoxMSCFng",
          "https://www.youtube.com/watch?v=GgWQFLble_I",
          "https://www.youtube.com/watch?v=RNHrIirvYow",
          "https://www.youtube.com/watch?v=jD92I5jHs9U",
          "https://www.youtube.com/watch?v=70BBCSy1cL4",
          "https://www.youtube.com/watch?v=3qxCcZmctqI",
          "https://www.youtube.com/watch?v=39ExyhtP6no&feature=youtu.be"]
# init an HTML Session
session = HTMLSession()
print("Iniciando a busca")
for video_url in videos:
    # get the html content
    response = session.get(video_url)
    # execute Java-script
    response.html.render(sleep=2, timeout=60)
    # create bs object to parse HTML
    soup = bs(response.html.html, "html.parser")
    result = {}
    result["title"] = soup.find("meta", itemprop="name")['content']
    # Bug fix: default to the string "0", not the int 0 — the string
    # concatenation in the print below raised TypeError whenever the
    # interactionCount meta tag was missing.
    result["views"] = "0"
    interaction = soup.find("meta", itemprop="interactionCount")
    if interaction:
        result["views"] = interaction['content']
    print(result['title'] + " : " + result['views'] + " views")
print("Fim")
from xml.etree import ElementTree
import yaml
import pytest
from vuecli.provider.provider import Provider
@pytest.fixture
def render_index(tmp_path):
    """Returns a callable that writes a vuepy.yml config into tmp_path and
    renders the provider's index page."""
    def _render(config=None):
        config_file = tmp_path.joinpath("vuepy.yml")
        config_file.write_text(yaml.dump(config or {}))
        return Provider(tmp_path).render_index()
    return _render
def parse_index(index):
    """Extracts stylesheets, scripts, x-templates, and the body onload
    call from a rendered index.html string."""
    root = ElementTree.fromstring(index)
    stylesheets = [link.attrib["href"] for link in root.findall("head/link")]
    scripts = [script.attrib["src"] for script in root.findall("head/script")]
    templates = {
        tpl.attrib["id"]: tpl.text.strip()
        for tpl in root.findall("body/script[@type='x-template']")
    }
    return {
        "stylesheets": stylesheets,
        "scripts": scripts,
        "templates": templates,
        "brython": root.find("body").attrib["onload"],
    }
class TestRenderIndex:
    """Tests for Provider.render_index via the render_index fixture."""

    def test_defaults(self, render_index):
        # An empty config still includes the built-in vuepy/vue scripts.
        index = render_index()
        assert parse_index(index) == {
            "stylesheets": [],
            "scripts": ["vuepy.js", "vue.js"],
            "templates": {},
            "brython": "brython();",
        }

    def test_custom_stylesheets(self, render_index):
        index = render_index({"stylesheets": ["first.css", "second.css"]})
        assert parse_index(index)["stylesheets"] == ["first.css", "second.css"]

    @pytest.mark.parametrize(
        "ext, js", [("vuex", "vuex.js"), ("vue-router", "vue-router.js")]
    )
    def test_enable_builtin_script(self, render_index, ext, js):
        # Enabling a known extension pulls in its bundled script.
        index = render_index({"scripts": {ext: True}})
        assert js in parse_index(index)["scripts"]

    @pytest.mark.parametrize("ext", ["vue", "brython", "vuex", "vue-router"])
    def test_customize_builtin_script(self, render_index, ext):
        # A string value overrides the built-in script source.
        index = render_index({"scripts": {ext: "custom"}})
        assert "custom" in parse_index(index)["scripts"]

    def test_custom_script(self, render_index):
        index = render_index({"scripts": ["myscript.js"]})
        assert "myscript.js" in parse_index(index)["scripts"]

    def test_custom_template(self, render_index, tmp_path):
        # Template files are read from disk and inlined into the page.
        tmp_path.joinpath("my.html").write_text("content")
        index = render_index({"templates": {"my": "my.html"}})
        assert parse_index(index)["templates"] == {"my": "content"}

    def test_custom_brython_args(self, render_index):
        index = render_index({"brython_args": {"debug": 10}})
        assert parse_index(index)["brython"] == "brython({ debug: 10 });"
|
##### PREDECESSOR AND SUCCESSOR #####
""" CURSO EM VÍDEO - PYTHON EXERCISE 005:
Write a program that reads an integer and prints its successor and its
predecessor.
Link: https://youtu.be/664e0G_S9nU
"""
###############################################################################
### START OF PROGRAM ##########################################################
###############################################################################
separador = '\n' + '-'*80 + '\n'
print(separador)
num = int(input('Digite um número inteiro: '))
print('\nSeu antecessor é {} e o seu sucessor é {}.'.format(num -1 , num +1 ))
print(separador)
###############################################################################
### END OF PROGRAM ############################################################
###############################################################################
|
import torch
from torch.nn.modules.loss import _Loss
from torch.distributions import kl_divergence
from .utils import mmd_rbf, mmd_imq, shuffle_code
class MMDTCVAELoss(_Loss):
    """VAE loss with reconstruction ('rec'), KL divergence ('kld'), and an
    MMD-based term ('wtc') computed between z and a shuffled copy of z.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        # Select the MMD kernel implementation from the config.
        if args.mmd_kernel == 'rbf':
            self.mmd = mmd_rbf
        elif args.mmd_kernel == 'imq':
            self.mmd = mmd_imq
        else:
            raise ValueError("Unsupported kernel type: " + args.mmd_kernel)

    @property
    def loss_components(self):
        # Names of the loss terms returned by forward(), in order.
        return ['rec', 'kld', 'wtc']

    def forward(self, model, sample):
        """Computes the three loss terms for one batch.

        Returns:
            A tuple of ((rec, kld, wtc), batch_size, logging_output) where
            logging_output maps each term name to its scalar value.
        """
        outputs = model(sample)
        batch_size = sample['batch_size']
        prior = outputs['prior']
        z = outputs['z']
        x_rec = outputs['x']
        logging_output = {}
        logging_output['batch_size'] = batch_size
        # Rec
        # p(x|z): negative log-likelihood of the input under the decoder.
        rec = torch.sum(- x_rec.log_prob(sample['image'])) / batch_size
        logging_output['rec'] = rec.item()
        # KLD between the approximate posterior and the prior.
        kld = torch.sum(
            kl_divergence(outputs['posterior'], prior)) / batch_size
        logging_output['kld'] = kld.item()
        # WTC: MMD between z and shuffle_code(z), scaled by the prior's
        # mean variance.
        prior_var = prior.variance.mean()
        shuffled_z = shuffle_code(z)
        wtc = self.mmd(z, shuffled_z, prior_var)
        logging_output['wtc'] = wtc.item()
        return (rec, kld, wtc), batch_size, logging_output
|
# projectile.py
"""updating projectile.py
provides a simple class for modeling the flight of projectiles."""
from math import sin, cos, radians
class Projectile:
    """Simulates the flight of simple projectiles near the earth's surface,
    ignoring wind resistance. Tracking is done in two dimensions, height (y)
    and distance (x).

    Bug fix: update/getX/getY previously referenced self.proj and
    self.marker, attributes of a separate graphical wrapper class that do
    not exist here, so every call raised AttributeError. (An undraw method
    belonging to that wrapper — and broken by a `Self` typo — was removed.)
    """

    # Gravitational acceleration in m/s^2.
    GRAVITY = 9.8

    def __init__(self, angle, velocity, height):
        """Create a projectile with given launch angle (degrees), initial
        velocity and height."""
        self.xpos = 0.0
        self.ypos = height
        theta = radians(angle)
        self.xvel = velocity * cos(theta)
        self.yvel = velocity * sin(theta)

    def update(self, dt):
        """Update the projectile dt seconds farther along its flight."""
        self.xpos = self.xpos + dt * self.xvel
        yvel1 = self.yvel - self.GRAVITY * dt
        # Average old and new vertical velocity over the interval.
        self.ypos = self.ypos + dt * (self.yvel + yvel1) / 2.0
        self.yvel = yvel1

    def getX(self):
        """Returns the current x coordinate (distance travelled)."""
        return self.xpos

    def getY(self):
        """Returns the current y coordinate (height)."""
        return self.ypos
|
import os
import django
def createClient(first_name, last_name, email):
    """Creates, saves, and returns a Client row."""
    new_client = Client(first_name=first_name, last_name=last_name,
                        email=email)
    new_client.save()
    return new_client
def createExercise(name, description, time):
    """Creates, saves, and returns an Exercise row."""
    new_exercise = Exercise(name=name, description=description, time=time)
    new_exercise.save()
    return new_exercise
def createDailyExercisePlan(name):
    """Creates, saves, and returns a DailyExercisePlan row."""
    new_plan = DailyExercisePlan(name=name)
    new_plan.save()
    return new_plan
def createWorkoutPlan(name):
    """Creates, saves, and returns a WorkoutPlan row."""
    new_plan = WorkoutPlan(name=name)
    new_plan.save()
    return new_plan
def createMembership(client, workoutplan):
    """Creates, saves, and returns a Membership linking a client to a
    workout plan."""
    new_membership = Membership(client=client, workoutplan=workoutplan)
    new_membership.save()
    return new_membership
def populate():
    """Seeds the database with demo clients, exercises, plans, and
    memberships."""
    client_1 = createClient(first_name="Luke", last_name="Skywalker", email="luke@tatooine.com")
    client_2 = createClient(first_name="Leia", last_name="Organa", email="leia@alderaan.com")
    client_3 = createClient(first_name="Han", last_name="Solo", email="han@corellia.com")
    exercise_1 = createExercise(name="Pushups", description="40x5 pushups", time="17:00")
    exercise_2 = createExercise(name="Situps", description="40x5 situps", time="17:20")
    exercise_3 = createExercise(name="Pullups", description="10x4 pullups", time="17:40")
    exercise_4 = createExercise(name="Squats", description="50x5 squats", time="18:00")
    daily_exercise_plan_1 = createDailyExercisePlan(name="Exercise Plan 1: A Jump Rope")
    daily_exercise_plan_2 = createDailyExercisePlan(name="Exercise Plan 2: Bad Knee Strikes Back")
    daily_exercise_plan_3 = createDailyExercisePlan(name="Exercise Plan 3: Return of the Cardio")
    workout_plan_1 = createWorkoutPlan(name="Workout Plan 1: Heavy Lifting")
    workout_plan_2 = createWorkoutPlan(name="Workout Plan 2: Fit For Summer")
    daily_exercise_plan_1.exercises.add(exercise_1, exercise_2, exercise_3)
    daily_exercise_plan_1.save()
    daily_exercise_plan_2.exercises.add(exercise_2, exercise_3, exercise_4)
    daily_exercise_plan_2.save()
    daily_exercise_plan_3.exercises.add(exercise_4, exercise_1, exercise_2)
    daily_exercise_plan_3.save()
    workout_plan_1.monday.add(daily_exercise_plan_1)
    workout_plan_1.tuesday.add(daily_exercise_plan_2)
    workout_plan_2.monday.add(daily_exercise_plan_3)
    workout_plan_2.tuesday.add(daily_exercise_plan_2)
    # Fix: the original rebound `membership_1` three times; the factory
    # already persists each row, so no bindings are needed at all.
    createMembership(client=client_1, workoutplan=workout_plan_1)
    createMembership(client=client_2, workoutplan=workout_plan_2)
    createMembership(client=client_3, workoutplan=workout_plan_1)
if __name__ == '__main__':
    print("Start populating..")
    # Django must be configured before any models can be imported, which
    # is why the model import happens inside this guard.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chewbacca.settings')
    django.setup()
    from clients.models import Client, WorkoutPlan, Exercise, DailyExercisePlan, Membership
    populate()
|
from behave import *
import ast
from katas.your_order_please.your_order_please import your_order_please
@given("sentence = {sentence}")
def set_up_params_for_your_order_please(context, sentence):
    """Parses the literal sentence from the feature file onto the context."""
    context.sentence = ast.literal_eval(sentence)
@when("function 'your_order_please' is called with these params")
def execute_your_order_please(context, ):
    """Calls the kata under test and stores its result on the context."""
    context.your_order_please_result = your_order_please(context.sentence)
@then("function 'your_order_please' returns {result}")
def test_result_of_your_order_please(context, result):
    """Compares the stored result with the literal expected value."""
    assert(context.your_order_please_result == ast.literal_eval(result))
|
from moviepy.editor import *
import sys
import os
import numpy as np
# Compression floors: do not reduce below these fps / color / dimension
# values. limit_size is presumably the target maximum output size in
# bytes — TODO confirm against compressClip (its body is below).
min_fps = 10
min_colors = 40
min_dimension = 160
limit_size = 1000000
# FUNCTIONS DEFINATION
def getClipWidth(clip):
    """Returns the pixel width of the clip (columns of its first frame)."""
    return np.size(clip.get_frame(0), 1)
def getClipHeight(clip):
    """Returns the pixel height of the clip (rows of its first frame)."""
    return np.size(clip.get_frame(0), 0)
def getClipSide(clip):
    """Names the smaller side of the clip: 'width' or 'height'."""
    return 'width' if getClipWidth(clip) < getClipHeight(clip) else 'height'
# Get the dimension of the smallest side.
def getClipDimension(clip):
    """Return the length, in pixels, of the clip's shortest side."""
    width = getClipWidth(clip)
    height = getClipHeight(clip)
    return width if width < height else height
# Get the total frames count of the clip.
def getClipFramesCount(clip):
    """Approximate the clip's total frame count as fps * duration, truncated."""
    total_frames = clip.fps * clip.duration
    return int(total_frames)
# Get the size of a file.
def getFileSize(path):
    """Return the size of the file at *path*, in bytes."""
    size_in_bytes = os.path.getsize(path)
    return size_in_bytes
# Show the info of the original clip.
def showOrigInfo(width, height, framesCount, fps, duration, colors, size):
    """Print dimension, frame count, and file size of the source clip.

    NOTE(review): the *colors* parameter is accepted but never printed here.
    """
    print(' Dimension: %d * %d' % (width, height))
    print(' Frames Count: %(fr)d (%(fps)d fps * %(du).2f s)' %
          {'fr': framesCount, 'fps': fps, 'du': duration})
    # size arrives in bytes; shown as (decimal) kilobytes
    print(' File Size: %d KB\n' % (size / 1000))
# Show the changes after compression.
def showChangedInfo(width, height, framesCount, fps, duration, colors, size,
                    orig_width, orig_height, orig_framesCount,
                    orig_fps, orig_duration, orig_size):
    """Print a before -> after comparison of dimension, frames, colors, size."""
    print(' Dimension: %(orig_wid)d * %(orig_hei)d -> %(curr_wid)d * %(curr_hei)d' %
          {'curr_wid': width, 'curr_hei': height, 'orig_wid': orig_width, 'orig_hei': orig_height})
    print(' Frames Count: %(orig_fr)d (%(orig_fps)d fps * %(orig_du).2f s) -> %(curr_fr)d (%(curr_fps)d fps * %(curr_du).2f s)' %
          {'orig_fr': orig_framesCount, 'orig_fps': orig_fps, 'orig_du': orig_duration,
           'curr_fr': framesCount, 'curr_fps': fps, 'curr_du': duration})
    print(' Colors Count: %d' % colors)
    # sizes arrive in bytes; shown as (decimal) kilobytes
    print(' Size: %(orig)d KB -> %(curr)d KB\n' %
          {'orig': (orig_size / 1000), 'curr': (size / 1000)})
# Compress the clip.
def compressClip(clip_path):
    """Convert the video at *clip_path* into a GIF under ``limit_size`` bytes.

    Starts from mild settings (dimension <= 300 px, fps <= 15, 64 colors) and
    repeatedly re-encodes with progressively smaller dimension, fps, and
    palette until the file fits, or every floor (min_fps / min_dimension /
    min_colors) has been reached.  Output is written to ./output/<name>.gif.
    """
    # Output file name and path setting.
    file_name, file_extension = os.path.splitext(clip_path)
    output_filename = file_name + '.gif'
    output_folder = os.path.join(os.getcwd(), 'output/')
    temp_path = os.path.join(output_folder, output_filename)
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    clip = VideoFileClip(clip_path)
    # Store original clip information
    shortest_side = getClipSide(clip)
    original_dimension = getClipDimension(clip)
    original_width = getClipWidth(clip)
    original_height = getClipHeight(clip)
    original_fps = clip.fps
    original_duration = clip.duration
    original_framesCount = getClipFramesCount(clip)
    original_size = getFileSize(clip_path)
    print('\nOriginal Info:')
    showOrigInfo(original_width, original_height, original_framesCount,
                 original_fps, original_duration, 0, original_size)
    # PRE-COMPRESSION
    # Change color count.
    current_colorsCount = 64
    # Set a variable for changing dimension.
    if original_dimension > 300:
        current_dimension = 300
    else:
        current_dimension = original_dimension
    # Change dimension based on the shortest side.
    if shortest_side == 'width':
        temp_clip = clip.resize(width=current_dimension)
    else:
        temp_clip = clip.resize(height=current_dimension)
    # Change fps.
    if original_fps > 15:
        current_fps = 15
    else:
        current_fps = original_fps
    # Compress to a gif file.
    temp_clip.write_gif(temp_path, fps=current_fps, program='ffmpeg',
                        colors=current_colorsCount, tempfiles=True)
    temp_clip = VideoFileClip(temp_path)
    current_size = getFileSize(temp_path)
    current_framesCount = getClipFramesCount(temp_clip)
    current_duration = temp_clip.duration
    print('\n\n1-time compression finished.')
    showChangedInfo(getClipWidth(temp_clip), getClipHeight(temp_clip),
                    current_framesCount, temp_clip.fps, current_duration,
                    current_colorsCount, current_size, original_width,
                    original_height, original_framesCount, original_fps,
                    original_duration, original_size)
    # COMPRESSION
    # compression_counter indexes the next setting to tighten; real_counter
    # only counts passes that actually changed a setting (used in the log).
    compression_counter = 1
    real_counter = 1
    while True:
        # Stop when the file is small enough, or when every floor is reached.
        if (current_size < limit_size) or (current_fps <= min_fps and current_dimension <= min_dimension and current_colorsCount <= min_colors):
            # os.rename(temp_path, output_path)
            print('Ouput file saved to %s\n' % temp_path)
            break
        # Compression settings
        if compression_counter == 0:
            # NOTE(review): dead branch — compression_counter starts at 1 and
            # only increases, so this case can never be reached.
            if original_dimension > 300:
                current_dimension = 300
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 1:
            if original_dimension > 260:
                current_dimension = 260
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 2:
            if original_fps > 12:
                current_fps = 12
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 3:
            current_colorsCount = 56
            real_counter += 1
            compression_counter += 1
        elif compression_counter == 4:
            if original_dimension > 220:
                current_dimension = 220
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 5:
            current_colorsCount = 48
            real_counter += 1
            compression_counter += 1
        elif compression_counter == 6:
            if original_dimension > 200:
                current_dimension = 200
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 7:
            current_colorsCount = 40
            real_counter += 1
            compression_counter += 1
        elif compression_counter == 8:
            if original_fps > 10:
                current_fps = 10
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        elif compression_counter == 9:
            if original_dimension > 160:
                current_dimension = 160
                real_counter += 1
                compression_counter += 1
            else:
                compression_counter += 1
                continue
        # Execute the compression
        # Change dimension based on the shortest side.
        if shortest_side == 'width':
            temp_clip = clip.resize(width=current_dimension)
        else:
            temp_clip = clip.resize(height=current_dimension)
        # Compress to a gif file.
        temp_clip.write_gif(temp_path, fps=current_fps, program='ffmpeg',
                            colors=current_colorsCount, tempfiles=True)
        temp_clip = VideoFileClip(temp_path)
        current_size = getFileSize(temp_path)
        current_framesCount = getClipFramesCount(temp_clip)
        current_duration = temp_clip.duration
        print('\n\n%d-time compression finished.' % (real_counter))
        showChangedInfo(getClipWidth(temp_clip), getClipHeight(temp_clip),
                        current_framesCount, temp_clip.fps, current_duration,
                        current_colorsCount, current_size, original_width,
                        original_height, original_framesCount, original_fps,
                        original_duration, original_size)
# MAIN EXECUTION
# Each command-line argument is a clip path; compress them one at a time.
files_count = len(sys.argv) - 1
for i in range(files_count):
    clip_path = str(sys.argv[i + 1])
    print('\n----------------------------------------------\nCurrent job: %s' % clip_path)
    print('\nOverall progress: %(current)d/%(overall)d Started.' %
          {'current': (i + 1), 'overall': files_count})
    compressClip(clip_path)
    print('\nOverall progress: %(current)d/%(overall)d Finished.' %
          {'current': (i + 1), 'overall': files_count})
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated migration altering the Event start/end time defaults."""

    dependencies = [
        ('slobbbyApp', '0005_auto_20141115_1348'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='endTime',
            # NOTE(review): these defaults are the wall-clock times captured
            # when makemigrations ran, not meaningful business defaults.
            field=models.TimeField(default=datetime.time(14, 14, 46, 46780)),
        ),
        migrations.AlterField(
            model_name='event',
            name='startTime',
            field=models.TimeField(default=datetime.time(14, 14, 46, 46735)),
        ),
    ]
|
# -*- coding: utf-8 -*-
import logging
import time
from apscheduler.schedulers.background import BackgroundScheduler
from zvdata import IntervalLevel
from zvt import init_log
from zvt.recorders.joinquant.quotes.jq_stock_kdata_recorder import JqChinaStockKdataRecorder
logger = logging.getLogger(__name__)

# One process-wide background scheduler; jobs attach via the decorators below.
sched = BackgroundScheduler()
# 名称 dataschema provider comments download
# 个股资料 Stock eastmoney,sina 个股和板块为多对多的关系
# 板块资料 Index eastmoney,sina 板块有行业,概念,区域三个分类的维度,不同的provider分类会有所不同,个股和板块为多对多的关系
# 个股行情 Stock{level}Kdata joinquant,netease,eastmoney 支持1,5,15,30,60分钟, 日线,周线级别
# 指数日线行情 Index1DKdata eastmoney,sina,joinquant 指数本质上也是一种板块,指数对应板块资料中的标的
# 个股资金流 MoneyFlow eastmoney,sina,joinquant
# 板块资金流 MoneyFlow eastmoney,sina,joinquant 对应板块资料里面的标的
# 分红融资数据 DividendFinancing eastmoney 企业最后的底线就是能不能给投资者赚钱,此为年度统计信息
# 分红明细 DividendDetail eastmoney
# 融资明细 SPODetail eastmoney
# 配股明细 RightsIssueDetail eastmoney
# 主要财务指标 FinanceFactor eastmoney
# 资产负债表 BalanceSheet eastmoney
# 利润表 IncomeStatement eastmoney
# 现金流量表 CashFlowStatement eastmoney
# 十大股东 TopTenHolder eastmoney
# 十大流通股东 TopTenTradableHolder eastmoney
# 机构持股 InstitutionalInvestorHolder eastmoney
# 高管交易 ManagerTrading eastmoney
# 大股东交易 HolderTrading eastmoney
# 大宗交易 BigDealTrading eastmoney
# 融资融券 MarginTrading eastmoney
# 龙虎榜数据 DragonAndTiger eastmoney
@sched.scheduled_job('cron', hour=1, minute=10, day_of_week='tue-sat')
def record_day_kdata():
    """Fetch daily China stock kdata, retrying up to 10 times on failure."""
    for _attempt in range(10):
        try:
            JqChinaStockKdataRecorder(level=IntervalLevel.LEVEL_1DAY).run()
            break
        except Exception as e:
            logger.exception('joinquant_run_recorder joinquant day_kdata runner error:{}'.format(e))
            # back off before the next attempt
            time.sleep(60 * 2)
# Fetch weekly kdata once a week (original note said Saturday, but APScheduler's day_of_week counts Monday as 0, so 6 is Sunday — verify intent)
@sched.scheduled_job('cron', day_of_week=6, hour=2, minute=30)
def record_wk_kdata():
    """Fetch weekly China stock kdata, retrying up to 9 times on failure."""
    for _attempt in range(9):
        try:
            JqChinaStockKdataRecorder(level=IntervalLevel.LEVEL_1WEEK).run()
            break
        except Exception as e:
            logger.exception('joinquant_run_recorder joinquant wk_kdata runner error:{}'.format(e))
            # back off before the next attempt
            time.sleep(60 * 2)
# Fetch monthly kdata once a week (day_of_week=0 is Monday in APScheduler; the previous comment was copied from the weekly job and did not match)
@sched.scheduled_job('cron', day_of_week=0, hour=2, minute=30)
def record_month_kdata():
    """Fetch monthly China stock kdata, retrying up to 9 times on failure."""
    for _attempt in range(9):
        try:
            JqChinaStockKdataRecorder(level=IntervalLevel.LEVEL_1MON).run()
            break
        except Exception as e:
            logger.exception('joinquant_run_recorder joinquant month_kdata runner error:{}'.format(e))
            # back off before the next attempt
            time.sleep(60 * 2)
if __name__ == '__main__':
    init_log('joinquant_run_recorder.log')
    # record_day_kdata()
    # record_wk_kdata()
    sched.start()
    # Block the main thread so the background scheduler keeps running.
    # NOTE(review): _thread is a private BackgroundScheduler attribute.
    sched._thread.join()
|
'''
Contains methods to look up information on Wikipedia using the
wikimedia API.

search_wiki(search_field, lang) -> takes the concept to search for and
the Wikipedia language edition to use, and returns the summary of the
page that was found.
'''
import wikipedia
def search_wiki(search_field, lang='PT'):
    '''
    Use the wikimedia API to search Wikipedia and return the summary
    of the searched concept.

    search_field - string with the term to search for
    lang - Wikipedia language edition (default: "PT")

    Returns the page summary on success; implicitly returns None when the
    term is ambiguous or no page is found (suggestions are printed instead).
    '''
    # NOTE(review): Wikipedia language codes are usually lowercase ('pt');
    # confirm that the uppercase default works with this library.
    wikipedia.set_lang(lang)  # switch the language edition used for lookups
    try:
        result = wikipedia.page(search_field)  # try to find a matching page
        # print(result.summary)  # show only the summarised info
        return result.summary
    except wikipedia.exceptions.DisambiguationError as e:  # raised when the search term is ambiguous
        print(e.options)  # show the different suggestions
    except wikipedia.exceptions.PageError as e:  # raised when no result is found
        print("Página não encontrada")
        print(wikipedia.search(search_field, 5))  # show suggestions (if any)
|
# Generated by Django 2.2 on 2019-05-26 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering Question.answer_type choices."""

    dependencies = [
        ('survey', '0047_auto_20190526_1102'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='answer_type',
            field=models.CharField(choices=[('vote', 'vote'), ('textarea', 'textarea'), ('radio', 'radio'), ('checkbox', 'checkbox')], default='radio', max_length=32),
        ),
    ]
|
"""Registration code of Gym environments in this package."""
import gym
from ._rom_mode import RomMode
def _register_mario_env(id, **kwargs):
    """
    Register a Super Mario Bros. (1/2) environment with OpenAI Gym.

    Args:
        id (str): id for the env to register
        kwargs (dict): keyword arguments for the SuperMarioBrosEnv initializer

    Returns:
        None
    """
    # the env itself never times out; the registry-level cap below applies
    kwargs['max_episode_steps'] = float('inf')
    # assemble the registration settings, then register the environment
    registration_config = {
        'id': id,
        'entry_point': 'gym_super_mario_bros:SuperMarioBrosEnv',
        'max_episode_steps': 9999999,
        'reward_threshold': 32000,
        'kwargs': kwargs,
        'nondeterministic': True,
    }
    gym.envs.registration.register(**registration_config)
# Super Mario Bros. with standard frame skip
# (the version suffix selects the ROM graphics mode passed as rom_mode)
_register_mario_env('SuperMarioBros-v0', frameskip=4, rom_mode=RomMode.VANILLA)
_register_mario_env('SuperMarioBros-v1', frameskip=4, rom_mode=RomMode.DOWNSAMPLE)
_register_mario_env('SuperMarioBros-v2', frameskip=4, rom_mode=RomMode.PIXEL)
_register_mario_env('SuperMarioBros-v3', frameskip=4, rom_mode=RomMode.RECTANGLE)
# Super Mario Bros. with no frame skip
_register_mario_env('SuperMarioBrosNoFrameskip-v0', frameskip=1, rom_mode=RomMode.VANILLA)
_register_mario_env('SuperMarioBrosNoFrameskip-v1', frameskip=1, rom_mode=RomMode.DOWNSAMPLE)
_register_mario_env('SuperMarioBrosNoFrameskip-v2', frameskip=1, rom_mode=RomMode.PIXEL)
_register_mario_env('SuperMarioBrosNoFrameskip-v3', frameskip=1, rom_mode=RomMode.RECTANGLE)
# Super Mario Bros. 2 (Lost Levels) with standard frame skip
_register_mario_env('SuperMarioBros2-v0', lost_levels=True, frameskip=4, rom_mode=RomMode.VANILLA)
_register_mario_env('SuperMarioBros2-v1', lost_levels=True, frameskip=4, rom_mode=RomMode.DOWNSAMPLE)
# Super Mario Bros. 2 (Lost Levels) with no frame skip
_register_mario_env('SuperMarioBros2NoFrameskip-v0', lost_levels=True, frameskip=1, rom_mode=RomMode.VANILLA)
_register_mario_env('SuperMarioBros2NoFrameskip-v1', lost_levels=True, frameskip=1, rom_mode=RomMode.DOWNSAMPLE)
def _register_mario_level_env(id, **kwargs):
    """
    Register a Super Mario Bros. (1/2) Level environment with OpenAI Gym.

    Args:
        id (str): id for the env to register
        kwargs (dict): keyword arguments for the SuperMarioBrosLevelEnv initializer

    Returns:
        None
    """
    # the env itself never times out; the registry-level cap below applies
    kwargs['max_episode_steps'] = float('inf')
    # assemble the registration settings, then register the environment
    registration_config = {
        'id': id,
        'entry_point': 'gym_super_mario_bros:SuperMarioBrosLevelEnv',
        'max_episode_steps': 9999999,
        'reward_threshold': 32000,
        'kwargs': kwargs,
        'nondeterministic': True,
    }
    gym.envs.registration.register(**registration_config)
# a template for making individual level environments
_ID_TEMPLATE = 'SuperMarioBros{}-{}-{}-v{}'
# iterate over all the rom modes, worlds (1-8), and levels (1-4)
_ROM_MODES = [
    RomMode.VANILLA,
    RomMode.DOWNSAMPLE,
    RomMode.PIXEL,
    RomMode.RECTANGLE
]
# the list index of each ROM mode doubles as the env version number
for version, rom_mode in enumerate(_ROM_MODES):
    for world in range(1, 9):
        for level in range(1, 5):
            # setup the frame-skipping environment
            env_id = _ID_TEMPLATE.format('', world, level, version)
            _register_mario_level_env(env_id,
                frameskip=4,
                rom_mode=rom_mode,
                target_world=world,
                target_level=level
            )
            # setup the no frame-skipping environment
            env_id = _ID_TEMPLATE.format('NoFrameskip', world, level, version)
            _register_mario_level_env(env_id,
                frameskip=1,
                rom_mode=rom_mode,
                target_world=world,
                target_level=level
            )
# create an alias to gym.make for ease of access
make = gym.make
# define the outward facing API of this module (none, gym provides the API)
__all__ = [make.__name__]
|
from functools import wraps
import logging
def log_and_discard_exceptions(fun):
    """
    Wrap *fun* so any exception it raises is logged and discarded.

    The wrapper calls *fun* and returns its result; if the call raises,
    a warning naming the function and the exception is logged and the
    wrapper returns None instead of propagating the error.
    """
    @wraps(fun)
    def decorator_function(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception as e:
            # Exception, not BaseException: KeyboardInterrupt and SystemExit
            # must still propagate rather than being silently swallowed.
            fun_name = fun.__module__ + "." + fun.__name__
            # lazy %-style args: the message is only formatted if emitted
            logging.warning("In function %s, %s is caught and discarded: %s",
                            fun_name, type(e).__name__, str(e))
    return decorator_function
|
#!/usr/bin/env python3
import pypdfchptsplit

# Thin launcher: delegate straight to the package's entry point.
if __name__ == '__main__':
    pypdfchptsplit.main()
|
from pathlib import Path
import numpy as np
import yt
from astropy import table
from . import run_attributes
import sys

# Make the sibling analysis_functions directory importable, then pull in the
# age-spread helpers that live there.
sys.path.append(str(Path(__file__).parent.parent / "analysis_functions"))
import age_spreads

yt.funcs.mylog.setLevel(50)  # ignore yt's output
# ======================================================================================
#
# Setup for precalculation of key quantities
#
# ======================================================================================
class Galaxy(object):
    """A single galaxy: a yt dataset plus halo properties and an optional
    selection sphere used to pick out the particles belonging to it."""

    def __init__(
        self,
        ds,
        center=None,
        sphere_radius=None,
        m_vir=None,
        r_vir=None,
        rank=None,
        name=None,
    ):
        # ds: the yt dataset this galaxy lives in
        # center / sphere_radius: define the selection sphere (optional)
        # m_vir / r_vir: virial mass and radius from the halo catalog
        # rank: 1-indexed rank of this halo by mass within the simulation
        self.ds = ds
        self.center = center
        self.m_vir = m_vir
        self.r_vir = r_vir
        self.rank = rank
        self.name = name
        # lazily-built mask of clusters that are done forming (see below)
        self.mask_done_forming = None
        # only create the sphere if the user wants to
        if sphere_radius is not None:
            self.sphere = self.ds.sphere(center=self.center, radius=sphere_radius)
        else:
            self.sphere = None
        # have a place for some things (like the CIMF) to be stored as a user
        # calculates them.
        self.precalculated = dict()

    def __getitem__(self, property):
        """
        Automatically apply the cut to only pick formed clusters
        """
        if self.sphere is None:
            raise ValueError("No sphere set on galaxy initialization")
        # then get the mask showing which clusters are done forming
        if self.mask_done_forming is None and property[0] == "STAR":
            self.make_finished_cluster_mask()
        quantity = self.sphere[property]
        if property[0] == "STAR":
            quantity = quantity[self.mask_done_forming]
        return quantity

    def make_finished_cluster_mask(self):
        # clusters older than 15 Myr count as done forming
        self.mask_done_forming = self.sphere[("STAR", "age")] > 15 * yt.units.Myr

    def prop_all_clusters(self, property):
        """
        Like __getitem__, but does not apply the restriction that clusters must be
        done forming
        """
        if self.sphere is None:
            raise ValueError("No sphere set on galaxy initialization")
        return self.sphere[property]
class Simulation(object):
    """One simulation output: the yt dataset, plotting metadata, the Rockstar
    halo catalog, and Galaxy objects built from the most massive halos."""

    def __init__(
        self, ds_path, sphere_radius_kpc=None, min_virial=False, n_galaxies=None
    ):
        """
        ds_path must be a Path object
        """
        # get the dataset and corresponding halo file
        self.run_dir = ds_path.parent.parent
        halo_path = self.run_dir / "halos"
        halo_name = ds_path.name.replace("continuous_", "out_")
        halo_name = halo_name.replace(".art", ".list")
        self.ds = yt.load(str(ds_path))
        self.z = self.ds.current_redshift
        self.scale_factor = 1 / (1 + self.z)
        # get the axis names and other stuff. The dictionaries are defaultdicts, so
        # there is no need to worry about key errors
        self.names = run_attributes.names[self.run_dir]
        self.axes = list(self.names.keys())
        self.color = run_attributes.colors[self.run_dir]
        self.marker = run_attributes.markers[self.run_dir]
        self.ls = run_attributes.lss[self.run_dir]
        # have the place to store some precalculated things, particularly those that
        # include all galaxies in the simulation
        self.precalculated = dict()
        # if we are the old IC set, we have one galaxy, otherwise two
        # check what kind of particles are present
        if n_galaxies is None:
            if ("N-BODY_0", "MASS") in self.ds.derived_field_list:
                self.n_galaxies = 2
            else:
                self.n_galaxies = 1
        else:
            self.n_galaxies = n_galaxies
        # load halo catalogs
        cat = table.Table.read(halo_path / halo_name, format="ascii.commented_header")
        # delete some unneeded quantities. If I want to inlcude these later, I'll need
        # to verify I'm doing the units correctly.
        for col in cat.colnames:
            if col not in ["Mvir", "Vmax", "Vrms", "Rvir", "X", "Y", "Z"]:
                del cat[col]
        # To modify the units of things, we need to know little h. It's in the
        # file header. We also want a, to turn things into physical units.
        with open(halo_path / halo_name, "r") as in_file:
            line_num = 1
            for line in in_file:
                if line_num == 2:
                    a = float(line.split()[-1])
                elif line_num == 3:
                    h = float(line.split()[-1])
                    break
                line_num += 1
        assert 0 < a < 1.01  # slight buffer for last output
        assert 0.6 < h < 0.8
        # Masses are in Msun / h
        cat["Mvir"] = cat["Mvir"] / h
        # Positions in Mpc / h (comoving)
        for col in ["X", "Y", "Z"]:
            cat[col] = cat[col] / h
        # Halo Distances, Lengths, and Radii in kpc / h (comoving)
        cat["Rvir"] = cat["Rvir"] / h
        # Velocities in km / s (physical, peculiar) -- no change needed
        # add units to names
        cat.rename_column("Mvir", "Mvir_msun")
        cat.rename_column("X", "X_mpccm")
        cat.rename_column("Y", "Y_mpccm")
        cat.rename_column("Z", "Z_mpccm")
        cat.rename_column("Rvir", "Rvir_kpccm")
        cat.rename_column("Vmax", "Vmax_kms")
        cat.rename_column("Vrms", "Vrms_kms")
        # Do some parsing of the halo catalogs
        # We get the indices that sort it. The reversing there makes the biggest
        # halos first, like we want.
        rank_idxs = np.argsort(cat["Mvir_msun"])[::-1]
        # Add this info to each halo object, and put the halos into a new sorted list,
        # with the highest mass (lowest rank) halos first). We only keep the number
        # the user requested
        self.galaxies = []
        for rank, idx in enumerate(rank_idxs[: self.n_galaxies], start=1):
            row = cat[idx]
            r_vir = self.ds.arr(row["Rvir_kpccm"], "kpccm")
            M_vir = self.ds.arr(row["Mvir_msun"], "Msun")
            center = self.ds.arr(
                [row["X_mpccm"], row["Y_mpccm"], row["Z_mpccm"]], "Mpccm"
            )
            if sphere_radius_kpc is None:
                radius = None
            else:
                if min_virial:
                    # clamp the sphere to the virial radius when requested
                    radius = self.ds.arr(
                        min(r_vir.to("kpc").value, sphere_radius_kpc), "kpc"
                    )
                else:
                    radius = self.ds.arr(sphere_radius_kpc, "kpc")
            self.galaxies.append(Galaxy(self.ds, center, radius, M_vir, r_vir, rank))
        # # we need to be careful with a couple of runs. Runs with epsff=1% or runs
        # # with 10% and fboost=1 are unreliable above 10^5 Msun
        # run_dir_str = str(self.run_dir)
        # if "sfe001" in run_dir_str or (
        #     "sfe010" in run_dir_str and "fboost1" in run_dir_str
        # ):
        #     self.unreliable_mass = 1e5
        #     self.reliable = False
        # else:
        #     self.unreliable_mass = np.inf
        #     self.reliable = True
        if sphere_radius_kpc is not None:
            self._determine_failed()

    def _determine_failed(self):
        """Set self.reliable / self.unreliable_mass: flag the run unreliable
        above the mass bin where the median formation duration exceeds 14 Myr."""
        # probably change this at some point
        if (
            "sfe010" not in str(self.run_dir) and "sfe001" not in str(self.run_dir)
        ) or "rj" in str(self.run_dir):
            self.reliable = True
            self.unreliable_mass = np.inf
            return
        # Figure out at what mass the durations reach a median of 14 Myr
        durations = self.func_all_galaxies(
            lambda g: age_spreads.duration(g).to("Myr").value
        )
        masses = self.func_all_galaxies(
            lambda g: g[("STAR", "INITIAL_MASS")].to("Msun").value
        )
        # assume I'm reliable, then break otherwise
        self.reliable = True
        self.unreliable_mass = np.inf
        # walk log-mass bins of width dm from 10^3 to 10^7 Msun
        dm = 0.25
        m_min = 3
        while m_min < 7:
            m_max = m_min + dm
            good_idx = np.logical_and(masses > 10 ** m_min, masses < 10 ** m_max)
            # check that there aren't too few clusters
            if np.sum(good_idx) < 10:
                m_min += dm
                continue
            this_durations = durations[good_idx]
            median = np.median(this_durations)
            if median > 14:
                self.unreliable_mass = 10 ** m_min
                self.reliable = False
                break
            m_min += dm

    def __repr__(self):
        return str(self.run_dir)

    def func_all_galaxies(self, func):
        """
        Apply one function to all galaxies, and append the results to each other.
        This is likely used for something like getting the stellar masses of all stars
        in all galaxies, or their bound fractions.
        """
        return np.concatenate([func(galaxy) for galaxy in self.galaxies])
# ======================================================================================
#
# Loading datasets
#
# ======================================================================================
# Get the datasets and halo catalogs. When doing these we need to be a bit
# careful about the datasets. We will make one set of comparisons at the last
# common output of all simulations, then one with the last output of each
# simulation. Those all need to be stored separately.
def filename_to_scale_factor(filename):
    """Extract the scale factor encoded in an output filename.

    Output names end in ``a<scale>.art`` (e.g. ``continuous_a0.1801.art``),
    so the six characters before the extension hold the scale factor.
    """
    scale_str = filename[-10:-4]
    return float(scale_str)
def get_outputs_in_dir(sim_dir):
    """List the raw outputs (continuous_a*.art files) under sim_dir/out."""
    # NOTE: a former guard skipped directories missing from
    # run_attributes.names; it was dropped to keep this loader general.
    out_dir = Path(sim_dir) / "out"
    outputs = []
    for entry in out_dir.iterdir():
        entry_name = str(entry.name)
        is_output = (
            entry.is_file()
            and entry_name.endswith(".art")
            and entry_name.startswith("continuous_a")
        )
        if is_output:
            outputs.append(entry)
    return outputs
def get_simulations_last(sim_dirs, sphere_radius_kpc=30, min_virial=True):
    """Load each simulation at its own final (latest) output."""
    sims = []
    for sim_dir in sorted(sim_dirs):
        outputs = get_outputs_in_dir(sim_dir)
        if not outputs:
            continue
        # the lexicographically largest name is the latest scale factor
        sims.append(Simulation(max(outputs), sphere_radius_kpc, min_virial))
    return sims
def get_common_scale_factor(sim_dirs, z_max):
    """Return the last output scale factor shared by all named runs,
    plus a small fudge so near-equal scale factors compare as common."""
    # restrict to be a reasonable redshift
    a_min = 1 / (1 + z_max)
    last_scales = []
    for sim_dir in sim_dirs:
        sim_dir = Path(sim_dir)
        if sim_dir not in run_attributes.names:
            continue
        outputs = get_outputs_in_dir(sim_dir)
        if not outputs:
            print(f"This has no outputs: {sim_dir}")
            continue
        final_output = sorted(outputs)[-1]
        final_scale = filename_to_scale_factor(final_output.name)
        if final_scale > a_min:
            last_scales.append(final_scale)
    # include fudge factor for scale comparisons (so 0.1801 and 0.1802 match)
    return min(last_scales) + 0.001
def get_simulations_common(sim_dirs, z_max=5, sphere_radius_kpc=30, min_virial=True):
    """Load every simulation at the last output they all share.

    Returns (list_of_Simulations, common_scale_factor).  Runs with no
    snapshot close to the common scale are silently left out.
    """
    common_scale = get_common_scale_factor(sim_dirs, z_max)
    sims_common = []
    for directory in sorted(sim_dirs):
        all_outputs = get_outputs_in_dir(directory)
        if len(all_outputs) == 0:
            # happens when skipped by get_outputs_in_dir
            continue
        # get the last one that's in common with the other simulations
        all_common_outputs = [
            file
            for file in all_outputs
            if filename_to_scale_factor(file.name) <= common_scale
            and abs(filename_to_scale_factor(file.name) - common_scale) < 0.02
        ]
        # if there are no snapshots early enough for this, don't add them
        if len(all_common_outputs) > 0:
            last_common_snapshot = sorted(all_common_outputs)[-1]
            sims_common.append(
                Simulation(last_common_snapshot, sphere_radius_kpc, min_virial)
            )
    return sims_common, common_scale
def get_simulations_same_scale(
    sim_dirs, desired_z, z_tolerance=0.05, sphere_radius_kpc=30, min_virial=True
):
    """Load each simulation at the snapshot whose redshift is closest to
    *desired_z*, skipping (with a message) runs with nothing close enough.

    NOTE(review): the relative-tolerance check divides by desired_z, so this
    assumes desired_z != 0 — confirm callers never ask for z=0.
    """
    sims_common = []
    for directory in sorted(sim_dirs):
        all_outputs = get_outputs_in_dir(directory)
        if len(all_outputs) == 0:
            # happens when skipped by get_outputs_in_dir
            continue
        closest_z = np.inf
        closest_snapshot = None
        for out in all_outputs:
            a = filename_to_scale_factor(out.name)
            z = (1 / a) - 1
            if abs(z - desired_z) < abs(closest_z - desired_z):
                closest_z = z
                closest_snapshot = out
        # then validate that we got close enough.
        if abs(closest_z - desired_z) / desired_z < z_tolerance:
            sims_common.append(
                Simulation(closest_snapshot, sphere_radius_kpc, min_virial)
            )
        else:
            print(f"No outputs close to z={desired_z} in {directory}")
    return sims_common
def get_plot_names(sims):
    """Collect the de-duplicated union of axis names across all simulations."""
    return list({axis for sim in sims for axis in sim.axes})
def get_plot_names_dirs(dirs):
    """Collect the de-duplicated union of axis names across run directories."""
    return list({axis for run_dir in dirs for axis in run_attributes.names[run_dir]})
|
# Read n and a list of n integers, then print the first value (in input
# order) whose value occurs more than once.  Assumes such a duplicate
# exists, as the original brute-force search did.
n = int(input())
A = list(map(int, input().split()))

# Count occurrences once, O(n), instead of the original O(n^2) rescan that
# also ran off the end of the list (IndexError) when no duplicate existed.
counts = {}
for value in A:
    counts[value] = counts.get(value, 0) + 1

for value in A:
    if counts[value] > 1:
        print(value)
        break
from django.db import models
from django.conf import settings
from django_countries.fields import CountryField
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# (stored value, human label) pairs for Product.category
CATEGORY_CHOICES = (
    ('Solid Neon Colour Adapter','Solid Neon Colour Adapter'),
    ('Solid Neon Color Blunt Box','Solid Neon Color Blunt Box'),
    ('Day Glow Colour Adapter','Day Glow Colour Adapter'),
    ('Day Glow Blunt Colour Box','Day Glow Blunt Colour Box'),
    ('Glow-In-The-Dark Adapter','Glow-In-The-Dark Adapter'),
)
# (stored value, human label) pairs for Product.label: D = New, P = Best Seller
LABEL_CHOICES = (
    ('D','New'),
    ('P','Best Seller'),
)
# (stored value, human label) choice pairs for Job.title.
# A tuple — not the original set literal — keeps the choice ordering stable:
# sets have no defined order, so dropdown/migration order could vary per run.
JOB_CATEGORY = (
    ('Creative Designer','Creative Designer'),
    ('Engineering','Engineering'),
    ('Finance','Finance'),
    ('Job1','Job1'),
    ('Job2','Job2'),
    ('Job3','Job3'),
)
# (stored value, human label) pairs for WholeSale.product_choice
CHOICES = (
    ('Solid Neon Adaptor','Solid Neon Adaptor'),
    ('Solid Neon Blunt Box','Solid Neon Blunt Box'),
    ('Royal Splits Collection','Royal Splits Collection'),
    ('Special Order','Special Order'),
)
# (stored value, human label) pairs for WholeSale.shipping_add_field
SHIPPING_CHOICE = (
    ('Residential Address','Residential Address'),
    ('Commercial Address','Commercial Address')
)
class Payment(models.Model):
    """A completed Stripe charge tied to the user who paid."""
    stripe_charge_id = models.CharField(max_length=50)
    # SET_NULL so payment records survive user deletion
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.SET_NULL, blank=True, null=True)
    amount = models.FloatField()
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # NOTE(review): raises AttributeError when user has been nulled out
        return self.user.username
class Address(models.Model):
    """A postal address belonging to a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=100)
    country = CountryField(multiple=False)
    email = models.EmailField(max_length = 254)
    state = models.CharField(max_length=100)
    zip_code = models.CharField(max_length=100)

    class Meta:
        # avoid the default "Addresss" pluralisation in the admin
        verbose_name_plural = 'Address'

    def __str__(self):
        return self.name
class AuthorAbout(models.Model):
    """A blog author and their bio text."""
    name = models.CharField(max_length=225)
    about_author = models.TextField()

    def __str__(self):
        return self.name
class Blog(models.Model):
    """A blog post written by an AuthorAbout."""
    title = models.CharField(max_length=225)
    blog_image = models.ImageField(upload_to='images/')
    body = models.TextField()
    pub_date = models.DateTimeField()
    author = models.ForeignKey(AuthorAbout,on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def pub_date_pretty(self):
        # e.g. "Jan  5 2021" (%e pads single-digit days with a space)
        return self.pub_date.strftime('%b %e %Y')

    def summary(self):
        # first 100 characters of the body, for list views
        return self.body[:100]
class Comment(models.Model):
    """A reader comment attached to a Blog post."""
    blog = models.ForeignKey(Blog,on_delete=models.CASCADE)
    your_name = models.CharField(max_length=20)
    comment_text = models.TextField()
    date_added = models.DateTimeField(auto_now_add=True)
class About(models.Model):
    """Free-form description text for the About page."""
    description = models.TextField()
# __str__ below renames the instances of the model with their title name
class Job(models.Model):
    """An open job position applicants can apply to."""
    title = models.CharField(choices=JOB_CATEGORY,max_length=225)
    city = models.CharField(max_length=200)
    prerequisites = models.CharField(max_length=225)
    number = models.IntegerField(default=0,null=True,blank=True)

    def __str__(self):
        return self.title
class JobApplication(models.Model):
    """An application submitted against a Job posting."""
    job_title = models.ForeignKey(Job, on_delete=models.CASCADE)
    full_name = models.CharField(max_length=225)
    email = models.EmailField(max_length = 254)
    reason = models.TextField()
    resume = models.FileField(null=True,blank=True)

    def __str__(self):
        return self.full_name
class AboutUs(models.Model):
    """Company story and mission statement text."""
    story = models.TextField()
    mission = models.TextField()
class New(models.Model):
    """A news item shown on the site."""
    news_title = models.CharField(max_length=225)
    news_image = models.ImageField(upload_to='images/')
    news_desc = models.TextField()
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.news_title

    def pub_date_pretty(self):
        # e.g. "Jan  5 2021" (%e pads single-digit days with a space)
        return self.date_added.strftime('%b %e %Y')
class ContactU(models.Model):
    """A message submitted through the contact form."""
    name = models.CharField(max_length=225,null=True)
    email = models.EmailField(max_length = 225,null=True)
    phone_no = models.CharField(max_length=13,null=True)
    description = models.TextField(null=True)

    def __str__(self):
        return self.name
class WholeSale(models.Model):
    """A wholesale (bulk purchase) request: company, order, tax/reseller,
    social, and shipping details captured from the wholesale form."""
    # company and contact details
    cname = models.CharField(max_length=225,null=True)
    fname = models.CharField(max_length=225,null=True)
    lname = models.CharField(max_length=225,null=True)
    ctitle = models.CharField(max_length=225,null=True)
    email = models.EmailField(max_length=225,null=True)
    phnumber = models.CharField(max_length=225,null=True)
    fnum = models.CharField(max_length=225,null=True)
    # order details
    product_choice = models.CharField(max_length=225,choices=CHOICES,null=True)
    quantity = models.CharField(max_length=3,null=True)
    # business address
    address = models.TextField(null=True)
    city = models.CharField(max_length=225,null=True)
    state = models.CharField(max_length=225,null=True)
    zip_code = models.CharField(max_length=225,null=True)
    country = models.CharField(max_length=225,null=True)
    # tax / reseller identification
    federal_tax_id = models.CharField(max_length=225,null=True)
    reseller_lisence = models.CharField(max_length=225,null=True)
    # social handles
    inst_id = models.CharField(max_length=225,null=True)
    fb_id = models.CharField(max_length=225,null=True)
    # shipping details
    shipping_add_field = models.CharField(max_length=225,choices=SHIPPING_CHOICE,null=True)
    shipping_add = models.TextField(null=True)
    shipping_city = models.CharField(max_length=225,null=True)
    shipping_state = models.CharField(max_length=225,null=True)
    shipping_zip = models.CharField(max_length=225,null=True)
    shipping_country = models.CharField(max_length=225,null=True)
    shipping_person_name = models.CharField(max_length=225,null=True)
    shipping_person_num = models.CharField(max_length=225,null=True)
class Customer(models.Model):
    """Profile data attached one-to-one to a Django auth User."""
    user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
    fname = models.CharField(max_length=225,null=True)
    lname = models.CharField(max_length=225,null=True)
    email = models.EmailField(max_length=225,null=True)
    location = models.CharField(max_length=225,null=True)
    website = models.CharField(max_length=225,null=True,blank=True)
    bio = models.TextField(null=True)

    def __str__(self):
        # NOTE(review): raises AttributeError if user is NULL (field allows it)
        return self.user.username
def create_profile(sender, instance, created, **kwargs):
    """post_save hook: attach a fresh Customer profile to each new User."""
    if not created:
        return
    Customer.objects.create(user=instance)


post_save.connect(create_profile, sender=User)
# def update_profile(sender, instance, created, **kwargs):
# if created == False:
# instance.customer.save()
# post_save.connect(update_profile,sender=User)
class Product(models.Model):
    """Catalogue item with one main image and up to seven thumbnails."""
    title = models.CharField(max_length=225)
    image1 = models.ImageField(upload_to='images/')   # primary display image
    category = models.CharField(choices=CATEGORY_CHOICES,max_length=40,null=True, blank=True)
    label = models.CharField(choices=LABEL_CHOICES,max_length=1,null=True, blank=True)
    # First two thumbnails are required; the remaining five are optional.
    thumbnail_1 = models.ImageField(upload_to='images/')
    thumbnail_2 = models.ImageField(upload_to='images/')
    thumbnail_3 = models.ImageField(upload_to='images/',null=True, blank=True)
    thumbnail_4 = models.ImageField(upload_to='images/',null=True, blank=True)
    thumbnail_5 = models.ImageField(upload_to='images/',null=True, blank=True)
    thumbnail_6 = models.ImageField(upload_to='images/',null=True, blank=True)
    thumbnail_7 = models.ImageField(upload_to='images/',null=True, blank=True)
    # NOTE(review): DecimalField is the usual choice for money values.
    price= models.FloatField(default=0.00)
    description = models.TextField(null=True,blank=True)

    def __str__(self):
        return self.title
class Coupon(models.Model):
    """Discount code; `amount` (nullable) is subtracted from the cart total."""
    code = models.CharField(max_length=15)
    amount = models.FloatField(blank=True, null=True)

    def __str__(self):
        return self.code
class Order(models.Model):
    """A customer's order (cart). `complete` flips once checkout finishes."""
    customer = models.ForeignKey(Customer,on_delete=models.CASCADE,null=True,blank=True)
    ordered_date = models.DateTimeField(auto_now_add =True)
    complete = models.BooleanField(default=False,null=True,blank=True)
    transaction_id = models.CharField(max_length=225,null=True,blank=True)
    coupon = models.ForeignKey(Coupon, on_delete=models.SET_NULL, blank=True, null=True)

    def __str__(self):
        return str(self.id)

    def get_sub_total(self):
        """Sum of all line-item totals, before any coupon discount."""
        return sum(item.get_total for item in self.orderitem_set.all())

    def get_cart_total(self):
        """Sub-total minus the coupon amount, when a coupon with an amount is set."""
        total = sum(item.get_total for item in self.orderitem_set.all())
        # Coupon.amount is nullable; guard so a coupon whose amount is NULL
        # does not raise TypeError on the subtraction.
        if self.coupon and self.coupon.amount:
            total -= self.coupon.amount
        return total
class OrderItem(models.Model):
    """A single product line inside an Order."""
    product = models.ForeignKey(Product,on_delete=models.CASCADE,null=True)
    order = models.ForeignKey(Order,on_delete=models.CASCADE,null=True)
    quantity = models.IntegerField(default=0,null=True,blank=True)
    color= models.CharField(max_length=225,null=True,blank=True)
    date_added = models.DateTimeField(auto_now_add=True)

    @property
    def get_total(self):
        """Line total = unit price * quantity; a NULL quantity counts as 0."""
        # quantity is nullable — coalesce to 0 instead of raising TypeError.
        return self.product.price * (self.quantity or 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'zach powers'
__email__ = 'zcharlop@rockefeller.edu'
__version__ = '0.1.0'
from quickD3map import PointMap
|
# Create your models here.
from django.db import models
# Create your models here.
from django.urls import reverse
# Django-style choices tuples: (stored_value, human_readable).
# NOTE(review): the pairs look swapped — the long form is stored while the
# short form is displayed, and device.Type below uses max_length=2, which
# cannot hold 'VEHICLE'/'DUSTBIN'. Confirm intent before relying on these.
TYPE = (
    ('VEHICLE', 'veh'),
    ('DUSTBIN', 'dust'),
)
VEHICLE_TYPE = (
    ('Mini', 'mini'),
    ('MICRO','micro'),
    ('SEDAN','sedan'),
    ('SUV','suv'),
)
class Dustbin(models.Model):
    """A smart dustbin identified by an RFID tag, with weight and expiry."""
    dustbin_no = models.IntegerField()
    weight = models.IntegerField()
    expiry = models.IntegerField()
    comment = models.TextField(max_length=500)
    location = models.CharField(max_length=40)
    RFID_TAG = models.IntegerField()

    # on submit click on the product entry page, it redirects to the url below.
    # return a URL string that would point to this model's view
    def get_absolute_url(self):
        return reverse('main:index')

    class Meta:
        db_table = "Dustbin"
class service(models.Model):
    """Citizen service registration record."""
    name = models.CharField(max_length=20)
    email_id = models.EmailField()
    adhar_card_no = models.CharField(max_length=12)  # Aadhaar number, stored as text
    service_type = models.CharField(max_length=3)
    contact_no = models.IntegerField()

    def get_absolute_url(self):
        # Redirect target after a successful form submit.
        return reverse('main:Sindex')

    class Meta:
        db_table = "service"
class driver(models.Model):
    """Driver registration with scans of ID and driving-licence documents."""
    driver_name = models.CharField(max_length=20)
    Email_id = models.CharField(max_length=20)
    adhar_card_no = models.IntegerField()
    # Front / back scans of the Aadhaar card.
    Front_A = models.ImageField(upload_to='front/')
    Back_A = models.ImageField(upload_to='back/')   # was `models . ImageField` — spacing normalized
    Driving_lisence_no = models.IntegerField()
    # Front / back scans of the driving licence.
    Front_L = models.ImageField(upload_to='front/')
    Back_L = models.ImageField(upload_to='back/')
    Date_of_expiry = models.IntegerField()
    contact_no = models.IntegerField()

    def get_absolute_url(self):
        # Redirect target after a successful form submit.
        return reverse('main:Dindex')

    class Meta:
        db_table = "driver"
class device(models.Model):
    """An IoT device (vehicle- or dustbin-mounted) and its stock quantity."""
    device_name = models.CharField(max_length=20)
    # NOTE(review): TYPE stores 'VEHICLE'/'DUSTBIN' (7 chars) but
    # max_length=2 cannot hold them — confirm intended stored values.
    Type = models.CharField(max_length=2 , choices=TYPE)
    Device_use = models.CharField(max_length=1)
    Quantity=models.IntegerField()

    def get_absolute_url(self):
        # Fixed: the URL name was missing the ':' namespace separator
        # ('main DEindex'), unlike every sibling model ('main:index', ...).
        return reverse('main:DEindex')

    class Meta:
        db_table = "device"
class vehicle(models.Model):
    """Collection vehicle and its RFID kit assignment."""
    vehicle_type = models.CharField(max_length=20,choices=VEHICLE_TYPE) #ForeignKey(vehicle, on_delete=models.SET_NULL, null=True)
    capacity = models.IntegerField()
    area = models.IntegerField()
    vehicle_no = models.IntegerField()
    rfid_kit_no = models.IntegerField()

    def get_absolute_url(self):
        # Redirect target after a successful form submit.
        return reverse('main:Vindex')

    class Meta:
        db_table = "vehicle"
|
def valid_parentheses(string):
    """Return True iff the parentheses in `string` are balanced.

    Characters other than '(' and ')' are ignored.
    """
    depth = 0
    for ch in string:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            # A closer with no matching opener fails immediately.
            if depth < 0:
                return False
    return depth == 0
|
#!/usr/bin/env python
from os import path
import logging as log
import facebook
import webapp2
from webapp2_extras import sessions
import jinja2
from google.appengine.ext import db
from google.appengine.api.app_identity import get_application_id
# Facebook OAuth app credentials.
# NOTE(review): the app secret is hard-coded in version control — it should
# be moved to configuration/environment.
FACEBOOK_APP_ID = "522368114539124"
FACEBOOK_APP_SECRET = "e75e283da7fc04b8e752e25a9459ed7e"
# Jinja templates live in ../templates relative to this module.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        path.dirname(__file__) + "/../templates"))
log.debug("The path for templates is" +
          path.dirname(__file__) + "/../templates")
class User(db.Model):
    """Datastore record for a Facebook-authenticated user.

    Keyed by the Facebook uid (see BaseHandler.current_user); stores the
    latest OAuth access token so Graph/FQL queries can run on the user's
    behalf.
    """
    id = db.StringProperty(required=True)             # Facebook user id
    created = db.DateTimeProperty(auto_now_add=True)
    updated = db.DateTimeProperty(auto_now=True)
    name = db.StringProperty(required=True)
    profile_url = db.StringProperty(required=True)
    access_token = db.StringProperty(required=True)
class LogoutException(Exception):
    """Raised when the Facebook session is gone and the user must re-login."""

    def __init__(self, *args, **kwds):
        # Fixed: the original called super(Exception, self), which skips
        # Exception itself in the MRO; super() must name this class.
        super(LogoutException, self).__init__(*args, **kwds)
class BaseHandler(webapp2.RequestHandler):
    """Provides access to the active Facebook user in self.current_user
    The property is lazy-loaded on first access, using the cookie saved
    by the Facebook JavaScript SDK to determine the user ID of the active
    user. See http://developers.facebook.com/docs/authentication/ for
    more information.
    """
    @property
    def current_user(self):
        # Debug breadcrumbs showing which app instance served the request.
        log.warning("The instance was called from: " + self.app.active_instance.request.application_url)
        log.warning("The instance was called from: " + get_application_id())
        if self.session.get("user"):
            # User is logged in
            return self.session.get("user")
        else:
            # Either used just logged in or just saw the first page
            # We'll see here
            cookie = facebook.get_user_from_cookie(self.request.cookies,
                                                   FACEBOOK_APP_ID,
                                                   FACEBOOK_APP_SECRET)
            if cookie:
                # Okay so user logged in.
                # Now, check to see if existing user
                user = User.get_by_key_name(cookie["uid"])
                if not user:
                    # Not an existing user so get user info
                    graph = facebook.GraphAPI(cookie["access_token"])
                    profile = graph.get_object("me")
                    user = User(
                        key_name=str(profile["id"]),
                        id=str(profile["id"]),
                        name=profile["name"],
                        profile_url=profile["link"],
                        access_token=cookie["access_token"]
                    )
                    user.put()
                elif user.access_token != cookie["access_token"]:
                    # Known user returning with a fresh token: persist it.
                    user.access_token = cookie["access_token"]
                    user.put()
                # User is now logged in — cache a plain dict in the session.
                self.session["user"] = dict(
                    name=user.name,
                    profile_url=user.profile_url,
                    id=user.id,
                    access_token=user.access_token
                )
                return self.session.get("user")
            else:
                # This hits when the user has logged out
                log.warning("user logged out")
                raise LogoutException("mesa logged out tusa")
        return None  # unreachable: every branch above returns or raises

    def dispatch(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        # Bind the session store to this request, then guarantee sessions
        # are persisted even if the wrapped handler raises.
        self.session_store = sessions.get_store(request=self.request)
        try:
            webapp2.RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        return self.session_store.get_session()

    def render(self, values = {}, template = "home.html", user = None):
        """render the values in the template.
        by default it goes to the index page"""
        # NOTE(review): mutable default argument `values={}` is shared across
        # calls; keys written here accumulate on the shared dict.
        # There are some default values that we will always use
        values["facebook_app_id"]=FACEBOOK_APP_ID
        # But the user has to be injected
        values["current_user"]=user
        # and then just load and render the template
        template = jinja_environment.get_template(template)
        self.response.out.write(template.render(values))

    def do_query(self, query = None, fql = False, user = None):
        """ Easy querying to facebook. It handles the errors and issues an
        retrieves just an empty dict with the errors if nothing returned.
        Returns a dict with the "data" and the "error" if any.
        """
        assert(query != None)
        result = { "data": []}
        error = ""
        try:
            if user == None:
                # then try with the self property
                user = self.current_user
                if user == None:
                    # then its definetly logout
                    raise LogoutException("doing query") # send logout upstream
                    # and catch it up
                else:
                    cu = user
            # NOTE(review): `cu` is only bound on the path above where
            # `user` started as None; a caller-supplied `user` leaves `cu`
            # unbound and the next line raises NameError — confirm callers
            # never pass `user` explicitly, or bind `cu = user` here.
            graph = facebook.GraphAPI(cu["access_token"])
            log.debug("doing Query: " + query)
            if fql:
                # Perform the fql query
                result = graph.fql(query)
            else:
                # Its a graph api query
                result = graph.get_object(query)
            ##log.debug( u"result"+ repr(result))
        except LogoutException as e:
            log.exception(e)
            raise # this should be catched the later the better, on the caller
            # that decides the flow ofthe application
        except Exception as e:
            # GraphAPIError , and if there is expired, means that we need to relogin
            # GraphAPIError 606, and if there is "permission" means we have no rights
            log.debug("pokemon exception")
            log.exception(e)
            try:
                # try to guess if we run out of time
                # NOTE(review): `e.message` is Python 2 only.
                if e.message.find(u"Session has expired") > 0 or e.message.find(u"user logged out") > 0:
                    #thing = u"Please go to <a href=\"/\">home</a>, logout, and come back in"
                    log.warning(e.message)
                    raise LogoutException("the query resulted in finished session")
                else:
                    log.warning("something bad happened")
                    log.warning(e.message)
                    log.warning("Silencing exception")
                    error = e.message
            except:
                #reraise
                raise
        # NOTE(review): assumes the Graph result is a dict with a "data"
        # key; graph.get_object on a single object may not have one.
        return {"data":result["data"],"error":error}
|
# encoding: utf-8
# import math
import torch
import itertools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from grid_sample import grid_sample
# from torch.autograd import Variable
from tps_grid_gen import TPSGridGen
import pdb
class CNN(nn.Module):
    """Small LeNet-style net: two conv blocks then two FC layers.

    Maps a (batch, 1, 28, 28) input to a raw `num_output`-dimensional
    vector per image (no activation on the final layer).
    """

    def __init__(self, num_output):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, num_output)

    def forward(self, x):
        # Two conv -> max-pool -> relu stages (spatial dropout on stage 2).
        feat = F.relu(F.max_pool2d(self.conv1(x), 2))
        feat = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(feat)), 2))
        flat = feat.view(-1, 320)  # 20 channels * 4 * 4 spatial
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return self.fc2(hidden)
class ClsNet(nn.Module):
    """10-way classifier: CNN features followed by log-softmax."""

    def __init__(self):
        super(ClsNet, self).__init__()
        self.cnn = CNN(10)

    def forward(self, x):
        # Explicit dim= pins the softmax to the class axis and silences the
        # "implicit dimension choice" deprecation warning.
        return F.log_softmax(self.cnn(x), dim=1)
class BoundedGridLocNet(nn.Module):
    """Localisation net predicting control points bounded to (-1, 1).

    The final FC layer is initialised so the untrained net outputs exactly
    `target_control_points`: weights are zeroed and the bias is set to
    arctanh of the targets, which the tanh in forward() inverts.
    """

    def __init__(self, grid_height, grid_width, target_control_points):
        super(BoundedGridLocNet, self).__init__()
        self.cnn = CNN(grid_height * grid_width * 2)
        bias = torch.from_numpy(np.arctanh(target_control_points.numpy()))
        bias = bias.view(-1)
        self.cnn.fc2.bias.data.copy_(bias)
        self.cnn.fc2.weight.data.zero_()

    def forward(self, x):
        batch_size = x.size(0)
        # torch.tanh: F.tanh is deprecated and removed in recent PyTorch.
        points = torch.tanh(self.cnn(x))
        return points.view(batch_size, -1, 2)
class UnBoundedGridLocNet(nn.Module):
    """Localisation net predicting unconstrained control points.

    Initialised as an identity mapping to `target_control_points`
    (zero weights, bias = flattened targets).
    """

    def __init__(self, grid_height, grid_width, target_control_points):
        super(UnBoundedGridLocNet, self).__init__()
        num_points = grid_height * grid_width
        self.cnn = CNN(num_points * 2)
        initial_bias = target_control_points.view(-1)
        self.cnn.fc2.bias.data.copy_(initial_bias)
        self.cnn.fc2.weight.data.zero_()

    def forward(self, x):
        raw = self.cnn(x)
        # (batch, 2*H*W) -> (batch, H*W, 2)
        return raw.view(x.size(0), -1, 2)
class STNClsNet(nn.Module):
    """Spatial-Transformer classifier.

    A localisation net predicts TPS control points, the input image is
    warped onto the predicted grid, and ClsNet classifies the rectified
    image.
    """
    def __init__(self, args):
        super(STNClsNet, self).__init__()
        self.args = args
        r1 = args.span_range_height # 0.9
        r2 = args.span_range_width # 0.9
        assert r1 < 1 and r2 < 1 # if >= 1, arctanh will cause error in BoundedGridLocNet
        # Regular (grid_height x grid_width) grid of target control points
        # spanning [-r, r] on each axis; +0.00001 keeps the endpoint in
        # np.arange despite float rounding.
        target_control_points = torch.Tensor(
            list(
                itertools.product(
                    np.arange(-r1, r1 + 0.00001, 2.0 * r1 / (args.grid_height - 1)),
                    np.arange(-r2, r2 + 0.00001, 2.0 * r2 / (args.grid_width - 1)),
                )))
        # pdb.set_trace()
        # # pp 2.0 * r1 / (args.grid_height - 1) -- 0.6
        # array([-0.9, -0.3, 0.3, 0.9])
        # pp target_control_points
        # tensor([[-0.9000, -0.9000],
        #         [-0.9000, -0.3000],
        #         [-0.9000, 0.3000],
        #         [-0.9000, 0.9000],
        #         [-0.3000, -0.9000],
        #         [-0.3000, -0.3000],
        #         [-0.3000, 0.3000],
        #         [-0.3000, 0.9000],
        #         [ 0.3000, -0.9000],
        #         [ 0.3000, -0.3000],
        #         [ 0.3000, 0.3000],
        #         [ 0.3000, 0.9000],
        #         [ 0.9000, -0.9000],
        #         [ 0.9000, -0.3000],
        #         [ 0.9000, 0.3000],
        #         [ 0.9000, 0.9000]])
        # Swap the two columns so points are ordered (x, y) rather than (y, x).
        Y, X = target_control_points.split(1, dim=1)
        target_control_points = torch.cat([X, Y], dim=1)
        # Select the localisation-net flavour from the CLI argument.
        GridLocNet = {
            'unbounded_stn': UnBoundedGridLocNet,
            'bounded_stn': BoundedGridLocNet,
        }[args.model]
        self.loc_net = GridLocNet(args.grid_height, args.grid_width,
                                  target_control_points)
        self.tps = TPSGridGen(args.image_height, args.image_width,
                              target_control_points)
        self.cls_net = ClsNet()
        # pdb.set_trace()
        # (Pdb) a
        # self = STNClsNet(
        #   (loc_net): UnBoundedGridLocNet(
        #     (cnn): CNN(
        #       (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
        #       (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
        #       (conv2_drop): Dropout2d(p=0.5, inplace=False)
        #       (fc1): Linear(in_features=320, out_features=50, bias=True)
        #       (fc2): Linear(in_features=50, out_features=32, bias=True)
        #     )
        #   )
        #   (tps): TPSGridGen()
        #   (cls_net): ClsNet(
        #     (cnn): CNN(
        #       (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
        #       (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
        #       (conv2_drop): Dropout2d(p=0.5, inplace=False)
        #       (fc1): Linear(in_features=320, out_features=50, bias=True)
        #       (fc2): Linear(in_features=50, out_features=10, bias=True)
        #     )
        #   )
        # )
        # args = Namespace(angle=90, batch_size=64, cuda=True, epochs=10,
        #   grid_height=4, grid_size=4, grid_width=4, image_height=28,
        #   image_width=28, log_interval=10, lr=0.01, model='unbounded_stn',
        #   momentum=0.5, no_cuda=False, save_interval=100, seed=1, span_range=0.9,
        #   span_range_height=0.9, span_range_width=0.9, test_batch_size=1000)

    def forward(self, x):
        batch_size = x.size(0)
        # Predict control points, build the sampling grid, warp, classify.
        source_control_points = self.loc_net(x)
        source_coordinate = self.tps(source_control_points)
        grid = source_coordinate.view(batch_size, self.args.image_height,
                                      self.args.image_width, 2)
        transformed_x = grid_sample(x, grid)
        logit = self.cls_net(transformed_x)
        # pdb.set_trace()
        # (Pdb) pp source_control_points.size()
        # torch.Size([64, 16, 2])
        # (Pdb) source_coordinate.size()
        # torch.Size([64, 784, 2])
        # (Pdb) grid.size()
        # torch.Size([64, 28, 28, 2])
        # (Pdb) transformed_x.size()
        # torch.Size([64, 1, 28, 28])
        # (Pdb) logit.size()
        # torch.Size([64, 10])
        return logit
def get_model(args):
    """Build the classifier selected by ``args.model``.

    'no_stn' yields a plain ClsNet; any other value gets the
    STN-augmented STNClsNet.
    """
    use_stn = args.model != 'no_stn'
    print('create model with STN' if use_stn else 'create model without STN')
    return STNClsNet(args) if use_stn else ClsNet()
|
import numpy as np
import scipy
import matcompat
# if available import pylab (from matlibplot)
try:
import matplotlib.pylab as plt
except ImportError:
pass
def SAE(X, S, lamb):
    """Semantic Auto-Encoder (SAE) closed-form solver.

    Solves the Sylvester equation  A*W + W*B = C  with
        A = S S^T,  B = lamb * X X^T,  C = (1 + lamb) * S X^T,
    which is the stationarity condition of the SAE objective.

    :param X: dxN data matrix.
    :param S: kxN semantic matrix.
    :param lamb: regularisation parameter (lambda).
    :return: W, the kxd projection matrix.
    """
    # MATLAB's sylvester(A, B, C) maps directly to solve_sylvester; the
    # original called an undefined name `sylvester`.
    from scipy.linalg import solve_sylvester

    A = np.dot(S, S.conj().T)
    B = lamb * np.dot(X, X.conj().T)          # scalar * matrix, as in the MATLAB source
    C = (1.0 + lamb) * np.dot(S, X.conj().T)
    W = solve_sylvester(A, B, C)
    # The original fell through with a bare `return` (None) — callers need W.
    return W
import os
import sys
sys.path.append('/home/will/Documents/data/lib/libsvm-3.17/python')
from svmutil import *
from voice import Voice
import config
#root_dir = '/home/will/Documents/data/luyin'
def _collect_samples(dir_path, label, dataset):
    """Extract features from every voice file in dir_path and append them
    to `dataset` with the given libsvm label ('+1' or '-1')."""
    for name in os.listdir(dir_path):
        voice = Voice()
        voice.analyze(dir_path + name)   # dir_path is expected to end with '/'
        voice.calFeatures()
        voice.learn(dataset, label)


def train(diretory = config.root_dir):
    '''Train all the files in diretory 'luyin'
    The diretory is made up of two subdiretories named 'normal' and 'abnormal'.
    As a result , the script will generate 3 files:
    (1)scale : used for scale data by the svm command 'svm-scale -s filename > scale'
    (2)dataset_scaled : the scaled dataset for training
    (3)model : the svm model file
    '''
    dataset = config.dataset
    dataset_scaled = config.dataset_scaled
    scale = config.scale
    # Normal and abnormal samples share the same extraction pipeline;
    # only the directory and label differ (was two copy-pasted loops).
    _collect_samples(config.normal, '+1', dataset)
    _collect_samples(config.abnormal, '-1', dataset)
    # Scale the feature file, then train and persist the svm model.
    os.system('svm-scale -s %s %s > %s'%(scale,dataset,dataset_scaled))
    y,x = svm_read_problem(dataset_scaled)
    m = svm_train(y,x)
    svm_save_model(config.model,m)


if __name__ == '__main__':
    train()
import sys
def giveBooks():
    """Greedy matcher: each student (a (left, right) range in `want`) gets
    the lowest-numbered still-free book in that range; prints how many
    students received a book."""
    global N, M, want, given
    handed_out = 0
    for left, right in want:
        # Scan this student's acceptable range for the first free book.
        for book in range(left, right + 1):
            if not given[book]:
                given[book] = 1
                handed_out += 1
                break
    print(handed_out)
if __name__ == '__main__':
    # Number of test cases.
    TC = int(input())
    for T in range(TC):
        # N books (numbered 1..N), M students.
        N, M = map(int, input().split())
        want = [0 for _ in range(M)]
        given = [0 for _ in range(N+1)]   # index 0 unused; books are 1-based
        for stu in range(M):
            a, b = map(int, sys.stdin.readline().split())
            want[stu] = (a, b)
        # Serve students whose acceptable range ends earliest first —
        # the classic greedy order for interval assignment.
        want.sort(key=lambda item: item[1])
        giveBooks()
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from collections import OrderedDict, namedtuple
from typing import Tuple, Text, List, Optional # noqa: F401
import six
import tensorflow as tf
from tensorflow.python.lib.io import file_io
# (name, kind, tags): feature name, its TF dtype, and the raw tag dict
# parsed from the TF record spec.
FeatureInfo = namedtuple("FeatureInfo", ["name", "kind", "tags"])
class TfRecordSpecParser(object):
    """Parses the `_tf_record_spec.json` metadata that Scio writes next to
    TFRecord datasets: feature names/types, compression, multispec groups."""

    # TODO: make this private, or handle arguments better, having both of the required doesn't make sense # noqa: E501
    @classmethod
    def parse_tf_record_spec(cls,
                             tf_record_desc_path,  # type: Optional[str]
                             dir_path  # type: str
                             ):
        # type: (...) -> Tuple[List[FeatureInfo], str, List[List[str]]]
        """
        Parses TF record spec saved by Scio. tf_record_desc_path takes precedence over dir_path.
        :param tf_record_desc_path: fully qualified path to TF record spec - this must be a single
        file.
        :param dir_path: path of the data directory, will use default spec file name
        :return: (feature infos, TF compression type string, multispec feature groups)
        """
        tf_record_spec_path = cls.__get_tf_record_spec_path(tf_record_desc_path, dir_path)
        with file_io.FileIO(tf_record_spec_path, "r") as f:
            spec = json.load(f)
        assert spec["version"] == 1, "TFRecordSpec parsing error: Unsupported version."
        # features types
        # NOTE(review): mapping BytesList to tf.int8 is unusual — TF
        # normally decodes BytesList as tf.string; confirm downstream usage.
        type_map = {
            "FloatList": tf.float32,
            "Int64List": tf.int64,
            "BytesList": tf.int8
        }
        feature_info = [FeatureInfo(fi["name"], type_map[fi["kind"]], fi["tags"])
                        for fi in spec["features"]]
        assert len(feature_info) > 0, "TFRecordSpec parsing error: No feature found."
        # groups by multispec
        multispec_feature_groups = []  # type: List[List[str]]
        if "multispec-id" in feature_info[0][2]:
            d = OrderedDict()  # type: OrderedDict[int, List[str]]
            for name, _, tags in feature_info:
                key = int(tags["multispec-id"])
                if key not in d:
                    d[key] = []
                d[key].append(name)
            # NOTE(review): assumes multispec ids are exactly 0..len(d)-1;
            # a sparse id set would raise IndexError in the loop below.
            multispec_feature_groups = [[str()]] * len(d)
            for i, f in six.iteritems(d):
                multispec_feature_groups[i] = list(f)
        # parse compression: spec values -> TFRecord compression_type strings
        compression_map = {
            "UNCOMPRESSED": "",
            "DEFLATE": "ZLIB",
            "GZIP": "GZIP"
        }
        assert spec["compression"] in compression_map, \
            "Compression %s not supported by TF." % spec["compression"]
        return feature_info, compression_map[spec["compression"]], multispec_feature_groups

    @staticmethod
    def __get_tf_record_spec_path(tf_record_desc_path,  # type: Optional[str]
                                  dir_path  # type: str
                                  ):
        # type: (...) -> Text
        # An explicit spec path wins when provided; otherwise fall back to
        # the default spec file name inside the data directory.
        if tf_record_desc_path is not None:
            assert isinstance(tf_record_desc_path, str), \
                "tf_record_desc_path is not a String: %r" % tf_record_desc_path
            assert file_io.file_exists(tf_record_desc_path), \
                "feature desc `%s` does not exist" % tf_record_desc_path
            return tf_record_desc_path
        assert isinstance(dir_path, str), "dir_path is not a String: %r" % dir_path
        assert file_io.file_exists(dir_path), "directory `%s` does not exist" % dir_path
        assert file_io.is_directory(dir_path), "`%s` is not a directory" % dir_path
        from os.path import join as pjoin
        default_tf_record_spec_filename = "_tf_record_spec.json"
        return pjoin(dir_path, default_tf_record_spec_filename)
|
# Copyright (c) 2012, Walter Bender
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
def myblock(tw, x):  # ignore second argument
    ''' Load journal stats to heap (Sugar only)

    Walks every Sugar datastore under ~/.sugar, counts journal entries per
    activity, and pushes (name, count) pairs — top MAX activities plus an
    aggregated "other" bucket — onto the Turtle Art heap.
    '''
    import os
    from gettext import gettext as _
    MAX = 19                     # cap on individually-listed activities
    DIROFINTEREST = 'datastore'

    class ParseJournal():
        ''' Simple parser of datastore '''
        def __init__(self):
            self._dsdict = {}           # profile name -> list of entry dicts
            self._activity_name = []
            self._activity_count = []   # parallel to _activity_name
            homepath = os.environ['HOME']
            for name in os.listdir(os.path.join(homepath, ".sugar")):
                path = os.path.join(homepath, ".sugar", name)
                if isdsdir(path):
                    self._dsdict[os.path.basename(path)] = []
                if not os.path.isdir(path):
                    continue
                for dsobjdir in os.listdir(path):
                    # NOTE(review): `DIROFINTEREST + 2` adds an int to a
                    # str and raises TypeError when reached — probably
                    # meant len(DIROFINTEREST) + 2 or a fixed length.
                    # Confirm against the upstream Sugar source.
                    if len(dsobjdir) != DIROFINTEREST + 2:
                        continue
                    dsobjdir = os.path.join(path, dsobjdir)
                    if not os.path.isdir(dsobjdir):
                        continue
                    for dsobj in os.listdir(dsobjdir):
                        dsobj = os.path.join(dsobjdir, dsobj)
                        self._dsdict[os.path.basename(path)].append({})
                        activity = isactivity(dsobj)
                        if not activity:
                            self._dsdict[os.path.basename(path)][-1][
                                'activity'] = 'media object'
                        else:
                            self._dsdict[os.path.basename(path)][-1][
                                'activity'] = activity
            # Tally entries per activity across all profiles.
            # NOTE(review): iteritems is Python 2 only.
            for k, v in self._dsdict.iteritems():
                for a in v:
                    if 'activity' in a:
                        if a['activity'] in self._activity_name:
                            i = self._activity_name.index(a['activity'])
                            self._activity_count[i] += 1
                        else:
                            self._activity_name.append(a['activity'])
                            self._activity_count.append(1)

        def get_sorted(self):
            # Pair names with counts, sort ascending by count, then emit
            # the MAX most frequent plus an aggregated 'other' bucket.
            activity_tuples = []
            for i in range(len(self._activity_name)):
                activity_tuples.append((self._activity_name[i],
                                        self._activity_count[i]))
            sorted_tuples = sorted(activity_tuples, key=lambda x: x[1])
            activity_list = []
            count = 0
            length = len(sorted_tuples)
            for i in range(length):
                if i < MAX:
                    activity_list.append([sorted_tuples[length - i - 1][0],
                                          sorted_tuples[length - i - 1][1]])
                else:
                    count += sorted_tuples[length - i - 1][1]
            if count > 0:
                activity_list.append([_('other'), count])
            return activity_list

    def hascomponent(path, component):
        ''' Return metadata attribute, if any '''
        if not os.path.exists(os.path.join(path, 'metadata')):
            return False
        if not os.path.exists(os.path.join(path, 'metadata', component)):
            return False
        fd = open(os.path.join(path, 'metadata', component))
        data = fd.readline()
        fd.close()
        if len(data) == 0:
            return False
        return data

    def isactivity(path):
        ''' Return activity name '''
        activity = hascomponent(path, 'activity')
        if not activity:
            return False
        else:
            # e.g. 'org.laptop.Oficina' -> 'Oficina'
            return activity.split('.')[-1]

    def isdsdir(path):
        ''' Only interested if it is a datastore directory '''
        if not os.path.isdir(path):
            return False
        if not os.path.exists(os.path.join(path, DIROFINTEREST)):
            return False
        return True

    data = ParseJournal()
    activity_list = data.get_sorted()
    # Push name/count pairs, then the top activity's count once more.
    for a in activity_list:
        tw.lc.heap.append(a[0])
        tw.lc.heap.append(a[1])
    tw.lc.heap.append(activity_list[0][1])
    return
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import six
import logging
logger = logging.getLogger('django-send-messages-sms')
class SMSMessage(object):
    """
    A container for SMS information.
    """
    encoding = None  # None => use settings default

    def __init__(self,tpl_id=None,content='',to=None,connection=None):
        """
        Initialize a single sms message (which can be sent to multiple
        recipients).
        All strings used to create the message can be unicode strings
        (or UTF-8 bytestrings). The SafeMIMEText class will handle any
        necessary encoding conversions.

        :param tpl_id: optional provider template id.
        :param content: message body text.
        :param to: list/tuple of recipient numbers (at most 100).
        :param connection: optional pre-built backend connection.
        """
        if to:
            # A bare string would be silently iterated char-by-char.
            assert not isinstance(to, six.string_types),'"to" argument must be a list or tuple'
            assert not len(to) > 100,'"to" argument can not exceed 100'
            self.to = list(to)
        else:
            self.to = []
        self.content = content
        self.connection = connection
        self.tpl_id = tpl_id

    def get_connection(self, fail_silently=False):
        # Lazily build the backend connection on first use.
        from sms import get_connection
        if not self.connection:
            self.connection = get_connection(fail_silently=fail_silently)
        return self.connection

    def message(self):
        # Plain body text of the sms.
        return self.content

    def recipients(self):
        """
        Returns a set of all recipients of the sms (includes direct
        addressees as well as Cc and Bcc entries).
        """
        return set(self.to)

    def send(self, fail_silently=False):
        """Sends the sms message. Returns 0 when there are no recipients."""
        if not self.recipients():
            # Don't bother creating the network connection if there's nobody to
            # send to.
            return 0
        return self.get_connection(fail_silently).send_messages([self])

    def mobile(self):
        """Comma-joined string of recipients that pass mobile validation."""
        from sms import is_correct_mobile
        recipients = [mobile_number for mobile_number in self.recipients()
                      if is_correct_mobile(mobile_number)]
        logger.info("prepared to send sms to mobiles:%s;",
                    self.recipients(),exc_info=1)
        logger.info("successed send sms to mobiles:%s;",
                    recipients,exc_info=1)
        return ','.join(recipients)
|
#region import
import tornado.web
import asyncio
import json
from pricelists.PriceListClient import PriceListClient
from modules.kinetic_core.DateTimeEncoder import DateTimeEncoderCompact
from web.handlers.BaseHandler import *
#endregion
class PricesListJSONHandler(BaseHandler):
    """DataTables-style JSON endpoint paginating the price list."""

    @allowedRole(Role.PHARMA_CLIENT)
    async def get(self):
        offset = int(self.get_argument("start", default=0))
        limit = int(self.get_argument("limit", default=10))

        result = await PriceListClient().paginate(limit=limit, offset=offset)

        # One flat row per price-list entry, in the column order the
        # client-side table expects.
        rows = [
            [
                entry["price_list_id"],
                entry["name"],
                entry["manufacturer"],
                entry["supplier_id"],
                entry["quantity"],
                entry["expiry"].strftime('%m.%Y'),
                entry["wire25"],
                entry["wire50"],
                entry["wire75"],
                entry["wire100"],
                entry["cash"],
            ]
            for entry in result["data"]
        ]

        payload = {
            "draw": self.get_argument("draw", default=1),
            "recordsTotal": result["count"],
            "recordsFiltered": result["count"],
            "data": rows,
        }
        self.write(json.dumps(payload, cls=DateTimeEncoderCompact))
        self.finish()
|
import newspaper
import newsapi
from newsapi import NewsApiClient
from newspaper import Article
# NewsAPI client.
# NOTE(review): the API key is hard-coded in source — move to configuration.
newsapi = NewsApiClient(api_key='b887d1939c004198a6f027703cb318e6')
url = 'https://edition.cnn.com/2020/05/15/politics/trump-2016-instincts-pandemic-second-term/index.html'
# Download and analyse the article with newspaper.
article = Article(url)
article.download()
article.parse()
article.nlp()
# use "find" to locate the characters to replace
source = article.source_url
print(source)
# Persist the source URL so it can be re-read character by character below.
text_file = open("source.txt", "w")
n = text_file.write(article.source_url)
text_file.close()
letters=['' for i in range(1000)]
j=0
with open("source.txt","r") as file:
    for line in file:
        i=0
        for ch in line:
            # NOTE(review): letters[i] accumulates the i-th character of
            # every line (column-wise); with this single-line file that is
            # one character per slot — confirm before changing.
            letters[i]+=ch;
            i=i+1
        j=j+1
file.close()  # redundant: the with-block already closed the file
# Strip the scheme/"www." prefix by copying everything after "www.".
text_file = open("source_sans_https",'w')
i=0
while letters[i]!='':
    # use replace instead of the loop
    if letters[i]=='w' and letters[i+1]=='w' and letters[i+2]=='w' and letters[i+3]=='.':
        j=4
        while letters[i+j]!='':
            n= text_file.write(letters[i+j])
            j=j+1
    i=i+1
text_file.close()
# Query NewsAPI for everything published on the extracted domain.
source=open("source_sans_https",'r')
url_a_rechercher=source.readline()
print(url_a_rechercher)
recherche=newsapi.get_everything(domains=url_a_rechercher)
print(recherche)
# key_words = article.keywords
# print(key_words)
#
# all_articles = newsapi.get_everything(q=(key_words[0] and key_words[1] and key_words[2] and key_words[3] and key_words[4] and key_words[5] and key_words[6] and key_words[7] and key_words[8] and key_words[9] and key_words[10] and key_words[11] and key_words[12] and key_words[13]))
# print(all_articles)
# Total_number= all_articles.get("totalResults")
import torch
import math
import numpy as np
import torch.nn as nn
from models.encoder import PCNNEncoder, SACNNEncoder, CNNEncoder, SingleEncoder, SALayer
from models.decoder import Decoder
from models.cnn import PCNN, SelfAttentionConv, CNNLayer, ChannelParallelismCNN, FastCNNLayer, BlurCNNLayer, FCNNLayer
from utils.positionalEncoding import PositionalEncoding
class CSATNet(nn.Module):
    """CNN-encoder / attention-decoder network emitting `label_size` values."""

    def __init__(self, num_hiddens=128, num_heads=4, seq_len=4, cnn_layer1_num=3, cnn_layer2_num=2,
                 enc_layer_num=3, dec_layer_num=3, label_size=1, drop_out=0.1, min_output_size=32):
        super(CSATNet, self).__init__()
        self.enc = CNNEncoder(num_hiddens, num_heads, seq_len, cnn_layer1_num,
                              cnn_layer2_num, enc_layer_num, drop_out, min_output_size)
        # The decoder's key size is dictated by the encoder.
        self.key_size = self.enc.key_size
        self.dec = Decoder(dec_layer_num, self.key_size, num_hiddens, num_heads, seq_len, drop_out)
        self.dense = nn.Linear(num_hiddens, label_size)

    def forward(self, x):
        encoded, cross = self.enc(x)
        decoded = self.dec(encoded, cross)
        return self.dense(decoded)
class CSATNet_v2(nn.Module):
    """Two-branch variant of CSATNet.

    Branch 1 maps per-frame CNN features straight to a `vector_num` vector;
    branch 2 runs the same features through a positional-encoded
    encoder/decoder. The concatenated branches feed the output head
    (self-attention or MLP, selected by `attention`).
    """
    def __init__(self, num_hiddens=128, num_heads=4, seq_len=8, cnn_layer1_num=3, cnn_layer2_num=2, enc_layer_num=3,
                 dec_layer_num=3, vector_num=32, label_size=1, drop_out=0.1, min_output_size=32,
                 attention=False, channel_expansion=True):
        super(CSATNet_v2, self).__init__()
        self.num_hiddens = num_hiddens
        self.norm = nn.BatchNorm2d(3)
        # Alternative CNN front-ends kept for reference/experiments:
        # self.cnn = CNNLayer(num_hiddens, cnn_layer1_num, cnn_layer2_num, channel_expansion)
        self.cnn = FCNNLayer(num_hiddens, cnn_layer1_num, cnn_layer2_num, channel_expansion)
        # self.cnn = BlurCNNLayer(num_hiddens, cnn_layer1_num, cnn_layer2_num, laplace)
        # self.cnn = FastCNNLayer(num_hiddens, cnn_layer1_num, cnn_layer2_num, laplace)
        # self.cnn = ChannelParallelismCNN(num_hiddens, cnn_layer1_num, cnn_layer2_num)
        self.pe = PositionalEncoding(num_hiddens, 0)
        self.enc = SingleEncoder(enc_layer_num, num_hiddens, num_heads, seq_len, drop_out, min_output_size)
        # Branch 1 head: per-frame features -> vector_num.
        self.li = nn.Sequential(
            nn.ELU(),
            nn.Dropout(drop_out),
            nn.Linear(num_hiddens, (num_hiddens + vector_num)//2),
            nn.ELU(),
            nn.Dropout(drop_out),
            nn.Linear((num_hiddens + vector_num)//2, vector_num)
        )
        self.key_size = self.enc.key_size
        self.dec = Decoder(dec_layer_num, self.key_size, num_hiddens, num_heads, seq_len, drop_out)
        self.dense = nn.Linear(num_hiddens, vector_num)
        # Output head over the concatenated branches (2 * vector_num wide).
        if attention:
            self.output_li = SALayer(2, vector_num * 2, label_size, 4, seq_len, drop_out)
        else:
            self.output_li = nn.Sequential(
                nn.ELU(),
                nn.Dropout(drop_out),
                nn.Linear(vector_num * 2, 64),
                nn.ELU(),
                nn.Dropout(drop_out),
                nn.Linear(64, label_size)
            )

    def forward(self, x):
        # x: (batch, seq, C, H, W) -> fold seq into batch for the CNN.
        batch_num = x.shape[0]
        x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        x = self.norm(x)
        x = self.cnn(x)
        # Branch 1: direct per-frame projection, then unfold seq dim.
        output1 = self.li(x)
        output1 = output1.reshape(batch_num, -1, output1.shape[1])
        # Branch 2: positional encoding + encoder/decoder, then projection.
        x = x.reshape(batch_num, -1, x.shape[1])
        x = self.pe(x * math.sqrt(self.num_hiddens))
        cross = self.enc(x)
        output2 = self.dec(x, cross)
        output2 = self.dense(output2)
        # Concatenate branches along the feature axis and run the head.
        output = torch.cat((output1, output2), dim=2)
        output = self.output_li(output)
        return output
class PSACNN(nn.Module):
    """PCNN backbone followed by a small MLP regression head.

    The (batch, seq) leading dimensions are folded together for the 2-D
    backbone and restored on the way out.
    """

    def __init__(self, num_hiddens=128, cnn_layer1_num=2, cnn_layer2_num=0,
                 input_size=(88, 200), label_size=1, attention=False):
        super(PSACNN, self).__init__()
        self.cnn = PCNN(num_hiddens, cnn_layer1_num, cnn_layer2_num, input_size, attention)
        self.dense = nn.Sequential(
            nn.Linear(num_hiddens, 128),
            nn.ELU(),
            nn.Linear(128, 32),
            nn.ELU(),
            nn.Linear(32, label_size),
        )

    def forward(self, x):
        # Fold the sequence dimension into the batch for the 2-D backbone.
        batch_size = x.shape[0]
        frames = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        features = self.dense(self.cnn(frames))
        # Restore the (batch, seq, label) layout.
        return features.reshape(batch_size, -1, features.shape[1])
class SACNN(nn.Module):
    """Plain conv stem followed by self-attention conv blocks and an MLP head.

    Module names ("layer1-i", "actFun-i", "pool1", "pool2") match the
    original implementation so checkpoints stay loadable.
    """

    def __init__(self, cnn_layer1_num=3, cnn_layer2_num=2, label_size=1):
        super(SACNN, self).__init__()
        channels = [3, 24, 36, 48, 64, 80, 128] + [256] * 11
        self.cnn = nn.Sequential()
        # Stage 1: strided 5x5 convolutions with ELU activations.
        for idx in range(cnn_layer1_num):
            self.cnn.add_module("layer1-" + str(idx),
                                nn.Conv2d(channels[idx], channels[idx + 1], kernel_size=5, stride=2))
            self.cnn.add_module("actFun-" + str(idx), nn.ELU())
        self.cnn.add_module("pool1", nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        # Stage 2: self-attention convolution blocks.
        for idx in range(cnn_layer1_num, cnn_layer1_num + cnn_layer2_num):
            self.cnn.add_module("layer1-" + str(idx),
                                SelfAttentionConv(channels[idx], channels[idx + 1] // 2, channels[idx + 1]))
        self.cnn.add_module("pool2", nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        head_in = channels[cnn_layer1_num + cnn_layer2_num]
        self.dense = nn.Sequential(
            nn.Flatten(),
            nn.Linear(head_in * 3, 128),
            nn.ELU(),
            nn.Linear(128, 32),
            nn.ELU(),
            nn.Linear(32, label_size),
        )

    def forward(self, x):
        batch_size = x.shape[0]
        frames = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        features = self.dense(self.cnn(frames))
        return features.reshape(batch_size, -1, features.shape[1])
class FSACNN(nn.Module):
    """Fixed conv stem, self-attention conv stages, final strided conv, MLP head.

    Module names match the original implementation so checkpoints stay
    loadable.
    """

    def __init__(self, cnn_layer1_num=3, cnn_layer2_num=2, label_size=1):
        super(FSACNN, self).__init__()
        channels = [24, 36, 48, 64, 80, 128] + [256] * 11
        # Fixed stem: one strided 5x5 convolution from RGB to 24 channels.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 24, kernel_size=5, stride=2),
            nn.ELU(),
        )
        # Stage 1: self-attention conv blocks with ELU activations.
        for idx in range(cnn_layer1_num):
            self.cnn.add_module("layer1-" + str(idx),
                                SelfAttentionConv(channels[idx], channels[idx + 1] // 2, channels[idx + 1]))
            self.cnn.add_module("actFun-" + str(idx), nn.ELU())
        self.cnn.add_module("pool1", nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        # Stage 2: self-attention conv blocks without separate activations.
        for idx in range(cnn_layer1_num, cnn_layer1_num + cnn_layer2_num):
            self.cnn.add_module("layer1-" + str(idx),
                                SelfAttentionConv(channels[idx], channels[idx + 1] // 2, channels[idx + 1]))
        self.cnn.add_module("pool2", nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        stage_out = channels[cnn_layer1_num + cnn_layer2_num]
        self.cnn.add_module("layer3-cnn", nn.Conv2d(stage_out, channels[cnn_layer1_num + cnn_layer2_num + 1],
                                                    kernel_size=3, stride=2))
        head_in = channels[cnn_layer1_num + cnn_layer2_num + 1]
        self.dense = nn.Sequential(
            nn.Flatten(),
            nn.Linear(head_in * 60, 128),
            nn.ELU(),
            nn.Linear(128, 32),
            nn.ELU(),
            nn.Linear(32, label_size),
        )

    def forward(self, x):
        batch_size = x.shape[0]
        frames = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        features = self.dense(self.cnn(frames))
        return features.reshape(batch_size, -1, features.shape[1])
class CNN(nn.Module):
    """Baseline model: CNNLayer backbone with a 3-layer MLP head."""

    def __init__(self, cnn_layer1_num=3, cnn_layer2_num=2, label_size=1):
        super(CNN, self).__init__()
        self.cnn = CNNLayer(256, cnn_layer1_num, cnn_layer2_num)
        self.dense = nn.Sequential(
            nn.Flatten(),
            nn.Linear(256, 128),
            nn.ELU(),
            nn.Linear(128, 32),
            nn.ELU(),
            nn.Linear(32, label_size),
        )

    def forward(self, x):
        # Fold (batch, seq) into one frame axis, run the backbone + head,
        # then restore the sequence layout.
        batch_size = x.shape[0]
        frames = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        features = self.dense(self.cnn(frames))
        return features.reshape(batch_size, -1, features.shape[1])
if __name__ == '__main__':
    # Smoke test: batch of 8 sequences of 4 RGB frames (180x320 each).
    X = torch.rand(size=(8, 4, 3, 180, 320))
    # net = CSATNet_v2()
    net = CSATNet()
    X = net(X)
    print(X.shape)
    # Scratch code for a hand-built Laplacian filter, kept for reference:
    # conv_op = nn.Conv2d(3, 3, 3, padding=1, bias=False)
    # sobel_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32')
    # sobel_kernel = sobel_kernel.reshape((1, 1, 3, 3))
    # sobel_kernel = torch.from_numpy(sobel_kernel)
    # sobel_kernel = sobel_kernel.repeat(3, 3, 1, 1)
    # conv_op.weight.data = sobel_kernel
    # x = torch.rand(size=(12, 3, 88, 200))
    # y = conv_op(x)
    # print(y.shape)
|
import argparse
from pathlib import Path
def build_parser() -> argparse.ArgumentParser:
    """
    Build the command-line parser.

    :return: Argument parser
    """
    DESCRIPTION = "Extract snippet in coq file (.v) generate by alectryon."
    # Bug fix: DESCRIPTION was passed positionally, which argparse interprets
    # as `prog` (the program name shown in usage), not as the description.
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    INPUT_FILES_HELP = "coq file '.v'"
    parser.add_argument("input",
                        type=Path,
                        help=INPUT_FILES_HELP)
    OUTPUT_DIR_HELP = "output directory"
    parser.add_argument("-o", "--output-dir",
                        default=Path("."),
                        type=Path,
                        help=OUTPUT_DIR_HELP)
    DUMP_LATEX_HELP = "dump latex complete file (useful to debug)"
    parser.add_argument("-d", "--dump-complete-latex",
                        action="store_true",
                        help=DUMP_LATEX_HELP)
    # Bug fix: the implicit string concatenation below was missing a
    # separating space and rendered as "latexfile".
    DUMP_LATEX_DIR_HELP = "directory to dump complete latex " \
                          "file (please active flag '--dump-complete-latex')"
    parser.add_argument("-D", "--dump-complete-latex-dir",
                        type=Path,
                        default=Path("."),
                        help=DUMP_LATEX_DIR_HELP)
    # serapi (copy from alectryon.cli)
    SUBP_HELP = "Pass arguments to the SerAPI process"
    subp = parser.add_argument_group("Subprocess arguments", SUBP_HELP)
    SERTOP_ARGS_HELP = "Pass a single argument to SerAPI (e.g. -Q dir,lib)."
    subp.add_argument("--sertop-arg", dest="sertop_args",
                      action="append", default=[],
                      metavar="SERAPI_ARG",
                      help=SERTOP_ARGS_HELP)
    I_HELP = "Pass -I DIR to the SerAPI subprocess."
    subp.add_argument("-I", "--ml-include-path", dest="coq_args_I",
                      metavar="DIR", nargs=1, action="append",
                      default=[], help=I_HELP)
    Q_HELP = "Pass -Q DIR COQDIR to the SerAPI subprocess."
    subp.add_argument("-Q", "--load-path", dest="coq_args_Q",
                      metavar=("DIR", "COQDIR"), nargs=2, action="append",
                      default=[], help=Q_HELP)
    R_HELP = "Pass -R DIR COQDIR to the SerAPI subprocess."
    subp.add_argument("-R", "--rec-load-path", dest="coq_args_R",
                      metavar=("DIR", "COQDIR"), nargs=2, action="append",
                      default=[], help=R_HELP)
    EXPECT_UNEXPECTED_HELP = "Ignore unexpected output from SerAPI"
    parser.add_argument("--expect-unexpected", action="store_true",
                        default=False, help=EXPECT_UNEXPECTED_HELP)
    return parser
def post_process_arguments(args: argparse.Namespace) -> argparse.Namespace:
    """
    Fold the parsed -I/-R/-Q options into the sertop argument list.

    :param args: args parsed
    :return: the same namespace, mutated in place
    """
    include_flags = [("-I", dirpath) for dirpath in args.coq_args_I]
    rec_load_flags = [("-R", ",".join(pair)) for pair in args.coq_args_R]
    load_flags = [("-Q", ",".join(pair)) for pair in args.coq_args_Q]
    for flag, value in include_flags + rec_load_flags + load_flags:
        args.sertop_args.extend((flag, value))
    return args
def parse_args() -> argparse.Namespace:
    """
    Parse command-line arguments and normalize the sertop options.

    :return: Namespace structure
    """
    return post_process_arguments(build_parser().parse_args())
EXTENSION_LATEX = ".tex"
def get_path_latex(output_dir: Path, name: str):
return output_dir / (name + EXTENSION_LATEX)
def make_latex_file(content: str, path: Path):
    """Write *content* to *path*, creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8: LaTeX sources may contain non-ASCII characters and the
    # platform default encoding is not reliable (e.g. cp1252 on Windows).
    with open(path, 'w', encoding='utf-8') as file:
        file.write(content)
def make_latex(input_file: Path, sertop_args):
    """Convert *input_file* to LaTeX and wrap it in a snippet extractor.

    Returns a (SnippetExtractor, CoqToLatexReader) pair.
    """
    from .Extractor import SnippetExtractor
    from .docutils import CoqToLatexReader, register_docutils
    print(f"Convert '{input_file}' to latex.")
    register_docutils(sertop_args)
    latex_reader = CoqToLatexReader(input_file)
    extractor = SnippetExtractor(latex_reader)
    return extractor, latex_reader
def copy_asset(output_dir: Path, dir_name='assets'):
    """Copy the alectryon/pygments .sty assets into *output_dir*/*dir_name*."""
    from alectryon.cli import copy_assets
    from alectryon.latex import ASSETS
    from shutil import copy
    STY = ASSETS.ALECTRYON_STY + ASSETS.PYGMENTS_STY
    output_dir = output_dir / dir_name
    output_dir.mkdir(exist_ok=True)
    pairs = [(ASSETS.PATH, asset) for asset in STY]
    copy_assets(None, pairs, copy, output_dir)
    print(f"copy assets {STY} in {output_dir}")
def main():
    """Drive the extraction: parse args, build LaTeX, dump every snippet.

    Returns a process exit code (0 on success).
    """
    args = parse_args()
    stem = args.input.stem
    snippet_dir = args.output_dir / stem
    extractor, reader = make_latex(args.input, args.sertop_args)
    if reader.exit_code > 0:
        return reader.exit_code
    if args.dump_complete_latex:
        # Optional debugging aid: keep the whole LaTeX document around.
        path_latex = get_path_latex(args.dump_complete_latex_dir, stem)
        print(f"Make complete latex file {path_latex}.")
        make_latex_file(reader.content_latex, path_latex)
    print("Extract snippets.")
    snippets = extractor.extract()
    for snippet in snippets:
        path = get_path_latex(snippet_dir, snippet.name)
        print(f"extract snippet '{snippet.name}', dump file {path}.")
        make_latex_file(str(snippet), path)
    if snippets:
        copy_asset(args.output_dir)
    return 0
|
#!/Users/i346261/Documents/git/personal/django-first-project/my_ve/bin/python3.6
from django.core import management

if __name__ == "__main__":
    # Delegate to Django's CLI dispatcher (runserver, migrate, ...).
    management.execute_from_command_line()
|
#!/usr/bin/env python
import optparse
import os, sys, stat
def which(program):
    '''
    Returns the path to a given executable, or None if not found.

    On Windows the ".exe" suffix is appended automatically.  If *program*
    contains a directory component it is checked directly; otherwise each
    directory on PATH is searched in order.
    '''
    # Fix: removed a redundant `import os` here — os is already imported at
    # module level.
    if os.name == "nt":
        program += ".exe"

    def is_exe(fpath):
        # Existing and executable by the current user.
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        # An explicit path was given: accept it only if it is executable.
        if is_exe(program):
            return program
    else:
        # Bare name: search every directory on PATH.
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
def dictToString(dict):
    '''
    Joins the keys and values of a dictionary, using = as a separator.

    Each pair is rendered as "key=value " — note the trailing space, which
    separates consecutive cmake options in the generated command line.
    '''
    # Fix: `iteritems()` is Python-2-only; `items()` works on both major
    # versions.  join() also avoids the quadratic string concatenation.
    return ''.join(key + '=' + value + ' ' for key, value in dict.items())
def escape(s):
    '''
    Replaces \ with \\ and " with \" so the string can be embedded inside a
    double-quoted Python string literal.
    '''
    backslashes_doubled = s.replace('\\', '\\\\')
    return backslashes_doubled.replace('"', '\\"')
def generateCmakeCommand(project, additionalOptions = {}):
    '''
    Generates the run-cmake command for a given project.

    Writes an executable build/<project>/run-cmake.py script that invokes
    cmake with the merged base + additional options, and records the build
    folder in the global `buildFolders` list.  Reads the global `options`
    (parsed command line) and `rootPath`.
    '''
    # NOTE(review): the mutable default `additionalOptions={}` is shared
    # between calls and is mutated below (del) — works here because every
    # caller passes a fresh dict, but fragile.
    baseOptions = {
        '-DQT_QMAKE_EXECUTABLE:FILEPATH': options.qmake,
        '-DCMAKE_INSTALL_PREFIX': '"' + rootPath + '"',
        '-DCMAKE_PREFIX_PATH': '"' + rootPath + '"',
        '-DCMAKE_BUILD_TYPE': 'Debug' if options.debug else 'Release',
        '-DSTATIC_LIBRARY': 'ON' if options.static else 'OFF',
        '-DKDE4_BUILD_TESTS': 'ON' if options.tests else 'OFF',
        '-DKIMAP_STANDALONE': 'ON',
        '-DNO_DBUS': 'ON',
        '-DKDE_PLATFORM_FEATURE_DISABLE_DEPRECATED': 'ON',
        '-DCMAKE_CXX_FLAGS': '"-DKIMAP_STANDALONE -DNO_DBUS' + (' -DKDEWIN_STATIC_LIBS' if options.static else '') + '"'
    }
    # Special cases for Windows
    if os.name == 'nt':
        # When using a statically-linked version of Qt, we need to add ws2_32.lib
        baseOptions['-DCMAKE_CXX_STANDARD_LIBRARIES'] = '"kernel32.lib user32.lib gdi32.lib winspool.lib shell32.lib ole32.lib oleaut32.lib uuid.lib comdlg32.lib advapi32.lib ws2_32.lib"'
        # With MSVC in Release mode, use Whole Program Optimization
        # Todo: we should check that we're using MSVC and not Mingw before setting those flags.
        baseOptions['-DCMAKE_CXX_FLAGS_RELEASE:STRING'] = '"/MD /O2 /Ob2 /D /GL /D NDEBUG -DQT_NO_DEBUG"'
        baseOptions['-DCMAKE_EXE_LINKER_FLAGS_RELEASE:STRING'] = '"/LTCG /INCREMENTAL:NO /NODEFAULTLIB:libcmt /DEFAULTLIB:msvcrt"'
    # Special case: if additionalOptions provides its own value for CMAKE_CXX_FLAGS, we merge it with
    # the base option, instead of replacing it.
    if '-DCMAKE_CXX_FLAGS' in additionalOptions:
        baseOptions['-DCMAKE_CXX_FLAGS'] = baseOptions['-DCMAKE_CXX_FLAGS'].rstrip('"') \
            + ' ' + additionalOptions['-DCMAKE_CXX_FLAGS'].lstrip('"')
        del additionalOptions['-DCMAKE_CXX_FLAGS']
    buildFolder = rootPath + '/build/' + project
    buildFolders.append(buildFolder)
    try:
        os.makedirs(buildFolder)
    except:
        pass # makedirs raises an exception if the folder already exists, ignore it.
    command = 'cmake ' \
        + ('-G "NMake Makefiles" ' if os.name == 'nt' else '') \
        + dictToString(baseOptions) \
        + dictToString(additionalOptions) \
        + '"' + rootPath + '\\src\\' + project + '"'
    # Wrap the command in a tiny self-contained Python script so it can be
    # re-run later from the build folder.
    fullScript = '#!/usr/bin/env python\n' \
        + 'import os\n' \
        + 'os.system("' + escape(command) + '")\n'
    filename = buildFolder + '/run-cmake.py'
    print ' -- generating ' + filename
    file = open(filename, 'w')
    file.write(fullScript)
    file.close()
    # Make the generated script executable by the owner.
    os.chmod(filename, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
###################################################################
# Command-line options for the bootstrap script.
parser = optparse.OptionParser()
parser.add_option("--qmake", dest="qmake", help="full path to qmake executable", metavar="QMAKE")
parser.add_option("--debug", action="store_true", dest="debug", help="build in debug mode (default)", default=True)
parser.add_option("--release", action="store_false", dest="debug", help="build in release mode")
parser.add_option("--static", action="store_true", dest="static", help="build as static libraries (default)", default=True)
parser.add_option("--shared", action="store_false", dest="static", help="build as a shared libraries")
parser.add_option("--tests", action="store_true", dest="tests", help="build tests", default=False)
(options, args) = parser.parse_args()
# Filled in by generateCmakeCommand, consumed by the build-all generator below.
buildFolders = []
# If --qmake wasn't specified, try to autodetect it in path
if options.qmake == None:
    options.qmake = which("qmake")
    # If we couldn't autodetect, fail
    if options.qmake == None:
        print "Could not find qmake executable in path, please specify it with the --qmake command line argument";
        sys.exit(1)
# If the --qmake option is invalid, fail
if not (os.path.exists(options.qmake) and os.access(options.qmake, os.X_OK)):
    print 'Could not open qmake executable "' + options.qmake + '"'
    sys.exit(1)
# Initialize paths with forward slashes on all platforms
rootPath = os.getcwd()
if os.name == 'nt':
    options.qmake = options.qmake.replace('\\', '/')
    rootPath = rootPath.replace('\\', '/')
print 'Using qmake executable at "' + options.qmake + '"\n'
## Boost
generateCmakeCommand("3rdparty/boost");
## Cyrus-sasl
if os.name == "nt":
    generateCmakeCommand("3rdparty/cyrus-sasl", {'-DSTATIC_PLUGIN=': 'ON' if options.static else 'OFF'})
## ZLib
generateCmakeCommand("3rdparty/zlib");
## Automoc
generateCmakeCommand("automoc");
## kdewin on windows
if os.name == "nt":
    generateCmakeCommand("kdewin");
## kdelibs
generateCmakeCommand("kdelibs");
## kdepimlibs
generateCmakeCommand("kdepimlibs", {'-DKDEPIM_NO_KRESOURCES': 'ON',
                                    '-DKDEPIM_NO_KCAL': 'ON',
                                    '-DCMAKE_CXX_FLAGS': '"-DKDELIBS_STATIC_LIBS"' if options.static else '""'})
## ksmtp
generateCmakeCommand("ksmtp", {'-DCMAKE_CXX_FLAGS': '"-DKDELIBS_STATIC_LIBS -DKDEPIM_STATIC_LIBS"' if options.static else '""'})
## Generate the build-all script
# build-all.py re-runs every per-project run-cmake.py and builds/installs in
# dependency order, aborting on the first failure.
buildAll = '#!/usr/bin/env python\nimport os, sys\n'
for folder in buildFolders:
    command = ""
    if os.name == 'nt':
        command = 'cd "' + folder + '" && python run-cmake.py && jom && jom install'
    else:
        command = 'cd "' + folder + '" && ./run-cmake.py && make && make install'
    buildAll += 'if os.system("' + escape(command) + '") != 0:\n sys.exit(1)\n'
filename = rootPath + '/build-all.py'
file = open(filename, 'w')
file.write(buildAll)
file.close()
os.chmod(filename, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
print "\nConfiguration successful. Now you can launch build-all.py\n";
|
import requests

# Fetch a random cat GIF; the API answers with the image source directly.
stranka = requests.get('http://thecatapi.com/api/images/get?format=src&type=gif')
# Raise for any 4xx/5xx status before reporting it.
stranka.raise_for_status()
print(stranka.status_code)
# 2021Feb07 Dog Food Timer
from adafruit_circuitplayground import cp
import time

# 10 hours = 10 LEDs
countdown_seconds = 60*60*10 # 10 hour production
# NOTE(review): the test value below overrides the production value — remove
# or comment this line for the 10-hour production run.
countdown_seconds = 60 # 1 minute test
# LED color when a pixel is "off" (idle).
Rdefault = 0
Gdefault = 0
Bdefault = 0
# Countdown illumination
Ron = 10
Gon = 10
Bon = 200
R = Rdefault
G = Gdefault
B = Bdefault
cp.pixels.brightness = 0.03
pressed_count = 0
pressed_prior = False
pressed = False
pressed_time = time.monotonic()
# Horizontal acceleration magnitude (m/s^2) treated as a "press" (tilt/shake).
up_threshold = 6
while True:
    # adafruit acceleration sample
    x, y, z = cp.acceleration
    #print((x, y, z))
    up = ( abs(x) + abs(y) )
    # either button pressed?
    if up > up_threshold:
        pressed = True
    else:
        pressed = False
    # Detect state change (rising edge restarts the countdown).
    if not pressed_prior and pressed:
        pressed_count += 1
        pressed_time = time.monotonic()
    # reset state to latest value
    pressed_prior = pressed
    elapsed = time.monotonic() - pressed_time
    pixel_count = pressed_count % 10
    # pixel_count = int(elapsed*5.0 % 10) + 1
    if not pressed:
        pixel_count = 0
    # time math: remaining fraction of the countdown scaled to 10 LEDs.
    leds_illuminated = ( ( countdown_seconds - elapsed ) / countdown_seconds ) * 10
    print(f"elapsed: {elapsed} leds: {leds_illuminated}")
    for led in range(0, 10):
        if ( leds_illuminated > led ):
            cp.pixels[led] = (Ron,Gon,Bon)
        else:
            cp.pixels[led] = (Rdefault, Gdefault, Bdefault)
    time.sleep(0.5)
|
from django.conf.urls.defaults import *
from models import Entry # relative import

# Common keyword arguments shared by all the date-based generic views.
info_dict = {
    'queryset': Entry.objects.all(),
    'date_field': 'pub_date',
}

# NOTE(review): `django.conf.urls.defaults`, `patterns()` and the string-based
# `django.views.generic.date_based` views are legacy Django (<1.8) APIs that
# were later removed — confirm the Django version before reusing this module.
urlpatterns = patterns('django.views.generic.date_based',
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[\w-]+)/$', 'object_detail', dict(info_dict, slug_field='slug')),
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', 'archive_day', info_dict),
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$', 'archive_month', info_dict),
    (r'^(?P<year>\d{4})/$', 'archive_year', info_dict),
    url(r'^/?$', 'archive_index', info_dict, name="blog-index"),
)
|
# f = open("h1.txt","w+")
# li = ["hello world\n","this is nyc\n"]
# f.writelines(li)
# f.close()
# f = open("h1.txt","a+")
#
# context = "goodbye"
#
# f.write(context)
# f.close()
# import os
# if os.path.exists("h1副本.txt"):
# os.remove("h1副本.txt")
import os
li = os.listdir(".")
print(li)
if "hello.txt" in li:
os.rename("hello.txt","hello1.txt")
elif "hi.txt" in li:
os.rename("li.txt","hi1.txt")
|
import json
import requests
from flask import Blueprint, request
from app import config, utils, translations
init_apis = Blueprint('init_apis', __name__)
@init_apis.route('/api/config/v1/locations')
@init_apis.route('/api/config/v1/keys')
def locations():
    """Proxy the config/keys endpoints to the environment matching the key."""
    if 'key' not in request.args:
        return utils.error('Missing "key" argument.')
    # Base64 "client_id:client_secret" keys identifying each backend environment.
    DEVELOPMENT = 'ODE4NTYzODY1ODY1MDAyNTQ4Mjc2NDIyNTk4MjUwMjI6c3FqNnE3c2pqdDUwZnZ1NXRpaW1kaXJjaHduZG1jNmhjMXAxZ3oyZ3BmMnZsZmg3b2hmdDU3emowcThqNzMyMA=='
    STAGING = 'OTQ0MjIwMDEzNzUzMzk2OTY2Mzc2MTQyMzM5NjgxNDI6OXY3cHFxMXh0eTVtZnNvNXp4NXJzOXFhcTE2MWh5ZG4wN3l5OG1wcThyNXRycmo4aGJsYzEwNTB6NnFodDlvdQ=='
    PRODUCTION = 'ODI1OTAzOTI2NzcyNzcxNzcxNTI0MTA5ODk1ODA2NDc6cDFwd3hoanFiM2NiazdyMWlwdXFjeG85MjRreDN1dDQzNDBmd3hvd3pxM3F4bjlidmMzdml0bzlsa2N2NGl0bA=='
    key = request.args.get('key', PRODUCTION)
    base_url = 'https://config-api-dot-{}-euw-gmal-mcdonalds.appspot.com'
    if key == DEVELOPMENT:
        url = base_url.format('dev') + request.path
    elif key == STAGING:
        url = base_url.format('stg') + request.path
    elif key == PRODUCTION:
        url = base_url.format('prd') + request.path
    else:
        # Bug fix: an unrecognized key used to fall through with `url` unbound
        # and crash with NameError; reject it explicitly instead.
        return utils.error('Unknown "key" argument.')
    params = {
        'key': key
    }
    headers = {
        'User-Agent': 'okhttp/3.12.0'
    }
    r = requests.request(request.method, url, params=params, headers=headers)
    if r.status_code != 200:
        return utils.request_error(r)
    x = json.loads(r.content.decode())
    return utils.json_response(x)
@init_apis.route('/api/locationfinder/v1/client/location/info')
def locationfinder():
    """Return a stubbed location (Italy, 0/0) instead of proxying upstream.

    NOTE(review): the original built the upstream URL/params/headers here but
    never issued the request — the response is intentionally mocked, so the
    dead proxy code was removed.
    """
    if 'key' not in request.args:
        return utils.error('Missing "key" argument.')
    location = {
        'country': {
            'name': 'Italy',
            'code': 'IT'
        },
        'location': {
            'latitude': 0.0,
            'longitude': 0.0
        }
    }
    return utils.json_response(location)
@init_apis.route('/api/config/v1/configs/<market_id>/<language_code>-<country_code>')
def api_config_configs(market_id, language_code, country_code):
    """Proxy the app config from the selected environment and patch it.

    The upstream config is rewritten to point at this server, disable
    analytics/forced updates/security checks, and inject custom menu and
    on-boarding content before being returned to the app.
    """
    # Base64 "client_id:client_secret" keys identifying each backend environment.
    DEVELOPMENT = 'ODE4NTYzODY1ODY1MDAyNTQ4Mjc2NDIyNTk4MjUwMjI6c3FqNnE3c2pqdDUwZnZ1NXRpaW1kaXJjaHduZG1jNmhjMXAxZ3oyZ3BmMnZsZmg3b2hmdDU3emowcThqNzMyMA=='
    STAGING = 'OTQ0MjIwMDEzNzUzMzk2OTY2Mzc2MTQyMzM5NjgxNDI6OXY3cHFxMXh0eTVtZnNvNXp4NXJzOXFhcTE2MWh5ZG4wN3l5OG1wcThyNXRycmo4aGJsYzEwNTB6NnFodDlvdQ=='
    PRODUCTION = 'ODI1OTAzOTI2NzcyNzcxNzcxNTI0MTA5ODk1ODA2NDc6cDFwd3hoanFiM2NiazdyMWlwdXFjeG85MjRreDN1dDQzNDBmd3hvd3pxM3F4bjlidmMzdml0bzlsa2N2NGl0bA=='
    key = request.args.get('key', PRODUCTION)
    base_url = 'https://config-api-dot-{}-euw-gmal-mcdonalds.appspot.com'
    if key == DEVELOPMENT:
        url = base_url.format('dev') + request.path
    elif key == STAGING:
        url = base_url.format('stg') + request.path
    elif key == PRODUCTION:
        url = base_url.format('prd') + request.path
    else:
        # Bug fix: an unrecognized key used to leave `url` unbound and crash
        # with NameError below; reject it explicitly instead.
        return utils.error('Unknown "key" argument.')
    params = {
        'key': key
    }
    headers = {
        'User-Agent': 'okhttp/3.12.0'
    }
    r = requests.request(request.method, url, params=params, headers=headers)
    if r.status_code != 200:
        # Bug fix: the error response was built but never returned, so
        # failures fell through and crashed below (cf. locations()).
        return utils.request_error(r)
    x = json.loads(r.content.decode())
    # Change siteId
    x['connectors']['vMob']['siteId'] = config.server_name
    # Disable analytics
    if 'analytic' in x:
        del x['analytic']
    # Disable forceUpdate
    x['forceUpdate']['enabled'] = False
    # Enable numericCode
    x['loyalty']['enableNumericCode'] = True
    # Change offers tutorial
    x['loyalty']['onBoardingSlides'] = [{
        'image': '{}/images/onboarding_offer.png'.format(config.server_external_url),
        'title': 'Offerte Illimitate\n@Hexile_0',
        'message': 'Per sbloccare basta fare il login lasciando vuoti i campi e cliccando su Accedi.',
        'nextButtonText': 'gmal_tutorial_done'
    }]
    # Add on boarding slides
    x['onBoarding'] = {
        'skipButtonEnalbed': True,
        'slides': [
            {
                'image': '{}/images/onboarding_mcmod.png'.format(config.server_external_url),
                'title': 'Benvenuto in McMod!',
                'message': 'McMod è l\'app moddata del McDonald\'s.\n\n'
                           'Puoi leggere il codice sorgente del custom server e della patch su '
                           '<a href="https://github.com/giacomoferretti/ffapi-project/">GitHub</a>.\n\n'
                           '<b>Se l\'app ti è utile ricordati che puoi donarmi un caffè su '
                           '<a href="https://paypal.me/hexile0">PayPal</a></b>!\n\n'
                           '<i>Author: Hexile</i>',
                'nextButtonText': 'Ok'
            }
        ]
    }
    # Change menu: prepend the custom entries in order.
    custom_menu = [
        {
            "title": "Checkout the mod on Github",
            "image": "custom_icon_github",
            "link": "https://www.github.com/giacomoferretti/mcdapi-app-mod"
        },
        {
            "title": "Donate",
            "image": "custom_icon_donate",
            "link": "https://paypal.me/hexile0"
        },
        {
            "title": "Join the Telegram channel for updates",
            "image": "icon_menu_about_italic",
            "link": "https://t.me/ffcoupons_updates"
        }
    ]
    c = 0
    for advert in custom_menu:
        x['menu']['sub'].insert(c, advert)
        c += 1
    # Change login: no fields are required so empty credentials work.
    x['account']['termsConsent'] = 'mcmod-consent'
    x['account']['fields'] = [
        {
            'type': 'firstName',
            'showInAccount': False,
            'required': False
        },
        {
            'type': 'email',
            'showInAccount': False,
            'required': False
        }
    ]
    # Disable email verification for countries asking for it
    if 'emailVerification' in x['account']:
        del x['account']['emailVerification']
    # Enable redeem button
    if 'hideRedeemButton' in x['loyalty']:
        del x['loyalty']['hideRedeemButton']
    # Disable account migration prompt
    if 'migrationType' in x['account']:
        del x['account']['migrationType']
    # Disable security checks
    if 'system' in x:
        del x['system']
    return utils.json_response(x)
@init_apis.route('/<language>-<country>.json')
def language_strings(language, country):
    """Fetch the upstream translation bundle and override selected strings."""
    upstream = 'https://storage.googleapis.com/prd-euw-gmalstring-mcdonalds' + request.path
    r = requests.get(upstream)
    if r.status_code != 200:
        return utils.request_error(r)
    strings = json.loads(r.content.decode())
    # Customize the generic-error texts per language.
    strings['gmal_error_general_title'] = translations.get_string('generic_error_title', language)
    strings['gmal_error_general_body'] = translations.get_string('generic_error_body', language)
    return utils.json_response(strings)
|
# This script takes a list of real values and finds the locations
# of all local minima. The list of values is first smooth to reduce
# noise and only return more 'legitimate' local minima. The function
# returns the values of the list, at the minima.
import numpy as np
def findLocalMinima( list ) :
    """Return the smoothed values at the local minima of *list*.

    The input is smoothed with a 5-point moving average before the minima
    scan, so isolated noise spikes do not register as minima.
    """
    # Bug fix: mode='same' keeps the smoothed array aligned with the input.
    # The previous default ('full') pads the output to len(list)+4 and shifts
    # every index by 2, so the scan below inspected the wrong positions.
    smoothList = np.convolve(list, np.ones(5) / 5, mode='same')
    # A point is a local minimum when both immediate neighbours are larger.
    localMinima = []
    for i in range(1, len(list) - 1):
        if smoothList[i] < smoothList[i - 1] and smoothList[i] < smoothList[i + 1]:
            localMinima.append(smoothList[i])
    return localMinima
|
import discord
import os
import asyncio
from discord.ext import commands
async def update_embed(listpages, page, url, f, message):
    """Re-render *message* with the embed for entry *page* of *listpages*."""
    newpage = listpages[page]
    title, body, link = newpage[0], newpage[1], newpage[2]
    # "None" (the string) marks an entry without an external link.
    if link == "None":
        embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1}')
    else:
        embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1} - [Link]({link})')
    embed.add_field(name=title, value=f'{body}\n\n[Document in Github]({url})')
    embed.set_footer(text=f'Page {listpages.index(newpage)+1} of {len(listpages)}')
    await message.edit(embed=embed)
class Search(commands.Cog):
    """Cog that browses text documents under ./index with paged embeds."""

    def __init__(self, client):
        self.client = client

    @commands.command(aliases=['s','open','o'], help='Search Directory')
    async def search(self, ctx, dir=None, option=None):
        """Open document *dir*; *option* is a page number or "all" (DM only)."""
        if dir == None:
            await ctx.send('`[x] Usage: .search <file> [page|all]`')
        # Index files are named with a 2-character prefix that is stripped here.
        for file in os.listdir('./index'):
            f = file[2:]
            if f == dir + ".txt":
                with open(f'index/{file}', 'r') as file:
                    lines = file.read().splitlines()
                    page = 0
                    link = None
                    url = f'https://github.com/Javascript-void0/Archive/blob/main/{file.name}'
                    # If *option* parses as an in-range integer, use it as the
                    # (1-based) starting page; otherwise keep page 0.
                    try:
                        try:
                            if float(option).is_integer():
                                option = int(option)
                                try:
                                    if option > 0 and option <= len(lines):
                                        page = option-1
                                except IndexError:
                                    page = 0
                        except TypeError:
                            pass
                    except ValueError:
                        pass
                    # Split the file into parallel title/link/text lists; pages
                    # are separated by blank lines in the text body.
                    titles = []
                    texts = []
                    links = []
                    for i in range(len(lines)):
                        if lines[i].startswith(' TITLE '):
                            x = lines[i].replace(' TITLE ', "", 1)
                            titles.append(x)
                        elif lines[i].startswith(' LINK '):
                            x = lines[i].replace(' LINK ', "", 1)
                            links.append(x)
                        else:
                            texts.append(lines[i])
                    t = "\n".join(texts)
                    t = t.split("\n\n")
                    # Each page is a [title, text, link] triple.
                    listpages = []
                    for i in range(len(titles)):
                        newpage = []
                        newpage.append(titles[i])
                        newpage.append(t[i])
                        newpage.append(links[i])
                        listpages.append(newpage)
                    if option == "all":
                        # Dump every page as separate messages (DM only).
                        if isinstance(ctx.channel, discord.channel.DMChannel) == True:
                            for s in listpages:
                                newpage = listpages[page]
                                if newpage[2] == "None":
                                    embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1}')
                                else:
                                    embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1} - [Link]({newpage[2]})')
                                embed.add_field(name=newpage[0], value=f'{newpage[1]}\n\n[Document in Github]({url})')
                                embed.set_footer(text=f'Page {listpages.index(newpage)+1} of {len(listpages)}')
                                message = await ctx.send(embed=embed)
                                page += 1
                        else:
                            await ctx.send('`[x] Sending all pages can only be used in DMs`')
                    else:
                        # Single-page view with reaction-based navigation.
                        newpage = listpages[page]
                        if newpage[2] == "None":
                            embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1}')
                        else:
                            embed = discord.Embed(title=f'Document {f}', description=f'Page {page+1} - [Link]({newpage[2]})')
                        embed.add_field(name=newpage[0], value=f'{newpage[1]}\n\n[Document in Github]({url})')
                        embed.set_footer(text=f'Page {listpages.index(newpage)+1} of {len(listpages)}')
                        message = await ctx.send(embed=embed)
                    # Navigation controls: first / prev / next / last / close.
                    await message.add_reaction("⏮")
                    await message.add_reaction("◀")
                    await message.add_reaction("▶")
                    await message.add_reaction("⏭")
                    await message.add_reaction("❌")

                    def check(reaction, user):
                        # Only react to the invoking user on this message.
                        return reaction.message.id == message.id and user == ctx.author

                    while True:
                        try:
                            reaction, user = await self.client.wait_for('reaction_add', timeout= 60.0, check=check)
                            if reaction.emoji == '⏮' and page != 0:
                                page = 0
                                await message.remove_reaction(reaction, user)
                                await update_embed(listpages, page, url, f, message)
                            elif reaction.emoji == '◀' and page > 0:
                                page -= 1
                                await message.remove_reaction(reaction, user)
                                await update_embed(listpages, page, url, f, message)
                            elif reaction.emoji == '▶' and page < len(listpages) -1:
                                page += 1
                                await message.remove_reaction(reaction, user)
                                await update_embed(listpages, page, url, f, message)
                            elif reaction.emoji == '⏭' and page != len(listpages)-1:
                                page = len(listpages)-1
                                await message.remove_reaction(reaction, user)
                                await update_embed(listpages, page, url, f, message)
                            elif reaction.emoji == '❌':
                                await message.edit(content='`[x] Timeout`', embed=embed)
                                await message.remove_reaction(reaction, user)
                                break
                            else:
                                await message.remove_reaction(reaction, user)
                        except asyncio.TimeoutError:
                            await message.edit(content='`[x] Timeout`', embed=embed)
                            break
def setup(client):
    # Entry point used by discord.py's load_extension to register this cog.
    client.add_cog(Search(client))
# =======================
# Importing the libraries
# =======================
import sys
directory = '/home/marquesleandro/lib_class'
sys.path.insert(0, directory)
from tqdm import tqdm
from time import time
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg
import search_file
import import_msh
import assembly
import benchmark_problems
import semi_lagrangian
import export_vtk
import relatory
print '''
COPYRIGHT
======================================
Simulator: %s
created by Leandro Marques at 02/2019
e-mail: marquesleandro67@gmail.com
Gesar Search Group
State University of the Rio de Janeiro
======================================
''' %sys.argv[0]
print ' ------'
print ' INPUT:'
print ' ------'
print ""
# ----------------------------------------------------------------------------
print ' (1) - Poiseuille'
print ' (2) - Half Poiseuille'
print ' (3) - Cavity'
benchmark_problem = int(raw_input(" Enter benchmark problem above: "))
print ""
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
mesh_name = (raw_input(" Enter name (.msh): ") + '.msh')
equation_number = int(raw_input(" Enter equation number: "))
print ""
Re = float(raw_input(" Enter Reynolds Number (Re): "))
Sc = float(raw_input(" Enter Schmidt Number (Sc): "))
print ""
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
print ' (1) - Linear Element'
print ' (2) - Mini Element'
print ' (3) - Quadratic Element'
print ' (4) - Cubic Element'
polynomial_option = int(raw_input(" Enter polynomial degree option above: "))
print ""
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
print ' 3 Gauss Points'
print ' 4 Gauss Points'
print ' 6 Gauss Points'
print ' 12 Gauss Points'
gausspoints = int(raw_input(" Enter Gauss Points Number option above: "))
print ""
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
print ' (1) - Taylor Galerkin Scheme'
print ' (2) - Semi Lagrangian Scheme'
scheme_option = int(raw_input(" Enter simulation scheme option above: "))
print ""
print ""
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
nt = int(raw_input(" Enter number of time interations (nt): "))
directory_name = raw_input(" Enter folder name to save simulations: ")
print ""
# ----------------------------------------------------------------------------
print ' ------------'
print ' IMPORT MESH:'
print ' ------------'
start_time = time()
directory = search_file.Find(mesh_name)
if directory == 'File not found':
sys.exit()
# Read mesh topology, coordinates, BC point lists and neighborhood maps.
npoints, nelem, x, y, IEN, neumann_edges, dirichlet_pts, neighbors_nodes, neighbors_elements, far_neighbors_nodes, far_neighbors_elements, length_min, GL, nphysical = import_msh.Element2D(directory, mesh_name, equation_number, polynomial_option)
CFL = 0.5
#dt = float(CFL*length_min)
dt = 0.005  # fixed time step; the CFL-based value above is disabled
end_time = time()
import_mesh_time = end_time - start_time
print ' time duration: %.1f seconds' %import_mesh_time
print ""

print ' ---------'
print ' ASSEMBLY:'
print ' ---------'
start_time = time()
# Assemble the FEM operators: stiffness blocks (Kxx..Kyy, K), mass (M),
# lumped mass (MLump) and gradient operators (Gx, Gy).
Kxx, Kxy, Kyx, Kyy, K, M, MLump, Gx, Gy, polynomial_order = assembly.Element2D(polynomial_option, GL, npoints, nelem, IEN, x, y, gausspoints)
end_time = time()
assembly_time = end_time - start_time
print ' time duration: %.1f seconds' %assembly_time
print ""
print ' --------------------------------'
print ' INITIAL AND BOUNDARY CONDITIONS:'
print ' --------------------------------'
start_time = time()

# ------------------------ Boundaries Conditions ----------------------------------
# Applying vx condition
xvelocity_LHS0 = sps.lil_matrix.copy(M)
condition_xvelocity = benchmark_problems.QuadPoiseuille(nphysical,npoints,x,y)
condition_xvelocity.neumann_condition(neumann_edges[1])
condition_xvelocity.dirichlet_condition(dirichlet_pts[1])
condition_xvelocity.gaussian_elimination(xvelocity_LHS0,neighbors_nodes)
# Boundary node indices, reused each step when imposing vorticity BCs.
vorticity_ibc = condition_xvelocity.ibc

# Applying vy condition
yvelocity_LHS0 = sps.lil_matrix.copy(M)
condition_yvelocity = benchmark_problems.QuadPoiseuille(nphysical,npoints,x,y)
condition_yvelocity.neumann_condition(neumann_edges[2])
condition_yvelocity.dirichlet_condition(dirichlet_pts[2])
condition_yvelocity.gaussian_elimination(yvelocity_LHS0,neighbors_nodes)

# Applying psi condition
streamfunction_LHS0 = sps.lil_matrix.copy(K)
condition_streamfunction = benchmark_problems.QuadPoiseuille(nphysical,npoints,x,y)
condition_streamfunction.streamfunction_condition(dirichlet_pts[3],streamfunction_LHS0,neighbors_nodes)
# ---------------------------------------------------------------------------------

# -------------------------- Initial condition ------------------------------------
vx = np.copy(condition_xvelocity.bc_1)
vy = np.copy(condition_yvelocity.bc_1)
psi = np.copy(condition_streamfunction.bc_1)
w = np.zeros([npoints,1], dtype = float)

#---------- Step 1 - Compute the vorticity and stream field --------------------
# -----Vorticity initial-----
# Solve M*w = Gx*vy - Gy*vx by conjugate gradients (w is the initial guess).
vorticity_RHS = sps.lil_matrix.dot(Gx,vy) - sps.lil_matrix.dot(Gy,vx)
vorticity_LHS = sps.lil_matrix.copy(M)
w = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,w, maxiter=1.0e+05, tol=1.0e-05)
w = w[0].reshape((len(w[0]),1))

# -----Streamline initial-----
# psi condition
streamfunction_RHS = sps.lil_matrix.dot(M,w)
streamfunction_RHS = np.multiply(streamfunction_RHS,condition_streamfunction.bc_2)
streamfunction_RHS = streamfunction_RHS + condition_streamfunction.bc_dirichlet
psi = scipy.sparse.linalg.cg(condition_streamfunction.LHS,streamfunction_RHS,psi, maxiter=1.0e+05, tol=1.0e-05)
psi = psi[0].reshape((len(psi[0]),1))
#----------------------------------------------------------------------------------

end_time = time()
bc_apply_time = end_time - start_time
print ' time duration: %.1f seconds' %bc_apply_time
print ""

print ' -----------------------------'
print ' PARAMETERS OF THE SIMULATION:'
print ' -----------------------------'
print ' Mesh: %s' %mesh_name
print ' Number of equation: %s' %equation_number
print ' Number of nodes: %s' %npoints
print ' Number of elements: %s' %nelem
print ' Smallest edge length: %f' %length_min
print ' Time step: %s' %dt
print ' Number of time iteration: %s' %nt
print ' Reynolds number: %s' %Re
print ' Schmidt number: %s' %Sc
print ""

print ' ----------------------------'
print ' SOLVE THE LINEARS EQUATIONS:'
print ' ----------------------------'
print ""
print ' Saving simulation in %s' %directory_name
print ""
start_time = time()
# Buffer for the boundary-vorticity CG solve, recomputed every time step.
vorticity_bc_1 = np.zeros([npoints,1], dtype = float)
for t in tqdm(range(0, nt)):
    # ------------------------ Export VTK File ---------------------------------------
    save = export_vtk.Quad2D(x,y,IEN,npoints,nelem,w,w,psi,vx,vy)
    save.create_dir(directory_name)
    save.saveVTK(directory_name + str(t))
    # --------------------------------------------------------------------------------

    #---------- Step 2 - Compute the boundary conditions for vorticity --------------
    vorticity_RHS = sps.lil_matrix.dot(Gx,vy) - sps.lil_matrix.dot(Gy,vx)
    vorticity_LHS = sps.lil_matrix.copy(M)
    vorticity_bc_1 = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,vorticity_bc_1, maxiter=1.0e+05, tol=1.0e-05)
    vorticity_bc_1 = vorticity_bc_1[0].reshape((len(vorticity_bc_1[0]),1))

    # Gaussian elimination: impose Dirichlet vorticity on boundary nodes by
    # zeroing their rows/columns in the transport-equation LHS.
    vorticity_bc_dirichlet = np.zeros([npoints,1], dtype = float)
    vorticity_bc_neumann = np.zeros([npoints,1], dtype = float)
    vorticity_bc_2 = np.ones([npoints,1], dtype = float)
    # NOTE(review): np.copy on M and K assumes they behave like arrays here —
    # confirm their types at this point (assembled as sparse above).
    vorticity_LHS = ((np.copy(M)/dt) + (1.0/Re)*np.copy(K))
    for mm in vorticity_ibc:
        for nn in neighbors_nodes[mm]:
            vorticity_bc_dirichlet[nn] -= float(vorticity_LHS[nn,mm]*vorticity_bc_1[mm])
            vorticity_LHS[nn,mm] = 0.0
            vorticity_LHS[mm,nn] = 0.0
        vorticity_LHS[mm,mm] = 1.0
        vorticity_bc_dirichlet[mm] = vorticity_bc_1[mm]
        vorticity_bc_2[mm] = 0.0
    #----------------------------------------------------------------------------------

    #---------- Step 3 - Solve the vorticity transport equation ----------------------
    # Taylor Galerkin Scheme (disabled alternative)
    #scheme_name = 'Taylor Galerkin'
    #A = np.copy(M)/dt
    #vorticity_RHS = sps.lil_matrix.dot(A,w) - np.multiply(vx,sps.lil_matrix.dot(Gx,w))\
    #       - np.multiply(vy,sps.lil_matrix.dot(Gy,w))\
    #       - (dt/2.0)*np.multiply(vx,(np.multiply(vx,sps.lil_matrix.dot(Kxx,w)) + np.multiply(vy,sps.lil_matrix.dot(Kyx,w))))\
    #       - (dt/2.0)*np.multiply(vy,(np.multiply(vx,sps.lil_matrix.dot(Kxy,w)) + np.multiply(vy,sps.lil_matrix.dot(Kyy,w))))
    #vorticity_RHS = np.multiply(vorticity_RHS,vorticity_bc_2)
    #vorticity_RHS = vorticity_RHS + vorticity_bc_dirichlet
    #w = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,w, maxiter=1.0e+05, tol=1.0e-05)
    #w = w[0].reshape((len(w[0]),1))

    # Linear Semi-Lagrangian Scheme (disabled alternative)
    #scheme_name = 'Semi Lagrangian Linear'
    #w_d = semi_lagrangian.Linear2D(npoints, neighbors_elements, IEN, x, y, vx, vy, dt, w)
    #A = np.copy(M)/dt
    #vorticity_RHS = sps.lil_matrix.dot(A,w_d)
    #
    #vorticity_RHS = vorticity_RHS + (1.0/Re)*vorticity_bc_neumann
    #vorticity_RHS = np.multiply(vorticity_RHS,vorticity_bc_2)
    #vorticity_RHS = vorticity_RHS + vorticity_bc_dirichlet
    #
    #w = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,w, maxiter=1.0e+05, tol=1.0e-05)
    #w = w[0].reshape((len(w[0]),1))

    # Mini Semi-Lagrangian Scheme (disabled alternative)
    #scheme_name = 'Semi Lagrangian Mini'
    #w_d = semi_lagrangian.Mini2D(npoints, neighbors_elements, IEN, x, y, vx, vy, dt, w)
    #A = np.copy(M)/dt
    #vorticity_RHS = sps.lil_matrix.dot(A,w_d)
    #
    #vorticity_RHS = vorticity_RHS + (1.0/Re)*vorticity_bc_neumann
    #vorticity_RHS = np.multiply(vorticity_RHS,vorticity_bc_2)
    #vorticity_RHS = vorticity_RHS + vorticity_bc_dirichlet
    #
    #w = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,w, maxiter=1.0e+05, tol=1.0e-05)
    #w = w[0].reshape((len(w[0]),1))

    # Quad Semi-Lagrangian Scheme (the active scheme)
    scheme_name = 'Semi Lagrangian Quad'
    w_d = semi_lagrangian.Quad2D(npoints, neighbors_elements, IEN, x, y, vx, vy, dt, w)
    A = np.copy(M)/dt
    vorticity_RHS = sps.lil_matrix.dot(A,w_d)
    vorticity_RHS = vorticity_RHS + (1.0/Re)*vorticity_bc_neumann
    vorticity_RHS = np.multiply(vorticity_RHS,vorticity_bc_2)
    vorticity_RHS = vorticity_RHS + vorticity_bc_dirichlet
    w = scipy.sparse.linalg.cg(vorticity_LHS,vorticity_RHS,w, maxiter=1.0e+05, tol=1.0e-05)
    w = w[0].reshape((len(w[0]),1))
    #----------------------------------------------------------------------------------

    #---------- Step 4 - Solve the streamline equation --------------------------------
    # Solve Streamline
    # psi condition
    streamfunction_RHS = sps.lil_matrix.dot(M,w)
    streamfunction_RHS = np.multiply(streamfunction_RHS,condition_streamfunction.bc_2)
    streamfunction_RHS = streamfunction_RHS + condition_streamfunction.bc_dirichlet
    psi = scipy.sparse.linalg.cg(condition_streamfunction.LHS,streamfunction_RHS,psi, maxiter=1.0e+05, tol=1.0e-05)
    psi = psi[0].reshape((len(psi[0]),1))
    #----------------------------------------------------------------------------------

    #---------- Step 5 - Compute the velocity field -----------------------------------
    # Velocity vx = d(psi)/dy
    xvelocity_RHS = sps.lil_matrix.dot(Gy,psi)
    xvelocity_RHS = np.multiply(xvelocity_RHS,condition_xvelocity.bc_2)
    xvelocity_RHS = xvelocity_RHS + condition_xvelocity.bc_dirichlet
    vx = scipy.sparse.linalg.cg(condition_xvelocity.LHS,xvelocity_RHS,vx, maxiter=1.0e+05, tol=1.0e-05)
    vx = vx[0].reshape((len(vx[0]),1))

    # Velocity vy = -d(psi)/dx
    yvelocity_RHS = -sps.lil_matrix.dot(Gx,psi)
    yvelocity_RHS = np.multiply(yvelocity_RHS,condition_yvelocity.bc_2)
    yvelocity_RHS = yvelocity_RHS + condition_yvelocity.bc_dirichlet
    vy = scipy.sparse.linalg.cg(condition_yvelocity.LHS,yvelocity_RHS,vy, maxiter=1.0e+05, tol=1.0e-05)
    vy = vy[0].reshape((len(vy[0]),1))
    #----------------------------------------------------------------------------------

end_time = time()
solution_time = end_time - start_time
print ' time duration: %.1f seconds' %solution_time
print ""

print ' ----------------'
print ' SAVING RELATORY:'
print ' ----------------'
print ""
print ' End simulation. Relatory saved in %s' %directory_name
print ""

# -------------------------------- Export Relatory ---------------------------------------
# NOTE(review): benchmark_problem is defined earlier in the file (outside this excerpt).
relatory.export(directory_name, sys.argv[0], benchmark_problem, scheme_name, mesh_name, equation_number, npoints, nelem, length_min, dt, nt, Re, Sc, import_mesh_time, assembly_time, bc_apply_time, solution_time, polynomial_order, gausspoints)
|
# Copyright 2013 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import InvalidLifecycleConfigError
# Relevant tags for the lifecycle configuration XML document.
LIFECYCLE_CONFIG = 'LifecycleConfiguration'
RULE = 'Rule'
ACTION = 'Action'
# Action element names.
DELETE = 'Delete'
SET_STORAGE_CLASS = 'SetStorageClass'
CONDITION = 'Condition'
# Condition element names.
AGE = 'Age'
CREATED_BEFORE = 'CreatedBefore'
NUM_NEWER_VERSIONS = 'NumberOfNewerVersions'
IS_LIVE = 'IsLive'
MATCHES_STORAGE_CLASS = 'MatchesStorageClass'
# List of all action elements.
LEGAL_ACTIONS = [DELETE, SET_STORAGE_CLASS]
# List of all condition elements.
LEGAL_CONDITIONS = [AGE, CREATED_BEFORE, NUM_NEWER_VERSIONS, IS_LIVE,
                    MATCHES_STORAGE_CLASS]
# List of conditions elements that may be repeated.
LEGAL_REPEATABLE_CONDITIONS = [MATCHES_STORAGE_CLASS]
class Rule(object):
    """
    A lifecycle rule for a bucket.

    Acts as a SAX-style handler: startElement/endElement are fed tags from
    the lifecycle XML document and validate nesting as they go.

    :ivar action: Action to be taken.
    :ivar action_text: The text value for the specified action, if any.
    :ivar conditions: A dictionary of conditions that specify when the action
        should be taken. Each item in the dictionary represents the name and
        value (or a list of multiple values, if applicable) of a condition.
    """

    def __init__(self, action=None, action_text=None, conditions=None):
        self.action = action
        self.action_text = action_text
        self.conditions = conditions or {}

        # Name of the current enclosing tag (used to validate the schema).
        self.current_tag = RULE

    def validateStartTag(self, tag, parent):
        """Verify parent of the start tag."""
        if self.current_tag != parent:
            raise InvalidLifecycleConfigError(
                'Invalid tag %s found inside %s tag' % (tag, self.current_tag))

    def validateEndTag(self, tag):
        """Verify end tag against the start tag."""
        if tag != self.current_tag:
            raise InvalidLifecycleConfigError(
                'Mismatched start and end tags (%s/%s)' %
                (self.current_tag, tag))

    def startElement(self, name, attrs, connection):
        """Handle an opening tag: validate nesting and record the new tag."""
        if name == ACTION:
            self.validateStartTag(name, RULE)
        elif name in LEGAL_ACTIONS:
            self.validateStartTag(name, ACTION)
            # Verify there is only one action tag in the rule.
            if self.action is not None:
                raise InvalidLifecycleConfigError(
                    'Only one action tag is allowed in each rule')
            self.action = name
        elif name == CONDITION:
            self.validateStartTag(name, RULE)
        elif name in LEGAL_CONDITIONS:
            self.validateStartTag(name, CONDITION)
            # Verify there is no duplicate conditions.
            if (name in self.conditions and
                    name not in LEGAL_REPEATABLE_CONDITIONS):
                raise InvalidLifecycleConfigError(
                    'Found duplicate non-repeatable conditions %s' % name)
        else:
            raise InvalidLifecycleConfigError('Unsupported tag ' + name)
        self.current_tag = name

    def endElement(self, name, value, connection):
        """Handle a closing tag: record its value and pop back to the parent."""
        self.validateEndTag(name)
        if name == RULE:
            # We have to validate the rule after it is fully populated because
            # the action and condition elements could be in any order.
            self.validate()
        elif name == ACTION:
            self.current_tag = RULE
        elif name in LEGAL_ACTIONS:
            if name == SET_STORAGE_CLASS and value is not None:
                self.action_text = value.strip()
            self.current_tag = ACTION
        elif name == CONDITION:
            self.current_tag = RULE
        elif name in LEGAL_CONDITIONS:
            self.current_tag = CONDITION
            # Some conditions specify a list of values.
            if name in LEGAL_REPEATABLE_CONDITIONS:
                if name not in self.conditions:
                    self.conditions[name] = []
                self.conditions[name].append(value.strip())
            else:
                self.conditions[name] = value.strip()
        else:
            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)

    def validate(self):
        """Validate the rule."""
        if not self.action:
            raise InvalidLifecycleConfigError(
                'No action was specified in the rule')
        if not self.conditions:
            raise InvalidLifecycleConfigError(
                'No condition was specified for action %s' % self.action)

    def to_xml(self):
        """Convert the rule into XML string representation."""
        s = ['<' + RULE + '>']
        s.append('<' + ACTION + '>')
        if self.action_text:
            s.extend(['<' + self.action + '>',
                      self.action_text,
                      '</' + self.action + '>'])
        else:
            s.append('<' + self.action + '/>')
        s.append('</' + ACTION + '>')
        s.append('<' + CONDITION + '>')
        for condition_name in self.conditions:
            # Skip any entries that are not recognized condition tags.
            if condition_name not in LEGAL_CONDITIONS:
                continue
            if condition_name in LEGAL_REPEATABLE_CONDITIONS:
                condition_values = self.conditions[condition_name]
            else:
                # Wrap condition value in a list, allowing us to iterate over
                # all condition values using the same logic.
                condition_values = [self.conditions[condition_name]]
            for condition_value in condition_values:
                s.extend(['<' + condition_name + '>',
                          condition_value,
                          '</' + condition_name + '>'])
        s.append('</' + CONDITION + '>')
        s.append('</' + RULE + '>')
        return ''.join(s)
class LifecycleConfig(list):
    """
    A container of rules associated with a lifecycle configuration.

    Subclasses list: each element is a Rule. Also acts as the root SAX-style
    handler for parsing the lifecycle XML document.
    """

    def __init__(self):
        # Track if root tag has been seen.
        self.has_root_tag = False

    def startElement(self, name, attrs, connection):
        """Handle an opening tag; returns a new Rule for each <Rule> tag."""
        if name == LIFECYCLE_CONFIG:
            if self.has_root_tag:
                raise InvalidLifecycleConfigError(
                    'Only one root tag is allowed in the XML')
            self.has_root_tag = True
        elif name == RULE:
            if not self.has_root_tag:
                raise InvalidLifecycleConfigError('Invalid root tag ' + name)
            rule = Rule()
            self.append(rule)
            # Returning the rule delegates subsequent events to it.
            return rule
        else:
            raise InvalidLifecycleConfigError('Unsupported tag ' + name)

    def endElement(self, name, value, connection):
        """Handle a closing tag; only the root tag is expected here."""
        if name == LIFECYCLE_CONFIG:
            pass
        else:
            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)

    def to_xml(self):
        """Convert LifecycleConfig object into XML string representation."""
        s = ['<?xml version="1.0" encoding="UTF-8"?>']
        s.append('<' + LIFECYCLE_CONFIG + '>')
        for rule in self:
            s.append(rule.to_xml())
        s.append('</' + LIFECYCLE_CONFIG + '>')
        return ''.join(s)

    def add_rule(self, action, action_text, conditions):
        """
        Add a rule to this Lifecycle configuration. This only adds the rule to
        the local copy. To install the new rule(s) on the bucket, you need to
        pass this Lifecycle config object to the configure_lifecycle method of
        the Bucket object.

        :type action: str
        :param action: Action to be taken.

        :type action_text: str
        :param action_text: Value for the specified action.

        :type conditions: dict
        :param conditions: A dictionary of conditions that specify when the
            action should be taken. Each item in the dictionary represents the name
            and value of a condition.
        """
        rule = Rule(action, action_text, conditions)
        self.append(rule)
|
class Lemmikki:
    """A pet (lemmikki) with a name and an owner."""

    def __init__(self, nimi, omistaja):
        self.nimi = nimi
        self.omistaja = omistaja

    def tulosta_tiedot(self):
        """Print the pet's name and owner, one per line."""
        for tieto in (self.nimi, self.omistaja):
            print(tieto)
class Koira(Lemmikki):
    """A dog: a pet that additionally has a breed (rotu) and can bark."""

    def __init__(self, nimi, omistaja, rotu):
        super().__init__(nimi, omistaja)
        self.rotu = rotu

    def tulosta_tiedot(self):
        """Print name and owner via the base class, then the breed."""
        super().tulosta_tiedot()
        print(self.rotu)

    def hauku(self):
        """Bark."""
        print("Hau hau")
# Demo: the commented-out lines exercised the base class directly.
# lem = Lemmikki('Seppo', 'Musti')
#
# lem.tulosta_tiedot()
seppo = Koira("Seppo", "Musti", "Suomen pystykorva")
seppo.tulosta_tiedot()
seppo.hauku()
import numpy
# Read an n x m integer matrix from stdin and print, in order:
# per-row means (axis=1), per-column variances (axis=0), and the
# standard deviation over all elements.
n, m = map(int, input().split())
a = []
for i in range(n):
    a.append(list(map(int, input().split())))
my_array = numpy.array(a)
print(numpy.mean(my_array, axis=1))
print(numpy.var(my_array, axis=0))
print(numpy.std(my_array))
|
from math import*
# Reads a sphere radius r, a cap height a, and a selector n; prints either
# the spherical-cap volume (n == 1) or the remaining sphere volume (n == 2),
# rounded to 4 decimal places.
r = float(input())
a = float(input())
n = int(input())
va = (4*(pi)*(r**3))/3      # full sphere volume
vc = ((pi)*(a**2)*(3*r-a))/3  # spherical-cap volume for cap height a
com= va-vc                  # sphere minus cap
if(n == 1):
    print(round(vc,4))
if(n == 2):
    print(round(com,4))
from rpy2ica import fastica as rica
import numpy as np
class TestICA:
    """Tests the rpy2-backed fastica wrapper against two known sources.

    NOTE(review): Python 2 only (uses xrange); requires the project module
    ``rpy2ica`` and a working R installation.
    """
    def setup(self):
        # Two known sources: a sine wave and a periodic ramp, mixed with a
        # fixed 2x2 matrix to form the observed data X.
        self.signals = np.vstack([np.sin([x/20.0 for x in xrange(1,1001)]),(1.0 + np.mod(xrange(1000),200) - 100.0)/100.0])
        self.mixing = np.array([[0.291, 0.6557], [-0.5439, 0.5572]])
        self.X = np.dot(self.mixing,self.signals)
        # Run both the native-R and the R-calling-C implementations once.
        self.AR,self.WR,self.SR = rica(self.X,2,method="R",maxIterations=10000)
        self.AC,self.WC,self.SC = rica(self.X,2,method="C",maxIterations=10000)
    def test_R_W_orthogonality(self):
        # The unmixing matrix should be (near-)orthogonal: W^T W ~ I.
        assert np.allclose(np.dot(self.WR.T,self.WR),np.eye(2),atol=1.0e-06),"native R: W^TW not within 1.0e-06 of I"
    def test_R_S_recovery(self):
        from scipy.linalg import det
        # |det| of the cross-correlation block ~ 1 means sources recovered
        # up to permutation/sign.
        assert np.allclose(1.0,np.abs(det(np.corrcoef(self.SR,self.signals)[0:2,2:])),atol=1.0e-03),"native R: |det(rho(ShatT,S))| not within 1e-03 of unity"
    def test_C_W_orthogonality(self):
        assert np.allclose(np.dot(self.WC.T,self.WC),np.eye(2),atol=1.0e-06),"R calling C: W^TW not within 1.0e-06 of I"
    def test_C_S_recovery(self):
        from scipy.linalg import det
        assert np.allclose(1.0,np.abs(det(np.corrcoef(self.SC,self.signals)[0:2,2:])),atol=1.0e-03),"R calling C: |det(rho(ShatT,S))| not within 1e-03 of unity"
|
import glob
# Directories whose .py files must carry the legal header.
PATHS_REQUIRING_HEADER = ["kedro_server", "tests"]
# File containing the expected header text.
LEGAL_HEADER_FILE = "legal_header.txt"
# Notice file that must exist at the project root with the LICENSE text below.
LICENSE_MD = "LEGAL_NOTICE.md"
# ANSI escape codes for error output.
RED_COLOR = "\033[0;31m"
NO_COLOR = "\033[0m"
LICENSE = """Copyright (c) 2020 - present
"""
def files_at_path(path: str):
    """Recursively collect every ``*.py`` file under *path*."""
    pattern = path + "/**/*.py"
    return glob.glob(pattern, recursive=True)
def files_missing_substring(file_names, substring):
    """Yield each file whose content is non-empty and lacks *substring*."""
    for name in file_names:
        with open(name, "r", encoding="utf-8") as handle:
            text = handle.read()
        # Empty (whitespace-only) files are exempt from the check.
        if text.strip() and substring not in text:
            yield name
def main():
    """Fail (non-zero exit) if any tracked .py file lacks the legal header,
    or if LEGAL_NOTICE.md is missing / has the wrong contents."""
    exit_code = 0
    with open(LEGAL_HEADER_FILE) as header_f:
        header = header_f.read()
    # find all .py files recursively
    files = [
        new_file for path in PATHS_REQUIRING_HEADER for new_file in files_at_path(path)
    ]
    # find all files which do not contain the header and are non-empty
    files_with_missing_header = list(files_missing_substring(files, header))
    # exit with an error and print all files without header in red, if any
    if files_with_missing_header:
        print(
            RED_COLOR
            + "The legal header is missing from the following files:\n- "
            + "\n- ".join(files_with_missing_header)
            + NO_COLOR
            + "\nPlease add it by copy-pasting the below:\n\n"
            + header
            + "\n"
        )
        exit_code = 1
    # check the LICENSE.md exists and has the right contents
    try:
        files = list(files_missing_substring([LICENSE_MD], LICENSE))
        if files:
            print(
                RED_COLOR
                + "Please make sure the LEGAL_NOTICE.md file "
                + "at the root of the project "
                + "has the right contents."
                + NO_COLOR
            )
            exit(1)
    except IOError:
        # LEGAL_NOTICE.md could not be opened at all.
        print(
            RED_COLOR
            + "Please add the LEGAL_NOTICE.md file at the root of the project "
            "with the appropriate contents." + NO_COLOR
        )
        exit(1)
    # if it doesn't exist, send a notice
    exit(exit_code)
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Entry point string registering the component with Trinity's
# 'trinity.components' plugin group.
entry_point = 'peer_count_reporter_component=peer_count_reporter_component:PeerCountReporterComponent'

setup(
    name='trinity-peer-count-reporter-component',
    py_modules=['peer_count_reporter_component'],
    entry_points={
        'trinity.components': entry_point,
    },
)
|
import os
import csv
# Input / output locations, relative to this script's working directory.
bank_csv_path = os.path.join("..","Resources","budget_data.csv")
results_txt_path = os.path.join("..","Output","budget_data_result.txt")

# Read the budget CSV (Date, Profit/Loss) and accumulate:
# month count, net total, average month-over-month change, and the
# greatest single-month increase/decrease with their dates.
with open (bank_csv_path,"r",newline = "") as bankfile:
    bankreader = csv.reader(bankfile,delimiter = ",")
    bankheader = next(bankreader)  # skip header row
    row = next(bankreader)  # first data row seeds the running totals
    profit_loss = float(row[1])
    tot_months = 1  # set initial values
    tot_profit_loss = profit_loss
    sum_of_change = 0
    # BUG FIX: the original `if > 0 / elif < 0` pair left greatest_profit,
    # great_profit_date, greatest_loss and great_loss_date undefined when
    # the first value was exactly 0, causing a NameError below. All four
    # are now always initialized; behavior for non-zero first rows is
    # unchanged.
    if profit_loss > 0:
        greatest_profit = profit_loss
        great_profit_date = row[0]
        greatest_loss = 0
        great_loss_date = " "
    else:
        greatest_profit = 0
        great_profit_date = " "
        greatest_loss = profit_loss if profit_loss < 0 else 0
        great_loss_date = row[0] if profit_loss < 0 else " "
    for row in bankreader:  # loop through remaining rows
        change = float(row[1]) - profit_loss  # change from previous month
        sum_of_change += change
        tot_months += 1
        profit_loss = float(row[1])  # set new values for running totals
        tot_profit_loss = tot_profit_loss + profit_loss
        if profit_loss > greatest_profit:
            greatest_profit = profit_loss
            great_profit_date = row[0]
        if profit_loss < greatest_loss:
            greatest_loss = profit_loss
            great_loss_date = row[0]

result = []  # prepare results
result.append(" Financial Analysis")
result.append("-----------------------------")
result.append(f" Total Months :{tot_months}")
result.append(" Total Net Profit/Loss: ${:0,.0f}".format(tot_profit_loss))
result.append(" Average Change: ${:>#11,.2f}".format(sum_of_change / (tot_months - 1)))
result.append(" Greatest Increase in Profits: {0} (${1:>#13,.0f})".format(great_profit_date,greatest_profit))
result.append(" Greatest Decrease in Profits: {0} (${1:>#13,.0f})".format(great_loss_date,greatest_loss))

for line in result:  # send results to terminal
    print (line)

with open (results_txt_path,"w") as resultfile:  # send results to txt file
    for line in result:
        resultfile.write(line +"\n")
'''
Created on 15.08.2020
@author: Max Weise
'''
'''This file contains all custom exceptions used in the 'main.py' module'''
class NameException(Exception):
    """Raised when a supplied name contains spaces."""

    # User-facing message printed by the main program.
    message = '\n === Name may not contain spaces === \n'
class NotFoundException(Exception):
    """Raised when a requested element cannot be found."""

    # User-facing message printed by the main program.
    message = '\n === Element not found === \n'
class DuplicateException(Exception):
    """Raised when an element duplicates an existing one (e.g. by name)."""

    # User-facing message printed by the main program.
    message = '\n === The element is a duplicate of some sort. Check for name === \n'
# Running this module directly only explains its purpose.
if __name__ == '__main__':
    print('This module contains the custom exceptions raised in the main program.\nTo run the program, run the main.py module' )
import RPi.GPIO as GPIO, time
# Traffic-light sequence on a Raspberry Pi: red (5s) -> red+yellow (2s)
# -> green (5s) -> yellow (2s), repeated forever. Uses BOARD pin numbering.
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# BOARD pin numbers for each lamp.
red=11
yellow=13
green=15
GPIO.setup(red, GPIO.OUT)
GPIO.setup(yellow, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
while (True):
    GPIO.output(red, GPIO.HIGH)
    time.sleep(5)
    GPIO.output(yellow, GPIO.HIGH)
    time.sleep(2)
    GPIO.output(red, GPIO.LOW)
    GPIO.output(yellow, GPIO.LOW)
    GPIO.output(green, GPIO.HIGH)
    time.sleep(5)
    GPIO.output(green, GPIO.LOW)
    GPIO.output(yellow, GPIO.HIGH)
    time.sleep(2)
    GPIO.output(yellow, GPIO.LOW)
|
from Warhammer_2ed_Karta_Postaci.MaszynaLosująca import RzutyKoscia
"""
aby wylosować imie człowieka wpisz:
wybierz_imie_czlowiek_mezczyzna()
lub
wybierz_imie_czlowiek_kobieta()
aby wylosowac imie elfa wpisz:
wybierz_imie_elf_mezczyzna()
lub
wybierz_imie_elf_kobieta()
aby wylosowac imie krasnoluda wpisz:
wybierz_imie_krasnolud_mezczyzna()
lub
wybierz_imie_krasnolud_kobieta()
aby wylosowac imie niziolka wpisz:
wybierz_imie_niziolek_mezczyzna()
lub
wybierz_imie_niziolek_kobieta()
"""
def wybierz_imie_czlowiek_mezczyzna(self=None):
    """Pick a male human name from one of three d100 tables.

    BUG FIX: the original default ``self=RzutyKoscia.rzuc_koscia_100()`` was
    evaluated ONCE at import time, so every no-arg call returned the same
    name. The roll is now made lazily on each call; passing an explicit
    1-100 value still works as before.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    if 1 <= self <= 33:
        return imie_czlowiek_mezczyzna_1()
    elif 34 <= self <= 66:
        return imie_czlowiek_mezczyzna_2()
    elif 67 <= self <= 100:
        return imie_czlowiek_mezczyzna_3()

def wybierz_imie_czlowiek_kobieta(self=None):
    """Pick a female human name from one of three d100 tables.

    Now rolls by default when called with no argument, consistent with
    wybierz_imie_czlowiek_mezczyzna (backward-compatible: an explicit roll
    may still be passed).
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    if 1 <= self <= 33:
        return imie_czlowiek_kobieta_1()
    elif 34 <= self <= 66:
        return imie_czlowiek_kobieta_2()
    elif 67 <= self <= 100:
        return imie_czlowiek_kobieta_3()

def wybierz_imie_elf_mezczyzna():
    """Build a male elf name: first part + connector + male second part."""
    return (imie_elf_1_czlon() +
            imie_elf_lacznik() +
            imie_elf_mezczyzna_2_czlon())

def wybierz_imie_elf_kobieta():
    """Build a female elf name: first part + connector + female second part."""
    return (imie_elf_1_czlon() +
            imie_elf_lacznik() +
            imie_elf_kobieta_2_czlon())

def wybierz_imie_krasnolud_mezczyzna():
    """Build a male dwarf name from two parts."""
    return (imie_krasnolud_1_czlon() +
            imie_krasnolud_mezczyzna_2_czlon())

def wybierz_imie_krasnolud_kobieta():
    """Build a female dwarf name from two parts."""
    return (imie_krasnolud_1_czlon() +
            imie_krasnolud_kobieta_2_czlon())

def wybierz_imie_niziolek_mezczyzna():
    """Build a male halfling name from two parts."""
    return (imie_niziolek_1_czlon() +
            imie_niziolek_mezczyzna_2_czlon())

def wybierz_imie_niziolek_kobieta():
    """Build a female halfling name from two parts."""
    return (imie_niziolek_1_czlon() +
            imie_niziolek_kobieta_2_czlon())
def imie_czlowiek_mezczyzna_1(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Abelard", "Abehelm", "Admund", "Adred", "Adric", "Agis",
"Alaric", "Alberic", "Albrecht", "Aldebrand", "Aldred", "Aldric", "Alfreid",
"Altmar", "Alric", "Andre", "Andred", "Andric", "Anshelm", "Anton", "Arne",
"Arnulf", "Axel", "Axelbrand", "Baldred", "Baldric", "Baldwin", "Balthasar",
"Barnabas", "Bart", "Bartolf", "Bartomar", "Bernolt", "Bertold", "Bertolf",
"Boris", "Bruno", "Burgolf", "Calvin", "Casimir", "Caspar", "Cedred", "Conrad",
"Corvin", "Dagmar", "Dantmar", "Dankred", "Dekmar", "Detlef", "Diebold", "Diel",
"Dietfried", "Dieter", "Dietmar", "Dietmund", "Dietrich", "Dirk", "Donat", "Durnhelm",
"Eber", "Eckel", "Eckhart", "Edgar", "Edmund", "Edwin", "Ehrhart", "Ehrl", "Ehrwig",
"Eldred", "Elmeric", "Emil", "Engel", "Engelbert", "Engelbrecht", "Engelhart",
"Eodred", "Eomund", "Erdman", "Erdred", "Erkenbrand", "Erasmus", "Erich", "Erman",
"Ernst", "Erwin", "Eugen", "Eustasius", "Ewald", "Fabian", "Faustus", "Felix", "Ferdinand",
"Fleugweiner", "Fosten", "Franz", "Frediger", "Fredric", "Friebald", "Friedrich", "Fulko"]
return imie[self]
def imie_czlowiek_mezczyzna_2(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Gawin", "Gerber", "Gerhart", "Gerlach", "Gernar", "Gerolf", "Gilbrecht",
"Gisbert", "Giselbrecht", "Gismar", "Goran", "Gosbert", "Goswin", "Gotfried", "Gothard",
"Gottolf", "Gotwin", "Gregor", "Greimold", "Grimwold", "Griswold", "Guildo", "Gundolf",
"Gundred", "Gunnar", "Gunter", "Gunther", "Gustaf", "Hadred", "Hadwin", "Hagar", "Hagen",
"Haldred", "Halman", "Hamlyn", "Hand", "Harbrand", "Harman", "Hartmann", "Haug", "Heidric",
"Heimar", "Heinfriem", "Heinman", "Heinrich", "Heinz", "Helmut", "Henlyn", "Hermann",
"Herwin", "Hieronymus", "Hildebart", "Hildebrand", "Hildemar", "Hildemund", "Hildred",
"Hildric", "Horst", "Hugo", "Igor", "Ingwald", "Jander", "Jekil", "Jodokus", "Johann",
"Jonas", "Jorg", "Jorn", "Josef", "Jost", "Jurgen", "Karl", "Kaspar", "Klaus", "Kleber",
"Konrad", "Konradin", "Kurt", "Lamprecht", "Lanfird", "Lanric", "Lanwin", "Leo", "Leopold",
"Levin", "Liebert", "Liebrecht", "Liebwin", "Lienchart", "Linus", "Lodwig", "Lothar",
"Lucius", "Ludwig", "Luitpold", "Lukas", "Lupold", "Lupus", "Luther", "Lutolf"]
return imie[self]
def imie_czlowiek_mezczyzna_3(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Mandred", "Magnus", "Mandred", "Manfred", "Mathias", "Max", "Maximillian",
"Meiner", "Meinhart", "Meinolf", "Mekel", "Merkel", "Nat", "Nathandar", "Nicodemus", "Odamar",
"Odric", "Odwin", "Olbrecht", "Oldred", "Oldric", "Ortlieb", "Ortolf", "Orwin", "Oswald",
"Osric", "Oswin", "Otfried", "Otto", "Otwin", "Paulus", "Prospero", "Ragen", "Ralf", "Rabrecht",
"Randulf", "Ranulf", "Ranald", "Reikhard", "Rein", "Reiner", "Renhard", "Reinolt", "Reuban",
"Rigo", "Roderic", "Rolf", "Ruben", "Rudel", "Rudgar", "Rudolf", "Rufus", "Rusprecht",
"Sebastian", "Semund", "Sepp", "Sieger", "Siegfried", "Siegmund", "Sigismund", "Sigric",
"Steffan", "Tankred", "Theoderic", "Tilmann", "Tomas", "Trubald", "Trubert", "Udo",
"Ulli", "Ulfred", "Ulfman", "Ulman", "Uto", "Valdred", "Valdric", "Varl", "Viggo", "Viktor",
"Vilmar", "Volker", "Volkhard", "Volkrad", "Volkin", "Voltz", "Walbrecht", "Waldor", "Waldred",
"Walther", "Warmund", "Werner", "Wilbert", "Wilfried", "Wilhelm", "Woldred", "Wolfram",
"Wolfhart", "Wolfgang", "Wulf", "Xavier"]
return imie[self]
def imie_czlowiek_kobieta_1(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Abbie", "Abighild", "Abigund", "Abigunda", "Ada", "Ada", "Adel", "Adelind",
"Adeline", "Adelyn", "Adelle", "Adelle", "Agathe", "Agnete", "Aldreda", "Alfreda", "Alicia",
"Allane", "Althea", "Amalie", "Amalinde", "Amalyn", "Anhilda", "Annabella", "Anna", "Anthea",
"Arabella", "Aver", "Bechilda", "Bella", "Bella", "Bellane", "Benedicta", "Berlinda", "Berlyn",
"Bertha", "Berthilda", "Bess", "Bess", "Beth", "Broncea", "Brunhilda", "Camilla", "Carla",
"Carlinda", "Carlotta", "Cilicia", "Cilie", "Clora", "Clothilda", "Connie", "Constance",
"Constanza", "Cordelia", "Dema", "Demona", "Desdemona", "Dorthilda", "Drachena", "Drachilda",
"Edhilda", "Edith", "Edyth", "Edythe", "Eleanor", "Eleanor", "Elinor", "Elisinda", "Elsina",
"Ella", "Ellene", "Ellinde", "Eloise", "Elsa", "Elsa", "Elsbeth", "Elspeth", "Elyn", "Emagunda",
"Emelia", "Emme", "Emmalyn", "Emmanuel", "Emerlinde", "Emerlyn", "Erica", "Ermina", "Erminlind",
"Ermintrude", "Esmeralda", "Estelle", "Etheldreda", "Etherlind", "Ethelreda", "Fay", "Frieda",
"Frieda", "Friedhilda", "Friedrun", "Friedrica"]
return imie[self]
def imie_czlowiek_kobieta_2(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Gabby", "Gabby", "Gabriele", "Galina", "Gena", "Genevieve", "Genoveva",
"Gerberga", "Gerda", "Gerlinde", "Gertie", "Gertrud", "Greta", "Greta", "Gretchen", "Grizelda",
"Grunhilda", "Gudrun", "Gudryn", "Hanna", "Hedwig", "Heidi", "Heidrun", "Helga", "Herlinde",
"Herwig", "Hilda", "Hilda", "Hildegart", "Hildegund", "Honoria", "Ida", "Ingrid", "Ingrun",
"Ingrund", "Irmella", "Irmine", "Isabella", "Isadora", "Isolde", "Isolde", "Jocelin",
"Johanna", "Josie", "Karin", "Katarine", "Katheryn", "Katharina", "Katherine", "Katherine",
"Keterlind", "Keterlyn", "Kitty", "Kirsten", "Kirstena", "Kristyn", "Kirsten", "Kirsten",
"Kirstyn", "Lavina", "Lavinia", "Leanor", "Leanora", "Leticia", "Letty", "Lena", "Lenora",
"Lisa", "Lisbeth", "Lizzie", "Lorinda", "Lorna", "Lucinda", "Lucretia", "Lucie", "Ludmilla",
"Mabel", "Madge", "Magdalyn", "Maggie", "Maghilda", "Maglind", "Maglyn", "Magunda", "Magreta",
"Maida", "Marien", "Marietta", "Margaret", "Marget", "Margreta", "Marline", "Marlyn",
"Mathilda", "Maude", "May", "Meg", "Melicent", "Miranda", "Moll"]
return imie[self]
def imie_czlowiek_kobieta_3(self=RzutyKoscia.rzuc_koscia_100()):
imie = ["początek listy", "Nathilda", "Nellie", "Nellie", "Nora", "Nora", "Olga", "Olga", "Ophelia",
"Ophelia", "Osterhild", "Osterhild", "Ostelle", "Ostelle", "Ostia", "Ostia", "Ottagunda",
"Ottagunda", "Ottaline", "Ottaline", "Ottilda", "Ottilda", "Ottilyn", "Ottilyn", "Pertida",
"Pertida", "Pergale", "Pergale", "Pergunda", "Pergunda", "Petronella", "Petronella",
"Philomelia", "Philomelia", "Reikhilda", "Reikhilda", "Renata", "Renata", "Rosabel",
"Rosabel", "Rosabella", "Rosabella", "Rosale", "Rosale", "Rosalia", "Rosalia", "Rosalin",
"Rosalin", "Rosalinde", "Rosalinde", "Rosamunde", "Rosamunde", "Rosanne", "Rosanne", "Rose",
"Rose", "Roz", "Roz", "Rozhilda", "Rozhilda", "Salina", "Salina", "Saltza", "Saltza",
"Sigismunda", "Sigismunda", "Sigrid", "Sigrid", "Sigunda", "Sigunda", "Solla", "Solla",
"Styrine", "Styrine", "Talima", "Talima", "Theodora", "Theodora", "Therese", "Therese",
"Tilea", "Tilea", "Ursula", "Ursula", "Ulrica", "Ulrica", "Valeria", "Valeria", "Verena",
"Verena", "Wilfrieda", "Wilfrieda", "Wilhelmina", "Wilhelmina", "Winifred", "Winifred",
"Wolfhide", "Wolfhide", "Zomelda", "Zomelda", "Zena"]
return imie[self]
def imie_elf_1_czlon(self=None):
    """Return the first element of an elf name for d100 roll ``self`` (1-100).

    Fix: the original default evaluated ``RzutyKoscia.rzuc_koscia_100()`` at
    definition time, so every no-arg call reused one frozen roll; the die is
    now rolled per call.  ``self`` kept as the parameter name for callers.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "Aed", "Ael", "Aelf", "Aen", "Aeth", "Alth", "An", "And", "Ar", "Arg", "Ast",
            "Ath", "Av", "Ban", "Bel", "Beth", "Cad", "Cael", "Caem", "Caeth", "Cal", "Cam", "Cel", "Cir",
            "El", "Eld", "Elth", "En", "End", "Er", "Ers", "Fand", "Fer", "Ferg", "Fim", "Fin", "Gal", "Gald",
            "Gaen", "Gaes", "Ged", "Gen", "Ges", "Geth", "Glor", "Has", "Hath", "Hel", "Heth", "Hith", "Ill",
            "Ind", "Ist", "Ith", "Iy", "Kor", "Ky", "Kyr", "La", "Lan", "Lil", "Lim", "Lith", "Loth", "Mal",
            "Mar", "Mas", "Math", "Me", "Mes", "Meth", "Men", "Mor", "Mort", "Nal", "Nar", "Nen", "Nor", "Norl",
            "Ri", "Riabb", "Riann", "Rid", "Riell", "Rien", "Ruth", "Ryn", "Tab", "Tal", "Tan", "Tar", "Teth",
            "Tel", "Tor", "Ty", "Ull", "Um", "Ur", "Yr", "Yv"]
    return imie[self]
def imie_elf_mezczyzna_2_czlon(self=None):
    """Return the second element of a male elf name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "baen", "baine", "baire", "bar", "bhir", "brier", "brior", "brin", "daen",
            "daine", "daire", "dar", "dhil", "dhir", "drel", "drir", "dror", "eorl", "eos", "eoth", "fil",
            "fin", "fir", "hil", "hin", "hir", "hor", "il", "in", "ion", "ir", "is", "ith", "lael", "laen",
            "laer", "laine", "laire", "lan", "las", "len", "les", "lil", "lin", "lir", "lis", "lor", "los",
            "mael", "maen", "mair", "main", "mal", "mar", "mil", "min", "mir", "nael", "naen", "naer",
            "nail", "nair", "nal", "nan", "nar", "neal", "nean", "near", "nil", "nin", "nir", "nis",
            "ran", "rea", "rel", "ril", "riol", "rion", "rior", "riorl", "riorn", "ril", "ryel", "taen",
            "tair", "tain", "than", "thar", "thel", "thil", "thir", "thin", "thril", "thrin", "thwe",
            "til", "tin", "tis", "we", "yan"]
    return imie[self]
def imie_elf_kobieta_2_czlon(self=None):
    """Return the second element of a female elf name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "a", "aine", "am", "ann", "arma", "arna", "arth", "ath", "beann", "bet",
            "beth", "brim", "brys", "deann", "det", "deth", "dys", "drian", "driel", "drys", "eann",
            "eanna", "earna", "earth", "elle", "eth", "eys", "eyth", "felle", "fionn", "flys", "fyll",
            "fynn", "fyr", "fys", "i", "ille", "ina", "ira", "isa", "ithi", "itt", "la", "lam", "lana",
            "larna", "lath", "leann", "leath", "lel", "lelle", "leth", "let", "lielle", "lieth", "lyann",
            "nelle", "nem", "neth", "ni", "niell", "niella", "nith", "nas", "reann", "rell", "relle",
            "rielle", "ris", "rith", "rys", "rar", "rath", "ser", "seth", "sir", "sith", "sor", "soth",
            "shar", "sher", "shir", "sys", "tar", "teal", "teann", "ter", "thea", "ther", "thi", "thryn",
            "thyn", "tir", "tor", "tos", "tryan", "trys", "yll", "yrs", "ys"]
    return imie[self]
def imie_elf_lacznik(self=None):
    """Return an elf-name connector syllable for d100 roll ``self`` (1-100).

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "a", "a", "al", "al", "an", "an", "ar", "ar", "as", "as", "e", "e", "el",
            "el", "en", "en", "er", "er", "es", "es", "fan", "fan", "fen", "fen", "fin", "fin", "i",
            "i", "il", "il", "in", "in", "ir", "ir", "is", "is", "o", "o", "ol", "ol", "on", "on", "or",
            "or", "os", "os", "ra", "ra", "ral", "ral", "ran", "ran", "re", "re", "rel", "rel", "ren",
            "ren", "ri", "ri", "ril", "ril", "rin", "rin", "ro", "ro", "rol", "rol", "ron", "ron", "ry",
            "ry", "sa", "sa", "sal", "sal", "san", "san", "se", "se", "sel", "sel", "sen", "sen", "si",
            "si", "sil", "sil", "sin", "sin", "so", "so", "sol", "sol", "son", "son", "u", "u", "ul", "ul"]
    return imie[self]
def imie_krasnolud_1_czlon(self=None):
    """Return the first element of a dwarf name for d100 roll ``self`` (1-100).

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "Al", "Ath", "Athran", "Bal", "Bala", "Bara", "Bel", "Bela", "Ber", "Bok",
            "Bor", "Bur", "Da", "Dam", "Dora", "Drok", "Drong", "Dur", "Dwal", "El", "Ela", "Elan", "Elda",
            "Fa", "Far", "Fara", "Fim", "Fima", "Firen", "Fur", "Fura", "Ga", "Gim", "Gol", "Gollen",
            "Got", "Gota", "Grim", "Gro", "Grun", "Hak", "Haka", "Har", "Hega", "Hur", "Kad", "Kar", "Kata",
            "Kaz", "Kaza", "Krag", "Logaz", "Lok", "Lun", "Mo", "Mola", "Mor", "Mora", "No", "Nola", "Nor",
            "Noran", "Nun", "Oda", "Oka", "Olla", "Olf", "Oth", "Othra", "Ro", "Ror", "Roran", "Ska",
            "Skalla", "Skalf", "Skar", "Skor", "Skora", "Snor", "Snora", "Sven", "Thar", "Thor", "Thora",
            "Thron", "Thrun", "Thura", "Un", "Utha", "Ulla", "Vala", "Var", "Vara", "Zak", "Zaka", "Zakan",
            "Zar", "Zara", "Zam", "Zama"]
    return imie[self]
def imie_krasnolud_mezczyzna_2_czlon(self=None):
    """Return the second element of a male dwarf name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "bin", "bin", "bor", "bor", "dil", "din", "din", "dok", "dok", "dor",
            "dor", "drin", "drin", "fin", "fin", "gan", "gan", "gar", "gar", "gil", "gil", "gin",
            "gni", "gni", "grom", "grom", "grond", "grond", "groth", "groth", "grum", "grum", "grund",
            "grund", "grunt", "gon", "gon", "gor", "gor", "grim", "grim", "gron", "gron", "grom", "grom",
            "gul", "gul", "gun", "gun", "gund", "gund", "ki", "ki", "kin", "kin", "krag", "krag", "kri",
            "kri", "krin", "krin", "li", "li", "lin", "lin", "lik", "lik", "lok", "lok", "lun", "lun",
            "lin", "min", "mir", "mir", "nin", "nin", "nir", "nir", "rag", "ri", "ri", "rig", "rig",
            "rik", "rik", "rin", "rin", "run", "run", "skin", "tin", "tin", "tok", "tok", "trek", "trok",
            "zin", "zor", "zor"]
    return imie[self]
def imie_krasnolud_kobieta_2_czlon(self=None):
    """Return the second element of a female dwarf name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "bina", "bina", "bora", "bora", "dila", "dina", "dina", "dorkina", "dorkina",
            "dora", "dora", "drinella", "fina", "fina", "fya", "fya", "gana", "gana", "gara", "gara", "gella",
            "gella", "gina", "groma", "groma", "grondella", "grondella", "grotha", "grotha", "gruma",
            "gruma", "grunda", "grunda", "gruntina", "gruntina", "gona", "gora", "gora", "grimella",
            "grimella", "grina", "grina", "gromina", "gromina", "gula", "grunella", "grunella", "grundina",
            "grundina", "kina", "kina", "kragella", "kragella", "krina", "krina", "kya", "kya", "lina",
            "lina", "likina", "likina", "loka", "loka", "luna", "mina", "mina", "mira", "mira", "nina",
            "nina", "nira", "nira", "nya", "ragina", "ragina", "riga", "riga", "riga", "rika", "rika",
            "rina", "rina", "runa", "runa", "runella", "runella", "skina", "skina", "skinella", "skinella",
            "tina", "toka", "trekella", "trekella", "trekina", "trekina", "troka", "troka", "zina", "zora"]
    return imie[self]
def imie_niziolek_1_czlon(self=None):
    """Return the first element of a halfling name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "Bag", "Balf", "Berc", "Bill", "Bobb", "Bodg", "Bog", "Bom", "Bonn", "Brog",
            "Bulc", "Bull", "Bust", "Cam", "Cap", "Ced", "Chund", "Clog", "Clof", "Cob", "Cog", "Crum",
            "Crump", "Curl", "Dod", "Dog", "Dott", "Dram", "Drub", "Drog", "Dron", "Durc", "Elm", "Enn",
            "Ermin", "Ethan", "Falc", "Fald", "Falm", "Far", "Fild", "Flac", "Fogg", "Frit", "Ful", "Func",
            "Gaff", "Galb", "Gamm", "Gert", "Giff", "Gild", "Gon", "Grop", "Gudd", "Grump", "Ham", "Hal",
            "Hart", "Harp", "Jac", "Jas", "Jasp", "Joc", "Lac", "Lil", "Lob", "Lott", "Lud", "Lurc", "Mad",
            "Mag", "Man", "May", "Mer", "Mul", "Murc", "Murd", "Nag", "Nell", "Nobb", "Od", "Og", "Old",
            "Pipp", "Podd", "Porc", "Riff", "Rip", "Rob", "Sam", "Supp", "Taff", "Talb", "Talc", "Tay",
            "Tom", "Wald", "Watt", "Will"]
    return imie[self]
def imie_niziolek_mezczyzna_2_czlon(self=None):
    """Return the second element of a male halfling name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "belly", "belly", "belly", "belly", "belly", "belly", "belly",
            "er", "er", "er", "er", "er", "er", "er", "er", "fast", "fast", "fast", "fast", "fast",
            "fast", "fast", "in", "in", "in", "in", "in", "in", "in", "it", "it", "it", "it", "it",
            "it", "it", "mutch", "mutch", "mutch", "mutch", "mutch", "mutch", "mutch", "o", "o",
            "o", "o", "o", "o", "o", "o", "odoc", "odoc", "odoc", "odoc", "odoc", "odoc", "odoc",
            "riadoc", "riadoc", "riadoc", "riadoc", "riadoc", "riadoc", "riadoc", "regar", "regar",
            "regar", "regar", "regar", "regar", "regar", "wick", "wick", "wick", "wick", "wick",
            "wick", "wick", "wise", "wise", "wise", "wise", "wise", "wise", "wise", "wit", "wit",
            "wit", "wit", "wit", "wit", "wit", "y", "y", "y", "y", "y", "y", "y"]
    return imie[self]
def imie_niziolek_kobieta_2_czlon(self=None):
    """Return the second element of a female halfling name for d100 roll ``self``.

    Fix: per-call die roll instead of a default frozen at definition time.
    """
    if self is None:
        self = RzutyKoscia.rzuc_koscia_100()
    imie = ["początek listy", "a", "a", "a", "a", "a", "adell", "adell", "adell", "adell", "adell",
            "alot", "alot", "alot", "alot", "alot", "apple", "apple", "apple", "apple", "apple",
            "bell", "bell", "bell", "bell", "bell", "berry", "berry", "berry", "berry", "berry",
            "eena", "eena", "eena", "eena", "eena", "ella", "ella", "ella", "ella", "ella", "era",
            "era", "era", "era", "era", "et", "et", "et", "et", "et", "ia", "ia", "ia", "ia", "ia",
            "flower", "flower", "flower", "flower", "flower", "lotta", "lotta", "lotta", "lotta",
            "lotta", "petal", "petal", "petal", "petal", "petal", "riella", "riella", "riella",
            "riella", "riella", "sweet", "sweet", "sweet", "sweet", "sweet", "trude", "trude",
            "trude", "trude", "trude", "rose", "rose", "rose", "rose", "rose", "willow", "willow",
            "willow", "willow", "willow", "y", "y", "y", "y", "y", ]
    return imie[self]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 17:19:23 2020
@author: billcoleman
Exploring and data munging for the emodb database
.wav audio files labelled for emotions
Based on:
https://towardsdatascience.com/building-a-vocal-emotion-sensor-with-deep-learning-bedd3de8a4a9
# read file data - name as string, split string for emotion category
# set VAD values per emotion
# read in LPMS data - clamp between 0 - 20 kHz - 40 bins, use first 20 only to
cover up to 8 kHz at least
# try 50 ms windows with 25% overlap
# drop silence, select 0.4 sec chunks, define LPMS array of size 13 x ??
# (depends on temporal window)
# scale LPMS (already scaled to dB so - might not be necessary?)
"""
#%matplotlib inline
import matplotlib.pyplot as plt
import librosa.display
import os
import pandas as pd
import numpy as np
# Storage
import pickle
# Audio functions
import librosa as lib
# Normalise data
from sklearn.preprocessing import MinMaxScaler
# Play a file
import simpleaudio as sa
'''
LOAD A FILE
'''
def load_file(file_idx, fnames, filepath):
    '''
    Load the audio file at index ``file_idx`` of ``fnames`` from ``filepath``.

    Returns (samples, full_path).  ``sr=None`` keeps the file's native
    sample rate rather than resampling.
    '''
    name = fnames[file_idx]
    # os.path.join is robust to a missing trailing separator on filepath
    # (the original plain string concatenation silently broke in that case)
    getfile = os.path.join(filepath, str(name))
    # get datapoints and sample rate of file and load it
    samples, sr = lib.load(getfile, sr=None)
    return samples, getfile
'''
PLOTTING/PLAYING FUNCTIONS
'''
def plot_raw_wave(file):
    '''
    Plot the raw waveform of ``file`` (a 1-D sample array) on a new
    14x5-inch figure, rendered at an assumed 16 kHz sample rate.
    '''
    plt.figure(figsize=(14, 5))
    # NOTE(review): sr=16000 is hard-coded — assumes all emodb files are 16 kHz; confirm
    librosa.display.waveplot(file, sr=16000)
def plot_lpms_chunk(win):
    '''
    Display a single LPMS chunk ``win`` (2-D dB matrix) as a spectrogram
    with a dB colorbar.  sr/hop_length match the values used when the
    mel spectrogram was computed (16 kHz, hop 256).
    '''
    librosa.display.specshow(win,
                             sr=16000,
                             x_axis='s',
                             y_axis='linear',
                             hop_length=256)
    plt.colorbar(format='%+2.0f dB')
def play_file(file):
    '''
    Play the .wav file at path ``file`` through the default audio device,
    blocking until playback finishes.
    '''
    wave_obj = sa.WaveObject.from_wave_file(file)
    play_obj = wave_obj.play()
    # Wait until sound has finished playing
    play_obj.wait_done()
'''
DELETE THE SILENCE
'''
def strip_silence(file_to_strip):
    '''
    Takes a non-treated file (1-D sample array) and strips silent segments
    from it - returns the non-silent samples concatenated into one array.

    Segments quieter than 30 dB below peak are dropped (librosa.effects.split
    returns the [start, end) sample intervals of the non-silent regions).
    '''
    intervals = librosa.effects.split(file_to_strip,
                                      top_db=30,
                                      frame_length=1024,
                                      hop_length=256)
    # collect the non-silent slices
    non_silent = [file_to_strip[start:end] for start, end in intervals]
    # guard: a fully-silent file yields no intervals (np.concatenate would raise)
    if not non_silent:
        return np.asarray([])
    # np.concatenate joins the slices in one C-level pass instead of the
    # original quadratic Python-level list flattening
    return np.concatenate(non_silent)
'''
CONVERT TO LPMS
'''
def convert_to_lpms(raw_silenced):
    '''
    Take a raw wave with the silence removed and convert to a LPMS matrix
    (log-power mel spectrogram): 40 mel bins, 1024-sample window,
    256-sample hop, assuming a 16 kHz sample rate.
    Returns a (40, n_frames) matrix in dB relative to the peak.
    '''
    log_pow = librosa.feature.melspectrogram(y=raw_silenced,
                                             sr=16000,
                                             n_mels=40,
                                             win_length=1024,
                                             hop_length=256)
    # scale to dB (ref=np.max => 0 dB at the loudest bin, negative elsewhere)
    log_pow_db = librosa.power_to_db(log_pow, ref=np.max)
    return log_pow_db
'''
CHUNK THE FILE TO 0.4 sec CHUNKS
SCALE THE CONTENTS
SAVE INDIVIDUAL INSTANCES WITH APPROPRIATE LABELS
'''
def chunk_scale_lpms_matrix(lpms_matrix, fname):
    '''
    Split the LPMS matrix up into 0.4 second chunks (25 frames at hop 256 /
    16 kHz), min-max scale each chunk, and label it from the filename.

    Returns (data_scaled_list, labels_list), one entry per chunk.
    '''
    # to track chunks: a sliding 25-frame window
    track_start = 0
    track_end = 25
    # lists to hold data and labels
    data_scaled_list = []
    labels_list = []
    # define scaler (scales each column of the chunk to [0, 1])
    scaler = MinMaxScaler(feature_range=(0, 1))
    # taking the lower 20 bins
    # NOTE(review): rows 20:40 are the UPPER half of the 40 mel bins, which
    # contradicts the "lower 20 bins" comment above — confirm which was intended
    # step through the instance extracting 0.4 sec length chunks
    while track_end < lpms_matrix.shape[1]:
        # get window data
        win = lpms_matrix[20:40, track_start:track_end]
        # scale the data
        win_scaled = scaler.fit_transform(win)
        # append data and labels
        data_scaled_list.append(win_scaled)
        # fname[5] — presumably the emotion letter in the emodb naming scheme
        # (e.g. "03a01Fa.wav"); verify against the corpus documentation
        labels_list.append(fname[5])
        # increment start and end of chunks — stride 1 frame, so successive
        # chunks overlap heavily (24 of 25 frames shared)
        track_start += 1
        track_end += 1
    return data_scaled_list, labels_list
###################################
########## WORKINGS ###############
###################################
# Process every emodb .wav file: load, strip silence, convert to LPMS,
# chunk + scale, and accumulate the chunks and their emotion labels.
# get emodb filenames (NOTE: hard-coded local path)
filenames = os.listdir("/Users/billcoleman/NOTEBOOKS/DublinAI/nlp_emotion/data/emodb/wav")
# filename and path
path = '/Users/billcoleman/NOTEBOOKS/DublinAI/nlp_emotion/data/emodb/wav/'
# create empty series to hold data
lpms_scaled_chunks = []
lpms_scaled_labels = []
for f in range(len(filenames)):
    samples, getfile = load_file(f, filenames, path)
    silence_stripped = strip_silence(samples)
    lpms_ified = convert_to_lpms(silence_stripped)
    data_scaled, labels = chunk_scale_lpms_matrix(lpms_ified, filenames[f])
    # extend the global accumulators with this file's chunks/labels
    for d in data_scaled:
        lpms_scaled_chunks.append(d)
    for l in labels:
        lpms_scaled_labels.append(l)
    # progress indicator (file index)
    print(f)
########################################
########## TABLE MUNGING ###############
########################################
# Map each German emodb emotion code to an English name and VAD
# (Valence/Arousal/Dominance) coordinates, building one metadata row
# per extracted chunk.
# to pick out english emotion names
emotions_eng = ['fear', 'disgust', 'happy', 'bored', 'neutral', 'sad', 'angry']
emotions_code = ['A', 'E', 'F', 'L', 'N', 'T', 'W']
# to assign VAD values - taken from Russell & Mahrabian (1977)
vals_emos_V = [-0.64, -0.6, 0.81, -0.65, 0, -0.63, -0.51]
vals_emos_A = [0.6, 0.35, 0.51, -0.62, 0, -0.27, 0.59]
vals_emos_D = [-0.43, 0.11, 0.46, -0.33, 0, -0.33, 0.25]
# to hold values
vec_eng = []
vec_V = []
vec_A = []
vec_D = []
# to search for german emotion tag
# NOTE(review): np.unique returns the codes sorted alphabetically; the lists
# above rely on that order matching emotions_code (A,E,F,L,N,T,W) exactly —
# this only holds if ALL seven codes actually occur in the data; confirm
unique_ger = np.unique(lpms_scaled_labels).tolist()
# step through all instances, append values to lists
for s in range(0, len(lpms_scaled_labels)):
    find = lpms_scaled_labels[s]
    idx = unique_ger.index(find)
    vec_eng.append(emotions_eng[idx])
    vec_V.append(vals_emos_V[idx])
    vec_A.append(vals_emos_A[idx])
    vec_D.append(vals_emos_D[idx])
# one row per chunk: german code, english name, V, A, D
audiofile_metadata = pd.DataFrame([lpms_scaled_labels,
                                   vec_eng,
                                   vec_V,
                                   vec_A,
                                   vec_D]).T
audiofile_metadata.columns = ["ger", "eng", "V", "A", "D"]
########################################
########## PICKLE DATA #################
########################################
# Persist the label table and the chunk data to external storage.
with open('/Volumes/COLESLAW_1TB/BELL_LABS/emodb_labels.data',
          'wb') as new_data:
    # store the data as a binary pickle stream — protocol 4 (readable on
    # Python >= 3.4; the old comment said protocol 2, which the code does not use)
    pickle.dump(audiofile_metadata, new_data, protocol=4)
print('Saved labels')
with open('/Volumes/COLESLAW_1TB/BELL_LABS/emodb_audio.data',
          'wb') as new_data:
    # same protocol for the audio chunks
    pickle.dump(lpms_scaled_chunks, new_data, protocol=4)
print('Saved data')
|
from airtravel import *
# Demo script: build a flight with a 22-row, 6-seats-per-row Airbus and
# exercise the Flight accessors.
flight = Flight("AB1234", Aircraft("G-UETP", "Airbus plane", num_rows=22, num_seats_per_row=6))
print(type(flight))
print(flight.number())
# NOTE(review): reaches into the private attribute _number — prefer the
# public number() accessor used above
print(flight._number)
print(flight.airline())
print(flight.aircraft_model())
''' M/M/1 '''
import simpy
import numpy as np
def generate_interarrival():
    """Draw one inter-arrival time: exponential with rate 5 (mean 0.2)."""
    mean_interarrival = 1.0 / 5.0
    return np.random.exponential(mean_interarrival)
def generate_service():
    """Draw one service time: exponential with rate 4 (mean 0.25)."""
    mean_service = 1.0 / 4.0
    return np.random.exponential(mean_service)
def factory_run(env, servers):
    """Arrival process: forever wait an exponential gap, then launch a lot."""
    lot_number = 0
    while True:
        lot_number += 1
        # block until the next arrival is due
        yield env.timeout(generate_interarrival())
        print(env.now, 'Lot arrival')
        # hand the new lot over to its own simpy process
        env.process(lot(env, lot_number, servers))
def lot(env, lot, servers):
    """Life cycle of one lot: queue for a server, get loaded, depart.

    NOTE(review): the parameter ``lot`` shadows this function's own name
    (here it is the integer lot id) — works, but confusing; renaming would
    change the keyword interface, so it is only flagged.
    """
    with servers.request() as request:
        print(env.now, 'Lot {} arrives'.format(lot))
        # wait until a server is free
        yield request
        print(env.now, 'lot {} is loaded'.format(lot))
        # hold the server for the service duration
        yield env.timeout(generate_service())
        print(env.now, 'lot {} departs'.format(lot))
# Run the M/M/1 simulation: one server, arrivals at rate 5, service at
# rate 4, for 50 time units, with a fixed seed for reproducibility.
np.random.seed(0)
env = simpy.Environment()
servers = simpy.Resource(env, capacity=1)
env.process(factory_run(env, servers))
env.run(until=50)
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Documenter
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Guiders.Nbconverter"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import os
import copy
import sys
import importlib
from ShareYourSystem.Standards.Classors import Doer
from ShareYourSystem.Standards.Guiders import Celler
#</ImportSpecificModules>
#<DefineLocals>
# Local and remote (Ouvaton) folder layout for the generated documentation.
DocumentOntologyLocalFolderPathStr=SYS.ShareYourSystemLocalFolderPathStr+'Ouvaton/'
DocumentNbviewerLocalFolderPathStr=SYS.ShareYourSystemLocalFolderPathStr+'Ouvaton/'
DocumentLibraryLocalFolderPathStr=SYS.ShareYourSystemLocalFolderPathStr+'docs/LibraryReference/'
DocumentOntologyOuvatonFolderPathStr='/httpdocs/slides/'
DocumentNbviewerOuvatonFolderPathStr='/httpdocs/ipython/'
# All SYS library module dotted names, their slash paths, and bare names.
# NOTE(review): these map() results are indexed/str()-ed later, so this is
# Python 2 code (map returns a list there; in Python 3 it would be an iterator)
DocumentModuleStrsList=SYS.lib()
DocumentModulePathStrsList=map(
    lambda __DocumentModuleStr:
    __DocumentModuleStr.replace('.','/'),
    DocumentModuleStrsList
)
DocumentNameStrsList=map(
    lambda __DocumentModulePathStr:
    __DocumentModulePathStr.split('/')[-1],
    DocumentModulePathStrsList
)
#</DefineLocals>
#<DefineFunctions>
def getDocumentedReadmeInstanceVariableWithFolderPathStr(
    _InstanceVariable,_FolderPathStr
):
    """Build a Readme.md in ``_FolderPathStr`` by notebooking the folder's
    Doc book into Presentation.ipynb and nbconverting it to markdown.
    Returns the (fluent) instance variable."""
    #file first
    return _InstanceVariable.notebook(
        **{
            'FolderedPathStr':_FolderPathStr,
            'GuidingBookStr':"Doc",
            'NotebookingFileKeyStr':"Presentation.ipynb"
        }
    ).nbconvert("Readme.md")
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class DocumenterClass(BaseClass):
    """Build documentation artifacts for one SYS "concept" module and its
    sub-modules: per-submodule readmes, HTML/PHP slide presentations, a
    concatenated concept notebook, a concept readme, library-reference
    pages, and the mkdocs site — then try to deploy them to the Ouvaton
    server (network failures are caught and reported).

    NOTE(review): Python 2 code — relies on map() returning lists and on
    xrange; do not run under Python 3 without porting.
    """

    def default_init(self,
                _DocumentingConceptNameStr="",
                _DocumentingSubReadmeBool=True,
                _DocumentingConceptReadmeBool=True,
                _DocumentingConceptLibraryBool=True,
                _DocumentingConceptSlideBool=True,
                _DocumentingSiteDocumentBool=True,
                _DocumentedConceptModuleVariable=None,
                _DocumentedConceptModuleStr="",
                _DocumentedConceptModuleFolderPathStr="",
                _DocumentedSubNameStrsList=None,
                _DocumentedSubModulesList=None,
                _DocumentedSubModuleStrsList=None,
                _DocumentedPresentationsDictsList=None,
                _DocumentedConceptNotebookDict=None,
                **_KwargVariablesDict
                ):
        """Declare the Documenter's option flags (Documenting*Bool) and
        derived attributes (Documented*) — the SYS Classer machinery turns
        these underscore-prefixed defaults into instance attributes."""
        #Call the parent __init__ method
        BaseClass.__init__(self,**_KwargVariablesDict)

    def do_document(self):
        """Run the documentation pipeline for the concept module named by
        DocumentingConceptNameStr, gated by the Documenting*Bool flags."""
        #debug
        '''
        self.debug(
            [
                ('self.',self,[
                    'DocumentingSubReadmeBool',
                    'DocumentingConceptLibraryBool',
                    'DocumentingConceptNameStr'
                ])
            ]
        )
        '''

        #get the concept module object from the SYS namespace
        self.DocumentedConceptModuleVariable=getattr(
            SYS,
            self.DocumentingConceptNameStr
        )

        #folder on the concept module (sets Foldered* attributes)
        self.folder(
            self.DocumentedConceptModuleVariable
        )

        #set
        DocumentedConceptLocalPathStr=self.FolderedModuleDict['LocalFolderPathStr']

        #get name
        self.DocumentedConceptModuleStr=self.DocumentedConceptModuleVariable.__name__

        #debug
        self.debug(
            [
                ('self.',self,[
                    'FolderedModuleStr',
                    'FolderedDirKeyStrsList'
                ]),
                'Find the DocumentedSubNameStrsList',
                'DocumentNameStrsList is ',
                str(DocumentNameStrsList)
            ]
        )

        #filter: keep sub-directories that are themselves SYS library modules
        self.DocumentedSubNameStrsList=SYS._filter(
            lambda __FolderedDirKeyStr:
            os.path.isdir(
                self.FolderedPathStr+__FolderedDirKeyStr
            ) and __FolderedDirKeyStr in DocumentNameStrsList,
            self.FolderedDirKeyStrsList
        )

        #debug
        '''
        self.debug(
            [
                'DocumentNameStrsList is ',
                str(DocumentNameStrsList),
                ('self.',self,[
                    'DocumentedSubNameStrsList'
                ])
            ]
        )
        '''

        #sort: re-filter against DocumentNameStrsList to impose its ordering
        self.DocumentedSubNameStrsList=SYS._filter(
            lambda __DocumentNameStr:
            __DocumentNameStr in self.DocumentedSubNameStrsList,
            DocumentNameStrsList
        )

        #map the bare names to fully-qualified dotted module strings
        self.DocumentedSubModuleStrsList=map(
            lambda __DocumentedSubNameStr:
            self.DocumentedConceptModuleStr+'.'+__DocumentedSubNameStr,
            self.DocumentedSubNameStrsList
        )

        #debug
        self.debug(
            [
                ('self.',self,[
                    'DocumentedSubNameStrsList',
                    'DocumentedSubModuleStrsList',
                ]
                ),
                'Now we get the DocumentedSubModulesList'
            ]
        )

        #filter: collect the module objects among the concept's attributes
        self.DocumentedSubModulesList=SYS._filter(
            lambda __AttributeValueVariable:
            type(__AttributeValueVariable).__name__=='module',
            self.DocumentedConceptModuleVariable.__dict__.values()
        )

        #debug
        '''
        self.debug((
            'self.',self,[
                'DocumentedSubModulesList'
            ]
        ))
        '''

        #Check: build one Readme.md per sub-module
        if self.DocumentingSubReadmeBool:

            #debug
            self.debug(
                [
                    'we build sub modules readmes here',
                    ('self.',self,[
                        'DocumentedSubModuleStrsList'
                    ])
                ]
            )

            #map (for side effects — Python 2 map is eager)
            map(
                lambda __DocumentedSubModuleStr:
                self.folder(
                    importlib.import_module(
                        __DocumentedSubModuleStr
                    )
                ).scriptbook(
                    _GuideTuplesList=[
                        ('001','Document','Markdown'),
                    ],
                    **{
                        'GuidingBookStr':"Doc",
                    }
                ).notebook(
                    "PreReadme.ipynb"
                ).nbconvert(
                    "Readme.md"
                ),
                self.DocumentedSubModuleStrsList
            )

        #Check: build slides + the concatenated concept notebook
        if self.DocumentingConceptSlideBool:

            #debug
            '''
            self.debug(
                [
                    'we slide here',
                    ('self.',self,['DocumentedSubModuleStrsList'])
                ]
            )
            '''

            #map: nbconvert each sub-module's Presentation to an HTML slide
            map(
                lambda __DocumentedSubModuleStr:
                self.folder(
                    importlib.import_module(
                        __DocumentedSubModuleStr
                    )
                ).scriptbook(
                    _GuideTuplesList=[
                        ('001','Document','Markdown'),
                        ('1','Github','Markdown'),
                    ],
                    **{
                        'GuidingBookStr':"Doc",
                    }
                ).notebook(
                    "Presentation.ipynb"
                ).nbconvert(
                    "Presentation.html",
                    'Slide'
                ),
                self.DocumentedSubModuleStrsList
            )

            #mv for Nbviewer ipython notebooks
            map(
                lambda __DocumentedSubModuleStr:
                os.popen(
                    'cp '+sys.modules[
                        __DocumentedSubModuleStr
                    ].LocalFolderPathStr+'Presentation.ipynb '+DocumentNbviewerLocalFolderPathStr+__DocumentedSubModuleStr.split(
                    '.'
                    )[-1]+'.ipynb'
                ),
                self.DocumentedSubModuleStrsList
            )

            #mv for Ouvaton slide in html
            map(
                lambda __DocumentedSubModuleStr:
                os.popen(
                    'cp '+sys.modules[
                        __DocumentedSubModuleStr
                    ].LocalFolderPathStr+'Presentation.html '+DocumentOntologyLocalFolderPathStr+__DocumentedSubModuleStr.split(
                    '.'
                    )[-1]+'.html'
                ),
                self.DocumentedSubModuleStrsList
            )

            #mv for Ouvaton slide in php
            map(
                lambda __DocumentedSubModuleStr:
                os.popen(
                    'cp '+sys.modules[
                        __DocumentedSubModuleStr
                    ].LocalFolderPathStr+'Presentation.html '+DocumentOntologyLocalFolderPathStr+__DocumentedSubModuleStr.split(
                    '.'
                    )[-1]+'.php'
                ),
                self.DocumentedSubModuleStrsList
            )

            #map: read back each Presentation.ipynb as a dict
            self.DocumentedPresentationsDictsList=map(
                lambda __DocumentedSubModuleStr:
                self.folder(
                    sys.modules[__DocumentedSubModuleStr]
                ).file(
                    'Presentation.ipynb',
                    _ModeStr='r'
                ).file(
                    _ModeStr='c'
                ).FiledReadVariable,
                self.DocumentedSubModuleStrsList
            )

            #debug
            self.debug(
                [
                    ('self.',self,[
                        'DocumentedPresentationsDictsList'
                    ])
                ]
            )

            #copy a fresh notebook skeleton
            self.DocumentedConceptNotebookDict=copy.copy(Celler.CellInitDict)

            #flat: gather every presentation's cells into one list
            DocumentedFlatPresentationsDictsList=SYS.flat(
                map(
                    lambda __DocumentedPresentationsDict:
                    copy.deepcopy(
                        __DocumentedPresentationsDict['worksheets'][0]['cells']
                    ),
                    self.DocumentedPresentationsDictsList
                )
            )

            #Flat all the presentations
            self.DocumentedConceptNotebookDict['worksheets']=[
                {
                    'cells':map(
                        lambda __DocumentedFlatPresentationsDict,__IndexInt:
                        dict(__DocumentedFlatPresentationsDict,**{
                            'prompt_number':__IndexInt}),
                        DocumentedFlatPresentationsDictsList,
                        xrange(len(DocumentedFlatPresentationsDictsList))
                    )
                }
            ]

            #debug
            '''
            self.debug(('self.',self,['DocumentedConceptNotebookDict']))
            '''

            #Write the concatenated concept notebook as json
            self.folder(
                self.DocumentingConceptNameStr
            ).file(
                _KeyStr='Concept'+self.GuidingBookStr+'.ipynb',
                _WriteVariable=self.DocumentedConceptNotebookDict,
                _FormatStr="json"
            ).file(
                _ModeStr='c'
            )

            #nbconvert the concept notebook to a slide
            self.NotebookedCodeDict=self.DocumentedConceptNotebookDict
            self.folder(
                self.DocumentedConceptModuleVariable
            ).nbconvert(
                _FormatStr='Slide',
                _FileKeyStr='Concept'+self.GuidingBookStr+'.ipynb'
            )

            #set
            # NOTE(review): the result of this expression is discarded — it
            # looks like it was meant to be assigned to
            # self.DocumentedSlideLocalFilePathStr (used just below); confirm
            DocumentOntologyLocalFolderPathStr+self.DocumentedConceptModuleVariable.__name__.split('.')[-1]+'.html'

            #cp
            os.popen('cp '+self.FiledPathStr+' '+self.DocumentedSlideLocalFilePathStr+self.DocumentedConceptModuleVariable.__name__.split('.')[-1]+'.ipynb')

            #mv with .html extension
            os.popen(
                'cp '+self.FiledPathStr.replace(
                    '.ipynb',
                    '.html'
                )+' '+self.DocumentedSlideLocalFilePathStr
            )

            #mv with .php extension
            os.popen(
                'mv '+self.FiledPathStr.replace(
                    '.ipynb',
                    '.html'
                )+' '+self.DocumentedSlideLocalFilePathStr.replace('.html','.php')
            )

            #deploy
            # NOTE(review): 'DocumentedConceptModule' is not assigned anywhere
            # visible here (other code uses DocumentedConceptModuleVariable);
            # the bare except below would mask the resulting AttributeError
            try:
                self.deploy(
                    _ClientFilePathStrToServerFilePathStrOrderedDict=collections.OrderedDict(
                        [
                            (
                                self.DocumentedSlideLocalFilePathStr,
                                DocumentOntologyOuvatonFolderPathStr+self.DocumentedConceptModule.__name__.split('.'
                                )[-1]+'.php'
                            )
                        ]
                    )
                )
            except:
                print('There is NO Internet !')

        #Check: build the concept-level Readme.md
        if self.DocumentingConceptReadmeBool:

            #debug
            '''
            self.debug('we build the concept readme here')
            '''

            #import submodules
            '''
            map(
                lambda __DocumentedSubModuleStr:
                importlib.import_modules(__DocumentedSubModuleStr),
                self.DocumentedSubModuleStrsList
            )
            '''

            #readme
            self.folder(
                self.DocumentedConceptModuleVariable
            ).scriptbook(
                _GuideTuplesList=[
                    ('001','Document','Markdown'),
                    ('002','Ouvaton','Markdown'),
                    #('1','Github','Markdown'),
                ],
                **{'GuidingBookStr':"Doc"}
            )

            #notebook then convert to markdown
            self.scriptbook(
                _GuideTuplesList=[]
            ).notebook(
                "PreReadme.ipynb"
            ).nbconvert(
                "Readme.md",
                'Markdown',
            )

        #Check: copy the readme into the library reference folder
        if self.DocumentingConceptLibraryBool:

            #debug
            '''
            self.debug(
                [
                    'we document here',
                    ('self.',self,['DocumentedConceptModuleFolderPathStr'])
                ]
            )
            '''

            '''
            #document
            self.document(
                **{'PackagingModuleVariable':self.DocumentedConceptModuleStr}
            )
            '''

            #folder
            self.folder(
                self.DocumentedConceptModuleVariable
            )

            #copy the concept Readme.md into docs/LibraryReference/<Name>.md
            os.popen(
                'cp '+DocumentedConceptLocalPathStr+'Readme.md '+DocumentLibraryLocalFolderPathStr+self.DocumentedConceptModuleStr.split('.')[-1]+'.md'
            )

        #Check: rebuild the mkdocs site and try to deploy it
        if self.DocumentingSiteDocumentBool:

            #open
            os.popen(
                'mkdocs build --clean'
            )

            #deploy
            # NOTE(review): same unset 'DocumentedConceptModule' attribute as
            # above — the bare except would hide the AttributeError; confirm
            try:
                self.deploy(
                    _ClientFilePathStrToServerFilePathStrOrderedDict=collections.OrderedDict(
                        [
                            (
                                self.DocumentedSlideLocalFilePathStr,
                                DocumentOntologyOuvatonFolderPathStr+self.DocumentedConceptModule.__name__.split('.'
                                )[-1]+'.php'
                            )
                        ]
                    )
                )
            except:
                print('There is NO Internet !')
#</DefineClass>
#</DefinePrint>
# Keep these verbose attributes out of the class's printed representation.
# NOTE(review): 'DocumentedConceptModule' here does not match any init
# attribute ('DocumentedConceptModuleVariable' is the declared one) — confirm
DocumenterClass.PrintingClassSkipKeyStrsList.extend(
    [
        'DocumentingConceptNameStr',
        'DocumentingSubReadmeBool',
        'DocumentingConceptReadmeBool',
        'DocumentingConceptLibraryBool',
        'DocumentingConceptSlideBool',
        'DocumentingSiteDocumentBool',
        'DocumentedConceptModule',
        'DocumentedConceptModuleStr',
        'DocumentedConceptModuleFolderPathStr',
        'DocumentedSubNameStrsList',
        'DocumentedSubModulesList',
        'DocumentedSubModuleStrsList',
        #'DocumentedPresentationsDictsList',
        #'DocumentedConceptNotebookDict'
    ]
)
#<DefinePrint> |
from .pages.product_page import ProductPage
from .pages.basket_page import BasketPage
from .pages.login_page import LoginPage
import pytest
import time
@pytest.mark.authorized_user
class TestUserAddToBasketFromProductPage():
    """Product-page scenarios that require a freshly registered, logged-in user."""

    @pytest.fixture(scope="function", autouse=True)
    def setup(self, browser):
        """Register and log in a throwaway user before every test in this class."""
        # timestamp-based address guarantees a unique registration per run
        email = str(time.time()) + "@fakemail.org"
        password = email
        link = "http://selenium1py.pythonanywhere.com/en-gb/accounts/login/"
        page = LoginPage(browser, link)
        page.open()
        page.register_new_user(email, password)
        page.should_be_authorized_user()

    def test_user_cant_see_success_message(self, browser):
        """A logged-in user who merely opens the product page sees no 'added' banner."""
        link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
        page = ProductPage(browser, link)
        page.open()
        page.should_not_be_success_message()

    @pytest.mark.need_review
    def test_user_can_add_product_to_basket(self, browser):
        """A logged-in user can add the product and sees the correct confirmation."""
        link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
        page = ProductPage(browser, link)
        page.open()
        page.add_product_to_basket()
        page.should_be_correct_success_message()
@pytest.mark.need_review
def test_guest_can_add_product_to_basket(browser):
    """A guest can put the book into the basket and gets the right confirmation."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.add_product_to_basket()
    product_page.should_be_correct_success_message()
@pytest.mark.need_review
def test_guest_can_go_to_login_page_from_product_page(browser):
    """The product page's login link leads to a valid login page."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.go_to_login_page()
    # the login page object is bound to wherever the click landed us
    login_page = LoginPage(browser, browser.current_url)
    login_page.should_be_login_page()
@pytest.mark.need_review
def test_guest_cant_see_product_in_basket_opened_from_product_page(browser):
    """A basket reached from an untouched product page is empty and says so."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.go_to_basket()
    basket_page = BasketPage(browser, product_url)
    basket_page.should_not_be_products_to_basket()
    basket_page.should_be_correct_text_to_empty_basket()
@pytest.mark.xfail
def test_guest_cant_see_success_message_after_adding_product_to_basket(browser):
    """Expected to fail: the site DOES show a banner right after adding."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.add_product_to_basket()
    product_page.should_not_be_success_message()
def test_guest_cant_see_success_message(browser):
    """A guest opening the product page directly must not see the 'added' banner."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.should_not_be_success_message()
def test_guest_should_see_login_link_on_product_page(browser):
    """The product page must expose a login link for guests."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.should_be_login_link()
@pytest.mark.xfail
def test_message_disappeared_after_adding_product_to_basket(browser):
    """Expected to fail: the success banner does not disappear on its own."""
    product_url = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
    product_page = ProductPage(browser, product_url)
    product_page.open()
    product_page.add_product_to_basket()
    product_page.should_be_disappeared()
|
#problem 1
#the function isint(x) checks whether the string x is made of integer or has decimal
#problem 1
#the function isint(x) checks whether the string x is made of integer or has decimal
def isint(x):
    """Return True iff the single-character string ``x`` is a digit '0'-'9'
    or a decimal point '.'.

    Behavior-identical simplification of the original 10-iteration loop:
    set membership makes the intent explicit and stays False for
    multi-character strings (a substring test like ``x in "0123456789."``
    would wrongly accept "12").
    """
    return x in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.'}
#readNumber is the function asked in problem statement
def readNumber(s,i):
#assert:s[i] is a integer
j=i+1
a=len(s)
while (j!=a and isint(s[j])):
#invariant: s[i:j] contains a integer or a float and j is not equal to a
j+=1
return (s[i:j],j)
#signeval takes two floats x,y identifies the sign in string a and performs the required computation
def signeval(x,y,a):
if a=='+':
return x+y
if a=='-':
return x-y
if a=='*':
return x*y
if a=='/':
return x/y
#evalParen is the function asked in problem statement
def evalParen(s,i):
#assert: the input inside brackets may be of four forms
#'sign may be other than +'
#case1 : ()+x
#case2 : ()+()
#case3 : x+y
#case4 : x+()
# only (x) does not make sense
if s[i+1]=='(':
#assert: case 1 or case 2 have to be tackled
(a,b)=evalParen(s,i+1)
#assert: s[b] contains the operator according to which operation has to be done
if s[b+1]=='(':
#assert: case 2 has to be tackled
(c,d)=evalParen(s,b+1)
return (signeval(float(a),float(c),s[b]),d+1)
#assert: case 1 has to be tackled
(c,d)=readNumber(s,b+1)
return (signeval(float(a),float(c),s[b]),d+1)
#assert: either case 3 or case 4
(a,b)=readNumber(s,i+1)
if isint(s[b+1])==True:
#assert: case 3 has to be tackled
(c,d)=readNumber(s,b+1)
return (signeval(float(a),float(c),s[b]),d+1)
#assert: case 4 has to be tackled
(e,f)=evalParen(s,b+1)
return (signeval(float(a),float(e),s[b]),f+1)
#evaluate is the function asked in problem statement
def evaluate(s):
#assert: there are two cases for the input
#case1: single number
#case2: combination like x+y or x+() or ()+x
if isint(s[0]):
(a,b)=readNumber(s,0)
if len(s)==b:
#assert: this is case1
return a
#assert:case2 is the only case left
#in this brackets can be added to the string and evalParen can be used
s='('+s+')'
(b,c)=evalParen(s,0)
return b
#problem 2
# sumcheck checks whther x can be represented as a sum of two distinct numbers from l in a unique way
# sumcheck checks whther x can be represented as a sum of two distinct numbers from l in a unique way
def sumcheck(x, l):
    """Return True iff x equals the sum of two distinct-position elements
    of l in exactly one way (short-circuits as soon as a second way appears)."""
    ways = 0
    n = len(l)
    for first in range(n):
        for second in range(first + 1, n):
            if l[first] + l[second] == x:
                ways += 1
                # a second representation already disqualifies x
                if ways >= 2:
                    return False
    # exactly one representation is required; zero also fails
    return ways == 1
# nextterm provides the next term of the given sequence
def nextterm(l):
    """Return the smallest integer above l[-1] that is the sum of exactly
    one pair of earlier terms (see sumcheck)."""
    candidate = l[-1] + 1
    # every value rejected here fails sumcheck, so candidate is the least
    # extension of the sequence when the loop exits
    while not sumcheck(candidate, l):
        candidate += 1
    return candidate
# sumSequence provides the required list
def sumSequence(n):
    """Return the first n terms of the sequence seeded with [1, 2] in which
    each new term is the least integer expressible as the sum of exactly one
    pair of distinct earlier terms (this appears to be the Ulam sequence).

    Fixed: the original looped forever for n < 2 because the seed list
    already had length 2, so `len(a) != n` never became false.
    """
    a = [1, 2]
    if n <= 2:
        # nothing to generate; slice handles n == 0, 1, 2 (and negatives)
        return a[:max(n, 0)]
    while len(a) != n:
        a.append(nextterm(a))
    return a
#problem 3
def sumlist(l):
    """Return the sum of the elements of l (0 for an empty list).

    Fixed: the hand-rolled accumulator shadowed the builtin name `sum`
    locally and raised IndexError on an empty list; the builtin is both
    correct and handles the empty case.
    """
    return sum(l)
# NOTE(review): this shadows the builtin min() for the rest of the module;
# for two arguments its behavior is identical to the builtin.
def min(a, b):
    """Return the smaller of a and b (a wins ties)."""
    return a if a <= b else b
def minLength(a, n):
    """Return the length of the shortest contiguous sublist of a whose sum
    is strictly greater than n, or -1 when no such sublist exists.

    Improved from O(len(a)**3) to O(len(a)**2): the inner loop maintains a
    running sum instead of re-summing a fresh slice for every (i, j) pair.
    """
    # sentinel larger than any possible answer so "not found" is detectable
    ans = len(a) + 2
    for i in range(len(a)):
        running = 0
        for j in range(i, len(a)):
            # invariant: running == sum(a[i:j+1])
            running += a[j]
            if running > n and j - i + 1 < ans:
                ans = j - i + 1
    if ans == len(a) + 2:
        # no contiguous sublist sums above n
        return -1
    return ans
# problem 4
# Merges two sorted runs of arr[] writing the output to b[l:r]
# First run is arr[l:m], second run is arr[m:r]
def mergeAB(arr, b, l, m, r):
    """Merge the sorted runs arr[l:m] and arr[m:r] into b[l:r].

    Stable: on equal keys the element from the left run is taken first.
    """
    i, j = l, m
    for k in range(l, r):
        # take from the left run when the right run is exhausted, or when
        # the left head is not larger than the right head (stability)
        if j >= r or (i < m and arr[i] <= arr[j]):
            b[k] = arr[i]
            i += 1
        else:
            b[k] = arr[j]
            j += 1
def mergeit(A, B, n, l):
    """Merge adjacent pairs of sorted runs of length l from A into B.

    A (of size n) holds ceil(n/l) sorted runs of length l each (the last run
    may be shorter). Runs are merged in pairs into B; an unpaired trailing
    run (odd run count) is copied over unchanged, so B always ends up a
    complete image of the data.

    Fixed: `right` was only assigned inside the pairing loop, so calling
    with fewer than two runs (n <= l) raised NameError in the copy loop.
    """
    if n % l == 0:
        count = n // l
    else:
        count = n // l + 1
    right = 0  # end of the last merged pair; stays 0 when no pair was merged
    for i in range(count // 2):
        # runs 2i and 2i+1 occupy A[left:right]
        left = i * l * 2
        right = min(left + 2 * l, n)  # the final run may be shorter
        mergeAB(A, B, left, left + l, right)
    # copy any unpaired tail (odd run count, or everything when n <= l)
    for i in range(right, n):
        B[i] = A[i]
def mergeSort(A):
    """Sort the list A in place using bottom-up (iterative) merge sort.

    A and a scratch buffer B alternate as source and destination on each
    pass; if the final pass left the result in the scratch buffer it is
    copied back into A.

    Fixed: the direction flag was named `dir`, shadowing the builtin; the
    buffer allocation and final copy now use list idioms.
    """
    n = len(A)
    run = 1            # current sorted-run length
    B = [0] * n        # scratch buffer for merge passes
    in_scratch = False # True when the latest complete pass lives in B
    while run < n:
        # each pass doubles the length of the sorted runs
        if in_scratch:
            mergeit(B, A, n, run)
        else:
            mergeit(A, B, n, run)
        in_scratch = not in_scratch
        run *= 2
    if in_scratch:
        # result ended up in the scratch buffer: copy it back
        A[:] = B
def mergeContacts(l):
    """Group a list of (name, email) pairs into (name, [emails]) entries.

    The list is sorted in place first so equal names become adjacent; the
    emails of consecutive equal names are then merged into one list.
    Returns [] for an empty input.

    Fixed: the original unconditionally read l[0] and raised IndexError on
    an empty list despite asserting non-emptiness in a comment.
    """
    if not l:
        return []
    mergeSort(l)
    # seed the result with the first contact, its email wrapped in a list
    (a, b) = l[0]
    l[0] = (a, [b])
    ans = [l[0]]
    for i in range(1, len(l)):
        # invariant: ans holds the merged contacts for l[:i]
        (w, x) = l[i - 1]
        (y, z) = l[i]
        if w == y:
            # same name as the previous entry: append this email to the
            # last merged group
            (g, h) = ans[-1]
            h.append(z)
            ans[-1] = (g, h)
        else:
            l[i] = (y, [z])
            ans.append(l[i])
    return ans
|
def get_role(number: int):
    """Return the highest role whose invite threshold `number` meets,
    or None when below every threshold in `roles`."""
    # scan thresholds from highest to lowest; first satisfied one wins
    for threshold in sorted(roles, reverse=True):
        if number >= threshold:
            return roles[threshold]
    return None
def get_next_role(number: int):
    """Return (role name, invite threshold) of the next rank above `number`.

    NOTE(review): once every threshold is met this falls back to
    ('Rank 1', 100), which does not match the top `roles` entry (200) —
    confirm the fallback is intentional.
    """
    for threshold in sorted(roles):
        if number < threshold:
            return roles[threshold], threshold
    return 'Rank 1', 100
def get_previous_role(number: int):
    """Return (role name, threshold) of the highest rank already reached.

    NOTE(review): two unguarded edges — if number is below the lowest
    threshold this evaluates roles[0], and 0 is not a key (KeyError); if
    number is at/above the highest threshold the loop falls through and the
    function implicitly returns None instead of a tuple. Confirm callers
    never hit either case.
    """
    invites_keys = roles.keys()
    invites_keys = sorted(invites_keys)
    previous = 0
    for invites_needed in invites_keys:
        if number < invites_needed:
            return roles[previous], previous
        previous = invites_needed
# Invite-count thresholds mapped to the role name granted at that count.
# Consumed by get_role / get_next_role / get_previous_role above
# (module-level lookup, so defining it after those functions is fine).
roles = {
    1: 'Rank 10',
    3: 'Rank 9',
    8: 'Rank 8',
    15: 'Rank 7',
    30: 'Rank 6',
    50: 'Rank 5',
    75: 'Rank 4',
    100: 'Rank 3',
    150: 'Rank 2',
    200: 'Rank 1',
}
|
# In order to run this file, please install bibtexparser, titlecase.
# Both of them can be installed through pip.
import bibtexparser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import author, page_double_hyphen
import re
from titlecase import titlecase
import textwrap
import argparse
parser = argparse.ArgumentParser(description="Validate bibtex entries.")
parser.add_argument('bibtex_file', type=str,
help='path to the bibtex file')
parser.add_argument('-o', dest="output_file", default='processed_bibtex.bib',
help='path to the output bibtex file (default: processed_bibtex.bib)')
parser.add_argument('-a', dest="author_list_file",
help='path to the publications author list txt file')
args = parser.parse_args()
# Initialize text wrapper for abstracts
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
wrapper.width = 79
# Adapted from bibtexparser's `keyword` customization, changed to operate on
# the plural 'keywords' field name.
# ref: https://bibtexparser.readthedocs.io/en/master/_modules/bibtexparser/customization.html#keyword
def keyword(record, sep=',|;'):
    """Split the 'keywords' field of a record into a list of stripped strings.

    :param record: the record (dict); modified in place and returned.
    :param sep: regexp pattern of accepted separators.
    :returns: dict -- the modified record.
    """
    if "keywords" in record:
        # linebreaks are dropped before splitting, matching the upstream code
        flattened = record["keywords"].replace('\n', '')
        record["keywords"] = [part.strip() for part in re.split(sep, flattened)]
    return record
# Customization pipeline applied to every entry while the file is parsed.
def customizations(record):
    """Run each enabled bibtexparser customization over one record.

    :param record: a record
    :returns: -- customized record
    """
    # keyword() is our local 'keywords'-aware variant defined above
    for transform in (author, keyword, page_double_hyphen):
        record = transform(record)
    return record
# Parse the input .bib file, applying the customization pipeline to each entry.
with open(args.bibtex_file) as bibtex_file:
    parser = BibTexParser()
    parser.customization = customizations
    bib_database = bibtexparser.load(bibtex_file, parser=parser)
# Optional: a plain-text list of known author names (one per line), used
# later to flag unrecognized authors.
authors = None
if args.author_list_file:
    print("# Author list is provided...")
    with open(args.author_list_file) as author_file:
        authors = author_file.read().splitlines()
    print("Total number of authors: {}".format(len(authors)))
    print("")
# print(authors[0])
# print(authors[1])
# ============================================================
# ENTRY FORMAT
# @[type]{[citation key],
# [field name] = {[field value]},
# [field name] = {[multiline
# field value]},
# ...
# [field name] = {
# [long paragraph
# of text]
# },
# [field name] = {[field value]}
# }
# entry type, citation key, and field names in lowercase
# no blank lines
# maximum line length 79 characters [except file field]
# use @inproceedings, not @conference
# citation key in the form [last name][year][optional letter]
# single spaces around '=', do not align field values
# field values enclosed in braces, not quotes
# each field terminated with a comma, except the last field
# authors in [first middle last] format except when [von] or [jr]
# see https://nwalsh.com/tex/texhelp/bibtx-23.html
# =============================================
# ENTRY SORTING
# TODO: section off entries by year
# TODO: The following can be achieved with writer.order_entries_by(...)
# TODO: within each year, sort entries
# by first author
# then by subsequent authors
# then by title
# within each entry, sort fields
# author, title, [collection], [publisher], [date], [other]
# collection = journal, booktitle, edition, editor,
# volume, number, series, chapter, pages
# publisher = publisher, organization, institution, school, address
# date = year, month
# other = keywords, abstract, file
# Fields the script knows how to handle; anything else is reported as
# unrecognized by the check below.
special_fields = ['ID', 'ENTRYTYPE']
# NOTE: Make sure the following fields are listed in the expected display order
fields = ['author',
          'title',
          # collection
          'journal',
          'booktitle',
          'edition',
          'editor',
          'volume',
          'number',
          'series',
          'chapter',
          'pages',
          # publisher
          'publisher',
          'organization',
          'institution',
          'school',
          'address',
          # date
          'year',
          'month',
          # other
          'keywords',
          'abstract',
          'file']
# The following fields will be wrapped according to multiline format
# The 'abstract' field will be wrapped according to long paragraph of text
multiline_fields = ['author',
                    'title',
                    # collection
                    'journal',
                    'booktitle',
                    'edition',
                    'editor',
                    'volume',
                    'number',
                    'series',
                    'chapter',
                    'pages',
                    # publisher
                    'publisher',
                    'organization',
                    'institution',
                    'school',
                    'address',
                    # date
                    'year',
                    'month',
                    # other
                    'keywords',
                    # 'abstract',
                    # 'file'
                    ]
print("# Detecting unrecognized or missing fields...")
# Fields every entry is required to carry.
must_exist_fields = ['title', 'author', 'year', 'file', 'abstract']
# Makes sure the fields are recognized by us, and output fields not in the included list
# Also check certain fields must exist
has_unrecognized = False
has_missing = False
for entry in bib_database.entries:
    ID = entry['ID']
    # if 'author' in entry:
    #     print(entry['author'])
    # if 'keywords' in entry:
    #     print(entry['keywords'])
    # report any field name outside the known lists
    for key, val in entry.items():
        if key not in fields + special_fields:
            print("{} has unrecognized field: ({}: {})".format(ID, key, val))
            has_unrecognized = True
            # print(key, val)
    # report mandatory fields that are absent
    for key in must_exist_fields:
        if key not in entry:
            print("{} is missing field: {}".format(ID, key))
            has_missing = True
if has_unrecognized:
    print("# Please validate/remove the unrecognized fields in the above manually.")
if has_missing:
    print("# Please add the missing fields in the above manually.")
print("")
print("# Validating fields...")
def get_first_last_name(a):
    """Return the name in 'First Last' order.

    Args:
        a (str): Name in 'Last, First' format.
    """
    parts = a.split(", ")
    # names with more (or fewer) than two comma-separated parts are not
    # supported; assert matches the sibling get_last_name
    assert(len(parts) == 2)
    last, first = parts
    return first + ' ' + last
def get_last_name(a):
    """Return only the last name.

    Args:
        a (str): Name in 'Last, First' format.
    """
    parts = a.split(", ")
    # see get_first_last_name: only two-part names are supported
    assert(len(parts) == 2)
    return parts[0]
# Controlled vocabulary: allowed values per restricted field; anything else
# is reported by the loop below.
field_type={
    "ENTRYTYPE": ['inproceedings', 'phdthesis', 'mastersthesis', 'article'],
}
# Check that each field satisfies the correct format
for entry in bib_database.entries:
    # --- citation key: must be lowercase ---
    ID = entry['ID']
    if not ID.islower():
        new_ID = ID.lower()
        print("{} entry's ID has been changed to lowercase {}.".format(ID, new_ID))
        ID = new_ID
        entry['ID'] = ID
    # --- restricted fields: value must come from field_type ---
    for key, val in entry.items():
        if key in field_type:
            if val not in field_type[key]:
                # Special case: if used @conference, change to @inproceedings
                if val == 'conference':
                    entry['ENTRYTYPE'] = 'inproceedings'
                    print("{} field data: ({}: {}) has been changed to 'inproceedings'".format(ID, key, val))
                else:
                    print("{} field data is unrecognized: ({}: {})".format(ID, key, val))
    # --- authors: flag names missing from the provided author list ---
    # NOTE(review): the local name `author` shadows the imported
    # bibtexparser `author` customization; harmless here because the
    # customization was only used during load, but worth renaming.
    if authors is not None:
        if 'author' in entry:
            author = entry['author']
            for a in author:
                if not a in authors:
                    print("{} has unrecognized author {}".format(ID, a))
                if 'von' in a or 'jr' in a:
                    print("{} has author {} with 'von' or 'jr'".format(ID, a))
    # --- citation key should be <first-author-lastname><year>[letter] ---
    if 'author' in entry and 'year' in entry:
        first_author = entry['author'][0]
        expected_id = get_last_name(first_author).lower() + entry['year']
        if expected_id not in ID:
            print("{} is different than the expected ID: {}<optional letter>".format(ID, expected_id))
    # --- authors: join the list back into 'First Last and First Last' ---
    if 'author' in entry:
        author = entry['author']
        first_author = get_first_last_name(author[0])
        author_string = first_author
        for a in author[1:]:
            author_string = author_string + ' and ' + get_first_last_name(a)
        entry['author'] = author_string
    # --- keywords: rejoin with ', ' and warn when not all lowercase ---
    if 'keywords' in entry:
        keywords = entry['keywords']
        keywords_string = keywords[0]
        for k in keywords[1:]:
            keywords_string = keywords_string + ', ' + k
        if not keywords_string.islower():
            print("{} keywords are not all lowercase: {}".format(ID, keywords_string))
        entry['keywords'] = keywords_string
    # --- title: normalize capitalization via titlecase ---
    if 'title' in entry:
        title = entry['title']
        capital_title = titlecase(title)
        entry['title'] = capital_title
    # --- abstract: wrap each paragraph to 79 columns with 2-space indent ---
    if 'abstract' in entry:
        abstract = entry['abstract']
        abstract_paragraphs = abstract.split('\n')
        # print(len(abstract_paragraphs[0]))
        if len(abstract_paragraphs[0]) > 79:
            print("{} field: abstract wrapped at 79.".format(ID))
        # print(len(abstract_paragraphs))
        wrapped_texts = ""
        for abst in abstract_paragraphs:
            splitted_abstract = wrapper.wrap(abst)
            # print(wrapper.wrap(abstract))
            wrapped_text = ""
            for text in splitted_abstract:
                wrapped_text = wrapped_text + "\n" + text
            wrapped_texts = wrapped_texts + wrapped_text + "\n"
        # FIXME: hacking so that the ending bracket for abstract has indent of 2 spaces
        wrapped_texts += "  "
        entry['abstract'] = wrapped_texts
    # --- other fields: wrap long values in the multiline '{key} = {...}'
    # layout; values already containing linebreaks are left alone ---
    # (sic: "skiping" typo below is in a user-facing message)
    for key, val in entry.items():
        if key in multiline_fields:
            if "\n" in val:
                print("{} field: {} already has linebreak, skiping wrap check.".format(ID, key))
            else:
                # simulate the final rendered line to measure its width
                prefixed_val = "  {} = [".format(key) + val
                if len(prefixed_val) > 79:
                    print("{} field: {} wrapped at 79.".format(ID, key))
                    # continuation lines are indented to just past the '['
                    index = prefixed_val.index('[')
                    subsequent_indent = ' '
                    for i in range(index):
                        subsequent_indent += ' '
                    tmp_wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=subsequent_indent)
                    tmp_wrapper.width = 79
                    splitted_texts = tmp_wrapper.wrap(prefixed_val)
                    wrapped_text = ""
                    for text in splitted_texts:
                        wrapped_text = wrapped_text + "\n" + text
                    # strip the leading "\n  {key} = [" scaffold, keeping
                    # only the wrapped value text
                    wrapped_text = wrapped_text[index+2:]
                    entry[key] = wrapped_text
# Final reminders for the checks that cannot be automated.
# Fixed: "correcly" -> "correctly" in the user-facing message below.
print("# Please verify author name list is in correct First Last format.")
print("# Please verify the title is correctly capitalized.")
print("# Please verify all the wrapping is correct.")
# Output the file location, so that user can check that they are correct
print("\n# Please verify the following files are placed in the correct directory accordingly.")
for entry in bib_database.entries:
    ID = entry['ID']
    if 'file' in entry:
        filepath = entry['file']
        print("{}:\t\t{}".format(ID, filepath))
    else:
        print("{} has not specified a file! Fix first.".format(ID))
# Specify the writer, including indent and display order
# (the unused intermediate `bibtex_str = bibtexparser.dumps(...)` dead store
# was removed; dump() below writes the file directly)
writer = BibTexWriter()
# writer.contents = ['comments', 'entries']
# writer.order_entries_by = ('ID', )
writer.indent = '  '
writer.display_order = tuple(fields)
with open(args.output_file, 'w') as bibtex_file:
    bibtexparser.dump(bib_database, bibtex_file, writer)
|
from core.sintaxe import sintaxe
from util.field_util import rename_field
from core.load_dump_file import ler_df
from core import constants as constant
import sys
import getopt
def compare_triggers(table1, table2) ->str:
    """Build the commands that turn table2's triggers into table1's.

    Triggers present only in table2 (matched by event, case-insensitive)
    are dropped; triggers of table1 that are missing or different in
    table2 are (re)created.

    Fixed: the original used `len(...) is 0` (identity test on an int),
    tested `trg2 is []` (always False), and compared the match *list*
    against a trigger object (always unequal).
    """
    result_trigger_list = list()
    # drop triggers that exist in table2 but have no event match in table1
    for trg2 in table2.triggers:
        matches = [tt1 for tt1 in table1.triggers if tt1.event.lower() == trg2.event.lower()]
        if not matches:
            result_trigger_list.append({'action': 'del', 'trigger': trg2})
    # (re)create triggers of table1 that are missing or differ in table2
    for trg1 in table1.triggers:
        matches = [tt2 for tt2 in table2.triggers if tt2.event.lower() == trg1.event.lower()]
        if not matches or matches[0] != trg1:
            result_trigger_list.append({'action': 'add', 'trigger': trg1})
    # sort the result so the output order is deterministic; the unit tests
    # depend on this ordering
    result_trigger_list.sort(key=lambda l: l['trigger'].event)
    command = ""
    for item in result_trigger_list:
        trg = item['trigger']
        action = item['action']
        if action == 'add':
            command += str(trg)
        elif action == 'del':
            command += trg.del_trigger_sintaxe()
        else:
            raise ValueError('Acao nao tratada')
    return command
def compareTable(table1, table2) -> str:
    """Return the UPDATE TABLE command covering the differences between
    table1 (new definition) and table2 (old definition).

    Returns the full definition (str(table1)) when table2 is None, and ''
    when nothing relevant changed. Only LABEL, DESCRIPTION and triggers are
    compared; the AREA and DUMP-NAME comparisons are deliberately disabled.
    """
    dif: bool = False
    comando = sintaxe.UPDATE_TABLE + '\n'
    if table2 is None:
        # new table: emit its complete definition
        return str(table1)
    #if (table1.area != table2.area):
    #    dif = True
    #    comando += "  AREA \"" + table1.area + "\" \n"
    if table1.label != table2.label:
        dif = True
        comando += "  LABEL \"" + table1.label + "\" \n"
    if table1.description != table2.description:
        dif = True
        comando += "  DESCRIPTION \"" + table1.description + "\" \n"
    # if (table1.dump_name != table2.dump_name):
    #     dif = True
    #     comando += "  DUMP-NAME \"" + table1.dump_name + "\" \n"
    if table1.trigger_to_string() != table2.trigger_to_string():
        dif = True
        comando += compare_triggers(table1, table2)
    if dif:
        # the sintaxe template carries a {tableName} placeholder
        return comando.format(tableName=table1.name) + '\n'
    return ''
def compareField(field1, field2)->str:
    """Return the UPDATE FIELD command for the differences between field1
    (new definition) and field2 (old definition).

    A None field2 means the field is new, so its full definition is
    returned. A change of type or extent cannot be expressed as UPDATE and
    is delegated to rename_field. Returns '' when nothing differs.

    Fixed: the mandatory/null-allowed branch did not set `dif`, so a
    mandatory-only change produced no output at all.
    """
    dif: bool = False
    comando = sintaxe.UPDATE_FIELD + '\n'
    if field2 is None:
        return str(field1)
    # NOTE(review): the direct __eq__ call bypasses the reflected
    # comparison protocol; kept as-is to preserve behavior.
    if not(field1.typeField.__eq__(field2.typeField)) or field1.extent != field2.extent:
        return rename_field(field1, field2)
    if field1.description != field2.description:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="DESCRIPTION", prop_value=field1.description)
    if field1.formatt != field2.formatt:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="FORMAT", prop_value=field1.formatt)
    if field1.initial != field2.initial:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="INITIAL", prop_value=field1.initial)
    if field1.label != field2.label:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="LABEL", prop_value=field1.label)
    if field1.position != field2.position:
        dif = True
        comando += sintaxe.PROP_NOT_QUOTE.format(prop_name="POSITION", prop_value=field1.position)
    if field1.columnLabel != field2.columnLabel:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="COLUMN-LABEL", prop_value=field1.columnLabel)
    if field1.help != field2.help:
        dif = True
        comando += sintaxe.PROP_QUOTE.format(prop_name="HELP", prop_value=field1.help)
    if field1.decimals != field2.decimals:
        dif = True
        comando += sintaxe.PROP_NOT_QUOTE.format(prop_name="DECIMALS", prop_value=field1.decimals)
    if field1.order != field2.order:
        dif = True
        comando += sintaxe.PROP_NOT_QUOTE.format(prop_name="ORDER", prop_value=field1.order)
    if field1.mandatory != field2.mandatory:
        dif = True  # fix: flag the change so the command is actually emitted
        if field2.mandatory is True:
            comando += sintaxe.PROP_FLAG.format(prop_flag="NULL-ALLOWED")
        else:
            comando += sintaxe.PROP_FLAG.format(prop_flag="MANDATORY")
    if dif:
        return comando.format(fieldName=field1.name, tableName=field1.nameTable) + '\n'
    return ''
def compare_index(index1, index2) -> str:
    """Return the commands that bring index2 (old) up to date with index1 (new).

    A brand-new index (index2 is None) is emitted directly. A changed index
    is rebuilt: the old one is renamed to <name>_old, the new one is
    created, then the renamed one is dropped. Returns '' when unchanged.
    """
    if index2 is None:
        return str(index1)
    changed = index2.unique != index1.unique or index2.primary != index1.primary
    if not changed:
        # uniqueness/primary flags match; compare field composition/order
        changed = dif_seq(index1, index2)
    if not changed:
        return ""
    comando = sintaxe.RENAME_INDEX.format(
        indexName=index2.name,
        tableName=index2.nameTable,
        newName=index2.name + "_old"
    ) + "\n"
    comando += str(index1)
    comando += sintaxe.DROP_INDEX.format(indexName=index2.name + "_old", tableName=index2.nameTable)
    return comando
def dif_seq(i1, i2) -> bool:
    """Return True when the two indexes differ in their field sets or in
    the sequence position of any shared field."""
    if len(i1.indexField) != len(i2.indexField):
        return True
    for name in i1.indexField:
        other = i2.indexField.get(name, None)
        # missing field, or same field at a different sequence position
        if other is None or i1.indexField[name].seq != other.seq:
            return True
    return False
def drop_table_comando(table)->str:
    """Render the DROP TABLE command for `table`."""
    return sintaxe.DROP_TABLE.format(tableName=table.name)
def drop_field_comando(field)->str:
    """Render the DROP FIELD command for `field` on its owning table."""
    return sintaxe.DROP_FIELD.format(fieldName=field.name,tableName=field.nameTable)
def drop_index_comando(index)->str:
    """Render the DROP INDEX command for `index` on its owning table."""
    return sintaxe.DROP_INDEX.format(indexName=index.name,tableName=index.nameTable)
def get_propriedade(obj, name):
    """Look up `name` in the mapping `obj`, returning None when absent."""
    return obj.get(name)
def obj_is_none(table, prop, obj, funcao):
    """Apply funcao(prop, obj), short-circuiting to None when table is None.

    Used by the dump2-vs-dump1 pass to probe fields/indexes of a table that
    may not exist in the other dump.
    """
    if table is None:
        return None
    return funcao(prop, obj)
def compara_dump1_x_dump2(dump1, dump2) -> str:
    """Generate the commands for tables, fields and indexes that are new or
    changed in dump1 relative to dump2.

    Tables whose name starts with 'agr' are skipped on both sides.
    Fixed: the original used `comando is not ''` — an identity comparison
    against a string literal — replaced with a plain truthiness check.
    """
    retorno = ''
    for table in dump1.tables:
        t1 = dump1.tables.get(table, None)
        t2 = dump2.tables.get(table, None)
        # 'agr'-prefixed tables are excluded from the diff — TODO confirm why
        if t1 is not None and t1.name.startswith('agr'):
            continue
        if t2 is not None and t2.name.startswith('agr'):
            continue
        comando = compareTable(t1, t2)
        if comando:
            retorno += comando
        for field in t1.fields:
            f1 = t1.fields.get(field, None)
            f2 = None if t2 is None else t2.fields.get(field, None)
            comando = compareField(f1, f2)
            if comando:
                retorno += comando
        for index in t1.indexes:
            i1 = t1.indexes.get(index, None)
            i2 = None if t2 is None else t2.indexes.get(index, None)
            comando = compare_index(i1, i2)
            if comando:
                retorno += comando
    return retorno
# DROP the tables, fields and indexes that exist in dump2 but not in dump1.
def compara_dump2_x_dump1(dump1, dump2):
    """Generate DROP commands for every object of dump2 absent from dump1."""
    retorno = ''
    for table in dump2.tables:
        t1 = dump1.tables.get(table, None)
        t2 = dump2.tables.get(table, None)
        if t1 is None:
            # whole table gone: a single DROP TABLE covers its fields/indexes
            retorno += drop_table_comando(t2)
            continue
        for field in t2.fields:
            if obj_is_none(t1, t1.fields, field, get_propriedade) is None:
                retorno += drop_field_comando(t2.fields[field])
        for index in t2.indexes:
            if obj_is_none(t1, t1.indexes, index, get_propriedade) is None:
                retorno += drop_index_comando(t2.indexes[index])
    return retorno
def executa_diferenca(fileNameDump1, fileNameDump2, **kwargs) -> str:
    """Load both dump files and return the complete difference script.

    NOTE(review): the reverse pass (DROPs for objects missing from dump1)
    runs only when the 'drops' kwarg is absent or None — i.e. passing the
    --drops flag *suppresses* drop generation, and the flag's value itself
    is never read. Confirm that is the intended semantics.
    """
    dump1 = ler_df(fileNameDump1)
    dump2 = ler_df(fileNameDump2)
    retorno = compara_dump1_x_dump2(dump1, dump2)
    if 'drops' not in kwargs or kwargs['drops'] is None:
        retorno += compara_dump2_x_dump1(dump1, dump2)
    return retorno
def main(argv):
    """Parse command-line options and return them as a dict.

    Recognized options: -h (help), -v (version), -a/--dump1, -b/--dump2,
    -c/--dfsaida, -d/--drops. Exits the process on -h/-v, on a parse
    error, and when --dump1/--dump2 are missing.

    Fixed: `== None` comparisons replaced with the idiomatic `is None`.
    """
    try:
        opts, args = getopt.getopt(argv, "hvda:b:c:", ["dump1=","dump2=","dfsaida=","drops"])
    except getopt.GetoptError:
        print ('exec.py --dump1=nome_dump1 --dump2=nome_dump2 --dfsaida=nome_arquivo_saida')
        sys.exit(2)
    opcoes = dict()
    for opt, arg in opts:
        if opt == '-h':
            print(constant.TEXTO_HELP)
            sys.exit()
        elif opt == '-v':
            print('versao:', constant.VERSAO)
            sys.exit()
        elif opt in ("-a", "--dump1"):
            opcoes['dump1'] = arg
        elif opt in ("-b", "--dump2"):
            opcoes['dump2'] = arg
        elif opt in ("-c", "--dfsaida"):
            opcoes['dfsaida'] = arg
        elif opt in ("-d", "--drops"):
            # only the key's presence matters downstream (executa_diferenca);
            # the False value is never read
            opcoes['drops'] = False
    # both dump files are mandatory
    if opcoes.get('dump1', None) is None or opcoes.get('dump2', None) is None:
        print (constant.QUEBRA_DE_LINHA + "Parametros Invalidos!!!")
        print ("Devem ser informados pelo menos os parametros --dump1 e --dump2")
        exit(1)
    return opcoes
if __name__ == '__main__':
    # Parse options, compute the diff script, then print it or write it out.
    opcoes = main(sys.argv[1:])
    retorno = executa_diferenca(opcoes.get('dump1'), opcoes.get('dump2'), **opcoes)
    nomeArquivoSaida = opcoes.get('dfsaida', None)
    if nomeArquivoSaida is None:
        # no output file requested: write the script to stdout
        print(retorno)
        exit(0)
    else:
        try:
            # fixed: use a context manager so the file is closed even if
            # the write raises
            with open(nomeArquivoSaida, 'w') as arquivoSaida:
                arquivoSaida.write(retorno)
        except FileNotFoundError as e:
            print(e)
            print('Erro ao criar arquivo de saida!!!')
            exit(1)
|
class minmax_val(object):
    """Two-field result holder for minmax(); iterable as (min, max)."""
    def __init__(self, min, max):
        # parameter names kept (despite shadowing builtins) for
        # keyword-argument compatibility with existing callers
        self.min = min
        self.max = max
    def __iter__(self):
        # allows tuple-style unpacking: lo, hi = minmax(...)
        yield from (self.min, self.max)
def minmax(items, key=lambda x: x):
    """Return a minmax_val with the smallest and largest element of items.

    Args:
        items: any iterable (consumed once).
        key: optional key function used for the comparisons.

    Raises:
        ValueError: if items is empty.

    Fixed: the locals shadowed the builtins `min`/`max`, and the empty-input
    ValueError carried no message.
    """
    smallest, largest = None, None
    for item in items:
        if smallest is None or key(smallest) > key(item):
            smallest = item
        if largest is None or key(largest) < key(item):
            largest = item
    if smallest is None or largest is None:
        # Iterable was empty
        raise ValueError("minmax() arg is an empty iterable")
    return minmax_val(smallest, largest)
|
# Author Caozy
from operation.models import UserAsk
from django import forms
import re
class UserAskForm(forms.ModelForm):
    """ModelForm for a user's course consultation request (UserAsk)."""
    class Meta:
        model = UserAsk
        fields = ['name', 'mobile', 'course_name']
    def clean_mobile(self):
        """Validate the submitted mobile number against a fixed pattern.

        The regex appears to target 11-digit mainland-China mobile numbers
        (prefix groups 13x/14x/15x/16x/17x/18x/19x) — TODO confirm the
        prefix list is current.
        """
        mobile=self.cleaned_data['mobile']
        REGEX_MOBILE= "^(((13[0-9])|(14[579])|(15([0-3]|[5-9]))|(16[6])|(17[0135678])|(18[0-9])|(19[89]))\\d{8})$"
        p=re.compile(REGEX_MOBILE)
        if p.match(mobile):
            return mobile
        else:
            # user-facing message (Chinese: "invalid mobile number")
            raise forms.ValidationError(u'手机号非法',code='mobile_invalid')
# coding=utf-8
# shell class
import random
import config
import datetime, time
import math
class Game:
    """One trivia round: pick a question, judge chat guesses, pay the winner.

    All persistence goes through the bot's parameterized query helpers.
    NOTE(review): interpolating ``.encode("utf-8")`` results with %s only
    renders as clean text on Python 2 (Python 3 would print a bytes repr) —
    confirm the intended interpreter.
    """
    def __init__(self, contestmanager):
        self.contestmanager = contestmanager
        self.bot = contestmanager.bot
        self.questionData = None  # DB row of the active question; set by start()
        self.answers = []  # accepted answer strings, loaded by start()
        self.startTime = 0  # epoch seconds when the question was announced
    def start(self):
        """Select a random unused question, announce it, and cache its answers."""
        self.startTime = int(time.time())
        # first select a grouping
        groupData = self.bot.execQuerySelectOne("SELECT question_grouping AS chosenGroup FROM trivia_questions WHERE used_in_cycle = 0 GROUP BY question_grouping ORDER BY RANDOM() LIMIT 1")
        if groupData == None:
            # no questions left in the cycle: reset all 'used' flags and retry
            self.bot.execQueryModify("UPDATE trivia_questions SET used_in_cycle = 0")
            groupData = self.bot.execQuerySelectOne("SELECT question_grouping AS chosenGroup FROM trivia_questions WHERE used_in_cycle = 0 GROUP BY question_grouping ORDER BY RANDOM() LIMIT 1")
        self.questionData = self.bot.execQuerySelectOne("SELECT * FROM trivia_questions WHERE question_grouping = ? AND used_in_cycle = 0 ORDER BY RANDOM() LIMIT 1", (groupData["chosenGroup"], ))
        # mark the chosen question as used for this cycle
        self.bot.execQueryModify("UPDATE trivia_questions SET used_in_cycle = 1 WHERE id = ?", (self.questionData["id"],))
        questionStr = "/me Kappa Kappa Kappa | Trivia! %s | Kappa Kappa Kappa" % (self.questionData["question"].encode("utf-8"))
        self.bot.channelMsg(questionStr)
        answerRows = self.bot.execQuerySelectMultiple("SELECT * FROM trivia_answers WHERE question_id = ?", (self.questionData["id"],))
        for answerRow in answerRows:
            self.answers.append(answerRow["answer"].encode("utf-8"))
    def processMessage(self, user, message):
        """Judge one chat message; on a correct answer, pay out and end the round.

        Skips users whose name contains 'bot' and users listed in the alts
        table. The prize halves after each of two elapsed-time thresholds
        and halves again for a repeat winner of this same question.
        """
        if message.strip():
            if "bot" in user:
                # no bots
                return
            altCheck = self.bot.execQuerySelectOne("SELECT * FROM alts WHERE twitchname = ?", (user,))
            if altCheck != None:
                # no alts either
                return
            msglower = message.strip().lower()
            # normalize accented e so spellings like "pokémon" still match
            msglower = msglower.replace("é", "e")
            # strip optional "!guess " and "pokemon " prefixes before matching
            if msglower.startswith("!guess "):
                msglower = msglower[7:]
            if msglower.startswith("pokemon "):
                msglower = msglower[8:]
            if msglower in self.answers:
                # correct answer: compute the payout
                prize = config.triviaPrizes[self.questionData["difficulty"]]
                diffName = config.triviaDifficulties[self.questionData["difficulty"]]
                elapsedTime = int(time.time()) - self.startTime
                if elapsedTime >= config.triviaPrizeReducFirst:
                    prize = int(prize/2)
                if elapsedTime >= config.triviaPrizeReducSecond:
                    prize = int(prize/2)
                # repeat winners of this exact question get a reduced prize
                wonCheck = self.bot.execQuerySelectOne("SELECT * FROM trivia_winners WHERE twitchname = ? AND question_id = ? LIMIT 1", (user, self.questionData["id"]))
                if wonCheck != None:
                    prize = int(prize/2)
                emote = random.choice(["Kreygasm", "KevinTurtle", "TriHard"])
                emotewall = " ".join([emote]*3)
                msgArgs = (emotewall, user, msglower, diffName, elapsedTime, prize, config.currencyPlural, emotewall)
                self.bot.channelMsg("/me %s | %s's answer of %s was correct! For answering a %s question in %d seconds they gain %d %s. | %s" % msgArgs)
                userData = self.bot.getUserDetails(user)
                theirNewBal = userData["balance"] + prize
                # balance is repeated in the WHERE clause — presumably a
                # lost-update guard so a stale balance aborts the write; confirm
                queryArgList = (theirNewBal, user, userData["balance"])
                self.bot.execQueryModify("UPDATE users SET balance = ?, contests_won = contests_won + 1 WHERE twitchname = ? AND balance = ?", queryArgList)
                self.bot.updateHighestBalance(userData, theirNewBal)
                logArgList = (user, "trivia", msglower, prize, int(time.time()), self.bot.factory.channel)
                self.bot.execQueryModify("INSERT INTO contestwins (twitchname, gameid, answer, reward, whenHappened, channel) VALUES(?, ?, ?, ?, ?, ?)", logArgList)
                secondlogArgList = (user, self.questionData["id"], elapsedTime, prize, int(time.time()), self.bot.factory.channel)
                self.bot.execQueryModify("INSERT INTO trivia_winners (twitchname, question_id, timeTaken, reward, whenHappened, channel) VALUES(?, ?, ?, ?, ?, ?)", secondlogArgList)
                self.contestmanager.contestIsDone()
                return
    def end(self):
        """Announce that the round finished with no winner."""
        emote = random.choice(["BibleThump", ":("])
        emotewall = " ".join([emote]*3)
        self.bot.channelMsg("/me %s | No-one answered correctly. Too bad! Try again next time. | %s" % (emotewall, emotewall))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.