index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
5,180
|
AvatarSenju/django-first
|
refs/heads/master
|
/posts/urls.py
|
# URL configuration for the posts app (CRUD routes for Post objects).
from django.contrib import admin
from django.urls import path,re_path
# NOTE(review): `admin` and `re_path` are imported but unused (re_path only
# appears in the commented-out route below).
from . import views as v
# Namespace for URL reversing, e.g. reverse("posts:details").
app_name="posts"
urlpatterns = [
    # NOTE(review): "listss" / "retrive" mirror the names defined in
    # posts/views.py -- renaming them here would require changing views too.
    path('',v.listss,name='lists'),
    path('create', v.create, name='create'),
    path('edit/<int:id>/', v.update, name='update'),
    path('retrive/<int:id>/', v.retrive, name='retrive'),
    # re_path(r'^retrive/(?P<id>\d+)/$', v.retrive, name='retrive'),
    path('details/<int:id>/', v.details, name='details'),
    path('delete/<int:id>/', v.delete, name='delete'),
]
|
{"/posts/views.py": ["/posts/models.py"], "/posts/admin.py": ["/posts/models.py"]}
|
5,181
|
AvatarSenju/django-first
|
refs/heads/master
|
/posts/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
# Create your models here.
#MVC
class Post(models.Model):
    """A simple blog post (the Model of this app's MVC)."""
    title=models.CharField(max_length=120)
    content=models.TextField()
    # updated: refreshed on every save; timestamp: set once at creation.
    updated=models.DateTimeField(auto_now=True,auto_now_add=False)
    timestamp=models.DateTimeField(auto_now=False,auto_now_add=True)
    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.title
    def get_absolute_url(self):
        # reverse() takes the URL kwargs as a dictionary.
        return reverse("posts:details", kwargs={"id": self.id})
|
{"/posts/views.py": ["/posts/models.py"], "/posts/admin.py": ["/posts/models.py"]}
|
5,188
|
sbulat/Splendor
|
refs/heads/master
|
/Game.py
|
# -*- coding: utf-8 -*-
from Globals import Glob
from Player import *
from Card import *
import tkMessageBox
class Game(object):
    """Overall state of a two-player Splendor round.

    Holds the three rows of face-up cards, the players, the "whose turn"
    label and the End Turn button.  All drawing goes through Glob.canvas.
    """
    def __init__(self):
        # Deal 4 face-up cards from each level's pre-built stack.
        self.firstLev = [Glob.firstLevCards.pop() for i in range(4)]
        self.secLev = [Glob.secLevCards.pop() for i in range(4)]
        self.thirdLev = [Glob.thirdLevCards.pop() for i in range(4)]
        self.players = [Player(), Player()]
        self.actualPlayer = self.players[0]
        # Canvas text item showing the active player's id (top-right corner).
        self.textId = Glob.canvas.create_text(Glob.WINDOW_X-5, 15, anchor='e', text='Player #'+str(self.actualPlayer.id))
        self.playerIter = self.player_iterator(0)
        self.nextTurn = self.make_button()
    def __getitem__(self):
        # NOTE(review): unconventional __getitem__ -- takes no key and simply
        # returns all face-up cards as one list; main.B1_click calls it
        # directly as Glob.game.__getitem__().
        return self.firstLev + self.secLev + self.thirdLev
    # Create the End Turn button (starts disabled; enabled once the player
    # has completed an action).
    def make_button(self):
        b = Button(Glob.root, text='End Turn', command=self.change_player, state='disabled')
        b.pack()
        b.place(x=500, y=650)
        return b
    # Lay out the 4 dealt cards of each level on the board -- game setup.
    def deal_cards(self):
        x = 150
        y1 = 350   # row y for level-1 cards
        y2 = 200   # row y for level-2 cards
        y3 = 50    # row y for level-3 cards
        for i in range(4):
            tmp = self.firstLev[i]
            Glob.canvas.move(tmp.tag, x-tmp.a[0], y1-tmp.a[1])
            tmp.__setitem__(x, y1)
            tmp = self.secLev[i]
            Glob.canvas.move(tmp.tag, x-tmp.a[0], y2-tmp.a[1])
            tmp.__setitem__(x, y2)
            tmp = self.thirdLev[i]
            Glob.canvas.move(tmp.tag, x-tmp.a[0], y3-tmp.a[1])
            tmp.__setitem__(x, y3)
            x+=90
    # When a face-up card is bought it is replaced from the matching stack
    # (IndexError = stack exhausted, silently leave the slot empty).
    def draw_new_card(self, cardLevel, card):
        if cardLevel==FirstLevelCard:
            self.firstLev.pop(self.firstLev.index(card))
            try:
                tmp = Glob.firstLevCards.pop()
                tmp.move(card.a[0]-tmp.a[0], card.a[1]-tmp.a[1])
                self.firstLev.insert(0, tmp)
            except IndexError:
                return
        elif cardLevel==SecondLevelCard:
            self.secLev.pop(self.secLev.index(card))
            try:
                tmp = Glob.secLevCards.pop()
                tmp.move(card.a[0]-tmp.a[0], card.a[1]-tmp.a[1])
                self.secLev.insert(0, tmp)
            except IndexError:
                return
        elif cardLevel==ThirdLevelCard:
            self.thirdLev.pop(self.thirdLev.index(card))
            try:
                tmp = Glob.thirdLevCards.pop()
                tmp.move(card.a[0]-tmp.a[0], card.a[1]-tmp.a[1])
                self.thirdLev.insert(0, tmp)
            except IndexError:
                return
    # Return a token to the bank pool.  If the pool has no token of this
    # stone, get_index returns None and the list indexing raises TypeError,
    # so we fall back to the static bank position instead.
    def return_token(self, token):
        try:
            token.move(Glob.tokens[self.get_index(token.bonus)].a[0] - token.a[0], \
                       Glob.tokens[self.get_index(token.bonus)].a[1] - token.a[1] + 10)
        except TypeError:
            token.move(Glob.tokensPos[token.bonus][0] - token.a[0], Glob.tokensPos[token.bonus][1] - token.a[1] + 10)
    # Index of the first bank token with the given stone, so we know where
    # to put a returned token back (None if no such token remains).
    @staticmethod
    def get_index(stone):
        for token in Glob.tokens:
            if token.bonus==stone:
                return Glob.tokens.index(token)
    # Check whether the game is over (15 victory points wins).
    def is_end(self):
        if(self.actualPlayer.state.vp>=15):
            tkMessageBox.showinfo("Koniec!", "Wygrał gracz #"+str(self.actualPlayer.id)+"!")
            Glob.canvas.unbind("<Button-1>")
            return True
        else:
            return False
    # Switch to the next player: hide the current player's items, reset the
    # per-turn flags, show the next player's items and refresh the label.
    def change_player(self):
        Glob.game.nextTurn.config(state='disabled')
        for token in self.actualPlayer.tokens:
            Glob.canvas.itemconfig(token.tag, state='hidden')
        for card in self.actualPlayer.cards:
            Glob.canvas.itemconfig(card.tag, state='hidden')
        self.actualPlayer.tokenCount = 0
        self.actualPlayer.gotToken = False
        self.actualPlayer.gotCard = False
        self.actualPlayer.tmpTokens = copy.deepcopy(Glob.stones)
        # .next() -- Python 2 generator protocol.
        self.actualPlayer = self.playerIter.next()
        for token in self.actualPlayer.tokens:
            Glob.canvas.itemconfig(token.tag, state='normal')
        for card in self.actualPlayer.cards:
            Glob.canvas.itemconfig(card.tag, state='normal')
        Glob.canvas.delete(self.textId)
        self.textId = Glob.canvas.create_text(Glob.WINDOW_X-5, 15, anchor='e', text='Player #'+str(self.actualPlayer.id))
        self.actualPlayer.state.update_state()
    # Endless round-robin iterator over self.players (increments before
    # yielding, so starting at 0 yields players[1] first -- players[0] is
    # already the initial actualPlayer).
    def player_iterator(self, start):
        idx = start
        while True:
            idx += 1
            idx = idx % len(self.players)
            yield self.players[idx]
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,189
|
sbulat/Splendor
|
refs/heads/master
|
/Globals.py
|
# -*- coding: utf-8 -*-
from Tkinter import *
import tkFont
class Glob(object):
    """Shared global state: window geometry, the Tk root/canvas, fonts,
    card stacks, token pool and stone lookup tables.

    NOTE: creating Tk() and loading PhotoImages happens at import time of
    this module, as a class-body side effect.
    """
    WINDOW_X = 1024            # window width in px
    WINDOW_Y = 700             # window height in px
    SIZE_X = 80                # card width
    SIZE_Y = 120               # card height
    TOKEN_DIAMETER = 50        # token (oval) diameter
    root = Tk()
    canvas = Canvas(root, width=WINDOW_X, height=WINDOW_Y)
    myFont = tkFont.Font(family="sans-serif", size=12)
    vpFont = tkFont.Font(family="sans-serif", size=12, weight='bold')
    # Face-down card stacks, filled by main.py at startup.
    firstLevCards = []
    secLevCards = []
    thirdLevCards = []
    # Bank token pool; Token.__init__ inserts itself here.
    tokens = []
    # Replaced with the Game instance by main.py.
    game = 0
    # Current drop position of each bank token column.
    tokensPos = {'red': [50, 500], 'green': [120, 500], 'blue': [190, 500], 'white': [260, 500], 'black': [330, 500]}
    # Zeroed per-stone counter template (deep-copied wherever a fresh tally
    # is needed).
    stones = {'red': 0, 'green': 0, 'blue': 0, 'white': 0, 'black': 0}
    stonesNames = ['red', 'green', 'blue', 'white', 'black']
    stoneToColor = {'red': '#ff3333', 'green': '#99ff33', 'blue': '#3333ff', 'white': '#ccffcc', 'black': '#000033'}
    stoneToImage = {'red': PhotoImage(file='./images/ruby.ppm'),
                    'green': PhotoImage(file='./images/emerald.ppm'),
                    'blue': PhotoImage(file='./images/sapphire.ppm'),
                    'white': PhotoImage(file='./images/diamond.ppm'),
                    'black': PhotoImage(file='./images/onyx.ppm')
                    }
    cardReverse = {'first': PhotoImage(file='./images/first.ppm'),
                   'second': PhotoImage(file='./images/second.ppm'),
                   'third': PhotoImage(file='./images/third.ppm')
                   }
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,190
|
sbulat/Splendor
|
refs/heads/master
|
/Token.py
|
# -*- coding: utf-8 -*-
from Globals import *
class Token(object):
    """One gem token drawn on the shared canvas.

    ``a`` is the top-left corner of the bounding box, ``b`` the
    bottom-right (a + TOKEN_DIAMETER in both axes).  ``tag`` groups the
    oval and its stone image so they move together.  New tokens insert
    themselves into the bank pool Glob.tokens.
    """
    # NOTE(review): class attribute appears unused elsewhere in this file.
    pos = [50, 500]
    def __init__(self, x, y, bonus):
        self.a = x, y
        self.b = x+Glob.TOKEN_DIAMETER, y+Glob.TOKEN_DIAMETER
        self.bonus = bonus
        self.image = Glob.stoneToImage[self.bonus]
        self.ov = Glob.canvas.create_oval(self.a, self.b, fill='white')
        self.tag = str(self.ov) + 't'
        Glob.canvas.itemconfig(self.ov, tags=self.tag)
        Glob.canvas.create_image(self.a[0]+(Glob.TOKEN_DIAMETER/2), self.a[1]+(Glob.TOKEN_DIAMETER/2), tags=self.tag, image=self.image)
        Glob.canvas.pack()
        Glob.tokens.insert(0, self)
    def __setitem__(self, x, y):
        # Bug fix: previously used Glob.SIZE_X/SIZE_Y (the CARD dimensions),
        # so after any move the bounding box grew to 80x120 and click
        # hit-testing in main.B1_click matched an oversized area.  A token's
        # box is TOKEN_DIAMETER square, exactly as in __init__.
        self.a = x, y
        self.b = x+Glob.TOKEN_DIAMETER, y+Glob.TOKEN_DIAMETER
    def move(self, x, y):
        """Translate the token by (x, y); raises ValueError when the result
        would leave the window and TypeError for non-int offsets."""
        if (self.a[0]+x)<0 or (self.a[0]+x)>Glob.WINDOW_X-Glob.TOKEN_DIAMETER or \
           (self.a[1]+y)<0 or (self.a[1]+y)>Glob.WINDOW_Y-Glob.TOKEN_DIAMETER:
            raise ValueError()
        # Bug fix: the original used `or`, which only raised when BOTH
        # offsets were non-int; the intent is that both must be ints.
        if not( type(x)==int and type(y)==int ):
            raise TypeError()
        self.__setitem__(self.a[0]+x, self.a[1]+y)
        Glob.canvas.move(self.tag, x, y)
        Glob.canvas.tag_raise(self.tag)
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,191
|
sbulat/Splendor
|
refs/heads/master
|
/Player.py
|
# -*- coding: utf-8 -*-
from Globals import *
import copy
class State(object):
    """Per-player scoreboard: victory points and total funds per stone."""
    def __init__(self):
        self.vp = 0;
        # Total spendable funds per stone (tokens + card bonuses).
        self.money = copy.deepcopy(Glob.stones)
        # All canvas items of this scoreboard share one tag so update_state
        # can delete them in a single call.
        self.tag = 'state'
    # Draw victory points and owned stones (top-right of the canvas).
    def print_state(self):
        Glob.canvas.create_text(Glob.WINDOW_X-5, 35, anchor='e', text='VP: '+str(self.vp), tags=self.tag)
        i = 55
        for stone in self.money:
            Glob.canvas.create_image(Glob.WINDOW_X-5, i, anchor='e', image=Glob.stoneToImage[stone], tags=self.tag)
            Glob.canvas.create_text(Glob.WINDOW_X-30, i, anchor='e', text=str(self.money[stone]), tags=self.tag)
            i += 25
    # Refresh the display: wipe everything with our tag and redraw.
    def update_state(self):
        Glob.canvas.delete(self.tag)
        self.print_state()
class Player(object):
    """One player: owned tokens and cards, per-turn flags, scoreboard."""
    # Class-level counter used to hand out sequential player ids.
    count = 0
    def __init__(self):
        Player.count += 1
        self.id = self.count
        self.tokenCount = 0    # tokens taken this turn
        self.gotToken = False  # True once a token was taken this turn
        self.gotCard = False   # True once a card was bought this turn
        # Per-turn tally of tokens taken, keyed by stone.
        self.tmpTokens = copy.deepcopy(Glob.stones)
        self.state = State()
        self.tokens = []
        self.cards = []
        # Cumulative counts per stone: tokens held / card bonuses owned.
        self.tokensBon = copy.deepcopy(Glob.stones)
        self.cardsBon = copy.deepcopy(Glob.stones)
        # Next canvas drop position per stone for tokens/cards in this
        # player's area (values replaced by [x, y] lists below).
        self.tokensPos = copy.deepcopy(Glob.stones)
        self.cardsPos = copy.deepcopy(Glob.stones)
        self.set_cards_and_tokens_pos()
        self.state.print_state()
    # Set layout positions for cards and tokens in the player's area.
    def set_cards_and_tokens_pos(self):
        xC = 530
        yC = 100
        xT = 550
        yT = 450
        for pos in self.cardsPos:
            self.cardsPos[pos] = [xC, yC]
            self.tokensPos[pos] = [xT, yT]
            xC+=90
            xT+=65
    # Taking a token: per turn a player may take three different stones or
    # two of the same kind.
    def get_token(self, token):
        if self.gotCard:
            return
        elif self.tokenCount>=3 or (2 in self.tmpTokens.values()):
            return
        elif self.tmpTokens[token.bonus]==1:
            # Already holds one of this stone this turn: a second is only
            # allowed if it is the sole stone taken so far.
            if self.can_buy_second_token(token):
                self.move_token_and_update(token)
            else:
                return
        # NOTE(review): this branch is unreachable -- the condition
        # tmpTokens[token.bonus]==1 is already consumed by the elif above.
        elif self.tokenCount==2 and self.tmpTokens[token.bonus]==1:
            return
        else:
            self.move_token_and_update(token)
        self.state.update_state()
        if self.tokenCount==3 or (2 in self.tmpTokens.values()):
            # Turn is not ended automatically so the player can review what
            # they now hold; only the End Turn button is enabled.
            Glob.game.nextTurn.config(state='normal')
    # Move the token into the player's area and update all tallies.
    def move_token_and_update(self, token):
        self.gotToken = True
        self.state.money[token.bonus]+=1
        self.tokensBon[token.bonus]+=1
        token.move(self.tokensPos[token.bonus][0]-token.a[0], self.tokensPos[token.bonus][1]-token.a[1])
        self.tokenCount+=1
        self.tmpTokens[token.bonus]+=1
        # Stagger stacked tokens by 10 px; the bank column shrinks likewise.
        self.tokensPos[token.bonus][1]+=10
        Glob.tokensPos[token.bonus][1]-=10
        self.tokens.insert(0, Glob.tokens.pop(Glob.tokens.index(token)))
    # A second token of the same stone is allowed only when exactly one
    # token (of that same stone) was taken this turn.
    def can_buy_second_token(self, token):
        tmpValues = self.tmpTokens.values()
        if tmpValues.count(1)==1 and self.tmpTokens.get(token.bonus)==1:
            return True
        else:
            return False
    # Buying a card:
    # - refuse if a token was already taken (or a card already bought)
    # - check affordability (can_buy_card)
    # - pay (return tokens to the bank), then move the card to the player
    def buy_card(self, card):
        if self.gotToken or self.gotCard:
            return
        if self.can_buy_card(card):
            self.return_tokens_after_buy(card)
            self.move_card_and_update(card)
        else:
            return
        self.state.update_state()
        if Glob.game.is_end():
            return
        Glob.game.nextTurn.config(state='normal')
    # Move the bought card into the player's area and update tallies.
    def move_card_and_update(self, card):
        self.state.money[card.bonus]+=1
        self.cardsBon[card.bonus]+=1
        # Level-1 cards store vp as "" when zero -- treat that as 0.
        self.state.vp += 0 if card.vp=='' else int(card.vp)
        self.gotCard = True
        Glob.game.draw_new_card(type(card), card)
        card.move(self.cardsPos[card.bonus][0]-card.a[0], self.cardsPos[card.bonus][1]-card.a[1])
        self.cards.append(card)
        self.cardsPos[card.bonus][1]+=23
    # Affordability: every stone's cost must be covered by current funds
    # (funds = tokens + card bonuses, tracked in state.money).
    def can_buy_card(self, card):
        for stone in card.cost:
            if self.state.money[stone]<card.cost[stone]:
                return False
        return True
    # Return tokens after a purchase: update funds and token positions.
    def return_tokens_after_buy(self, card):
        tmpCost = copy.deepcopy(Glob.stones)
        for stone in tmpCost:
            if card.cost[stone]>0:
                tmpCost[stone] = card.cost[stone] - self.cardsBon[stone]  # subtract bonuses: tokens actually owed
                tmpCost[stone] = tmpCost[stone] if tmpCost[stone]>=0 else 0  # clamp at 0 when bonuses cover it all
                self.tokensBon[stone] -= tmpCost[stone]  # pay from held tokens
                self.state.money[stone] -= tmpCost[stone]  # and from total funds
                for i in range(tmpCost[stone]):
                    tmpToken = self.find_appr_token_here(stone)
                    self.tokensPos[stone][1]-=10
                    Glob.tokensPos[stone][1]+=10
                    Glob.game.return_token(tmpToken)
                    Glob.tokens.insert(0, self.tokens.pop(self.tokens.index(tmpToken)))
    # First owned token of the given stone (the one we will return).
    def find_appr_token_here(self, stone):
        for token in self.tokens:
            if token.bonus==stone:
                return token
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,192
|
sbulat/Splendor
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
from Tkinter import *
from Globals import Glob
from Game import *
from Card import *
from Token import *
def B1_click(event):
    """Dispatch a left mouse click: buy a face-up card if one was hit,
    otherwise take a bank token if one was hit."""
    def hit(obj):
        # Point-in-rectangle test against the object's bounding box.
        return obj.a[0] <= event.x <= obj.b[0] and obj.a[1] <= event.y <= obj.b[1]
    for card in Glob.game.__getitem__():
        if hit(card):
            Glob.game.actualPlayer.buy_card(card)
            return
    for token in Glob.tokens:
        if hit(token):
            Glob.game.actualPlayer.get_token(token)
            return
### main program
Glob.root.resizable(0,0)
Glob.root.title("Splendor")
Glob.canvas.pack()
Glob.canvas.bind("<Button-1>", B1_click)
# Build the three face-down card stacks, then draw 3 slightly offset card
# backs on each to suggest a pile.
for i in range(40):
    FirstLevelCard(40,350)
for i in reversed(range(3)):
    Glob.canvas.create_image(41-(i*4)+(Glob.SIZE_X/2), 351-(i*4)+(Glob.SIZE_Y/2), image=Glob.cardReverse['first'])
for i in range(30):
    SecondLevelCard(40,200)
for i in reversed(range(3)):
    Glob.canvas.create_image(41-(i*4)+(Glob.SIZE_X/2), 201-(i*4)+(Glob.SIZE_Y/2), image=Glob.cardReverse['second'])
for i in range(20):
    ThirdLevelCard(40,50)
for i in reversed(range(3)):
    Glob.canvas.create_image(41-(i*4)+(Glob.SIZE_X/2), 51-(i*4)+(Glob.SIZE_Y/2), image=Glob.cardReverse['third'])
# 5 tokens of each stone, each stacked 10 px below the previous one; the
# final loop rewinds the drop positions by one step.
tmpStones = Glob.stonesNames*5
for stone in tmpStones:
    Glob.tokensPos[stone][1]+=10
    Token(Glob.tokensPos[stone][0], Glob.tokensPos[stone][1], stone)
for pos in Glob.tokensPos:
    Glob.tokensPos[pos][1] -= 10
Glob.game = Game()
Glob.game.deal_cards()
Glob.root.mainloop()
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,193
|
sbulat/Splendor
|
refs/heads/master
|
/Card.py
|
# -*- coding: utf-8 -*-
from Tkinter import *
from Globals import Glob
import random
import copy
class Card(object):
    """Base class for a development card drawn on the shared canvas.

    A card is an axis-aligned rectangle: ``a`` is the top-left corner and
    ``b`` the bottom-right (a + SIZE_X/SIZE_Y).  ``tag`` groups all canvas
    items belonging to the card so they move and raise together.

    Raises:
        ValueError: coordinates would place the card outside the window.
        TypeError: x or y is not an int.
    """
    def __init__(self, x, y):
        if x<0 or x>Glob.WINDOW_X-Glob.SIZE_X or y<0 or y>Glob.WINDOW_Y-Glob.SIZE_Y:
            raise ValueError("Podane współrzędne leżą poza oknem aplikacji.")
        # Bug fix: the original used `or`, which only raised when BOTH
        # coordinates were non-int; the intent is that both must be ints.
        if not( type(x)==int and type(y)==int ):
            raise TypeError()
        self.a = x, y
        self.b = x+Glob.SIZE_X, y+Glob.SIZE_Y
        self.color = 'white'
        self.rect = Glob.canvas.create_rectangle(self.a, self.b, fill=self.color)
        self.tag = str(self.rect) + 't'
        Glob.canvas.itemconfig(self.rect, tags=self.tag)
        # Horizontal separator under the header row (vp + bonus image).
        Glob.canvas.create_line(self.a[0], self.a[1]+23, self.b[0], self.a[1]+23, tags=self.tag)
    def __setitem__(self, x, y):
        # Reposition the bounding box without touching the canvas items.
        self.a = x, y
        self.b = x+Glob.SIZE_X, y+Glob.SIZE_Y
    def get_cords(self):
        """Return the (top-left, bottom-right) corner pair."""
        return self.a, self.b
    def move(self, x, y):
        """Translate the card by (x, y); raises ValueError when the result
        would leave the window and TypeError for non-int offsets."""
        if (self.a[0]+x)<0 or (self.a[0]+x)>Glob.WINDOW_X-Glob.SIZE_X or (self.a[1]+y)<0 or (self.a[1]+y)>Glob.WINDOW_Y-Glob.SIZE_Y:
            raise ValueError()
        # Bug fix: `or` -> `and`, same reasoning as in __init__.
        if not( type(x)==int and type(y)==int ):
            raise TypeError()
        self.__setitem__(self.a[0]+x, self.a[1]+y)
        Glob.canvas.move(self.tag, x, y)
        Glob.canvas.tag_raise(self.tag)
class FirstLevelCard(Card):
    """Cheapest card tier: 0-1 VP, 2-4 required stones costing 1-3 each."""
    # Shared, pre-shuffled pool of bonus stones (8 of each colour).
    bonuses = Glob.stonesNames*8
    random.shuffle(bonuses)
    @classmethod
    def gen_attrs(cls):
        """Draw random attributes for a new card: vp, bonus stone, cost."""
        vp = random.randint(0, 1)
        bonus = cls.bonuses.pop()
        cost = copy.deepcopy(Glob.stones)
        pool = copy.deepcopy(Glob.stonesNames)
        random.shuffle(pool)
        for _ in range(random.randint(2, 4)):
            cost[pool.pop()] = random.randint(1,3)
        return {'vp': vp, 'bonus': bonus, 'cost': cost}
    def __init__(self, x, y):
        attrs = self.gen_attrs()
        # A zero VP value is displayed as an empty string.
        self.vp = attrs['vp'] if attrs['vp'] else ""
        self.bonus = attrs['bonus']
        self.cost = attrs['cost']
        self.image = Glob.stoneToImage[self.bonus]
        super(FirstLevelCard, self).__init__(x, y)
        # Header: VP on the left, bonus stone image on the right.
        Glob.canvas.create_text(self.a[0]+5, self.a[1]+12, text=self.vp, tags=self.tag, anchor='w', font=Glob.vpFont)
        Glob.canvas.create_image(self.b[0]-5, self.a[1]+12, image=self.image, tags=self.tag, anchor='e')
        # Cost rows, drawn bottom-up.
        offset = 10
        for stone in self.cost:
            amount = self.cost.get(stone)
            if amount==0:
                continue
            Glob.canvas.create_image(self.b[0]-(Glob.SIZE_X/2)+25, self.b[1]-offset, image=Glob.stoneToImage.get(stone), tags=self.tag, anchor='e')
            Glob.canvas.create_text(self.b[0]-(Glob.SIZE_X/2)-5, self.b[1]-offset, text=str(amount), tags=self.tag, anchor='e', font=(12))
            offset+=23
        Glob.canvas.pack()
        Glob.firstLevCards.insert(0, self)
class SecondLevelCard(Card):
    """Middle card tier: 1-3 VP, 2-4 required stones costing 3-5 each."""
    # Shared, pre-shuffled pool of bonus stones (6 of each colour).
    bonuses = (copy.deepcopy(Glob.stonesNames))*6
    random.shuffle(bonuses)
    @classmethod
    def gen_attrs(cls):
        """Draw random attributes for a new card: vp, bonus stone, cost."""
        vp = random.randint(1, 3)
        bonus = cls.bonuses.pop()
        cost = copy.deepcopy(Glob.stones)
        pool = copy.deepcopy(Glob.stonesNames)
        random.shuffle(pool)
        for _ in range(random.randint(2, 4)):
            cost[pool.pop()] = random.randint(3,5)
        return {'vp': vp, 'bonus': bonus, 'cost': cost}
    def __init__(self, x, y):
        attrs = self.gen_attrs()
        self.vp = attrs['vp']
        self.bonus = attrs['bonus']
        self.cost = attrs['cost']
        self.image = Glob.stoneToImage[self.bonus]
        super(SecondLevelCard, self).__init__(x, y)
        # Header: VP on the left, bonus stone image on the right.
        Glob.canvas.create_text(self.a[0]+5, self.a[1]+12, text=self.vp, tags=self.tag, anchor='w', font=Glob.vpFont)
        Glob.canvas.create_image(self.b[0]-5, self.a[1]+12, image=self.image, tags=self.tag, anchor='e')
        # Cost rows, drawn bottom-up.
        offset = 10
        for stone in self.cost:
            amount = self.cost.get(stone)
            if amount==0:
                continue
            Glob.canvas.create_image(self.b[0]-(Glob.SIZE_X/2)+25, self.b[1]-offset, image=Glob.stoneToImage.get(stone), tags=self.tag, anchor='e')
            Glob.canvas.create_text(self.b[0]-(Glob.SIZE_X/2)-5, self.b[1]-offset, text=str(amount), tags=self.tag, anchor='e', font=(12))
            offset+=23
        Glob.canvas.pack()
        Glob.secLevCards.insert(0, self)
class ThirdLevelCard(Card):
    """Top card tier: 3-5 VP, 2-4 required stones costing 3-7 each."""
    # Shared, pre-shuffled pool of bonus stones (4 of each colour).
    bonuses = (copy.deepcopy(Glob.stonesNames))*4
    random.shuffle(bonuses)
    @classmethod
    def gen_attrs(cls):
        """Draw random attributes for a new card: vp, bonus stone, cost."""
        vp = random.randint(3, 5)
        bonus = cls.bonuses.pop()
        cost = copy.deepcopy(Glob.stones)
        pool = copy.deepcopy(Glob.stonesNames)
        random.shuffle(pool)
        for _ in range(random.randint(2, 4)):
            cost[pool.pop()] = random.randint(3,7)
        return {'vp': vp, 'bonus': bonus, 'cost': cost}
    def __init__(self, x, y):
        attrs = self.gen_attrs()
        self.vp = attrs['vp']
        self.bonus = attrs['bonus']
        self.cost = attrs['cost']
        self.image = Glob.stoneToImage[self.bonus]
        super(ThirdLevelCard, self).__init__(x, y)
        # Header: VP on the left, bonus stone image on the right.
        Glob.canvas.create_text(self.a[0]+5, self.a[1]+12, text=self.vp, tags=self.tag, anchor='w', font=Glob.vpFont)
        Glob.canvas.create_image(self.b[0]-5, self.a[1]+12, image=self.image, tags=self.tag, anchor='e')
        # Cost rows, drawn bottom-up.
        offset = 10
        for stone in self.cost:
            amount = self.cost.get(stone)
            if amount==0:
                continue
            Glob.canvas.create_image(self.b[0]-(Glob.SIZE_X/2)+25, self.b[1]-offset, image=Glob.stoneToImage.get(stone), tags=self.tag, anchor='e')
            Glob.canvas.create_text(self.b[0]-(Glob.SIZE_X/2)-5, self.b[1]-offset, text=str(amount), tags=self.tag, anchor='e', font=(12))
            offset+=23
        Glob.canvas.pack()
        Glob.thirdLevCards.insert(0, self)
|
{"/Game.py": ["/Globals.py", "/Player.py", "/Card.py"], "/Token.py": ["/Globals.py"], "/Player.py": ["/Globals.py"], "/main.py": ["/Globals.py", "/Game.py", "/Card.py", "/Token.py"], "/Card.py": ["/Globals.py"]}
|
5,208
|
customr/COLORSCHEME
|
refs/heads/master
|
/colorscheme/colorscheme.py
|
import numpy as np
import matplotlib.pyplot as plt
from cv2 import kmeans, KMEANS_RANDOM_CENTERS, TERM_CRITERIA_EPS, TERM_CRITERIA_MAX_ITER
from PIL import Image
from io import BytesIO
class Picture:
    """A picture object
    Inputs image and makes a new image with color data on it.
    How it works:
    1. Posterized image (with decreased color range) fed to KMeans algorithm
    2. from KMeans we receive the most interesting colors
    3. from this array taking k colors and making a histogram
    Args:
        image_b (bytes): image in BytesIO representation
        k (int): count of colors
    Constants:
        PAD (bool): if True, makes white border on image, otherwise nothing
        MIN_H (int): minimal image height
        MIN_W (int): minimal image width
    Attributes:
        image (numpy.ndarray): source image
        image_posterized (numpy.ndarray): image with quantized colors (decreased color range)
        image_pil (PIL.Image): image in PIL representation
        label (numpy.ndarray): Picture.center counts
        center (numpy.ndarray): most wanted colors in RGB representation
        hist (numpy.ndarray): histogram of k most wanted colors
    """
    PAD = True
    MIN_W = 1000
    MIN_H = 1000
    def __init__(self, image_b, k=6):
        self.image_pil = Image.open(BytesIO(image_b))
        self.image = np.array(self.image_pil)
        self.k = k
        # If the image is smaller than the minimal width/height...
        # NOTE(review): shape[0] is the HEIGHT but is compared to MIN_W (and
        # shape[1] to MIN_H).  Harmless while both constants are 1000, but
        # verify before changing either constant.
        if (self.image.shape[0] < self.MIN_W) or (self.image.shape[1] < self.MIN_H):
            # choose maximal proportion (where side less than minimal)
            p = max(self.MIN_W / self.image.shape[0],
                    self.MIN_H / self.image.shape[1])
            # increase both sides by proportional multiplication
            self.image_pil = self.image_pil.resize((int(p*self.image.shape[1]), int(p*self.image.shape[0])), Image.LANCZOS)
            self.image = np.array(self.image_pil)
        # Quantization - decrease the color range before clustering.
        self.image_posterized = self.image_pil.quantize((self.k//2)*self.k**2, 1)
        self.image_posterized = self.image_posterized.convert("RGB", palette=Image.ADAPTIVE, colors=self.k**3)
        self.image_posterized = np.array(self.image_posterized)
        """
        KMeans:
        1. create random centers of colors (centroids)
        2. for each color assign nearest centroid (calculated as Euclidean distance)
        3. move centroids to their new position (mean of assigned colors)
        4. repeat steps 2 and 3 until the sum of distances is minimal
        """
        _, self.label, self.center = kmeans(
            self.image_posterized.reshape(-1, 3).astype('float32'),
            self.k,
            None,
            (TERM_CRITERIA_EPS + TERM_CRITERIA_MAX_ITER, 10, 1),
            0,
            KMEANS_RANDOM_CENTERS
        )
        self.hist = self.get_hist()
        if self.PAD:
            # Make a white pad around the image.
            # NOTE(review): np.pad with three pad tuples assumes a 3-channel
            # (H, W, C) image; a grayscale input would raise here -- verify
            # inputs are always RGB.
            pv = max(self.image_pil.width, self.image_pil.height)//85
            self.image_pil = Image.fromarray(np.pad(self.image_pil, ((pv,pv), (pv,pv), (0,0)), 'constant', constant_values=255))
    def get_hist(self):
        """Histogram the cluster labels and reorder self.center to match.

        NOTE(review): argsort gives ASCENDING frequency, so the least
        frequent color comes first -- confirm that is the intended legend
        order.
        """
        (hist, _) = np.histogram(self.label, bins=self.k)
        mask = np.argsort(hist)
        self.center = self.center.reshape(-1, 3)[mask]
        return hist[mask]
    # picture to the left of the form
    def __add__(self, form):
        assert isinstance(form, Form), 'Should be Form object'
        return form.__radd__(self)
    # picture over the form
    def __radd__(self, form):
        assert isinstance(form, Form), 'Should be Form object'
        return form.__add__(self)
class Form:
    """A form object
    Only for work with Picture
    Args:
        flag (bool): for ultra wide (0) or ultra tall (1)
        **params:
            w,h, r,c, an1,an2, bb, loc - matplotlib layout knobs (figure size,
            subplot grid, anchors, legend bbox/location); tuned by eye.
    """
    def __init__(self, flag, **params):
        self.flag = flag
        self.params = params
    def create_form(self, image, colors, hist):
        """Create form and insert image into it
        Args:
            image (Image): image to insert into form
            colors (numpy.ndarray): Picture.center
            hist (numpy.ndarray): Picture.hist
        Returns:
            PIL.Image - form with a picture
        """
        fig = plt.figure()
        fig.set_size_inches(self.params['w'], self.params['h'])
        ax1 = plt.subplot(self.params['r'], self.params['c'], 2, aspect="equal", anchor=self.params['an2'])
        ax2 = plt.subplot(self.params['r'], self.params['c'], 1, aspect="equal", anchor=self.params['an1'])
        hex_colors = np.array(['#{:02X}{:02X}{:02X}'.format(x[0],x[1],x[2]) for x in colors.astype('uint8')])
        wedges, _ = ax1.pie(hist, colors=hex_colors, startangle=90, radius=1.25)
        # NOTE(review): (len(hist)-hist.size) is always 0 for a numpy array,
        # so the flag term never changes labelspacing -- verify intent.
        ax1.legend(wedges, hex_colors, loc=self.params['loc'], bbox_to_anchor=self.params['bb'], fontsize=90+self.flag*20,
                   labelspacing=0.75 + 0.25*self.flag*((len(hist)-hist.size) / (len(hist)*2)))
        ax2.imshow(image)
        ax2.axis('off')
        plt.tight_layout()
        fig.canvas.draw()
        w, h = fig.canvas.get_width_height()
        # NOTE(review): reshape(w, h, 3) transposes the logical row/column
        # order (tostring_rgb is h rows of w pixels); it happens to work
        # because the bytes are immediately flattened again below.  Also,
        # tostring()/tostring_rgb are deprecated (tobytes()/buffer_rgba in
        # newer matplotlib/numpy) -- consider updating.
        buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(w, h, 3)
        plt.close()
        return Image.frombytes("RGB", (w, h), buf.tostring())
    # picture to the left of the form (horizontal stack)
    def __radd__(self, pic):
        assert isinstance(pic, Picture), 'Should be Picture object'
        self.figure = self.create_form(
            pic.image_posterized,
            pic.center,
            pic.hist
        )
        # Scale whichever side is taller down to the other's height.
        w1, h1 = pic.image_pil.width, pic.image_pil.height
        w2, h2 = self.figure.width, self.figure.height
        if h1<h2: self.figure = self.figure.resize((int(w2*(h1/h2)), h1), Image.LANCZOS)
        if h1>h2: pic.image_pil = pic.image_pil.resize((int(w1*(h2/h1)), h2), Image.LANCZOS)
        return Image.fromarray(np.hstack((np.asarray(pic.image_pil), np.asarray(self.figure))))
    # picture over the form (vertical stack)
    def __add__(self, pic):
        assert isinstance(pic, Picture), 'Should be Picture object'
        self.figure = self.create_form(
            pic.image_posterized,
            pic.center,
            pic.hist
        )
        # Scale whichever side is wider down to the other's width.
        w1, h1 = pic.image_pil.width, pic.image_pil.height
        w2, h2 = self.figure.width, self.figure.height
        if w1<w2: self.figure = self.figure.resize((w1, int(h2*(w1/w2))), Image.LANCZOS)
        if w1>w2: pic.image_pil = pic.image_pil.resize((w2, int(h1*(w2/w1))), Image.LANCZOS)
        return Image.fromarray(np.vstack((np.asarray(pic.image_pil), np.asarray(self.figure))))
def colorscheme(imgb):
    """Managing Form and Picture
    Args:
        imgb (bytes): image in bytes
    Returns:
        result - BMP-encoded image bytes (picture + color-scheme form)
    """
    image = Picture(imgb)
    result = None
    # Clearly wider-than-tall images get the form stacked below them
    # (Form.__add__, vstack); everything else gets it to the right
    # (Form.__radd__, hstack).
    if (image.image_pil.width-image.image_pil.height) > (image.image_pil.width+image.image_pil.height)/10:
        form = Form(flag=0, w=50, h=15, r=1, c=2, an1='C', an2='W', bb=(1,0,-0.5,1), loc="center left")
        ans = form + image
    else:
        form = Form(flag=1, w=15, h=50, r=2, c=1, an1='S', an2='N', bb=(0.5,1), loc='lower center')
        ans = image + form
    # Serialize to BMP in memory.
    with BytesIO() as output:
        ans.save(output, 'BMP')
        result = output.getvalue()
    return result
|
{"/colorscheme/__init__.py": ["/colorscheme/colorscheme.py"], "/run.py": ["/colorscheme/__init__.py"]}
|
5,209
|
customr/COLORSCHEME
|
refs/heads/master
|
/colorscheme/__init__.py
|
from colorscheme.colorscheme import colorscheme
|
{"/colorscheme/__init__.py": ["/colorscheme/colorscheme.py"], "/run.py": ["/colorscheme/__init__.py"]}
|
5,210
|
customr/COLORSCHEME
|
refs/heads/master
|
/run.py
|
# Smoke-test driver: fetch a sample image, run colorscheme() on it and
# display the BMP result with matplotlib.
import requests
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
import colorscheme
# Sample thumbnail used purely for manual testing.
TEST = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT1i6jN4Lj2kP2glv4vD3p16chAH6q-V9JpeHYd6URd9-GyoM7reg'
test = colorscheme.colorscheme(requests.get(TEST).content)
plt.imshow(np.array(Image.open(BytesIO(test))))
plt.show()
|
{"/colorscheme/__init__.py": ["/colorscheme/colorscheme.py"], "/run.py": ["/colorscheme/__init__.py"]}
|
5,212
|
quadrohedron/covid
|
refs/heads/master
|
/Covid2p1_DBB_RND.py
|
import datetime, re, requests, sqlite3
from time import time, sleep, strftime, strptime, localtime
from Covid2p1_Backend_RND import *
BASE_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{0}_global.csv'
REALTIME_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv'
DATE_RE = re.compile('[0-9]{1,2}/[0-9]{1,2}/[0-9]{1,2}')
CSV_KEYS = ['confirmed', 'deaths', 'recovered']
MATH_KEYS = ['new', 'active']
LOCALES_USED = ['ENG', 'ARAB', 'ESP']
TIMEOUT_RETRIES = 5
def csvDate2date(timestring):
    """Convert a CSV-header date such as '3/15/20' into a datetime.date."""
    parsed = strptime(timestring, '%m/%d/%y')
    return datetime.date(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
def rtDate2date(timestring):
    """Convert a realtime timestamp 'YYYY-MM-DD HH:MM:SS' into a date,
    discarding the time-of-day part."""
    parsed = strptime(timestring, '%Y-%m-%d %H:%M:%S')
    return datetime.date(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
##### Data fillers
COORDINATES = {}
DATA = {k:{} for k in CSV_KEYS+MATH_KEYS}
RT_DATA = {k:{} for k in CSV_KEYS+MATH_KEYS}
DATES = []
RT_DATE = None
N_DAYS = 0
RT_COUNTRIES = None
def fetch_set(key):
    """Download one time-series CSV ('confirmed'/'deaths'/'recovered').

    Returns (ok, text): ok is False after TIMEOUT_RETRIES connect timeouts;
    text is the stripped CSV body, or None when the request failed.
    """
    for i in range(TIMEOUT_RETRIES):
        try:
            resp = requests.get(BASE_URL.format(key))
            print('Fetch done: \'{0}\''.format(key))
            break
        except requests.exceptions.ConnectTimeout:
            if i == TIMEOUT_RETRIES-1:
                # Out of retries: report failure to the caller.
                print('Timeout {0} at key \'{1}\', skipping.'.format(i+1, key))
                return False, None
            else:
                print('Timeout {0} at key {1}, retrying...'.format(i+1, key))
                continue
    # NOTE(review): only ConnectTimeout is retried; ReadTimeout and other
    # network errors propagate to the caller -- confirm that's intended.
    return resp.ok, resp.text.strip() if resp.ok else None
def fetch_realtime():
    """Download the realtime per-country snapshot CSV (REALTIME_URL).

    Same contract and retry behavior as fetch_set: returns (ok, text).
    """
    for i in range(TIMEOUT_RETRIES):
        try:
            resp = requests.get(REALTIME_URL)
            print('Fetch done: \'realtime\'')
            break
        except requests.exceptions.ConnectTimeout:
            if i == TIMEOUT_RETRIES-1:
                # Out of retries: report failure to the caller.
                print('Timeout {0} at key \'realtime\', skipping.'.format(i+1))
                return False, None
            else:
                print('Timeout {0} at key realtime, retrying...'.format(i+1))
                continue
    # NOTE(review): duplicated logic with fetch_set; only ConnectTimeout is
    # retried here as well.
    return resp.ok, resp.text.strip() if resp.ok else None
def fill_data():
    """Fetch all CSV time series plus the realtime snapshot and populate
    the module-level DATA / RT_DATA / DATES / COORDINATES tables.

    Layout: DATA[key][country][province] -> list of daily cumulative ints.
    Returns None; aborts early (tables partially filled) on a failed fetch.
    """
    global DATA, DATES, N_DAYS, RT_COUNTRIES, RT_DATA, RT_DATE
    for k in CSV_KEYS:
        ### Fetching dataset
        ok, source = fetch_set(k)
        if not ok:
            return None
        source = split_csv(source)
        ### Filling dates (header row; the first 4 columns are metadata)
        if len(DATES) == 0:
            DATES = list(map(csvDate2date, source[0][4:]))
            N_DAYS = len(DATES)
        for line in source[1:]:
            p, c = line[:2]  # province, country
            ### Filling coordinates
            cp_key = c+' : '+p
            if cp_key not in COORDINATES:
                COORDINATES[cp_key] = tuple(map(float, line[2:4]))
            ### Filling cases
            tab = DATA[k]
            for i in range(4, len(line)):
                if not (c in tab):
                    tab[c] = {}
                c_dict = tab[c]
                if not (p in c_dict):
                    c_dict[p] = []
                c_dict[p].append(int(line[i]))
    ### Filling realtime
    ok, source = fetch_realtime()
    if not ok:
        return None
    source = split_csv(source, 0)
    indices = {}
    line = source[0]
    RT_DATE = rtDate2date(source[1][1])
    # NOTE(review): despite its name, this flag is True when the realtime
    # date is NOT the last CSV date (i.e. realtime covers a newer day).
    samedayflag = not (RT_DATE == DATES[-1])
    # Map each CSV key to its column index in the realtime header.
    for i in range(len(line)):
        val = line[i].lower()
        if val in CSV_KEYS:
            indices[val] = i
    for line in source[1:]:
        c = line[0]
        for k in indices:
            val = int(line[indices[k]])
            RT_DATA[k][c] = val
            # NOTE(review): assumes country c exists in DATA[k]; a
            # realtime-only country would raise KeyError here when
            # samedayflag is True -- verify upstream data guarantees this.
            if samedayflag and ('' in DATA[k][c]):
                DATA[k][c][''][-1] = val
    RT_COUNTRIES = set(RT_DATA[CSV_KEYS[0]].keys())
    #print(RT_DATA[CSV_KEYS[0]])
    ### Setting coordinates (country-level, via backend geocoding helper)
    countries = []
    for c in DATA[CSV_KEYS[0]]:
        countries.append(c)
    if len(countries) > 0:
        coords, still_missing = get_coordinates(countries)
##        if len(still_missing) > 0:
##            print('Still missing the following countries:', still_missing)
        for c in coords:
            if not (coords[c] == None):
                COORDINATES[c+' : '] = coords[c]
    ### Setting special coordinates (from null)
    locs = sp_locs()
    for c in locs:
        COORDINATES[c] = locs[c]
    return None
##### Analysis
POW_GLOBAL = 1
POW_LATAM = 1
RATING_LIMIT = 30
def gen_global_dbg(power):
    """Debug variant of global chart generation with an explicit `power`
    argument passed through to write_tabfile.

    NOTE(review): the filename does not include the key k, so every
    iteration overwrites the same file -- presumably intentional for a
    quick debug dump; verify.
    """
    for k in CSV_KEYS:
        tab = DATA[k]
        filename = f'Output/chart_{power}.tab'
        data = []
        coords = []
        for c in tab:
            c_dict = tab[c]
            if '' in c_dict:
                # Country has a country-level ('' province) row: emit every
                # row of this country separately.
                for p in c_dict:
                    data.append(c_dict[p])
                    coords.append(COORDINATES[c+' : '+p])
            else:
                # Province rows only: sum them into one national series.
                vals = [0 for _ in range(N_DAYS)]
                for p in c_dict:
                    p_list = c_dict[p]
                    for d_i in range(N_DAYS):
                        vals[d_i] += p_list[d_i]
                data.append(vals)
                coords.append(COORDINATES[c+' : '])
        write_tabfile(data, coords, DATES, power, filename)
        print(f'Chart completed: global {power}')
    return None
def gen_charts_global_unified():
    """Write one global chart tabfile per CSV key, dated with the last
    available CSV date, using the POW_GLOBAL setting."""
    for k in CSV_KEYS:
        tab = DATA[k]
        filename = f'Output/chart_global_{k}_{DATES[-1].isoformat()}.tab'
        data = []
        coords = []
        for c in tab:
            c_dict = tab[c]
            if '' in c_dict:
                # Country has a country-level ('' province) row: emit every
                # row of this country separately.
                for p in c_dict:
                    data.append(c_dict[p])
                    coords.append(COORDINATES[c+' : '+p])
            else:
                # Province rows only: sum them into one national series.
                vals = [0 for _ in range(N_DAYS)]
                for p in c_dict:
                    p_list = c_dict[p]
                    for d_i in range(N_DAYS):
                        vals[d_i] += p_list[d_i]
                data.append(vals)
                coords.append(COORDINATES[c+' : '])
        write_tabfile(data, coords, DATES, POW_GLOBAL, filename)
        print(f'Chart completed: global \'{k}\'')
    return None
def gen_charts_latam():
    """Write per-key chart files restricted to the LATAM_LIMITS bounding box.

    The product ``(A < lat < B)*(C < long < D)`` acts as a logical AND of
    the two range checks.
    """
    A, B, C, D = LATAM_LIMITS
    for k in CSV_KEYS:
        tab = DATA[k]
        filename = f'Output/chart_latam_{k}_{DATES[-1].isoformat()}.tab'
        data = []
        coords = []
        for c in tab:
            c_dict = tab[c]
            if '' in c_dict:
                # Country-level entry present: the whole country is skipped
                # when its country-level coordinate falls outside the box.
                lat, long = COORDINATES[c+' : ']
                if not (A < lat < B)*(C < long < D):
                    continue
                for p in c_dict:
                    data.append(c_dict[p])
                    coords.append(COORDINATES[c+' : '+p])
            else:
                # Only provinces inside the box contribute to the total.
                # NOTE(review): when every province is filtered out, the
                # country is still appended with an all-zero series at its
                # country-level coordinate — confirm whether such countries
                # should be dropped instead.
                vals = [0 for _ in range(N_DAYS)]
                for p in c_dict:
                    lat, long = COORDINATES[c+' : '+p]
                    if not (A < lat < B)*(C < long < D):
                        continue
                    p_list = c_dict[p]
                    for d_i in range(N_DAYS):
                        vals[d_i] += p_list[d_i]
                data.append(vals)
                coords.append(COORDINATES[c+' : '])
        write_tabfile(data, coords, DATES, POW_GLOBAL, filename)
        print(f'Chart completed: latam \'{k}\'')
    return None
def gen_linegraphs():
    """Write, per CSV key, a '#'-joined global day-by-day total and a file
    holding that series' maximum value."""
    for key in CSV_KEYS:
        graph_path = f'Output/linegraph_{key}_{DATES[-1].isoformat()}.txt'
        max_path = f'Output/maxval_{key}_{DATES[-1].isoformat()}.txt'
        # Sum every (country, province) series day by day.
        totals = [0] * N_DAYS
        for provinces in DATA[key].values():
            for vals in provinces.values():
                for day in range(N_DAYS):
                    totals[day] += vals[day]
        with open(graph_path, 'w') as out:
            out.write('#'.join(map(str, totals)))
        with open(max_path, 'w') as out:
            out.write(str(max(totals)))
        print(f'Linegraph completed: \'{key}\'')
    return None
def gen_linegraphs_rt():
    """Write global line-graph files that include today's real-time data.

    When RT_DATE is a new day (differs from the last historical date) one
    extra data point is appended: the real-time total for countries present
    in the RT feed, and the last historical value for every other country.
    """
    nextdayflag = not (RT_DATE == DATES[-1])
    for k in CSV_KEYS:
        tab = DATA[k]
        rt_tab = RT_DATA[k]
        filename_g = f'Output/linegraph_RT_{k}_{RT_DATE.isoformat()}.txt'
        filename_m = f'Output/maxval_RT_{k}_{RT_DATE.isoformat()}.txt'
        # One slot per historical day, plus one for the RT day if it is new.
        vals = [0 for _ in range(N_DAYS+(1 if nextdayflag else 0))]
        for c in tab:
            c_dict = tab[c]
            for p in c_dict:
                for i in range(N_DAYS):
                    vals[i] += c_dict[p][i]
        if nextdayflag:
            for c in tab:
                if c in RT_COUNTRIES:
                    vals[-1] += rt_tab[c]
                else:
                    # No live figure: carry the last historical value forward.
                    c_dict = tab[c]
                    for p in c_dict:
                        vals[-1] += c_dict[p][-1]
        with open(filename_g, 'w') as f:
            f.write('#'.join(map(str, vals)))
        with open(filename_m, 'w') as f:
            f.write(str(max(vals)))
        print(f'RT linegraph completed: \'{k}\'')
    return None
def gen_ratings():
    """Write, per CSV key, country rating files for the last historical day.

    For every used locale a comma-separated list of country names (sorted
    by descending value) is written, alongside a single '#'-separated value
    list shared by all locales; TOP-N truncated variants are also emitted.
    """
    locale_keys, names_by_country = None, None
    for key in CSV_KEYS:
        tab = DATA[key]
        template = f'Output/rating_{{0}}_{key}_{DATES[-1].isoformat()}.txt'
        # Last-day total per country, summed over its provinces.
        totals = {}
        for country, provinces in tab.items():
            totals[country] = sum(series[-1] for series in provinces.values())
        # Locale keys / translations only need to be resolved once.
        if names_by_country is None:
            locale_keys, names_by_country, _ = build_country_dictionary(set(tab.keys()))
            locale_keys = [lk for lk in locale_keys if lk in LOCALES_USED]
        ranked = sorted(totals.items(), key = lambda item: -item[1])
        texts = {lk: names_by_country[ranked[0][0]][lk] for lk in locale_keys}
        values_text = str(ranked[0][1])
        for country, value in ranked[1:]:
            for lk in locale_keys:
                texts[lk] += ','+names_by_country[country][lk]
            values_text += '#'+str(value)
        for lk in locale_keys:
            with open(template.format(f'countries_{lk}'), 'w', encoding = 'utf-8') as f:
                f.write(texts[lk])
            with open(template.format(f'TOP{RATING_LIMIT}_countries_{lk}'), 'w', encoding = 'utf-8') as f:
                f.write(','.join(texts[lk].split(',')[:RATING_LIMIT]))
        with open(template.format('values'), 'w') as f:
            f.write(values_text)
        with open(template.format(f'TOP{RATING_LIMIT}_values'), 'w') as f:
            f.write('#'.join(values_text.split('#')[:RATING_LIMIT]))
        print(f'Rating completed: \'{key}\'')
    return None
def gen_ratings_rt():
    """Write country rating files using real-time values where available.

    Same output layout as gen_ratings(); when RT_DATE is a new day,
    countries present in the RT feed use their live total and every other
    country falls back to its last historical value.
    """
    l_keys, translations = None, None
    nextdayflag = not (RT_DATE == DATES[-1])
    for k in CSV_KEYS:
        tab = DATA[k]
        rt_tab = RT_DATA[k]
        filename = f'Output/rating_RT_{{0}}_{k}_{RT_DATE.isoformat()}.txt'
        data = {}
        for c in tab:
            if nextdayflag and (c in RT_COUNTRIES):
                val = rt_tab[c]
                #print(0,type(val))
            else:
                # Fall back to the last historical day, summed over provinces.
                c_dict = tab[c]
                val = 0
                for p in c_dict:
                    val += c_dict[p][-1]
                #print(1,type(val))
            data[c] = val
        # Locale keys / translations only need to be resolved once.
        if translations == None:
            l_keys, translations, _ = build_country_dictionary(set(tab.keys()))
            l_keys = list(filter(lambda x: x in LOCALES_USED, l_keys))
        # Sort countries by value, descending.
        rating = sorted(data.items(), key = lambda x: -x[1])
        text_val = rating[0][1]
        texts_c = {l_k:translations[rating[0][0]][l_k] for l_k in l_keys}
        text_val = str(text_val)
        for c, val in rating[1:]:
            for l_k in l_keys:
                texts_c[l_k] += ','+translations[c][l_k]
            text_val += '#'+str(val)
        for l_k in l_keys:
            with open(filename.format(f'countries_{l_k}'), 'w', encoding = 'utf-8') as f:
                f.write(texts_c[l_k])
            with open(filename.format(f'TOP{RATING_LIMIT}_countries_{l_k}'), 'w', encoding = 'utf-8') as f:
                f.write(','.join(texts_c[l_k].split(',')[:RATING_LIMIT]))
        with open(filename.format('values'), 'w') as f:
            f.write(text_val)
        with open(filename.format(f'TOP{RATING_LIMIT}_values'), 'w') as f:
            f.write('#'.join(text_val.split('#')[:RATING_LIMIT]))
        print(f'RT rating completed: \'{k}\'')
    return None
##### Run
def set_params():
    """Load overrides for module-level parameters from 'params.txt'.

    Each non-blank line has the form ``NAME<TAB>TYPE<TAB>VALUE`` where TYPE
    is the name of a builtin constructor (``int``, ``float``, ``str``, ...).
    The converted value is bound as a module-level global named NAME.

    NOTE: ``bool`` as a TYPE would be misleading — ``bool('False')`` is
    True, since any non-empty string is truthy.
    """
    # ``__builtins__`` is a module in __main__ but a plain dict when this
    # file is imported, so ``getattr(__builtins__, ...)`` is unreliable.
    # Import the ``builtins`` module explicitly instead.
    import builtins
    with open('params.txt') as f:
        source = f.read().strip()
    for line in source.split('\n'):
        if not line.strip():
            continue  # tolerate blank lines in the parameter file
        name, val_type, val = line.split('\t')
        globals()[name] = getattr(builtins, val_type)(val)
    return None
if __name__ == '__main__':
    # Entry point: load parameter overrides, ingest the CSV data, then emit
    # every enabled chart/linegraph/rating artefact into Output/.
    print('Started!')
    set_params()
    fill_data()
    gen_charts_global_unified()
    gen_charts_latam()
##    gen_linegraphs()
    gen_linegraphs_rt()
##    gen_ratings()
    gen_ratings_rt()
    print('Finished!')
##CKEYS,CDICT,MISSING=build_country_dictionary(set(DATA['confirmed'].keys()))
|
{"/Covid2p1_DBB_RND.py": ["/Covid2p1_Backend_RND.py"]}
|
5,213
|
quadrohedron/covid
|
refs/heads/master
|
/Covid2p1_Backend_RND.py
|
import re
LATAM_LIMITS = [-60, 25, -120, -30]
UHEIGHT = 0.102
FACTOR = 100000
def __scale(val, unitscale, power=1):
return int(FACTOR*((1.0-UHEIGHT)*(pow(val, power)-unitscale)/(1.0-unitscale)+UHEIGHT)) if val > 0 else 0
def __scale_vals(val_table, power=1):
maxval = max(list(map(max, val_table)))
unitscale = pow(1/maxval, power)
res = []
for val_list in val_table:
res.append([__scale(v/maxval, unitscale, power) for v in val_list])
return res
def sp_locs():
    """Read special-case map locations from 'sp_locs.txt'.

    Each line holds ``NAME<TAB>LAT<TAB>LONG``; returns a dict mapping
    NAME to a (lat, long) float pair.
    """
    with open('sp_locs.txt') as src:
        lines = src.read().strip().split('\n')
    locations = {}
    for entry in lines:
        name, lat_s, long_s = entry.split('\t')
        locations[name] = (float(lat_s), float(long_s))
    return locations
# Characters silently dropped from every field.
__SKIPPED_CHARS = '*'
# Source-data country names normalised to the names used elsewhere.
__COUNTRY_REPLACEMENTS = {
    'Korea, South' : 'South Korea',
    'Congo (Brazzaville)' : 'Congo [Republic]',
    'Congo (Kinshasa)' : 'Congo [DRC]'
}
# Whole lines dropped from the input.  Raw strings: '\A' and '\Z' are regex
# anchors, and non-raw '\A' would raise an invalid-escape warning on modern
# Python.
__IGNORED_PATTERNS = tuple(map(re.compile, (
    r'\ARecovered,Canada,.*\Z',
    #r'\ADiamond Princess,Canada,.*\Z'
)))

def split_csv(text, c_ind = 1):
    """Split CSV *text* into a list of row lists.

    Handles double-quoted fields containing commas, drops any character in
    __SKIPPED_CHARS, skips whole lines matching __IGNORED_PATTERNS, and
    rewrites the country field (column *c_ind*) via __COUNTRY_REPLACEMENTS.
    """
    rows = []
    for line in text.split('\n'):
        # Drop lines that match any ignore pattern.
        if any(p.match(line) is not None for p in __IGNORED_PATTERNS):
            continue
        row = []
        field = ''
        in_quotes = False
        for char in line:
            if char == '"':
                # Quotes only toggle state; they are never kept in the field.
                in_quotes = not in_quotes
            elif char == ',' and not in_quotes:
                # Field boundary: normalise the country column before saving.
                if len(row) == c_ind and field in __COUNTRY_REPLACEMENTS:
                    field = __COUNTRY_REPLACEMENTS[field]
                row.append(field)
                field = ''
            elif char not in __SKIPPED_CHARS:
                field += char
        row.append(field)
        rows.append(row)
    return rows
def get_coordinates(countries):
    """Look up a (lat, long) pair for each country name in 'countries.tab'.

    Returns ``(mapping, missing)``: *mapping* has ``None`` for names not
    found in the file, and *missing* lists exactly those names.
    """
    found = {name: None for name in countries}
    with open('countries.tab') as src:
        for raw in src.read().strip().split('\n'):
            parts = raw.split('\t')
            name = parts[-1].strip()
            if name in countries:
                # Latitude and longitude are the two columns before the name.
                found[name] = (float(parts[-3]), float(parts[-2]))
    missing = [name for name in found if found[name] is None]
    return found, missing
def write_tabfile(data_list, coords_list, date_list, power, filename):
    """Write a chart .tab file: one line per (series, date) pair, formatted
    as ISO-date, '00', the coordinate components and the scaled value, all
    tab-separated."""
    scaled = __scale_vals(data_list, power)
    with open(filename, 'w') as out:
        for series, coords in zip(scaled, coords_list):
            coord_part = '\t'.join(map(str, coords))
            for day, date in enumerate(date_list):
                out.write(date.isoformat()+'\t00\t'+coord_part+'\t'+str(series[day])+'\n')
    return None
def build_country_dictionary(country_set):
    """Load per-locale country names from 'country_dictionary.csv'.

    Returns ``(locale_keys, translations, leftover)`` where *translations*
    maps each English name found in *country_set* to a {locale: name} dict.
    NOTE: *country_set* is mutated in place — matched names are removed, so
    the returned *leftover* is the same (now reduced) set object.
    """
    with open('country_dictionary.csv', encoding = 'utf-8') as src:
        rows = src.read().strip().split('\n')
    # Header row gives the locale keys; strip a possible BOM first.
    keys = rows[0].replace('\ufeff', '').split(',')
    eng_col = keys.index('ENG')
    translations = {}
    for row in rows[1:]:
        names = row.split(',')
        if not any(names):
            continue  # skip fully empty lines
        english = names[eng_col]
        if english in country_set:
            translations[english] = {keys[i]: names[i] for i in range(len(keys))}
            country_set.remove(english)
    return keys, translations, country_set
|
{"/Covid2p1_DBB_RND.py": ["/Covid2p1_Backend_RND.py"]}
|
5,227
|
SRD2705/Student-Database
|
refs/heads/main
|
/login.py
|
from tkinter import *
from PIL import ImageTk
from tkinter import messagebox
import pymysql
import os
import sys
import random
import smtplib
import time
class Login_system:
    """Tkinter login window with registration hand-off and an e-mailed,
    OTP-based password-reset flow backed by a MySQL ``admins`` table.

    NOTE(review): database and SMTP credentials are hard-coded below, SQL
    in login() is built by string concatenation (injection risk), and
    passwords are stored and compared in plain text — all of these should
    be fixed before production use.
    """
    def __init__(self,root):
        """Build the login form inside *root* (a Tk root window)."""
        self.root = root
        self.root.title("Login Window")
        self.root.geometry("1920x1080+0+0")
        ################################# All images
        self.bg_icon = ImageTk.PhotoImage(file="images/bg.jpg")
        self.user_icon=PhotoImage(file="images/username.png")
        self.pass_icon=PhotoImage(file="images/pass.png")
        self.logo_icon = PhotoImage(file="images/mainlogo.png")
        self.email_icon = PhotoImage(file="images/mail.png")
        self.otp_icon = PhotoImage(file="images/otp.png")
        ############################################# Variables
        self.user_var=StringVar()
        self.pass_var = StringVar()
        self.forgot_email_var = StringVar()
        self.new_pass_var = StringVar()
        self.userotp_var = StringVar()
        # Reassigned to a plain string once an OTP is generated in sendotp().
        self.originalotp_var = StringVar()
        # Remaining OTP attempts and timing state for OTP expiry.
        self.otp_trylimit_var = 3
        self.strt_time_var = 0
        self.end_time_var = 0
        self.time_var = 0
        bg_label = Label(self.root,image=self.bg_icon).pack()
        title=Label(self.root,text="Login Panel",font=("Times new roman",40,"bold"),bg="grey14",fg="yellow3",bd=10,relief=GROOVE)
        title.place(x=0,y=0,relwidth=1)
        Login_Frame = Frame(self.root,bg="forest green")
        Login_Frame.place(x=200,y=150)
        logo_lbl = Label(Login_Frame,image=self.logo_icon,bd=0).grid(row=0,columnspan=2,pady=20)
        user_lbl = Label(Login_Frame,text="Username",image=self.user_icon,compound=LEFT,font=("times new roman",20,"bold"),bg="forest green").grid(row=1,column=0,padx=20,pady=10)
        txt_user = Entry(Login_Frame,bd="5",textvariable=self.user_var,relief=GROOVE,font=("",15)).grid(row=1,column=1,padx=20)
        pass_lbl = Label(Login_Frame, text="Password", image=self.pass_icon, compound=LEFT,font=("times new roman", 20, "bold"), bg="forest green").grid(row=2, column=0, padx=20,pady=10)
        txt_pass = Entry(Login_Frame, bd="5", show = "*",textvariable=self.pass_var,relief=GROOVE, font=("", 15)).grid(row=2, column=1, padx=20)
        btn_login = Button(Login_Frame,text="Login",width=15,command=self.login,font=("times new roman",15,"bold"),bg="white",fg="Blue").grid(row=3,column=1,pady=10)
        btn_register = Button(Login_Frame, text="Register", width=15, command=self.register,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=3, column=0, pady=10)
        btn_forgot = Button(Login_Frame, text="Forgot Password", width=15, command=self.forgot_pass,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=4, columnspan=2,pady=10)

    def login(self):
        """Validate the entered credentials against the ``admins`` table and
        open the student window on success."""
        if self.user_var.get()=="" or self.pass_var.get()=="":
            messagebox.showerror("Error","All fields are required")
        else:
            con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
            cur = con.cursor()
            # NOTE(review): the username is concatenated straight into the
            # SQL string — use a parameterised query to avoid SQL injection.
            cur.execute("SELECT password FROM `admins` WHERE user='" + str(self.user_var.get()) + "'")
            rows=cur.fetchone()
            if rows:
                # print(rows[0])
                # print(self.pass_var.get())
                # NOTE(review): plain-text password comparison.
                if rows[0] == self.pass_var.get():
                    self.root.destroy()
                    import Student
                    Student.Students()
                else:
                    messagebox.showerror("Invalid","Username and password did not match")
            else:
                messagebox.showerror("Invalid", "Username and password did not match")

    def register(self):
        """Close this window and launch the registration script."""
        self.root.destroy()
        os.system("python3 register.py")

    def forgot_pass(self):
        """Show the password-reset e-mail form and reset the attempt limit."""
        self.otp_trylimit_var=3
        self.Forgot_Frame = Frame(self.root, bg="forest green")
        self.Forgot_Frame.place(x=800, y=150)
        email_lbl = Label(self.Forgot_Frame, text="Email", image=self.email_icon, compound=LEFT,font=("times new roman", 20, "bold"), bg="forest green").grid(row=1, column=0, padx=20,pady=10)
        txt_email = Entry(self.Forgot_Frame, bd="5", textvariable=self.forgot_email_var, relief=GROOVE, font=("", 15)).grid(row=1,column=1, padx=20)
        # new_pass_lbl = Label(self.Forgot_Frame, text="New password", image=self.email_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=2, column=0, padx=20, pady=10)
        # txt_new_pass = Entry(self.Forgot_Frame, bd="5", textvariable=self.new_pass_var, relief=GROOVE, font=("", 15)).grid(row=2, column=1, padx=20)
        btn_sendotp = Button(self.Forgot_Frame, text="Send OTP", width=15, command=self.sendotp,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=3, column=1, pady=10)

    def sendotp(self):
        """Verify the e-mail is registered, generate a 6-character OTP and
        send it over SMTP; then show the OTP-entry form.

        NOTE(review): the e-mail text promises a 5-minute validity and 3
        attempts, but verifyotp() expires the OTP after 60 seconds —
        confirm which is intended.
        """
        # self.Forgot_Frame.destroy()
        # self.Forgot_Frame = Frame(self.root, bg="forest green")
        # self.Forgot_Frame.place(x=800, y=150)
        # otp_lbl = Label(self.Forgot_Frame, text="OTP", image=self.otp_icon, compound=LEFT,font=("times new roman", 20, "bold"), bg="forest green").grid(row=3, column=0, padx=20,pady=10)
        # txt_otp = Entry(self.Forgot_Frame, bd="5", textvariable=self.userotp_var, relief=GROOVE,font=("", 15)).grid(row=3, column=1, padx=20)
        # btn_submitotp = Button(self.Forgot_Frame, text="Verify", width=15, command=self.verifyotp,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=4,column=1,pady=10)
        # btn_back = Button(self.Forgot_Frame, text="Back", width=10, command=self.back,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=4, column=0, pady=10)
        self.strt_time_var = time.time()
        if self.forgot_email_var.get() == "":
            messagebox.showerror("Error","Please enter a email address")
        else:
            con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
            cur = con.cursor()
            cur.execute("SELECT mail FROM `admins`")
            rows = cur.fetchall()
            st = False
            for i in rows:
                if i[0] == self.forgot_email_var.get():
                    st = True
                    break
            if st == False:
                messagebox.showerror("Error","Entered email address is not linked to any account")
                self.forgot_email_var.set("")
            else:
                self.Forgot_Frame = Frame(self.root, bg="forest green")
                self.Forgot_Frame.place(x=800, y=150)
                otp_lbl = Label(self.Forgot_Frame, text="OTP", image=self.otp_icon, compound=LEFT,font=("times new roman", 20, "bold"), bg="forest green").grid(row=3, column=0, padx=20,pady=10)
                txt_otp = Entry(self.Forgot_Frame, bd="5", textvariable=self.userotp_var, relief=GROOVE,font=("", 15)).grid(row=3, column=1, padx=20)
                btn_submitotp = Button(self.Forgot_Frame, text="Verify", width=15, command=self.verifyotp,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=4,column=1,pady=10)
                btn_back = Button(self.Forgot_Frame, text="Back", width=10, command=self.back,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=4, column=0, pady=10)
                # NOTE(review): `random` is not cryptographically secure —
                # the `secrets` module is the right tool for OTP generation.
                vals = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwwxyz"
                self.originalotp_var = ""
                for i in range(6):
                    self.originalotp_var += (random.choice(vals))
                subject = 'Reset password OTP'
                part1 = 'Hello user,'
                part2 = 'This is your otp for reset your password:'
                part3 = 'NOTE: This OTP is valid for 5 minute and you can attempt 3 times.'
                msg = f'Subject: {subject}\n\n{part1}\n{part2}\n{self.originalotp_var}\n{part3}'
                s = smtplib.SMTP('smtp.gmail.com', 587)
                s.starttls()
                # NOTE(review): SMTP credentials hard-coded in source.
                s.login("contactus.indian@gmail.com", "Rishi@2705")
                s.sendmail("contactus.indian@gmail.com", self.forgot_email_var.get(), msg)
        # self.Forgot_Frame.destroy()
        # self.Forgot_Frame = Frame(self.root, bg="forest green")
        # self.Forgot_Frame.place(x=800, y=150)

    def verifyotp(self):
        """Check the entered OTP: on success show the new-password form; on
        failure decrement the attempt counter (each retry triggers
        sendotp(), which e-mails a fresh OTP) and lock out after 3 misses.
        """
        self.end_time_var = time.time()
        self.time_var = self.end_time_var - self.strt_time_var
        # OTP expires 60 seconds after it was sent.
        if self.time_var > 60:
            messagebox.showerror("Timeout","Your OTP is expired, Try again")
            self.Forgot_Frame.destroy()
            self.forgot_pass()
        elif self.userotp_var.get()=="":
            messagebox.showerror("Error","Please enter a OTP")
        else:
            if str(self.userotp_var.get())==str(self.originalotp_var):
                self.Forgot_Frame.destroy()
                self.Forgot_Frame = Frame(self.root, bg="forest green")
                self.Forgot_Frame.place(x=800, y=150)
                new_pass_lbl = Label(self.Forgot_Frame, text="New password", image=self.email_icon, compound=LEFT,font=("times new roman", 20, "bold"), bg="forest green").grid(row=4, column=0, padx=20, pady=10)
                txt_new_pass = Entry(self.Forgot_Frame, bd="5", textvariable=self.new_pass_var, relief=GROOVE,font=("", 15)).grid(row=4, column=1, padx=20)
                btn_update = Button(self.Forgot_Frame, text="Update", width=15, command=self.update_pass,font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=5,column=1,pady=10)
            else:
                messagebox.showerror("Error","OTP does not match")
                self.otp_trylimit_var -= 1
                if self.otp_trylimit_var != 0:
                    self.sendotp()
                else:
                    messagebox.showerror("Locked","Your account is locked,Please contact administrator")
                    self.Forgot_Frame.destroy()
                    self.forgot_email_var.set("")
                    self.login()

    def update_pass(self):
        """Write the new password for the verified e-mail address.

        NOTE(review): the password is stored in plain text and the UPDATE
        is built by string concatenation — parameterise and hash.
        """
        if self.new_pass_var.get()=="":
            messagebox.showerror("Error","Enter a password")
        else:
            con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
            cur = con.cursor()
            cur.execute("UPDATE `admins` SET password='"+str(self.new_pass_var.get())+"' WHERE mail='"+str(self.forgot_email_var.get())+"'")
            con.commit()
            con.close()
            self.Forgot_Frame.destroy()
            messagebox.showinfo("Success","Password updated successfully")
            # Second destroy() of the same (already destroyed) frame is a
            # harmless no-op in Tkinter.
            self.Forgot_Frame.destroy()

    def back(self):
        """Return from the OTP form to the e-mail form."""
        self.Forgot_Frame.destroy()
        self.forgot_pass()
# Script entry: build the Tk root window, attach the login UI and start the
# blocking event loop.
root = Tk()
obj = Login_system(root)
root.mainloop()
|
{"/login.py": ["/Student.py"]}
|
5,228
|
SRD2705/Student-Database
|
refs/heads/main
|
/Student.py
|
import datetime
import tkinter
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import pymysql
import os
import qrcode
from PIL import Image,ImageTk,ImageDraw,ImageFont
from resizeimage import resizeimage
import tempfile
from datetime import datetime
class Students:
    def __init__(self):
        """Build the complete student-management UI (form panel, search
        filter and data table), load all rows from the database, and start
        the Tk main loop.

        NOTE(review): mainloop() is called at the end of __init__, so the
        constructor blocks until the window is closed.
        """
        self.root = Tk()
        self.root.title("Student Management System")
        self.root.geometry("1920x1080+0+0")
        title = Label(self.root, text="Student Management System", bd=20, relief=RIDGE,font=("times new roman", 50, "bold"), bg="midnight blue", fg="Green2")
        title.pack(side=TOP, fill=X)  # fill used for spread as its width
        user_title = Label(self.root,text="Welcome User",bd=0,relief = RIDGE,font=("times new roman", 20, "bold"),bg="midnight blue",fg="red")
        user_title.place(x=40,y=40)
        logout_btn = Button(self.root,text="LOGOUT",width=8,bg='red',command=self.logout)
        logout_btn.place(x=1750,y=42)
        bottom = Label(self.root, text="Managed by SRD2705", bd=10, relief=SOLID, font=("Ariel", 20),bg="dark green", fg="white")
        bottom.pack(side=BOTTOM, fill=X)
        ############################################################## All Variables for database
        self.Roll_No_var = StringVar()
        self.name_var = StringVar()
        self.email_var = StringVar()
        self.gender_var = StringVar()
        self.contact_var = StringVar()
        self.dob_var = StringVar()
        self.course_var = StringVar()
        self.dept_var = StringVar()
        self.pass_var = StringVar()
        self.cgpa_var = StringVar()
        ################################################################### Variable for search
        self.search_by = StringVar()
        self.search_txt = StringVar()
        ############################################################### Manage Fame
        # Responsible for data operation of the students
        self.Manage_Frame = Frame(self.root, bd=10, relief=RIDGE, bg='Grey24')
        self.Manage_Frame.place(x=0, y=115, width=700, height=850)
        m_title = Label(self.Manage_Frame,text=" Student Data Management Panel",font=("Times New Roman",30,"bold"),bg="Grey24",fg="Orange2")
        # We are using grid because in this case we do not need to take care of x,y location it follows row,column
        m_title.grid(row=0,columnspan=2,pady=10,padx=30)
        # Label and text fields for ROLL
        lbl_roll=Label(self.Manage_Frame,text="Roll No.",font=("Times New Roman",20,"bold"),bg="Grey24",fg="White")
        lbl_roll.grid(row=1,column=0,pady=10,padx=20,sticky='w')  # Sticky property used for the left alignment
        txt_roll = Entry(self.Manage_Frame,textvariable=self.Roll_No_var,font=("Times New Roman",15,"bold"),bd=5,relief = GROOVE)
        txt_roll.grid(row=1,column=1,pady=10,padx=20,sticky='w')
        # Label and text fields for NAME
        lbl_name = Label(self.Manage_Frame, text="Full Name", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_name.grid(row=2, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_name = Entry(self.Manage_Frame, textvariable=self.name_var,font=("Times New Roman", 15, "bold"), bd=5, relief=GROOVE)
        txt_name.grid(row=2, column=1, pady=10, padx=20, sticky='w')
        # Label and text fields for EMAIL
        lbl_email = Label(self.Manage_Frame, text="E-Mail", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_email.grid(row=3, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_email = Entry(self.Manage_Frame, textvariable=self.email_var,font=("Times New Roman", 15, "bold"), bd=5, relief=GROOVE)
        txt_email.grid(row=3, column=1, pady=10, padx=20, sticky='w')
        # Label and text fields for GENDER
        lbl_gender = Label(self.Manage_Frame, text="Gender", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_gender.grid(row=4, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        combo_gender = ttk.Combobox(self.Manage_Frame,textvariable=self.gender_var,font=("Times New Roman", 15, "bold"),state="readonly")  # We use combo box because of gender we use drop down list
        combo_gender['values']=("Male","Female","Other")  # using readonly state because we dont want to edit the field after selecting it
        combo_gender.grid(row=4,column=1,pady=10, padx=20,sticky='w')
        # Label and text fields for PHONENUMBER
        lbl_phnum = Label(self.Manage_Frame, text="Contact Number", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_phnum.grid(row=5, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_phnum = Entry(self.Manage_Frame, textvariable=self.contact_var,font=("Times New Roman", 15, "bold"), bd=5, relief=GROOVE)
        txt_phnum.grid(row=5, column=1, pady=10, padx=20, sticky='w')
        # Label and text fields for DOB
        lbl_dob = Label(self.Manage_Frame, text="DOB", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_dob.grid(row=6, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_dob = Entry(self.Manage_Frame, textvariable=self.dob_var,font=("Times New Roman", 15, "bold"), bd=5, relief=GROOVE)
        txt_dob.grid(row=6, column=1, pady=10, padx=20, sticky='w')
        lbl_course = Label(self.Manage_Frame, text="Course", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_course.grid(row=7, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        combo_course = ttk.Combobox(self.Manage_Frame, textvariable=self.course_var, font=("Times New Roman", 15, "bold"),state="readonly")  # We use combo box because of gender we use drop down list
        combo_course['values'] = ("Diploma", "B.Tech", "BCA", "M.Tech", "MCA","Phd")  # using readonly state because we dont want to edit the field after selecting it
        combo_course.grid(row=7, column=1, pady=10, padx=20, sticky='w')
        lbl_dept = Label(self.Manage_Frame, text="Department", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_dept.grid(row=8, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        combo_dept = ttk.Combobox(self.Manage_Frame, textvariable=self.dept_var, font=("Times New Roman", 15, "bold"),state="readonly")  # We use combo box because of gender we use drop down list
        combo_dept['values'] = ("Information Technology","Computer Science","Civil","Mechanical","Electrical","Electronics")  # using readonly state because we dont want to edit the field after selecting it
        combo_dept.grid(row=8, column=1, pady=10, padx=20, sticky='w')
        lbl_pass = Label(self.Manage_Frame, text="Passing Year", font=("Times New Roman", 20, "bold"), bg="Grey24",fg="White")
        lbl_pass.grid(row=9, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_pass = Entry(self.Manage_Frame, textvariable=self.pass_var, font=("Times New Roman", 15, "bold"), bd=5,relief=GROOVE)
        txt_pass.grid(row=9, column=1, pady=10, padx=20, sticky='w')
        lbl_cgpa = Label(self.Manage_Frame, text="CGPA", font=("Times New Roman", 20, "bold"), bg="Grey24",fg="White")
        lbl_cgpa.grid(row=10, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        txt_cgpa = Entry(self.Manage_Frame, textvariable=self.cgpa_var, font=("Times New Roman", 15, "bold"), bd=5,relief=GROOVE)
        txt_cgpa.grid(row=10, column=1, pady=10, padx=20, sticky='w')
        # Label and text fields for ADDRESS
        lbl_address = Label(self.Manage_Frame, text="Address", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="White")
        lbl_address.grid(row=11, column=0, pady=10, padx=20, sticky='w')  # Sticky property used for the left alignment
        self.txt_address = Text(self.Manage_Frame,width=30,height=4,font=("Times New Roman", 15))
        self.txt_address.grid(row=11, column=1, pady=10, padx=20, sticky='w')
        # Button frame
        # creating button for various operations
        self.btn_Frame = Frame(self.root, bd=10,bg='grey24')
        self.btn_Frame.place(x=70, y=870, width=600, height=80)
        addbtn = Button(self.btn_Frame,text="ADD",width=8,bg='green',command=self.add_students).grid(row=0,column=0,padx=20)
        updatebtn = Button(self.btn_Frame, text="UPDATE", width=8,bg="yellow",command=self.update_data).grid(row=0, column=1, padx=20)
        deletebtn = Button(self.btn_Frame, text="DELETE", width=8,bg="red",command=self.delete_data).grid(row=0, column=2, padx=20)
        clearbtn = Button(self.btn_Frame, text="CLEAR", width=8,command=self.clear).grid(row=0, column=3, padx=20)
        generateidbtn = Button(self.btn_Frame, text="GENERATE ID", width=10,bg="blue",command=self.generate_id).grid(row=1, columnspan=4, padx=20,pady=10)
        #############################################3 Detail Frame
        # Responsible for data viewing and filtering
        Detail_Frame = Frame(self.root, bd=10, relief=RIDGE, bg='Grey24')
        Detail_Frame.place(x=700, y=115, width=1220, height=850)
        lbl_search = Label(Detail_Frame, text="Search Filter", font=("Times New Roman", 20, "bold"), bg="Grey24", fg="Yellow")
        lbl_search.grid(row=0, column=0, pady=10, padx=20, sticky='w')
        combo_search = ttk.Combobox(Detail_Frame,textvariable=self.search_by, font=("Times New Roman", 15, "bold"),state="readonly")  # We use combo box because of gender we use drop down list
        combo_search['values'] = ("roll_no","name","contact","course","dept","pass_year","cgpa")  # using readonly state because we dont want to edit the field after selecting it
        combo_search.grid(row=0, column=1, pady=10, padx=20, sticky='w')
        txt_search = Entry(Detail_Frame, textvariable=self.search_txt, font=("Times New Roman", 15, "bold"), bd=5, relief=RIDGE)
        txt_search.grid(row=0, column=2, pady=10, padx=20, sticky='w')
        searchbtn = Button(Detail_Frame, text="SEARCH", width=8,command=self.search_data).grid(row=0, column=3, padx=20)
        showallbtn = Button(Detail_Frame, text="SHOW ALL", width=8,command=self.fetch_data).grid(row=0, column=4, padx=20)
        ######################################################### Table frame(for grid)
        Table_Frame=Frame(Detail_Frame,bd=4,relief=SOLID,bg='White')
        Table_Frame.place(x=4,y=60,width=1190,height=765)
        # creating scrollbar for data scrolling
        scroll_x = Scrollbar(Table_Frame,orient=HORIZONTAL)
        scroll_y = Scrollbar(Table_Frame, orient=VERTICAL)
        self.Student_table = ttk.Treeview(Table_Frame,columns=("roll","name","email","gender","contact","dob","course","department","passyear","cgpa","address"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)
        scroll_x.pack(side=BOTTOM,fill=X)
        scroll_y.pack(side=RIGHT,fill=Y)
        scroll_x.config(command=self.Student_table.xview)
        scroll_y.config(command=self.Student_table.yview)
        self.Student_table.heading("roll",text="ROLL NO.")
        self.Student_table.heading("name", text="NAME")
        self.Student_table.heading("email", text="E-MAIL")
        self.Student_table.heading("gender", text="GENDER")
        self.Student_table.heading("contact", text="CONTACT NO.")
        self.Student_table.heading("dob", text="DOB")
        self.Student_table.heading("course", text="COURSE")
        self.Student_table.heading("department",text="DEPARTMENT")
        self.Student_table.heading("passyear", text="PASS YEAR")
        self.Student_table.heading("cgpa", text="CGPA")
        self.Student_table.heading("address", text="ADDRESS")
        self.Student_table['show']='headings'
        self.Student_table.column("roll", width=160)
        self.Student_table.column("name", width=240)
        self.Student_table.column("email", width=260)
        self.Student_table.column("gender", width=110)
        self.Student_table.column("contact", width=180)
        self.Student_table.column("dob", width=170)
        self.Student_table.column("course", width=120)
        self.Student_table.column("department", width=220)
        self.Student_table.column("passyear", width=120)
        self.Student_table.column("cgpa", width=100)
        self.Student_table.column("address", width=400)
        self.Student_table.pack(fill=BOTH,expand=1)  # For adjusting the screen
        self.Student_table.bind("<ButtonRelease-1>",self.get_cursor)
        self.fetch_data()
        self.root.mainloop()
####################################################### Function for data add
    def add_students(self):
        """Validate the form fields and insert a new student row.

        Shows an error dialog when a required field is empty or the contact
        number is not exactly 10 characters; otherwise inserts the row,
        refreshes the table and clears the form.
        """
        if self.Roll_No_var.get() == "" or self.name_var.get()=="" or self.contact_var.get()=="" or self.dob_var.get()=="":
            messagebox.showerror("Error","! All fields are required !")
        elif len(self.contact_var.get()) != 10:
            # NOTE(review): only the length is checked, not that the value
            # consists of digits — confirm whether stricter validation is wanted.
            messagebox.showerror("Error","! Contact number must be 10 digits !")
        else:
            # NOTE(review): database credentials are hard-coded here (and in
            # the other DB methods) — consider moving them to configuration.
            con = pymysql.connect(host='localhost',user="root",password="Rishi@2705",database="stm")
            cur = con.cursor()
            # Parameterised insert: values are passed separately from the SQL.
            cur.execute("insert into Students values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(
                self.Roll_No_var.get(),
                self.name_var.get(),
                self.email_var.get(),
                self.gender_var.get(),
                self.contact_var.get(),
                self.dob_var.get(),
                self.course_var.get(),
                self.dept_var.get(),
                self.pass_var.get(),
                self.cgpa_var.get(),
                self.txt_address.get('1.0',END)
            ))
            con.commit()
            self.fetch_data()
            self.clear()
            con.close()
            messagebox.showinfo("Success","Data added successfully")
def fetch_data(self):
con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
cur = con.cursor()
cur.execute("select * from Students")
rows = cur.fetchall()
if len(rows) != 0:
self.Student_table.delete(*self.Student_table.get_children())
for row in rows:
self.Student_table.insert('',END,values=row)
con.commit()
con.close()
def clear(self):
self.Roll_No_var.set(""),
self.name_var.set(""),
self.email_var.set(""),
self.gender_var.set(""),
self.contact_var.set(""),
self.dob_var.set(""),
self.course_var.set(""),
self.dept_var.set(""),
self.pass_var.set(""),
self.cgpa_var.set("")
self.txt_address.delete('1.0', END)
def get_cursor(self,ev):
cursor_row = self.Student_table.focus()
contents=self.Student_table.item(cursor_row)
row=contents['values']
self.Roll_No_var.set(row[0]),
self.name_var.set(row[1]),
self.email_var.set(row[2]),
self.gender_var.set(row[3]),
self.contact_var.set(row[4]),
self.dob_var.set(row[5]),
self.course_var.set(row[6]),
self.dept_var.set(row[7]),
self.pass_var.set(row[8]),
self.cgpa_var.set(row[9]),
self.txt_address.delete('1.0', END)
self.txt_address.insert(END,row[10])
    def update_data(self):
        """Update the row identified by the form's roll number with the
        current form values, then refresh the table and clear the form.

        NOTE(review): unlike add_students there is no empty-field
        validation here — an empty roll number silently updates nothing.
        """
        con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
        cur = con.cursor()
        # Parameterised UPDATE keyed on roll_no (the last placeholder).
        cur.execute("update Students set name=%s,email=%s,gender=%s,contact=%s,dob=%s,course=%s,dept=%s,pass_year=%s,cgpa=%s,address=%s where roll_no=%s", (
            self.name_var.get(),
            self.email_var.get(),
            self.gender_var.get(),
            self.contact_var.get(),
            self.dob_var.get(),
            self.course_var.get(),
            self.dept_var.get(),
            self.pass_var.get(),
            self.cgpa_var.get(),
            self.txt_address.get('1.0', END),
            self.Roll_No_var.get()
        ))
        con.commit()
        self.fetch_data()
        self.clear()
        con.close()
        messagebox.showinfo("Success","Data updated successfully")
    def delete_data(self):
        """Delete the student matching the form's roll number, then refresh.

        NOTE(review): no confirmation prompt and no feedback when the roll
        number matches nothing — confirm that this UX is intended.
        """
        con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
        cur = con.cursor()
        # The roll number is passed as a query parameter (not interpolated).
        cur.execute("delete from Students where roll_no=%s",self.Roll_No_var.get())
        con.commit()
        con.close()
        self.fetch_data()
        self.clear()
def search_data(self):
    """Search students by the selected column and refresh the table.

    BUGFIX: the search term was previously concatenated straight into the
    SQL string, which allowed SQL injection through the search box. The
    term is now passed as a bound query parameter. The column name still
    comes from self.search_by, which is assumed to be a read-only combobox
    of column names -- verify it cannot contain free text.
    """
    con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
    cur = con.cursor()
    column = str(self.search_by.get())
    term = str(self.search_txt.get())
    if column == "roll_no":
        cur.execute("select * from Students where roll_no=%s", (term,))
    else:
        cur.execute("select * from Students where " + column + " LIKE %s",
                    ("%" + term + "%",))
    rows = cur.fetchall()
    if len(rows) != 0:
        self.Student_table.delete(*self.Student_table.get_children())
        for row in rows:
            self.Student_table.insert('', END, values=row)
        con.commit()
    con.close()
def logout(self):
    """Close the admin window and relaunch the login screen."""
    # Destroy the whole Tk root, then start login.py as a fresh process.
    # NOTE(review): os.system with a hard-coded "python3" assumes it is on
    # PATH and that the working directory is the project root -- confirm.
    self.root.destroy()
    os.system("python3 login.py")
def generate_id(self):
    """Open the ID-card overlay for the student currently loaded in the form.

    If a student is selected (name and roll number non-empty) a QR code is
    generated, saved to "ID cards/<roll>-QR.png" and shown on the card;
    otherwise an advisory message is displayed. The GENERATE ID button
    re-invokes this method to refresh the card.
    """
    # Grey overlay that covers the manage area.
    self.Generate_Frame = Frame(self.root, bd=10, relief=RIDGE, bg='Grey24')
    self.Generate_Frame.place(x=0, y=115, width=700, height=850)
    # White card area inside the overlay.
    self.ID_Frame = Frame(self.Generate_Frame, bd=10, relief=SOLID, bg="White")
    self.ID_Frame.place(x=140, y=40, width=400, height=600)
    # Placeholder label that will hold the QR image once generated.
    self.qr_lbl = Label(self.ID_Frame, bd=10,)
    self.qr_lbl.place(x=108, y=100, width=180, height=180)
    qr_title = Label(self.ID_Frame, text="ID Card", font=("goudy old style", 20), bg="blue2", fg="white").pack(side=TOP, fill=X)
    # Static field labels on the left, values read from the form vars on the right.
    qr_lbl_name = Label(self.ID_Frame, text="Name: ", font=("Times New Roman", 20, "bold"), bg="white", fg="Black")
    qr_lbl_name.place(x=20, y=300)
    qr_txt_name = Label(self.ID_Frame, text=self.name_var.get(), font=("times new roman", 15), bg="white", fg="Black")
    qr_txt_name.place(x=120, y=305)
    qr_lbl_roll = Label(self.ID_Frame, text="Roll: ", font=("Times New Roman", 20, "bold"), bg="white", fg="Black")
    qr_lbl_roll.place(x=20, y=340)
    qr_txt_roll = Label(self.ID_Frame, text=self.Roll_No_var.get(), font=("times new roman", 15), bg="white", fg="Black")
    qr_txt_roll.place(x=120, y=345)
    qr_lbl_dept = Label(self.ID_Frame, text="Dept.: ", font=("Times New Roman", 20, "bold"), bg="white", fg="Black")
    qr_lbl_dept.place(x=20, y=380)
    qr_txt_dept = Label(self.ID_Frame, text=self.dept_var.get(), font=("times new roman", 15), bg="white", fg="Black")
    qr_txt_dept.place(x=120, y=385)
    qr_lbl_course = Label(self.ID_Frame, text="Course: ", font=("Times New Roman", 20, "bold"), bg="white", fg="Black")
    qr_lbl_course.place(x=20, y=420)
    qr_txt_course = Label(self.ID_Frame, text=self.course_var.get()+" ("+self.pass_var.get()+")", font=("times new roman", 15), bg="white", fg="Black")
    qr_txt_course.place(x=120, y=425)
    qr_lbl_gender = Label(self.ID_Frame, text="Gender: ", font=("Times New Roman", 20, "bold"), bg="white", fg="Black")
    qr_lbl_gender.place(x=20, y=460)
    qr_txt_gender = Label(self.ID_Frame, text=self.gender_var.get(), font=("times new roman", 15), bg="white", fg="Black")
    qr_txt_gender.place(x=120, y=465)
    back_btn = Button(self.Generate_Frame, text="BACK", width=10, bg="yellow", command=self.back).place(x=100, y=700)
    generateidbtn = Button(self.Generate_Frame, text="GENERATE ID", width=10, bg="blue", command=self.generate_id).place(x=280, y=700)
    download_btn = Button(self.Generate_Frame, text="DOWNLOAD", width=10, bg="yellow", command=self.download).place(x=460, y=700)
    if self.name_var.get()=="" or self.Roll_No_var.get()=="":
        # No student selected yet -- tell the user to pick one first.
        fail_lbl = Label(self.Generate_Frame, text="No ID generated !!", font=("goudy old style", 20), bg="grey24", fg="red").place(x=220, y=750)
        advice_lbl = Label(self.Generate_Frame, text="Please select a student from side table ---> ", font=("goudy old style", 20), bg="grey24", fg="red").place(x=60, y=790)
    else:
        ############################################################################ QR code part
        # Encode the card's text fields into the QR payload.
        qr_data = (f"Name: {self.name_var.get()}\nRoll: {self.Roll_No_var.get()}\nDepartment: {self.dept_var.get()}\nCourse: {self.course_var.get()} ({self.pass_var.get()})\nGender: {self.gender_var.get()}")
        self.qr_code = qrcode.make(qr_data)
        self.qr_code = resizeimage.resize_cover(self.qr_code, [180, 180])
        # Saved to disk so download() can paste it into the PNG card later.
        self.qr_code.save("ID cards/"+self.Roll_No_var.get()+"-QR.png")
        # Keep a reference on self, or Tk garbage-collects the image.
        self.qrimage = ImageTk.PhotoImage(self.qr_code)
        self.qr_lbl.config(image=self.qrimage)
        success_lbl = Label(self.Generate_Frame, text="ID generated successfully !", font=("goudy old style", 20), bg="grey24", fg="green").place(x=160, y=750)
def back(self):
    """Dismiss the ID-card overlay and return to the manage view."""
    self.Generate_Frame.destroy()
def download(self):
    """Render the selected student's ID card to "ID cards/<roll>.png".

    Draws the card with PIL, pastes in the QR image previously written by
    generate_id(), then deletes the temporary QR file. Assumes
    'timesnewroman.ttf' is resolvable by PIL -- TODO confirm on target OS.
    """
    tmpdate = datetime.now()
    gen_date = tmpdate.strftime("ID generation date: %d-%m-%y (%I:%M:%S %p)")
    image = Image.new('RGB', (400, 600), (255, 255, 255))
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype('timesnewroman.ttf', size=50)
    # Blue title band across the top (width 600 overshoots the 400px
    # canvas; PIL clips it, so the band still fills the top edge).
    shape = [(0, 0), (600, 55)]
    draw.rectangle(shape, fill="blue")
    # Thin black border around the card body.
    shape = [(5, 60), (394, 564)]
    draw.rectangle(shape, outline="black", width=3)
    # Blue footer band. BUGFIX: the corners were given as
    # [(0,600),(400,568)] with y0 > y1, which PIL does not render
    # (newer Pillow rejects inverted corners), leaving the white date
    # text invisible on the white background.
    shape = [(0, 568), (400, 600)]
    draw.rectangle(shape, fill="blue")
    (x, y) = (105, 0)
    draw.text((x, y), "ID CARD", fill="white", font=font, align="center")
    font = ImageFont.truetype('timesnewroman.ttf', size=20)
    # Field labels and their values, matching the on-screen card layout.
    draw.text((20, 300), "Name: ", fill="black", font=font)
    draw.text((120, 300), self.name_var.get(), fill="black", font=font)
    draw.text((20, 340), "Roll No.: ", fill="black", font=font)
    draw.text((120, 340), self.Roll_No_var.get(), fill="black", font=font)
    draw.text((20, 380), "Dept.: ", fill="black", font=font)
    draw.text((120, 380), self.dept_var.get(), fill="black", font=font)
    draw.text((20, 420), "Course: ", fill="black", font=font)
    draw.text((120, 420), self.course_var.get()+" ("+self.pass_var.get()+")", fill="black", font=font)
    draw.text((20, 460), "Gender: ", fill="black", font=font)
    draw.text((120, 460), self.gender_var.get(), fill="black", font=font)
    # Generation timestamp inside the footer band.
    draw.text((40, 572), gen_date, fill="white", font=ImageFont.truetype('timesnewroman.ttf', size=18), align="center")
    # Paste the QR code written by generate_id() and frame it.
    im = Image.open("ID cards/"+self.Roll_No_var.get()+"-QR.png")
    image.paste(im, (110, 90))
    shape = [(110, 90), (290, 270)]
    draw.rectangle(shape, outline="black", width=3)
    image.save("ID cards/"+self.Roll_No_var.get()+".png")
    im.close()
    # The standalone QR file is only an intermediate artifact.
    os.remove("ID cards/"+self.Roll_No_var.get()+"-QR.png")
# root = Tk() # For tkinter invoke
# ob = Students(root) # make an object(ob) of that class and pass the tkinter(root)
# root.mainloop() # for run tkinter in main loop
# comment this because we dont want to access this panel directly
# this panel can only be access with login page
# NOTE: If want to access this page from here uncomment above thing, pass root as an argument in the class, remove mainloop execution before all def started
# and in class dont initialize tk(), replace tk() with root in the class first line
|
{"/login.py": ["/Student.py"]}
|
5,229
|
SRD2705/Student-Database
|
refs/heads/main
|
/register.py
|
import os
from tkinter import *
from tkinter import ttk
from PIL import ImageTk
from tkinter import messagebox
import pymysql
class Login_system:
    """Admin registration window.

    Collects personal details plus credentials, inserts one row into the
    `admins` table, then relaunches the login screen.
    """

    def __init__(self, root):
        """Build the registration form on the given Tk root window."""
        self.root = root
        self.root.title("Registration Window")
        self.root.geometry("1920x1080+0+0")
        ################################# All images
        # Icons are kept on self so Tkinter does not garbage-collect them.
        self.bg_icon = ImageTk.PhotoImage(file="images/bg.jpg")
        self.user_icon = PhotoImage(file="images/username.png")
        self.mail_icon = PhotoImage(file="images/mail.png")
        self.gender_icon = PhotoImage(file="images/gender.png")
        self.contact_icon = PhotoImage(file="images/contact.png")
        self.id_icon = PhotoImage(file="images/id.png")
        self.pass_icon = PhotoImage(file="images/pass.png")
        self.logo_icon = PhotoImage(file="images/register.png")
        ############################################# Variables
        # One StringVar per form field; read back in register().
        self.name_var = StringVar()
        self.mail_var = StringVar()
        self.gender_var = StringVar()
        self.contact_var = StringVar()
        self.idtype_var = StringVar()
        self.idnum_var = StringVar()
        self.user_var = StringVar()
        self.pass_var = StringVar()
        bg_label = Label(self.root, image=self.bg_icon).pack()
        title = Label(self.root, text="Registration Panel", font=("Times new roman", 40, "bold"), bg="grey14", fg="yellow3", bd=10, relief=GROOVE)
        title.place(x=0, y=0, relwidth=1)
        Login_Frame = Frame(self.root, bg="forest green")
        Login_Frame.place(x=420, y=120)
        logo_lbl = Label(Login_Frame, image=self.logo_icon, bd=0).grid(row=0, columnspan=6, pady=20)
        # Row 1: full name and e-mail.
        name_lbl = Label(Login_Frame, text="Full name", image=self.user_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=1, column=0, padx=0, pady=0)
        txt_name = Entry(Login_Frame, bd="5", textvariable=self.name_var, relief=GROOVE, font=("", 15)).grid(row=1, column=1, padx=20)
        mail_lbl = Label(Login_Frame, text="Email", image=self.mail_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=1, column=3, padx=0, pady=0)
        txt_mail = Entry(Login_Frame, bd="5", textvariable=self.mail_var, relief=GROOVE, font=("", 15)).grid(row=1, column=4, padx=20)
        # Row 3: gender drop-down and contact number.
        lbl_gender = Label(Login_Frame, text="Gender", image=self.gender_icon, compound=LEFT, font=("Times New Roman", 20, "bold"), bg="forest green")
        lbl_gender.grid(row=3, column=0, pady=0, padx=0, sticky='w')  # Sticky property used for the left alignment
        combo_gender = ttk.Combobox(Login_Frame, textvariable=self.gender_var, font=("Times New Roman", 20, "bold"), state="readonly")  # We use combo box because of gender we use drop down list
        combo_gender['values'] = ("Male", "Female", "Other")  # using readonly state because we dont want to edit the field after selecting it
        combo_gender.grid(row=3, column=1, padx=0, sticky='w')
        contact_lbl = Label(Login_Frame, text="Contact Number", image=self.contact_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=3, column=3, padx=0, pady=0)
        txt_contact = Entry(Login_Frame, bd="5", textvariable=self.contact_var, relief=GROOVE, font=("", 15)).grid(row=3, column=4, padx=0)
        # Row 4: identity document type and number.
        lbl_idtype = Label(Login_Frame, text="ID Type", image=self.id_icon, compound=LEFT, font=("Times New Roman", 20, "bold"), bg="forest green")
        lbl_idtype.grid(row=4, column=0, padx=0, sticky='w')  # Sticky property used for the left alignment
        combo_idtype = ttk.Combobox(Login_Frame, textvariable=self.idtype_var, font=("Times New Roman", 20, "bold"), state="readonly")  # We use combo box because of gender we use drop down list
        combo_idtype['values'] = ("Aadhaar Card", "Passport", "Pancard", "Voter ID")  # using readonly state because we dont want to edit the field after selecting it
        combo_idtype.grid(row=4, column=1, padx=0, sticky='w')
        idnum_lbl = Label(Login_Frame, text="ID Number", image=self.id_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=4, column=3, padx=20, pady=10)
        txt_idnum = Entry(Login_Frame, bd="5", textvariable=self.idnum_var, relief=GROOVE, font=("", 15)).grid(row=4, column=4, padx=0)
        # Row 6: credentials; password entry is masked with '*'.
        user_lbl = Label(Login_Frame, text="Username", image=self.user_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=6, column=0, padx=0, pady=0)
        txt_user = Entry(Login_Frame, bd="5", textvariable=self.user_var, relief=GROOVE, font=("", 15)).grid(row=6, column=1, padx=0)
        pass_lbl = Label(Login_Frame, text="Password", image=self.pass_icon, compound=LEFT, font=("times new roman", 20, "bold"), bg="forest green").grid(row=6, column=3, padx=0, pady=0)
        txt_pass = Entry(Login_Frame, bd="5", show="*", textvariable=self.pass_var, relief=GROOVE, font=("", 15)).grid(row=6, column=4, padx=0)
        btn_register = Button(Login_Frame, text="Register", command=self.register, width=15, font=("times new roman", 15, "bold"), bg="white", fg="Blue").grid(row=8, columnspan=8, pady=10)

    def register(self):
        """Validate the form and insert a new row into `admins`.

        Shows an error dialog when any field is empty or the contact
        number is not 10 digits; on success, clears the form and
        relaunches the login screen as a new process.
        """
        if self.name_var.get() == "" or self.mail_var.get() == "" or self.gender_var.get() == "" or self.contact_var.get() == "" or self.idtype_var.get() == "" or self.idnum_var.get() == "" or self.user_var.get() == "" or self.pass_var.get() == "":
            messagebox.showerror("Error", "! All fields are required !")
        elif len(self.contact_var.get()) != 10:
            messagebox.showerror("Error", "! Contact number must be 10 digits !")
        else:
            # NOTE(review): DB credentials are hard-coded and the password
            # is stored in plain text -- consider hashing before insert.
            con = pymysql.connect(host='localhost', user="root", password="Rishi@2705", database="stm")
            cur = con.cursor()
            cur.execute("insert into admins values(%s,%s,%s,%s,%s,%s,%s,%s)", (
                self.name_var.get(),
                self.mail_var.get(),
                self.gender_var.get(),
                self.contact_var.get(),
                self.idtype_var.get(),
                self.idnum_var.get(),
                self.user_var.get(),
                self.pass_var.get()
            ))
            con.commit()
            self.clear()
            con.close()
            messagebox.showinfo("Success", "User registered successfully")
            # Replace this window with the login screen.
            self.root.destroy()
            os.system("python3 login.py")

    def clear(self):
        """Reset every form field to an empty string."""
        self.name_var.set(""),
        self.mail_var.set(""),
        self.gender_var.set(""),
        self.contact_var.set(""),
        self.idtype_var.set(""),
        self.idnum_var.set(""),
        self.user_var.set(""),
        self.pass_var.set("")
# Launch the registration window only when this file is run as a script,
# so importing this module (e.g. from login.py) does not open a window.
if __name__ == "__main__":
    root = Tk()
    obj = Login_system(root)
    root.mainloop()
|
{"/login.py": ["/Student.py"]}
|
5,232
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/api/views.py
|
from quiz_app.models import Question, Quiz, Answer
from .serializers import QuestionSerializer, QuizSerializer, AnswerSerializer
from rest_framework import generics
class QuestionList(generics.ListCreateAPIView):
    """API endpoint: GET lists all questions, POST creates one."""
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
class QuizList(generics.ListCreateAPIView):
    """API endpoint: GET lists all quizzes, POST creates one."""
    queryset = Quiz.objects.all()
    serializer_class = QuizSerializer
class AnswerList(generics.ListCreateAPIView):
    """API endpoint: GET lists all answers, POST creates one."""
    queryset = Answer.objects.all()
    serializer_class = AnswerSerializer
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,233
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/api/serializers.py
|
from rest_framework import serializers
from quiz_app.models import Question, Quiz, Answer
class QuestionSerializer(serializers.ModelSerializer):
    """Serializes Question rows for the list/create API."""
    class Meta:
        model = Question
        # NOTE(review): the model declares a ForeignKey named `quiz`;
        # confirm DRF accepts the `quiz_id` spelling in `fields`.
        fields = ['id', 'question_text', 'publication_date', 'category', 'quiz_id']
class QuizSerializer(serializers.ModelSerializer):
    """Serializes Quiz rows for the list/create API."""
    class Meta:
        model = Quiz
        fields = ['id', 'name', 'publication_date']
class AnswerSerializer(serializers.ModelSerializer):
    """Serializes Answer rows for the list/create API."""
    class Meta:
        model = Answer
        # NOTE(review): the model declares a ForeignKey named `question`;
        # confirm DRF accepts the `question_id` spelling in `fields`.
        fields = ['id', 'answer_text', 'question_id', 'answer_int']
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,234
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/quiz_app/forms.py
|
from django import forms
class QuizForm(forms.Form):
    """Quiz form with three radio-select questions.

    Requires two keyword arguments at construction time:
    ``answers`` (iterable of Answer objects) and ``questions``
    (indexable collection of at least three Question objects).
    Every question shares the same choice list, built from ``answers``.
    """

    question_1 = forms.ChoiceField(widget=forms.RadioSelect)
    question_2 = forms.ChoiceField(widget=forms.RadioSelect)
    question_3 = forms.ChoiceField(widget=forms.RadioSelect)

    def __init__(self, *args, **kwargs):
        self.answers = kwargs.pop("answers")
        self.questions = kwargs.pop("questions")
        # BUGFIX: the original called super().__init__(*args, *kwargs),
        # which unpacks the kwargs *keys* as extra positional arguments;
        # remaining keyword arguments must be forwarded with **kwargs.
        super().__init__(*args, **kwargs)
        choices = [(answer, answer.answer_text) for answer in self.answers]
        for number in range(3):
            field_name = f"question_{number + 1}"
            self.fields[field_name].choices = choices
            self.fields[field_name].label = self.questions[number].question_text
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,235
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/api/urls.py
|
from django.urls import path
from . import views
# DRF list/create endpoints, one per model.
urlpatterns = [
    path('questions/', views.QuestionList.as_view()),
    path('quiz/', views.QuizList.as_view()),
    path('answer/', views.AnswerList.as_view()),
]
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,236
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/quiz_app/views.py
|
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from .models import Question, Answer, Quiz
from .forms import QuizForm
from django.contrib.auth import get_user_model
User = get_user_model()
@login_required
def select_quiz(request):
    """Render the quiz-selection page listing every quiz."""
    return render(request, 'quiz_app/selector.html',
                  context={"quizzes": Quiz.objects.all()})
@login_required
def render_quiz(request, quiz_id):
    """Display the quiz form for *quiz_id*; on a valid POST, store the
    three picked answers in the session and redirect to the results page.
    """
    questions = Question.objects.filter(quiz_id=quiz_id)
    quiz = Quiz.objects.get(pk=quiz_id)
    questions_id = [question.id for question in questions]
    # All answers belonging to any question of this quiz become the shared
    # choice list of the form (see QuizForm).
    answers = Answer.objects.filter(question_id__in=questions_id)
    if request.method == 'POST':
        form = QuizForm(request.POST, answers=answers, questions=questions)
        if form.is_valid():
            # Submitted choice values are stashed in the session so
            # show_results() can look the Answer rows back up.
            request.session["question_1"] = form.cleaned_data["question_1"]
            request.session["question_2"] = form.cleaned_data["question_2"]
            request.session["question_3"] = form.cleaned_data["question_3"]
            return redirect('quiz_app:show-results', quiz_id=quiz_id)
    else:
        form = QuizForm(answers=answers, questions=questions)
    context = {"questions": questions, "form": form, "quiz": quiz}
    return render(request, "quiz_app/index.html", context=context)
@login_required
def show_results(request, quiz_id):
    """Score the quiz answers stored in the session, credit the logged-in
    user with the points, and render the result page.
    """
    questions = Question.objects.filter(quiz_id=quiz_id)
    questions_id = [question.id for question in questions]
    try:
        # BUGFIX: use indexing so a missing key actually raises KeyError;
        # the original used session.get(), which returns None and made
        # this except branch unreachable (and later crashed in the
        # Answer lookups below).
        selected_choice_1 = request.session['question_1']
        selected_choice_2 = request.session['question_2']
        selected_choice_3 = request.session['question_3']
    except KeyError:
        return HttpResponse("There is no choice")
    # NOTE(review): assumes answer_text values are unique across Answer
    # rows; Answer.objects.get() raises MultipleObjectsReturned otherwise.
    selected_choice_obj_1 = Answer.objects.get(answer_text=selected_choice_1)
    selected_choice_obj_2 = Answer.objects.get(answer_text=selected_choice_2)
    selected_choice_obj_3 = Answer.objects.get(answer_text=selected_choice_3)
    selected_choices_question_id = [selected_choice_obj_1.question_id, selected_choice_obj_2.question_id,
                                    selected_choice_obj_3.question_id]
    # A pick counts as correct when the chosen answer belongs to the
    # question at the same position -- assumes the form field order
    # matches the queryset order (TODO confirm).
    good_answers = 0
    for question, choice in zip(questions_id, selected_choices_question_id):
        if question == choice:
            good_answers += 1
    if request.user.is_authenticated:
        request.user.points = request.user.points + good_answers
        request.user.save()
    context = {"questions_id": questions_id, "selected_choices_id": selected_choices_question_id,
               "good_answers": good_answers}
    return render(request, "quiz_app/result.html", context=context)
@login_required
def show_ranking(request):
    """Show all users ordered by accumulated points, highest first."""
    ranked = sorted(User.objects.all(), key=lambda u: u.points, reverse=True)
    return render(request, "quiz_app/ranking.html", context={"users": ranked})
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,237
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/accounts/views.py
|
from .forms import CustomUserCreationForm
from django.shortcuts import render, redirect
def home(request):
    """Render the landing page."""
    return render(request, 'accounts/home.html')
def signup(request):
    """Render the signup form; create the user and redirect home on success.

    BUGFIX: the form was previously bound to request.POST even on GET,
    which displayed "this field is required" errors before the user had
    typed anything. Bind only on POST; render an unbound form otherwise.
    """
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = CustomUserCreationForm()
    return render(request, 'accounts/signup.html', {'form': form})
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,238
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/quiz_app/models.py
|
from django.db import models
class Quiz(models.Model):
    """A quiz: a named collection of questions."""
    name = models.CharField(max_length=300)
    publication_date = models.DateField()

    def __str__(self):
        return self.name
class Question(models.Model):
    """One question belonging to a Quiz."""
    question_text = models.CharField(max_length=300)
    publication_date = models.DateField()
    category = models.CharField(max_length=100)
    # default=1 assumes a Quiz with pk 1 exists -- TODO confirm fixtures.
    quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE, default=1)

    def __str__(self):
        return self.question_text
class Answer(models.Model):
    """A possible answer to a Question, with a text and/or integer payload."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    answer_text = models.CharField(max_length=100, blank=True)
    answer_int = models.IntegerField(blank=True, default=0)

    def __str__(self):
        return self.answer_text
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,239
|
Daniel98p/QuizApp
|
refs/heads/master
|
/quiz/quiz_app/urls.py
|
from django.urls import path
from .views import select_quiz, render_quiz, show_results, show_ranking
app_name = "quiz_app"  # namespace used by reverse()/redirect() callers
urlpatterns = [
    path('', select_quiz, name='select-quiz'),
    path('<int:quiz_id>/', render_quiz, name="render-quiz"),
    path('<int:quiz_id>/results/', show_results, name='show-results'),
    path('ranking/', show_ranking, name='show-ranking'),
]
|
{"/quiz/api/views.py": ["/quiz/api/serializers.py"], "/quiz/quiz_app/views.py": ["/quiz/quiz_app/models.py", "/quiz/quiz_app/forms.py"], "/quiz/quiz_app/urls.py": ["/quiz/quiz_app/views.py"]}
|
5,241
|
The01337/sparkshine
|
refs/heads/master
|
/porch.py
|
import asyncio
import aiohttp
import datetime
import json
import requests
import logging
import light_control
"""
If it's dark, nobody was home and someone is home now - light on
"""
# Log everything (DEBUG and up) to lights.log with ISO-style timestamps.
logging.basicConfig(
    filename='lights.log', level=logging.DEBUG,
    format='%(asctime)s %(levelname)s %(name)s %(message)s',
    datefmt='%Y-%m-%dT%H:%M:%S'
)
# NOTE(review): these two module globals are not referenced anywhere in
# this file -- occupancy state lives in main_loop(); confirm before removing.
is_dark = False
last_home = None
# Sunrise/sunset web API queried by get_daylight().
DAYLIGHT_API = 'http://api.sunrise-sunset.org/json'
CHECK_INTERVAL = 5  # Anyone came home within <minutes>?
def read_settings():
    """Load and return the JSON settings from ./settings.json."""
    with open('./settings.json') as handle:
        return json.load(handle)
def parse_date(date_string):
    """
    Parse a UTC date/time string into a timezone-naive datetime object.

    :param date_string: Date/time string in the exact form
        ``YYYY-MM-DDTHH:MM:SS+00:00`` (the UTC offset must be literal;
        any other offset raises).
    :return: parsed naive ``datetime.datetime``
    :raises ValueError: if the string does not match the format.
    """
    # Removed the commented-out manual '%z' tokenizer that predated this
    # fixed-offset format string.
    return datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S+00:00')
def get_daylight(latitude, longitude, apiurl=DAYLIGHT_API):
    """
    Retrieve today's daylight info from the sunrise-sunset Web API.

    :param apiurl: Web API URL
    :param latitude: Location coordinates
    :param longitude: Location coordinates
    :return: tuple (start_dark, end_dark): nautical twilight end
        (= no longer dark in the morning... i.e. evening darkness start)
        and nautical twilight begin (= end of darkness), as naive datetimes.
    """
    params = {'lat': latitude, 'lng': longitude, 'formatted': 0}
    payload = requests.get(apiurl, params=params).json()
    results = payload['results']
    start_dark = parse_date(results['nautical_twilight_end'])
    end_dark = parse_date(results['nautical_twilight_begin'])
    return start_dark, end_dark
def check_darkness(dt, latitude, longitude):
    """
    Check whether it is dark at the given location and time.

    :param dt: naive UTC datetime to check
    :param latitude: location's latitude
    :param longitude: location's longitude
    :return: True if *dt* falls outside today's daylight window
    """
    start_dark, end_dark = get_daylight(latitude=latitude, longitude=longitude)
    return dt < end_dark or dt > start_dark
def read_leases(filepath, macs=None):
    """
    Parse an ISC dhcpd.leases file into a list of lease dicts.

    :param filepath: path to the leases file
    :param macs: optional list of MAC addresses; when given (and
        non-empty), only leases for those MACs are returned. BUGFIX: the
        default was a mutable ``[]``; ``None`` avoids the shared-default
        pitfall while keeping identical behavior (both are falsy).
    :return: list of dicts with keys 'ip', 'ltt' (last transaction time
        as a naive datetime) and 'mac'
    """
    entries = []
    with open(filepath) as leases:
        entry = {}
        for line in leases:
            if line.startswith('lease'):
                # New lease block: flush any block still being collected.
                if entry:
                    entries.append(entry)
                entry = {'ip': line.split()[1]}
            elif line.strip() == '}':
                # End of a lease block. BUGFIX: the original compared the
                # raw line (which still carries its newline) and never
                # matched; the entry was only flushed at the next 'lease'
                # line or at EOF, which produced the same output but left
                # this branch dead.
                if entry:
                    entries.append(entry)
                entry = {}
            if not entry:
                continue
            line = line.strip()
            if line.startswith('cltt'):
                # 'cltt <weekday> YYYY/MM/DD HH:MM:SS;' -> join date+time.
                entry['ltt'] = datetime.datetime.strptime(
                    'T'.join(line.split()[2:]),
                    '%Y/%m/%dT%H:%M:%S;')
            elif line.startswith('hardware ethernet'):
                entry['mac'] = line.split()[-1].rstrip(';')
        if entry:
            entries.append(entry)
    if macs:
        return [entry for entry in entries if entry['mac'] in macs]
    return entries
def anyone_home(dt, leases):
    """
    Did anyone just come home at this time?

    :param dt: datetime object to check
    :param leases: lease dicts with an 'ltt' datetime each
    :return: True if any lease was renewed within CHECK_INTERVAL minutes
        before *dt*, False otherwise
    """
    window = datetime.timedelta(minutes=CHECK_INTERVAL)
    return any(dt - lease['ltt'] <= window for lease in leases)
async def turnon_light(settings):
    """Connect to the gateway described in *settings* and switch the
    first discovered light on (no-op when no lights are found)."""
    api = await light_control.get_api(
        settings['gateway'], settings['identity'], settings['key']
    )
    lights = await light_control.get_lights(api)
    if lights:
        await light_control.control_light(lights[0], api, True)
async def main_loop(loop):
    """Poll DHCP leases forever; turn the porch light on when someone
    arrives home after dark.

    :param loop: event loop -- accepted but not used inside the coroutine.
    """
    is_home_occupied = False
    settings = read_settings()
    while True:
        logging.debug('Running check loop!')
        now = datetime.datetime.utcnow()
        leases = read_leases(
            filepath=settings['leases_file'], macs=settings['macs']
        )
        if not is_home_occupied and anyone_home(now, leases):
            # Nobody was home but came home within 5 minutes ago and it's dark
            logging.info('Someone just came home!')
            is_home_occupied = True
            if check_darkness(now, settings['latitude'], settings['longitude']):
                logging.debug('And it\'s dark! Turn the light on!')
                await turnon_light(settings)
        elif is_home_occupied and not anyone_home(now, leases) and not check_darkness(
            now, settings['latitude'], settings['longitude']
        ):
            # Someone was home but nobody home now AND it's not dark
            logging.info('Assuming home is empty now')
            is_home_occupied = False
        else:
            logging.debug('Nothing happened. Sleeping...')
        # Sleep each pass so the loop polls roughly every 5 seconds.
        await asyncio.sleep(5)
if __name__ == '__main__':
    # Run the watcher until interrupted.
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10 -- asyncio.run(main_loop(...)) is the modern form.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main_loop(loop))
    loop.close()
|
{"/porch.py": ["/light_control.py"], "/tests/test_porch.py": ["/porch.py"]}
|
5,242
|
The01337/sparkshine
|
refs/heads/master
|
/light_control.py
|
from pytradfri import Gateway
from pytradfri.api.aiocoap_api import APIFactory
from pytradfri.error import PytradfriError
import asyncio
from asyncio import ensure_future
@asyncio.coroutine
def get_api(hostname, identity, psk):
api_factory = APIFactory(host=hostname, psk_id=identity, psk=psk)
return api_factory.request
@asyncio.coroutine
def get_lights(api):
gateway = Gateway()
devices_commands = yield from api(gateway.get_devices())
devices = yield from api(devices_commands)
return [dev for dev in devices if dev.has_light_control]
@asyncio.coroutine
def control_light(light, api, state_on=True):
def observe_callback(updated_device):
print("Received message for: %s" % updated_device.light_control.lights[0])
def observe_err_callback(err):
print('observe error:', err)
observe_command = light.observe(
observe_callback, observe_err_callback, duration=120)
# Start observation as a second task on the loop.
ensure_future(api(observe_command))
# Yield to allow observing to start.
yield from asyncio.sleep(0)
if state_on:
light.light_control.set_dimmer(100)
else:
light.light_control.set_dimmer(0)
|
{"/porch.py": ["/light_control.py"], "/tests/test_porch.py": ["/porch.py"]}
|
5,243
|
The01337/sparkshine
|
refs/heads/master
|
/tests/test_porch.py
|
import datetime
import unittest
import json
import porch
class PorchTest(unittest.TestCase):
    """Tests for porch.py.

    NOTE(review): these tests read ./settings.json and ./dhcpd.leases.bak
    from the working directory, and the daylight tests hit the live
    sunrise-sunset web API -- they are integration tests, not pure units.
    """

    def test_getdaylight(self):
        """get_daylight returns a non-None (start, end) pair."""
        with open('./settings.json') as f:
            settings = json.load(f)
        start, end = porch.get_daylight(latitude=settings['latitude'], longitude=settings['longitude'])
        self.assertIsNotNone(start)
        self.assertIsNotNone(end)

    def test_parsedate(self):
        """parse_date accepts exactly the +00:00 format and rejects the rest."""
        self.assertIsNotNone(porch.parse_date('2018-05-15T01:31:14+00:00'))
        self.assertIsNotNone(porch.parse_date('1018-02-12T21:31:14+00:00'))
        self.assertIsNotNone(porch.parse_date('2018-05-15T23:31:14+00:00'))
        self.assertRaises(ValueError, porch.parse_date, '2018-05-35T01:31:14+00:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-02-30T01:31:14+00:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-05-15 T 01:31:14+00:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-05-15T31:31:14+00:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-05-15T01:71:14+00:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-05-15T01:31:14+01:00')
        self.assertRaises(ValueError, porch.parse_date, '2018-05-15T01:31:14+things')

    def test_checkdarkness(self):
        """Midnight-ish hours are dark, midday is not, for the configured spot."""
        # We are not considering to be in locations where arctic days/nights are happening
        with open('./settings.json') as f:
            settings = json.load(f)
        now = datetime.datetime.utcnow()
        self.assertEqual(
            porch.check_darkness(now.replace(hour=0), latitude=settings['latitude'], longitude=settings['longitude']),
            # Should be dark minutes AFTER midnight
            True
        )
        self.assertEqual(
            porch.check_darkness(now.replace(hour=13), latitude=settings['latitude'], longitude=settings['longitude']),
            # Should NOT be dark minutes after noon
            False
        )
        self.assertEqual(
            porch.check_darkness(now.replace(hour=23), latitude=settings['latitude'], longitude=settings['longitude']),
            # Should be dark minutes BEFORE midnight
            True
        )

    def test_readleases(self):
        """The bundled backup leases file parses into the two known leases."""
        with open('./settings.json') as f:
            settings = json.load(f)
        output = porch.read_leases('./dhcpd.leases.bak', settings['macs'])
        self.assertEqual(
            output, [
                {'mac': settings['macs'][0], 'ltt': datetime.datetime(2018, 5, 15, 5, 39, 42), 'ip': '192.168.0.10'},
                {'mac': settings['macs'][1], 'ltt': datetime.datetime(2018, 5, 15, 5, 40, 5), 'ip': '192.168.0.11'}
            ]
        )

    def test_anyonehome(self):
        """anyone_home is True only within CHECK_INTERVAL of a lease renewal."""
        with open('./settings.json') as f:
            settings = json.load(f)
        leases = porch.read_leases('./dhcpd.leases.bak', settings['macs'])
        dt = datetime.datetime(2018, 5, 15, 5, 43, 00)
        self.assertTrue(porch.anyone_home(dt, leases))
        dt = datetime.datetime(2018, 5, 15, 5, 45, 00)
        self.assertTrue(porch.anyone_home(dt, leases))
        dt = datetime.datetime(2018, 5, 15, 5, 50, 00)
        self.assertFalse(porch.anyone_home(dt, leases))
        dt = datetime.datetime.utcnow()
        self.assertFalse(porch.anyone_home(dt, leases))
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(PorchTest)
    # BUGFIX: TestSuite.run() requires a TestResult; the original passed
    # None, which crashed with AttributeError before any test ran. A
    # TextTestRunner supplies the result object and prints a report.
    unittest.TextTestRunner(verbosity=2).run(suite)
|
{"/porch.py": ["/light_control.py"], "/tests/test_porch.py": ["/porch.py"]}
|
5,246
|
marialui/LB2
|
refs/heads/master
|
/pssm_parsing.py
|
#!/usr/bin/python
import sys
import numpy as np
import pandas as pd
import json
def parsingpssm(data, out):
    """Extract the 20-column substitution profile from a raw PSSM frame
    and write it to *out* as a plain-text table.

    Columns 19 .. len-3 of *data* (exactly 20 of them) are relabelled with
    the one-letter amino-acid codes, the 4-level row index is reduced to
    its third level, and the scores are rescaled from percentages to
    0-1 fractions rounded to two decimals.

    :param data: pandas DataFrame with a 4-level MultiIndex and at least
        22 columns (assumed PSSM layout -- TODO confirm upstream format)
    :param out: output file path
    """
    profile = data[data.columns[19:-2]]
    # BUGFIX: set_axis(..., inplace=False) -- the `inplace` keyword was
    # removed in pandas 2.0; returning a new frame is the default anyway.
    profile = profile.set_axis(
        ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
         'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'], axis=1)
    profile = profile.droplevel([0, 1, 3])
    profile = profile.div(100).round(2)
    # `with` guarantees the handle is closed even if to_string() raises.
    with open(out, 'w') as output_file:
        output_file.write(profile.to_string())
if __name__ == "__main__":
    # CLI usage: pssm_parsing.py <input_tsv> <output_file>
    frame = pd.read_csv(sys.argv[1], sep='\t', index_col=None)
    parsingpssm(frame, sys.argv[2])
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,247
|
marialui/LB2
|
refs/heads/master
|
/functions.py
|
#!/usr/bin/python
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
# this will take as an input 2 files: the first one with all the ss , the second is a fasta file
# and will return a merged file where for each id the secondary structure and the fasta is reported
def merger(f1, f2):
    """Merge a secondary-structure file and a fasta file on their '>' ids.

    For every header line in *f1* this writes: the header, the line that
    follows it in *f1* (the secondary structure), and the line that
    follows the matching header in *f2* (the fasta sequence).

    :param f1: open file of (header, ss) line pairs
    :param f2: open file of (header, fasta) line pairs
    :return: the open "maintext" file handle (mode w+) containing the merge
    """
    l1 = f1.readlines()
    l2 = f2.readlines()
    f3 = open("maintext", "w+")
    for i in l1:
        if i[0] == ">":
            f3.write(i)
            # NOTE: list.index() assumes header lines are unique per file.
            f3.write(l1[l1.index(i) + 1])
            for j in l2:
                # Substring match: assumes no header id is a prefix of
                # another -- TODO confirm for the input data.
                if j in i:
                    # BUGFIX: look the header up in l2 (the file the
                    # sequence is copied from); the original used
                    # l1.index(j), which only produced the fasta line when
                    # both files happened to list the ids at identical
                    # positions, and the ss line otherwise.
                    f3.write(l2[l2.index(j) + 1])
                else:
                    continue
        else:
            continue
    return (f3)
#this function will count the number of residues that are present in each secondary structure and will return the
#relative percentages
def ss_percentage(set):
    """Compute the percentage of residues in each secondary-structure class.

    *set* is an iterable of lines where '>' headers have already been
    interleaved with alternating ss / fasta lines (as produced by
    merger()): after dropping headers, even positions hold secondary
    structure ('E', 'H', '-') and odd positions hold fasta sequences.
    NOTE(review): the parameter name shadows the builtin `set`; renaming
    would change the keyword interface, so it is kept.

    :return: ([coil%, strand%, helix%], fasta_lines, ss_lines)
    """
    c = 0  # coil ('-') residue count
    e = 0  # strand ('E') residue count
    h = 0  # helix ('H') residue count
    k = 0  # total classified residues
    file = []
    ss = []
    fasta = []
    # Keep only non-header lines, stripped of trailing whitespace.
    for line in set:
        line = line.rstrip()
        if line[0] != '>':
            file.append(line)
    # Even indices are ss strings, odd indices the matching fasta strings.
    for j in range(0, len(file), 2):
        ss.append(file[j])
        if j != 0:
            # Collect the fasta line that preceded this ss line.
            fasta.append(file[j - 1])
        for i in file[j]:
            if i == 'E':
                e = e + 1
                k = k + 1
            elif i == 'H':
                h = h + 1
                k = k + 1
            elif i == '-':
                c = c + 1
                k = k + 1
    # The last fasta line is never reached by the loop above.
    fasta.append(file[len(file) - 1])
    print(k)
    coil = float(c / k) * 100
    print(coil)
    helix = float(h / k) * 100
    strand = float(e / k) * 100
    return ([coil, strand, helix], fasta, ss)
def aa_percentage(fasta, ss):
    """For every residue, compute its percentage share of each
    secondary-structure state, relative to that state's total count.

    :param fasta: list of amino-acid sequence strings
    :param ss: list of secondary-structure strings, parallel to *fasta*
    :return: dict residue -> {ss state -> percentage (2 decimals)}
    """
    residuepercentage = {}
    strutturesecondarie = {}
    # Count how many residues fall into each secondary-structure state.
    for states in ss:
        for sec in states:
            strutturesecondarie[sec] = strutturesecondarie.get(sec, 0) + 1
    # Tally, per residue, how often it appears in each state.
    for m, seq in enumerate(fasta):
        for n, aa in enumerate(seq):
            sec = ss[m][n]
            bucket = residuepercentage.setdefault(aa, {})
            bucket[sec] = bucket.get(sec, 0) + 1
    # Convert the raw tallies into percentages of each state's total.
    for aa in residuepercentage:
        for s in residuepercentage[aa]:
            residuepercentage[aa][s] = round(
                float(residuepercentage[aa][s] / strutturesecondarie[s]) * 100, 2)
    print('here is the relative composition:',
          residuepercentage)
    return (residuepercentage)
#this is a function that takes the percentages of coil, helixes and strand and plots them in a pie chart
def printpie(a):
    """Draw a donut chart of the three secondary-structure percentages.

    a: [coil%, strand%, helix%] as returned by ss_percentage.
    """
    b = ['percentage of coil', 'percentage of strends', 'percentage of helixes']
    fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))
    wedges, texts = ax.pie(a, wedgeprops=dict(width=0.5), startangle=-40)
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
    kw = dict(arrowprops=dict(arrowstyle="-"),
              bbox=bbox_props, zorder=0, va="center")
    for i, p in enumerate(wedges):
        # anchor each annotation at the angular midpoint of its wedge
        ang = (p.theta2 - p.theta1) / 2. + p.theta1
        y = np.sin(np.deg2rad(ang))
        x = np.cos(np.deg2rad(ang))
        horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
        connectionstyle = "angle,angleA=0,angleB={}".format(ang)
        kw["arrowprops"].update({"connectionstyle": connectionstyle})
        # BUG FIX: round(a[i], 2) -- the old code called round(a[i]) (integer
        # rounding) and passed 2 as a spurious extra argument to str.format,
        # so every percentage was displayed truncated to a whole number.
        ax.annotate(b[i] + ' is ' + '{0:.2f}%'.format(round(a[i], 2)), xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
                    horizontalalignment=horizontalalignment, **kw)
    ax.set_title("SS composition")
    plt.show()
#this is a function that takes the residue percentages and the relative composition in
#secondary structure and plots the corresponding histogram
def print_histogram(data):
    """Bar-plot a nested {residue: {ss_class: value}} table, one bar group per residue."""
    frame = pd.DataFrame(data).T
    frame.plot.bar()
    plt.show()
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,248
|
marialui/LB2
|
refs/heads/master
|
/taxonomy.py
|
#!/usr/bin/python
# Pie chart of the taxonomic composition of the data set read from taxonomy.csv.
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
data = pd.read_csv("taxonomy.csv")
# Preview the first 5 lines of the loaded data
#print(data.head())
# keep only the taxonomy columns (everything from column 2 on)
taxa=data.iloc[::,2:]
total=taxa.describe()
#print(total)
#crime_year = pd.DataFrame(df_1.year.value_counts(), columns=["Year", "AggregateCrime"])
# NOTE(review): 2750 (total entries) and 1203 (entries in the top-10 taxa)
# are hard-coded; they must be kept in sync with the input file.
Others=2750-1203
# top-10 most frequent taxa as (source, count) rows, plus an "Others" bucket
main=pd.DataFrame(taxa.apply(pd.value_counts)[:10].reset_index().values,columns=['source', 'n'])
main=main.append({'source' : 'Others' , 'n' : Others} , ignore_index=True)
#total 2750
#maincount=int(main.sum()) #1203 belongs to main taxa
# Create a list of colors (from iWantHue)
colors = ["#E13F29", "#D69A80", "#D63B59", "#AE5552", "#CB5C3B", "#EB8076", "#96624E"]
# Create a pie chart
plt.pie(
    # using data total)arrests
    main['n'],
    # with the labels being officer names
    labels=main['source'],
    # with no shadows
    shadow=False,
    # with colors
    colors=colors,
    # with one slide exploded out
    #explode=(0, 0, 0, 0, 0.15),
    # with the start angle at 90%
    startangle=90,
    # with the percent listed as a fraction
    autopct='%1.1f%%',
)
# View the plot drop above
plt.axis('equal')
# View the plot
plt.tight_layout()
plt.show()
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,249
|
marialui/LB2
|
refs/heads/master
|
/information_function.py
|
# !/usr/bin/python
import os, sys
import sys
import numpy as np
import pandas as pd
def information(beta,coil,helix,aa,ss):
    """Build the three information matrices I = log10(P(R,S) / (P(R)*P(S))).

    beta/coil/helix: 17x20 joint-frequency tables P(R,S) per window position.
    aa: 17x20 residue-frequency table P(R).
    ss: per-class frequencies P(S) indexed by 'H', 'E', 'C'.
    Returns [Ie, Ih, Ic] (strand, helix, coil matrices, in that order).
    """
    Ph=float(ss.loc['H'])
    # P(R)*P(H); using .values below bypasses pandas index alignment on div
    PrPh= aa.multiply(float(Ph))
    Ih=(np.log10((helix.div(PrPh.values).astype(float))))
    Pe=float(ss.loc['E'])
    PrPe = aa.multiply(float(Pe))
    Ie =np.log10(( beta.div(PrPe.values).astype(float)))
    Pc =float( ss.loc['C'])
    PrPc = aa.multiply(float(Pc))
    # NOTE(review): here astype(float) is applied to the divisor rather than
    # the quotient as in the two branches above -- equivalent only when coil
    # already holds floats; confirm against the CSV loading in __main__.
    Ic=np.log10(coil.div((PrPc.values).astype(float)))
    #print(Ie)
    ll=[Ie,Ih,Ic]
    return(ll)
def prediction(IFs):
    """Predict secondary structure for every profile of the blind set.

    IFs: [Ie, Ih, Ic] information matrices as returned by information().
    For each residue, a 17-row window of the sequence profile is scored
    against each matrix and the class (E/H/C) with the highest summed score
    wins; one ">id\\n<SS string>" file is written per protein.
    NOTE(review): input/output paths are absolute and machine-specific.
    """
    for item in os.listdir("/home/um77/project/fasta_blindset/profile"):
        if '%s' % (item.split('.')[1]) == 'profile':
            stuff = '%s' % (item.split('.')[0]) + '.DSSP'
            pro = pd.read_fwf('/home/um77/project/fasta_blindset/profile/%s' % ( item), sep="\t")
            # drop the row-number column produced by read_fwf
            pro.drop(pro.columns[0], axis=1, inplace=True)
            #print(item, stuff)
            dssp = open('/home/um77/project/dssp_blindset/predictedDSSP/%s' % (stuff), 'w')
            DSSP=''
            residues = (pro.shape[0] + 16)
            new = np.zeros(shape=((residues, 20)))
            profili = pd.DataFrame(new,
                                   columns=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
                                            'S', 'T', 'W',
                                            'Y', 'V'])
            pro.index = pro.index + 8
            # df is the profile padded with 8 all-zero rows above and below
            df = pro.combine_first(profili)
            #print(df)
            # class order matches IFs ([Ie, Ih, Ic]) and the d mapping below
            structure = ['E', 'H', 'C']
            d={0:'E',1:'H',2:'C'}
            for i in range(8, df.shape[0] - 8):
                valori = []
                for m in range(len(structure)):
                    # 17-row window centred on residue i
                    profile = df.iloc[i - 8:i + 9]
                    # NOTE: this also creates/overwrites module globals E/H/C
                    globals()[structure[m]]=(IFs[m].astype(float)).multiply((profile.values))
                    value= (globals()[structure[m]]).values.sum()
                    valori.append(value)
                # highest score wins; ties resolve to the first class (E)
                massimo=max(valori)
                DSSP=DSSP +'%s'%d[valori.index(massimo)]
            dssp.write('>%s'%(item.split('.')[0]) +'\n%s'%(DSSP))
if __name__ == "__main__":
    # argv[1..4]: strand/coil/helix/residue frequency CSVs (17x20 each);
    # argv[5]: per-class SS frequency CSV.
    n = np.arange(17)
    index0 =pd.MultiIndex.from_product([list(n), ['E']])
    strand = pd.read_csv('%s' % sys.argv[1], delimiter=',').set_index(index0)
    index1 = pd.MultiIndex.from_product([list(n), ['C']])
    coil = pd.read_csv('%s' % sys.argv[2], delimiter=',').set_index(index1)
    index2 = pd.MultiIndex.from_product([list(n), ['H']])
    helix = pd.read_csv('%s' % sys.argv[3], delimiter=',').set_index(index2)
    index3 = pd.MultiIndex.from_product([list(n), ['R']])
    residue = pd.read_csv('%s' % sys.argv[4], delimiter=',').set_index(index3)
    structures=pd.read_csv('%s' % sys.argv[5], delimiter=',')
    strutture=['H', 'E', 'C']
    structures.index = strutture
    risultati= information(strand,coil,helix,residue,structures)
    # dump each information matrix to <class>IF.csv, then predict the blind set
    dizionario = {0: 'strand', 1: 'helics', 2: 'coil'}
    for rs in range(len(risultati)):
        export_csv = risultati[rs].to_csv('%sIF.csv' % (dizionario[rs]), index=None, header=True)
    prediction(risultati)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,250
|
marialui/LB2
|
refs/heads/master
|
/SOV.py
|
# !/usr/bin/python
import os, sys
import sys
import re
import numpy as np
lista=['H','C','E']
def SOV(observed, predicted):
    """Average per-protein segment overlap (SOV) score for HELIX only.

    observed: directory of "<id>.dssp" reference files; predicted: directory
    of matching "<id>.DSSP" predictions (SS string on the second line of each
    file).  Helix segments are located with the regex "H*" and scored with
    the standard MINOV/MAXOV overlap formula.
    NOTE(review): raises ZeroDivisionError for a chain with no observed 'H'
    (Nh == 0), and only helices are scored despite `lista` listing H/C/E.
    Returns the mean SOV over proteins with a non-zero score.
    """
    num = 0
    totalsov = 0
    for item in os.listdir(observed):
        if '%s' % (item.split('.')[1]) == 'dssp':
            stuff = '%s' % (item.split('.')[0]) + '.DSSP'
            pred = open('%s/%s' % (predicted, stuff), 'r')
            obs = open('%s/%s' % (observed,item), 'r')
            # second line of each file holds the SS string
            P = pred.readlines()[1]
            O=obs.readlines()[1]
            #O='CCCHHHHHCCCCCCCCHHHHHHHHHHCCC'
            #P='CCCCCCCCCCCHHHHHHHCCCCCHHHHCC'
            LISTAP=[]
            LISTAO=[]
            sommatoria=[]
            Nh=O.count('H')
            # normalisation: 100 / (number of observed helix residues)
            SOVh=(100*(1/Nh))
            C=re.compile("H*")
            # collect (start, end) spans of every helix run in P and O
            for L in C.finditer(P):
                if 'H' in ( L.group()):
                    LISTAP.append(L.span())
            for J in C.finditer(O):
                if 'H' in (J.group()):
                    LISTAO.append(J.span())
            for el in LISTAP:
                for el1 in LISTAO:
                    sp = set(np.arange(*el)) #predicted
                    so= set(np.arange(*el1)) #observed
                    intersec= sp.intersection(so)
                    if intersec != set():
                        # MINOV = overlap length, MAXOV = union length
                        MINOV=len(intersec)
                        MAXOV=len(sp)+len(so)-MINOV
                        correction=[(MAXOV-MINOV),MINOV,(len(sp)/2),(len(so)/2)]
                        sig=min(correction)
                        value=((MINOV+sig)/MAXOV)*len(so)
                        sommatoria.append(value)
            sov= SOVh*(sum(sommatoria))
            # proteins whose prediction never overlaps an observed helix
            # contribute nothing and are excluded from the average
            if sov !=0:
                num=num +1
                totalsov=totalsov+sov
    average=totalsov / num
    return(average)
if __name__ == "__main__":
    # argv[1]: directory of observed .dssp files; argv[2]: predicted .DSSP files
    pathobserveddssp = sys.argv[1]
    pathpredicteddssp = sys.argv[2]
    result=SOV(pathobserveddssp,pathpredicteddssp)
    print('sov is ',result)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,251
|
marialui/LB2
|
refs/heads/master
|
/scop.py
|
#!/usr/bin/python
# Pie chart of the SCOP class distribution of the data set.
# NOTE(review): cufflinks and mlab are imported but never used below.
import cufflinks as cf
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
# Scop.csv maps each SCOP class letter to its human-readable domain name
data = pd.read_csv('Scop.csv', sep=",", header=None)
data.columns = ["letter", "domain",]
df = data.drop_duplicates(subset=['letter'])
#print (df)
# scopid.csv holds one SCOP class letter per entry; count occurrences
data1 = pd.read_csv('scopid.csv', sep=",", header=None,)
statistica=pd.DataFrame(data1.apply(pd.value_counts).reset_index().values,columns=['letter', 'count'])
dfB=(statistica.sort_values('letter'))
#print(statistica.shape)
s1 = pd.merge(df, dfB, how='inner', on=['letter'])
cmap = plt.get_cmap('Spectral')
colors = [cmap(i) for i in np.linspace(0, 1, 12)]
s1 = s1.sort_values(['count'], ascending = False)
print(s1)
# keep the 7 most common classes, fold the rest into a hard-coded "others"
# bucket (NOTE(review): 21685 must be kept in sync with the input data)
s1.drop(s1.tail(7).index,inplace=True)
print(s1)
df2 = pd.DataFrame({'letter':['m'],"domain":['others'],
                    "count":[21685]})
s1=s1.append(df2,ignore_index = True)
print(df2,'\n',s1)
#colors = ["#E13F29", "#D69A80", "#D63B59", "#AE5552", "#CB5C3B", "#EB8076", "#96624E"]
# Create a pie chart
plt.pie(
    # using data total)arrests
    s1['count'],
    # with the labels being officer names
    #labels=s1['letter'],
    # with no shadows
    shadow=False,
    # with colors
    colors=colors,
    # with one slide exploded out
    #explode=(0, 0, 0, 0, 0.15),
    # with the start angle at 90%
    startangle=90,
    # with the percent listed as a fraction
    autopct='%.2f')
# View the plot drop above
plt.axis('equal')
plt.title('scop classification')
plt.legend(s1['domain'],loc="center left",
           bbox_to_anchor=(1, 0, 0.5, 1))
# View the plot
plt.tight_layout()
plt.show()
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,252
|
marialui/LB2
|
refs/heads/master
|
/prova.py
|
# Scratch/experiment script: counts residue/SS co-occurrences on a tiny
# hard-coded example, in both directions (SS->residue and residue->SS).
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
ss=['HE---EH']
fasta=['AKTCAAA']
composition={}
residuepercentage={}
# composition: {ss_class: {residue: count}}
for m in range(len(ss)):
    # print (m)
    # print (ss[m], fasta[m])
    # print(len(ss[m]),len(fasta[m]))
    for n in range(len(ss[m])):
        # print(ss[m][n])
        if ss[m][n] in composition:
            if fasta[m][n] in composition[ss[m][n]] :
                #print('*****' ,composition[ss[m][n]])
                composition[ss[m][n]][fasta[m][n]] = composition[ss[m][n]][fasta[m][n]] + 1
                #print(+5)
                # print(fasta[m][n])
                # composition[ss[m][n]] = dizionario
                #print(composition)
            else:
                composition[ss[m][n]][fasta[m][n]] = 1
                # print(fasta[m][n])
                #composition[ss[m][n]] = dizionario
                # print(dizionario)
        else:
            dizionario = {}
            dizionario[fasta[m][n]] = 1
            #print('<<<',dizionario)
            composition[ss[m][n]] = dizionario
            # print(composition)
            # print(composition[ss[m][n]])
print(composition)
# print(m,n)
# residuepercentage: {residue: {ss_class: count}} (the inverse nesting)
for m in range(len(fasta)):
    for n in range(len(fasta[m])):
        # print(ss[m][n])
        if fasta[m][n] in residuepercentage:
            if ss[m][n] in residuepercentage[fasta[m][n]]:
                residuepercentage[fasta[m][n]][ss[m][n]] = residuepercentage[fasta[m][n]][ss[m][n]] + 1
            else:
                residuepercentage[fasta[m][n]][ss[m][n]] = 1
        else:
            dizionario = {}
            dizionario[ss[m][n]] = 1
            residuepercentage[fasta[m][n]] = dizionario
print('here is the relative composition:',residuepercentage) # this prints a dictionary where for each ss we have associated the corrispective number of residue
# prensent in that ss.
# NOTE(review): everything below is leftover example code and CANNOT run:
# residuepercentage's keys are residues ('A','K',...), so d['score_india']
# raises KeyError, and the name `score_india` used in plt.hist is never
# defined (NameError).
d = residuepercentage
secodary_structure = d['score_india']
legend = ['overall', 'helix','strand','coil']
score_pk = d['score_pk']
plt.hist([score_india, score_pk], color=['orange', 'green'])
plt.xlabel("Residues")
plt.ylabel("Residues Frequency")
plt.legend(legend)
plt.xticks(range(0, 7))
plt.yticks(range(1, 20))
plt.title('Champions Trophy 2017 Final\n Runs scored in 3 overs')
plt.show()
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,253
|
marialui/LB2
|
refs/heads/master
|
/isto.py
|
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
# Precomputed residue -> {SS class -> percentage} table (output of the
# statistics pipeline pasted in by hand so the histogram can be re-plotted
# without re-running it).
residuepercentage= {'K': {'-': 5.79, 'E': 5.04, 'H': 6.87}, 'V': {'-': 4.38, 'E': 13.42, 'H': 6.56}, 'S': {'-': 7.25, 'E': 5.02, 'H': 4.45}, 'H': {'-': 2.47, 'E': 2.26, 'H': 1.95}, 'R': {'-': 4.57, 'E': 4.93, 'H': 6.19}, 'T': {'-': 5.86, 'E': 6.6, 'H': 4.19}, 'E': {'-': 6.28, 'E': 4.78, 'H': 9.59}, 'P': {'-': 7.85, 'E': 2.23, 'H': 1.93}, 'G': {'E': 4.94, '-': 11.82, 'H': 3.24}, 'L': {'E': 9.95, '-': 6.62, 'H': 12.26}, 'Q': {'E': 2.88, '-': 3.36, 'H': 4.93}, 'D': {'-': 8.13, 'E': 3.28, 'H': 4.61}, 'N': {'-': 5.64, 'E': 2.58, 'H': 2.97}, 'M': {'-': 1.73, 'E': 2.09, 'H': 2.66}, 'A': {'E': 6.41, '-': 6.65, 'H': 11.72}, 'I': {'-': 3.24, 'E': 9.56, 'H': 6.21}, 'C': {'E': 1.7, '-': 1.39, 'H': 1.05}, 'Y': {'E': 4.95, '-': 2.68, 'H': 3.22}, 'F': {'E': 5.48, '-': 3.11, 'H': 3.94}, 'W': {'E': 1.81, 'H': 1.39, '-': 1.09}, 'X': {'-': 0.0}}
print(residuepercentage)
def print_histogram(data):
    """Bar-plot a nested {residue: {ss_class: value}} table, one bar group
    per residue, printing the underlying DataFrame first."""
    # BUG FIX: use the `data` parameter; the old body silently ignored it
    # and read the module-level `residuepercentage` global instead.
    combined_df = pd.DataFrame(data)
    print(combined_df)
    df_transposed = combined_df.T
    df_transposed.plot.bar()
    plt.show()
print_histogram(residuepercentage)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,254
|
marialui/LB2
|
refs/heads/master
|
/performance.py
|
# !/usr/bin/python
import os, sys
import sys
import numpy as np
import pandas as pd
import math
d = {'E': 'E', 'H': 'H', '-': 'C', 'C':'C'}
lista=['H','C','E']
def performance(observed, predicted):
    """Accumulate a 3x3 confusion matrix over a directory of predictions.

    observed: directory of "<id>.dssp" reference files; predicted: directory
    of matching "<id>.DSSP" prediction files.  Rows of the matrix are
    indexed by the PREDICTED class, columns by the OBSERVED class (H/E/C).
    Returns (matrix, total_residue_count).
    """
    total=0
    HEC = pd.DataFrame([[0, 0, 0], [0, 0, 0], [0, 0, 0]], index=['H', 'E', 'C'], columns=['H', 'E', 'C'])
    for item in os.listdir(observed):
        if '%s' % (item.split('.')[1]) == 'dssp':
            stuff = '%s' % (item.split('.')[0]) + '.DSSP'
            pred = open('%s/%s' % (predicted, stuff), 'r')
            obs = open('%s/%s' % (observed,item), 'r')
            # second line of each file is the SS string (first is presumably
            # the '>' header -- confirm against the writer in prediction())
            P = pred.readlines()[1]
            O=obs.readlines()[1]
            total=total+len(P)
            for i in range(len(P)):
                # d collapses both alphabets to H/E/C ('-' -> 'C')
                HEC.loc[d[P[i]]].loc[d[O[i]]]=HEC.loc[d[P[i]]].loc[d[O[i]]]+1
    return(HEC, total)
def binary_scoring(tab):
    """Per-class binary scores (sensitivity, PPV, MCC) from a confusion matrix.

    tab: 3x3 DataFrame as returned by performance() (rows = predicted,
    columns = observed).  For each class i: C = true positives, O = rest of
    row i (over-predictions), U = rest of column i (under-predictions),
    N = sum of the matrix with row/column i removed (true negatives).
    NOTE: all intermediates are created as module-level globals via
    globals(), which is why this block is left structurally untouched.
    Returns a list of three [label, ..., values] summary lists, one per class.
    """
    perf=[]
    for i in lista:
        globals()['C%s'%(i)]=tab.loc[i].loc[i]
        globals()['O%s'%(i)]= (tab.loc[i].sum())-globals()['C%s'%(i)]
        globals()['U%s'%(i)]=(tab.loc[:,i].sum())-globals()['C%s'%(i)]
        globals()['N%s'%(i)]=(((tab.drop(index='%s'%(i), columns='%s'%(i)))).values).sum()
        # sensitivity = C / (C + U), precision (PPV) = C / (C + O)
        globals()['SEN%s' % (i)]=globals()['C%s'%(i)]/(globals()['C%s'%(i)]+globals()['U%s'%(i)])
        globals()['PPV%s' % (i)]=globals()['C%s'%(i)]/(globals()['C%s'%(i)]+globals()['O%s'%(i)])
        # Matthews correlation coefficient
        num=(globals()['C%s'%(i)]*globals()['N%s'%(i)])-(globals()['O%s'%(i)]*globals()['U%s'%(i)])
        den=math.sqrt((globals()['C%s'%(i)]+ globals()['O%s'%(i)])*(globals()['C%s'%(i)]+ globals()['U%s'%(i)])\
                      *(globals()['N%s'%(i)]+globals()['O%s'%(i)])*(globals()['N%s'%(i)]+globals()['U%s'%(i)]))
        globals()['MCC%s' % (i)]=num/den
        globals()['performance_%s' % (i)]=['PERFORMANCE %s :' %(i),'SEN_%s= '%(i),globals()['SEN%s' % (i)], 'PPV_%s= '% (i),globals()['PPV%s' % (i)],'MMC_%s= '%(i),globals()['MCC%s' % (i)]]
        perf.append(globals()['performance_%s' % (i)])
    return (perf)
if __name__ == "__main__":
    # argv[1]: observed .dssp dir; argv[2]: predicted .DSSP dir
    pathobserveddssp = sys.argv[1]
    pathpredicteddssp = sys.argv[2]
    result=performance(pathobserveddssp,pathpredicteddssp)
    confusion_matrix = result[0]
    totale=result[1]
    matrice=confusion_matrix.to_numpy()
    #print(totale)
    # Q3 = correctly classified residues / total residues
    Q=((matrice.diagonal()).sum())/totale #this is the accuracy
    print(confusion_matrix)
    for i in (binary_scoring(confusion_matrix)):
        print(i,'\n')
    print('Three-class accuracy Q3 = %s' % (Q))
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,255
|
marialui/LB2
|
refs/heads/master
|
/sel_seq_dictionary.py
|
#!/usr/bin/python
import sys
def get_list_fasta(lid, fasta):
    """Print every FASTA record whose id is selected by *lid*.

    lid: dict mapping sequence ids to True (an id is kept when its value
    compares equal to 1); fasta: path to a multi-FASTA file whose headers
    look like ">id|...".  Selected headers and their sequence lines are
    printed to stdout.
    """
    handle = open(fasta)
    keep = 0
    for raw in handle:
        raw = raw.rstrip()
        if raw[0] == '>':
            # id = header text up to the first '|', without the leading '>'
            seq_id = raw.split('|')[0][1:]
            keep = 1 if lid.get(seq_id, False) == 1 else 0
        # while the current record is selected, echo every line (header too)
        if keep == 1:
            print(raw)
if __name__=="__main__":
    # argv[1]: file with one id per line; argv[2]: multi-FASTA file
    fid=sys.argv[1]
    fasta= sys.argv[2]
    # build a dict mapping every id to True for O(1) membership tests
    lid=dict([(i,True) for i in open(fid).read().split('\n')])
    get_list_fasta(lid,fasta)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,256
|
marialui/LB2
|
refs/heads/master
|
/SVM.py
|
# !/usr/bin/python
import os, sys
import sys
import numpy as np
import pandas as pd
d = {'E': '2', 'H': '1', '-': '3'}
# with this function it automatically produces 4 matrices:
# one for helixes(H), one for strand(E), one for coil (C) and one for the frequency of each residue (R)
# here pro and dssp should be the path
def svm_training(pathprofile, pathdssp):
    """Write an SVM-light training file ("ss.train.dat") from profiles + DSSP.

    For every "<id>.profile" in *pathprofile*, the matching "<id>.dssp" in
    *pathdssp* supplies per-residue labels (encoded through the module-level
    dict `d`).  Each residue becomes one line: the class code followed by the
    sparse "index:value" features of its 17x20 sliding-window profile
    (profiles are zero-padded with 8 rows on each side).
    """
    outputfile = open("ss.train.dat", "w+")
    for item in os.listdir(pathprofile):
        if '%s' % (item.split('.')[1]) == 'profile':
            stuff = '%s' % (item.split('.')[0]) + '.dssp'
            pro = pd.read_fwf('%s/%s' % (pathprofile, item), sep="\t")
            # drop the row-number column produced by read_fwf
            pro.drop(pro.columns[0], axis=1, inplace=True)
            print(item, stuff)
            dssp = open('%s/%s' % (pathdssp, stuff), 'r')
            lines = dssp.readlines()
            residues = (pro.shape[0] + 16)
            new = np.zeros(shape=((residues, 20)))
            profili = pd.DataFrame(new,
                                   columns=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
                                            'S', 'T', 'W',
                                            'Y', 'V'])
            pro.index = pro.index + 8
            # df is the profile padded with 8 all-zero rows above and below
            df = pro.combine_first(profili)
            # per-position labels, padded with '0' on both sides to line up
            # with the padded profile (second DSSP line holds the SS string)
            structure = ['0' for m in range(8)]
            for s in (lines[1].strip()):
                structure.append(d[s])
            for k in range(8):
                structure.append('0')
            for i in range(8, df.shape[0] - 8):
                row = [structure[i]]
                # flatten the 17x20 window centred on residue i
                window = []
                for profile_row in df.iloc[i - 8:i + 9].to_numpy():
                    for element in profile_row:
                        window.append(element)
                # BUG FIX: the feature index must be the POSITION in the
                # window.  The old code used l.index(j), which returns the
                # FIRST occurrence of the value, so duplicated profile values
                # were all written with the same (wrong) feature index.
                for pos, value in enumerate(window):
                    if value != 0:
                        row.append('%d:%f' % (pos + 1, float(value)))
                outputfile.write('%s\n' % (" ".join(row)))
    outputfile.close()
if __name__ == "__main__":
    # argv[1]: directory of .profile files; argv[2]: directory of .dssp files
    pathprofile = sys.argv[1]
    pathdssp = sys.argv[2]
    inputtraining = svm_training(pathprofile, pathdssp)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,257
|
marialui/LB2
|
refs/heads/master
|
/SOV(general).py
|
# !/usr/bin/python
import os, sys
import sys
import re
import numpy as np
lista = ['H', 'C', 'E']
def SOV(observed, predicted):
    """Per-class segment overlap (SOV) scores for H, E and C.

    observed: directory of "<id>.dssp" reference files; predicted: directory
    of matching "<id>.DSSP" predictions (SS string on the second line).
    Generalisation of the helix-only SOV: for each class, segment spans are
    found with the regex "<class>*" and scored with the MINOV/MAXOV overlap
    formula.  All per-class accumulators (num_X, totalsov_X, averageX, ...)
    are created as module-level globals via globals(), which is why the
    structure is left untouched here.
    Returns (averageH, averageE, averageC).
    """
    for i in lista:
        globals()['num_%s' % (i)] = 0
        globals()['totalsov_%s' % (i)] = 0
    for item in os.listdir(observed):
        if '%s' % (item.split('.')[1]) == 'dssp':
            stuff = '%s' % (item.split('.')[0]) + '.DSSP'
            pred = open('%s/%s' % (predicted, stuff), 'r')
            obs = open('%s/%s' % (observed, item), 'r')
            P = pred.readlines()[1]
            O = obs.readlines()[1]
            # O='CCCHHHHHCCCCCCCCHHHHHHHHHHCCC'
            # P='CCCCCCCCCCCHHHHHHHCCCCCHHHHCC'
            for i in lista:
                globals()['LISTAP_%s' % (i)] = []
                globals()['LISTAO_%s' % (i)] = []
                globals()['sommatoria_%s' % (i)] = []
                globals()['N%s' % (i)] = O.count(i)
                # guard against chains with no observed residues of class i
                if globals()['N%s' % (i)]!=0:
                    globals()['SOV%s' % (i)] = 100 * (1 / globals()['N%s' % (i)])
                else:
                    globals()['SOV%s' % (i)]=0
                globals()['C%s' % (i)] = re.compile("%s*" % (i))
                # collect (start, end) spans of every class-i run in P and O
                for L in globals()['C%s' % (i)].finditer(P):
                    if i in (L.group()):
                        globals()['LISTAP_%s' % (i)].append(L.span())
                for J in globals()['C%s' % (i)].finditer(O):
                    if i in (J.group()):
                        globals()['LISTAO_%s' % (i)].append(J.span())
                for el in globals()['LISTAP_%s' % (i)]:
                    for el1 in globals()['LISTAO_%s' % (i)]:
                        globals()['sp_%s' % (i)] = set(np.arange(*el)) # predicted
                        globals()['so_%s' % (i)] = set(np.arange(*el1)) # observed
                        globals()['intersec_%s' % (i)] = globals()['sp_%s' % (i)].intersection(globals()['so_%s' % (i)])
                        if globals()['intersec_%s' % (i)] != set():
                            # MINOV = overlap length, MAXOV = union length
                            globals()['MINOV_%s' % (i)] = len(globals()['intersec_%s' % (i)])
                            globals()['MAXOV_%s' % (i)] = len(globals()['sp_%s' % (i)]) + len(
                                globals()['so_%s' % (i)]) - globals()['MINOV_%s' % (i)]
                            globals()['correction_%s' % (i)] = [
                                (globals()['MAXOV_%s' % (i)] - globals()['MINOV_%s' % (i)]),
                                globals()['MINOV_%s' % (i)], \
                                (len(globals()['sp_%s' % (i)]) / 2), (len(globals()['so_%s' % (i)]) / 2)]
                            globals()['sig_%s' % (i)] = min(globals()['correction_%s' % (i)])
                            globals()['value_%s' % (i)] = ((globals()['MINOV_%s' % (i)] + globals()['sig_%s' % (i)]) /
                                                           globals()['MAXOV_%s' % (i)]) * len(globals()['so_%s' % (i)])
                            globals()['sommatoria_%s' % (i)].append(globals()['value_%s' % (i)])
                globals()['sov%s' % (i)] = globals()['SOV%s' % (i)] * (sum(globals()['sommatoria_%s' % (i)]))
                # NOTE(review): the running average is recomputed inside the
                # per-file loop; the value returned is the one from the last
                # file processed, computed over the accumulated totals.
                if globals()['sov%s' % (i)] != 0:
                    globals()['num_%s' % (i)] = globals()['num_%s' % (i)] + 1
                if globals()['num_%s' % (i)]!=0:
                    globals()['totalsov_%s' % (i)] = globals()['totalsov_%s' % (i)] + globals()['sov%s' % (i)]
                    globals()['average%s' % (i)] = globals()['totalsov_%s' % (i)] / globals()['num_%s' % (i)]
                else:
                    globals()['average%s' % (i)]=0
    # averageH/E/C exist as module globals created by the loop above
    return (averageH, averageE,averageC)
if __name__ == "__main__":
    # argv[1]: observed .dssp dir; argv[2]: predicted .DSSP dir
    pathobserveddssp = sys.argv[1]
    pathpredicteddssp = sys.argv[2]
    result = SOV(pathobserveddssp, pathpredicteddssp)
    print('respective SOVs for H, E and C are: ', result)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,258
|
marialui/LB2
|
refs/heads/master
|
/parsingblindset.py
|
# Blind-set preparation: de-duplicate PDB chains, pick one representative per
# sequence cluster (best resolution), and emit id lists + FASTA files.
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
data = pd.read_csv('blindset.csv', sep=",", header=0)
data1=((data.drop('Chain ID',axis=1)).drop('Resolution',axis=1)).drop('Sequence',axis=1)
# dropping duplicate values : same id, same chain length
data1.drop_duplicates(keep='first',inplace=True)
intersected_df = data[data.index.isin(data1.index)]
#print(intersected_df.head())
res = intersected_df.set_index('PDB ID')['Resolution']
#create a fasta file from the list of pdb chains we retained.
f4=open('PDB.fasta','w')
for j in range(len(intersected_df['PDB ID'])):
    f4.write('>' + (intersected_df['PDB ID'].iloc[j]).lower()+'_'+ intersected_df['Chain ID'].iloc[j] + '\n' + \
             intersected_df['Sequence'].iloc[j] + '\n'
             )
#import cluster file as dataframe
clust=pd.read_csv('clust.txt', sep="\n", header=0)
clust.columns = ['clusters']
#print(clust)
#create a dictionary from the dataframe where we have the id :resolution
#print (intersected_df)
Res=(intersected_df).drop('Sequence',axis=1).drop('Chain Length',axis=1)
forfasta=(intersected_df).drop('Chain Length',axis=1)
# build "pdbid_chain" keys so clusters and CSV rows can be joined
forfasta['Name'] = (forfasta['PDB ID']).str.cat(forfasta['Chain ID'],sep="_")
Res['Name'] = (Res['PDB ID']).str.cat(Res['Chain ID'],sep="_")
Res1=(Res.drop('Chain ID',axis=1)).drop('PDB ID',axis=1)
forfasta1=((forfasta.drop('Chain ID',axis=1)).drop('PDB ID',axis=1)).drop('Resolution', axis=1)
Res1=Res1[['Name','Resolution']]
forfasta1=forfasta1[['Name','Sequence']]
#print(forfasta1)
df_dict = dict(zip(Res1.Name, Res1.Resolution))
#print (df_dict)
lun=(clust.shape)[0]
best=[]
#now create a list best in which will be stored the best id for each cluster, the one with
#the best resolution.
for m in range (0,lun):
    cluster = (clust['clusters'].iloc[m]).split()
    # NOTE(review): sum(cluster.index(x) for x in cluster) equals
    # 0+1+...+(len-1), which is > 1 only when len(cluster) >= 3 -- so
    # 2-member clusters fall into the else branch and keep cluster[0]
    # regardless of resolution.  This was probably meant to be
    # `len(cluster) > 1`.
    if sum(cluster.index(x) for x in cluster) > 1:
        best_id = []
        for pid in cluster:
            # missing ids rank last (infinite "resolution")
            v = df_dict.get(pid, float('inf'))
            best_id.append([v, pid])
        best_id.sort()
        best.append(best_id[0][1])
    else:
        best.append(cluster[0])
        #print(best_id)
#so best is the list made of the best representative for each clustering on the basis of the best resolution.
print('hi i am the length' , len(best))
#best_fasta = best[best.index.isin(intersected_df.index)]
#print(best)
#and we will put this list into a file 'representatives.txt in wich we will have all the ids of representative
#structures , one for each cluster on the basis on the best resolution.
f5=open('representatives.txt','w')
for k in range(len(best)):
    f5.write(best[k] + '\n')
#HERE WE FIRST BUILD THE DATAFRAME COMPOSED BY THE COLUMN NAME WITH ALL THE IDS OF THE REPRESENTATIVES
#THEN WE MERGED THIS DATAFRAME WITH THE FORFASTA1 INITIAL ONE WHERE WE SOTRED FOR ALL THE IDS WE STARTED FROM
#THE CORRISPONDENT SEQUENCE
representatives = pd.DataFrame(best)
representatives.columns = ['Name']
print(representatives)
print(forfasta1)
Fasta = pd.merge(representatives, forfasta1, how='inner', on=['Name'])
print(Fasta)
#here we store in file representatives.fasta all the ids of all the representatives
#and the corrispondent sequence in fasta format
f6=open('representatives.fasta','w')
for L in range(len(Fasta['Name'])):
    f6.write('>' + (Fasta['Name'].iloc[L]).upper()+ '\n'+ Fasta['Sequence'].iloc[L] + '\n' )
f7=open('reducedblidset','w+')
f9=open('reducedblidset.fasta','w+')
#here we produce a file where we store all the representatives ids without the 473 ids from removeids file
#and we produce also a multifasta file with all the sequences for each chain (reducedblindset.fasta)
# NOTE(review): removeids.txt is re-read once per representative; membership
# is a substring test on the whole file content.
for L in range(len(Fasta['Name'])):
    with open('removeids.txt') as f8:
        if ((Fasta['Name'].iloc[L]).upper()) not in f8.read():
            f7.write((Fasta['Name'].iloc[L]).upper()+ '\n' )
            f9.write(('>' + Fasta['Name'].iloc[L]).upper() + '\n' + Fasta['Sequence'].iloc[L] + '\n')
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,259
|
marialui/LB2
|
refs/heads/master
|
/statistics.py
|
#!/usr/bin/python
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
lista = ['percentage of coil', 'percentage of strends', 'percentage of helixes']
# this will take as an input 2 files: the first one with all the ss , the second is a fasta file
# and will return a merged file where for each id the secondary structure and the fasta is reported
def merger(f1, f2):
    """Merge a secondary-structure file and a FASTA file that share headers.

    f1: open file alternating ">id" header lines and SS strings.
    f2: open file alternating ">id" header lines and sequences.
    Writes "header / ss / sequence" triplets to a new file "maintext" and
    returns that handle, rewound so callers can read it back immediately.
    """
    l1 = f1.readlines()
    l2 = f2.readlines()
    f3 = open("maintext", "w+")
    for i in l1:
        if i[0] == ">":
            f3.write(i)
            # the SS string is the line right after its header in f1
            f3.write(l1[l1.index(i) + 1])
            for j in l2:
                if j in i:
                    # BUG FIX: the matching header j lives in l2, so its
                    # sequence is l2[l2.index(j) + 1]; the old code looked up
                    # j's position in l1, pairing ids with the wrong lines
                    # (or raising ValueError when j is absent from l1).
                    f3.write(l2[l2.index(j) + 1])
                else:
                    continue
        else:
            continue
    # rewind so the returned "w+" handle is readable from the start
    f3.seek(0)
    return (f3)
def ss_percentage(set):
    """Global SS percentages plus per-residue/SS relative composition.

    set: iterable of lines ("<header> / <ss> / <sequence>" triplets; headers
    start with '>').  Returns ([coil%, strand%, helix%], residuepercentage)
    where residuepercentage maps residue -> {ss class -> percentage of all
    residues in that class}.
    NOTE(review): shadows the builtins `set`, `file` and `id`.
    """
    c = 0
    e = 0
    h = 0
    t = 0
    file=[]
    ss=[]
    fasta=[]
    id=[]
    counts = {}
    residuepercentage={}
    # split the stream: non-header lines alternate SS / sequence
    for line in set:
        line=line.rstrip()
        if line[0]!='>':
            file.append(line)
        else:id.append(line[1:])
    for j in range (0,len(file),2):
        ss.append(file[j])
        if j!= 0:
            fasta.append(file[j-1])
        for i in file[j]:
            if i == 'E':
                e = e + 1
                t = t + 1
            elif i == 'H':
                h = h + 1
                t = t + 1
            elif i == '-':
                c = c + 1
                t = t + 1
    #print(id)
    # NOTE(review): this append is commented out, so the LAST chain's
    # sequence is never added to fasta -- fasta has one fewer entry than ss
    # and the final chain is excluded from all residue statistics below.
    #fasta.append(file[len(file) - 1])
    coil = (float(c / t) * 100)
    helix = (float(h / t) * 100)
    strand = (float(e / t) * 100)
    #for k in range(-1, len(file) , 2):
        #fasta.append(file[k])
    # total occurrences of each residue over all (kept) sequences
    for i in range(len(fasta)):
        for aa in fasta[i]:
            #print(aa)
            if aa in counts:
                counts[aa] = counts[aa] + 1
            else :
                counts[aa] = 1
    # total number of positions per SS class
    strutturesecondarie={}
    for i in range(len(ss)):
        for sec in ss[i]:
            # print(aa)
            if sec in strutturesecondarie:
                strutturesecondarie[sec]=strutturesecondarie[sec] +1
            else:
                strutturesecondarie[sec] = 1
    print(strutturesecondarie)
    # print ('here is the total count:' ,counts) #this prints a vocabulary where each residue is associated to the number of times it appears
    # co-occurrence counts residue -> {ss class -> count}
    for m in range(len(fasta)):
        for n in range(len(fasta[m])):
            # print(ss[m][n])
            if fasta[m][n] in residuepercentage:
                if ss[m][n] in residuepercentage[fasta[m][n]]:
                    residuepercentage[fasta[m][n]][ss[m][n]] = residuepercentage[fasta[m][n]][ss[m][n]] + 1
                else:
                    residuepercentage[fasta[m][n]][ss[m][n]] = 1
            else:
                dizionario = {}
                dizionario[ss[m][n]] = 1
                residuepercentage[fasta[m][n]] = dizionario
    # convert counts to percentages of each SS class total
    for aa in residuepercentage:
        for s in residuepercentage[aa]:
            residuepercentage[aa][s]= round(float (residuepercentage[aa][s]/strutturesecondarie[s])*100,2)
            #print(residuepercentage[aa][s],strutturesecondarie[s])
    print('here is the relative composition:',residuepercentage) # this prints a dictionary where for each ss we have associated the corrispective number of residue
    # prensent in that ss.
    return ([coil, strand, helix],residuepercentage)
def printpie(a):
    """Draw a donut chart of the three secondary-structure percentages.

    a: [coil%, strand%, helix%] as returned by ss_percentage.
    """
    b = ['percentage of coil', 'percentage of strends', 'percentage of helixes']
    fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))
    wedges, texts = ax.pie(a, wedgeprops=dict(width=0.5), startangle=-40)
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
    kw = dict(arrowprops=dict(arrowstyle="-"),
              bbox=bbox_props, zorder=0, va="center")
    for i, p in enumerate(wedges):
        # anchor each annotation at the angular midpoint of its wedge
        ang = (p.theta2 - p.theta1) / 2. + p.theta1
        y = np.sin(np.deg2rad(ang))
        x = np.cos(np.deg2rad(ang))
        horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
        connectionstyle = "angle,angleA=0,angleB={}".format(ang)
        kw["arrowprops"].update({"connectionstyle": connectionstyle})
        # BUG FIX: round(a[i], 2) -- the old code called round(a[i]) (integer
        # rounding) and passed 2 as a spurious extra argument to str.format,
        # so every percentage was displayed truncated to a whole number.
        ax.annotate(b[i] + ' is ' + '{0:.2f}%'.format(round(a[i], 2)), xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
                    horizontalalignment=horizontalalignment, **kw)
    ax.set_title("SS composition")
    plt.show()
def print_histogram(data):
    """Bar-plot a nested {residue: {ss_class: value}} table, one bar group
    per residue, printing the underlying DataFrame first."""
    frame = pd.DataFrame(data)
    print(frame)
    frame.T.plot.bar()
    plt.show()
#we took the PDB ids and with the advanced search in PDB we retrieved
#the number of ids for each kingdom and we plot the result
def kindom_pie(listk):
    """Pie chart of per-kingdom counts.

    listk: [Bacteria, Eukaryota, Archaea, Viruses, Other] raw counts.
    NOTE: converts the caller's list to percentages IN PLACE before plotting.
    """
    tot = sum(listk)
    # in-place conversion of raw counts to one-decimal percentages
    listk[:] = [round(float(v / tot) * 100, 1) for v in listk]
    #print(listk,kindo)
    # Pie chart, where the slices will be ordered and plotted counter-clockwise:
    labels = 'Bacteria','Eukaryota','Archaea','Viruses','Other'
    sizes = listk
    # pull the two smallest slices out of the pie for readability
    explode = (0, 0, 0, 0.4, 0.7)
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.show()
if __name__ == "__main__":
    # argv[1]: SS file; argv[2]: FASTA file; argv[3]: merged-data file
    ss = open(sys.argv[1], "r")
    fasta = open(sys.argv[2], "r")
    data = open(sys.argv[3], "w+")
    set = merger(ss, fasta)
    # NOTE(review): ss_percentage is fed `data` (argv[3], just opened "w+",
    # i.e. empty/truncated) rather than the merged handle `set` returned by
    # merger -- confirm which file is meant to be analysed.
    m=ss_percentage(data)
    l = (m[0])
    print(l)
    printpie(l)
    residui=(m[1])
    print(residui)
    print_histogram(residui)
    # hard-coded per-kingdom counts retrieved from the PDB advanced search
    listak = [639, 455, 93, 63, 2]
    kindom_pie(listak)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,260
|
marialui/LB2
|
refs/heads/master
|
/gor_training.py
|
#!/usr/bin/python
import sys
import numpy as np
import pandas as pd
d={'E':'E','H':'H','-':'C'}
#with this function it automatically produces 4 matrices:
#one for helixes(H), one for strand(E), one for coil (C) and one for the frequency of each residue (R)
#here pro and dssp should be the path
def gor_training(pro,dssp):
    """Accumulate GOR training matrices from one profile + its DSSP file.

    pro: 20-column per-residue sequence profile DataFrame.
    dssp: open DSSP file whose second line is the SS string ('E'/'H'/'-').
    Builds four 17x20 matrices -- H, E, C (per-class windowed frequencies)
    and R (overall residue frequencies) -- as MODULE GLOBALS via globals(),
    plus a per-class counter HEC.  Returns (E, H, C, R, HEC) with every
    matrix divided by the number of residues and rounded to 2 decimals.
    """
    global profile
    n=np.arange(17)
    ss=[['H'],['E'],['C'],['R']]
    # create the four zeroed 17x20 matrices, one module global per name
    for structure in (ss):
        index=pd.MultiIndex.from_product([n, structure])
        matrix = pd.DataFrame(index=index,
                              columns=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W',
                                       'Y', 'V'])
        for col in matrix.columns:
            matrix[col].values[:] = 0
        globals()[structure[0]]=matrix
    # now that the matrices exist, fill them:
    lines=dssp.readlines()
    residues=(pro.shape[0] + 16)
    new = np.zeros(shape=((residues, 20)))
    profili= pd.DataFrame(new , columns=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W',
                                         'Y', 'V'])
    pro.index= pro.index+8
    # df is the profile padded with 8 all-zero rows above and below
    df = pro.combine_first(profili)
    # per-position class labels, padded with '0' to line up with df
    structure=['0' for m in range(8)]
    for s in (lines[1].strip()):
        structure.append(d[s])
    for k in range(8):
        structure.append('0')
    # counter of secondary-structure class occurrences
    HEC = pd.DataFrame([0,0,0], index=['H', 'E', 'C'],columns=['Number of SS'])
    #from now on i'm filling the matrices
    for i in range (8, df.shape[0]-8):
        midx = pd.MultiIndex.from_product([list(n),[structure[i]]])
        midx1 = pd.MultiIndex.from_product([list(n), ['R']])
        # 17-row window centred on residue i, re-indexed to match the
        # target matrix (class of residue i, and 'R' for the residue matrix)
        profile = df.iloc[i-8:i+9]
        profile1 = profile.set_index(midx1)
        profile=profile.set_index(midx)
        (globals()[structure[i]]).update(globals()[structure[i]]+ profile)
        #UPDATE RESIDUE MATRIX
        globals()['R'].update(globals()['R'] + profile1)
        #UPDATE SECONDARY STRUCTURE MATRIX
        HEC.loc[structure[i]] = HEC.loc[structure[i]] + 1
    # normalise by the number of residues (len(structure) - 16 = chain length)
    for mat in ss:
        globals()[mat[0]]= globals()[mat[0]].div(len(structure) - 16).round(2)
    return (E,H,C,R,(HEC.div(len(structure) - 16).round(2)))
if __name__ == "__main__":
    # argv[1]: fixed-width profile file; argv[2]: DSSP file
    profile= pd.read_fwf('%s' %sys.argv[1], sep="\t")
    # drop the row-number column produced by read_fwf
    profile.drop(profile.columns[0],axis=1,inplace=True)
    dsspfile=open('%s' %sys.argv[2], 'r')
    results=gor_training(profile, dsspfile)
    for maj in results:
        print(maj,'\n')
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,261
|
marialui/LB2
|
refs/heads/master
|
/statisticsLB2.py
|
#!/usr/bin/python
import sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import functions as f
import pandas as pd
if __name__ == "__main__":
    # argv[1]: SS file; argv[2]: FASTA file; argv[3]: merged-data file
    ss = open(sys.argv[1], "r")
    fasta = open(sys.argv[2], "r")
    data = open(sys.argv[3], "w+")
    set = f.merger(ss, fasta)
    # NOTE(review): ss_percentage is fed `data` (argv[3], just opened "w+",
    # i.e. empty/truncated) rather than the merged handle `set` returned by
    # merger -- confirm which file is meant to be analysed.
    m= (f.ss_percentage(data))
    l=m[0]
    #print(l)
    #f.printpie(l)
    stru=m[1]
    fas=m[2]
    residui = (f.aa_percentage(fas,stru))
    print(residui)
    f.print_histogram(residui)
|
{"/statisticsLB2.py": ["/functions.py"]}
|
5,278
|
miphreal/arise.todo
|
refs/heads/master
|
/tests/tests_todo_engine.py
|
from logging import info
from unittest import TestCase
from hamcrest import *
from arise_todo.core.parser import Task
class TaskTest(TestCase):
    """Round-trip tests for the Task parser (Python 2).

    TEST_DATA pairs a raw todo line with its canonical rendering:
    priority first, then state, message, +projects, @contexts,
    #schedule, key:value metadata.
    """
    TEST_DATA = (
        ('(A) task', '(A) task'),
        ('task (A)', '(A) task'),
        ('task (A12) ksat', '(A12) task ksat'),
        ('(A) task +prj', '(A) task +prj'),
        ('(A) task +prj-prj', '(A) task +prj-prj'),
        ('(A) task +prj/prj +jrp', '(A) task +prj/prj +jrp'),
        ('   +prj (A) task', '(A) task +prj'),
        ('(A) +prj-prj task   ', '(A) task +prj-prj'),
        ('+prj/prj (A) task +jrp', '(A) task +prj/prj +jrp'),
        ('(A) task +prj @ctx', '(A) task +prj @ctx'),
        ('(A) task +prj-prj @ctx-ctx', '(A) task +prj-prj @ctx-ctx'),
        ('(A) task +prj/prj +jrp @ctx/ctx @xtc', '(A) task +prj/prj +jrp @ctx/ctx @xtc'),
        ('   +prj @ctx (A) task', '(A) task +prj @ctx'),
        ('(A) +prj-prj @ctx-ctx task   ', '(A) task +prj-prj @ctx-ctx'),
        ('+prj/prj @ctx/ctx (A) task +jrp @xtc', '(A) task +prj/prj +jrp @ctx/ctx @xtc'),
        ('(A) task &todo', '(A) &todo task'),
        ('(A) task &in-progress', '(A) &in-progress task'),
        ('(A) task &done(2013-11-10 22:33)', '(A) &done(2013-11-10 22:33) task'),
        ('(A) task #2013-11-10 22:33 task', '(A) task task #2013-11-10 22:33'),
        ('(A) task key:value key1:"value value"', '(A) task key:value key1:"value value"'),
        ('(A) task key:value key1:"value value" task-continue asdf..',
         '(A) task task-continue asdf.. key:value key1:"value value"'),
    )
    def test_init(self):
        # Parsing then rendering yields the canonical form.
        for task, expected in self.TEST_DATA:
            assert_that(unicode(Task(task)), equal_to(expected))
            info('{:<40} result=> {}'.format(task, Task(task)))
    def test_consistent(self):
        # Re-parsing a canonical rendering is a fixed point.
        for task, expected in self.TEST_DATA:
            assert_that(unicode(Task(unicode(Task(task)))), equal_to(expected))
    def test_data(self):
        # Smoke test of the structured .data view (no assertions).
        for task, expected in self.TEST_DATA:
            print '{:<40} result=> {}'.format(task, Task(task).data)
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,279
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/core/conf.py
|
from arise_todo.storage.backends import text_file
# Registry of available storage backends, keyed by short name.
STORAGES = {
    'text_file': text_file.TextStorage,
}
# Backend used when the caller does not request one explicitly
# (consumed by arise_todo.core.utils.get_storage).
DEFAULT_STORAGE = {
    'name': 'text_file',
    'options': {'file_name': '~/.config/arise.todo/todo'}
}
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,280
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/access/cli.py
|
from __future__ import unicode_literals
import cmd
from arise_todo.core.manager import TasksManager
class CLI(cmd.Cmd):
    """Interactive command-line front end for TasksManager (Python 2).

    Commands: q (quit), mk (make), rm (remove), ed (edit),
    mv (move after another task), ls (list or regex search).
    """
    intro = 'Simple tasks CLI'
    prompt = '> '
    def _print_tasks(self, tasks):
        # Render each task as "[uuid] text".
        for task in tasks:
            print '[{t.id!s}] {t!s}'.format(t=task)
    def emptyline(self):
        # Override cmd.Cmd default, which repeats the last command.
        pass
    def preloop(self):
        # Build the manager once, before the command loop starts.
        self.tasks = TasksManager()
    def postloop(self):
        # Disconnect the storage backend on exit.
        self.tasks.close()
    def do_q(self, empty):
        """Quit: returning True stops the cmd.Cmd loop."""
        return True
    def do_mk(self, task_text):
        """Create a task from the raw text."""
        self.tasks.add(task_text)
    def do_rm(self, task_text):
        """Delete the task matching the given text or uuid."""
        self.tasks.delete(task_text)
    def do_ed(self, task_uuid):
        """Replace a task: prompts for the new text (Python 2 raw_input)."""
        new_task = raw_input('[Edit {}] {}'.format(task_uuid, self.prompt))
        self.tasks.edit(task_uuid, new_task)
    def do_mv(self, task):
        """Move a task after another one (prompted)."""
        after = raw_input('[After] {}'.format(self.prompt))
        self.tasks.move_after(task, after)
    def do_ls(self, query):
        # With a query: regex search; without: list everything.
        self._print_tasks(self.tasks.search(query) if query else self.tasks)
def run():
    """Entry point: start the interactive CLI loop."""
    CLI().cmdloop()
if __name__ == '__main__':
    run()
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,281
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/storage/base.py
|
import re
import uuid
from arise_todo.core.parser import Task
class BaseStorage(object):
    """Abstract interface for task storage backends (Python 2).

    Concrete backends implement the CRUD/search/iteration methods; this
    base provides uuid detection, Task coercion and connection-state
    bookkeeping.
    """
    # Canonical textual UUID: 8-4-4-4-12 lowercase hex groups.
    uuid_re = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
    def __init__(self):
        self._connected = False
    def _is_uuid(self, task):
        # Truthy (a match object) for a UUID-shaped string, True for a
        # uuid.UUID instance, False otherwise.
        if isinstance(task, (str, unicode)):
            return self.uuid_re.match(task)
        elif isinstance(task, uuid.UUID):
            return True
        return False
    def _fake_task(self, task_uuid):
        # Placeholder Task carrying only the id, for id-based lookups.
        return Task('', task_id=task_uuid)
    def task(self, task):
        """Coerce task (Task | uuid string/UUID | raw text) to a Task."""
        if isinstance(task, Task):
            return task
        elif self._is_uuid(task):
            return self._fake_task(task)
        elif isinstance(task, (str, unicode)):
            return Task(task)
        raise TypeError
    def get(self, task_uuid):
        """Return the stored task with the given uuid."""
        raise NotImplementedError
    def add(self, task, after_task=None, before_task=None):
        """Store a task, optionally positioned relative to another."""
        raise NotImplementedError
    def delete(self, task_or_uuid):
        """Remove a task."""
        raise NotImplementedError
    def replace(self, task_or_uuid, new_task):
        """Swap an existing task for new_task in place."""
        raise NotImplementedError
    def move_after(self, task_or_uuid, after_task_or_uuid):
        """
        Places the task after `after_task`
        """
        raise NotImplementedError
    def move_before(self, task_or_uuid, before_task_or_uuid):
        """
        Places the task before `before_task`
        """
        raise NotImplementedError
    def search(self, query, treat_as_regex=True):
        """
        Matches tasks with query
        """
        raise NotImplementedError
    def iterate(self):
        """
        Iterates over all tasks
        """
        raise NotImplementedError
    def __iter__(self):
        return iter(self.iterate())
    def __contains__(self, task):
        raise NotImplementedError
    def connect(self):
        # Subclasses may override to open real resources.
        self._connected = True
    def disconnect(self):
        self._connected = False
    @property
    def connected(self):
        # True between connect() and disconnect().
        return self._connected
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,282
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/core/utils.py
|
def get_storage(name=None, options=None):
    """Instantiate a storage backend.

    name: registry key from conf.STORAGES; None selects the configured
          default backend.
    options: keyword arguments for the backend constructor; merged over
             the default backend's configured options when name is None.

    Raises KeyError for an unknown backend name.
    """
    from .conf import STORAGES, DEFAULT_STORAGE
    opts = options or {}
    if name:
        if name not in STORAGES:
            raise KeyError('{} storage does not exist'.format(name))
        return STORAGES[name](**opts)
    default_cls = STORAGES[DEFAULT_STORAGE['name']]
    merged = dict(DEFAULT_STORAGE['options'], **opts)
    return default_cls(**merged)
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,283
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/core/manager.py
|
from .parser import Task
from .utils import get_storage
class TasksManager(object):
    """Facade over a storage backend for task CRUD operations.

    storage: a BaseStorage-compatible backend; when None the default
    backend from configuration is used (via get_storage()).
    """
    def __init__(self, storage=None):
        self._storage = storage
        if self._storage is None:
            self._storage = get_storage()
    def close(self):
        """Disconnect the backend if it is currently connected."""
        if self._storage.connected:
            self._storage.disconnect()
    @property
    def storage(self):
        """The backend, lazily connected on first access."""
        if not self._storage.connected:
            self._storage.connect()
        return self._storage
    def add(self, task):
        """Store a task (text, uuid or Task; coerced by the backend)."""
        self.storage.add(self._storage.task(task))
    def delete(self, task):
        """Remove a task."""
        self.storage.delete(self._storage.task(task))
    def edit(self, task, new_task):
        """Replace a task with new_task."""
        self.storage.replace(self._storage.task(task),
                             self._storage.task(new_task))
    def search(self, query):
        """Return tasks matching the query."""
        return self.storage.search(query)
    def move_after(self, task, after):
        """Place task right after the anchor task."""
        self.storage.move_after(task, after)
    def move_before(self, task, before):
        """Place task right before the anchor task."""
        # BUG FIX: this previously delegated to storage.move_after, so the
        # task ended up *after* the anchor instead of before it.
        self.storage.move_before(task, before)
    def __iter__(self):
        return iter(self.storage)
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,284
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/storage/backends/text_file.py
|
import os
import operator
from funcy import ikeep, any, partial, ifilter, first, imap, select
from arise_todo.core.parser import Task
from ..base import BaseStorage
class TextStorage(BaseStorage):
    """Line-per-task storage in a plain text file (uses funcy helpers)."""
    def __init__(self, file_name, keep_opened=False):
        # keep_opened: reuse one file handle across operations.
        super(TextStorage, self).__init__()
        self._keep_opened = keep_opened
        self._file_name = os.path.realpath(os.path.expanduser(file_name))
        self._fd = None
    @property
    def _file(self):
        # Lazily (re)open the backing file; without keep_opened a fresh
        # handle is opened on every access.
        if not self._keep_opened or self._fd is None or (self._fd and self._fd.closed):
            if self._fd and not self._fd.closed:
                self._fd.close()
            self._fd = open(self._file_name, 'a+')
        return self._fd
    def _atomic_write(self, tasks):
        """Rewrite the whole file with `tasks`, one per line."""
        # NOTE(review): the handle is opened in 'a+' and truncate() is
        # called without seek(0); on POSIX, 'a+' writes always append, so
        # this may not actually replace the old contents — verify.
        tasks = map(u'{!s}\n'.format, tasks)
        self._file.truncate()
        self._file.writelines(tasks)
        self._file.flush()
    def __contains__(self, task):
        # Membership by Task equality (uuid-based).
        return any(partial(operator.eq, task), self)
    def get(self, task_uuid):
        # First stored task equal to a fake task built from the uuid.
        return first(ifilter(partial(operator.eq, self.task(task_uuid)), self))
    def add(self, task, after_task=None, before_task=None):
        # Appends only if absent; after_task/before_task are ignored here.
        if task not in self:
            self._file.write('{!s}\n'.format(task))
            self._file.flush()
    def delete(self, task):
        # Keep every task that is not equal to the target, then rewrite.
        tasks = ifilter(partial(operator.ne, self.task(task)), self)
        self._atomic_write(tasks)
    def replace(self, task_or_uuid, new_task):
        # Substitute the matching task in-place, preserving order.
        src_task = self.task(task_or_uuid)
        tasks = imap(lambda t: self.task(new_task) if t == src_task else t, self)
        self._atomic_write(tasks)
    def search(self, query, treat_as_regex=True):
        """Return Tasks whose raw line matches query (regex or substring)."""
        if treat_as_regex:
            return map(Task, select(query, self.iterate(raw=True)))
        return map(Task, ifilter(lambda t: query in t, self.iterate(raw=True)))
    def move_before(self, task_or_uuid, before_task_or_uuid, shift=0):
        # Remove the task and reinsert it at the anchor's index (+shift);
        # falls back to appending when the anchor is missing.
        task, before = self.task(task_or_uuid), self.task(before_task_or_uuid)
        tasks = list(self)
        if task in tasks:
            tasks.remove(task)
            tasks.insert(tasks.index(before) + shift if before in tasks else len(tasks), task)
            self._atomic_write(tasks)
    def move_after(self, task_or_uuid, after_task_or_uuid):
        # Same as move_before, but one slot later.
        self.move_before(task_or_uuid, after_task_or_uuid, shift=1)
    def iterate(self, raw=False):
        # raw=True: keep truthy raw lines; otherwise map lines through
        # Task and keep truthy results (funcy.ikeep semantics).
        if raw:
            return ikeep(self._file)
        return ikeep(Task, self._file)
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,285
|
miphreal/arise.todo
|
refs/heads/master
|
/arise_todo/core/parser.py
|
from __future__ import unicode_literals
from inspect import isclass
import operator
import re
import uuid
# Regex fragments for ISO-ish dates (YYYY[-MM[-DD]]) and times (HH:MM[:SS]).
DATE_YYYY = r'(\d{4})'
DATE_MM = r'(10|11|12|(0?\d))'
DATE_DD = r'([012]?\d|30|31)'
DATE_RE = r'({yyyy}(-{mm})?(-{dd})?)'.format(yyyy=DATE_YYYY, mm=DATE_MM, dd=DATE_DD)
TIME_HH = r'([01]?\d|2[0-4])'
TIME_MM = r'([0-5]?\d)'
# Seconds additionally allow 60/61 (leap seconds).
TIME_SS = r'({mm}|60|61)'.format(mm=TIME_MM)
TIME_RE = r'({hh}:{mm}(:{ss})?)'.format(hh=TIME_HH, mm=TIME_MM, ss=TIME_SS)
# Full "date [time]" pattern used by the item classes below.
DATE_TIME_RE = r'({date}( {time})?)'.format(date=DATE_RE, time=TIME_RE)
class TaskItem(object):
    """Base class for one parsable fragment of a task line (Python 2).

    Subclasses set `pattern`; parsing captures group 1 of the first match.
    """
    pattern = None
    EMPTY = ''
    def __init__(self, src_text, task):
        self.task = task
        self.data = self._parse(src_text)
    def _parse(self, src_text):
        # Group 1 of the first match, or EMPTY when absent. Without a
        # pattern the whole source text is kept.
        if self.pattern:
            match = self.pattern.search(src_text)
            return match.group(1) if match else self.EMPTY
        return src_text
    def _format(self, item):
        # Hook: subclasses re-attach their sigil ('(A)', '#', '&', ...).
        return item
    def format(self):
        """Render this item back to its textual form (EMPTY when unset)."""
        if self.data:
            return self._format(self.data)
        return self.EMPTY
    def clean(self, src_text):
        """Removes itself from src_text"""
        return src_text.replace(self.format(), '')
    @property
    def name(self):
        # CamelCase class name minus 'Item', as snake_case
        # (e.g. CreationDateItem -> 'creation_date').
        return re.sub(r'^_', '', re.sub(r'([A-Z])', '_\\1', self.__class__.__name__.replace('Item', '')).lower())
    # Python 2: str() and unicode() both delegate to format().
    __str__ = __unicode__ = format
class MultipleTaskItem(TaskItem):
    """Task item that may occur several times in one line (e.g. tags).

    `data` is the list of all matches, not just the first one.
    """
    def _parse(self, src_text):
        # Collect every occurrence of the pattern.
        return self.pattern.findall(src_text)
    def _format(self, projects):
        # Render all occurrences space-separated.
        return ' '.join(map('{!s}'.format, projects))
    def clean(self, src_text):
        """Strip every formatted occurrence from src_text."""
        cleaned = src_text
        for token in self.format().split():
            cleaned = cleaned.replace(token, '')
        return cleaned
class PriorityItem(TaskItem):
    """'(A)' / '(A12)' priority marker."""
    pattern = re.compile(r'\(([A-Z]\d*)\)')
    def _format(self, data):
        return '({})'.format(data)
class CreationDateItem(TaskItem):
    """Date/time anchored at the very start of the line."""
    pattern = re.compile(r'^({})'.format(DATE_TIME_RE))
class SchedulingItem(TaskItem):
    """'#<date [time]>' scheduling marker."""
    pattern = re.compile(r'#({})'.format(DATE_TIME_RE))
    def _format(self, data):
        return '#{}'.format(data)
class FadingDateItem(TaskItem):
    """'#~<date [time]>' marker (not listed in TASK_PARTS below)."""
    pattern = re.compile(r'#~({})'.format(DATE_TIME_RE))
    def _format(self, data):
        return '#~{}'.format(data)
class TaskMsgItem(TaskItem):
    """The free-text message: whatever remains after other items clean up."""
    @property
    def data(self):
        # Always reflects the owning task's current clean text.
        return self.task.clean_text
    @data.setter
    def data(self, value):
        # Ignore the write from TaskItem.__init__; data is derived.
        pass
class ProjectsItem(MultipleTaskItem):
    """'+project' tags (repeatable)."""
    pattern = re.compile(r'\+([\w/-]+)')
    def _format(self, projects):
        return ' '.join('+{}'.format(prj) for prj in projects)
class ContextsItem(MultipleTaskItem):
    """'@context' tags (repeatable)."""
    pattern = re.compile(r'@([\w/-]+)')
    def _format(self, projects):
        return ' '.join('@{}'.format(prj) for prj in projects)
def text_item(text):
    """Factory: build a TaskItem subclass matching the literal `text`.

    The returned class always parses and formats to the fixed text node.
    """
    class TextNodeItem(TaskItem):
        _text_node = text
        pattern = re.compile(r'({})'.format(re.escape(_text_node)))
        def _parse(self, src_text):
            # The node is constant; the source text is irrelevant.
            return self._text_node
        def format(self):
            return self._text_node
    return TextNodeItem
# Class-style alias for the factory.
TextItem = text_item
class StateItem(TaskItem):
    """Workflow state marker: '&state' with an optional '(datetime)'."""
    pattern = re.compile(r'&([\w-]+(\({datetime}\))?)'.format(datetime=DATE_TIME_RE))
    STATES = ('todo', 'in-progress', 'done', 'removed', 'skipped')
    UNKNOWN_STATE = 'unknown'
    def _parse(self, src_text):
        value = super(StateItem, self)._parse(src_text)
        # Any value not starting with a known state collapses to 'unknown'.
        return value if not value or any(map(value.lower().startswith, self.STATES)) else self.UNKNOWN_STATE
    def _format(self, item):
        return '&{}'.format(item)
class MetadataItem(MultipleTaskItem):
    """key:value pairs; values may be bare words or quoted strings."""
    pattern = re.compile(r'''([\w-]+:([\w-]+|"[^"]+"|'[^']+'))''')
    def _parse(self, src_text):
        # findall yields (full, value) tuples; keep the full 'key:value'.
        return [match[0] for match in self.pattern.findall(src_text)]
# Parse order matters: each item removes its own text before the next one
# parses, and TaskMsgItem reflects whatever the task's clean text ends up as.
TASK_PARTS = (
    CreationDateItem,
    PriorityItem,
    StateItem,
    TaskMsgItem,
    ProjectsItem,
    ContextsItem,
    SchedulingItem,
    MetadataItem,
)
class Task(object):
    """
    Task Parser (Python 2).

    Splits a todo line into TASK_PARTS items and derives a deterministic
    uuid5 from the canonical rendering, so equal-looking tasks compare
    equal regardless of original formatting.
    """
    def __init__(self, todo_text, task_id=None, task_parts=TASK_PARTS):
        # NOTE: .decode assumes byte input is UTF-8 (Python 2 semantics).
        self.src_text = todo_text.strip().decode('utf-8')
        self.clean_text = ''
        self.task_parts, self.cleaned_text = self.parse_task_parts(self.src_text, task_parts)
        # Collapse whitespace runs left behind by the item cleaners.
        self.clean_text = re.sub(r'\s{2,}', ' ', self.cleaned_text).strip()
        self.id = self._task_id(task_id)
    def __eq__(self, other):
        # Identity is the derived uuid, not the raw text.
        return self.id == other.id
    def __ne__(self, other):
        return self.id != other.id
    def _task_id(self, task_uuid=None):
        """Return the given uuid, or derive uuid5 from the task text."""
        from hashlib import sha1
        if task_uuid is None:
            return uuid.uuid5(namespace=uuid.NAMESPACE_OID, name=sha1(unicode(self).encode('utf-8')).digest())
        # Assumes task_uuid is a hex string; a uuid.UUID instance would
        # raise here (cf. BaseStorage._is_uuid) — confirm callers.
        return uuid.UUID(hex=task_uuid, version=5)
    def parse_task_parts(self, todo_text, task_parts):
        """Instantiate each item class; each strips its own text."""
        parts = []
        for part in task_parts:
            if isclass(part) and issubclass(part, TaskItem):
                part = part(todo_text, self)
            todo_text = part.clean(todo_text)
            parts.append(part)
        return parts, todo_text
    @property
    def data(self):
        """Dict of {item_name: parsed_data} plus the task id."""
        d = {p.name: p.data for p in self.task_parts}
        d['id'] = self.id
        return d
    def __str__(self):
        # Join non-empty formatted items in canonical TASK_PARTS order.
        return ' '.join(filter(operator.truth, map(operator.methodcaller('format'), self.task_parts)))
|
{"/arise_todo/storage/base.py": ["/arise_todo/core/parser.py"], "/arise_todo/core/utils.py": ["/arise_todo/core/conf.py"], "/arise_todo/core/manager.py": ["/arise_todo/core/parser.py", "/arise_todo/core/utils.py"], "/arise_todo/storage/backends/text_file.py": ["/arise_todo/core/parser.py", "/arise_todo/storage/base.py"]}
|
5,286
|
roosnic1/rpi_cap
|
refs/heads/master
|
/Adafruit_MPR121.py
|
# Based on Arduino example by Jim Lindblom
# http://bildr.org/2011/05/mpr121_arduino/
import smbus
bus = smbus.SMBus(1)
from Cap import Captivity
class Adafruit_MPR121(Captivity):
    """MPR121 12-electrode capacitive touch controller driver.

    Talks to the chip through the I2CInfo helper bound by Captivity
    (device address is fixed inside the helper, so register reads/writes
    take only a register and value).
    """
    # MPR121 Register Defines
    MHD_R = 0x2B
    NHD_R = 0x2C
    NCL_R = 0x2D
    FDL_R = 0x2E
    MHD_F = 0x2F
    NHD_F = 0x30
    NCL_F = 0x31
    FDL_F = 0x32
    # Touch (T) / release (R) threshold registers, consecutive 0x41..0x58.
    ELE0_T = 0x41
    ELE0_R = 0x42
    ELE1_T = 0x43
    ELE1_R = 0x44
    ELE2_T = 0x45
    ELE2_R = 0x46
    ELE3_T = 0x47
    ELE3_R = 0x48
    ELE4_T = 0x49
    ELE4_R = 0x4A
    ELE5_T = 0x4B
    ELE5_R = 0x4C
    ELE6_T = 0x4D
    ELE6_R = 0x4E
    ELE7_T = 0x4F
    ELE7_R = 0x50
    ELE8_T = 0x51
    ELE8_R = 0x52
    ELE9_T = 0x53
    ELE9_R = 0x54
    ELE10_T = 0x55
    ELE10_R = 0x56
    ELE11_T = 0x57
    ELE11_R = 0x58
    FIL_CFG = 0x5D
    ELE_CFG = 0x5E
    GPIO_CTRL0 = 0x73
    GPIO_CTRL1 = 0x74
    GPIO_DATA = 0x75
    GPIO_DIR = 0x76
    GPIO_EN = 0x77
    GPIO_SET = 0x78
    GPIO_CLEAR = 0x79
    GPIO_TOGGLE = 0x7A
    ATO_CFG0 = 0x7B
    ATO_CFGU = 0x7D
    ATO_CFGL = 0x7E
    ATO_CFGT = 0x7F
    # Global Constants
    TOU_THRESH = 0x06
    REL_THRESH = 0x0A

    def __init__(self, i2c_addr, i2c_bus, touch_offset = 0):
        # BUG FIX: the original called super(MPR121, self) — MPR121 is an
        # undefined name, so construction always raised NameError.
        super(Adafruit_MPR121, self).__init__(i2c_addr, i2c_bus, touch_offset)
        self.setup()

    @property
    def driver_name(self):
        return "Adafruit_MPR121"

    def readData(self, address=None):
        """Return the touch-status byte for electrodes 0-7.

        `address` is accepted for backward compatibility but ignored:
        the device address is already bound inside the I2C helper.
        (BUG FIX: the original lacked `self` and passed an extra address
        argument that I2CInfo.read_byte_data does not take.)
        """
        MSB = self._i2c.read_byte_data(0x00)
        LSB = self._i2c.read_byte_data(0x01)  # read kept from original; unused
        # touchData = (MSB << 8) | LSB  # full 12-electrode word, if needed
        touchData = MSB
        return touchData

    def setup(self):
        """Configure filtering, thresholds, and enable all 12 electrodes.

        (BUG FIX: the original lacked `self`, referenced an undefined
        `address`, and used bare class-constant names without `self.`.)
        """
        write = self._i2c.write_byte_data
        # Enter standby mode before (re)configuring.
        write(self.ELE_CFG, 0x00)
        # Section A - Controls filtering when data is > baseline.
        write(self.MHD_R, 0x01)
        write(self.NHD_R, 0x01)
        write(self.NCL_R, 0x00)
        write(self.FDL_R, 0x00)
        # Section B - Controls filtering when data is < baseline.
        write(self.MHD_F, 0x01)
        write(self.NHD_F, 0x01)
        write(self.NCL_F, 0xFF)
        write(self.FDL_F, 0x02)
        # Section C - Touch/release thresholds for electrodes 0..11.
        # Register pairs are consecutive (0x41..0x58), written in order.
        for touch_reg in range(self.ELE0_T, self.ELE11_R + 1, 2):
            write(touch_reg, self.TOU_THRESH)
            write(touch_reg + 1, self.REL_THRESH)
        # Section D - Set the Filter Configuration (ESI2).
        write(self.FIL_CFG, 0x04)
        # Section E - Electrode Configuration: enable all 12 electrodes.
        write(self.ELE_CFG, 0x0C)
|
{"/Cap.py": ["/I2CInfo.py"]}
|
5,287
|
roosnic1/rpi_cap
|
refs/heads/master
|
/I2CInfo.py
|
#!/usr/bin/env python
class I2CInfo(object):
    """Thin wrapper binding an SMBus instance to one device address.

    Callers then read/write registers without repeating the address.
    """
    def __init__(self, bus, address):
        self._smbus = bus
        self._device = address
    def write_byte_data(self, register, value):
        """Write one byte to `register` on the bound device."""
        self._smbus.write_byte_data(self._device, register, value)
    def read_byte_data(self, register):
        """Read and return one byte from `register` on the bound device."""
        return self._smbus.read_byte_data(self._device, register)
|
{"/Cap.py": ["/I2CInfo.py"]}
|
5,288
|
roosnic1/rpi_cap
|
refs/heads/master
|
/main.py
|
#!/usr/bin/env python
import smbus
import time
from Adafruit_CAP1188 import Adafruit_CAP1188 as CAP1188
import RPi.GPIO as GPIO
# Pin 29 (BOARD numbering) is configured as the controller's IRQ input.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(29,GPIO.IN)
if __name__ == "__main__":
    # CAP1188 touch controller on I2C bus 1, address 0x29 (Python 2 script).
    bus = smbus.SMBus(1)
    cap2_addr = 0x29
    cap2 = CAP1188(cap2_addr, bus, touch_offset = 0)
    #print "multitouch Status: " + str(cap2.multitouch_enabled)
    # Disable multitouch (NOTE(review): the original comment said
    # "Turn on multitouch" but the value written is False — confirm intent).
    cap2.multitouch_enabled = False
    #print "multitouch Status: " + str(cap2.multitouch_enabled)
    # Link LEDs to touches
    cap2.leds_linked = True
    # Enabled IRQ
    cap2.irq_enabled = True
    # Speed it up
    #cap2.write_register(Adafruit_CAP1188.STANDBYCFG, 0x30)
    print cap2
    # Poll the IRQ pin level and the touched-electrode list once a second.
    while True:
        if(GPIO.input(29)):
            print "INPUT is HIGH"
        else:
            print "INPUT is LOW"
        t = cap2.touched
        for x in t:
            print "Touched: " + str(x)
        time.sleep(1)
|
{"/Cap.py": ["/I2CInfo.py"]}
|
5,289
|
roosnic1/rpi_cap
|
refs/heads/master
|
/Cap.py
|
from I2CInfo import I2CInfo as I2C
class Captivity(object):
    """
    Base class for capacitive-touch controller drivers.

    i2c_addr: the address of the device on the given i2c bus
    i2c_bus: the SMBus instance to use for this device.
    touch_offset: If provided, an offset to be applied to the
                  reported touch indices (helpful when chaining
                  multiple units)
    """
    def __init__(self, i2c_addr, i2c_bus, touch_offset = 0):
        self._i2c = I2C(i2c_bus, i2c_addr)
        self._touch_offset = touch_offset

    def write_register(self, register, value):
        """Write `value` to `register` as a single transaction."""
        if not self.is_i2c:
            return None
        return self._i2c.write_byte_data(register, value)

    def read_register(self, register):
        """Read and return the value of `register`."""
        if not self.is_i2c:
            return None
        return self._i2c.read_byte_data(register)

    @property
    def is_i2c(self):
        """True when an I2C transport is configured."""
        return self._i2c is not None

    @property
    def is_spi(self):
        """True when SPI is in use (not implemented; inverse of is_i2c)."""
        # TODO really implement this
        return not self.is_i2c
|
{"/Cap.py": ["/I2CInfo.py"]}
|
5,295
|
ZephSibley/agrimetrics_challenge
|
refs/heads/master
|
/main.py
|
from fastapi import FastAPI
app = FastAPI()
SCHEDULE = []
@app.get("/")
async def root():
    """Health-check endpoint."""
    return {"message": "Hello World"}
@app.get("/order")
async def order():
    """Queue a sandwich order onto the global SCHEDULE.

    Each order appends three timed steps: make (now, or after the
    current backlog finishes), serve (+150s), and a break (+60s after
    serving). A pending 'Take a break' entry is replaced by the new
    order's steps.
    """
    from datetime import datetime, timedelta
    current_time = datetime.now()
    make_time = current_time
    if SCHEDULE:
        latest_scheduled_time = SCHEDULE[-1][0]
        if current_time < latest_scheduled_time:
            # The latest entry is always 'Take a break'
            SCHEDULE.pop()
            # Start the new sandwich once the existing backlog is done.
            time_diff = latest_scheduled_time - current_time
            make_time += time_diff
    SCHEDULE.append((make_time, 'Make sandwich'))
    SCHEDULE.append((make_time + timedelta(seconds=150), 'Serve sandwich'))
    SCHEDULE.append((make_time + timedelta(seconds=210), 'Take a break'))
    return {"message": "OK"}
@app.get("/schedule")
async def schedule():
    """Return the global schedule: a list of (datetime, step) tuples."""
    return {"schedule": SCHEDULE}
|
{"/test_main.py": ["/main.py"]}
|
5,296
|
ZephSibley/agrimetrics_challenge
|
refs/heads/master
|
/test_main.py
|
from fastapi.testclient import TestClient
from datetime import timedelta
from main import app
from main import SCHEDULE
client = TestClient(app)
# TODO: Mock out the SCHEDULE list out so we're not testing global state
# NOTE: these tests are order-dependent — they accumulate entries in the
# module-level SCHEDULE as they run.
def test_read_main():
    # Health check returns the greeting payload.
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"message": "Hello World"}
def test_read_schedule():
    # Before any order, the schedule is empty.
    response = client.get("/schedule")
    assert response.status_code == 200
    assert response.json() == {"schedule": []}
def test_read_order():
    # First order appends make/serve/break with 150s and 60s gaps.
    response = client.get("/order")
    assert response.status_code == 200
    assert response.json() == {"message": "OK"}
    assert len(SCHEDULE) == 3
    assert SCHEDULE[-1][1] == 'Take a break'
    assert SCHEDULE[-1][0] - SCHEDULE[-2][0] == timedelta(seconds=60)
    assert SCHEDULE[-2][0] - SCHEDULE[-3][0] == timedelta(seconds=150)
def test_read_order_again():
    # Second order replaces the pending break, netting 5 entries total.
    response = client.get("/order")
    assert response.status_code == 200
    assert response.json() == {"message": "OK"}
    assert len(SCHEDULE) == 5
    assert SCHEDULE[-1][1] == 'Take a break'
    assert SCHEDULE[-1][0] - SCHEDULE[-2][0] == timedelta(seconds=60)
    assert SCHEDULE[-2][0] - SCHEDULE[-3][0] == timedelta(seconds=150)
    assert SCHEDULE[-4][1] != 'Take a break'
    assert SCHEDULE[-3][0] - SCHEDULE[-4][0] == timedelta(seconds=60)
def test_read_schedule_with_orders():
    # The endpoint reflects the accumulated global state.
    response = client.get("/schedule")
    assert response.status_code == 200
    schedule = response.json()['schedule']
    assert type(schedule) == list
    assert len(schedule) == 5
|
{"/test_main.py": ["/main.py"]}
|
5,302
|
elaugier/aiohttp_samples
|
refs/heads/master
|
/controllers/sendfile.py
|
from aiohttp import web
import mimetypes
import os
import tempfile
class sendfile():
    """aiohttp handler that creates a small temp file and streams it back."""

    def __init__(self):
        pass

    async def get(self, request):
        """Write a throwaway temp file, stream its bytes, delete it.

        Returns a StreamResponse with Content-Disposition set;
        non-text content types are sent as attachments.
        """
        fh = tempfile.NamedTemporaryFile(delete=False)
        fh.write(bytes("test;test2" + os.linesep, 'UTF-8'))
        fh.close()
        filename = os.path.abspath(fh.name)
        # The with-block closes the handle; the explicit f.close() the
        # original also did was redundant.
        with open(filename, "rb") as f:
            data = f.read()
        os.remove(fh.name)
        resp = web.StreamResponse()
        content_type, _ = mimetypes.guess_type(filename)
        # BUG FIX: guess_type returns (None, None) for the extension-less
        # temp file, and "'text' not in None" raised TypeError. Fall back
        # to a generic binary type.
        resp.content_type = content_type or 'application/octet-stream'
        disposition = 'filename="{}"'.format(filename)
        if 'text' not in resp.content_type:
            disposition = 'attachment; ' + disposition
        resp.headers['CONTENT-DISPOSITION'] = disposition
        resp.content_length = len(data)
        await resp.prepare(request)
        await resp.write(data)
        return resp
|
{"/app.py": ["/controllers/sendfile.py"]}
|
5,303
|
elaugier/aiohttp_samples
|
refs/heads/master
|
/app.py
|
from aiohttp import web
from controllers.sendfile import sendfile
# Wire the single /sendfile route and start the aiohttp server on :4563.
app = web.Application()
sendfileCtrl = sendfile()
app.router.add_get("/sendfile", sendfileCtrl.get)
web.run_app(app,port=4563)
|
{"/app.py": ["/controllers/sendfile.py"]}
|
5,305
|
epfl-dlab/BT-eval
|
refs/heads/main
|
/TrueSkill.py
|
import itertools
import trueskill as ts
from trueskill import rate_1vs1
def TrueSkill(df, mu=ts.MU, sigma=ts.SIGMA,
              beta=ts.BETA, tau=ts.TAU,
              draw_prob=ts.DRAW_PROBABILITY):
    """Rank systems by TrueSkill over all pairwise score comparisons.

    df: one column of per-instance scores per system. Every pair of
    systems plays one "match" per row: higher score wins, equal scores
    draw. Returns the final rating means (mu), in df.columns order.
    """
    trueskill_env = ts.TrueSkill(mu=mu, sigma=sigma, beta=beta, tau=tau, draw_probability=draw_prob)
    competitors = df.columns
    system_ratings = {x: trueskill_env.create_rating() for x in competitors}
    for (sys_a, sys_b) in itertools.combinations(competitors, 2):
        scores_a, scores_b = df[sys_a], df[sys_b]
        for Xs_a, Xs_b in zip(scores_a, scores_b):
            rating_a, rating_b = system_ratings[sys_a], system_ratings[sys_b]
            # BUG FIX: the module-level rate_1vs1 always uses the *global*
            # environment, silently ignoring the mu/sigma/beta/tau/draw_prob
            # configured above. Rate through the local environment instead
            # (env.rate with rank 0 = winner; equal ranks = draw).
            if Xs_a > Xs_b:
                (rating_a,), (rating_b,) = trueskill_env.rate([(rating_a,), (rating_b,)], ranks=[0, 1])
            elif Xs_a < Xs_b:
                (rating_a,), (rating_b,) = trueskill_env.rate([(rating_a,), (rating_b,)], ranks=[1, 0])
            else:
                (rating_a,), (rating_b,) = trueskill_env.rate([(rating_a,), (rating_b,)], ranks=[0, 0])
            system_ratings[sys_a] = rating_a
            system_ratings[sys_b] = rating_b
    return [system_ratings[sys].mu for sys in df.columns]
|
{"/simulations.py": ["/bt.py", "/Elo.py", "/TrueSkill.py"]}
|
5,306
|
epfl-dlab/BT-eval
|
refs/heads/main
|
/Elo.py
|
import itertools
class Elo:
    """Minimal Elo rating tracker.

    k: update step size; g: game weight multiplier;
    homefield: rating bonus granted to the home player.
    """

    def __init__(self, k, g=1, homefield=0):
        self.ratingDict = {}
        self.k = k
        self.g = g
        self.homefield = homefield

    def add_player(self, name, rating=1500):
        """Register a player at the given (default 1500) rating."""
        self.ratingDict[name] = rating

    def expectResult(self, p1, p2):
        """Expected score of the p1-rated player against p2."""
        return 1 / (10.0 ** ((p2 - p1) / 400.0) + 1)

    def game_over(self, winner, loser, winnerHome=False):
        """Update both ratings after `winner` beats `loser`."""
        ratings = self.ratingDict
        if winnerHome:
            expected = self.expectResult(ratings[winner] + self.homefield, ratings[loser])
        else:
            expected = self.expectResult(ratings[winner], ratings[loser] + self.homefield)
        # Zero-sum update: winner gains what the loser gives up.
        delta = (self.k * self.g) * (1 - expected)
        ratings[winner] = ratings[winner] + delta
        ratings[loser] = ratings[loser] - delta
def ELO(df, k=20, g=1, homefield=0):
    """Rank systems with Elo over all pairwise per-row comparisons.

    df: one column of scores per system; every pair of systems plays one
    game per row, higher score winning. Returns the final ratings in
    df.columns order.

    NOTE: ties are credited to the second system of the pair (original
    behavior, preserved).
    """
    rater = Elo(k, g, homefield)
    for name in df.columns:
        rater.add_player(name)
    for sys_a, sys_b in itertools.combinations(df.columns, 2):
        for score_a, score_b in zip(df[sys_a], df[sys_b]):
            if score_a > score_b:
                rater.game_over(sys_a, sys_b)
            else:
                rater.game_over(sys_b, sys_a)
    return [rater.ratingDict[name] for name in df.columns]
|
{"/simulations.py": ["/bt.py", "/Elo.py", "/TrueSkill.py"]}
|
5,307
|
epfl-dlab/BT-eval
|
refs/heads/main
|
/simulations.py
|
from collections import defaultdict
import numpy as np
import scipy.stats as stats
import pandas as pd
from bt import BT
from Elo import ELO
from TrueSkill import TrueSkill
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def sample_dataset(n_regular_types, n_reverse_types, sys_strength, noise=1, n=100):
    """Draw synthetic per-instance scores for each system.

    For each of n_regular_types instance types, a shared random offset in
    [0, 100) is added to every system's true strength, and int(n /
    n_regular_types) Gaussian samples (std = noise) are drawn per system.

    n_reverse_types is currently unused: the reverse-type sampling that
    once lived here is disabled.

    Returns a DataFrame with one column per system.
    """
    samples_per_type = int(n / n_regular_types)
    scores = defaultdict(list)
    for _ in range(n_regular_types):
        type_offset = 100 * np.random.rand()
        for name, strength in sys_strength.items():
            draws = stats.norm.rvs(loc=strength + type_offset, scale=noise, size=samples_per_type)
            scores[name].extend(draws)
    return pd.DataFrame.from_dict(scores)
def add_outliers(df, sys_strengths, percent_outliers):
    """Append adversarial rows whose system ordering disagrees with truth.

    percent_outliers of df's rows (at least 1 when positive) are drawn
    uniformly and kept only if their Kendall correlation with the true
    strengths is negative, then scaled to [0, 100).

    Returns df with the outlier rows concatenated below it.
    """
    n_outliers = int(percent_outliers * df.shape[0])
    if percent_outliers > 0:
        n_outliers = max(n_outliers, 1)
    strengths = list(sys_strengths.values())
    outlier_rows = []
    while len(outlier_rows) < n_outliers:
        draw = np.random.rand(len(sys_strengths.keys()))
        # Rejection sampling: keep only anti-correlated score vectors.
        if stats.kendalltau(draw, strengths)[0] < 0.:
            outlier_rows.append(dict(zip(sys_strengths.keys(), 100 * draw)))
    return pd.concat([df, pd.DataFrame(outlier_rows)], axis=0)
def evaluate(df, sys_strength, method):
    """Score how well mean, median and a pairwise ranker recover the
    true system ordering.

    df: one column of scores per system.
    sys_strength: {system_name: true_strength}.
    method: {'name': 'BT'} | {'name': 'ELO', 'k': ...} |
            {'name': <other>, 'mu': ..., 'sigma': ..., 'beta': ...}
            (TrueSkill). A bare string is accepted for parameter-less
            methods such as 'BT'.

    Returns (tau_mean, tau_median, tau_pairwise): Kendall correlations
    of each aggregate ranking with the true strengths.
    """
    # BUG FIX: run_simulations defaults to method='BT' (a plain string),
    # and the original crashed on method['name']. Normalise to a dict.
    if isinstance(method, str):
        method = {'name': method}
    mean_scores = dict(zip(df.columns, df.mean(axis=0).to_list()))
    median_scores = dict(zip(df.columns, df.median(axis=0).to_list()))
    if method['name'] == 'BT':
        bt_scores = dict(zip(df.columns, BT(df)))
    elif method['name'] == 'ELO':
        bt_scores = dict(zip(df.columns, ELO(df, method['k'])))
    else:
        bt_scores = dict(zip(df.columns, TrueSkill(df, method['mu'], method['sigma'], method['beta'])))
    # Align all score lists to sys_strength's key order.
    bt, mean, median = [], [], []
    for s in sys_strength.keys():
        bt.append(bt_scores[s])
        median.append(median_scores[s])
        mean.append(mean_scores[s])
    strengths = list(sys_strength.values())
    return (stats.kendalltau(strengths, mean)[0],
            stats.kendalltau(strengths, median)[0],
            stats.kendalltau(strengths, bt)[0])
def run_simulations(n_regular_list, percentage_reverse, percent_outliers_list, n_systems_list, n_samples_list, method='BT'):
    """Sweep the simulation grid and score each aggregation strategy.

    For every combination of (#regular types, reverse fraction, outlier
    fraction, #systems, #samples), repeats 10 times: draw random system
    strengths, sample a dataset, inject outliers, and evaluate()
    mean/median/pairwise rankings against the true strengths.

    Returns a long-format DataFrame with one row per run.
    NOTE(review): the default method='BT' is a bare string; evaluate()
    must accept that form (see evaluate) — confirm.
    """
    n_repeat = 10
    mean_perf, median_perf, bt_perf = [], [], []
    number_samples, number_regular_types, percent_outliers, number_reverse_types, noise, n_systems = [], [], [], [], [], []
    for n_reg in n_regular_list:
        for rev_percent in percentage_reverse:
            # Number of reverse types, derived from the fraction.
            n_rev = int(rev_percent * n_reg)  # + 1
            for outlier_percent in percent_outliers_list:
                for n_sys in n_systems_list:
                    for n_samples in n_samples_list:
                        for _ in range(n_repeat):
                            # Random strengths, normalised then scaled to [0, 10).
                            strengths = np.random.rand(n_sys)
                            strengths /= np.sum(strengths)
                            sys_strengths = dict(zip(['sys_{}'.format(i) for i in range(n_sys)], 10 * strengths))
                            dataset = sample_dataset(n_reg, n_rev, sys_strengths, n=n_samples)
                            dataset = add_outliers(dataset, sys_strengths, outlier_percent)
                            # print(dataset)
                            # exit()
                            res = evaluate(dataset, sys_strengths, method=method)
                            mean, median, bt = res
                            mean_perf.append(mean)
                            median_perf.append(median)
                            bt_perf.append(bt)
                            # Record the grid coordinates for this run.
                            percent_outliers.append(outlier_percent)
                            number_samples.append(dataset.shape[0])
                            number_regular_types.append(n_reg)
                            number_reverse_types.append(rev_percent)
                            noise.append(0.1)
                            n_systems.append(n_sys)
    return pd.DataFrame.from_dict({'Mean': mean_perf, 'Median': median_perf, 'BT': bt_perf,
                                   'n_samples': number_samples, 'n_regular': number_regular_types, 'n_outliers': percent_outliers, 'n_reverse': number_reverse_types,
                                   'noise': noise, 'n_systems': n_systems})
def obtain_x_y_yerr(df, name_x, name_y):
    """Aggregate column *name_y* of *df* grouped by *name_x* for an errorbar plot.

    Parameters
    ----------
    df : pandas.DataFrame
    name_x : str
        Column to group by; the sorted group keys become the x values.
    name_y : str
        Column whose per-group mean becomes the y values.

    Returns
    -------
    (x, y, yerr)
        ``x``: list of group keys, ``y``: list of group means,
        ``yerr``: numpy array with the full width of an approximate 95%
        confidence interval (2 * 1.96 * standard error of the mean).
    """
    grouped = df.groupby(name_x)
    # Compute the mean aggregation once (the original recomputed it for
    # the index and for the values).
    means = grouped.mean()
    x = means.index.to_list()
    y = means[name_y].to_list()
    # 1.96 * sem is the 95% CI half-width; the extra factor of 2 makes it
    # the full interval width used as the errorbar size.
    yerr = 2 * 1.96 * grouped.sem()[name_y].to_numpy()
    return x, y, yerr
if __name__ == '__main__':
    # Simulation grid.
    n_regular_list = [1, 3, 5, 10]
    percentage_reverse = [0.]
    n_systems_list = [2, 3, 5, 10, 25, 50]
    percent_outliers_list = [0., 0.01, 0.025, 0.05, 0.075]
    n_samples = [10, 30, 100, 200]

    res_df = run_simulations(n_regular_list, percentage_reverse, percent_outliers_list, n_systems_list, n_samples)
    print(res_df.mean(axis=0))

    # "Easy" cases: no outliers, no reversed instances, a single test type.
    easy_cases = res_df[(res_df['n_outliers'] == 0.) & (res_df['n_reverse'] == 0.)]
    easy_cases = easy_cases[easy_cases['n_regular'] == 1]

    fig, axes = plt.subplots(1, 6, figsize=(30, 5), sharey=True)
    ft = 15

    def draw_panel(ax, panel_df, x_axis, xlabel):
        """Draw the Mean/Median/BT errorbar curves for one subplot.

        Replaces the original 6x copy-pasted panel code; each series keeps
        its original color and errorbar styling.
        """
        for metric, color in (('Mean', 'tab:blue'), ('Median', 'tab:green'), ('BT', 'tab:red')):
            x, y, yerr = obtain_x_y_yerr(panel_df, x_axis, metric)
            ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color=color)
        ax.set_xlabel(xlabel, fontsize=ft)

    # Subsets used by the middle panels.
    outliers_df = res_df[res_df['n_regular'] == 1]
    regular_df = res_df[res_df['n_outliers'] == 0.]

    draw_panel(axes[0], easy_cases, 'n_systems', 'Number of systems (easy cases)')
    axes[0].set_ylabel('Kendall\'s tau with true strengths', fontsize=ft)
    draw_panel(axes[1], outliers_df, 'n_outliers', 'Percentage of outliers (1 test type)')
    draw_panel(axes[2], regular_df, 'n_regular', 'Test instances types (no outliers)')
    draw_panel(axes[3], res_df, 'n_regular', 'Test instances types (with outliers)')
    draw_panel(axes[4], res_df, 'n_outliers', 'Percentage of outliers (varying test types)')
    draw_panel(axes[5], res_df, 'n_systems', 'Number of systems (all cases)')

    legend_elem = [Line2D([0], [0], linestyle='-', linewidth=2, c='tab:blue', label='Mean'),
                   Line2D([0], [0], linestyle='-', linewidth=2, c="tab:green", label='Median'),
                   Line2D([0], [0], linestyle='-', linewidth=2, c="tab:red", label='BT')]
    fig.legend(handles=legend_elem, ncol=3, loc='upper center', frameon=False, fontsize=21, bbox_to_anchor=(0.55, 1.15))
    fig.tight_layout(pad=1.1)
    # fig.savefig("simulations.pdf", bbox_inches="tight")
    plt.show()

# Plot for increasing n_systems for easy cases
# Plot with increasing difficulty: outliers + rev
# Plot for increasing n_system all cases
# Plot for increasing noise easy cases
# Plot for increasing noise all cases
|
{"/simulations.py": ["/bt.py", "/Elo.py", "/TrueSkill.py"]}
|
5,308
|
epfl-dlab/BT-eval
|
refs/heads/main
|
/bt.py
|
import itertools
import numpy as np
import pandas as pd
def BT(df, epsilon=1e-9):
    """Estimate Bradley-Terry strengths from a score table.

    Each column of *df* is a competitor and each row a test instance.  For
    every competitor pair, a row counts as a win for the one with the
    strictly higher score (a tie counts for the second competitor of the
    pair — NOTE(review): this mirrors the existing implementation; confirm
    tie handling is intentional).  The minorization-maximization fixed
    point is iterated until the summed squared change of the normalized
    strengths drops below *epsilon*.

    Returns the normalized strength vector in column order.
    """
    names = df.columns
    n = df.shape[1]
    n_rows = df.shape[0]

    # Pairwise win counts: wins[i][j] == number of rows where i beat j.
    wins = np.zeros((n, n))
    for a, b in itertools.combinations(range(n), 2):
        a_beats_b = np.sum([int(sa > sb) for sa, sb in zip(df[names[a]], df[names[b]])])
        wins[a][b] = a_beats_b
        wins[b][a] = n_rows - a_beats_b
    total_wins = np.sum(wins, axis=1)

    strengths = [0.5] * n
    while True:
        updated = [0.5] * n
        for i in range(n):
            denom = 0
            for j in range(n):
                if j != i:
                    denom += (wins[i][j] + wins[j][i]) / (strengths[i] + strengths[j])
            updated[i] = total_wins[i] / denom
        # list / numpy scalar promotes to an ndarray, normalized to sum 1.
        updated /= np.sum(updated)
        if np.sum([(u - s) ** 2 for u, s in zip(strengths, updated)]) < epsilon:
            return updated
        strengths = updated
if __name__ == '__main__':
    # Smoke test: three players with per-instance scores.
    scores = {
        'player_a': [2, 5, 2, 3, 4],
        'player_b': [1, 2, 3, 4, 1],
        'player_c': [2, 4, 5, 2, 2],
    }
    print(BT(pd.DataFrame.from_dict(scores)))
|
{"/simulations.py": ["/bt.py", "/Elo.py", "/TrueSkill.py"]}
|
5,331
|
smokeinside/game
|
refs/heads/master
|
/moves.py
|
import logging
import random
from players import Player
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(name=__name__)
class Move:
    """Base class for a named player action.

    ``is_self_action`` marks moves that should be applied to their own
    executor rather than the opponent (see ``Player.make_move``).
    """
    def __init__(self, name, is_self_action=False):
        self.name = name
        self.is_self_action = is_self_action

    def execute(self, target: Player):
        """Apply this move to *target*; concrete subclasses must override."""
        raise NotImplementedError

    def __str__(self):
        return self.name
class MoveWithRange(Move):
    """A move whose magnitude is drawn from an inclusive [minimum, maximum] range."""
    def __init__(self, minimum, maximum, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.minimum = minimum
        self.maximum = maximum
class Attack(MoveWithRange):
    """Deal a random amount of damage within [minimum, maximum] to the target."""

    def execute(self, target: Player):
        dealt = random.randrange(self.minimum, self.maximum + 1)
        logger.info(f"{target} received {dealt} damage")
        # Clamp at zero so health never goes negative.
        target.health = max(0, target.health - dealt)
class Heal(MoveWithRange):
    """Restore a random amount of health within [minimum, maximum], capped at max_health."""

    def execute(self, target: Player):
        amount = random.randrange(self.minimum, self.maximum + 1)
        logger.info(f"{target} received {amount} heal")
        # Never exceed the target's maximum health.
        target.health = min(target.max_health, target.health + amount)
class MoveSelector:
    """Uniform-random move selection strategy."""

    def select(self, player, moves):
        chosen = random.choice(moves)
        logger.info(f"{chosen} was chosen for {player}")
        return chosen
class IncreasedHealMoveSelector(MoveSelector):
    """Random selection that doubles the weight of Heal moves at low health."""

    def __init__(self, critical_health):
        # At or below this health, healing moves become twice as likely.
        self.critical_health = critical_health

    def select(self, player, moves):
        low_health = player.health <= self.critical_health
        weights = [2.0 if low_health and isinstance(candidate, Heal) else 1.0
                   for candidate in moves]
        [chosen] = random.choices(moves, weights)
        logger.info(f"{chosen} was chosen for {player}")
        return chosen
|
{"/moves.py": ["/players.py"], "/games.py": ["/players.py"], "/play.py": ["/moves.py", "/games.py", "/players.py"]}
|
5,332
|
smokeinside/game
|
refs/heads/master
|
/games.py
|
import logging
import random
from typing import List
from players import Player
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(name=__name__)
class Game:
    """A duel between two of the given players, alternating turns until one dies."""

    def __init__(self, players: List[Player]):
        self.players = players

    def play(self) -> Player:
        """Run the fight loop and return the surviving player."""
        # Pick two distinct combatants at random.
        attacker, defender = random.sample(self.players, 2)
        while True:
            attacker.make_move(defender)
            if defender.health == 0:
                logger.info(
                    f"{attacker} has won in the battle with {defender} and has {attacker.health} health left"
                )
                return attacker
            logger.info(
                f"Round result: {attacker} has {attacker.health} health; {defender} has {defender.health} health"
            )
            # Swap roles for the next round.
            attacker, defender = defender, attacker
|
{"/moves.py": ["/players.py"], "/games.py": ["/players.py"], "/play.py": ["/moves.py", "/games.py", "/players.py"]}
|
5,333
|
smokeinside/game
|
refs/heads/master
|
/players.py
|
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(name=__name__)
class ValidationError(Exception):
    # Raised when a Player attribute is assigned an invalid/out-of-range value.
    pass
class Player:
    """A combatant with a name, a validated move list and a selection strategy."""

    def __init__(self, name, moves, move_selector, health=100, max_health=100):
        self.name = name
        self.moves = moves
        self.move_selector = move_selector
        # max_health must be assigned before health: the health setter
        # validates against it.
        self.max_health = max_health
        self.health = health

    @property
    def moves(self):
        return self._moves

    @moves.setter
    def moves(self, value):
        # A player must always have at least one move to choose from.
        if not isinstance(value, list) or len(value) < 1:
            raise ValidationError(
                "Moves has to be list of length greater or equal to 1"
            )
        self._moves = value

    @property
    def health(self):
        return self._health

    @health.setter
    def health(self, value):
        """Validate and assign health, logging the transition."""
        if value > self.max_health:
            raise ValidationError(
                f"Health value is {value} which is greater then {self.max_health}"
            )
        if value < 0:
            # BUG FIX: the message previously interpolated `value.health`,
            # which raised AttributeError (value is an int) instead of the
            # intended ValidationError.
            raise ValidationError(
                f"Health value is {value} which is lower then 0"
            )
        if hasattr(self, "_health"):
            logger.info(f"{self} health changed from {self._health} to {value}")
            self._health = value
        else:
            self._health = value
            logger.info(f"{self} health was initialized as {self._health}")

    def make_move(self, opponent):
        """Pick a move via the selector and execute it on self or the opponent."""
        move = self.move_selector.select(self, self.moves)
        target = self if move.is_self_action else opponent
        move.execute(target)

    def __str__(self):
        return self.name
|
{"/moves.py": ["/players.py"], "/games.py": ["/players.py"], "/play.py": ["/moves.py", "/games.py", "/players.py"]}
|
5,334
|
smokeinside/game
|
refs/heads/master
|
/play.py
|
import moves
from games import Game
from players import Player
if __name__ == "__main__":
defalut_moves = [
moves.Attack(18, 25, name="Simple attack"),
moves.Attack(10, 35, name="Wide range attack"),
moves.Heal(18, 25, name="Simple self heal", is_self_action=True),
]
computer = Player("Computer", defalut_moves, moves.IncreasedHealMoveSelector(35))
player = Player("Alexander", defalut_moves, moves.MoveSelector())
game = Game([computer, player])
game.play()
|
{"/moves.py": ["/players.py"], "/games.py": ["/players.py"], "/play.py": ["/moves.py", "/games.py", "/players.py"]}
|
5,337
|
williballenthin/dissect
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
# setuptools packaging script for the dissect package.
#from distutils.core import setup
from setuptools import setup,find_packages

# For Testing:
#
# python3.4 setup.py register -r https://testpypi.python.org/pypi
# python3.4 setup.py bdist_wheel upload -r https://testpypi.python.org/pypi
# python3.4 -m pip install -i https://testpypi.python.org/pypi
#
# For Realz:
#
# python3.4 setup.py register
# python3.4 setup.py bdist_wheel upload
# python3.4 -m pip install

# Imported only to read __version__; assumes the package is importable at
# build time.
import dissect

setup(
    name='dissect',
    # __version__ is a sequence of ints; join into a dotted version string.
    version='.'.join( str(v) for v in dissect.__version__ ),
    description='Vivisect (Mark II) File/Protocol Parsers',
    author='Invisigoth Kenshoto',
    author_email='invisigoth.kenshoto@gmail.com',
    url='https://github.com/vivisect/dissect',
    license='Apache License 2.0',
    # Ship everything except the test packages.
    packages=find_packages(exclude=['*.tests','*.tests.*']),
    install_requires=[
        'vstruct>=2.0.2',
    ],
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.4',
    ],
)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,338
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_common.py
|
import unittest
from dissect.common import *
class CommonTest(unittest.TestCase):
    """Unit tests for the dissect.common helper containers."""

    def test_common_ondemand(self):
        # Each registered callback must be invoked with the args supplied
        # at registration time.
        def foo():
            return 'foo'

        def bar(x):
            return x + 20

        def baz(x, y=0):
            return x + y

        ondem = OnDemand()
        ondem.add('foo', foo)
        ondem.add('bar', bar, 10)
        ondem.add('baz', baz, 10, y=40)

        self.assertEqual(ondem['foo'], 'foo')
        self.assertEqual(ondem['bar'], 30)
        self.assertEqual(ondem['baz'], 50)

    def test_common_keycache(self):
        # The factory must run exactly once per distinct key.
        calls = {'hits': 0}

        def woot(x):
            calls['hits'] += 1
            return x + 20

        cache = KeyCache(woot)
        for _ in range(3):
            self.assertEqual(cache[10], 30)
        self.assertEqual(calls['hits'], 1)
        self.assertEqual(cache[20], 40)
        self.assertEqual(calls['hits'], 2)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,339
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/formats/cab.py
|
import tempfile
from io import BytesIO
from vstruct.types import *
from dissect.filelab import *
import dissect.bitlab as bitlab
import dissect.algos.mszip as mszip
import dissect.algos.lzx as lzx
# Raised for malformed or invalid CAB input.
class OffCabFile(Exception):pass

# CAB file format reference:
#https://msdn.microsoft.com/en-us/library/bb417343.aspx

# CFHEADER signature bytes.
_CAB_MAGIC = b'MSCF'

# CFFILE.attribs flag bits.
_A_RDONLY      = 0x01     # file is read-only
_A_HIDDEN      = 0x02     # file is hidden
_A_SYSTEM      = 0x04     # file is a system file
_A_ARCH        = 0x20     # file modified since last backup
_A_EXEC        = 0x40     # run after extraction
_A_NAME_IS_UTF = 0x80     # szName[] contains UTF

# CFHEADER.flags bits controlling which optional header fields are present.
_F_PREV_CABINET = 0x0001        # When this bit is set, the szCabinetPrev and szDiskPrev fields are present in this CFHEADER.
_F_NEXT_CABINET = 0x0002        # When this bit is set, the szCabinetNext and szDiskNext fields are present in this CFHEADER.
_F_RESERVE_PRESENT = 0x0004     # When this bit is set, the cbCFHeader, cbCFFolder, and cbCFData fields are present in this CFHEADER.

# Per-folder compression algorithm selector (CFFOLDER.typeCompress).
comp = venum()
comp.NONE       = 0x00 # no compression
comp.MSZIP      = 0x01 # ms decompress compression
comp.QUANTUM    = 0x02 # ms quantum compression
comp.LZX        = 0x03 # ms lzx compression
class CFHEADER(VStruct):
    '''
    Top-level CAB structure: fixed fields, flag-driven optional fields,
    then the folder and file arrays.

    The optional fields are appended to ``cbOptFields`` lazily by the
    onset callbacks below, as the corresponding count/flag fields are
    parsed.
    '''
    def __init__(self):
        VStruct.__init__(self)
        self.signature      = vbytes(4)     # file signature
        self.reserved1      = uint32()      # reserved
        self.cbCabinet      = uint32()      # size of this cabinet file in bytes
        self.reserved2      = uint32()      # reserved
        self.coffFiles      = uint32()      # offset of the first CFFILE entry
        self.reserved3      = uint32()      # reserved
        self.versionMinor   = uint8()       # cabinet file format version, minor
        self.versionMajor   = uint8()       # cabinet file format version, major
        self.cFolders       = uint16()      # number of CFFOLDER entries in this cabinet
        self.cFiles         = uint16()      # number of CFFILE entries in this cabinet
        self.flags          = uint16()      # cabinet file option indicators
        self.setID          = uint16()      # must be the same for all cabinets in a set
        self.iCabinet       = uint16()      # number of this cabinet file in a set

        self.cbOptFields    = VStruct()     # container struct for optional fields (flags based)

        #self.cbCFHeader     = uint16()     # (optional) size of per-cabinet reserved area
        #self.cbCFFolder     = uint8()      # (optional) size of per-folder reserved area
        #self.cbCFData       = uint8()      # (optional) size of per-datablock reserved area
        #self.abReserve      = vbytes()     # (optional) per-cabinet reserved area
        #self.szCabinetPrev  = vbytes()#v_zstr()      # (optional) name of previous cabinet file
        #self.szDiskPrev     = vbytes()#v_zstr()      # (optional) name of previous disk
        #self.szCabinetNext  = vbytes()#v_zstr()      # (optional) name of next cabinet file
        #self.szDiskNext     = vbytes()#v_zstr()      # (optional) name of next disk

        self.cfDirArray     = VArray()
        self.cfFileArray    = VArray()

        # Onset callbacks fire as each field's value is parsed, letting us
        # size the arrays / optional fields from values read earlier.
        self['flags'].vsOnset( self._onSetFlags )
        self['cFiles'].vsOnset( self._onSetFiles )
        self['cFolders'].vsOnset( self._onSetFolders )
        #self['cbCFHeader'].vsOnset( self._onSetCfHeader )

    def _onSetFiles(self):
        # Size the file array from the just-parsed cFiles count.
        self.cfFileArray = varray( self.cFiles, CFFILE )()

    def _onSetFolders(self):
        # Size the folder array; folders carry a reserve area when the
        # reserve flag (and thus cbCFFolder) is present.
        abres = 0
        if self.flags & _F_RESERVE_PRESENT:
            abres = self.cbOptFields.cbCFFolder
        self.cfDirArray = varray( self.cFolders, CFFOLDER, abres=abres )()

    def _onSetFlags(self):
        f = self.flags
        # these *must* remain in this order...
        if f & _F_RESERVE_PRESENT:
            self.cbOptFields.cbCFHeader     = uint16()     # (optional) size of per-cabinet reserved area
            self.cbOptFields.cbCFFolder     = uint8()      # (optional) size of per-folder reserved area
            self.cbOptFields.cbCFData       = uint8()      # (optional) size of per-datablock reserved area
            self.cbOptFields.abReserve      = vbytes()     # (optional) per-cabinet reserved area

            self.cbOptFields['cbCFHeader'].vsOnset( self._onSetCfHeader )

        if f & _F_PREV_CABINET:
            self.cbOptFields.szCabinetPrev = zstr()
            self.cbOptFields.szDiskPrev = zstr()

        if f & _F_NEXT_CABINET:
            self.cbOptFields.szCabinetNext = zstr()
            self.cbOptFields.szDiskNext = zstr()

    def _onSetCfHeader(self):
        # Size the per-cabinet reserve area from the just-parsed cbCFHeader.
        self.cbOptFields['abReserve'].vsResize( self.cbOptFields.cbCFHeader )
class CFFOLDER(VStruct):
    '''
    A folder entry: locates a run of CFDATA blocks and names their
    compression type.  *abres* is the per-folder reserve size taken from
    the CFHEADER optional fields.
    '''
    def __init__(self, abres=0):
        VStruct.__init__(self)
        self.coffCabStart   = uint32()      # file offset of CFDATA blocs
        self.cCFData        = uint16()      # CFDATA block count
        self.typeCompress   = uint16(enum=comp)
        self.abReserve      = vbytes(abres)
class CFFILE(VStruct):
    '''
    A file entry: size, owning folder index and the file's uncompressed
    offset within that folder's data stream.
    '''
    def __init__(self):
        VStruct.__init__(self)
        self.cbFile             = uint32()  # uncompressed size of this file in bytes
        self.uoffFolderStart    = uint32()  # uncompressed offset of this file in the folder
        self.iFolder            = uint16()  # index into the CFFOLDER area
        self.date               = uint16()  # date stamp for this file
        self.time               = uint16()  # time stamp for this file
        self.attribs            = uint16()  # attribute flags for this file
        self.szName             = zstr()    # name of this file
class CFDATA(VStruct):
    '''
    One compressed data block within a folder's data stream.
    '''
    def __init__(self,abres=0):
        VStruct.__init__(self)
        self.csum       = uint32()  # checksum of this CFDATA entry */
        self.cbData     = uint16()  # number of compressed bytes in this block */
        self.cbUncomp   = uint16()  # number of uncompressed bytes in this block */
        self.abReserved = vbytes(abres) # (optional) per-datablock reserved area */
        self.ab         = vbytes()  # compressed data bytes */

        self['cbData'].vsOnset( self._onSetCbData )

    def _onSetCbData(self):
        # Size the compressed payload from the just-parsed cbData field.
        self['ab'].vsResize( self.cbData )
class CabLab(FileLab):
    '''
    On-demand parser for Microsoft CAB (MSCF) files.
    '''
    def __init__(self, fd, off=0):
        FileLab.__init__(self, fd, off=off)
        self.addOnDemand('CFHEADER', self._getCabHeader )
        self.addOnDemand('filesbyname', self._loadFilesByName )

        # Map compression selector -> block decompression generator.
        self.decomps = {
            comp.NONE:self._deCompNoneBlock,
            comp.MSZIP:self._deCompMsZipBlock,
            comp.QUANTUM:self._deCompQuantumBlock,
            comp.LZX:self._deCompLzxBlock
        }

    def _deCompNoneBlock(self, itblks, comp_type=None):
        # Uncompressed: pass the raw block payloads straight through.
        for d in itblks:
            yield d.ab

    def _deCompLzxBlock(self, itblks, comp_type):
        # comp_type is the full CFFOLDER.typeCompress value; the Lzx
        # decompressor is constructed from it.
        lzxd = lzx.Lzx(comp_type)
        for d in lzxd.decompBlock(itblks):
            yield d

    def _deCompMsZipBlock(self, itblks, comp_type=None):
        mszipd = mszip.MsZip()
        for d in mszipd.decompBlock(itblks):
            yield d

    def _deCompQuantumBlock(self, itblks, comp_type=None):
        raise NotImplementedError('Quantum is not support...yet')

    def _getCabHeader(self):
        # Parse the CFHEADER at offset 0 and validate the MSCF signature.
        hdr = self.getStruct(0, CFHEADER)
        if _CAB_MAGIC != hdr.signature:
            raise OffCabFile('Invalid CAB File Header: %r' % (hdr.signature,))
        return hdr

    def _loadFilesByName(self):
        # Index CFFILE entries by file name.
        ret = {}
        for off,cff in self['CFHEADER'].cfFileArray:
            ret[cff.szName] = cff
        return ret

    def getCabFiles(self):
        '''
        Yield (filename, fileinfo, fd) tuples for files within the cab,
        decompressing each folder's data stream lazily.

        Example:
            for filename, finfo, fd in cab.getCabFiles(self):
                fdata = fd.read()
        '''
        cfh = self['CFHEADER']

        ifldr = None
        fdata = b''

        for fname,finfo in self.listCabFiles():

            fsize = finfo['size']
            uoff = finfo['uoff']

            # Entering a new folder: (re)build the decompression pipeline.
            if finfo['ifldr'] != ifldr:

                ifldr = finfo['ifldr']
                fldr = cfh.cfDirArray[finfo['ifldr']]

                # Low bits select the algorithm; the full typeCompress value
                # is still handed to the decompressor.
                calg = fldr.typeCompress & 3

                icd = self.iterCabData(fldr.coffCabStart, fldr.cCFData)
                dblk = self.decomps[calg](icd, fldr.typeCompress)

            # Accumulate decompressed data until this file is complete.
            # NOTE(review): assumes files appear in folder-stream order
            # (uoff is not used to seek) — confirm against the writer.
            while fsize > len(fdata):
                fdata += next(dblk)

            bio = BytesIO(fdata[:fsize])
            fdata = fdata[fsize:]

            yield (fname, finfo, bio)

    def listCabFiles(self):
        '''
        Yield (name,info) tuples for files within the cab.

        Example:
            for filename,fileinfo in cab.listCabFiles():
                print('filename:%s' % (filename,))
        '''
        cfh = self['CFHEADER']
        for idx,cff in cfh.cfFileArray:
            fileinfo = dict(size=cff.cbFile,attrs=cff.attribs)
            fileinfo['comp'] = repr( cfh.cfDirArray[cff.iFolder]['typeCompress'] )
            fileinfo['ifldr'] = cff.iFolder
            fileinfo['uoff'] = cff.uoffFolderStart
            yield cff.szName, fileinfo

    def iterCabData(self, off, cnt):
        '''
        Yield CFDATA blocks within the cab.
        '''
        uoff = 0
        abres = 0

        cfh = self['CFHEADER']
        # Per-datablock reserve size comes from the optional header fields.
        if cfh.flags & _F_RESERVE_PRESENT:
            abres = cfh.cbOptFields.cbCFData

        cda = self.getStruct(off, varray(cnt, CFDATA, abres=abres))
        for idx,cd in cda:
            yield cd

    def getCabVersion(self):
        '''
        Retrieve a version tuple for the CAB file.
        '''
        hdr = self['CFHEADER']
        return ( hdr.versionMajor, hdr.versionMinor )

    def getCabSize(self):
        '''
        Retrieve the size ( in bytes ) of the CAB file.
        '''
        return self['CFHEADER'].cbCabinet
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,340
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_inet.py
|
import unittest
import dissect.formats.inet as ds_inet
ipv4bytes = b'\x45\x00\x14\x00\x42\x41\x00\x00\x30\x06\x57\x56\x01\x02\x03\x04\x05\x06\x07\x08'
class InetTest(unittest.TestCase):
    """Parse/emit tests for the inet structure definitions."""

    def test_inet_ipv4(self):
        # Parse a canned 20-byte IPv4 header and spot-check the fields.
        hdr = ds_inet.IPv4()
        hdr.vsParse(ipv4bytes)

        self.assertEqual(len(hdr), 20)
        self.assertEqual(repr(hdr['proto']), 'TCP')
        self.assertEqual(hdr.veriphl, 0x45)
        self.assertEqual(hdr.ttl, 0x30)
        self.assertEqual(repr(hdr['srcaddr']), '1.2.3.4')
        self.assertEqual(repr(hdr['dstaddr']), '5.6.7.8')

    def test_inet_icmp(self):
        # Populate an ICMP header and check the emitted wire bytes.
        pkt = ds_inet.ICMP()
        pkt.type = 3
        pkt.code = 4
        pkt.checksum = 0x0202
        self.assertEqual(pkt.vsEmit(), b'\x03\x04\x02\x02')
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,341
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/bitlab.py
|
from dissect.compat import iterbytes
LSB = (0,1,2,3,4,5,6,7)
MSB = (7,6,5,4,3,2,1,0)
def bits(byts, order='big', cb=iterbytes):
    '''
    Yield the individual bits of *byts* as 0/1 ints.

    order='big' yields each byte most-significant bit first; any other
    value yields least-significant first.  *cb* adapts the byte container
    into an iterable of ints.
    '''
    shifts = MSB if order == 'big' else LSB
    for octet in cb(byts):
        for shft in shifts:
            yield (octet >> shft) & 0x1
def cast(bitgen, bitsize, bord='big'):
    '''
    Consume *bitsize* bits from a bit generator and pack them into an int.

    bord='big' treats the first bit consumed as the most significant;
    bord='little' treats it as the least significant.  (Any other value
    consumes nothing and returns 0.)

    Example:
        # cast the next 5 bits as an int
        valu = cast(bits, 5)
    '''
    valu = 0
    if bord == 'big':
        for pos in range(bitsize):
            if next(bitgen):
                valu |= 1 << (bitsize - 1 - pos)
    elif bord == 'little':
        for pos in range(bitsize):
            valu |= next(bitgen) << pos
    return valu
class BitStream(object):
    '''
    A stateful bit source that tracks how many bits have been produced.
    '''

    def __init__(self, byts, order='big', cb=iterbytes):
        self.bitoff = 0
        self.bits = self.getBitGen(byts, order, cb)

    def getBitGen(self, byts, order='big', cb=iterbytes):
        # MSB-first for 'big', LSB-first otherwise.
        shifts = MSB if order == 'big' else LSB
        for octet in cb(byts):
            for shft in shifts:
                # NOTE: bitoff is incremented *before* the bit is yielded,
                # matching the original accounting.
                self.bitoff += 1
                yield (octet >> shft) & 0x1

    def __iter__(self):
        return self.bits

    def getOffset(self):
        # Number of bits handed out so far.
        return self.bitoff

    def cast(self, bitsize, bord='big'):
        '''
        Consume *bitsize* bits from the stream and pack them into an int.

        Example:
            # cast the next 5 bits as an int
            valu = stream.cast(5)
        '''
        valu = 0
        if bord == 'big':
            for pos in range(bitsize):
                if next(self.bits):
                    valu |= 1 << (bitsize - 1 - pos)
        elif bord == 'little':
            for pos in range(bitsize):
                valu |= next(self.bits) << pos
        return valu
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,342
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/algos/huffman.py
|
import collections
from dissect.bitlab import cast
class OffHuffTree(Exception):pass
def bitvals(valu, bits=8):
    '''Return the low *bits* binary digits of *valu*, most significant first.'''
    out = []
    for shft in reversed(range(bits)):
        out.append((valu >> shft) & 0x1)
    return out
class HuffTree(object):
    '''
    A huffman encoding tree.
    '''
    # The Huffman codes used for each alphabet in the "deflate"
    # format have two additional rules:

    #  * All codes of a given bit length have lexicographically
    #    consecutive values, in the same order as the symbols
    #    they represent;

    #  * Shorter codes lexicographically precede longer codes.

    def __init__(self):
        self.clear()

    def clear(self):
        '''Reset the tree to empty.'''
        self.root = [None,[None,None]] # root of the huffman binary tree
        self.codebysym = {}

    def iterHuffSyms(self, bits, offset=0):
        '''
        Use the HuffTree to decode bits, yielding each decoded symbol.

        Raises OffHuffTree if the bit stream walks off the tree.
        (NOTE: *offset* is currently unused.)

        Example:
            import dissect.bitlab as bitlab
            bits = bitlab.bits( byts )
            for sym in huff.iterHuffSyms( bits ):
                dostuff()
        '''
        node = self.root
        for bit in bits:

            node = node[1][bit]
            if node is None:
                raise OffHuffTree()

            # A non-None payload marks a leaf: emit it, restart at the root.
            if node[0] is not None:
                yield node[0]
                node = self.root

    def getCodeBySym(self, sym):
        '''
        Return a (bits,code) tuple by symbol, or None if unknown.

        Example:
            bitcode = huff.getCodeBySym(x)
            if bitcode != None:
                bits,code = bitcode
                stuff()
        '''
        return self.codebysym.get(sym)

    def addHuffNode(self, sym, bits, code):
        '''
        Add a symbol to the huffman tree.

        Raises OffHuffTree on a code-path or symbol conflict.

        Example:
            huff.addHuffNode( 'A', 3, 0b101 )
        '''
        node = self.root
        for bit in bitvals(code,bits):

            step = node[1][bit]
            if step is None:
                step = [ None, [None,None] ]
                node[1][bit] = step

            node = step

        # BUG FIX: these conflict checks previously used truthiness
        # (`if node[0]:`), which silently missed conflicts for falsy
        # symbols such as symbol 0.
        if node[0] is not None:
            raise OffHuffTree('Huffman node conflict')

        node[0] = sym

        if self.getCodeBySym(sym) is not None:
            raise OffHuffTree('Huffman sym conflict')

        self.codebysym[ sym ] = (bits,code)

    def loadCodeBook(self, codebook):
        '''
        Load a list of (sym,bits,code) tuples into the tree.

        Example:
            codebook = huff.initCodeBook( symbits )
            huff.loadCodeBook(codebook)
        '''
        [ self.addHuffNode(s,b,c) for (s,b,c) in codebook ]

    def initCodeBook(self, symbits):
        '''
        As per rfc1951, use a list of symbol code widths to make a codebook.
        Symbols with a zero bit width are omitted from the result.

        Notes:

            Consider the alphabet ABCDEFGH, with bit lengths (3, 3, 3, 3, 3, 2, 4, 4)

            Symbol Length   Code
            ------ ------   ----
            A       3        010
            B       3        011
            C       3        100
            D       3        101
            E       3        110
            F       2         00
            G       4       1110
            H       4       1111
        '''
        # Count how many symbols use each code width.
        nbits = collections.defaultdict(int)
        for bits in symbits:
            nbits[ bits ] += 1

        nbits[0] = 0

        # Compute the first code value for each width (rfc1951 sec 3.2.2).
        code = 0
        maxbits = max( nbits.keys() )
        codebase = [0]
        for bits in range( maxbits ):
            code = ( code + nbits[ bits ] ) << 1
            codebase.append( code )

        # Assign consecutive codes to symbols of equal width, in symbol order.
        codebook = []
        for sym in range( len( symbits ) ):

            bits = symbits[sym]
            code = codebase[bits]
            codebase[bits] += 1

            if bits:
                codebook.append( (sym,bits,code) )

        return codebook
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,343
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/filelab.py
|
from dissect.common import *
class FileLab:
    '''
    Base class for file format parsers.

    The FileLab class provides routines to help file parsers
    with concepts like API caching and on-demand parsing.

    Example:
        class FooLab(FileLab):
            def __init__(self, fd, off=0):
                FileLab.__init__(self, fd, off=off)
                self.addOnDemand('foo', self._getFoo )
                self.addOnDemand('bars', self._getFooBars )

            def _getFoo(self):
                return 'foo'

            def _getFooBars(self):
                return ['bar','bar','bar']

        foo = FooLab()
        for bar in foo['bars']:
            dostuff()
    '''

    def __init__(self, fd, off=0):
        # fd:  file-like object to parse; off: base offset within it
        self.fd = fd
        self.off = off
        self.ondem = OnDemand()

    def getStruct(self, off, cls, *args, **kwargs):
        '''
        Construct a VStruct and load it from the given file offset.

        Example:
            class Foo(VStruct):
                # ...
            foo = lab.getStruct(0, Foo)

        Notes:
            * if off is None, the current file offset is used
        '''
        # idiom fix: identity comparison for None (was `off == None`)
        if off is None:
            off = self.fd.tell()
        obj = cls(*args, **kwargs)
        obj.vsLoad(self.fd, offset=off)
        return obj

    def addOnDemand(self, name, meth):
        '''
        Register an on-demand parser callback under the given name.

        Example:
            class FooLab(FileLab):
                def __init__(self, fd, off=0):
                    FileLab.__init__(self, fd, off=off)
                    self.addOnDemand('bars', self._getFooBars )

                def _getFooBars(self):
                    return []

            foo = FooLab()
            for bar in foo['bars']:
                dostuff()
        '''
        self.ondem.add(name, meth)

    def getOnDemand(self, name):
        '''
        Retrieve (computing on first access) an on-demand parser result.

        Example:
            for bar in foo.getOnDemand('bars'):
                dostuff()
        '''
        return self.ondem[name]

    def __getitem__(self, name):
        # dict-style sugar for getOnDemand()
        return self.ondem[name]
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,344
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_cab.py
|
import unittest
import hashlib
import dissect.formats.cab as cab
import dissect.tests.files as files
class CabTest(unittest.TestCase):
    """Decompress the sample cabinet and verify the extracted payload."""

    # md5 digest of the expected decompressed file contents
    hash_chk = '00010548964e7bbca74da0d1764bdd70'

    def test_cab_decomp(self):
        with files.getTestFd('test_cab.cab') as fd:
            lab = cab.CabLab(fd)
            for name, info, member_fd in lab.getCabFiles():
                self.assertEqual(name, 'test_cab.txt')
                digest = hashlib.md5(member_fd.read()).hexdigest()
                self.assertEqual(self.hash_chk, digest)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,345
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/formats/inet.py
|
from __future__ import absolute_import,unicode_literals
import socket
from vstruct.types import *
'''
Inet Packet Structures
'''
# EtherType values identifying the payload of an Ethernet II frame
ethp = venum()
ethp.ipv4 = 0x0800
ethp.ipv6 = 0x86dd
ethp.vlan = 0x8100

# IANA-assigned IP protocol numbers (IPv4 proto / IPv6 next-header)
ipproto = venum()
ipproto.ICMP = 1
ipproto.TCP = 6
ipproto.UDP = 17
ipproto.IPV6 = 41

# TCP header flag bits
TCP_F_FIN = 0x01
TCP_F_SYN = 0x02
TCP_F_RST = 0x04
TCP_F_PUSH = 0x08
TCP_F_ACK = 0x10
TCP_F_URG = 0x20
TCP_F_ECE = 0x40
TCP_F_CWR = 0x80

# Useful combinations...
TCP_F_SYNACK = (TCP_F_SYN | TCP_F_ACK)

# ICMP message type values
icmptypes = venum()
icmptypes.ECHOREPLY = 0
icmptypes.DEST_UNREACH = 3
icmptypes.SOURCE_QUENCH = 4
icmptypes.REDIRECT = 5
icmptypes.ECHO = 8
icmptypes.TIME_EXCEEDED = 11
icmptypes.PARAMETERPROB = 12
icmptypes.TIMESTAMP = 13
icmptypes.TIMESTAMPREPLY = 14
icmptypes.INFO_REQUEST = 15
icmptypes.INFO_REPLY = 16
icmptypes.ADDRESS = 17
icmptypes.ADDRESSREPLY = 18
class IPv4Addr(uint32):
    '''32-bit IPv4 address field rendered in dotted-quad form.'''
    def __repr__(self):
        return socket.inet_ntop(socket.AF_INET, self.vsEmit())
class IPv6Addr(vbytes):
    '''16-byte IPv6 address field rendered in presentation form.'''
    def __init__(self):
        vbytes.__init__(self, size=16)
    def __repr__(self):
        return socket.inet_ntop(socket.AF_INET6, self._vs_value)
class ETHERII(VStruct):
    '''
    Ethernet II frame header: destination MAC, source MAC, ethertype.
    An 802.1Q tag pair is appended on demand when etype == vlan.
    '''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.destmac = vbytes(size=6)
        self.srcmac = vbytes(size=6)
        self.etype = uint16(enum=ethp)
        self['etype'].vsOnset( self._onSetEtype )

    def _onSetEtype(self):
        # append vlan tag fields if needed
        # BUG FIX: original referenced a bare `etype` name (NameError);
        # the parsed field lives on self.
        if self.etype == ethp.vlan:
            self.vtag = uint16()
            self.vvlan = uint16()
class IPv4(VStruct):
    '''IPv4 header; field declaration order defines the wire layout.'''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.veriphl = uint8()      # version (high nibble) / IHL (low nibble)
        self.tos = uint8()
        self.totlen = uint16()
        self.ipid = uint16()
        self.flagfrag = uint16()    # flags (3 bits) + fragment offset
        self.ttl = uint8()
        self.proto = uint8(enum=ipproto)
        self.cksum = uint16()
        self.srcaddr = IPv4Addr()
        self.dstaddr = IPv4Addr()
        self['veriphl'].vsOnset( self._onSetVerIphl )
    def _onSetVerIphl(self):
        # IHL is in 32-bit words; headers longer than the fixed 20
        # bytes carry IP options.
        iphl = (self.veriphl & 0xf) * 4
        if iphl > 20:
            self.ipopts = vbytes( iphl - 20 )
class IPv6(VStruct):
    '''Fixed IPv6 header; extension headers are not parsed here.'''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.verclsflowl= uint32()   # version / traffic class / flow label
        self.totlen = uint16()       # payload length
        self.nexthdr = uint8()
        self.hoplimit = uint8()
        self.srcaddr = IPv6Addr()
        self.dstaddr = IPv6Addr()
class TCP(VStruct):
    '''
    TCP header (RFC 793); TCP options are appended on demand when the
    data offset indicates a header longer than 20 bytes.
    '''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.srcport = uint16()
        self.dstport = uint16()
        self.sequence = uint32()
        self.ackseq = uint32()
        self.doff = uint8()      # data offset nibble + reserved bits
        self.flags = uint8()
        self.window = uint16()
        self.checksum = uint16()
        self.urgent = uint16()
        self['doff'].vsOnset( self._onSetDoff )

    def _onSetDoff(self):
        # Data offset is the HIGH nibble, counted in 32-bit words.
        # BUG FIX: original used (doff >> 2), which leaked reserved bits
        # into the length, and `>= 20` appended a zero-length options
        # field for minimal headers (IPv4 uses `> 20`).
        off = (self.doff >> 4) * 4
        if off > 20:
            self.tcpopts = vbytes( off - 20 )
class UDP(VStruct):
    '''UDP header (RFC 768).'''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.srcport = uint16()
        self.dstport = uint16()
        self.udplen = uint16()   # length of header + payload
        self.checksum = uint16()
class ICMP(VStruct):
    '''ICMP message header (type / code / checksum).'''
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.type = uint8(enum=icmptypes)
        self.code = uint8()
        self.checksum = uint16()
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,346
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/compat.py
|
'''
Isolate 2.7 compatibility filth.
'''
import sys

major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro

version = (major,minor,micro)

# BUG FIX: the original test was `version <= (3,0,0)`, which routed
# Python 3.0.0 itself to the ord() branch (ord() on an int raises).
if version < (3,0,0):

    def iterbytes(byts):
        '''Yield each byte of a bytestring as an int (Python 2 path).'''
        for c in byts:
            yield ord(c)

else:

    def iterbytes(byts):
        '''Yield each byte of a bytestring as an int (Python 3 path).'''
        return iter(byts)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,347
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_huffman.py
|
import unittest
import dissect.bitlab as bitlab
import dissect.algos.huffman as huffman
# expected canonical codebook (sym, nbits, code) for the rfc1951 example widths
huffbook = ( (0, 3, 2), (1, 3, 3), (2, 3, 4), (3, 3, 5), (4, 3, 6), (5, 2, 0), (6, 4, 14), (7, 4, 15) )
# expected decode of the 0xef test byte against that book
huffsyms = ( (0,6), (4,7) )
# TODO
class HuffTest(unittest.TestCase):
    """Exercise codebook construction and symbol decode on HuffTree."""

    def test_huff_tree(self):
        tree = huffman.HuffTree()
        book = tree.initCodeBook( (3, 3, 3, 3, 3, 2, 4, 4) )
        tree.loadCodeBook(book)
        stream = bitlab.bits( b'\xef' )
        syms = tuple( tree.iterHuffSyms( stream ) )
        # self.assertEqual( tuple(book), huffbook )
        # self.assertEqual( tuple(syms), huffsyms )
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,348
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/algos/lzx.py
|
import sys
import ctypes
import dissect.algos.huffman as huffman
import dissect.bitlab as bitlab
from dissect.compat import iterbytes
# LZX output is produced in 32k frames
LZX_FRAME_SIZE = 32768
# x86 CALL opcode targeted by the intel E8 preprocessor
INSTR_CALL = 0xE8
# number of literal byte symbols in the main tree
NUM_CHARS = 256
# LZX block types
BTYPE_INVALID = 0
BTYPE_VERBATIM = 1
BTYPE_ALIGNED = 2
BTYPE_UNCOMPRESSED = 3
# match length coding parameters
NUM_SECONDARY_LENGTHS = 249
NUM_PRIMARY_LENGTHS = 7
MIN_MATCH = 2
class LzxError(Exception):
    '''Raised when LZX decompression encounters malformed input.'''
class LzxHuffTree(huffman.HuffTree):
    '''
    Extended Huffman Tree object with LZX specific methods
    '''
    # position-slot counts indexed by (window bits - 15)
    slots = (30, 32, 34, 36, 38, 42, 50, 66, 98, 162, 290)

    def __init__(self):
        huffman.HuffTree.__init__(self)
        # code length array (sized generously for all LZX trees)
        self.lens = [0] * 2000
        # when True, getWordBytes yields raw bytes instead of swapped words
        self.bytmode = False

    def cast(self, bits, num):
        # LZX bit fields are consumed most-significant-bit first
        return bits.cast(num,'big')

    def getWordBytes(self, iterblk):
        '''
        LZX runs are stored as little endian. This callback is used by the
        bitstream object to consume bytes for bit conversion
        '''
        # Need to ability to switch to a byte stream in the case of
        # uncompressed blocks
        for frm in iterblk:
            byts = frm.ab
            off = 0
            while off < len(byts):
                if self.bytmode:
                    # uncompressed mode: pass bytes through unswapped
                    yield byts[off]
                    off += 1
                else:
                    # swap each 16-bit little-endian word for big-endian
                    # bit extraction
                    b2 = byts[off + 1]
                    b1 = byts[off]
                    yield b2
                    yield b1
                    off += 2

    def getLens(self):
        '''
        Return the LZX length array
        '''
        return self.lens

    def updateLengths(self, bits, start, stop):
        '''
        Update the LZX length arrays
        '''
        # pre-tree: twenty 4-bit code widths, then pre-tree-coded deltas
        ptree = huffman.HuffTree()
        tlens = [self.cast(bits, 4) for i in range(20)]
        book = ptree.initCodeBook(tlens)
        ptree.loadCodeBook(book)
        it = ptree.iterHuffSyms(bits)
        i = start
        while i < stop:
            sym = next(it)
            if sym == 17:
                # run of zero lengths (4 extra bits)
                run = self.cast(bits, 4) + 4
                self.lens[i:i+run] = [0]*run
            elif sym == 18:
                # longer run of zero lengths (5 extra bits)
                run = self.cast(bits, 5) + 20
                self.lens[i:i+run] = [0]*run
            elif sym == 19:
                # short repeat run; the next symbol is a delta (mod 17)
                run = self.cast(bits, 1) + 4
                nsym = next(it)
                sym = self.lens[i] - nsym
                if sym < 0:
                    sym += 17
                self.lens[i:i+run] = [sym]*run
            else:
                # single delta-coded length (mod 17)
                sym = (self.lens[i] - sym)
                if sym < 0:
                    sym += 17
                self.lens[i] = sym
                run = 1
            i += run
class Lzx(LzxHuffTree):
'''
LZX Decompressor
'''
def __init__(self, comp_type):
    '''
    Build an LZX decompressor.  comp_type is the CAB compression-type
    word; bits 8..12 encode the sliding-window size exponent (15..21).
    '''
    self.debug = []
    self.test = False
    LzxHuffTree.__init__(self)
    self.wbits = (comp_type >> 8) & 0x1f
    self.wsize = 1 << self.wbits
    # sliding history window (zero-copy views used throughout)
    self.win = memoryview(bytearray(self.wsize))
    self.frmcnt = 0         # count of 32k frames emitted
    self.ifs = 0            # intel E8 "file size" from the stream header
    self.winpos = 0         # current write position in the window
    self.intelbuf = [0] * LZX_FRAME_SIZE
    self.icp = 0            # running position for intel E8 fixups
    self.atree = LzxHuffTree()  # aligned-offset tree
    self.mtree = LzxHuffTree()  # main (literal/match) tree
    self.ltree = LzxHuffTree()  # match-length tree
    # per-block-type (init, decode) dispatch
    self.decomps = { BTYPE_VERBATIM : (self._initVerb, self.decVerbatim),
                     BTYPE_ALIGNED : (self._initAlign, self.decAligned),
                     BTYPE_UNCOMPRESSED : (self._initUncomp, self.decUncomp) }
    rng = (15, 22)
    # Create the extra_bits slots
    self.xbits = []
    j = 0
    for i in range(51):
        self.xbits.append(j)
        self.xbits.append(j)
        if i != 0 and j < 17:
            j += 1
    # Create the position base slots
    self.pbase = []
    j = 0
    for i in range(51):
        self.pbase.append(j)
        j += 1 << self.xbits[i]
    if self.wbits not in range(rng[0], rng[1]):
        raise LzxError('Invalid window size')
    # number of match symbols in the main tree for this window size
    self.offs = LzxHuffTree.slots[self.wbits - 15] << 3
    self.ival = 0
    # repeated-offset registers R0/R1/R2
    self.r0, self.r1, self.r2 = 1,1,1
    self.run_num = 0
def getBlockLen(self, bits):
    '''
    Read the 24-bit uncompressed length of an LZX block (16 high bits
    followed by 8 low bits).
    '''
    upper = self.cast(bits, 16)
    lower = self.cast(bits, 8)
    return (upper << 8) | lower
def _initVerb(self, bits):
    '''
    Initialize parser to process an verbatim LZX block from a bitstream object
    '''
    self.mtree.clear()
    self.ltree.clear()
    # Create the main tree: literal lengths, then match-symbol lengths
    self.mtree.updateLengths(bits, 0, NUM_CHARS)
    self.mtree.updateLengths(bits, NUM_CHARS, NUM_CHARS + self.offs)
    mlens = self.mtree.getLens()
    book = self.mtree.initCodeBook(mlens)
    self.mtree.loadCodeBook(book)
    # Check for preprocessing (E8 fixups active iff 0xE8 has a code)
    self.ival = mlens[INSTR_CALL]
    # Get the length tree
    self.ltree.updateLengths(bits, 0, NUM_SECONDARY_LENGTHS)
    llens = self.ltree.getLens()
    book = self.ltree.initCodeBook(llens)
    self.ltree.loadCodeBook(book)
def _initAlign(self, bits):
    '''
    Initialize an aligned-offset block: read the eight 3-bit aligned
    tree lengths, then fall through to verbatim initialization.
    '''
    self.atree.clear()
    lens = [self.cast(bits, 3) for i in range(8)]
    book = self.atree.initCodeBook(lens)
    self.atree.loadCodeBook(book)
    self._initVerb(bits)
def _initUncomp(self, bits):
    '''
    Initialize parser to process an uncompressed LZX block from a bitstream object
    '''
    # discard padding bits up to the next 16-bit boundary
    need = 16 - (bits.getOffset() % 16)
    self.cast(bits, need)
    self.ival = 1
    # the three repeated-offset registers are stored literally
    self.r0 = self.readInt(bits)
    self.r1 = self.readInt(bits)
    self.r2 = self.readInt(bits)
def readInt(self, bits):
    '''
    Read a 32-bit little-endian LZX dword from the bitstream.
    '''
    return int.from_bytes(self.readBytes(bits, 4), 'little')
def readBytes(self, bits, cnt):
    '''
    Read cnt raw bytes from a bitstream object.  bytmode is toggled so
    the word-swapping byte feeder (getWordBytes) passes bytes through
    unswapped while we read.
    '''
    out = []
    # fix: removed unused local `i = 0` shadowed by the loop variable
    self.bytmode = True
    for _ in range(cnt):
        out.append(self.cast(bits, 8))
    self.bytmode = False
    return out
def decAligned(self, bits, blen):
    '''
    Decompress and yield LZX aligned-offset frames from a bitstream object.

    Yields lists of decoded bytes, one per (up to) 32k frame, until
    blen bytes have been produced.
    '''
    run = 0
    remains = blen
    mit = self.mtree.iterHuffSyms(bits)
    lit = self.ltree.iterHuffSyms(bits)
    ait = self.atree.iterHuffSyms(bits)
    # Max bytes for this run
    maxrun = self._getFrameAlign(bits)
    for sym in mit:
        if sym < NUM_CHARS:
            # literal byte
            self._winAppend(sym)
            run += 1
        else:
            sym -= NUM_CHARS
            # Get the match len
            mlen = sym & NUM_PRIMARY_LENGTHS
            if mlen == NUM_PRIMARY_LENGTHS:
                mlen += next(lit)
            mlen += MIN_MATCH
            # Get the match offset
            moff = sym >> 3
            if moff > 2:
                ext = self.xbits[moff]
                moff = self.pbase[moff] - 2
                if ext > 3:
                    # verbatim high bits plus a 3-bit aligned symbol
                    ext -= 3
                    vbits = self.cast(bits, ext)
                    moff += (vbits << 3)
                    moff += next(ait)
                elif ext == 3:
                    moff += next(ait)
                elif ext > 0:
                    vbits = self.cast(bits, ext)
                    moff += vbits
                else:
                    moff = 1
                # rotate the repeated-offset registers
                self.r2 = self.r1
                self.r1 = self.r0
                self.r0 = moff
            elif moff == 0:
                moff = self.r0
            elif moff == 1:
                moff = self.r1
                self.r1 = self.r0
                self.r0 = moff
            else:
                moff = self.r2
                self.r2 = self.r0
                self.r0 = moff
            if moff > self.winpos:
                # match reaches back past the window start: wrap around
                rem = moff - self.winpos
                mach = self.wsize - rem
                if rem < mlen:
                    mlen -= rem
                    rep = self._getAbsView(mach, rem)
                    self._setWinView(0, rep)
                    self.winpos += rem
                    run += rem
                    mach = 0
                rep = self._getAbsView(mach, mlen)
                self._setWinView(0, rep)
                self.winpos += mlen
                run += mlen
            else:
                # byte-at-a-time copy handles overlapping matches
                [ self._winAppend(self.win[self.winpos-moff]) for i in range(mlen) ]
                run += mlen
        if self.winpos % LZX_FRAME_SIZE == 0:
            self.alignWord(bits)
        if run >= remains:
            yield self._getWinView(-run, run)
            # BUG FIX (PEP 479): `raise StopIteration` inside a generator
            # becomes RuntimeError on Python 3.7+; use return instead.
            return
        if run >= maxrun:
            maxrun = LZX_FRAME_SIZE
            yield self._getWinView(-run, run)
            remains -= run
            run = 0
def decVerbatim(self, bits, blen):
    '''
    Decompress and yield LZX verbatim frames from a bitstream object.

    Yields lists of decoded bytes, one per (up to) 32k frame, until
    blen bytes have been produced.
    '''
    remains = blen
    run = 0
    lit = self.ltree.iterHuffSyms(bits)
    mit = self.mtree.iterHuffSyms(bits)
    # Max bytes for this run
    maxrun = self._getFrameAlign(bits)
    for sym in mit:
        if sym < NUM_CHARS:
            # literal byte
            self._winAppend(sym)
            run += 1
        else:
            sym -= NUM_CHARS
            mlen = sym & NUM_PRIMARY_LENGTHS
            if mlen == NUM_PRIMARY_LENGTHS:
                mlen += next(lit)
            mlen += MIN_MATCH
            # Get the match offset
            moff = sym >> 3
            if moff > 3:
                # formatted offset: position base plus verbatim bits
                ext = self.xbits[moff]
                vbits = self.cast(bits, ext)
                moff = self.pbase[moff] - 2 + vbits
                self.r2 = self.r1
                self.r1 = self.r0
                self.r0 = moff
            elif moff == 0:
                moff = self.r0
            elif moff == 1:
                moff = self.r1
                self.r1 = self.r0
                self.r0 = moff
            elif moff == 2:
                moff = self.r2
                self.r2 = self.r0
                self.r0 = moff
            else:
                moff = 1
                self.r2 = self.r1
                self.r1 = self.r0
                self.r0 = moff
            if moff > self.winpos:
                # match reaches back past the window start: wrap around
                rem = moff - self.winpos
                mach = self.wsize - rem
                if rem < mlen:
                    mlen -= rem
                    rep = self._getAbsView(mach, rem)
                    self._setWinView(0, rep)
                    self.winpos += rem
                    run += rem
                    mach = 0
                rep = self._getAbsView(mach, mlen)
                self._setWinView(0, rep)
                self.winpos += mlen
                run += mlen
            else:
                # byte-at-a-time copy handles overlapping matches
                [ self._winAppend(self.win[self.winpos-moff]) for i in range(mlen) ]
                run += mlen
        if self.winpos % LZX_FRAME_SIZE == 0:
            self.alignWord(bits)
        if run >= remains:
            yield self._getWinView(-run, run)
            # BUG FIX (PEP 479): `raise StopIteration` inside a generator
            # becomes RuntimeError on Python 3.7+; use return instead.
            return
        if run >= maxrun:
            maxrun = LZX_FRAME_SIZE
            yield self._getWinView(-run, run)
            remains -= run
            run = 0
def _winAppend(self, item):
    '''Store one byte at the current window position and advance.'''
    self.win[self.winpos] = item
    self.winpos += 1

def _getAbsView(self, offset, nbytes):
    '''Zero-copy slice of the window at an absolute offset.'''
    return self.win[offset : offset + nbytes]

def _setAbsView(self, offset, data):
    '''Write bytes into the window at an absolute offset.'''
    end = offset + len(data)
    self.win[offset : end] = data

def _getWinView(self, offset, nbytes):
    '''Read nbytes relative to the current window position, as a list.'''
    base = self.winpos + offset
    return self.win[base : base + nbytes].tolist()

def _setWinView(self, offset, data):
    '''Write bytes relative to the current window position.'''
    base = self.winpos + offset
    self.win[base : base + len(data)] = data
def decUncomp(self, bits, blen):
    '''
    Decode and yield the raw byte runs of an uncompressed LZX block.
    '''
    remains = blen
    while remains:
        # bytes left before the next frame boundary
        align = self._getFrameAlign(bits)
        need = align if align else LZX_FRAME_SIZE
        need = min(need, remains)
        chunk = self.readBytes(bits, need)
        for b in chunk:
            self._winAppend(b)
        yield chunk
        remains -= need
def _getFrameAlign(self, bits):
    '''
    Get the number of bytes needed to make the window aligned
    on a frame boundary (32768 bytes)

    NOTE(review): when winpos is already frame aligned this returns
    LZX_FRAME_SIZE rather than 0 -- confirm callers rely on that.
    '''
    return LZX_FRAME_SIZE - (self.winpos % LZX_FRAME_SIZE)
def alignWord(self, bits):
    '''
    Advance the bitstream to the next 16-bit word boundary
    (no-op when already aligned).
    '''
    rem = bits.getOffset() % 16
    if rem:
        bits.cast(16 - rem)
def getIntelHeader(self, bits):
    '''
    Read the intel E8 preprocessing header: a 1-bit flag, and when
    set, a 32-bit translation size.  Returns 0 when disabled.
    '''
    if not self.cast(bits, 1):
        return 0
    hi = self.cast(bits, 16)
    lo = self.cast(bits, 16)
    return (hi << 16) | lo
def getBlockHeader(self, bits):
    '''
    Read a block header and return (block type, block length).
    '''
    # the 3-bit type is consumed before the 24-bit length
    btype = self.cast(bits, 3)
    return btype, self.getBlockLen(bits)
def _decIntel(self, fsize):
    '''
    Decode intel preprocessing if necessary
    '''
    self.debug += self._getWinView(-fsize, fsize)
    # skip fixups when disabled, when no intel filesize was given, or
    # past the E8 translation window for short tails
    if not self.ival or not self.ifs or self.frmcnt > LZX_FRAME_SIZE and fsize <= 10:
        if self.ifs:
            self.icp += fsize
        return self._getWinView(-fsize, fsize)
    # curpos wraps as a signed 32-bit value, matching the reference decoder
    curpos = ctypes.c_int(self.icp).value
    ibuf = self._getWinView(-fsize, fsize)
    # Find all occurances of a 'call' byte
    indices = [i for i, b in enumerate(ibuf) if b == INSTR_CALL]
    if not len(indices):
        self.icp += fsize
        return self._getWinView(-fsize, fsize)
    # Validate the markers
    markers = [indices[0]]
    for l in indices:
        if l - markers[-1] >= 5 and l < (len(ibuf)-10):
            markers.append(l)
    for i, idx in enumerate(markers):
        if i == 0:
            curpos += idx
        else:
            curpos += (idx - markers[i-1])
        idx += 1
        # read the 32-bit little-endian absolute call target after E8
        absoff = ctypes.c_int((ibuf[idx] | (ibuf[idx+1]<<8) |
            (ibuf[idx+2]<<16) | (ibuf[idx+3]<<24) ))
        absoff = absoff.value
        prev = absoff
        if absoff >= -(0xFFFFFFFF & curpos) and absoff < self.ifs:
            if absoff >= 0:
                reloff = absoff - curpos
            else:
                reloff = absoff + self.ifs
            # rewrite the absolute target as a relative offset in place
            ibuf[idx] = (0xFF & reloff)
            ibuf[idx+1] = (0xFF & (reloff >> 8))
            ibuf[idx+2] = (0xFF & (reloff >> 16))
            ibuf[idx+3] = (0xFF & (reloff >> 24))
    self.icp += fsize
    return ibuf
def _postProcess(self, frame):
    '''
    Run intel E8 post-processing over a decoded frame and return it.
    '''
    # the final (short) frame is processed at its true size; every
    # earlier frame counts as a full 32k frame
    fsize = len(frame) if len(frame) >= self.rawcb else LZX_FRAME_SIZE
    return self._decIntel(fsize)
# TODO currently expects an iterator of CFDATA objects
def decompBlock(self, iterblk):
    '''
    Decompress and yield uncompressed byte blocks via a CFDATA iterator
    '''
    blen = 0
    btype = 0
    out = []
    # Parse out the blocks
    blocks = [b for b in iterblk]
    # Get the total amount of uncompressed
    self.rawcb = sum([cf.cbUncomp for cf in blocks])
    bits = bitlab.BitStream(blocks, order='big', cb=self.getWordBytes)
    # Read Intel Header
    self.ifs = self.getIntelHeader(bits)
    while self.rawcb:
        # If the previous block was uncompressed and misaligned (16-bit)
        # realign now
        if btype == BTYPE_UNCOMPRESSED and (blen & 1):
            self.readBytes(bits, 1)
        btype,blen = self.getBlockHeader(bits)
        # per-type tree/state initialization, then frame decode
        self.decomps[btype][0](bits)
        run = blen
        for frame in self.decomps[btype][1](bits, blen):
            out += frame
            # emit once a full 32k frame is buffered, or at stream end
            if len(out) == LZX_FRAME_SIZE or len(frame) == self.rawcb:
                self.frmcnt += 1
                fdata = self._postProcess(out)
                yield bytes(fdata)
                out = []
            if self.winpos >= self.wsize:
                self.winpos = 0
            run -= len(frame)
            self.rawcb -= len(frame)
            if run == 0:
                break
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,349
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/common.py
|
import collections
class KeyCache(collections.defaultdict):
    '''
    A dictionary based key/val cache.

    Missing keys are computed once via the lookup callable and stored.

    Example:
        cache = KeyCache( getFooThing )
        if cache['woot']:
            dostuff()
    '''

    def __init__(self, lookmeth):
        collections.defaultdict.__init__(self)
        self.lookmeth = lookmeth

    def __missing__(self, key):
        self[key] = self.lookmeth(key)
        return self[key]
class OnDemand(collections.defaultdict):
    '''
    Dict that lazily builds values by calling a registered constructor
    (with saved args/kwargs) the first time a key is accessed.
    '''

    def __init__(self):
        collections.defaultdict.__init__(self)
        self.ctors = {}     # name -> (callable, args, kwargs)
        self.names = []     # registration order

    def add(self, name, ctor, *args, **kwargs):
        '''Register a constructor (and its arguments) for name.'''
        self.names.append(name)
        self.ctors[name] = (ctor, args, kwargs)

    def __missing__(self, key):
        ctor, args, kwargs = self.ctors.get(key)
        valu = ctor(*args, **kwargs)
        self[key] = valu
        return valu
def colify(rows,titles=None):
    '''
    Generate colum text output from rows.

    Example:
        rows = [
            ('bob','33'),
            ('bill','24')
        ]

        print( colify( rows, titles=('name','age') ))
    '''
    colcount = max([ len(r) for r in rows ])
    colsizes = collections.defaultdict(int)
    # seed column widths with title widths
    # idiom fix: identity comparison for None (was `titles != None`)
    if titles is not None:
        for i in range(len(titles)):
            colsizes[i] = len(titles[i])
    # widen each column to fit its largest cell
    for i in range(colcount):
        for j in range(len(rows)):
            colsizes[i] = max(colsizes[i], len(rows[j][i]))
    # every column contributes " | " (3 chars) of separation/padding
    sumlen = sum( colsizes.values() ) + ( 3 * colcount )
    lines = []
    lines.append( '-' * sumlen )
    if titles:
        pres = [ titles[i].ljust(colsizes[i]) for i in range(colcount) ]
        lines.append(' | '.join(pres))
        lines.append('-' * sumlen)
    for row in rows:
        pres = [ row[i].ljust(colsizes[i]) for i in range(colcount) ]
        lines.append( ' | '.join(pres) )
    lines.append( '-' * sumlen )
    return '\n'.join(lines)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,350
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/algos/mszip.py
|
import dissect.bitlab as bitlab
import dissect.algos.inflate as inflate
#BTYPE specifies how the data are compressed, as follows:
# 00 - no compression
# 01 - compressed with fixed Huffman codes
# 10 - compressed with dynamic Huffman codes
# 11 - reserved (error)
# DEFLATE BTYPE values (see the header comment above)
TYPE_UNCOMP = 0x0
TYPE_FIXED = 0x1
TYPE_DYNAMIC = 0x2
TYPE_INVALID = 0x3
class MsZipError(Exception):
    '''Raised when an MSZIP stream is malformed.'''
class MsZip(inflate.Inflate):
    '''
    MSZIP ('CK'-prefixed DEFLATE) block decompressor.
    '''
    def __init__(self):
        inflate.Inflate.__init__(self)
        # dispatch keyed by the 2-bit DEFLATE block type
        self.decomps = {
            TYPE_UNCOMP:self._getUncompBlock,
            TYPE_FIXED:self._deCompFixedHuffman,
            TYPE_DYNAMIC:self._deCompDynHuffman,
            TYPE_INVALID:self._invalidBlock
        }

    def cast(self, bits, num):
        # DEFLATE bit fields are least-significant-bit first
        return bits.cast(num,'little')

    #TODO: expects a CFDATA iterator
    def decompBlock(self, iterblk):
        '''
        Decompress each CFDATA frame and yield its uncompressed bytes.

        Raises:
            MsZipError: on a missing 'CK' signature, reserved block
                type, or inconsistent stored-block length.
        '''
        for frame in iterblk:
            byts = frame.ab
            if not byts.startswith(b'CK'):
                raise MsZipError('Invalid MsZip Block: %r' % (byts[:8],))
            bits = bitlab.BitStream(byts[2:], order='little')
            final = 0
            msblock = []
            while not final:
                # each DEFLATE block: 1-bit BFINAL then 2-bit BTYPE
                final = self.cast(bits, 1)
                bt = self.cast(bits, 2)
                msblock.extend(self.decomps[bt](bits, byts))
            yield bytes(msblock)

    def _invalidBlock(self, bits, byts=None):
        # reserved block type 11
        raise MsZipError('Invalid block type')

    def _deCompDynHuffman(self, bits, byts=None):
        # block type 10: dynamic huffman codes
        return self.getDynHuffBlock(bits)

    def _deCompFixedHuffman(self, bits, byts=None):
        # block type 01: fixed huffman codes
        return self.getFixHuffBlock(bits)

    def _getUncompBlock(self, bits, byts):
        # block type 00: stored.  TODO Assuming we are at index 3 here
        self.cast(bits, 5)
        dlen = self.cast(bits, 16)
        clen = self.cast(bits, 16)
        # BUG FIX: original raised undefined DeflateError (NameError);
        # also removed the unused `out = []` local.
        if (dlen ^ 0xFFFF) != clen:
            raise MsZipError('Invalid uncompressed block length')
        return byts[ 5 : 5 + dlen]
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,351
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/files/__init__.py
|
import os
# directory containing the test data files (alongside this module)
filesdir = os.path.dirname(__file__)

def getTestFd(*names):
    '''
    Open a file from the test-data directory for binary reading.
    '''
    return open(os.path.join(filesdir, *names), 'rb')
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,352
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_bitlab.py
|
import unittest
import dissect.bitlab as bitlab
class BitLabTest(unittest.TestCase):
    """Sanity checks for bit iteration and bit-stream casting."""

    def test_bitlab_bits(self):
        # 3 bytes yield 24 individual bits
        self.assertEqual(len(list(bitlab.bits(b'ABC'))), 24)
        # cast the same byte under each bit order
        for order, first, second in (('big', 8, 1), ('little', 16, 2)):
            stream = bitlab.BitStream(b'A', order=order)
            self.assertEqual(stream.cast(5), first)
            self.assertEqual(stream.cast(3), second)
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,353
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_filelab.py
|
import io
import unittest
from vstruct.types import *
from dissect.filelab import *
class CommonTest(unittest.TestCase):
def test_filelab(self):
fd = io.BytesIO( b'asdfqwer' )
class Woot(VStruct):
def __init__(self):
VStruct.__init__(self)
self.one = uint8()
self.two = uint16()
class FooLab(FileLab):
def __init__(self, fd, off=0):
FileLab.__init__(self, fd, off=off)
self.addOnDemand('woot',self._getWoot)
self.addOnDemand('baz',self._getFooBaz)
self.addOnDemand('bars',self._getFooBars)
def _getFooBaz(self):
return 'foobaz'
def _getFooBars(self):
return ('foo','bar','baz')
def _getWoot(self):
return self.getStruct( 0, Woot )
foo = FooLab(fd)
self.assertEqual( foo['baz'], 'foobaz' )
self.assertEqual( foo['bars'], ('foo','bar','baz') )
self.assertEqual( foo['woot'].one, 0x61 )
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,354
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/algos/inflate.py
|
# -*- coding: utf8 -*-
import dissect.bitlab as bitlab
import dissect.algos.huffman as huff
COPY_LEN = 16
REP_BIG_LEN = 17
REP_TINY_LEN = 18
END_BLOCK = 256
MAX_MATCH = 285
MAX_DIST = 29
MAX_HIST = 32768
class InflateError(Exception):pass
def cast(bits, num):
return bits.cast(num, 'little')
# Inflate RFC1951 Compliant Decompressor
class Inflate(object):
def __init__(self):
self.fix_lits = huff.HuffTree()
self.fix_dists = huff.HuffTree()
self.buff = [] # History buffer
self._initFixedTrees()
def getFixHuffBlock(self, bits):
return self._decHuffBlock(bits, self.fix_lits, self.fix_dists)
def getDynHuffBlock(self, bits):
'''
The Huffman codes for the two alphabets appear in the block
immediately after the header bits and before the actual
compressed data, first the literal/length code and then the
distance code. Each code is defined by a sequence of code
lengths, as discussed in Paragraph 3.2.2, above. For even
greater compactness, the code length sequences themselves are
compressed using a Huffman code. The alphabet for code lengths
is as follows:
0 - 15: Represent code lengths of 0 - 15
16: Copy the previous code length 3 - 6 times.
The next 2 bits indicate repeat length
(0 = 3, ... , 3 = 6)
Example: Codes 8, 16 (+2 bits 11),
16 (+2 bits 10) will expand to
12 code lengths of 8 (1 + 6 + 5)
17: Repeat a code length of 0 for 3 - 10 times.
(3 bits of length)
18: Repeat a code length of 0 for 11 - 138 times
(7 bits of length)
'''
hlit = ( cast(bits, 5) + 257 )
hdist = ( cast(bits, 5) + 1 )
hclen = ( cast(bits, 4) + 4 )
len_map = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5,
11, 4, 12, 3, 13, 2, 14, 1, 15]
lens = [0]*19
code_lens = [0] * (hlit + hdist)
len_tree = huff.HuffTree()
lit_tree = huff.HuffTree()
dist_tree = None
for i in range(hclen):
lens[len_map[i]] = cast(bits, 3)
book = len_tree.initCodeBook(lens)
len_tree.loadCodeBook(book)
it = len_tree.iterHuffSyms(bits)
i = 0
val = -1
vlen = 0
while i < len(code_lens):
if vlen > 0:
code_lens[i] = val
vlen -= 1
else:
sym = next(it)
if sym < COPY_LEN:
code_lens[i] = sym
val = sym
else:
if sym == COPY_LEN:
if val == -1:
raise InflateError("Invalid code copy length")
vlen = cast(bits, 2) + 3
elif sym == REP_BIG_LEN:
val = 0
vlen = cast(bits, 3) + 3
elif sym == REP_TINY_LEN:
val = 0
vlen = cast(bits, 7) + 11
else:
raise InflateError("Invalid or corrupt block data")
i -= 1
i += 1
if vlen:
raise InflateError('Invalid match length')
lit_len = code_lens[:hlit]
book = lit_tree.initCodeBook(lit_len)
lit_tree.loadCodeBook(book)
dist_len = code_lens[hlit:]
if len(dist_len) != 1 or dist_len[0] != 0:
if 0 == sum(x > 0 for x in dist_len) and dist_len.count(1) == 1:
raise DecompError('Unhandled code book irregularity')
dist_tree = huff.HuffTree()
book = dist_tree.initCodeBook(dist_len)
dist_tree.loadCodeBook(book)
dec = self._decHuffBlock(bits, lit_tree, dist_tree)
return dec
# Get the INFLATE match length for symbols 257–285 (3-258 bytes)
def _getMatchLen(self, s, bits):
mlen = 0
if s < 257 or s > 285:
raise InflateError('Invalid match sym')
if s <= 264:
mlen = s - 254
elif s <= 284:
xbits = int((s - 261) / 4)
mlen = (((s - 265) % 4 + 4) << xbits) + 3 + cast(bits, xbits)
elif s == MAX_MATCH:
mlen = 258
else:
raise InflateError('Invalid match length')
return mlen
def _getDist(self, s, bits):
dist = 0
if s > 29:
raise InflateError('Invalid distance code')
if s <= 3:
dist = s + 1
else:
xbits = int((s / 2) - 1)
dist = ((s % 2 + 2) << xbits) + 1 + cast(bits, xbits)
return dist
def _updateHistBuff(self):
'''
Roll the history buffer so it remains at its maxium size.
'''
self.buff = self.buff[-MAX_HIST:]
def _initFixedTrees(self):
'''
A HuffTree which is constucted pre-loaded with the rfc1951 fixed tree.
From rfc1951:
3.2.6. Compression with fixed Huffman codes (BTYPE=01)
The Huffman codes for the two alphabets are fixed, and are not
represented explicitly in the data. The Huffman code lengths
for the literal/length alphabet are:
Lit Value Bits Codes
--------- ---- -----
0 - 143 8 00110000 through
10111111
144 - 255 9 110010000 through
111111111
256 - 279 7 0000000 through
0010111
280 - 287 8 11000000 through
11000111
'''
symbits = [ 8 for i in range(144) ]
symbits.extend( [ 9 for i in range(144, 256) ] )
symbits.extend( [ 7 for i in range(256, 280) ] )
symbits.extend( [ 8 for i in range(280, 288) ] )
# Literal Length Codes
lit_codes = self.fix_lits.initCodeBook(symbits)
self.fix_lits.loadCodeBook(lit_codes)
distbits = [ 5 for i in range(32) ]
dist_codes = self.fix_dists.initCodeBook(distbits)
self.fix_dists.loadCodeBook(dist_codes)
def _decHuffBlock(self, bits, lit_tree, dist_tree):
'''
Decompress the huffman block using the supplied ltieral and distance trees.
'''
out = []
if not lit_tree:
raise InflateError('Invalid literal code tree')
dit = dist_tree.iterHuffSyms(bits)
for sym in lit_tree.iterHuffSyms(bits):
# Its a literal symbol
if sym < END_BLOCK:
out.append(sym)
self.buff.append(sym)
# End of this block return back out
elif sym == END_BLOCK:
self._updateHistBuff()
return out
else:
# It needs a lookup
mlen = self._getMatchLen(sym, bits)
d = next(dit)
dist = self._getDist(d, bits)
while mlen > dist:
out += self.buff[-dist:]
self.buff += self.buff[-dist:]
mlen -= dist
if mlen == dist:
out += self.buff[-dist:]
self.buff += self.buff[-dist:]
else:
out += self.buff[-dist:mlen-dist]
self.buff += self.buff[-dist:mlen-dist]
# Should never get here
raise InflateError('Failed to find end of block sym')
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,355
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tests/test_rar.py
|
import unittest
import dissect.formats.rar as rar
import dissect.tests.files as files
class RarTest(unittest.TestCase):
#def test_rar_iv(self):
def test_rar_filelab(self):
fd = files.getTestFd('test.rar')
lab = rar.RarLab(fd)
#print(rarlab['header'])
#rarlab['header'].vsPrint()
#self.assertEqual( len(list(lab.iterRar4Files())), 4 )
#for hdr in rarlab.iterRar4Files():
#hdr.vsPrint()
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,356
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/__init__.py
|
'''
Vivisect (Mark II) File/Protocol Parsers
'''
__version__ = (0,0,1)
__copyright__ = 'Copyright 2015 Invisigoth (invisigoth.kenshoto@gmail.com)'
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,357
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/tools/cab.py
|
import sys
import argparse
import dissect.formats.cab as d_cab
from dissect.common import *
def main(argv):
p = argparse.ArgumentParser()
p.add_argument('--list',default=False, action='store_true', help='list files within the cab file')
p.add_argument('--catfile',help='cat a file from cab to stdout')
p.add_argument('cabfiles',nargs='+',help='ms cab files')
args = p.parse_args(argv)
for filename in args.cabfiles:
fd = open(filename,'rb')
cab = d_cab.CabLab(fd)
if args.list:
ver = cab.getCabVersion()
size = cab.getCabSize()
verstr = '.'.join([ str(v) for v in ver ])
print('listing cab: %s (ver: %s)' % (filename,verstr))
rows = []
for name,info in cab.listCabFiles():
rows.append( (name, str(info['size']), info['comp']) )
titles = ('File Name','Size','Compression')
print( colify( rows, titles=titles) )
continue
if args.catfile:
cab['CFHEADER'].vsPrint()
fd = cab.openCabFile( args.catfile )
fd.seek(0)
print(repr(fd.read()))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,358
|
williballenthin/dissect
|
refs/heads/master
|
/dissect/mimescan.py
|
typers = []
scanners = []
def scanForMimes(fd, off=0, only=None, ignore=None):
'''
Scan an fd for "carveable" files.
Returns (offset,mimetype) tuples.
Example:
for off,mime in scanForMimes(fd):
carvestuff(fd,off)
'''
for mime,scanner in scanners:
if only != None and mime not in only:
continue
if ignore != None and mime in ignore:
continue
fd.seek(off)
for hit in scanner(fd):
yield (mime,hit)
def getMimeType(fd):
'''
Returns a mime type name for the file content.
'''
|
{"/setup.py": ["/dissect/__init__.py"], "/dissect/tests/test_common.py": ["/dissect/common.py"], "/dissect/formats/cab.py": ["/dissect/filelab.py", "/dissect/bitlab.py", "/dissect/algos/mszip.py", "/dissect/algos/lzx.py"], "/dissect/tests/test_inet.py": ["/dissect/formats/inet.py"], "/dissect/bitlab.py": ["/dissect/compat.py"], "/dissect/algos/huffman.py": ["/dissect/bitlab.py"], "/dissect/filelab.py": ["/dissect/common.py"], "/dissect/tests/test_cab.py": ["/dissect/formats/cab.py", "/dissect/tests/files/__init__.py"], "/dissect/tests/test_huffman.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/algos/lzx.py": ["/dissect/algos/huffman.py", "/dissect/bitlab.py", "/dissect/compat.py"], "/dissect/algos/mszip.py": ["/dissect/bitlab.py", "/dissect/algos/inflate.py"], "/dissect/tests/test_bitlab.py": ["/dissect/bitlab.py"], "/dissect/tests/test_filelab.py": ["/dissect/filelab.py"], "/dissect/algos/inflate.py": ["/dissect/bitlab.py", "/dissect/algos/huffman.py"], "/dissect/tests/test_rar.py": ["/dissect/tests/files/__init__.py"], "/dissect/tools/cab.py": ["/dissect/formats/cab.py", "/dissect/common.py"]}
|
5,360
|
alefaggravo/quackers
|
refs/heads/master
|
/quackers/core.py
|
import json
import logging
import os
import random
from copy import deepcopy
from datetime import datetime
import dotenv
import slack
from airtable import Airtable
from quackers.data import error_modal, start_modal
from quackers.helpers import fire_and_forget
DEBUG = False
if DEBUG:
dotenv.load_dotenv(".env.testing")
else:
dotenv.load_dotenv(".env")
client = slack.WebClient(token=os.environ["BOT_USER_OAUTH_ACCESS_TOKEN"])
se_students = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Students')
se_instructors = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Instructors')
se_questions = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Quackers Questions')
ux_students = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Students')
ux_instructors = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Instructors')
ux_questions = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Quackers Questions')
logger = logging.getLogger('gunicorn.error')
def post_message_to_coaches(user, channel, question, info, client, channel_map):
logger.info(f'Posting question from {user} to {channel}!')
ch = channel_map.get_coach_channel(channel)
message = (
f"Received request for help from @{user} with the following info:\n\n"
f"Question: {question}\n"
f"Additional info: {info}"
)
client.chat_postMessage(
channel=ch,
blocks=[
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": message
}
}
],
icon_emoji=":quackers:"
)
def post_to_airtable(user_id, slack_username, channel, channel_map, question, info):
# We want to log both student interactions and instructor interactions.
# We'll check the student table first (because it's most likely that a
# student is the one using the system), then check for an instructor.
# The mapping in main.py is used to select which set of airtable instances
# we check, mostly just to save time.
#
# ...and if we don't get a positive response from AirTable? Send it to
# Unresolved User.
# make pycharm happy
person_id = None
option = None
base = channel_map.get_base(channel).lower() # .lower() == safety check
if base == "se":
airtable_target = se_questions
search_options = [
{'table': se_students, 'is_student': True},
{'table': se_instructors, 'is_student': False},
]
elif base == "ux":
airtable_target = ux_questions
search_options = [
{'table': ux_students, 'is_student': True},
{'table': ux_instructors, 'is_student': False},
]
else:
raise Exception(f"No search options found for Airtable base {base}")
for option in search_options:
if person := option['table'].search('Slack ID', user_id):
person_id = person[0]['id']
break
if not person_id:
# we didn't find anyone with the right Slack ID in Airtable, so we'll force
# the next set of checks to return None for each of the questions.
option = {}
student_id, instructor_id, unresolved_user_id = (
person_id if option.get("is_student") else None,
person_id if not option.get("is_student") else None,
slack_username if not person_id else ""
)
data = {
'Question': question,
'Additional Info': info,
'Channel': channel,
'Student': [student_id] if student_id else None,
'Instructor': [instructor_id] if instructor_id else None,
'Unresolved User': unresolved_user_id if unresolved_user_id else None,
'Date': datetime.now().isoformat()
}
airtable_target.insert(data)
def post_message_to_user(user_id, channel, channel_map, question, emoji_list, client):
channel = channel_map.get_channel_id(channel)
client.chat_postEphemeral(
user=user_id,
channel=channel,
text=(
"Thanks for reaching out! One of the coaches or facilitators will be"
" with you shortly! :{}: Your question was: {}".format(
random.choice(emoji_list), question
)
)
)
@fire_and_forget
def process_question_followup(data, channel_map, emoji_list):
# the payload is a dict... as a string.
data['payload'] = json.loads(data['payload'])
# TODO: add example response from slack
# slack randomizes the block names. That means the location that the response will
# be in won't always be the same. We need to pull the ID out of the rest of the
# response before we go hunting for the data we need.
# Bonus: every block will have an ID! Just... only one of them will be right.
channel = None
original_q = None
addnl_info_block_id = None
user_id = None
for block in data['payload']['view']['blocks']:
if block.get('type') == "input":
addnl_info_block_id = block.get('block_id')
if block.get('type') == "section":
previous_data = block['text']['text'].split("\n")
original_q = previous_data[0][previous_data[0].index(":") + 2:]
channel = previous_data[1][previous_data[1].index(":") + 2:]
if block.get('type') == "context":
user_id = block['elements'][0]['text'].split(':')[2].strip()
dv = data['payload']['view']
additional_info = dv['state']['values'][addnl_info_block_id]['ml_input']['value']
username = data['payload']['user']['username']
post_message_to_coaches(
user=username,
channel=channel,
question=original_q,
info=additional_info,
client=client,
channel_map=channel_map
)
post_to_airtable(
user_id, username, channel, channel_map, original_q, additional_info
)
post_message_to_user(
user_id=user_id,
channel=channel,
channel_map=channel_map,
question=original_q,
emoji_list=emoji_list,
client=client
)
def process_question(data, channel_map):
if trigger_id := data.get('trigger_id'):
# first we need to verify that we're being called in the right place
if data.get('channel_name') not in channel_map.keys():
client.views_open(
trigger_id=trigger_id,
view=error_modal
)
return ("", 200)
# copy the modal so that we don't accidentally modify the version in memory.
# the garbage collector will take care of the copies later.
start_modal_copy = deepcopy(start_modal)
# stick the original question they asked and the channel they asked from
# into the modal so we can retrieve it in the next section
start_modal_copy['blocks'][0]['text']['text'] = \
start_modal['blocks'][0]['text']['text'].format(
data.get('text'), data.get('channel_name')
)
start_modal_copy['blocks'][4]['elements'][0]['text'] = \
start_modal['blocks'][4]['elements'][0]['text'].format(data.get('user_id'))
client.views_open(
trigger_id=trigger_id,
view=start_modal_copy
)
# return an empty string as fast as possible per slack docs
return ("", 200)
|
{"/quackers/core.py": ["/quackers/data.py", "/quackers/helpers.py"], "/main.py": ["/quackers/core.py", "/quackers/helpers.py"]}
|
5,361
|
alefaggravo/quackers
|
refs/heads/master
|
/main.py
|
import logging
from flask import Flask, request
from quackers.core import process_question, process_question_followup, client
from quackers.helpers import ChannelMap
# *********************************************
# EDIT HERE
# *********************************************
# map is in the following format:
# (channel-to-listen-to, coach-channel, program-this-channel-set-belongs-to)
UX = 'ux'
SE = 'se'
channel_map = ChannelMap(slack_conn=client)
channels = [
("joe-slackbot-testing", "joe-slackbot-coaches", SE),
# software engineering channels
("se-july-2020", "se-july-2020-coaches", SE),
("se-april-2020", "se-april-2020-coaches", SE),
("se-october-2019", "se-q4-staff", SE),
("se-january-2020", "se-q3-staff", SE),
# user experience channels
("ux-5", "ux-triage-uie", UX),
("ux-6", "ux-triage-uxd", UX),
("ux-7", "ux-triage-uxd", UX),
# old maps
("ux-4-indy", "ux-triage-uie", UX),
("ux-4-remote", "ux-triage-uie", UX)
]
for channel in channels:
channel_map.add_channel(
listen_to=channel[0], post_to=channel[1], airtable=channel[2]
)
# for responses returned to the student
emoji_list = [
'party',
'thepuff',
'carlton',
'fire',
'spinning',
'party-parrot',
'heykirbyhey',
'capemario'
]
# *********************************************
# DO NOT EDIT BEYOND THIS POINT
# *********************************************
app = Flask(__name__)
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
else:
app.logger.setLevel(logging.INFO)
@app.route('/questionfollowup/', methods=['POST'])
def questionfollowup():
with app.app_context():
process_question_followup(request.form.to_dict(), channel_map, emoji_list)
# this endpoint spawns another thread to do its dirty work, so we need to
# return the 200 OK ASAP so that Slack will be happy.
return ("", 200)
@app.route('/question/', methods=['POST'])
def question():
with app.app_context():
return process_question(request.form.to_dict(), channel_map)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
|
{"/quackers/core.py": ["/quackers/data.py", "/quackers/helpers.py"], "/main.py": ["/quackers/core.py", "/quackers/helpers.py"]}
|
5,362
|
alefaggravo/quackers
|
refs/heads/master
|
/quackers/helpers.py
|
import threading
import logging
logger = logging.getLogger('gunicorn.error')
# https://stackoverflow.com/a/59043636
def fire_and_forget(f, *args, **kwargs):
def wrapped(*args, **kwargs):
threading.Thread(target=f, args=(args), kwargs=kwargs).start()
return wrapped
class ChannelMap(object):
def __init__(self, slack_conn):
self.client = slack_conn
self.mapping = {}
def add_channel(self, listen_to: str=None, post_to: str=None, airtable: str=None):
if not listen_to or not post_to or not airtable:
raise ValueError("Must pass in all three variables!")
self.mapping.update({listen_to: {'target': post_to, 'airtable': airtable}})
logger.info(f"Registered {listen_to} -> {post_to} for the {airtable.upper()} program")
def get_coach_channel(self, c):
result = self.mapping[c]
if not result:
raise Exception("No matching channel found!")
if not result['target'].startswith("#"):
result = "#{}".format(result['target'])
return result
def get_channel_id(self, channel_name):
# reference: https://github.com/KenzieAcademy/quackers/issues/8
# https://github.com/KenzieAcademy/quackers/issues/7
channels = self.client.users_conversations(
types="public_channel,private_channel"
).data['channels']
for c in channels:
if c.get('name') == channel_name:
return c['id']
logger.error(f'Unable to resolve channel {channel_name}!')
def get_base(self, channel):
result = self.mapping[channel]
if not result:
raise Exception("No matching channel found!")
return result['airtable']
def get(self, item):
return self.mapping.get(item)
def keys(self):
return self.mapping.keys()
def items(self):
return self.mapping.items()
|
{"/quackers/core.py": ["/quackers/data.py", "/quackers/helpers.py"], "/main.py": ["/quackers/core.py", "/quackers/helpers.py"]}
|
5,363
|
alefaggravo/quackers
|
refs/heads/master
|
/quackers/data.py
|
start_modal = {
"type": "modal",
"title": {
"type": "plain_text",
"text": "Quackers!",
"emoji": True
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": True
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": True
},
"blocks": [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "The question was: {0}\nYour channel: {1}",
"emoji": True
}
},
{
"type": "divider"
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"action_id": "ml_input",
"multiline": True
},
"label": {
"type": "plain_text",
"text": "What else should we know about the problem you're facing?"
},
"hint": {
"type": "plain_text",
"text": "Any context you can provide will help!"
}
},
{
"type": "divider"
},
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": "*NOTE*: Your question won't get sent to the coaches until you click submit!\nID: {}"
}
]
}
]
}
error_modal = {
"type": "modal",
"title": {
"type": "plain_text",
"text": "Hey! Listen! 🌟",
"emoji": True
},
"close": {
"type": "plain_text",
"text": "OK",
"emoji": True
},
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "I'm not set up to run in this channel; you'll have to call me from your cohort channel. Sorry!"
}
},
{
"type": "image",
"image_url": "https://gamepedia.cursecdn.com/zelda_gamepedia_en/0/08/OoT3D_Navi_Artwork.png?version=61b243ef9637615abdf7534b17361c7a",
"alt_text": "Navi from The Legend of Zelda - a blue glowing orb with fairy wings. Artwork from the Ocarina of Time 3D."
}
]
}
|
{"/quackers/core.py": ["/quackers/data.py", "/quackers/helpers.py"], "/main.py": ["/quackers/core.py", "/quackers/helpers.py"]}
|
5,364
|
alefaggravo/quackers
|
refs/heads/master
|
/scripts/add_slack_ids_to_airtable.py
|
import dotenv
import os
import slack
from airtable import Airtable
import json
dotenv.load_dotenv()
client = slack.WebClient(token=os.environ["BOT_USER_OAUTH_ACCESS_TOKEN"])
a = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Students')
# a = Airtable(os.environ.get('CT_AIRTABLE_BASE_ID'), 'Students')
# a = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Students')
students = a.get_all()
result = client.users_list()
users = [u for u in result.data['members'] if u['deleted'] is False]
processed_results = [
[
u['real_name'], u['profile']['display_name'], u['profile'].get('email'), u['id']
] for u in users
]
for record in students:
student_email = record['fields'].get('Email')
if record['fields'].get('Slack ID'):
try:
fname = record['fields']['Name']
print(f"Record {fname} is up to date!")
except KeyError:
pass
if not student_email:
continue
for i in processed_results:
if i[2]:
i[2] = i[2].lower()
if student_email.lower() == i[2]:
try:
# SE airtable
print('Updating {}'.format(record['fields']['Name']))
except KeyError:
# UX airtable
print('Updating {}'.format(record['fields']['Name']))
a.update(record['id'], {'Slack ID': i[3]})
i.append('PROCESSED')
unprocessed = [u for u in processed_results if len(u) == 4]
processed = [u for u in processed_results if len(u) == 5]
print("Unprocessed Slack IDs: ", len(unprocessed))
print("Number of students in Airtable: ", len(students))
print("Updated Slack IDs: ", len(processed))
students = a.get_all()
no_slack_id = [u for u in students if u['fields'].get('Slack ID') == None]
if len(no_slack_id) == 0:
print(
"Everyone present and accounted for! All student records in Airtable"
" have a Slack ID."
)
else:
print("Found {} students in Airtable with no Slack ID.".format(len(no_slack_id)))
print("This will require manual intervention.")
print()
print("Accounts that need attention:")
for i in no_slack_id:
try:
print(i['fields']['Name'])
except KeyError:
print(i)
print()
print('The full unprocessed results from Slack are found in slack_data.json')
with open('slack_data.json', 'w') as f:
data = {'data': []}
[
data['data'].append({
'Real name': u[0],
'Display name': u[1],
'Email': u[2],
'Slack ID': u[3]
}) for u in unprocessed
]
f.write(json.dumps(data, indent=2))
|
{"/quackers/core.py": ["/quackers/data.py", "/quackers/helpers.py"], "/main.py": ["/quackers/core.py", "/quackers/helpers.py"]}
|
5,366
|
RobMurray98/BribeNet
|
refs/heads/master
|
/test/BribeNet/bribery/temporal/action/test_briberyAction.py
|
from unittest import TestCase
from unittest.mock import MagicMock
from BribeNet.bribery.temporal.action.briberyAction import BriberyActionTimeNotCorrectException, \
BriberyActionExecutedMultipleTimesException
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
class TestBriberyAction(TestCase):
    """Guard-condition tests for BriberyAction.perform_action."""

    def setUp(self) -> None:
        self.briber = NonBriber(1)
        self.graph = NoCustomerActionGraph(self.briber)
        self.action = SingleBriberyAction(self.briber)

    def test_perform_action_fails_if_at_different_times(self):
        """perform_action must reject execution when graph and action times differ."""
        # Force the graph to report a time one step ahead of the action.
        self.graph.get_time_step = MagicMock(return_value=self.action.get_time_step() + 1)
        # assertRaises gives a clear failure message instead of try/except/fail.
        with self.assertRaises(BriberyActionTimeNotCorrectException):
            self.action.perform_action()

    def test_perform_action_fails_if_already_executed(self):
        """A bribery action may only be performed once."""
        self.action.add_bribe(0, 0.01)
        self.action.perform_action()
        with self.assertRaises(BriberyActionExecutedMultipleTimesException):
            self.action.perform_action()
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
5,367
|
RobMurray98/BribeNet
|
refs/heads/master
|
/src/BribeNet/graph/ratingGraph.py
|
import random
from abc import ABC
from copy import deepcopy
from typing import Tuple, Optional, List, Any, Set
import networkit as nk
import numpy as np
from weightedstats import weighted_mean, weighted_median, mean, median
from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.flatWeightGenerator import FlatWeightedGraphGenerator
from BribeNet.graph.generation.generator import GraphGenerator
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.helpers.bribeNetException import BribeNetException
# Default generator: Watts-Strogatz small-world graph with 30 nodes,
# neighbourhood size 5 and rewiring probability 0.3.
DEFAULT_GEN = FlatWeightedGraphGenerator(GraphGeneratorAlgo.WATTS_STROGATZ, 30, 5, 0.3)
# Upper bound for any single vote/rating value.
MAX_RATING = 1.0
# Assumed maximum per-place vote difference; used to normalise trust().
MAX_DIFF = 0.6
class BribersAreNotTupleException(BribeNetException):
    """Raised when the bribers given to a RatingGraph are not a tuple."""
    pass
class NoBriberGivenException(BribeNetException):
    """Raised when a RatingGraph is constructed with an empty bribers tuple."""
    pass
class BriberNotSubclassOfBriberException(BribeNetException):
    """Raised when a supplied briber is not a subclass of Briber."""
    pass
class VotesNotInstantiatedBySpecificsException(BribeNetException):
    """Raised when the subclass `specifics` hook did not set self._votes."""
    pass
class TruthsNotInstantiatedBySpecificsException(BribeNetException):
    """Raised when the subclass `specifics` hook did not set self._truths."""
    pass
class GammaNotSetException(BribeNetException):
    """Raised when P-gamma-rating is requested before set_gamma was called."""
    pass
class RatingGraph(ABC):
    """
    Representation of network graph which bribers interact with

    Votes are held in self._votes (one row per customer, one column per
    briber, with np.nan marking a non-voter) and ground truths in
    self._truths; both are instantiated by the implementing class via the
    `specifics` hook (template design pattern).
    """
    def __init__(self, bribers: Tuple[Any], generator: GraphGenerator = DEFAULT_GEN, specifics=None,
                 **kwargs):
        """
        Abstract class for rating graphs
        :param bribers: the bribing actors on the graph
        :param generator: the graph generator used to instantiate the graph
        :param specifics: function in implementing class to call after the superclass initialisation,
                          but prior to _finalise_init (template design pattern)
        :param **kwargs: additional keyword arguments to the graph, such as max_rating
        """
        # Generate random ratings network
        self._g = generator.generate()
        # Local import, presumably to avoid a circular import between the
        # graph and bribery packages -- TODO confirm.
        from BribeNet.bribery.briber import Briber
        if issubclass(bribers.__class__, Briber):
            # A single briber is accepted and wrapped into a 1-tuple.
            bribers = tuple([bribers])
        if not isinstance(bribers, tuple):
            raise BribersAreNotTupleException()
        if not bribers:
            raise NoBriberGivenException()
        for b in bribers:
            if not issubclass(b.__class__, Briber):
                raise BriberNotSubclassOfBriberException(f"{b.__class__.__name__} is not a subclass of Briber")
        self._bribers = bribers
        self._max_rating: float = MAX_RATING
        # Populated by the subclass-provided `specifics` callback below.
        self._votes: np.ndarray[Optional[float]] = None
        self._truths: np.ndarray[float] = None
        self._rating_method: RatingMethod = RatingMethod.P_RATING
        self._gamma: Optional[float] = None
        if specifics is not None:
            specifics()
        self._finalise_init()
    def _finalise_init(self):
        """
        Perform assertions that ensure everything is initialised
        """
        if not isinstance(self._bribers, tuple):
            raise BribersAreNotTupleException("specifics of implementing class did not instantiate self._bribers "
                                              "as a tuple")
        from BribeNet.bribery.briber import Briber
        for briber in self._bribers:
            if not issubclass(briber.__class__, Briber):
                raise BriberNotSubclassOfBriberException(f"{briber.__class__.__name__} is not a subclass of Briber")
            # noinspection PyProtectedMember
            briber._set_graph(self)
        if not isinstance(self._votes, np.ndarray):
            raise VotesNotInstantiatedBySpecificsException()
        if not isinstance(self._truths, np.ndarray):
            raise TruthsNotInstantiatedBySpecificsException()
    def get_bribers(self) -> Tuple[Any]:
        """
        Get the bribers active on the graph
        :return: the bribers
        """
        return self._bribers
    def get_max_rating(self) -> float:
        """
        Get the maximum rating
        :return: the maximum rating
        """
        return self._max_rating
    def set_rating_method(self, rating_method: RatingMethod):
        """
        Set the rating method being used
        :param rating_method: the rating method to use
        """
        self._rating_method = rating_method
    def set_gamma(self, gamma: float):
        """
        Set gamma which is used as the dampening factor in P-gamma-rating
        :param gamma: the dampening factor in P-gamma-rating
        """
        self._gamma = gamma
    def get_rating(self, node_id: int = 0, briber_id: int = 0, rating_method: Optional[RatingMethod] = None,
                   nan_default: Optional[int] = None):
        """
        Get the rating for a certain node and briber, according to the set rating method
        :param node_id: the node to find the rating of (can be omitted for O-rating)
        :param briber_id: the briber to find the rating of (can be omitted in single-briber rating graphs)
        :param rating_method: a rating method to override the current set rating method if not None
        :param nan_default: optional default integer value to replace np.nan as default return
        :return: the rating
        """
        rating_method_used = rating_method or self._rating_method
        rating = np.nan
        if rating_method_used == RatingMethod.O_RATING:
            rating = self._o_rating(briber_id)
        elif rating_method_used == RatingMethod.P_RATING:
            rating = self._p_rating(node_id, briber_id)
        elif rating_method_used == RatingMethod.MEDIAN_P_RATING:
            rating = self._median_p_rating(node_id, briber_id)
        elif rating_method_used == RatingMethod.SAMPLE_P_RATING:
            rating = self._sample_p_rating(node_id, briber_id)
        elif rating_method_used == RatingMethod.WEIGHTED_P_RATING:
            rating = self._p_rating_weighted(node_id, briber_id)
        elif rating_method_used == RatingMethod.WEIGHTED_MEDIAN_P_RATING:
            rating = self._median_p_rating_weighted(node_id, briber_id)
        elif rating_method_used == RatingMethod.P_GAMMA_RATING:
            # P-gamma-rating requires set_gamma to have been called first.
            if self._gamma is None:
                raise GammaNotSetException()
            rating = self._p_gamma_rating(node_id, briber_id, self._gamma)
        if np.isnan(rating) and nan_default is not None:
            rating = nan_default
        return rating
    def get_graph(self):
        """
        Return the NetworKit graph of the network
        Ensure this information isn't used by a briber to "cheat"
        :return: the graph
        """
        return self._g
    def _neighbours(self, node_id: int, briber_id: int = 0) -> List[int]:
        """
        Get the voting neighbours of a node
        :param node_id: the node to get neighbours of
        :param briber_id: the briber on which voting has been done
        :return: the voting neighbours of the node for the briber
        """
        return [n for n in self.get_graph().neighbors(node_id) if not np.isnan(self._votes[n][briber_id])]
    def get_customers(self) -> List[int]:
        """
        Get the customer ids without knowledge of edges or ratings
        :return: the customer ids in the graph
        """
        return list(self.get_graph().iterNodes())
    def customer_count(self) -> int:
        """
        Get the number of customers
        :return: the number of nodes in the graph
        """
        return self.get_graph().numberOfNodes()
    def get_random_customer(self, excluding: Optional[Set[int]] = None) -> int:
        """
        Gets the id of a random customer
        :param excluding: set of customer ids not to be returned
        :return: random node id in the graph
        """
        if excluding is None:
            excluding = set()
        return random.choice(tuple(set(self.get_graph().iterNodes()) - excluding))
    def get_vote(self, idx: int):
        """
        Returns the vote of a voter in the current network state
        :param idx: the id of the voter
        :return: np.nan if non-voter, otherwise float if single briber, np.ndarray of floats if multiple bribers
        """
        return self._votes[idx]
    def _p_rating(self, node_id: int, briber_id: int = 0):
        """
        Get the P-rating for the node
        :param node_id: the id of the node
        :param briber_id: the id number of the briber
        :return: mean of actual rating of neighbouring voters (np.nan if no voting neighbours)
        """
        ns = self._neighbours(node_id, briber_id)
        if len(ns) == 0:
            return np.nan
        return mean([self.get_vote(n)[briber_id] for n in ns])
    def _p_rating_weighted(self, node_id: int, briber_id: int = 0):
        """
        Get the P-rating for the node, weighted based on trust
        :param node_id: the id of the node
        :param briber_id: the id number of the briber
        :return: mean of actual rating of neighbouring voters (np.nan if no voting neighbours)
        """
        ns = self._neighbours(node_id, briber_id)
        if len(ns) == 0:
            return np.nan
        # Edge weights act as the trust placed in each neighbour's vote.
        weights = [self.get_weight(n, node_id) for n in ns]
        votes = [self.get_vote(n)[briber_id] for n in ns]
        return weighted_mean(votes, weights)
    def _median_p_rating(self, node_id: int, briber_id: int = 0):
        """
        Get the median-based P-rating for the node
        :param node_id: the id of the node
        :param briber_id: the id number of the briber
        :return: median of actual rating of neighbouring voters
        """
        ns = self._neighbours(node_id, briber_id)
        if len(ns) == 0:
            return np.nan
        return median([self.get_vote(n)[briber_id] for n in ns])
    def _median_p_rating_weighted(self, node_id: int, briber_id: int = 0):
        """
        Get the median-based P-rating for the node, weighted based on trust
        :param node_id: the id of the node
        :param briber_id: the id number of the briber
        :return: median of actual rating of neighbouring voters
        """
        ns = self._neighbours(node_id, briber_id)
        if len(ns) == 0:
            return np.nan
        weights = [self.get_weight(n, node_id) for n in ns]
        votes = [self.get_vote(n)[briber_id] for n in ns]
        return weighted_median(votes, weights)
    def _sample_p_rating(self, node_id: int, briber_id: int = 0):
        """
        Get the sample-based P-rating for the node
        :param node_id: the id of the node
        :param briber_id: the id number of the briber
        :return: mean of a random non-empty sample of actual ratings of neighbouring voters
        """
        ns = self._neighbours(node_id, briber_id)
        if len(ns) == 0:
            return np.nan
        sub = random.sample(ns, random.randint(1, len(ns)))
        return mean([self.get_vote(n)[briber_id] for n in sub])
    def _o_rating(self, briber_id: int = 0):
        """
        Get the O-rating for the node
        :param briber_id: the id number of the briber
        :return: mean of all actual ratings
        """
        ns = [n for n in self.get_graph().iterNodes() if not np.isnan(self._votes[n][briber_id])]
        if len(ns) == 0:
            return np.nan
        return mean([self.get_vote(n)[briber_id] for n in ns])
    def _p_gamma_rating(self, node_id: int, briber_id: int = 0, gamma: float = 0.05):
        """
        Get the P-gamma-rating for the node, which weights nodes based on the gamma factor:
        The gamma factor is defined as gamma^(D(n,c) - 1), where n is our starting node, c
        is the node we are considering and D(n,c) is the shortest distance.
        :param briber_id: the id number of the briber
        :return: weighted mean of all actual ratings based on the gamma factor
        """
        ns = [n for n in self._g.iterNodes() if (not np.isnan(self._votes[n][briber_id])) and n != node_id]
        # Distances are computed over the unweighted topology (hop counts).
        # noinspection PyUnresolvedReferences
        unweighted_g = nk.graphtools.toUnweighted(self.get_graph())
        # noinspection PyUnresolvedReferences
        bfs_run = nk.distance.BFS(unweighted_g, node_id).run()
        distances = bfs_run.getDistances()
        weights = [gamma ** (distances[n] - 1) for n in ns]
        votes = [self.get_vote(n)[briber_id] for n in ns]
        return weighted_mean(votes, weights)
    def is_influential(self, node_id: int, k: float = 0.1, briber_id: int = 0,
                       rating_method: Optional[RatingMethod] = None, charge_briber: bool = True) -> float:
        """
        Determines if a node is influential using a small bribe
        :param node_id: the id of the node
        :param k: the cost of information
        :param briber_id: the briber for which the node may be influential
        :param rating_method: a rating method to override the current set rating method if not None
        :param charge_briber: whether this query is being made by a briber who must be charged and the ratings adjusted
        :return: float > 0 if influential, 0 otherwise
        """
        prev_p = self.eval_graph(briber_id, rating_method)
        vote = self.get_vote(node_id)[briber_id]
        # Only voters whose vote can still rise by k are worth probing.
        if (not np.isnan(vote)) and (vote < 1 - k):
            if charge_briber:
                # bribe via the briber in order to charge their utility
                self._bribers[briber_id].bribe(node_id, k)
                reward = self.eval_graph(briber_id, rating_method) - prev_p - k
            else:
                # "bribe" directly on the graph, not charging the briber and not affecting ratings
                g_ = deepcopy(self)
                g_.bribe(node_id, k, briber_id)
                reward = g_.eval_graph(briber_id, rating_method) - prev_p - k
            if reward > 0:
                return reward
        return 0.0
    def _get_influence_weight(self, node_id: int, briber_id: Optional[int] = 0):
        """
        Get the influence weight of a node in the graph, as defined by Grandi
        and Turrini.
        :param node_id: the node to fetch the influence weight of
        :param briber_id: the briber (determines which neighbours have voted)
        :return: the influence weight of the node
        """
        neighbourhood_sizes = [len(self._neighbours(n, briber_id)) for n in self._neighbours(node_id, briber_id)]
        neighbour_weights = [1.0 / n for n in neighbourhood_sizes if n > 0]  # discard size 0 neighbourhoods
        return sum(neighbour_weights)
    def bribe(self, node_id: int, b: float, briber_id: int = 0):
        """
        Increase the rating of a node by an amount, capped at the max rating
        :param node_id: the node to bribe
        :param b: the amount to bribe the node
        :param briber_id: the briber who's performing the briber
        """
        if not np.isnan(self._votes[node_id][briber_id]):
            self._votes[node_id][briber_id] = min(self._max_rating, self._votes[node_id][briber_id] + b)
        else:
            # A non-voter becomes a voter whose rating is just the bribe.
            self._votes[node_id][briber_id] = min(self._max_rating, b)
    def eval_graph(self, briber_id: int = 0, rating_method: Optional[RatingMethod] = None):
        """
        Metric to determine overall rating of the graph
        :param rating_method: a rating method to override the current set rating method if not None
        :param briber_id: the briber being considered in the evaluation
        :return: the sum of the rating across the network
        """
        return sum(self.get_rating(node_id=n, briber_id=briber_id, rating_method=rating_method, nan_default=0)
                   for n in self.get_graph().iterNodes())
    def average_rating(self, briber_id: int = 0, rating_method: Optional[RatingMethod] = None):
        """Mean rating over the customers who have voted for the given briber."""
        # NOTE(review): np.isnan is applied to the whole vote row and then
        # indexed by briber_id -- equivalent to isnan(vote[briber_id]).
        voting_customers = [c for c in self.get_graph().iterNodes() if not np.isnan(self.get_vote(c))[briber_id]]
        return self.eval_graph(briber_id, rating_method) / len(voting_customers)
    def set_weight(self, node1_id: int, node2_id: int, weight: float):
        """
        Sets a weight for a given edge, thus allowing for trust metrics to affect graph structure.
        :param node1_id: the first node of the edge
        :param node2_id: the second node of the edge
        :param weight: the weight of the edge to set
        """
        self.get_graph().setWeight(node1_id, node2_id, weight)
    def get_weight(self, node1_id: int, node2_id: int) -> float:
        """
        Gets the weight of a given edge.
        :param node1_id: the first node of the edge
        :param node2_id: the second node of the edge
        """
        return self.get_graph().weight(node1_id, node2_id)
    def get_edges(self) -> List[Tuple[int, int]]:
        """Return the edge list of the underlying graph as (node, node) pairs."""
        return list(self.get_graph().iterEdges())
    def trust(self, node1_id: int, node2_id: int) -> float:
        """
        Determines the trust of a given edge, which is a value from 0 to 1.
        This uses the average of the difference in vote between each pair of places.
        :param node1_id: the first node of the edge
        :param node2_id: the second node of the edge
        """
        votes1 = self.get_vote(node1_id)
        votes2 = self.get_vote(node2_id)
        differences = votes1 - votes2
        nans = np.isnan(differences)
        # Places where either node has not voted contribute zero difference.
        differences[nans] = 0
        differences = np.square(differences)
        # Normalise by the worst case (every place differing by MAX_DIFF),
        # invert so identical votes give trust 1, then clamp to [0, 1].
        trust = 1 - (np.sum(differences) / (len(differences) * MAX_DIFF ** 2))
        return max(0, min(1, trust))
    def average_trust(self):
        """
        Average trust value for all pairs of nodes
        """
        trusts = [self.get_weight(a, b)
                  for (a, b) in self.get_graph().iterEdges()]
        return np.mean(trusts)
    def __copy__(self):
        """
        copy operation.
        :return: A shallow copy of the instance
        """
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result
    def __deepcopy__(self, memo=None):
        """
        deepcopy operation.
        :param memo: the memo dictionary
        :return: A deep copy of the instance
        """
        if memo is None:
            memo = {}
        cls = self.__class__
        result = cls.__new__(cls)
        # Register early so self-references resolve to the copy.
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # noinspection PyArgumentList
            setattr(result, k, deepcopy(v, memo))
        return result
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
5,368
|
RobMurray98/BribeNet
|
refs/heads/master
|
/src/BribeNet/bribery/static/nonBriber.py
|
from BribeNet.bribery.static.briber import StaticBriber
# performs no bribery
class NonBriber(StaticBriber):
    """Baseline static briber that never bribes anyone (no-op strategy)."""
    def _next_bribe(self):
        """Do nothing: this briber performs no bribery."""
        pass
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
5,369
|
RobMurray98/BribeNet
|
refs/heads/master
|
/src/BribeNet/bribery/temporal/nonBriber.py
|
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber
class NonBriber(TemporalBriber):
    """Baseline temporal briber that never bribes anyone."""
    def _next_action(self) -> SingleBriberyAction:
        """Return an empty SingleBriberyAction (no bribes scheduled)."""
        return SingleBriberyAction(self)
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
5,370
|
RobMurray98/BribeNet
|
refs/heads/master
|
/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py
|
from BribeNet.bribery.temporal.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from test.BribeNet.bribery.temporal.briberTestCase import BriberTestCase
class TestOneMoveRandomBriber(BriberTestCase):
    """Behavioural tests for OneMoveRandomBriber on a no-customer-action graph."""
    def setUp(self) -> None:
        self.briber = OneMoveRandomBriber(10)
        self.rg = NoCustomerActionGraph(self.briber)
    def test_next_action_increases_p_rating(self):
        """Performing the briber's next action must not lower the graph evaluation."""
        g = self.briber._g
        pending = self.briber.next_action()
        bid = self.briber.get_briber_id()
        before = g.eval_graph(briber_id=bid)
        pending.perform_action()
        after = g.eval_graph(briber_id=bid)
        self.assertGreaterEqual(after, before)
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
5,371
|
RobMurray98/BribeNet
|
refs/heads/master
|
/test/BribeNet/bribery/temporal/briberTestCase.py
|
from abc import ABC, abstractmethod
from unittest import TestCase
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
class BriberTestCase(TestCase, ABC):
    """Shared fixture base for temporal briber tests.

    Concrete subclasses override setUp to install the briber/graph pair under
    test; the default body wires a NonBriber to a NoCustomerActionGraph.
    """
    @abstractmethod
    def setUp(self) -> None:
        # Default fixture; subclasses are expected to replace these.
        self.briber = NonBriber(0)
        self.rg = NoCustomerActionGraph(self.briber)
    def tearDown(self) -> None:
        # Drop fixtures so each test starts from a clean graph.
        del self.briber, self.rg
|
{"/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/test_briber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/static/briberTestCase.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.