text stringlengths 38 1.54M |
|---|
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Any
from loguru import logger
from .html import format_attrs, format_attrs_kw
if TYPE_CHECKING:
from flask import Flask
def format_attrs_ctx() -> dict[str, Any]:
    """Expose the HTML attribute-formatting helpers to Jinja templates."""
    return dict(format_attrs=format_attrs, format_attrs_kw=format_attrs_kw)
def register(app: Flask, version: str) -> None:
    """Attach this module's context processors and template filters to *app*.

    Registers:
      * a context processor injecting ``version`` into every template,
      * the attribute-formatting helpers from :func:`format_attrs_ctx`,
      * the ``fmt_attrs`` filter,
      * a ``log`` filter that logs its input and renders as empty text.
    """
    app.context_processor(partial(dict, version=version))
    app.context_processor(format_attrs_ctx)
    app.template_filter("fmt_attrs")(format_attrs)

    @app.template_filter("log")
    def _log_filter(inp: Any) -> str:
        # Side-effect only: emit the value to the log, output nothing.
        logger.info("{0!r}", inp)
        return ""
|
from copy import copy
from card import Card
from symbols import Direction, Rank, Suit
# TODO: This class doesn't seem very useful now. Either find its use in the next step of the work, or delete it.
class State(object):
    """Snapshot of the declarer and dummy holdings.

    The two lists need not be full hands; they may be any subset of cards,
    e.g. the cards of a single suit. Callers pass them already sorted.
    """

    def __init__(self, declarerCards, dummyCards):
        """
        @param declarerCards: declarer's cards (sorted list of Card)
        @param dummyCards: dummy's cards (sorted list of Card)
        """
        self.declarerCards, self.dummyCards = declarerCards, dummyCards
class Play(object):
    """Plans the declarer/dummy card play, suit by suit (Python 2 module).

    Hands are bucketed per suit and kept sorted high-to-low; plan() returns a
    list of (declarer card, dummy card) moves, where either element may be the
    string "Discard" until it is substituted by whatToDiscard().
    """

    def __init__(self, declarerCards, dummyCards):
        """
        @param declarerCards: declarer's full hand (list of Card)
        @param dummyCards: dummy's full hand (list of Card)
        """
        #if suit not in Suit:
        #    raise TypeError, "Expected Suit, got %s" % type(suit)
        #self.suit = suit
        # Bucket each hand by suit, highest card first.
        self.suitDeclarerCards = {}
        self.suitDummyCards = {}
        for suit in Suit:
            self.suitDeclarerCards[suit] = [dc for dc in declarerCards if dc.suit == suit]
            self.suitDummyCards[suit] = [dc for dc in dummyCards if dc.suit == suit]
            # NOTE(review): relies on Card defining rich comparisons -- confirm.
            self.suitDeclarerCards[suit].sort(reverse = True)
            self.suitDummyCards[suit].sort(reverse = True)
        # All ranks, highest first; used to walk down runs of touching honors.
        self.orderedRanks = sorted(Rank, key = lambda x: Rank[x], reverse = True)
        # Lead markers: 1 = declarer on lead, 0 = dummy on lead.
        self.declarerLead = 1
        self.dummyLead = 0
        self.initialState = State(self.suitDeclarerCards, self.suitDummyCards) # useless as of now

    def findTouchingHonorCards(self, suit):
        """Return the ranks (top-down, no gaps) held between the two hands in
        *suit*, i.e. the run of "touching" honors starting from the highest
        rank; stops at the first rank neither hand holds.

        Returns None when *suit* is not a valid Suit.
        """
        if suit not in Suit:
            return
        # TODO: Write some typerror here and do proper exception handling
        # i/j walk declarer's and dummy's sorted suit holdings; k walks the
        # global rank order from the top.
        i = 0
        j = 0
        k = 0
        limit1 = len(self.suitDeclarerCards[suit])
        limit2 = len(self.suitDummyCards[suit])
        touchingHonors = []
        # While both hands have cards left: consume the next expected rank
        # from whichever hand holds it; stop at the first gap.
        while i < limit1 and j < limit2:
            if Rank[self.suitDeclarerCards[suit][i].rank] == Rank[self.orderedRanks[k]]:
                touchingHonors.append(self.orderedRanks[k])
                k += 1
                i += 1
            elif Rank[self.suitDummyCards[suit][j].rank] == Rank[self.orderedRanks[k]]:
                touchingHonors.append(self.orderedRanks[k])
                k += 1
                j += 1
            else:
                break
        # One hand exhausted: continue the run from the remaining hand only.
        while i < limit1:
            if Rank[self.suitDeclarerCards[suit][i].rank] == Rank[self.orderedRanks[k]]:
                touchingHonors.append(self.orderedRanks[k])
                k += 1
                i += 1
            else:
                break
        while j < limit2:
            if Rank[self.suitDummyCards[suit][j].rank] == Rank[self.orderedRanks[k]]:
                touchingHonors.append(self.orderedRanks[k])
                k += 1
                j += 1
            else:
                break
        return touchingHonors

    def planForEqualDistribution(self, maxNoOfTricks, declarerCards, dummyCards):
        """Plan up to *maxNoOfTricks* tricks when both hands hold the same
        number of cards in the suit: the hand with the higher top card wins
        the trick while the other hand unblocks with its lowest card.

        @return: list of (declarer card, dummy card) tuples.
        """
        size = len(declarerCards)
        declarerStart = 0
        declarerEnd = size - 1
        dummyStart = 0
        dummyEnd = size - 1
        moves = [] # We'll store tuples of type (declarer card, dummy card)
        trickNo = 0
        while trickNo < maxNoOfTricks:
            if Rank[declarerCards[declarerStart].rank] > Rank[dummyCards[dummyStart].rank]:
                # Declarer's top card wins; dummy drops its lowest.
                moves.append((declarerCards[declarerStart], dummyCards[dummyEnd]))
                declarerStart += 1
                dummyEnd -= 1
            else:
                # Dummy's top card wins; declarer drops its lowest.
                moves.append((declarerCards[declarerEnd], dummyCards[dummyStart]))
                declarerEnd -= 1
                dummyStart += 1
            trickNo += 1
        return moves

    def planForUnequalDistribution(self, declarerCards, dummyCards, whoseLead, honorRankPosition, touchingHonors):
        """Plan tricks when the two hands hold different numbers of cards in
        the suit. Cashes touching honors from the shorter hand first, then
        runs the longer hand's honors (discarding from the exhausted short
        hand); otherwise recursively evaluates the two possible plays and
        keeps the branch that yields more moves.

        @param whoseLead: self.declarerLead (1) or self.dummyLead (0)
        @param honorRankPosition: index into the honor run -- presumably a
            progress counter; TODO(review) confirm, it is incremented but
            never read here.
        @return: list of (declarer card, dummy card) move tuples.
        """
        moves = []
        totalTricsTaken = 0
        declarerLength = len(declarerCards)
        dummyLength = len(dummyCards)
        # Identify the shorter/longer holding; shorter == 1 means declarer
        # holds the shorter suit.
        if declarerLength > dummyLength:
            shorterHand = dummyCards
            longerHand = declarerCards
            shorter = 0
        else:
            shorterHand = declarerCards
            longerHand = dummyCards
            shorter = 1
        shorterStart = 0
        longerStart = 0
        if (shorter == 1):
            shorterEnd = declarerLength - 1
            longerEnd = dummyLength - 1
        else:
            shorterEnd = dummyLength - 1
            longerEnd = declarerLength - 1
        for trickNo in range(0, max(declarerLength, dummyLength)):
            # Stop once the hand that must lead has no cards left.
            if (whoseLead == self.declarerLead and declarerLength - trickNo <= 0):
                break
            if (whoseLead == self.dummyLead and dummyLength - trickNo <= 0):
                break
            # TODO: This seems like the best possible way. Still, have to see how good it works.
            # Case 1: shorter hand's top card is a touching honor and beats
            # the longer hand's top card -- cash it, longer hand unblocks low.
            if (shorterStart <= shorterEnd) and (shorterHand[shorterStart].rank in touchingHonors) and (Rank[shorterHand[shorterStart].rank] > Rank[longerHand[longerStart].rank]):
                if (shorter == 1):
                    move = (shorterHand[shorterStart], longerHand[longerEnd])
                    nextLead = self.declarerLead
                else:
                    move = (longerHand[longerEnd], shorterHand[shorterStart])
                    nextLead = self.dummyLead
                moves.append(move)
                shorterStart += 1
                longerEnd -= 1
                whoseLead = nextLead
                totalTricsTaken += 1
                honorRankPosition += 1 #important to keep modifying this
            # TODO: this is a bit of troubling situation. We haven't taken care of the case with the gap in the shorter suit. Have to do something
            # Case 2: longer hand's top card is a touching honor.
            elif longerHand[longerStart].rank in touchingHonors:
                # I think here we can be sure that the shorter one is not the one leading right now
                if (shorterStart > shorterEnd):
                    # Short hand exhausted: run the longer hand's remaining
                    # touching honors, discarding from the other hand.
                    while longerStart <= longerEnd:
                        if (longerHand[longerStart].rank in touchingHonors):
                            if (shorter == 1):
                                move = ("Discard", longerHand[longerStart])
                            else:
                                move = (longerHand[longerStart], "Discard")
                            moves.append(move)
                            longerStart += 1
                            totalTricsTaken += 1
                            honorRankPosition += 1
                        else:
                            break
                else:
                    # Both hands still hold cards: recursively try the two
                    # candidate plays and keep whichever wins more tricks.
                    if shorter == 1:
                        # Option 1: lead shorter (declarer) top card, longer
                        # hand unblocks from the bottom.
                        possibleMove1 = (shorterHand[shorterStart], longerHand[longerEnd])
                        if shorterHand[shorterStart].rank not in touchingHonors:
                            tricks1 = 0
                            futureMoves1 = []
                        else:
                            if (Rank[shorterHand[shorterStart].rank] > Rank[longerHand[longerEnd].rank]):
                                lead = self.declarerLead
                            else:
                                lead = self.dummyLead
                            futureMoves1 = self.planForUnequalDistribution(declarerCards[shorterStart + 1:shorterEnd + 1], dummyCards[longerStart:longerEnd], lead, honorRankPosition, touchingHonors)
                            tricks1 = len(futureMoves1) + 1
                        # Option 2: win in the longer (dummy) hand while the
                        # shorter hand drops its lowest card.
                        possibleMove2 = (shorterHand[shorterEnd], longerHand[longerStart])
                        futureMoves2 = self.planForUnequalDistribution(declarerCards[shorterStart:shorterEnd], dummyCards[longerStart + 1:longerEnd + 1], self.dummyLead, honorRankPosition + 1, touchingHonors)
                        tricks2 = 1 + len(futureMoves2)
                    else:
                        # Mirror image: dummy holds the shorter suit.
                        possibleMove1 = (longerHand[longerEnd], shorterHand[shorterStart])
                        if shorterHand[shorterStart].rank not in touchingHonors:
                            tricks1 = 0
                            futureMoves1 = []
                        else:
                            if (Rank[shorterHand[shorterStart].rank] > Rank[longerHand[longerEnd].rank]):
                                lead = self.dummyLead
                            else:
                                lead = self.declarerLead
                            futureMoves1 = self.planForUnequalDistribution(declarerCards[longerStart:longerEnd], dummyCards[shorterStart + 1:shorterEnd + 1], lead, honorRankPosition, touchingHonors)
                            tricks1 = len(futureMoves1) + 1
                        possibleMove2 = (longerHand[longerStart], shorterHand[shorterEnd])
                        futureMoves2 = self.planForUnequalDistribution(declarerCards[longerStart + 1:longerEnd + 1], dummyCards[shorterStart:shorterEnd], self.declarerLead, honorRankPosition + 1, touchingHonors)
                        tricks2 = 1 + len(futureMoves2)
                    # Keep the better branch; ties favour option 2.
                    if (tricks1 > tricks2):
                        moves.append(possibleMove1)
                        moves = moves + futureMoves1
                    else:
                        moves.append(possibleMove2)
                        moves = moves + futureMoves2
                    break
            else:
                # No touching honor on top of either holding: stop planning.
                break
        return moves

    def whatToDiscard(self, cards, suit, touchingHonors):
        """Pick a card outside *suit* to throw away: the lowest bottom card
        that is not among that suit's touching honors.

        NOTE(review): looks buggy -- the fallback loop assigns cardToDiscard
        but the method never returns it (falls off the end, returning None),
        and its condition mixes `and`/`or` without parentheses so
        `cards[s][-1]` can be evaluated for an empty list. Also the first
        loop tests a Card object against a list of rank names -- confirm.
        """
        cardToDiscard = None
        for s in Suit:
            if s == suit:
                continue
            if (len(cards[s]) > 0) and (cards[s][-1] not in touchingHonors[s]):
                if (not cardToDiscard) or (Rank[cards[s][-1].rank] < Rank[cardToDiscard.rank]):
                    cardToDiscard = cards[s][-1]
        if cardToDiscard:
            return cardToDiscard
        # Fallback: no non-honor discard available; pick the lowest card.
        for s in Suit:
            if s == suit:
                continue
            if (len(cards[s]) > 0) and (not cardToDiscard) or (Rank[cards[s][-1].rank] < Rank[cardToDiscard.rank]):
                cardToDiscard = cards[s][-1]

    def planForMultipleSuits(self, declarerCards, dummyCards, touchingHonors, whoseLead, depth, initialSuit = None):
        """Recursively build the longest plan across all suits.

        When *initialSuit* is given, plan that suit first, apply the moves
        (removing played cards, substituting "Discard" placeholders), then
        recurse on the remaining cards. Otherwise try each candidate suit on
        a copied hand and keep the longest resulting plan.

        @param depth: recursion depth (debug only).
        @return: list of (declarer card, dummy card) moves.
        """
        #if depth == 15:
        #    return []
        print "here"
        print declarerCards
        print dummyCards
        print touchingHonors, whoseLead, depth
        declarerLength = {}
        dummyLength = {}
        finalPlan = []
        for suit in Suit:
            declarerLength[suit] = len(declarerCards[suit])
            dummyLength[suit] = len(dummyCards[suit])
        if (initialSuit and initialSuit in Suit):
            # Plan the requested suit directly on the passed-in hands.
            if declarerLength[initialSuit] == dummyLength[initialSuit]:
                maxNoOfTricks = min(len(touchingHonors[initialSuit]), declarerLength[initialSuit])
                initialPlan = self.planForEqualDistribution(maxNoOfTricks, declarerCards[initialSuit], dummyCards[initialSuit])
            else:
                initialPlan = self.planForUnequalDistribution(declarerCards[initialSuit], dummyCards[initialSuit], whoseLead, 0, touchingHonors[initialSuit])
            if (len(initialPlan)) == 0:
                return finalPlan
            # Whoever won the last trick is on lead for the next suit.
            if type(initialPlan[-1][0]) == type('Discard'):
                whoseLead = self.dummyLead
            elif type(initialPlan[-1][1]) == type('Discard'):
                whoseLead = self.declarerLead
            elif Rank[initialPlan[-1][0].rank] > Rank[initialPlan[-1][1].rank]:
                whoseLead = self.declarerLead
            else:
                whoseLead = self.dummyLead
            trickNo = 0
            # Apply the plan: remove played cards, resolve "Discard" slots.
            # NOTE(review): `suit` below is the leftover from the lengths
            # loop above (the last suit in Suit), not initialSuit -- looks
            # like a bug, confirm.
            for move in initialPlan:
                declarerMove = move[0]
                dummyMove = move[1]
                if type(declarerMove) != type('Discard'):
                    declarerCards[initialSuit].remove(declarerMove)
                else:
                    cardToDiscard = self.whatToDiscard(declarerCards, suit, touchingHonors)
                    initialPlan[trickNo] = (cardToDiscard, move[1])
                    declarerCards[cardToDiscard.suit].remove(cardToDiscard)
                if type(dummyMove) != type('Discard'):
                    dummyCards[initialSuit].remove(dummyMove)
                else:
                    cardToDiscard = self.whatToDiscard(dummyCards, suit, touchingHonors)
                    # NOTE(review): rebuilds the same tuple instead of
                    # substituting cardToDiscard (compare the copy-based
                    # branch below, which uses (move[0], cardToDiscard)) --
                    # looks like a bug, confirm.
                    initialPlan[trickNo] = (move[0], move[1])
                    dummyCards[cardToDiscard.suit].remove(cardToDiscard)
                trickNo += 1
            finalPlan = initialPlan + self.planForMultipleSuits(declarerCards, dummyCards, touchingHonors, whoseLead, depth + 1)
            return finalPlan
        # No initial suit given: try every playable suit on copies and keep
        # the longest plan found.
        possibleInitialPlan = []
        for suit in Suit:
            # The hand on lead must hold a card of the suit.
            if whoseLead == 1 and len(declarerCards[suit]) == 0:
                continue
            if whoseLead == 0 and len(dummyCards[suit]) == 0:
                continue
            # Skip suits where neither hand's top card is a touching honor.
            if (len(declarerCards[suit]) > 0 and declarerCards[suit][0].rank not in touchingHonors[suit]) and (len(dummyCards[suit]) > 0 and dummyCards[suit][0].rank not in touchingHonors[suit]):
                continue
            declarerCardsCopy = {}
            dummyCardsCopy = {}
            # As this is shallow copy, we have to separately copy lists for all the suits
            # I sincerely hope that this suit in suit loop doesn't cause any trouble
            for suit2 in Suit:
                declarerCardsCopy[suit2] = copy(declarerCards[suit2])
                dummyCardsCopy[suit2] = copy(dummyCards[suit2])
            if declarerLength[suit] == dummyLength[suit]:
                maxNoOfTricks = min(len(touchingHonors[suit]), declarerLength[suit])
                possibleInitialPlan = self.planForEqualDistribution(maxNoOfTricks, declarerCards[suit], dummyCards[suit])
            else:
                possibleInitialPlan = self.planForUnequalDistribution(declarerCards[suit], dummyCards[suit], whoseLead, 0, touchingHonors[suit]) # TODO:something has to be done for all these honor rank positions stuff and all
            if len(possibleInitialPlan) == 0:
                continue
            # Determine who leads after this suit's last trick.
            if type(possibleInitialPlan[-1][0]) == type('Discard'):
                whoseLead = self.dummyLead
            elif type(possibleInitialPlan[-1][1]) == type('Discard'):
                whoseLead = self.declarerLead
            elif Rank[possibleInitialPlan[-1][0].rank] > Rank[possibleInitialPlan[-1][1].rank]:
                whoseLead = self.declarerLead
            else:
                whoseLead = self.dummyLead
            trickNo = 0
            # Apply the candidate plan on the copies.
            for move in possibleInitialPlan:
                declarerMove = move[0]
                dummyMove = move[1]
                if type(declarerMove) != type('Discard'):
                    declarerCardsCopy[suit].remove(declarerMove)
                else:
                    cardToDiscard = self.whatToDiscard(declarerCardsCopy, suit, touchingHonors)
                    possibleInitialPlan[trickNo] = (cardToDiscard, move[1])
                    declarerCardsCopy[cardToDiscard.suit].remove(cardToDiscard)
                if type(dummyMove) != type('Discard'):
                    dummyCardsCopy[suit].remove(dummyMove)
                else:
                    cardToDiscard = self.whatToDiscard(dummyCardsCopy, suit, touchingHonors)
                    possibleInitialPlan[trickNo] = (move[0], cardToDiscard)
                    dummyCardsCopy[cardToDiscard.suit].remove(cardToDiscard)
                trickNo += 1
            print "suit initial plan", suit, possibleInitialPlan
            possibleFinalPlan = possibleInitialPlan + self.planForMultipleSuits(declarerCardsCopy, dummyCardsCopy, touchingHonors, whoseLead, depth + 1)
            if len(possibleFinalPlan) > len(finalPlan):
                finalPlan = possibleFinalPlan
        return finalPlan

    def plan(self):
        """Compute the touching-honor runs for every suit and build the full
        play plan, starting (hard-coded) from Diamonds with dummy on lead."""
        touchingHonors = {}
        for suit in Suit:
            touchingHonors[suit] = self.findTouchingHonorCards(suit)
        # NOTE(review): initial suit and lead are hard-coded -- confirm.
        return self.planForMultipleSuits(self.suitDeclarerCards, self.suitDummyCards, touchingHonors, self.dummyLead, 0, "Diamond")
def main():
    """Build a sample deal (13 cards per hand) and print the computed plan."""
    c1 = Card('Ace', 'Spade')
    c2 = Card('King', 'Spade')
    c3 = Card('Eight', 'Spade')
    c4 = Card('Seven', 'Spade')
    c5 = Card('Six', 'Spade')
    c6 = Card('Five', 'Spade')
    c7 = Card('Ace', 'Heart')
    c8 = Card('King', 'Heart')
    c9 = Card('Queen', 'Heart')
    c10 = Card('Jack', 'Heart')
    c11 = Card('Ten', 'Heart')
    c12 = Card('Nine', 'Heart')
    c13 = Card('Queen', 'Spade')
    c14 = Card('Jack', 'Spade')
    c15 = Card('Two', 'Heart')
    c16 = Card('Five', 'Heart')
    c17 = Card('Ace', 'Club')
    c18 = Card('King', 'Club')
    c19 = Card('Jack', 'Club')
    c20 = Card('Ten', 'Club')
    c21 = Card('Nine', 'Club')
    c22 = Card('Six', 'Club')
    c23 = Card('Five', 'Club')
    c24 = Card('Four', 'Club')
    c25 = Card('Three', 'Club')
    c26 = Card('Two', 'Club')
    # Fixed: the dummy hand previously listed c22 twice and omitted c21,
    # leaving it with only 12 distinct cards.
    game = Play([c1, c2, c3, c7, c8, c9, c14, c15, c16, c17, c24, c25, c26],
                [c4, c5, c6, c10, c11, c12, c13, c18, c19, c20, c21, c22, c23])
    finalPlan = game.plan()
    for move in finalPlan:
        print(move)
    if (len(finalPlan) == 0):
        print("No plan found")

if __name__ == "__main__":
    main()
|
# summarize multiple confidence intervals on an ARIMA forecast
from pandas import Series
from statsmodels.tsa.arima_model import ARIMA
# load data
# NOTE(review): Series.from_csv is deprecated and removed in pandas >= 0.25;
# pin pandas or migrate to pandas.read_csv(..., index_col=0, parse_dates=True)
# -- confirm the installed pandas version.
series = Series.from_csv('daily-total-female-births.csv', header=0)
# split data into train and test sets: all but the final observation train
# the model; the last observation is held out.
X = series.values
X = X.astype('float32')
size = len(X) - 1
train, test = X[0:size], X[size:]
# fit an ARIMA(p=5, d=1, q=1) model
# NOTE(review): statsmodels.tsa.arima_model.ARIMA is removed in newer
# statsmodels (use statsmodels.tsa.arima.model.ARIMA) -- confirm version.
model = ARIMA(train, order=(5,1,1))
model_fit = model.fit(disp=False)
# summarize the one-step-ahead forecast interval at several alpha levels
intervals = [0.2, 0.1, 0.05, 0.01]
for a in intervals:
    forecast, stderr, conf = model_fit.forecast(alpha=a)
    print('%.1f%% Confidence Interval: %.3f between %.3f and %.3f' % ((1-a)*100, forecast, conf[0][0], conf[0][1]))
import scrapy
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import Rule
from geoscrap_project.items.Items import *
from bs4 import BeautifulSoup
import cfscrape
import json
import jmespath
import pendulum
from pathlib import Path
import random
from urllib.parse import urlparse
class GeocachingSpider(scrapy.Spider):
    """Logs into geocaching.com, drives the ASP.NET cache-search form
    (search type -> country -> state) and scrapes the paginated results."""
    name = "GeocachingSpider"
    start_urls = ['https://www.geocaching.com/account/signin']
    custom_settings = {
        'CONCURRENT_REQUESTS': '1',
        'DOWNLOAD_DELAY': '2',
        'COOKIES_ENABLED': True,
        'ITEM_PIPELINES': {
            'geoscrap_project.pipelines.JsonPipeline': 200,
        },
        'HTTPERROR_ALLOWED_CODES': [301,302,404],
        'HTTPPROXY_ENABLED': False,
        'REDIRECT_ENABLED': True
    }
    allowed_domains = ['geocaching.com']

    def __init__(self, aDate=None):
        """:param aDate: reference date (pendulum instance); defaults to
        "today" at spider-creation time.

        Fixed: the default used to be ``aDate = pendulum.today()`` in the
        signature, which is evaluated only once at import time, so every
        spider instance silently shared the stale import-time date.
        """
        super(GeocachingSpider, self).__init__()
        self.aDate = pendulum.today() if aDate is None else aDate
        self.timestamp = self.aDate.timestamp()
        print("PENDULUM UTC TODAY ", self.aDate.isoformat())
        print("PENDULUM TO TIMESTAMP ", self.timestamp)

    def parse(self, response):
        """Fill and submit the sign-in form (CSRF token + credentials)."""
        meta = response.meta
        self.logger.debug('Parse function called on %s', response.url)
        # https://stackoverflow.com/questions/34076989/python-scrapy-login-authentication-spider-issue
        token = response.css('input[name=__RequestVerificationToken]::attr(value)').extract()[0]
        return scrapy.FormRequest.from_response(
            response,
            meta = meta,
            formxpath="//form[@action='/account/signin']",
            formdata={'__RequestVerificationToken':token,'UsernameOrEmail': 'xxx', 'Password': 'xxx'},
            callback=self.after_login
        )

    def after_login(self, response):
        """After authenticating, jump to the 'nearest caches' search page."""
        print(response)
        meta = response.meta
        # go to nearest page
        return scrapy.Request(url="https://www.geocaching.com/seek/nearest.aspx",
                              meta=meta,
                              callback=self.parse_cacheSearch,
                              dont_filter=True)

    ## SIMULATE THE THREE STEP TO POPULATE THE FORM : search type, country, state
    ## NEEDED TO POPULATE ASP __VIEWSTATE hidden value
    ## STEP 1 : SEARCH TYPE = SC
    def parse_cacheSearch(self,response):
        """Step 1: select the search type so ASP.NET refreshes __VIEWSTATE."""
        print("TYPE OF SEARCH")
        return scrapy.FormRequest.from_response(
            response,
            #meta={'proxy': 'http://localhost:8888'},
            formxpath="//form[@id='aspnetForm']",
            formdata={
                'ctl00$ContentBody$uxTaxonomies':'9a79e6ce-3344-409c-bbe9-496530baf758',
                'ctl00$ContentBody$LocationPanel1$ddSearchType':'SC'},
            callback=self.parse_cacheCountry
        )

    ## STEP 2 : SELECT COUNTRY
    def parse_cacheCountry(self, response):
        """Step 2: select the country (id 73 = France)."""
        print("COUNTRY SELECT")
        return scrapy.FormRequest.from_response(
            response,
            #meta={'proxy': 'http://localhost:8888'},
            formxpath="//form[@id='aspnetForm']",
            formdata={
                'ctl00$ContentBody$uxTaxonomies': '9a79e6ce-3344-409c-bbe9-496530baf758',
                'ctl00$ContentBody$LocationPanel1$ddSearchType': 'SC',
                'ctl00$ContentBody$LocationPanel1$CountryStateSelector1$selectCountry': '73'},
            callback=self.parse_cacheState
        )

    ## STEP 3 : SELECT STATE AND SENT FINAL QUERY by submit
    ## 421 haute normandie
    ## 414 basse normandie
    def parse_cacheState(self, response):
        """Step 3: select the state and submit the final search query."""
        print ("SELECT STATE NORMANDY")
        return scrapy.FormRequest.from_response(
            response,
            #meta={'proxy': 'http://localhost:8888'},
            formxpath="//form[@id='aspnetForm']",
            formdata={
                'ctl00$ContentBody$uxTaxonomies': '9a79e6ce-3344-409c-bbe9-496530baf758',
                'ctl00$ContentBody$LocationPanel1$ddSearchType': 'SC',
                'ctl00$ContentBody$LocationPanel1$CountryStateSelector1$selectCountry': '73',
                'ctl00$ContentBody$LocationPanel1$CountryStateSelector1$selectState': '487',
                'ctl00$ContentBody$LocationPanel1$btnLocale': 'Recherche+de+géocaches'},
            callback=self.parse_pages
        )

    def display_hidden_tag(self,response):
        """Debug helper: print all hidden form inputs of the page."""
        # NOTE(review): BeautifulSoup without an explicit parser emits a
        # warning and may vary across environments -- confirm.
        soup = BeautifulSoup(response.body)
        hidden_tags = soup.find_all("input", type="hidden")
        for tag in hidden_tags:
            print(tag)

    def parse_cachesList(self, response):
        """Extract {code: {url, name, page}} for every cache row on the
        current result page."""
        # print("PAGE >> ", response.meta['page'] ," <<<<<<<<<<<<<")
        # self.display_hidden_tag(response)
        # Update Meta
        geocaches = {}
        #response.meta['viewstate'] = self.get_viewstate(response)
        tdList = response.xpath('(//td[@class="Merge"][2])')
        for td in tdList:
            geocache={}
            link = td.xpath('a//@href')
            name = td.xpath('a/span/text()')
            print("links = ", link.extract())
            # print("name = ", name.extract())
            geocache["url"] = link.extract_first()
            geocache["name"] = name.extract_first()
            # The GC code is the first path segment chunk, e.g. /geocache/GCxxxx_name
            p = urlparse(geocache["url"])
            code = p.path.split("/")[2].split("_")[0]
            if "page" not in response.meta.keys():
                print("PAGE NOT IN RESPONSE")
                geocache["page"] = 1
            else:
                print("PAGE IN RESPONSE")
                geocache["page"] = response.meta['page'][0]
            geocaches[code] = geocache
        return geocaches

    def get_viewstate(self,response):
        """Debug helper: return the two __VIEWSTATE hidden values."""
        state = response.xpath('//input[@id="__VIEWSTATE"]/@value').extract()
        state1 = response.xpath('//input[@id="__VIEWSTATE1"]/@value').extract()
        print('xxx STATE = ', state)
        print('xxx STATE1 = ', state1)
        return [state, state1]

    def parse_pages(self,response):
        """Walk the paginated result list.

        First visit: read the page count, store a [current, last] counter in
        meta and post back once to initialise paging. Subsequent visits: post
        back the ASP.NET pager control for the next page ("next block" every
        10th page, direct go-to-page link otherwise). Each visit also yields
        the caches scraped from the current page.
        """
        print("META KEY = ", response.meta.keys())
        viewstate = self.get_viewstate(response)
        geocaches = self.parse_cachesList(response)
        if 'page' not in response.meta.keys():
            infoPage = response.xpath('//td[@class="PageBuilderWidget"]/span/b[3]//text()')
            print("PAGE NOT IN RESPONSE META KEY")
            numberOfPage = int(infoPage.extract_first())
            response.meta['page'] = [1, 3]#numberOfPage
            yield scrapy.FormRequest.from_response(
                response,
                #meta={'proxy': 'http://localhost:8888', 'page': response.meta['page'], 'geocaches': geocaches},
                meta={ 'page': response.meta['page']},
                formname="aspnetForm",
                formxpath="//form[@id='aspnetForm']",
                formdata={'recaptcha_challenge_field': None,
                          'recaptcha_response_field': None,
                          'ctl00$ContentBody$chkHighlightBeginnerCaches': None,
                          'ctl00$ContentBody$chkAll': None,
                          '__EVENTTARGET': None,
                          '__EVENTARGUMENT': None},
                dont_click=True,
                callback=self.parse_pages,
                dont_filter=True
            )
        else:
            if response.meta['page'][0] > response.meta['page'][1]:
                return
            print("NEXT Page : ", response.meta['page'])
            response.meta['page'][0] += 1
            if (response.meta['page'][0] - 1) % 10 == 0:
                # Every 10th page: the pager exposes only a "next block" link.
                yield scrapy.FormRequest.from_response(
                    response,
                    meta={ 'page': response.meta['page']},
                    #meta={'proxy': 'http://localhost:8888', 'page': response.meta['page'], 'geocaches': geocaches},
                    formname="aspnetForm",
                    # meta={'page': page},
                    formxpath="//form[@id='aspnetForm']",
                    formdata={'recaptcha_challenge_field': None,
                              'recaptcha_response_field': None,
                              'ctl00$ContentBody$chkHighlightBeginnerCaches': None,
                              'ctl00$ContentBody$chkAll': None,
                              '__EVENTTARGET': 'ctl00$ContentBody$pgrBottom$ctl06', },
                    dont_click=True,
                    callback=self.parse_pages,
                    dont_filter=True
                    #priority=(21 - response.meta['page'][0])
                )
            else:
                print("ctl00$ContentBody$pgrTop$lbGoToPage_"+ str(response.meta['page'][0]))
                yield scrapy.FormRequest.from_response(
                    response,
                    meta={'page': response.meta['page']},
                    #meta={'proxy': 'http://localhost:8888', 'page': response.meta['page'], 'geocaches':geocaches},
                    formname="aspnetForm",
                    # meta={'page': page},
                    formxpath="//form[@id='aspnetForm']",
                    formdata={'recaptcha_challenge_field': None,
                              'recaptcha_response_field': None,
                              'ctl00$ContentBody$chkHighlightBeginnerCaches': None,
                              'ctl00$ContentBody$chkAll': None,
                              '__EVENTTARGET': 'ctl00$ContentBody$pgrTop$lbGoToPage_' + str(response.meta['page'][0]), },
                    dont_click=True,
                    callback=self.parse_pages,
                    dont_filter=True
                    #priority=(21 - response.meta['page'][0])
                )
        print("GEOCACHES = ", geocaches)
        yield geocaches
        #print ("RUN > ", 'ctl00$ContentBody$pgrTop$lbGoToPage_'+str(response.meta['page'][0]))
        #yield result
|
from django.db import models
from django.conf import settings
from django.urls import reverse
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from taggit.managers import TaggableManager
class Category(models.Model):
    """Post category, stored as a short code with a human-readable label."""
    # NOTE(review): DEFAULT ("카테고리", i.e. "category") is not among the
    # CATEGORY_CHOICE stored values -- confirm this is intended.
    DEFAULT = "카테고리"
    CATEGORY_CHOICE = [
        ("FOOD", "음식"),
        ("CAR", "자동차"),
        ("ETC", "기타"),
        # Add Category
        #("value stored in the DB", "human-readable label"),
    ]
    category = models.CharField(
        max_length=10,
        choices=CATEGORY_CHOICE,
        default=DEFAULT,
    )
def user_path(instance, filename):
    """Build the upload path for *instance*'s image file.

    Files land under ``MEDIA_ROOT/<owner username>/<8 random letters>.<ext>``,
    e.g. ``wayhome/abcdefgs.png``. *instance* is the model owning the
    ImageField; *filename* is the uploaded file's original name.
    """
    from random import choice
    from string import ascii_letters
    random_name = ''.join(choice(ascii_letters) for _ in range(8))
    extension = filename.split('.')[-1]
    return '%s/%s.%s' % (instance.owner.username, random_name, extension)
# Thumbnail dimensions in pixels, used by Mapmodel.thumbnail below.
# NOTE(review): "HIGHT" is a typo for "HEIGHT"; kept as-is because the name
# is referenced elsewhere -- rename both definition and uses together.
THUMBNAIL_WIDTH = 120
THUMBNAIL_HIGHT = 100
class Mapmodel(models.Model):
    """A user-posted map entry with a place, description, optional image,
    likes and tags."""
    # Author (the logged-in user); many-to-one. Entries survive user
    # deletion (owner becomes NULL).
    owner = models.ForeignKey(settings.AUTH_USER_MODEL,
                              on_delete=models.SET_NULL,
                              null=True,
                              )
    title = models.CharField(max_length=200)  # post title
    content = models.TextField()
    # Address / place information ("please choose a place" default).
    address = models.CharField(max_length=200, default="장소를 지정해주세요.")
    # Timestamps: set on creation / refreshed on every save.
    pub_date = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # The image is optional.
    image = models.ImageField(blank=True, null=True, upload_to=user_path)
    thumbnail = ImageSpecField(source='image',
                               processors=[ResizeToFill(THUMBNAIL_WIDTH, THUMBNAIL_HIGHT)],
                               format="JPEG",
                               options={'quality':60})
    likes = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='likes')
    tags = TaggableManager(blank=True)

    @property
    def total_like(self):
        """Number of users who liked this entry."""
        return self.likes.count()

    # Misspelled alias kept for backward compatibility with existing
    # templates/callers; prefer ``total_like``.
    @property
    def totla_like(self):
        return self.likes.count()

    class Meta:
        ordering = ('-pub_date',)
class Memo(models.Model):
    """A comment ("memo") attached to a Mapmodel entry."""
    # When the map entry is deleted, its memos must be deleted too (CASCADE).
    target = models.ForeignKey(
        Mapmodel,
        on_delete=models.CASCADE,
        related_name='memos')
    # Comment author; memos survive user deletion (owner becomes NULL).
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True,
    )
    pub_date = models.DateTimeField(auto_now_add=True)  # set on creation
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    approved = models.BooleanField(default=False)  # moderation flag
    memo = models.TextField()

    class Meta:
        ordering = ('-pub_date',)
# -*- coding: utf-8 -*-
"""Hyperparameter optimization by grid search.
"""
# Author: Taku Yoshioka, Shohei Shimizu
# License: MIT
import numpy as np
from bmlingam.cache_mc import create_cache_source
from bmlingam.bmlingam_np import comp_logP, comp_logP_bmlingam_np
from bmlingam.cache_mc_shared import comp_logPs_mp
def find_best_model(xs, hparamss, sampling_mode='normal'):
    """Find the optimal and reverse-optimal models.

    This function calculates marginal likelihoods for the models specified
    with hyperparameter sets included in :code:`hparamss`. The optimal model
    is the one whose marginal likelihood on data :code:`xs` is the maximum.
    Reverse-optimal is the one with the maximum marginal likelihood across
    models whose causations are opposite to the optimal model.

    :param xs: Pairs of observed values.
    :type xs: numpy.ndarray, shape=(n_samples, 2)
    :param hparamss: List of model parameters which defines search space.
        This is created with :func:`bmlingam.define_hparam_searchspace`.
    :type hparamss: list of dict
    :param sampling_mode: Specify the way to perform Monte Carlo sampling
        ('normal', 'cache', or 'cache_mp2'/'cache_mp4'/'cache_mp8' for
        multiprocess evaluation with 2/4/8 workers).
    :type sampling_mode: str
    :return: (best hparams, its posterior, its log-marginal-likelihood,
        best reverse hparams, its posterior, its log-marginal-likelihood)
    """
    assert((xs.shape[1] == 2) and (xs.ndim == 2))
    assert(type(sampling_mode) is str)
    assert(type(hparamss) is list)
    if sampling_mode == 'normal':
        logPs = np.array([comp_logP_bmlingam_np(xs, hparams)[0]
                          for hparams in hparamss])
    elif sampling_mode == 'cache':
        cache = create_cache_source(xs, hparamss)
        logPs = np.array([comp_logP(xs, hparams, cache)[0]
                          for hparams in hparamss])
    elif sampling_mode == 'cache_mp2':
        # Fixed: this mode previously spawned 4 processes, identical to
        # 'cache_mp4'; the mode name implies 2 worker processes.
        logPs = comp_logPs_mp(xs, hparamss, processes=2)
    elif sampling_mode == 'cache_mp4':
        logPs = comp_logPs_mp(xs, hparamss, processes=4)
    elif sampling_mode == 'cache_mp8':
        logPs = comp_logPs_mp(xs, hparamss, processes=8)
    else:
        raise ValueError("Invalid value of sampling_mode: %s" % sampling_mode)
    ix_max_logP = np.argmax(logPs)
    # Find reverse-optimal model: best among the models whose causality is
    # opposite to the optimal one.
    causality_est = hparamss[ix_max_logP]['causality']
    ixs_rev = np.array(
        [ix_model for ix_model in range(len(hparamss))
         if hparamss[ix_model]['causality'] != causality_est]).astype(int)
    assert(len(ixs_rev) == len(hparamss) / 2)
    ix_max_rev = ixs_rev[np.argmax(logPs[ixs_rev])]
    # Posterior probabilities via softmax of log-likelihoods, shifted by the
    # maximum for numerical stability.
    exps = np.exp(logPs - np.max(logPs))
    posterior = 1. / np.sum(exps) # NOTE: exp(0) = 1 for the optimal model
    posterior_rev = exps[ix_max_rev] / np.sum(exps)
    # Log-likelihoods of the two selected models.
    ll = np.max(logPs)
    ll_rev = np.max(logPs[ix_max_rev])
    return hparamss[ix_max_logP], posterior, ll, \
           hparamss[ix_max_rev], posterior_rev, ll_rev
|
import os
import tweepy
import json

# SECURITY: API credentials were previously hard-coded here and committed to
# version control; those keys must be treated as leaked and rotated. They are
# now read from the environment instead.
auth = tweepy.OAuthHandler(os.environ['TWITTER_CONSUMER_KEY'],
                           os.environ['TWITTER_CONSUMER_SECRET'])
auth.set_access_token(os.environ['TWITTER_ACCESS_TOKEN'],
                      os.environ['TWITTER_ACCESS_TOKEN_SECRET'])
api = tweepy.API(auth)

# Fetch the locations Twitter has trending-topic information for.
trends1 = api.trends_available()

# Write the pretty-printed JSON in one pass (previously the file was written
# compact and then re-read and rewritten in place just to indent it).
with open('data.json', 'w') as outfile:
    json.dump(trends1, outfile, indent=4)
|
# -*- coding: utf-8 -*-
#########################################################################
# Copyright (C) 2018-2019 by Simone Gaiarin <simgunz@gmail.com> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see <http://www.gnu.org/licenses/>. #
#########################################################################
from anki.hooks import wrap
from aqt.editor import Editor
def setBrowserMaxImageHeight(self):
    """Inject CSS into the editor's webview so images in the note fields are
    capped to the height and/or width configured for this add-on."""
    config = self.mw.addonManager.getConfig(__name__)
    mode = config['height_or_width']
    if mode == "height":
        rules = " max-height: {};".format(config['max-height'])
    elif mode == "width":
        rules = " max-width: {};".format(config['max-width'])
    elif mode == "both":
        rules = " max-width: {}; max-height: {};".format(config['max-width'],
                                                         config['max-height'])
    else:
        # Unrecognized configuration value: inject nothing.
        return
    self.web.eval("$('head').append('<style type=\"text/css\">"
                  "#fields img{" + rules + "}</style>')")
# Monkey-patch: run setBrowserMaxImageHeight after Editor.setupWeb so the
# CSS cap is injected each time an editor webview is (re)built.
Editor.setupWeb = wrap(Editor.setupWeb, setBrowserMaxImageHeight)
|
from typing import Callable, Union
from argparse import ArgumentTypeError as ArgumentError
import os
class PathType:
    """argparse ``type=`` factory that validates a path argument."""

    def __init__(
        self,
        exists=True,
        val_type: Union[Callable[[str], bool], str, None] = "file",
        dash_ok=True,
    ):
        """Represent an argument of type path to a file, directory or symlink

        :param exists:
            True: a path that does exist
            False: a path that does not exist, in a valid parent directory
            None: don't care
        :param val_type: file, dir, symlink, None, or a function returning
            True for valid paths; None: don't care
        :param dash_ok: whether to allow "-" as stdin/stdout"""
        assert exists in (True, False, None)
        assert val_type in ("file", "dir", "symlink", None) or callable(val_type)
        self._exists = exists
        self._val_type = val_type
        self._dash_ok = dash_ok

    def __call__(self, value):
        """Validate *value*, returning it unchanged or raising
        ``argparse.ArgumentTypeError`` (aliased as ArgumentError)."""
        if value == "-":
            # the special argument "-" means sys.std{in,out}
            if self._val_type == "dir":
                raise ArgumentError(
                    "standard input/output (-) not allowed as directory path"
                )
            elif self._val_type == "symlink":
                raise ArgumentError(
                    "standard input/output (-) not allowed as symlink path"
                )
            elif not self._dash_ok:
                raise ArgumentError("standard input/output (-) not allowed")
        else:
            file_exists = os.path.exists(value)
            if self._exists:
                if not file_exists:
                    raise ArgumentError("path does not exist: '%s'" % value)
                if self._val_type is None:
                    pass
                elif self._val_type == "file":
                    if not os.path.isfile(value):
                        raise ArgumentError("path is not a file: '%s'" % value)
                elif self._val_type == "symlink":
                    if not os.path.islink(value):
                        raise ArgumentError("path is not a symlink: '%s'" % value)
                elif self._val_type == "dir":
                    if not os.path.isdir(value):
                        raise ArgumentError("path is not a directory: '%s'" % value)
                elif not self._val_type(value):
                    raise ArgumentError("path not valid: '%s'" % value)
            else:
                # Necessary to check if it is False, because None might also
                # eval to False
                if self._exists is False and file_exists:
                    raise ArgumentError("path exists: '%s'" % value)
                parent_dir = os.path.dirname(os.path.normpath(value)) or "."
                # Fixed: the existence check must come first. Previously
                # "not isdir(parent)" was tested first, which is also true
                # for a missing parent, so the "parent directory does not
                # exist" branch was unreachable.
                if not os.path.exists(parent_dir):
                    raise ArgumentError(
                        "parent directory does not exist: '%s'" % parent_dir
                    )
                elif not os.path.isdir(parent_dir):
                    raise ArgumentError(
                        "parent path is not a directory: '%s'" % parent_dir
                    )
        return value
|
import time, numpy, pylab
import flypod, sky_times
pylab.ion()  # interactive plotting: figures redraw as they are built
# Data directory for one fly's recording; earlier experiment paths are kept
# commented for reference.
#dirName = '/media/weir05/data/rotator/indoor/gray/12trials/fly10'
#dirName = '/media/weir05/data/rotator/indoor/12trials/fly17'
#dirName = '/media/weir05/data/rotator/indoor/sham12trials/fly09'
#dirName = '/media/weir05/data/rotator/12trials/fly92'
dirName = '/media/weir09/data/indoor/diffuserPol/sham12trials/fly04'
def circmean(alpha):
    """Circular mean (radians) of the angles in *alpha*."""
    mean_sin = numpy.mean(numpy.sin(alpha))
    mean_cos = numpy.mean(numpy.cos(alpha))
    return numpy.arctan2(mean_sin, mean_cos)
def circvar(alpha):
    """Return the circular variance (1 - mean resultant length) of *alpha*.

    Masked arrays are supported: masked entries are excluded from the count.
    """
    if numpy.ma.isMaskedArray(alpha):
        sample_count = alpha.count()
    else:
        sample_count = len(alpha)
    resultant = numpy.sqrt(numpy.sum(numpy.sin(alpha))**2 + numpy.sum(numpy.cos(alpha))**2)
    return 1 - resultant / sample_count
# Load one fly's tracking data; None means the directory held no usable run.
fly = flypod.analyze_directory(dirName)
# Seconds trimmed around each rotator-state change when shading (0 = none).
CHANGEBUFFER = 0
if fly is not None:
    sky = sky_times.analyze_directory(dirName)
    # Plot colors per rotator state: 'U' -> magenta, 'R' -> cyan.
    COLORS = dict({'U': 'm', 'R': 'c'})
    # Nominal experiment duration: 12 trials x 3 minutes. NOTE(review):
    # presumably seconds, matching the 'times' array units -- confirm.
    TOTAL_TIME = 12*3*60
    worldTotals=numpy.array([])
    # Pooled per-state orientation samples, filled by the fig2 loop below.
    totals = dict({'U':numpy.array([]),'R':numpy.array([])})
    orientations = numpy.copy(fly['orientations']) + 180
    times = numpy.copy(fly['times'])
    # Figure 1: raw orientation trace over wall-clock time.
    fig1 = pylab.figure()
    ax1 = fig1.add_subplot(111)
    ax1.plot(times,orientations,'k')
    ax1.set_xticklabels([time.ctime(float(ti))[11:19] for ti in ax1.get_xticks()])
    ax1.set_yticks((0,90,180,270,360))
    # Annotate and shade each rotator-state epoch.
    for i, cT in enumerate(sky['changeTimes'][:-1]):
        ax1.text(cT,380,sky['rotatorState'][i])
        ax1.axvspan(cT-CHANGEBUFFER, sky['changeTimes'][i+1]-CHANGEBUFFER, facecolor=COLORS[sky['rotatorState'][i]], alpha=0.2)
    # Mark frames with tracking failures (NaN orientation).
    if sum(numpy.isnan(orientations)) > 0:
        for trackingErrorTime in times[numpy.isnan(orientations)]:
            ax1.axvline(trackingErrorTime,linewidth=2,color='k')
    # NOTE(review): dict.has_key() is Python 2 only.
    if fly.has_key('stopTimes'):
        for i, sT in enumerate(fly['stopTimes']):
            # Shade stopped periods and exclude them from further analysis.
            ax1.axvspan(sT, fly['startTimes'][i], facecolor='r', alpha=0.5)
            inds = (times > sT) & (times < fly['startTimes'][i])
            ax1.plot(times[inds],orientations[inds],color='0.5')
            orientations[inds] = numpy.nan
    # Discard samples recorded after the nominal experiment duration.
    inds = (times > TOTAL_TIME + times[0])
    orientations[inds] = numpy.nan
    pylab.draw()
    # Figure 2: one polar histogram (rose plot) per rotator-state epoch.
    fig2 = pylab.figure()
    fig2.set_facecolor('w')
    fig2.suptitle(fly['fileName'])
    for i, cT in enumerate(sky['changeTimes'][:-1]):
        ax2=fig2.add_subplot(int(numpy.ceil(numpy.sqrt(len(sky['changeTimes'])-1))),int(numpy.ceil(numpy.sqrt(len(sky['changeTimes'])-1))),1+i,polar=True)
        inds = (times > cT) & (times < sky['changeTimes'][i+1])
        ors = orientations[inds]
        ors = ors[~numpy.isnan(ors)]
        if len(ors)>0:
            orw,n,b,bc,ax = flypod.rose(ors,360)
            # Overlay the mean-direction vector, scaled by 1 - circular variance.
            m=circmean(ors*numpy.pi/180)*180/numpy.pi
            v=circvar(ors*numpy.pi/180)
            ax2.plot([0,numpy.pi/2-m*numpy.pi/180],[0,ax.get_rmax()*(1-v)])
        ax2.set_rmax(.4)
        ax2.set_rgrids([1],'')
        ax2.set_thetagrids([0,90,180,270],['','','',''])
        ax2.set_title(sky['rotatorState'][i])
        #ax.set_axis_bgcolor(COLORS[sky['directions'][i]])
        ax2.axesPatch.set_facecolor(COLORS[sky['rotatorState'][i]])
        ax2.axesPatch.set_alpha(0.4)
        # Accumulate this epoch's samples into the per-state pool.
        totals[sky['rotatorState'][i]] = numpy.concatenate((totals[sky['rotatorState'][i]],ors))
    pylab.draw()
    # Figure 3: pooled rose plot per rotator state across all epochs.
    fig3 = pylab.figure()
    fig3.set_facecolor('w')
    fig3.suptitle(fly['dirName'])
    for i, d in enumerate(COLORS):
        #m=circmean(totals[d]*numpy.pi/180)*180/numpy.pi
        m=circmean(totals[d]*numpy.pi/180)*180/numpy.pi
        v=circvar(totals[d]*numpy.pi/180)
        ax3 = fig3.add_subplot(2,2,i+1,polar=True)
        orw,n,b,bc,ax = flypod.rose(totals[d],360)
        #orw,n,b,bc,ax = flypod.rose(totals[d],360)
        #pylab.hold('on')
        pylab.polar([0,numpy.pi/2-m*numpy.pi/180],[0,ax.get_rmax()*(1-v)])
        ax3.set_rmax(.4)
        ax3.set_rgrids([1],'')
        ax3.set_thetagrids([0,90,180,270],['','','',''])
        ax3.set_title(d)
        ax3.axesPatch.set_facecolor(COLORS[d])
        ax3.axesPatch.set_alpha(0.2)
|
# /usr/bin/env python
# -*- coding: utf-8 -*- #
# Copyright (C) 2018 Houwei and Tuhang
# FileName : predict.py
# Author : Hou Wei
# Version : V1.0
# Date: 2018-01-08
# Description: Train
# History:
import os
import datetime
import numpy as np
from keras.models import load_model
import prepare_data as pd
from postprocess import evaluate as ev
def load_data(sDataPath):
    """Load a saved feature array and append a singleton channel axis.

    Returns the array reshaped to (samples, height, width, 1).
    """
    arr = np.load(sDataPath)
    print("[Log] Read %s shape:%s" % (sDataPath, str(arr.shape)))
    samples, height, width = arr.shape[0], arr.shape[1], arr.shape[2]
    return np.reshape(arr, (samples, height, width, 1))
def predict(sModelPath, npX):
    """Load the Keras model at *sModelPath* and return P(class==1) per sample."""
    model = load_model(sModelPath)
    # model.summary()
    print("[Log] Model %s Predicting..." % sModelPath)
    probabilities = model.predict(npX)
    # keep only the positive-class column of the softmax output
    return probabilities[:, 1]
def save_result(sPath, npY, fThre, iAdd):
    """Binarize predictions at threshold *fThre* and write a submission CSV.

    Each data row is "<signal_id>,<0 or 1>" with signal ids starting at *iAdd*.
    """
    labels = (npY > fThre) + 0  # boolean -> 0/1 ints
    fileRes = open(sPath, "w")
    fileRes.write("signal_id,target\n")
    for iIndex in range(len(labels)):
        fileRes.write(str(iIndex + iAdd) + "," + str(labels[iIndex]) + "\n")
    fileRes.close()
    print("[Log] Sum is %d" % np.sum(labels))
    print("[Log] Write result to %s" % sPath)
def predict_test(sModelPath):
    """Predict for test data, save result to "../list/result.csv"
    """
    features = load_data("../data/mel_test.npy")
    predictions = predict(sModelPath, features)
    # 8712: signal_id offset of the first test sample; 0.5: decision threshold
    save_result("../list/result.csv", predictions, 0.5, 8712)
def predict_train(sModelPath):
    """Predict on the full training set and print evaluation metrics."""
    features = load_data("../data/mel_train.npy")
    scores = predict(sModelPath, features)
    tags = pd.read_tag("../list/metadata_train.csv")
    break_even = ev.PRCurve(scores, tags)
    ev.FScore(scores, tags, break_even)
    ev.ROC_AUC(scores, tags)
    # MCC at the fixed 0.5 threshold and at the PR break-even point
    ev.mcc(scores, tags, 0.5)
    ev.mcc(scores, tags, break_even)
def predict_val(sModelDir):
    """Evaluate every "*.hdf5" checkpoint under *sModelDir* on the val split.

    Prints per-model metrics and finally the checkpoint with the highest
    MCC, checked at both the 0.5 threshold and the PR break-even point.
    """
    sDataPath = "../data/k_folder/mel_val_0.npy"
    sTagPath = "../data/k_folder/valY_0.npy"
    # The validation features/labels are identical for every checkpoint, so
    # load them once (previously they were reloaded inside the loop for
    # every model file).
    npX = load_data(sDataPath)
    npTag = np.load(sTagPath)
    fMaxMcc = 0
    sBestModel = ""
    for sDirPath, lDirNames, lFileNames in os.walk(sModelDir):
        for sFileName in lFileNames:
            if sFileName.find("hdf5") == -1:
                continue
            sModelPath = os.path.join(sDirPath, sFileName)
            npPredict = predict(sModelPath, npX)
            fBEP = ev.PRCurve(npPredict, npTag)
            ev.FScore(npPredict, npTag, fBEP)
            ev.ROC_AUC(npPredict, npTag)
            # Best MCC this model achieves over the two thresholds.
            fModelMcc = max(ev.mcc(npPredict, npTag, 0.5),
                            ev.mcc(npPredict, npTag, fBEP))
            if fModelMcc > fMaxMcc:
                fMaxMcc = fModelMcc
                sBestModel = sFileName
            print()
    print("[Log] Best mcc: %f, Best model:%s" % (fMaxMcc, sBestModel))
def main():
    """Entry point: evaluate all checkpoints in sModelDir on the val split."""
    # Candidate checkpoints kept for reference; only sModelDir is used below.
    sModelPath1 = "../model/cnn_no_enhance/cnn-64-100-0.975.hdf5"
    sModelPath2 = "../model/excellent/cnn-64-66-0.972-n592-p0.529.hdf5"
    sModelDir = "../model/cnn_no_enhance/"
    predict_val(sModelDir)
    # predict_train(sModelPath2)
    # predict_test(sModelPath)
if __name__ == "__main__":
    main()
|
from tf_activation.models.cff import CFF_Model
from tf_activation.models.fff import FFF_Model
from tf_activation.models.ccff import CCFF_Model
import tf_activation.functions.plotnn as pltnn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
persistence_module = tf.load_op_library('/home/gebha095/tensorflow/bazel-bin/tensorflow/core/user_ops/nn_graph_persistence.so')
from time import time
import argparse
import os
from functools import wraps
import errno
import os
import signal
# Download/cache MNIST at import time; labels are one-hot encoded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Default percentile used to threshold edge weights in the filtration.
P = 99
# Default number of examples to build persistent subgraphs from.
UPTO = 1000
def run(output_location, model_directory, trained_model, p=P, classes=[],
        upto=UPTO, test_set=False):
    """Write per-example persistence-filtration CSVs for a trained CFF model.

    Restores the checkpoint *trained_model* from *model_directory*, then for
    each of the first *upto* MNIST examples (optionally restricted to
    *classes*) computes layerwise weight percentiles at percentile *p* and
    writes the induced input-graph filtration under *output_location*,
    grouped by the example's true label.

    NOTE(review): *classes* has a mutable default ([]); it is only read
    here, so this is harmless, but worth keeping in mind.
    """
    model = CFF_Model(W_conv1=[5,5,1,32])
    model.infer_graph_parameters_from_filename(model_directory)
    training_parameters = model.infer_training_parameters_from_filename(trained_model)
    print(training_parameters)
    # Build output directory names from the model/checkpoint path components.
    model_string = model_directory[model_directory.rfind('/')+1:]
    train_string = trained_model[:trained_model.rfind('/')]
    p_string = 'p{}_'.format(p).replace('.', 'p')
    filtration_directory = os.path.join(output_location, model_string, p_string + train_string)
    if not os.path.exists(filtration_directory):
        os.makedirs(filtration_directory)
    config = tf.ConfigProto()
    config.log_device_placement = False
    with tf.device('/cpu:0'):
        # Create the model
        x = tf.placeholder(tf.float32, [None, 784])
        # Define loss and optimizer
        y_ = tf.placeholder(tf.float32, [None, 10])
        # Build the graph for the deep net
        net = model.build_graph(x)
    # NOTE(review): keep_prob is only defined when the model does NOT
    # implement dropout, yet the eval() feed dicts below always reference
    # it -- confirm run() is only used with dropout-free models.
    if not model.implements_dropout:
        keep_prob = tf.placeholder(tf.float32)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=model.get_output_layer()))
    train_step = model.get_train_step(cross_entropy, optimizer=training_parameters['optimizer'], lr=training_parameters['learning_rate'])
    correct_prediction = tf.equal(tf.argmax(model.get_output_layer(), 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.device('/cpu:0'):
        saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
        saver.restore(sess, os.path.join(model_directory, trained_model))
        filtration_suffix = ''
        if test_set:
            X = mnist.test.images
            Y = mnist.test.labels
            filtration_suffix = '_test.csv'
        else:
            X = mnist.train.images
            Y = mnist.train.labels
            filtration_suffix = '_train.csv'
        for i in range(upto):
            print("i: {}".format(i))
            correct_label = np.argmax(Y[i])
            # skip this iteration if we come across a class we are not interested in
            if len(classes) > 0 and correct_label not in classes:
                continue
            # One output subdirectory per true label.
            filtration_info_loc = os.path.join(filtration_directory, str(correct_label))
            if not os.path.exists(filtration_info_loc):
                os.makedirs(filtration_info_loc)
            filtration_filename = os.path.join(filtration_info_loc, str(i)+filtration_suffix)
            # filtration_extra_filename = os.path.join(filtration_info_loc, str(i) + '.csv')
            # Duplicate the single example: the persistence ops take a pair.
            test_inputs = np.stack((X[i], X[i]))
            test_labels = np.stack((Y[i], Y[i]))
            # Per-layer weight percentiles used as the filtration thresholds.
            percentiles = persistence_module.layerwise_percentile(model.get_persistence_structure(),
                                                                  model.get_persistence_numbers(),
                                                                  [p,p,p])
            ps1 = percentiles.eval(feed_dict={x: test_inputs[0:1], keep_prob:1.0})
            # ps2 = percentiles.eval(feed_dict={x: test_inputs[0:1], keep_prob:1.0})
            result = persistence_module.input_graph_filtration(model.get_persistence_structure(),
                                                               model.get_persistence_numbers(),
                                                               np.stack((ps1, ps1)),
                                                               "_")
            r = result.eval(feed_dict={x: test_inputs[0:1], keep_prob:1.0})
            np.savetxt(filtration_filename, r, delimiter=',')
def get_train_set(classes):
    """Return MNIST training images/labels, optionally restricted to *classes*."""
    dataset = input_data.read_data_sets('MNIST_data', one_hot=True)
    images, labels = dataset.train.images, dataset.train.labels
    if len(classes) == 0:
        return images, labels
    # keep only rows whose one-hot argmax is one of the requested classes
    keep = np.where(np.isin(np.argmax(labels, axis=1), classes))
    return images[keep], labels[keep]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o","--output_location",
required=True,
help="directory in which to store subgraph representations")
parser.add_argument("-m","--model_directory",
required=True,
help="directory location of trained model to produce graph")
parser.add_argument("-t","--trained_model",
required=True,
help="name of trained model checkpoint")
parser.add_argument("-p","--percentile",
type=float,
required=False,
default=P,
help="The percentile filtration of edge weights")
parser.add_argument('-c','--classes',
required=False,
nargs='+',
default=[],
help='List of classes to train on (0-9)')
parser.add_argument('-ut','--upto',
type=int,
required=False,
default=UPTO,
help="Number examples to create persistent subgraphs from")
parser.add_argument('-te','--test',
required=False,
action='store_true',
help="Whether to run on test set")
args = parser.parse_args()
classes = list(map(lambda x: int(x), args.classes))
run(args.output_location, args.model_directory, args.trained_model, p=args.percentile,
classes=classes, upto=args.upto, test_set=args.test)
|
x = [2,54,-2,7,12,98]
#max value in array
print "Max value is : ", max(x)
#min value in array
print "Min value is : ", min(x)
#length of given array
print len(x)
#index of given array
print "Index for -2 : ",x.index(-2)
#append the number -77 to an array
x.append(-77)
print "Updated List : ", x
#append the string to an array
x.append('hello')
print 'Added the string : ', x
#pop the value in the array
x.pop(3)
print "New array wihtout value of index 3 : ", x
#removing all negative numbers
x.remove(-77 and -2)
print "Removed negative numbers : ", x
#insert number to index 4
x.insert( 4, 2017)
print "New list with inserted number to index [4] : ", x
#sort an array
x.sort()
print "Sorted Array : ", x
#revers an Array
x.reverse()
print "Reversed array : ", x
#extend an array
y = ['coding', 'dojo', 2017]
x.extend(y)
print "Extended List : ", x
|
#!/usr/bin/env python
'''
python script to submit atat jobs to the queue
usage:
In a directory with a str.out file, run this script.
To use it on many files, use this command:
foreachfile -e -d 3 wait run_atat_vasp.py
Execute the specified command in every first-level subdirectory containing the file filename.
The -e option cause foreachfile to skip directories containing the file "error".
The -d option specifies to go down to lower level subdirectories as well (the default is 1).
'''
import os, shutil, sys
from subprocess import Popen, PIPE
from ase.calculators.vasp import *
import ase.io
def run_atat_vasp():
# We need to create a customized vasp.wrap file
if os.path.exists('vasp.wrap'):
pass
elif os.path.exists('../vasp.wrap'):
shutil.copy('../vasp.wrap','vasp.wrap')
elif os.path.exists('../../vasp.wrap'):
shutil.copy('../../vasp.wrap','vasp.wrap')
else:
raise Exception, 'no vasp.wrap found in ../vasp.wrap or ../../vasp.wrap'
#Now we create the vasp input files so we can compute the number
#of bands we want
os.system('str2ezvasp') # creates vasp.in
os.system('ezvasp -n vasp.in') # creates VASP input files
# now we have a minimal setup we can read into ase, compute the
# nbands we want.
atoms = ase.io.read('POSCAR')
calc = Vasp()
de = calc.get_default_number_of_electrons()
# de is a list of tuples (symbol, nvalence) [('Fe',10.00),('Ni',8.00)]
# convert to a dictionary to make it easy to add them up.
default_electrons = {}
for sym,n in de:
default_electrons[sym] = n
NELEC = 0
for atom in atoms:
NELEC += default_electrons[sym]
NIONS = len(atoms)
# make sure there are always at least 8 bands for one atom, with
# one valence electron, you can get 3 bands with this, which is
# not enough.
NBANDS = max(8, NELEC*0.65 + 2*NIONS)
# removes VASP input files because they will be regenerated by
# runstruct_vasp
os.system('cleanvasp')
f = open('vasp.wrap','r')
lines = f.readlines()
f.close()
# now we need to modify vasp.in so that runstruct will create the
# right INCAR with NBANDS in it.
lines.insert(1,'NBANDS = %i\n' % int(NBANDS))
# Now we check if magnetic moments should be added in.
magnetic_species = {'Fe':2.5,
'Ni':2}
magnetic = False
magmoms = []
for atom in atoms:
if atom.symbol in magnetic_species:
magnetic = True
magmoms.append(magnetic_species[atom.symbol])
else:
magmoms.append(0)
if magnetic:
lines.append('MAGMOM = %s\n' % ' '.join([str(x) for x in magmoms]))
lines.append('ISPIND = 2\n')
lines.append('ISPIN = 2\n')
f = open('vasp.wrap','w')
f.writelines(lines)
f.close()
os.system('str2ezvasp') # remake vasp.in
# Now submit the job.
script = '''
#!/bin/bash
cd $PBS_O_WORKDIR
rm -f error # remove these if they are hanging around
rm -f energy
rm -f wait # we are about to start, so remove wait
runstruct_vasp -p
rm -f jobid # after job is over, remove jobid file
# end
'''
if os.path.exists('jobid'):
print 'jobid file exists in %s. Exiting' % os.getcwd()
sys.exit()
# submit a job to the queue
p = Popen(['qsub',
'-joe',
'-N',
"%s" % os.getcwd(),
'-l walltime=168:00:00'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate(script)
f = open('jobid','w')
f.write(out)
f.close()
print '|[[shell:qstat -f %s][%s]]|' % (out.strip(),out.strip())
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    (options, args) = parser.parse_args()
    CWD = os.getcwd()
    if len(args) > 0:
        # Run once inside each directory given on the command line,
        # always returning to the starting directory afterwards.
        for arg in args:
            try:
                os.chdir(arg)
                run_atat_vasp()
            finally:
                os.chdir(CWD)
    else:
        # No arguments: operate on the current directory.
        run_atat_vasp()
|
"""
File name: grid.py
Author: ngocviendang
Date created: July 13, 2020
This file creat the grid for the images.
"""
import argparse
import os
import re
import sys

import numpy as np
import nibabel as nib

from captcha.utils.helper import load_nifti_mat_from_file
from captcha.utils.helper import create_and_save_nifti
from captcha.utils.helper import getAllFiles
def main(args):
    """Draw patch-boundary grids over each brain volume and save as NIfTI.

    Pairs every '*_img' volume under args.original_data with its '*_mask'
    brain mask, marks the borders of the 32-voxel patch tiling on each
    axial slice containing brain, and writes '<prefix>_grid.nii' files to
    args.grid_filepath.
    """
    original_data_dir = args.original_data
    grid_filepath = args.grid_filepath
    patch_size = 32
    if not os.path.exists(grid_filepath):
        os.makedirs(grid_filepath)
    unfiltered_filelist = getAllFiles(original_data_dir)
    # NOTE: requires "import re" at module level (missing in the original).
    input_list = sorted(item for item in unfiltered_filelist if re.search('_img', item))
    mask_list = sorted(item for item in unfiltered_filelist if re.search('_mask', item))
    print(input_list)
    print(mask_list)
    # load image, mask and label stacks as matrices
    for i, j in enumerate(input_list):
        print('> Loading image...')
        img_mat = load_nifti_mat_from_file(j)
        print('Loading mask...')
        mask_mat = load_nifti_mat_from_file(mask_list[i])
        # the grid is going to be saved in this matrix
        prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
        x_dim, y_dim, z_dim = prob_mat.shape
        # get the x, y and z coordinates where there is brain
        x, y, z = np.where(mask_mat)
        print('x shape:', x.shape)
        print('y shape:', y.shape)
        print('z shape:', z.shape)
        # get the z slices with brain
        z_slices = np.unique(z)
        # proceed slice by slice
        for l in z_slices:
            slice_vox_inds = np.where(z == l)
            # find all x and y coordinates with brain in given slice
            x_in_slice = x[slice_vox_inds]
            y_in_slice = y[slice_vox_inds]
            # find min and max x and y coordinates
            slice_x_min = min(x_in_slice)
            slice_x_max = max(x_in_slice)
            slice_y_min = min(y_in_slice)
            slice_y_max = max(y_in_slice)
            # calculate number of patches in x and y direction in given slice
            # BUGFIX: np.int was deprecated and removed from NumPy (1.24);
            # use the builtin int instead.
            num_of_x_patches = int(np.ceil((slice_x_max - slice_x_min) / patch_size))
            num_of_y_patches = int(np.ceil((slice_y_max - slice_y_min) / patch_size))
            for m in range(num_of_x_patches):
                for n in range(num_of_y_patches):
                    # find the starting and ending x and y coordinates of given patch
                    patch_start_x = slice_x_min + patch_size * m
                    patch_end_x = slice_x_min + patch_size * (m + 1)
                    patch_start_y = slice_y_min + patch_size * n
                    patch_end_y = slice_y_min + patch_size * (n + 1)
                    # clamp patches that would run past the volume bounds
                    # NOTE(review): a patch ending exactly at x_dim/y_dim is
                    # not clamped, so prob_mat[patch_end_x] below could still
                    # index out of bounds for unluckily-sized volumes -- confirm.
                    if patch_end_x > x_dim:
                        patch_end_x = slice_x_max
                        patch_start_x = slice_x_max - patch_size
                    if patch_end_y > y_dim:
                        patch_end_y = slice_y_max
                        patch_start_y = slice_y_max - patch_size
                    # paint the four borders of this patch on the slice
                    prob_mat[patch_start_x: patch_end_x, patch_start_y, l] = 1
                    prob_mat[patch_start_x: patch_end_x, patch_end_y, l] = 1
                    prob_mat[patch_start_x, patch_start_y: patch_end_y, l] = 1
                    prob_mat[patch_end_x, patch_start_y: patch_end_y, l] = 1
        # SAVE AS NIFTI
        # NOTE(review): assumes grid_filepath ends with a path separator;
        # otherwise the filename prefix is glued onto the directory name.
        create_and_save_nifti(prob_mat, grid_filepath + j.split(os.sep)[-1].split('_')[0] + '_grid.nii')
    print('DONE')
def parse_arguments(argv):
    """Parse command-line options for the grid-generation script."""
    parser = argparse.ArgumentParser()
    options = [
        ("--original_data", 'the filename path of the training set.'),
        ("--grid_filepath", 'The filename path for saving the grid-mask matrix.'),
    ]
    for flag, help_text in options:
        parser.add_argument(flag, type=str, help=help_text)
    return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
from enum import Enum
from solutions import helpers
import numpy as np
import re
from dataclasses import dataclass, replace
# Widen numpy's print width so large arrays are not wrapped.
np.set_printoptions(edgeitems=30, linewidth=100000)
filename = 'input'
# filename = 'test'
# Simulation length in minutes for each blueprint.
n_minutes = 32
strings = helpers.read_each_line_as_string(filename)
class BuildOption(Enum):
    """What to build (or not) during one minute of the simulation."""
    NOTHING = 0
    OREBOT = 1
    CLAYBOT = 2
    OBSIBOT = 3
    GEOBOT = 4
@dataclass
class Supply:
    """Resource stockpiles, robot counts and remaining time for one state."""
    ore: int = 0
    clay: int = 0
    obsidian: int = 0
    geodes: int = 0
    orebot: int = 0   # robots collecting ore
    claybot: int = 0  # robots collecting clay
    obsibot: int = 0  # robots collecting obsidian
    geobot: int = 0   # robots cracking geodes
    time: int = 0     # minutes left (counts DOWN to 0)
@dataclass
class Blueprint:
    """Robot costs for one blueprint plus the search helpers that use them."""
    blueprint_id: int
    orebot_cost_ore: int
    claybot_cost_ore: int
    obsibot_cost_ore: int
    obsibot_cost_clay: int
    geobot_cost_ore: int
    geobot_cost_obsidian: int
    def determine_eligible_builds(self, supply):
        """Return the robots affordable right now, plus NOTHING as fallback."""
        eligible_builds = []
        if supply.ore >= self.geobot_cost_ore and supply.obsidian >= self.geobot_cost_obsidian:
            eligible_builds.append(BuildOption.GEOBOT)
        if supply.ore >= self.obsibot_cost_ore and supply.clay >= self.obsibot_cost_clay:
            # if (supply.obsidian + supply.obsibot * supply.time) <= (self.geobot_cost_obsidian * (supply.time-1)):
            eligible_builds.append(BuildOption.OBSIBOT)
        if supply.ore >= self.claybot_cost_ore:
            # if (supply.clay + supply.claybot * supply.time) <= (self.obsibot_cost_clay * (supply.time-2)):
            eligible_builds.append(BuildOption.CLAYBOT)
        if supply.ore >= self.orebot_cost_ore:
            # if (supply.ore + supply.orebot * supply.time) <= max(self.claybot_cost_ore * (supply.time - 3), self.obsibot_cost_ore * (supply.time - 2), self.geobot_cost_ore * (supply.time - 1)):
            eligible_builds.append(BuildOption.OREBOT)
        eligible_builds.append(BuildOption.NOTHING)
        return eligible_builds
    def determine_potential_next_builds(self, supply):
        """Robots that could still be afforded before time runs out.

        The dt offsets leave enough minutes for the build to pay off
        (a robot finished on the last minute produces nothing).
        """
        potential_next_builds = []
        if self.will_have_enough_ore_to_build(supply, self.orebot_cost_ore, dt=-2):
            potential_next_builds.append(BuildOption.OREBOT)
        if self.will_have_enough_ore_to_build(supply, self.claybot_cost_ore, dt=-2):
            potential_next_builds.append(BuildOption.CLAYBOT)
        if self.will_have_enough_clay_to_build(supply, self.obsibot_cost_clay, dt=-2) and self.will_have_enough_ore_to_build(supply, self.obsibot_cost_ore, dt=-2):
            potential_next_builds.append(BuildOption.OBSIBOT)
        if self.will_have_enough_ore_to_build(supply, self.geobot_cost_ore, dt=-1) and self.will_have_enough_obsidian_to_build(supply, self.geobot_cost_obsidian, dt=-1):
            potential_next_builds.append(BuildOption.GEOBOT)
        return potential_next_builds
    def will_have_enough_obsidian_to_build(self, supply: Supply, cost, dt=0):
        """True if projected obsidian (stock + production over time+dt) covers cost."""
        return supply.obsidian + (supply.obsibot * (supply.time+dt)) >= cost
    def will_have_enough_clay_to_build(self, supply: Supply, cost, dt=0):
        """True if projected clay (stock + production over time+dt) covers cost."""
        return supply.clay + (supply.claybot * (supply.time+dt)) >= cost
    def will_have_enough_ore_to_build(self, supply: Supply, cost, dt=0):
        """True if projected ore (stock + production over time+dt) covers cost."""
        return supply.ore + (supply.orebot * (supply.time+dt)) >= cost
    def build(self, supp, build_option: BuildOption):
        """Advance one minute and deduct the cost / add the robot for the build.

        Mutates *supp* in place and returns a fresh copy of it.
        """
        supp.time += -1
        if build_option == BuildOption.OREBOT:
            supp.ore -= self.orebot_cost_ore
            supp.orebot += 1
        elif build_option == BuildOption.CLAYBOT:
            supp.ore -= self.claybot_cost_ore
            supp.claybot += 1
        elif build_option == BuildOption.OBSIBOT:
            supp.ore -= self.obsibot_cost_ore
            supp.clay -= self.obsibot_cost_clay
            supp.obsibot += 1
        elif build_option == BuildOption.GEOBOT:
            supp.ore -= self.geobot_cost_ore
            supp.obsidian -= self.geobot_cost_obsidian
            supp.geobot += 1
        return replace(supp)
    def collect(self, supp):
        """Add each robot's output to the stockpiles; returns a fresh copy."""
        supp.ore += supp.orebot
        supp.clay += supp.claybot
        supp.obsidian += supp.obsibot
        supp.geodes += supp.geobot
        return replace(supp)
    def build_next_build(self, supply, next_build):
        """Idle until *next_build* is affordable, then build it.

        Returns the resulting state, or None when time runs out before the
        build becomes possible.
        """
        new_supply = replace(supply)
        while new_supply.time > 0:
            if next_build in self.determine_eligible_builds(new_supply):
                # collect BEFORE building: a robot built this minute
                # does not produce this minute.
                new_supply = self.collect(new_supply)
                new_supply = self.build(new_supply, next_build)
                return new_supply
            new_supply = self.collect(new_supply)
            new_supply = self.build(new_supply, BuildOption.NOTHING)
def evaluate_blueprint(blueprint: Blueprint):
    """DFS over "which robot to build next" decisions; returns max geodes.

    States are pruned with an optimistic bound: current geodes, plus what
    the existing geobots will still produce, plus one extra geode per
    remaining minute (as if a new geobot were built every minute).
    """
    starting_supply = Supply(orebot=1, time=n_minutes)
    max_geodes = 0
    supply_paths = [starting_supply]
    def worse_than_max(supply: Supply):
        # Optimistic upper bound on geodes reachable from this state.
        max_possible = supply.geodes + supply.time * supply.geobot + sum(range(supply.time))
        if max_possible < max_geodes:
            return True
        return False
    while len(supply_paths) > 0:
        supply = supply_paths.pop()
        # print(supply)
        if supply.geodes > max_geodes:
            max_geodes = supply.geodes
            # max_supply = replace(supply)
            print("max_geodes", max_geodes)
            print(supply)
        if supply.time > 0:
            potential_next_builds = blueprint.determine_potential_next_builds(supply)
            if len(potential_next_builds) == 0:
                # Nothing can ever be built again: idle out the clock.
                new_supply = replace(supply)
                while new_supply.time > 0:
                    new_supply = blueprint.collect(new_supply)
                    new_supply = blueprint.build(new_supply, BuildOption.NOTHING)
                if not worse_than_max(new_supply):
                    supply_paths.append(new_supply)
            for potential_next_build in potential_next_builds:
                # build_next_build returns None if time expires first.
                new_supply = blueprint.build_next_build(replace(supply), potential_next_build)
                if new_supply is not None and not worse_than_max(new_supply):
                    supply_paths.append(new_supply)
    return max_geodes
# Product of the max-geode counts for the first three blueprints.
# NOTE(review): despite the name, this is the part-2 answer shape
# (product over first 3 blueprints), not the part-1 "quality level"
# (sum of id * geodes) -- confirm intent.
total_quality_level = 1
for string in strings[0:3]:
    m = re.match(r'Blueprint (\d+): Each ore robot costs (\d+) ore. Each clay robot costs (\d+) ore. Each obsidian robot costs (\d+) ore and (\d+) clay. Each geode robot costs (\d+) ore and (\d+) obsidian.', string)
    (
        blueprint_id,
        orebot_cost_ore,
        claybot_cost_ore,
        obsibot_cost_ore,
        obsibot_cost_clay,
        geobot_cost_ore,
        geobot_cost_obsidian
    ) = (int(x) for x in m.groups())
    bp = Blueprint(
        blueprint_id,
        orebot_cost_ore,
        claybot_cost_ore,
        obsibot_cost_ore,
        obsibot_cost_clay,
        geobot_cost_ore,
        geobot_cost_obsidian
    )
    print("BLUEPRINT", bp.blueprint_id)
    print(bp)
    quality_level = evaluate_blueprint(bp)
    total_quality_level *= quality_level
print(total_quality_level)
|
#!/usr/bin/python
import sys
from multiprocessing import Process
from primes import primes # python set of prime numbers
# Input file: first CLI argument, defaulting to the sample case.
input_file = sys.argv[1] if len(sys.argv) > 1 else 'c_sample.in'
# Bases 2..10 in which each jamcoin interpretation must be composite.
radixes = (2, 3, 4, 5, 6, 7, 8, 9, 10)
# coin string -> {radix (as str) -> one nontrivial divisor}
divisors_dict = {}
def is_prime(n): # Modified from http://stackoverflow.com/questions/15285534/isprime-function-for-python-language
    """Deterministic primality test via 6k+-1 trial division.

    Uses the imported `primes` set as a fast positive lookup before
    falling back to trial division.
    """
    if n == 2 or n == 3: return True
    if n < 2 or n%2 == 0: return False
    if n < 9: return True
    if n%3 == 0: return False
    # BUGFIX: `primes` is a set of prime numbers, so membership means n IS
    # prime; the original returned False here, misclassifying every prime
    # contained in the set.
    if n in primes: return True
    r = int(n**0.5)
    f=5
    while f <= r:
        if n%f == 0: return False
        if n%(f+2) == 0: return False
        f +=6
    return True
def factors(n): # Taken from http://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
    # Return the set of ALL divisors of n (including 1 and n).
    # NOTE(review): relies on Python 2's builtin reduce.
    return set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
def quick_factor(n):
    """Return the smallest nontrivial divisor of *n*, or None if none exists."""
    limit = int(n**0.5) + 1
    candidate = 2
    while candidate < limit:
        if n % candidate == 0:
            return candidate
        candidate += 1
def build_divisiors_dict(valid_coin):
    # Record, for each radix, one nontrivial divisor of the coin's value.
    # NOTE(review): assumes divisors_dict[valid_coin] was already created
    # by valid(); raises KeyError otherwise.
    for i in radixes:
        computed = int(valid_coin, base=i) # Gross should have probably made a class to maintain this object
        # decent_divisor = list(factors(computed))
        # decent_divisor = decent_divisor[len(decent_divisor)/2]
        decent_divisor = quick_factor(computed)
        divisors_dict[valid_coin][str(i)] = decent_divisor
def valid(jamcoin):
    # A jamcoin is valid when its interpretation is composite in every
    # base 2..10. Side effect: registers the coin in divisors_dict so
    # build_divisiors_dict can fill in its divisors later.
    divisors_dict[jamcoin] = {}
    for i in radixes:
        computed = int(jamcoin, base=i)
        if is_prime(computed):
            return False
    return True
def generate_coins(NJ):
digits, needed_coins = NJ.split(' ')
valid_coins = set()
def add_coin(possible_coin):
if valid(possible_coin):
valid_coins.add(possible_coin)
add_divisors = Process(target=build_divisiors_dict(possible_coin))
add_divisors.start()
between_digits = int(digits) - 2
end = int('1'*between_digits, base=2)
for i in range(0, end+1):
if len(valid_coins) < int(needed_coins):
between_string = format(bin(i)[2:]).zfill(between_digits)
possible_coin = '1' + between_string + '1'
check_coin = Process(target=add_coin(possible_coin))
check_coin.start()
for coin in valid_coins:
divisors = [divisors_dict[coin][str(radix)] for radix in radixes]
print coin + ' ' + ' '.join([str(i) for i in divisors])
# Read the number of test cases and emit one "Case #n:" block per case.
with open(input_file, 'r') as f:
    test_cases = int(f.readline())
    assert(1<=test_cases<=100)
    for test in xrange(0, test_cases):
        NJ = f.readline().rstrip('\n')
        print "Case #{}:".format(test+1)
        generate_coins(NJ)
# Redundant: the with-statement above already closed f.
f.close()
|
import re
import datetime
import numpy as np
from pandas import NA
import functools
from fuzzywuzzy import process
from Entires import SourceColumns as SC, ConvertedColumns as CC
from Entires.GeoCoordinatesFinder import GeoFinder
import Settings
def get_converter(column):
    """Return the conversion callable for a ConvertedColumns member.

    Each converter takes one source row (a mapping of source column name
    to raw value) and returns the converted value, or a sentinel
    (pandas.NA / Settings.EMPTY_*) when the value cannot be determined.
    Raises RuntimeError for columns with no registered converter.
    """
    if column in [CC.Source, CC.Region, CC.StatusDecisionOfHospitalization, CC.StatusDecisionOfObservation,
                  CC.TestInformation, CC.TypeOfPneumonia, CC.NameOfHospital]:
        # Plain categorical pass-through columns.
        return functools.partial(_get_category_without_replacement, column_name=column.get_name())
    elif column == CC.Country:
        return _get_country
    elif column == CC.Age:
        return _get_age
    elif column == CC.Death:
        return _get_death
    elif column == CC.Sex:
        return _get_sex
    elif column == CC.DecisionOfAmbulance:
        return _get_decision_of_ambulance
    elif column == CC.ZodiacSign:
        return _get_zodiac_sign
    elif column == CC.PrimaryElement:
        return _get_primary_element
    elif column in [CC.PeriodOfObservation, CC.PeriodOfQuarantine, CC.PeriodOfAftercare]:
        # Period columns are derived from the matching admission/departure
        # date columns ("PeriodOfX" -> DateAdmissionToX / DateDepartureFromX).
        column_name = column.get_name().replace('PeriodOf', '')
        start_name='DateAdmissionTo' + column_name
        finish_name='DateDepartureFrom' + column_name
        return functools.partial(_get_period, start_name=start_name, finish_name=finish_name)
    elif column == CC.PeriodFromHospitalToAftercare:
        return _get_period_from_hospital_to_aftercare
    elif column == CC.PeriodFromAmbulanceToHospital:
        return _get_period_from_ambulance_to_hospital
    elif column == CC.PeriodOfHospitalization:
        return _get_period_hospitalization
    elif column == CC.DateAdmissionToHospital:
        return _get_date_admission_to_hospital
    elif column == CC.DateDepartureFromHospital:
        return _get_date_departure_from_hospital
    elif column == CC.PeriodDiseaseForHospitalization:
        return _get_period_disease_for_hospitalization
    elif column in [CC.DateCreating, CC.DateAnalysis, CC.Birthday]:
        return functools.partial(_get_date, column_name=column.get_name())
    elif column == CC.Birthmonth:
        return _get_birthmonth
    elif column in [CC.WeekDayArrivalAmbulance, CC.WeekDayAdmissionToHospital, CC.WeekDayDepartureFromHospital]:
        # Week-day columns read the corresponding "Date..." source column.
        sc_name = column.get_name().replace('WeekDay', 'Date')
        return functools.partial(_get_week_day, column_name=sc_name)
    elif column == CC.SeverityOfDisease:
        return _get_severity_of_disease
    elif column in CC.all_result:
        return functools.partial(_get_result, column_name=column.get_name())
    elif column == CC.GroupOfRisk:
        return _get_group_of_risk
    elif column in [CC.DIC, CC.MV, CC.ECMO, CC.DidNotTravel, CC.NotFoundAtHome, CC.AntiviralTreatment]:
        return functools.partial(_convert_yes_no, column_name=column.get_name())
    elif column in [CC.ImmunosuppressantsDrugs, CC.TreatmentHivInfectionDrugs, CC.AntiviralDrugs]:
        return functools.partial(_get_pharmacological_group, column_name=column.get_name())
    elif column == CC.TransferredToHospitalFromAnotherHospital:
        return _get_transferred_to_hospital_from_another_hospital
    elif column == CC.TransferredToHospitalFromQuarantine:
        return _get_transferred_to_hospital_from_quarantine
    elif column in [CC.SaturationLevel]:
        return functools.partial(_get_int8, column_name=column.get_name())
    elif column == CC.PhoneOperator:
        return _get_phone_operator
    elif column in [CC.Longitude, CC.Latitude]:
        # One GeoFinder instance is created here and captured by the
        # returned partial, so geocoding state is shared across rows.
        geo_finder = GeoFinder(silent=True)
        priority_addresses = ['AddressArrival', 'AddressOfResidence']
        priority_addresses.extend(map(SC._Column.get_name, SC.all_comment))
        return functools.partial(_get_coordinate, geo_finder=geo_finder, priority_addresses=priority_addresses, coordinate_name=column.get_name())
    else:
        raise RuntimeError('An undefined column is used for the calculated column')
def _get_category_without_replacement(row, column_name):
value = row[column_name]
if value:
return value
return NA
def _get_country(row):
value = row['Country']
if not value:
return NA
value = value.upper()
if value in ['РОССИЯ', 'УКРАИНА', 'АРМЕНИЯ', 'АЗЕРБАЙДЖАН', 'КАЗАХСТАН', 'ТАДЖИКИСТАН', 'КИРГИЗИЯ', 'ГРУЗИЯ', 'БЕЛАРУСЬ', 'АБХАЗИЯ', 'УЗБЕКИСТАН', 'МОЛДОВА, РЕСПУБЛИКА']:
return NA
return value
def _get_zodiac_sign(row):
birthday = row['Birthday']
if not birthday:
return NA
day = int(birthday[:2])
month = int(birthday[3:5])
if day==1 and month==1:
#culling 1 january
return NA
if (month==12 and day>=23) or (month==1 and day<=20):
index = 0
elif (month==1 and day>=21) or (month==2 and day<=19):
index = 1
elif (month==2 and day>=20) or (month==3 and day<=20):
index = 2
elif (month==3 and day>=21) or (month==4 and day<=20):
index = 3
elif (month==4 and day>=21) or (month==5 and day<=21):
index = 4
elif (month==5 and day>=22) or (month==6 and day<=21):
index = 5
elif (month==6 and day>=22) or (month==7 and day<=22):
index = 6
elif (month==7 and day>=23) or (month==8 and day<=21):
index = 7
elif (month==8 and day>=22) or (month==9 and day<=23):
index = 8
elif (month==9 and day>=24) or (month==10 and day<=23):
index = 9
elif (month==10 and day>=24) or (month==11 and day<=22):
index = 10
elif (month==11 and day>=23) or (month==12 and day<=22):
index = 11
return Settings.VALUES_ZODIAC_SIGNS[index]
def _get_primary_element(row):
    """Derive the classical element from the row's zodiac sign (NA-safe)."""
    zodiac_sign = _get_zodiac_sign(row)
    if zodiac_sign is NA:
        return NA
    element_by_sign = {
        'Aries': 'Fire', 'Leo': 'Fire', 'Sagittarius': 'Fire',
        'Gemini': 'Air', 'Libra': 'Air', 'Aquarius': 'Air',
        'Taurus': 'Ground', 'Virgo': 'Ground', 'Capricorn': 'Ground',
        'Cancer': 'Water', 'Scorpio': 'Water', 'Pisces': 'Water',
    }
    element = element_by_sign.get(zodiac_sign)
    if element is None:
        raise RuntimeError('It is impossible to determine the element for this zodiac sign: ' + zodiac_sign)
    return element
def _get_source(row):
    """Pass the 'Source' cell through unchanged (no NA normalisation here)."""
    return row['Source']
def _get_age(row):
    """Patient age in whole years at disease onset, as np.int8.

    Onset is 'DateCreating', falling back to 'DateAnalysis'.  Returns
    Settings.EMPTY_INT when either date is missing, the birthday is after the
    onset date, or the age exceeds 127 (the np.int8 limit).
    """
    date_disease = _get_date(row, 'DateCreating')
    if date_disease == Settings.EMPTY_DATE:
        date_disease = _get_date(row, 'DateAnalysis')
        if date_disease == Settings.EMPTY_DATE:
            return Settings.EMPTY_INT
    birthday = _get_date(row, 'Birthday')
    if birthday == Settings.EMPTY_DATE:
        return Settings.EMPTY_INT
    period = (date_disease - birthday)/np.timedelta64(1, 'D')
    if period < 0:
        # birthday after onset date: inconsistent record
        return Settings.EMPTY_INT
    period /= 365.25
    if period > 127:
        # would overflow np.int8
        return Settings.EMPTY_INT
    return np.int8(period)
def _get_period(row, start_name, finish_name):
    """Whole days between two date columns as np.int8; Settings.EMPTY_INT when
    either date is missing or outside [FIRST_DATE, LAST_DATE], the interval is
    negative, or it exceeds 127 days.

    BUG FIX: added the > 127 guard — np.int8 silently overflows above 127 and
    produced garbage negative periods (same guard as in _get_age).
    """
    start = _get_date(row, start_name)
    if start==Settings.EMPTY_DATE or start<Settings.FIRST_DATE or start>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    finish = _get_date(row, finish_name)
    if finish==Settings.EMPTY_DATE or finish<Settings.FIRST_DATE or finish>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    period = (finish - start)/np.timedelta64(1, 'D')
    if period < 0 or period > 127:
        return Settings.EMPTY_INT
    return np.int8(period)
def _get_period_hospitalization(row):
    """Length of the hospital stay in days as np.int8.

    Admission falls back to ambulance arrival plus a fixed lag; discharge
    falls back to aftercare admission minus a fixed lag.  Returns
    Settings.EMPTY_INT when dates are unusable, the stay is negative, or it
    exceeds 127 days (np.int8 limit — BUG FIX, same guard as in _get_age).
    """
    start = _get_date(row, 'DateAdmissionToHospital')
    if start == Settings.EMPTY_DATE:
        start = _get_date(row, 'DateArrivalAmbulance')
        if start == Settings.EMPTY_DATE:
            return Settings.EMPTY_INT
        else:
            start += Settings.PERIOD_FROM_AMBULANCE_TO_HOSPITALIZATION
    if start<Settings.FIRST_DATE or start>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    finish = _get_date(row, 'DateDepartureFromHospital')
    if finish == Settings.EMPTY_DATE:
        finish = _get_date(row, 'DateAdmissionToAftercare')
        if finish == Settings.EMPTY_DATE:
            return Settings.EMPTY_INT
        else:
            finish -= Settings.PERIOD_FROM_HOSPITALIZATION_TO_AFTERCARE
    if finish<Settings.FIRST_DATE or finish>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    period = (finish - start)/np.timedelta64(1, 'D')
    if period < 0 or period > 127:
        return Settings.EMPTY_INT
    return np.int8(period)
def _get_date_admission_to_hospital(row):
    """Hospital admission date; for records that do show a hospitalization it
    falls back to the ambulance arrival date plus a fixed lag.

    Returns Settings.EMPTY_DATE when no usable date exists or the result is
    outside [FIRST_DATE, LAST_DATE].
    """
    start = _get_date(row, 'DateAdmissionToHospital')
    if start == Settings.EMPTY_DATE:
        if not _exist_hospitalization(row):
            return Settings.EMPTY_DATE
        start = _get_date(row, 'DateArrivalAmbulance')
        if start == Settings.EMPTY_DATE:
            return Settings.EMPTY_DATE
        else:
            # typical delay between ambulance arrival and actual admission
            start += Settings.PERIOD_FROM_AMBULANCE_TO_HOSPITALIZATION
    if start<Settings.FIRST_DATE or start>Settings.LAST_DATE:
        return Settings.EMPTY_DATE
    return start
def _get_date_departure_from_hospital(row):
    """Hospital discharge date; for records that do show a hospitalization it
    falls back to the aftercare admission date minus a fixed lag.

    Returns Settings.EMPTY_DATE when no usable date exists or the result is
    outside [FIRST_DATE, LAST_DATE].
    """
    finish = _get_date(row, 'DateDepartureFromHospital')
    if finish == Settings.EMPTY_DATE:
        if not _exist_hospitalization(row):
            return Settings.EMPTY_DATE
        finish = _get_date(row, 'DateAdmissionToAftercare')
        if finish == Settings.EMPTY_DATE:
            return Settings.EMPTY_DATE
        else:
            # typical delay between discharge and aftercare admission
            finish -= Settings.PERIOD_FROM_HOSPITALIZATION_TO_AFTERCARE
    if finish<Settings.FIRST_DATE or finish>Settings.LAST_DATE:
        return Settings.EMPTY_DATE
    return finish
def _get_period_from_hospital_to_aftercare(row):
    """Days between hospital discharge and aftercare admission, only for rows
    that show a hospitalization; Settings.EMPTY_INT otherwise."""
    if _exist_hospitalization(row):
        return _get_period(row, 'DateDepartureFromHospital', 'DateAdmissionToAftercare')
    return Settings.EMPTY_INT
def _get_period_from_ambulance_to_hospital(row):
    """Days between ambulance arrival and hospital admission, only when the
    ambulance decision was hospitalization; Settings.EMPTY_INT otherwise."""
    decision = _get_decision_of_ambulance(row)
    if decision is not NA and decision == 'Стационар':
        return _get_period(row, 'DateArrivalAmbulance', 'DateAdmissionToHospital')
    return Settings.EMPTY_INT
def _exist_hospitalization(row):
    """True when the record shows a hospitalization, either via the ambulance
    decision or via a transfer from home quarantine."""
    decision = _get_decision_of_ambulance(row)
    if decision is not NA and decision == 'Стационар':
        return True
    transferred = _get_transferred_to_hospital_from_quarantine(row)
    return transferred is not NA and bool(transferred)
def _get_period_disease_for_hospitalization(row):
    """Days from disease registration ('DateCreating') to hospital discharge,
    as np.int8; Settings.EMPTY_INT when either date is unusable, the interval
    is negative, or it exceeds 127 days (np.int8 limit — BUG FIX, matching the
    guard in _get_age)."""
    start = _get_date(row, 'DateCreating')
    if start==Settings.EMPTY_DATE or start<Settings.FIRST_DATE or start>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    finish = _get_date_departure_from_hospital(row)
    if finish==Settings.EMPTY_DATE or finish<Settings.FIRST_DATE or finish>Settings.LAST_DATE:
        return Settings.EMPTY_INT
    period = (finish - start)/np.timedelta64(1, 'D')
    if period < 0 or period > 127:
        return Settings.EMPTY_INT
    return np.int8(period)
def _get_date(row, column_name):
    """Parse a date cell ('DD.MM.YYYY'-style positions) into np.datetime64
    with day precision.

    Returns Settings.EMPTY_DATE for empty cells, the culled 1-January
    placeholder, or years outside 1900..2020.
    """
    current_date = row[column_name]
    if not current_date:
        return Settings.EMPTY_DATE
    day = current_date[:2]
    month = current_date[3:5]
    if day=='01' and month=='01':
        #culling 1 january (over-represented placeholder date)
        return Settings.EMPTY_DATE
    year = current_date[6:]
    year_i = int(year)
    if year_i < 1900 or year_i > 2020:
        return Settings.EMPTY_DATE
    return np.datetime64(year + '-' + month + '-' + day, 'D')
def _get_birthmonth(row):
    """Birth month (1..12) from the 'Birthday' cell; Settings.EMPTY_INT for
    empty cells or the culled 1-January placeholder date."""
    birthday = row['Birthday']
    if not birthday:
        return Settings.EMPTY_INT
    day, month = int(birthday[:2]), int(birthday[3:5])
    if (day, month) == (1, 1):
        # culling 1 january
        return Settings.EMPTY_INT
    return month
def _get_week_day(row, column_name):
    """Weekday 1..7 (Monday = 1) of a date cell; Settings.EMPTY_INT when the
    cell is empty, the date is the culled 1-January placeholder, or the year
    is outside 1900..2020."""
    raw = row[column_name]
    if not raw:
        return Settings.EMPTY_INT
    day, month = int(raw[:2]), int(raw[3:5])
    if (day, month) == (1, 1):
        #culling 1 january
        return Settings.EMPTY_INT
    year = int(raw[6:])
    if not 1900 <= year <= 2020:
        return Settings.EMPTY_INT
    # date.weekday() is 0-based (Monday = 0); shift to 1..7
    return datetime.date(year, month, day).weekday() + 1
def _get_death(row):
    """1 when any result or free-text comment field mentions death, else 0
    (bool stored as int8)."""
    for column in SC.all_result:
        if row[column.get_name()].upper().find('УМЕР') > -1:
            return 1
    #free-form text fields
    death_markers = ['УМЕР', 'СКОНЧАЛ', 'СМЕРТ']
    for column in SC.all_comment:
        text = row[column.get_name()].upper()
        if any(text.find(marker) > -1 for marker in death_markers):
            return 1
    return 0
def _get_sex(row):
    """Sex inferred from the patronymic ending via REPLACEMENTS_SEX; NA when
    the ending is empty or unknown."""
    ending = row['MiddleNameEnding']
    return Settings.REPLACEMENTS_SEX.get(ending, NA) if ending else NA
def _get_decision_of_ambulance(row):
    """Best-effort category of where the ambulance sent the patient.

    Uses the explicit 'DecisionOfAmbulance' cell when present (normalised via
    REPLACEMENTS_DECISION_OF_AMBULANCE); otherwise infers the category from
    which downstream columns are filled.  Returns one of the Russian category
    labels ('Домашний карантин' / 'Обсервация' / 'Стационар') or NA.
    """
    current_value = row['DecisionOfAmbulance']
    if current_value:
        return Settings.REPLACEMENTS_DECISION_OF_AMBULANCE.get(current_value, current_value)
    #after hospitalizataion and obervation patients are taken to aftercare
    for name in SC.all_names_quarantine:
        if row[name]:
            return 'Домашний карантин'
    if row['ResultOfObservation'] in ['Переведен в стационар']:
        return 'Обсервация'
    #transfer from a hospital to an obervation occurs more often than in the opposite direction
    for name in SC.all_names_hospitalizataion:
        if name in ['DIC', 'MV']:
            # DIC/MV columns store the literal 'Нет' for "no", so truthiness alone is not enough
            if row[name] and row[name] != 'Нет':
                return 'Стационар'
        else:
            if row[name]:
                return 'Стационар'
    for name in SC.all_names_obervation:
        if row[name]:
            return 'Обсервация'
    return NA
def _get_severity_of_disease(row):
    """Worst severity recorded across all severity columns
    (see _get_max_severity_of_disease)."""
    columns = [
        'SeverityOfDiseaseAmbulance',
        'SeverityOfDiseaseToHospital',
        'SeverityOfDiseaseInHospital',
        'SeverityOfDiseaseQuarantine',
        'SeverityOfDiseaseCT',
    ]
    return _get_max_severity_of_disease(columns, row)
def _get_max_severity_of_disease(columns, row):
    """Most severe value found in `columns`, ordered by position in
    Settings.VALUES_SEVERITY_OF_DISEASE; NA when all cells are empty."""
    severity_indexes = []
    for name in columns:
        raw = row[name]
        if raw:
            canonical = Settings.REPLACEMENTS_SEVERITY_OF_DISEASE.get(raw, raw)
            severity_indexes.append(Settings.VALUES_SEVERITY_OF_DISEASE.index(canonical))
    if not severity_indexes:
        return NA
    return Settings.VALUES_SEVERITY_OF_DISEASE[max(severity_indexes)]
def _get_result(row, column_name):
    """Outcome cell normalised via REPLACEMENTS_RESULT; NA when empty."""
    value = row[column_name]
    return Settings.REPLACEMENTS_RESULT.get(value, value) if value else NA
def _get_group_of_risk(row):
    """First usable risk-group value across the candidate columns
    (see _get_group_of_risk_for_columns); NA if none."""
    columns = [
        'GroupOfRiskHospitalization',
        'GroupOfRiskObservation',
        'GroupOfRiskQuarantine',
        'GroupOfRiskCT',
    ]
    return _get_group_of_risk_for_columns(columns, row)
def _get_group_of_risk_for_columns(columns, row):
    """Scan `columns` in order and return the first non-empty value after
    applying REPLACEMENTS_GROUP_OF_RISK, skipping values that map to NA."""
    for name in columns:
        raw = row[name]
        if not raw:
            continue
        mapped = Settings.REPLACEMENTS_GROUP_OF_RISK.get(raw, raw)
        if mapped is not NA:
            return mapped
    return NA
def _convert_yes_no(row, column_name):
    """Map 'Да'/'Нет' (case-insensitive) to 1/0; anything else to EMPTY_BOOL
    (bool stored as int8)."""
    normalized = row[column_name].upper()
    if normalized == 'ДА':
        return 1
    if normalized == 'НЕТ':
        return 0
    return Settings.EMPTY_BOOL
def _get_pharmacological_group(row, column_name):
    """1 when the free-text 'ListOfMedicines' fuzzy-matches any drug from the
    group selected by `column_name`; Settings.EMPTY_BOOL otherwise.

    Raises RuntimeError for a column with no configured drug dictionary.
    """
    groups = {
        'ImmunosuppressantsDrugs': Settings.IMMUNOSUPPRESSANT_DRUGS,
        'TreatmentHivInfectionDrugs': Settings.TREATMENT_HIV_INFECTION_DRUGS,
        'AntiviralDrugs': Settings.ANTIVIRAL_DRUGS,
    }
    if column_name not in groups:
        raise RuntimeError('No pharmacological group defined for your data column!')
    dictionary = groups[column_name]
    # treat commas, dots and semicolons as word separators
    separators = str.maketrans(',.;', '   ')
    tokens = row['ListOfMedicines'].lower().translate(separators).split()
    for token in tokens:
        if len(token) < 7:
            # very short tokens produce too many false fuzzy matches
            continue
        _, distance = process.extractOne(token, dictionary)
        if distance > 75:
            return 1
    return Settings.EMPTY_BOOL
def _get_transferred_to_hospital_from_another_hospital(row):
    """Flag (bool stored as int8): was the patient transferred from another
    hospital?  1 when transfer columns are filled; 0 only when the ambulance
    demonstrably brought the patient directly; EMPTY_BOOL otherwise."""
    if row['TransferDate'] or row['InitialHospital']:
        return 1
    decision = _get_decision_of_ambulance(row)
    if decision is not NA and decision == 'Стационар':
        return 0
    return Settings.EMPTY_BOOL
def _get_transferred_to_hospital_from_quarantine(row):
    """True when a home-quarantine patient later shows any hospitalization
    column filled; False otherwise.

    BUG FIX: the DIC/MV branch only compared against 'Нет', so an *empty*
    cell ('' != 'Нет') counted as a hospitalization.  A non-empty value is now
    required, matching the identical branch in _get_decision_of_ambulance.
    """
    decision_of_ambulance = _get_decision_of_ambulance(row)
    if decision_of_ambulance is NA or decision_of_ambulance != 'Домашний карантин':
        return False
    for name in SC.all_names_hospitalizataion:
        if name in ['DIC', 'MV']:
            if row[name] and row[name] != 'Нет':
                return True
        else:
            if row[name]:
                return True
    return False
def _get_int8(row, column_name):
    """Cell cast to np.int8; Settings.EMPTY_INT for empty/falsy cells."""
    raw = row[column_name]
    return np.int8(raw) if raw else Settings.EMPTY_INT
def _get_phone_operator(row):
    """Phone-operator name found in the residence address or any comment
    field; empty string when no phone number is recognised."""
    texts = [row['AddressOfResidence']]
    texts.extend(row[column.get_name()] for column in SC.all_comment)
    for text in texts:
        operator = _get_phone_operator_from_column(text)
        if operator is not None:
            return operator
    return ''
def _get_phone_operator_from_column(column_value):
    """Extract a phone-operator name from a phone number embedded in free
    text, or None when no number / operator can be determined.
    """
    if not column_value:
        return None
    #some numbers in phone number are replaced with stars! First number always 4 or 9
    # Spaces inside the pattern are for readability only and are removed below.
    pattern = '[78\*]{1} [-\s]* [\(]? ([49\*]{1} [\d\*]{2}) [\)]? [-\s]* [\d\*]{3} [-\s]* [\d\*]{2} [-\s]* [\d\*]{2}'
    pattern = pattern.replace(' ', '')
    result = re.search(pattern, column_value)
    if result is None:
        #without leading 7 or 8
        pattern = '[\(]? ([49\*]{1} [\d\*]{2}) [\)]? [-\s]* [\d\*]{3} [-\s]* [\d\*]{2} [-\s]* [\d\*]{2}'
        pattern = pattern.replace(' ', '')
        result = re.search(pattern, column_value)
        if result is None:
            return None
    phone_code = result.group(1)
    if phone_code[0] == '4':
        # 4xx area codes map to the fixed-line operator
        return 'МГТС'
    phone_code = phone_code[1:]
    star_position = phone_code.find('*')
    if star_position == -1:
        # both remaining digits known: exact code lookup
        return Settings.PHONE_OPERATOR_CODES.get(int(phone_code), None)
    second_number = phone_code[0]
    if second_number == '*':
        return None
    else:
        # only the second digit known: coarser per-digit lookup
        return Settings.PHONE_OPERATOR_CODES_SECOND_NUMBER.get(int(second_number), None)
def _get_coordinate(row, geo_finder, priority_addresses, coordinate_name):
    """Latitude or longitude (per `coordinate_name`) resolved via `geo_finder`
    from the first non-empty address fields listed in `priority_addresses`.

    Rows from other regions, and rows whose address cannot be geocoded,
    return Settings.EMPTY_INT.
    """
    if row['Region'] in ['Московская область', 'Другие регионы']:
        return Settings.EMPTY_INT
    fields = [row[field_name] for field_name in priority_addresses if row[field_name]]
    coordinates = geo_finder.get_coordinates(fields)
    if coordinates is None:
        return Settings.EMPTY_INT
    # NOTE(review): geo_finder appears to return (longitude, latitude) — index 1
    # is used for 'Latitude'; confirm against get_coordinates.
    if coordinate_name == 'Latitude':
        return coordinates[1]
    else:
        return coordinates[0]
|
from django.contrib import admin
from django.urls import path
from .views.pictures import PicturesIndexView

# URL configuration for the `pictures` app: a single index route.
# NOTE(review): `admin` is imported but unused in this module — confirm before
# removing (it may be relied upon by the project-level urls).
app_name = 'pictures'
urlpatterns = [
    path('', PicturesIndexView.as_view(), name='index'),
]
|
#coding=gbk
'''
Created on 2015年12月21日
@author: 大雄
'''
import csv
import poplib
import email.header
import email.utils
import base64
import os
import logging
from zipfile import ZipFile
from io import BytesIO,StringIO
def parserCSV(string, row_range, col_range):
    """Parse a CSV string and pick out the cells at the given positions.

    Args:
        string: CSV content as one string.
        row_range: iterable of 0-based row indices to keep.
        col_range: iterable of 0-based column indices to keep from each row.

    Returns:
        A list of rows (each a list of str), or None on any parse error.
    """
    try:
        # BUG FIX: 'with' replaces the old 'finally: strio.close()', which
        # raised NameError when StringIO(string) itself failed (e.g. non-str
        # input) because `strio` was never bound.
        with StringIO(string) as strio:
            reader = csv.reader(strio, delimiter=',', quotechar='"')
            data = []
            for index, row in enumerate(reader):
                if index in row_range:
                    data.append([row[col] for col in col_range])
            return data
    except Exception as e:
        logging.debug(e)
        return None
def showMessage(msg):
    """Recursively print the decoded text payload of a mail message.

    Multipart messages are walked part by part; only 'text/plain' and
    'text/base64' parts are printed.  Undecodable parts print a placeholder.
    """
    if msg.is_multipart():
        for part in msg.get_payload():
            showMessage(part)
        return
    types = msg.get_content_type()
    if types == 'text/plain':
        try:
            body = msg.get_payload(decode=True)
            print(bytes.decode(body))
        except Exception:
            print('[*001*]BLANK')
    elif types == 'text/base64':
        try:
            # BUG FIX: base64.decodestring was removed in Python 3.9;
            # decodebytes is the supported replacement.
            body = base64.decodebytes(msg.get_payload())
            print(bytes.decode(body))
        except Exception:
            print('[*001*]BLANK')
def parserAttachMent(msg):
    """Collect the attachments of a mail message.

    The Subject header's charset is used to decode attachment filenames
    (which are expected to be RFC 2047 encoded-words).

    Returns:
        dict mapping decoded filename -> raw attachment bytes.
    """
    attachments = {}
    # charset taken from the (encoded) subject header
    code = email.header.decode_header(msg['subject'])[0][1]
    for part in msg.walk():
        filename = part.get_filename()
        if filename is None:
            continue
        filename = email.header.decode_header(filename)[0][0]
        # drop any directory components so a full path cannot escape
        filename = bytes.decode(os.path.split(filename)[1], code)
        if filename:
            # decode the base64 attachment body
            attach_b64 = part.get_payload()
            # BUG FIX: base64.decodestring was removed in Python 3.9;
            # decodebytes is the supported replacement.
            attach_byte = base64.decodebytes(str.encode(attach_b64))
            attachments[filename] = attach_byte
        else:
            continue
    return attachments
def parserZipfile(zipbytes):
    """Unpack an in-memory ZIP archive into a {filename: raw bytes} dict."""
    with ZipFile(BytesIO(zipbytes), 'r') as archive:
        return {name: archive.read(name) for name in archive.namelist()}
def fetchMail(host, username, password, mail_from='autopost@baidu.com', max_top_mail_prefetch=10):
    """Fetch up to `max_top_mail_prefetch` newest messages from a POP3 mailbox,
    keeping only those whose From header contains `mail_from`.

    Returns a list of email.message.Message objects (newest first).
    Raises Exception on any POP3/parsing failure (after logging it).
    """
    pop_conn = None
    try:
        pop_conn = poplib.POP3(host, port=110)
        pop_conn.user(username)
        pop_conn.pass_(password)
        messages = []
        totalNum = pop_conn.stat()[0]
        start = totalNum
        if max_top_mail_prefetch < totalNum:
            end = totalNum - max_top_mail_prefetch
        else:
            # BUG FIX: was `end = 1`, which made range(start, 1, -1) stop at
            # message 2 and silently skip the oldest message (POP3 numbering
            # is 1-based).
            end = 0
        for i in range(start, end, -1):
            lines = [bytes.decode(line) for line in pop_conn.retr(i)[1]]
            message = email.message_from_string('\n'.join(lines))
            m_subcode = email.header.decode_header(message['subject'])[0][1]
            header_from = email.header.decode_header(message['From'])
            m_from = header_from[-1][0]
            if isinstance(m_from, str):
                # well-formed header: extract the bare address
                m_from = email.utils.parseaddr(m_from)[1]
            elif isinstance(m_from, bytes):
                # malformed headers like '" <jun.zhang@mcake.com>' arrive as bytes
                m_from = bytes.decode(m_from, m_subcode).strip()
            else:
                logging.debug("from: wrong type")
            if m_from and m_from.find(mail_from) > -1:
                messages.append(message)
        return messages
    except Exception as e:
        logging.debug(e)
        raise Exception(e)
    finally:
        # BUG FIX: guard the cleanup — the old unconditional pop_conn.quit()
        # raised NameError when POP3() itself failed.
        if pop_conn is not None:
            pop_conn.quit()
def parserSubject(msg):
    """Return the message's Subject header decoded to str (RFC 2047 aware)."""
    decoded, charset = email.header.decode_header(msg['subject'])[0]
    if isinstance(decoded, bytes):
        decoded = bytes.decode(decoded, charset)
    elif not isinstance(decoded, str):
        logging.debug("from: wrong type")
    return decoded
def mappingKeywords(subject):
    """Map a mail subject to a brand tag via substring keywords.

    Returns the first tag (in declaration order) whose keyword occurs in
    `subject`, or None for an empty subject / no match.
    """
    keywords = {
        "Grand-Cookies": ["zumuquqi"],
        "Joyseed": ["joyseed"],
        "HI-MCAKE": ["mcake"],
        "WithWheat": ["withwheat"],
        "Flower": ["flower"]
    }
    if not subject:
        return None
    for tag, keys in keywords.items():
        if any(subject.find(key) > -1 for key in keys):
            return tag
    return None
def convertStr2Int(value, default=0):
    """Parse an int from a string like '1,234'; return `default` on failure.

    BUG FIX: non-string input (e.g. None) previously escaped as an
    AttributeError/TypeError instead of falling back to `default`.
    """
    try:
        return int(value.replace(',', ''))
    except (ValueError, AttributeError, TypeError):
        return default
def convertStr2Float(value, defalut=0.00):
    """Parse a float from strings like '1,234.5' or '12.5%' (a trailing
    percent sign is stripped and the value scaled to a fraction).

    NOTE: the parameter keeps its historical spelling `defalut` so existing
    keyword-argument callers are not broken.
    """
    try:
        cleaned = value.replace(',', '')
        if value.find('%', -1) > 0:
            # '%' as the final character: strip it and convert to a fraction
            return float(cleaned[0:-1]) / 100
        return float(cleaned)
    except ValueError:
        return defalut
|
from csv_comparison_package import Compare
from csv_comparison_package.decorator import call_each
@call_each
def set_start_end_disjunctive_column(comparable: Compare):
    """Assign the inclusive [start, end] column indices of the disjunctive
    block, placed after the index, regular, mapped and not-checked columns.
    No-op when there are no disjunctive columns."""
    if comparable.number_of_disjunctive_columns > 0:
        preceding = (
            comparable.number_of_index_column
            + comparable.number_of_regular_columns
            + comparable.number_of_mapped_columns
            + comparable.number_of_not_checked_columns
        )
        comparable.disjunctive_column_start = comparable.start_column + preceding
        comparable.disjunctive_column_end = (
            comparable.disjunctive_column_start
            + comparable.number_of_disjunctive_columns
            - 1
        )
|
import pylab as pyl

# Numerical integration of a simple harmonic oscillator: s'' = p * s with
# p < 0, starting from rest at displacement sp, then plot s(t).
dt = 0.05      # integration time step
p = -5.0       # acceleration coefficient (acc = p * s)
sp = 5.0       # initial displacement
acc = [p*sp]   # acceleration history
vel = [0.0]    # velocity history (starts at rest)
s = [sp]       # displacement history
t = [0.0]      # time axis
for i in range (1, 100):
    acc.append(s[-1]*p)
    # semi-implicit Euler: the velocity update uses the freshly computed
    # acceleration, and the position update uses the new velocity
    vel.append(vel[-1] + acc[-1]*dt)
    s.append(s[-1]+vel[-1]*dt)
    t.append(dt*i)
dp = pyl.plot(t, s)
pyl.show()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : willi
# @Email : willi168@163.com
# @Description:
import json
import re
from lxml import etree
class Response(object):
    """Wrapper around a downloaded HTTP response offering xpath / json /
    regex helpers over the response body."""

    def __init__(self, url, status_code, headers, body):
        self.url = url
        self.status_code = status_code
        self.headers = headers
        self.body = body

    def xpath(self, rule):
        """Evaluate an XPath expression against the HTML body.

        @param rule: XPath expression string.
        @return: list of matching nodes/strings.
        """
        return etree.HTML(self.body).xpath(rule)

    @property
    def json(self):
        """Response body parsed as JSON.

        @return: the deserialized object.
        """
        return json.loads(self.body)

    def re_findall(self, rule, data=None):
        """re.findall over `data`, defaulting to the response body.

        @param rule: regular-expression pattern.
        @param data: optional text to search instead of the body.
        @return: list of matches.
        """
        target = self.body if data is None else data
        return re.findall(rule, target)
|
# settings for app
#
# SECURITY NOTE(review): live PayPal API credentials are hard-coded below and
# committed to source control.  Move them to environment variables or a
# secrets store and rotate the exposed values.
# PAYPAL_ENDPOINT = 'https://svcs.sandbox.paypal.com/AdaptivePayments/' # sandbox
PAYPAL_ENDPOINT = 'https://svcs.paypal.com/AdaptivePayments/' # production
# PAYPAL_PAYMENT_HOST = 'https://www.sandbox.paypal.com/au/cgi-bin/webscr' # sandbox
PAYPAL_PAYMENT_HOST = 'https://www.paypal.com/webscr' # production
PAYPAL_USERID = 'will.taylor_api1.60secondlaundry.com'
PAYPAL_PASSWORD = 'A49UBQGNB867WYB5'
PAYPAL_SIGNATURE = 'A9DjNk8Rr-UrlRVhRUm.1wVPW2SpAHPVaivltNg.Tt6SlbMHUzxFQQ2V'
# PAYPAL_APPLICATION_ID = 'APP-80W284485P519543T' # sandbox only
# NOTE(review): the active application id below is identical to the commented
# sandbox id and is itself marked "sandbox only" — confirm the production id.
PAYPAL_APPLICATION_ID = 'APP-80W284485P519543T' # sandbox only
PAYPAL_EMAIL = 'will.taylor-facilitator@60secondlaundry.com'
PREAPPROVAL_PERIOD = 182 # days to ask for in a preapproval
DEBUG = False # No outbound texts or emails.
PAYPAL_COMMISSION = 0.1 # fraction taken per payment — NOTE(review): confirm semantics
class Stack:
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return len(self.items) == 0

    def push(self, data):
        """Place `data` on top of the stack."""
        self.items.append(data)

    def pop(self):
        """Remove and return the top element.

        BUG FIX: the popped value was previously discarded (method returned
        None).  Raises IndexError when the stack is empty.
        """
        return self.items.pop()

    def display(self):
        """Print the underlying list (top element last)."""
        print(self.items)

    def size(self):
        """Return the number of stored elements."""
        return len(self.items)
if __name__ == '__main__':
    # Smoke-test the Stack class.
    # BUG FIX: the Python 2 `print x` statements were a SyntaxError under
    # Python 3 (the rest of the file already uses print() calls).
    s = Stack()
    print(s.isEmpty())
    s.push(10)
    s.push(20)
    s.display()
    print(s.isEmpty())
    print(s.size())
    print(s.pop())
    # NOTE(review): display() prints and returns None, so this line also
    # prints 'None' — confirm that is intended.
    print(s.display())
|
"""
A module that binds all of the commands for the terminal CLI app together into
one command group.
"""
import logging
import sys
import click
from ._bot_command import bot
from ._caption_command import caption
from ._ls_command import ls
_LOGGING_FORMAT = "%(asctime)s %(levelname)s %(message)s"
@click.group()
def cli() -> None:
    """
    The main terminal CLI command group.
    """
    # Configure root logging once for every subcommand: INFO and above,
    # "<time> <level> <message>" format, written to stdout.
    logging.basicConfig(
        format=_LOGGING_FORMAT,
        level=logging.INFO,
        stream=sys.stdout
    )

# Register the subcommands on the group.
cli.add_command(bot)
cli.add_command(caption)
cli.add_command(ls)
|
from django.contrib import admin
from django.urls import path
import post.views

# Project URL configuration: the Django admin plus the page views of the
# `post` app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('info/', post.views.info, name="info"),
    path('', post.views.index, name='index'),
    path('csv/', post.views.csv, name="csv"),
    path('conner/', post.views.conner, name="conner"),
    path('embellishment/', post.views.embellishment, name="embellishment"),
    path('manbox/', post.views.manbox, name="manbox"),
    path('ending/', post.views.ending, name="ending"),
]
from unittest import TestCase
import os
import tempfile
import pickle
import shutil
from easy_word2vec.utils.data_utils import prepare_corpus
class TestCasePrepareCorpus(TestCase):
    """Tests for easy_word2vec.utils.data_utils.prepare_corpus."""

    def test_prepare_corpus(self):
        """prepare_corpus must yield one whitespace-split token list per line."""
        input_data = '''foo bar foo abc def
new old abc how yes no
no foo sir foo
abc foo old sir old new bar foo zoo
old foo how foo
foo bar zoo foo
bye bye foo
foo old foo'''
        expected_corpus = [
            ['foo', 'bar', 'foo', 'abc', 'def'],
            ['new', 'old', 'abc', 'how', 'yes', 'no'],
            ['no', 'foo', 'sir', 'foo'],
            ['abc', 'foo', 'old', 'sir', 'old', 'new', 'bar', 'foo', 'zoo'],
            ['old', 'foo', 'how', 'foo'],
            ['foo', 'bar', 'zoo', 'foo'],
            ['bye', 'bye', 'foo'],
            ['foo', 'old', 'foo']
        ]
        test_dir = tempfile.mkdtemp()
        try:
            input_file = os.path.join(test_dir, 'test.txt')
            with open(input_file, 'w', encoding='utf-8') as input_fh:
                input_fh.write(input_data)
            loaded_corpus = list(prepare_corpus(input_file))
            self.assertEqual(loaded_corpus, expected_corpus)
        finally:
            # BUG FIX: the temp directory previously leaked whenever the
            # assertion failed; clean it up unconditionally.
            shutil.rmtree(test_dir)
|
import hashlib
from http import HTTPStatus
from secrets import token_hex
from flask_restful import Resource, reqparse
from app.auth.handlers import auth
from app.entities.basic_schema import BasicResponseSchema, BasicSchema
from models.token import Token
from db.setup import db
class AuthController(Resource):
    """Flask-RESTful resource issuing API tokens for authenticated users."""

    def create_params(self):
        """Parse and return (username, password) from the request body."""
        parser = reqparse.RequestParser()
        parser.add_argument('username', type=str, required=True)
        parser.add_argument('password', type=str, required=True)
        args = parser.parse_args()
        return args['username'], args['password']

    @auth.login_required
    def get(self):
        """Greet the currently authenticated user."""
        return {
            "result": {
                "message": "Hello, {}!".format(auth.current_user())
            }
        }

    @auth.login_required
    def post(self):
        """Create, persist and return a new token for the given credentials."""
        username, password = self.create_params()
        # SECURITY NOTE(review): unsalted SHA-256 is weak for password
        # storage — consider a dedicated KDF (bcrypt/scrypt/argon2).
        password_sha = hashlib.sha256(password.encode()).hexdigest()
        token = token_hex(16)
        # BUG FIX: the local variable was named `auth`, shadowing the
        # module-level auth handler inside this method.
        token_record = Token(user=username, password=password_sha, token=token)
        db.session.add(token_record)
        db.session.commit()
        token_json = Token.serialize(token_record)
        response = BasicSchema(result=token_json, status_code=HTTPStatus.CREATED)
        return response.make_response(BasicResponseSchema().dump(response))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 17:03:59 2019
@author: zhangyanhang
"""
import unittest
from unittest import TestCase
from Ball.bd import bd_test, bd
from Ball.bcorsis import bcorsis
from Ball.bcov import bcov_test, bcov
from Ball.wrap_c import bd_test_wrap_c, bcor_test_wrap_c, bcov_test_wrap_c, kbcov_test_wrap_c
import numpy as np
import math
class Test_bd(TestCase):
    """Regression tests for the Ball package: ball divergence (bd/bd_test),
    BCor-SIS screening (bcorsis) and ball covariance (bcov/bcov_test).

    All expected values are hard-coded statistics obtained under fixed RNG
    seeds; any change to the sampling order will break them.
    NOTE(review): several asserts pass *lists* to assertAlmostEqual — that
    only passes via the exact-equality shortcut; confirm it is intentional.
    """
    def test_bd(self):
        np.random.seed(7654567)
        x = np.random.normal(0, 1, 50)
        y = np.random.normal(1, 1, 50)
        bd_value = bd(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.196408479999)
        x = np.random.normal(0, 1, 100).reshape(50, 2)
        y = np.random.normal(3, 1, 100).reshape(50, 2)
        bd_value = bd(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.5681075200000011)
        from sklearn.metrics.pairwise import euclidean_distances
        sigma = [[1, 0], [0, 1]]
        x = np.random.multivariate_normal(mean=[0, 0], cov=sigma, size=50)
        y = np.random.multivariate_normal(mean=[1, 1], cov=sigma, size=50)
        bd_value = bd(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.13847583999999966) # 0.059310879999
        n = 90
        x = np.random.normal(0, 1, n)
        bd_value = bd(x, size=np.array([40, 50]))
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.011700674999999966)
        x = [np.random.normal(0, 1, num) for num in [40, 50]]
        x = np.hstack(x)
        bd_value = bd(x, [40, 50])
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.9639094650205711)
    def test_bd_test(self):
        x = [1, 2, 3, 4, 5]
        y = [1, 2, 3, 4, 5]
        bd_value = bd_test(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.0)
        np.random.seed(7654567)
        x = np.random.normal(0, 1, 50)
        y = np.random.normal(1, 1, 50)
        bd_value = bd_test(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.196408479999)
        x = np.random.normal(0, 1, 100).reshape(50, 2)
        y = np.random.normal(3, 1, 100).reshape(50, 2)
        bd_value = bd_test(x, y)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.5681075200000011)
        x = np.random.normal(0, 1, 100).reshape(50, 2)
        y = np.random.normal(10, 1, 100).reshape(50, 2)
        z = np.random.normal(100, 1, 100).reshape(50, 2)
        bd_value = bd_test(x, y, z)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 2.0604000000000022)
        bd_value = bd_test(x, y, z, weight = "max")
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 1.3736000000000015)
        n = 90
        x = np.random.normal(0, 1, n)
        bd_value = bd_test(x, size=np.array([40, 50]))
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.009086599999999997)
        x = [np.random.normal(0, 1, num) for num in [40, 50]]
        x = np.hstack(x)
        bd_value = bd_test(x, [40, 50])
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.9639094650205713)
        x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        x = np.array(x, dtype=np.double)
        bd_value = bd_test(x, size=np.array([5, 5]))
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.7231999999999997)
        x = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]]
        bd_value = bd_test(x)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 2.403199999999999)
        # precomputed distance-matrix input (dst=True)
        from sklearn.metrics.pairwise import euclidean_distances
        sigma = [[1, 0], [0, 1]]
        x = np.random.multivariate_normal(mean=[0, 0], cov=sigma, size=50)
        y = np.random.multivariate_normal(mean=[1, 1], cov=sigma, size=50)
        x = np.row_stack((x, y))
        dx = euclidean_distances(x, x)
        data_size = [50, 50]
        bd_value = bd_test(dx, size=data_size, dst=True)
        bd_value = bd_value[0]
        self.assertAlmostEqual(bd_value, 0.10779759999999977)
    def test_bcorsis(self):
        np.random.seed(1000)
        n = 150
        p = 3000
        mean = np.zeros(p)
        cov = np.array([0.5]*np.square(p)).reshape((p, p))
        cov[np.diag_indices(3000)] = 1
        x = np.random.multivariate_normal(mean, cov, n)
        error = np.random.normal(0, 1, n)
        y = 4*np.square(x[:, 2])+6*np.square(x[:, 1])+8*x[:, 3]-10*x[:,4]+error
        x_num = np.ones(3000)
        target = [4, 1, 924, 2, 692, 3, 400, 2241, 2839, 2194, 170]
        result = bcorsis(y, x, x_num, method="lm", params = [5, 3], d = 11)
        self.assertAlmostEqual(result, target)
        x = np.random.normal(0, 1, n*p).reshape((n, p))
        error = np.random.normal(0, 1, n)
        y = 3 * x[:, 1] * x[:, 5] * x[:, 10] + error
        target = [10, 5, 1, 1118, 555, 1174, 2361, 567, 1599, 1739]
        result = bcorsis(y, x, x_num, method = "interaction", d=10)
        self.assertAlmostEqual(result, target)
        y = 3 * x[:, 1] + 5 * np.square(x[:, 3]) + error
        target = [3, 1, 2607, 2374, 762]
        result = bcorsis(y, x, x_num, d=5, weight="prob")
        self.assertAlmostEqual(result, target)
        target = [3, 1, 762, 2374, 2607]
        result = bcorsis(y, x, x_num, d=5, weight="chisq")
        self.assertAlmostEqual(result, target)
    def test_bcov_test(self):
        np.random.seed(2)
        error = np.random.uniform(-0.3, 0.3, 50)
        x = np.random.uniform(0, 4*math.pi, 50)
        y = np.cos(x) + error
        bcov_value = bcov_test(x, y)[0]
        self.assertAlmostEqual(bcov_value, 0.0021155347839999917)
        bcov_value = bcov_test(x, y, weight = "prob")[0]
        self.assertAlmostEqual(bcov_value, 0.05600363939468131)
        x = np.random.normal(0, 1, 50)
        y = [1 if i > 0 else 0 for i in x] * x + np.random.normal(0, 1, 50)
        z = [1 if i < 0 else 0 for i in x] * x + np.random.normal(0, 1, 50)
        bcov_value = bcov_test(x, y, z)[0]
        self.assertAlmostEqual(bcov_value, 0.0025298346891263934)
        x = np.random.normal(-math.pi, math.pi, 100).reshape((50, 2))
        error = np.random.uniform(-0.1, 0.1, 50)
        y = np.sin(x[:, 0] + x[:, 1]) + error
        bcov_value = bcov_test(x, y, weight = "prob")[0]
        self.assertAlmostEqual(bcov_value, 0.03201829726950482)
        bcov_value = bcov_test(x, y, weight = "chisq")[0]
        self.assertAlmostEqual(bcov_value, 0.05011815702428237)
    def test_bcov(self):
        np.random.seed(2)
        error = np.random.uniform(-0.3, 0.3, 50)
        x = np.random.uniform(0, 4*math.pi, 50)
        y = np.cos(x) + error
        bcov_value = bcov(x, y)[0]
        self.assertAlmostEqual(bcov_value, 0.0021155347839999917)
        bcov_value = bcov(x, y, weight = "prob")[0]
        self.assertAlmostEqual(bcov_value, 0.05600363939468131)
        x = np.random.normal(0, 1, 50)
        y = [1 if i > 0 else 0 for i in x] * x + np.random.normal(0, 1, 50)
        z = [1 if i < 0 else 0 for i in x] * x + np.random.normal(0, 1, 50)
        bcov_value = bcov(x, y, z)[0]
        self.assertAlmostEqual(bcov_value, 0.0025298346891263934)
        x = np.random.normal(-math.pi, math.pi, 100).reshape((50, 2))
        error = np.random.uniform(-0.1, 0.1, 50)
        y = np.sin(x[:, 0] + x[:, 1]) + error
        bcov_value = bcov(x, y, weight = "prob")[0]
        self.assertAlmostEqual(bcov_value, 0.03201829726950482)
        bcov_value = bcov(x, y, weight = "chisq")[0]
        self.assertAlmostEqual(bcov_value, 0.05011815702428237)
if __name__ == '__main__':
    # unittest.main() does not return normally (it calls sys.exit), so the
    # old trailing `pass` was dead code and has been removed.
    unittest.main()
#/usr/bin/env python
#coding=utf-8
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,recall_score,f1_score,accuracy_score
from sklearn import preprocessing
from joblib import dump, load
from result import figures
import input_w
# Train a logistic-regression classifier, persist it, and report test metrics.
train_data, train_labels, test_data, test_labels = input_w.inputs()
# BUG FIX: the scaler must be fitted on the training data only and then
# applied to the test data; fitting a second MinMaxScaler on the test set gave
# each split a different transformation (data leakage / inconsistent scaling).
scaler = preprocessing.MinMaxScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
lr = LogisticRegression(verbose=1, max_iter=500)
# BUG FIX: the model was previously fitted twice in a row; once is enough.
model = lr.fit(train_data, train_labels)
dump(lr, './lr.model')
clf = load('./lr.model')
predicts = clf.predict(test_data)
print(predicts)
print("accuracy_score:", accuracy_score(test_labels, predicts))
print("precision_score:", precision_score(test_labels, predicts, average='macro'))
# print("f1_score_micro:",f1_score(y_true,predicts,average='micro'))
print("f1_score_macro:", f1_score(test_labels, predicts, average='macro'))
# print("recall_score_micro:",recall_score(y_true,predicts,average='micro'))
print("recall_score_macro:", recall_score(test_labels, predicts, average='macro'))
# alphabet=["AIM","email","facebookchat","gmailchat","hangoutsaudio","hangoutschat","icqchat","netflix","skypechat","skypefile","spotify","vimeo","youtube","youtubeHTML5"]
# Class labels for the confusion-matrix plot.
alphabet = softwares = ["Baidu Map",
                        "Baidu Post Bar",
                        "Netease cloud music",
                        "iQIYI",
                        "Jingdong",
                        "Jinritoutiao",
                        "Meituan",
                        "QQ",
                        "QQ music",
                        "QQ reader",
                        "Taobao",
                        "Weibo",
                        "CTRIP",
                        "Zhihu",
                        "Tik Tok",
                        "Ele.me",
                        "gtja",
                        "QQ mail",
                        "Tencent",
                        "Alipay"]
figures.plot_confusion_matrix(test_labels, predicts, alphabet, "./lr")
def pollsf(x, y, sigma, M):
    '''Weighted linear least-squares fit of an (M-1)-degree polynomial.

    Inputs
    ------
    x      Independent variable
    y      Dependent variable
    sigma  Estimated error in y
    M      Number of parameters used to fit data

    Outputs
    -------
    a_fit   Fit parameters; a_fit[0] = intercept, a_fit[1] = slope, ...
    sig_a   Estimated error in the parameters a
    yy      Curve to fit the data
    chisqr  Chi squared statistic

    BUG FIX: the docstring previously followed the `import` statement, so it
    was never bound to __doc__.  The per-element Python loops are replaced by
    equivalent vectorized NumPy expressions.
    '''
    import numpy as np
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    sigma = np.asarray(sigma, dtype=float)
    # Design matrix (a sigma-scaled Vandermonde matrix): A[i, j] = x_i**j / sigma_i,
    # with N rows (data points) and M columns (parameters); b_i = y_i / sigma_i.
    powers = np.arange(M)
    A = x[:, None] ** powers / sigma[:, None]
    b = y / sigma
    # Correlation matrix C and fitted coefficients a_fit = C A^T b.
    C = np.linalg.inv(A.T @ A)
    a_fit = C @ (A.T @ b)
    # One-sigma error bars are the square roots of C's diagonal.
    sig_a = np.sqrt(np.diag(C))
    # Evaluate the fitted polynomial and the chi-squared statistic.
    yy = A @ a_fit * sigma
    chisqr = float(np.sum(((y - yy) / sigma) ** 2))
    return [a_fit, sig_a, yy, chisqr]
#!env python
# vim: set fileencoding=utf8
# Created:20080216
# By Jeff Connelly
#
# Trinary-related symbols
# See http://jeff.tk/wiki/Trinary/Symbols
# Note: to print, .encode('utf8') first
#
# MORE IMPORTANT NOTE: This isn't needed most of the time.
# Instead, just use the Unicode symbols directly. You can
# do this if the second line of the file is:
# # vim: set fileencoding=utf8
# TODO: Use literal values, actual characters, not escape
# Trinary.cc-based unary
# Trinary.cc-based unary
ROTATE_UP = u"\u2229"   # INTERSECTION
ROTATE_DN = u"\u222a"   # UNION
SHIFT_UP = u"\u2197"    # NORTH EAST ARROW
# BUG FIX: was u"\2198" — without the 'u' the escape parsed as octal \21
# followed by the literal characters "98", not the intended arrow.
SHIFT_DN = u"\u2198"    # SOUTH EAST ARROW
INVERT = u"/"
# Mouftah-based unary
FD = u"\u00ac"          # NOT SIGN
RD = u"\u2310"          # REVERSED NOT SIGN
PTI = u"\u2518"; PTI2 = u"\u2518"   # BOX DRAWINGS UP AND LEFT
NTI = u"\u2207"; NTI2 = u"\u2514"   # NABLA / BOX DRAWINGS UP AND RIGHT
STI = u"/"
# TODO: dyadic
# Balanced base 9: digits -4..4, negatives shown as circled numbers
NONARY = {
    -1: u"\u2460",
    -2: u"\u2461",
    -3: u"\u2462",
    -4: u"\u2463",
    0: u"0",
    1: u"1",
    2: u"2",
    3: u"3",
    4: u"4"}
|
import jax.numpy as np
import jax.numpy as jnp
from jax import jit, vmap, random, value_and_grad, grad
import haiku as hk
import optax
from tqdm import tqdm
from functools import partial
import warnings
from typing import Mapping
import os
from . import utils, metrics, stein, kernels, nets
# Heuristic: assume we run on a cluster unless HOME matches the dev machine;
# used only to silence tqdm progress bars in batch jobs.
on_cluster = not os.getenv("HOME") == "/home/lauro"
disable_tqdm = on_cluster
"""
This file implements methods that simulate different kinds of particle dynamics.
It is structured as follows:
The class `Particles` acts as container for the particle positions and associated data.
Any update rule can be 'plugged in' by supplying the `gradient` argument. The following
update rules are implemented here:
- `SteinNetwork`: the method developed in this project, which dynamically learns a trajectory using a neural network.
- `KernelGradient`: simulates SVGD dynamics
- `EnergyGradient`: simulates Langevin dynamics
Finally, the mixin classes `VectorFieldMixin` and `EBMMixin` define different constraints on the neural update rule.
"""
class Patience:
    """Early-stopping criterion.

    Tracks the best validation loss seen so far; the criterion is met once
    the loss has failed to improve for more than `patience` consecutive
    updates. A patience of -1 disables the criterion entirely.
    """
    def __init__(self, patience: int = 20):
        self.patience = patience
        self.time_waiting = 0           # consecutive updates without improvement
        self.min_validation_loss = None # best loss seen so far (None = no data yet)
        self.disable = patience == -1

    def update(self, validation_loss):
        """Record a validation loss; resets the wait counter on improvement.

        Returns None — query `out_of_patience()` to test the criterion.
        """
        if self.min_validation_loss is None or self.min_validation_loss > validation_loss:
            self.min_validation_loss = validation_loss
            self.time_waiting = 0
        else:
            self.time_waiting += 1
        return

    def out_of_patience(self):
        """True iff the stopping criterion is met (and not disabled)."""
        return (self.time_waiting > self.patience) and not self.disable

    def reset(self, patience=None):
        """Clear tracked state; optionally set a new patience value.

        BUG FIX: the original used `if patience:`, which silently ignored
        `reset(0)`; test against None so 0 is a valid patience.
        """
        self.time_waiting = 0
        self.min_validation_loss = None
        if patience is not None:
            self.patience = patience
class Particles:
    """
    Container class for particles, particle optimizer,
    particle update step method, and particle metrics.
    """
    def __init__(self,
                 key,
                 gradient: callable,
                 init_samples,
                 learning_rate=1e-2,
                 optimizer="sgd",
                 custom_optimizer=None,
                 n_particles: int = 50,
                 compute_metrics=None):
        """
        Args:
            key: jax PRNGKey; split once to seed this container's thread key.
            gradient: takes in args (params, key, particles) and returns
                an array of shape (n, d). Used to compute particle update x = x + eps * gradient(*args)
            init_samples: either a callable sample(num_samples, key), or an array
                of shape (n, d) containing initial samples.
            learning_rate: scalar step-size for particle updates
            optimizer: name looked up in utils.optimizer_mapping; ignored when
                custom_optimizer is given.
            custom_optimizer: optional pre-built optax optimizer (overrides
                `optimizer` and `learning_rate`).
            n_particles: number of particles drawn when init_samples is callable;
                overwritten by len(init_samples) when an array is passed.
            compute_metrics: callable, takes in particles as array of shape (n, d) and
                outputs a dict shaped {'name': metric for name, metric in
                zip(names, metrics)}. Evaluated once every 10 steps (see log()).
        """
        self.gradient = gradient
        self.n_particles = n_particles
        self.threadkey, subkey = random.split(key)
        self.init_samples = init_samples
        self.particles = self.init_particles(subkey)
        # optimizer for particle updates
        if custom_optimizer:
            self.optimizer_str = "custom"
            self.learning_rate = None
            self.opt = custom_optimizer
        else:
            self.optimizer_str = optimizer
            self.learning_rate = learning_rate
            self.opt = utils.optimizer_mapping[optimizer](learning_rate)
        self.optimizer_state = self.opt.init(self.particles)
        self.step_counter = 0
        self.rundata = {}
        self.donedone = False  # guard so done() converts rundata only once
        self.compute_metrics = compute_metrics

    def init_particles(self, key=None):
        """Returns an jnp.ndarray of shape (n, d) containing particles."""
        if key is None:
            self.threadkey, key = random.split(self.threadkey)
        if callable(self.init_samples):
            particles = self.init_samples(self.n_particles, key)
        else:
            particles = self.init_samples
            # An explicit sample array fixes n; keep the count consistent.
            self.n_particles = len(particles)
        self.d = particles.shape[1]
        return particles

    def get_params(self):
        # "params" here are the particle positions themselves.
        return self.particles

    def next_batch(self,
                   key,
                   n_train_particles: int = None,
                   n_val_particles: int = None):
        """
        Return next subsampled batch of training particles (split into training
        and validation) for the training of a gradient field approximator.
        """
        particles = self.get_params()
        shuffled_batch = random.permutation(key, particles)
        # Fill in whichever split size was omitted; default val split is n/4.
        if n_train_particles is None:
            if n_val_particles is None:
                n_val_particles = self.n_particles // 4
            n_train_particles = jnp.clip(self.n_particles - n_val_particles, 0)
        elif n_val_particles is None:
            n_val_particles = jnp.clip(self.n_particles - n_train_particles, 0)
        assert n_train_particles + n_val_particles == self.n_particles
        return shuffled_batch[:n_train_particles], shuffled_batch[-n_val_particles:]

    @partial(jit, static_argnums=0)  # self is static: retraced per instance
    def _step(self, particles, optimizer_state, params):
        """
        Updates particles in the direction given by self.gradient
        Arguments:
            particles: jnp.ndarray of shape (n, d)
            params: can be anything. e.g. inducing particles in the case of SVGD,
                deep NN params for learned f, or None.
        Returns:
            particles (updated)
            optimizer_state (updated)
            grad_aux: dict containing auxdata
        """
        grads, grad_aux = self.gradient(params, particles, aux=True)
        updated_grads, optimizer_state = self.opt.update(grads, optimizer_state, particles)
        particles = optax.apply_updates(particles, updated_grads)
        grad_aux.update({
            "global_grad_norm": optax.global_norm(grads),
            "global_grad_norm_post_update": optax.global_norm(updated_grads),
        })
        grad_aux.update({})
        # grad_aux.update({"grads": updated_grads})
        return particles, optimizer_state, grad_aux

    def step(self, params):
        """Log rundata, take step. Mutates state"""
        updated_particles, self.optimizer_state, auxdata = self._step(
            self.particles, self.optimizer_state, params)
        # Log BEFORE assigning, so the logged particles are pre-update.
        self.log(auxdata)
        self.particles = updated_particles
        self.step_counter += 1
        return None

    def log(self, grad_aux=None):
        """Append per-step statistics (and optional metrics) to self.rundata."""
        metrics.append_to_log(self.rundata, self._log(self.particles, self.step_counter))
        # User metrics are expensive; only evaluate every 10 steps.
        if self.step_counter % 10 == 0 and self.compute_metrics:
            aux_metrics = self.compute_metrics(self.particles)
            metrics.append_to_log(self.rundata,
                                  {k: (self.step_counter, v) for k, v in aux_metrics.items()})
        if grad_aux is not None:
            metrics.append_to_log(self.rundata, grad_aux)

    @partial(jit, static_argnums=0)
    def _log(self, particles, step):
        auxdata = {}
        # Skip per-step particle snapshots in high dimensions (memory);
        # note: the branch on self.d is resolved at trace time.
        if self.d < 400:
            auxdata.update({
                "step": step,
                "particles": particles,
                "mean": np.mean(particles, axis=0),
                "std": np.std(particles, axis=0),
            })
        return auxdata

    def done(self):
        """converts rundata into arrays"""
        if self.donedone:
            print("already done.")
            return
        # "particles" and "accuracy" entries are kept as-is here...
        skip = "particles accuracy".split()
        self.rundata = {
            k: v if k in skip else np.array(v)
            for k, v in self.rundata.items()
        }
        # ...and particles are then converted separately.
        if "particles" in self.rundata:
            self.rundata["particles"] = np.array(self.rundata['particles'])
        self.donedone = True
class VectorFieldMixin:
    """Define the architecture of the witness function and initialize it."""
    def __init__(self,
                 target_dim: int,
                 key=random.PRNGKey(42),
                 sizes: list = None,
                 aux=False,
                 normalize_inputs=False,
                 extra_term: callable = lambda x: 0,
                 hypernet: bool = False,
                 particle_unravel: callable = None,
                 **kwargs):
        """
        args:
            target_dim: dimension d of the particle space (input and output of the field).
            key: PRNGKey used for parameter initialization.
            sizes: MLP layer sizes; defaults to [32, 32, d]. The last entry
                must equal d (a warning is emitted otherwise).
            aux: bool; whether to add mean and std as auxiliary input to MLP.
            normalize_inputs: whether to normalize particles
            extra_term: callable added onto the field's output (default: zero).
            hypernet: if true, use a hypernet architecture (for BNN inference.)
            particle_unravel: maps a flat particle vector back to a param pytree;
                only used by the hypernet branch.
        """
        self.aux = aux
        self.d = target_dim
        self.sizes = sizes if sizes else [32, 32, self.d]
        self.auxdim = self.d*2  # aux input = concatenated mean and std, each of length d
        if self.sizes[-1] != self.d:
            warnings.warn(f"Output dim should equal target dim; instead "
                          f"received output dim {sizes[-1]} and "
                          f"target dim {self.d}.")
        self.threadkey, subkey = random.split(key)
        self.normalize_inputs = normalize_inputs
        self.extra_term = extra_term
        self.hypernet = hypernet
        self.particle_unravel = particle_unravel
        # net and optimizer
        if hypernet:
            def field(x, aux, dropout: bool = False):
                # Hypernet path: unravel the particle into a param pytree,
                # run the static hypernet, and flatten the result back.
                h = nets.StaticHypernet(sizes=[64, 64])
                params = self.particle_unravel(x)
                return utils.ravel(h(params, dropout))
        else:
            def field(x, aux, dropout: bool = False):
                # Plain MLP with a learned scalar output scale.
                mlp = nets.MLP(self.sizes)
                scale = hk.get_parameter("scale", (), init=lambda *args: np.ones(*args))
                mlp_input = np.concatenate([x, aux]) if self.aux else x
                return scale * mlp(mlp_input, dropout)
        self.field = hk.transform(field)
        self.params = self.init_params()
        super().__init__(**kwargs)

    def compute_aux(self, particles):
        """Auxiliary data that will be concatenated onto MLP input.
        Output has shape (self.auxdim,).
        Can also be None."""
        if not self.aux:
            return None
        aux = np.concatenate([np.mean(particles, axis=0), np.std(particles, axis=0)])
        assert self.auxdim == len(aux)
        return aux

    def init_params(self, key=None, keep_params=False):
        """Initialize MLP parameter"""
        # NOTE(review): `keep_params` is accepted but never used here.
        if key is None:
            self.threadkey, key = random.split(self.threadkey)
        x_dummy = np.ones(self.d)
        aux_dummy = np.ones(self.auxdim) if self.aux else None
        params = self.field.init(key, x_dummy, aux_dummy)
        return params

    def get_params(self):
        return self.params

    def get_field(self, init_particles=None, params=None, dropout=False):
        """Retuns function v. v is a vector field, can take either single
        particle of shape (d,) or batch shaped (..., d).
        """
        if params is None:
            params = self.get_params()
        if self.normalize_inputs:
            if init_particles is None:
                raise ValueError("init_particles must not be None when"
                                 "normalize_inputs is True.")
            norm = nets.get_norm(init_particles)
        else:
            norm = lambda x: x
        # aux is computed ONCE from init_particles and then closed over.
        aux = self.compute_aux(init_particles)
        if dropout:
            def v(x, key):
                """x should have shape (n, d) or (d,)"""
                return self.field.apply(
                    params, key, norm(x), aux, dropout=dropout) + self.extra_term(x)
        else:
            def v(x):
                """x should have shape (n, d) or (d,)"""
                return self.field.apply(
                    params, None, norm(x), aux, dropout=dropout) + self.extra_term(x)
        return v
class EBMMixin():
    """Energy-based parametrization: the witness function is the gradient of
    a learned scalar MLP (output dim 1)."""
    def __init__(self,
                 target_dim: int,
                 key=random.PRNGKey(42),
                 sizes: list = None,
                 **kwargs):
        self.d = target_dim
        # Last layer must be scalar (the energy); warn otherwise.
        self.sizes = sizes if sizes else [32, 32, 1]
        if self.sizes[-1] != 1:
            warnings.warn(f"Output dim should equal 1; instead "
                          f"received output dim {sizes[-1]}")
        self.threadkey, subkey = random.split(key)
        # net and optimizer
        self.ebm = hk.transform(
            lambda *args: nets.MLP(self.sizes)(*args))
        self.params = self.init_params()
        super().__init__(**kwargs)

    def init_params(self, key=None):
        """Initialize MLP parameter"""
        if key is None:
            self.threadkey, key = random.split(self.threadkey)
        x_dummy = np.ones(self.d)
        params = self.ebm.init(key, x_dummy)
        return params

    def get_params(self):
        return self.params

    def get_field(self, init_particles, params=None):
        """Return grad(energy): a vector field taking a single particle of shape (d,)."""
        del init_particles  # accepted for interface parity; unused here
        if params is None:
            params = self.get_params()
        def ebm(x):
            """x should have shape (d,)"""
            # norm = nets.get_norm(init_particles)
            # x = norm(x)
            return np.squeeze(self.ebm.apply(params, None, x))
        return grad(ebm)
class TrainingMixin:
    """
    Encapsulates methods for training the Stein network (which approximates
    the particle update). Agnostic re: architecture. Needs existence of
    a self.params at initialization.
    Methods to implement:
    * self.loss_fn
    * self._log
    """
    def __init__(self,
                 learning_rate: float = 1e-2,
                 patience: int = 10,
                 dropout: bool = False,
                 **kwargs):
        """
        args:
            learning_rate: Adam step size for the network parameters.
            patience: early-stopping patience; -1 disables (see Patience).
            dropout: whether to use dropout during training
        """
        self.opt = optax.adam(learning_rate)
        self.optimizer_state = self.opt.init(self.params)
        self.dropout = dropout
        # state and logging
        self.step_counter = 0
        self.rundata = {"train_steps": []}
        self.frozen_states = []
        self.patience = Patience(patience)
        super().__init__(**kwargs)

    @partial(jit, static_argnums=0)  # self is static: retraced per instance
    def _step(self,
              key,
              params,
              optimizer_state,
              dlogp,
              val_dlogp,
              particles,
              val_particles):
        """
        Update parameters and compute validation loss.
        args:
            dlogp: array of shape (n_train, d)
            val_dlogp: array of shape (n_validation, d)
        returns:
            (params, optimizer_state, auxdata dict)
        """
        [loss, loss_aux], grads = value_and_grad(self.loss_fn,
                                                 has_aux=True)(params,
                                                               dlogp,
                                                               key,
                                                               particles,
                                                               dropout=self.dropout)
        grads, optimizer_state = self.opt.update(grads, optimizer_state, params)
        params = optax.apply_updates(params, grads)
        # Evaluate (never optimize) the loss on the validation split, dropout off.
        _, val_loss_aux = self.loss_fn(params,
                                       val_dlogp,
                                       key,
                                       val_particles,
                                       dropout=False)
        auxdata = {k: v for k, v in loss_aux.items()}
        auxdata.update({"val_" + k: v for k, v in val_loss_aux.items()})
        # NOTE: `grads` was overwritten above, so this is the norm of the
        # optimizer-transformed update, not of the raw gradients.
        auxdata.update({"global_gradient_norm": optax.global_norm(grads)})
        return params, optimizer_state, auxdata

    def step(self,
             particles,
             validation_particles,
             dlogp,
             val_dlogp):
        """Take one training step and mutate state (params, opt state, log)."""
        self.threadkey, key = random.split(self.threadkey)
        self.params, self.optimizer_state, auxdata = self._step(
            key, self.params, self.optimizer_state, dlogp, val_dlogp,
            particles, validation_particles)
        self.write_to_log(auxdata)
        self.step_counter += 1
        return None

    def write_to_log(self, step_data: Mapping[str, np.ndarray]):
        metrics.append_to_log(self.rundata, step_data)

    def train(self,
              split_particles,
              split_dlogp,
              n_steps=5,
              early_stopping=True,
              progress_bar=False):
        """
        Train the network on a fixed (train, validation) split.
        Arguments:
            split_particles: arrays (training, validation) of particles,
                shaped (n, d) resp (m, d)
            split_dlogp: arrays (training, validation) of loglikelihood
                gradients. Same shape as split_particles.
            n_steps: int, max nr of steps to train
            early_stopping: stop once validation loss stalls (see Patience)
            progress_bar: show a tqdm bar
        """
        self.patience.reset()

        def single_step():
            self.step(*split_particles, *split_dlogp)
            val_loss = self.rundata["val_loss"][-1]
            self.patience.update(val_loss)
            return

        # BUG FIX: the original iterated `for i in range(n_steps)` and then read
        # `i` after the loop, raising NameError when n_steps == 0. `steps_taken`
        # always holds the number of executed steps.
        steps_taken = 0
        for steps_taken in tqdm(range(1, n_steps + 1), disable=not progress_bar):
            single_step()
            if self.patience.out_of_patience() and early_stopping:
                break
        self.write_to_log({"train_steps": steps_taken})
        return

    def freeze_state(self):
        """Stores current state as tuple (step_counter, params, rundata)"""
        self.frozen_states.append((self.step_counter,
                                   self.get_params(),
                                   self.rundata))
        return

    def loss_fn(self, params, dlogp, key, particles, dropout):
        # Implemented by the concrete network class (e.g. SteinNetwork).
        raise NotImplementedError()

    def gradient(self, params, particles, aux=False):
        # Implemented by the concrete network class.
        raise NotImplementedError()
class SteinNetwork(VectorFieldMixin, TrainingMixin):
    """Parametrize vector field to maximize the stein discrepancy"""
    def __init__(self,
                 target_dim: int,
                 key: np.array = random.PRNGKey(42),
                 sizes: list = None,
                 learning_rate: float = 5e-3,
                 patience: int = 0,
                 aux=False,
                 lambda_reg=1/2,
                 use_hutchinson: bool = False,
                 dropout=False,
                 normalize_inputs=False,
                 extra_term: callable = lambda x: 0,
                 l1_weight: float = None,
                 hypernet: bool = False,
                 particle_unravel: callable = None):
        """
        args:
            aux: bool, whether to concatenate particle dist info onto
                mlp input
            lambda_reg: weight of the L2 penalty in the loss.
            use_hutchinson: when True, use Hutchinson's estimator to
                compute the stein discrepancy.
            normalize_inputs: normalize particles
            l1_weight: weight of an (currently disabled) L1 term, see loss_fn.
        """
        super().__init__(target_dim, key=key, sizes=sizes,
                         learning_rate=learning_rate, patience=patience,
                         aux=aux, dropout=dropout, normalize_inputs=normalize_inputs,
                         extra_term=extra_term, hypernet=hypernet,
                         particle_unravel=particle_unravel)
        self.lambda_reg = lambda_reg
        self.scale = 1.  # scaling of self.field
        self.use_hutchinson = use_hutchinson
        self.l1_weight = l1_weight

    def loss_fn(self,
                params,
                dlogp: np.ndarray,
                key: np.ndarray,
                particles: np.ndarray,
                dropout: bool = False):
        """
        Loss = -E[stein discrepancy] + lambda_reg * E[||f||^2].
        Arguments:
            params: neural net paramers
            dlogp: gradient grad(log p)(x), shaped (n, d)
            key: random PRNGKey
            particles: array of shape (n, d)
            dropout: whether to use dropout in the gradient network
        """
        n, d = particles.shape
        v = self.get_field(particles, params, dropout=dropout)
        if dropout:
            f = utils.negative(v)
        else:
            # Unify the signatures: without dropout the field ignores the key.
            def f(x, dummy_key):
                return -v(x)

        # stein discrepancy (per-particle); divergence of f is estimated with
        # a single-sample Hutchinson estimator: div f ≈ zᵀ (∇(zᵀf))(x).
        # NOTE(review): this estimator is used unconditionally here —
        # self.use_hutchinson is not consulted in this method.
        def h(x, dlogp_x, key):
            zkey, fkey = random.split(key)
            z = random.normal(zkey, (d,))
            zdf = grad(lambda _x: np.vdot(z, f(_x, fkey)))
            div_f = np.vdot(zdf(x), z)
            #div_f = np.trace(jacfwd(f)(x, fkey))
            sd = np.vdot(f(x, fkey), dlogp_x) + div_f
            l2 = np.vdot(f(x, fkey), f(x, fkey))
            aux = {
                "sd": sd,
                "l2": l2,
            }
            return -sd + l2 * self.lambda_reg, aux

        keys = random.split(key, n)
        loss, aux = vmap(h)(particles, dlogp, keys)
        loss = loss.mean()
        aux = {k: v.mean() for k, v in aux.items()}
        # Diagnostics: compare mean field magnitude against mean score magnitude.
        fnorm = optax.global_norm(jnp.mean(vmap(f)(particles, keys), axis=0))
        pnorm = optax.global_norm(jnp.mean(dlogp, axis=0))
        aux.update({"loss": loss,
                    "l1_diff": fnorm - pnorm,
                    "l1_ratio": fnorm / pnorm})
        # # add L1 term
        # if self.l1_weight:
        #     loss = loss + self.l1_weight * np.abs(jnp.mean(vmap(f)(particles) - dlogp))
        return loss, aux

    def gradient(self, params, particles, aux=False):
        """
        Plug-in particle update method. No dropout.
        Update particles via particles = particles - eps * v(particles)
        args:
            params: pytree of neural net parameters
            particles: array of shape (n, d)
            aux: bool
        """
        v = vmap(self.get_field(particles, params, dropout=False))
        if aux:
            return v(particles), {}
        else:
            return v(particles)

    def grads(self, particles):
        """Same as `self.gradient` but uses state"""
        return self.gradient(self.get_params(), particles)

    def done(self):
        """converts rundata into arrays"""
        # model_params / gradient_norms are pytrees; keep them unconverted.
        self.rundata = {
            k: v if k in ["model_params", "gradient_norms"] else np.array(v)
            for k, v in self.rundata.items()
        }
class KernelGradient():
    """Computes the SVGD approximation to grad(KL), ie
    phi*(y) = E[grad(log p)(y) k(x, y) + div(k)(x, y)]"""
    def __init__(self,
                 target_logp: callable = None,  # TODO replace with dlogp supplied as array
                 get_target_logp: callable = None,
                 kernel=kernels.get_rbf_kernel,
                 bandwidth=None,
                 scaled=False,
                 lambda_reg=1/2,
                 use_hutchinson: bool = False):
        """get_target_log is a callable that takes in a batch of data
        (can be any pytree of jnp.ndarrays) and returns a callable logp
        that computes the target log prob (up to an additive constant).
        scaled: whether to rescale gradients st. they match
        (grad(logp) - grad(logp))/(2 * lambda_reg) in scale

        Exactly one of target_logp / get_target_logp must be given;
        otherwise a ValueError is raised.
        """
        if target_logp:
            assert not get_target_logp
            # Wrap the fixed logp so both code paths expose the same interface.
            self.get_target_logp = lambda *args: target_logp
        elif get_target_logp:
            self.get_target_logp = get_target_logp
        else:
            # BUG FIX: original `return ValueError(...)` returned the exception
            # object instead of raising it; also the concatenated message was
            # missing a space ("mustbe given").
            raise ValueError("One of target_logp and get_target_logp must "
                             "be given.")
        self.bandwidth = bandwidth
        self.kernel = kernel
        self.lambda_reg = lambda_reg
        self.rundata = {}
        self.scaled = scaled
        self.use_hutchinson = use_hutchinson

    def get_field(self, inducing_particles, batch=None):
        """Return (-phistar, bandwidth): the negated SVGD update field."""
        target_logp = self.get_target_logp(batch)
        # Fall back to the median heuristic when no bandwidth was fixed.
        bandwidth = self.bandwidth if self.bandwidth else kernels.median_heuristic(inducing_particles)
        kernel = self.kernel(bandwidth)
        phi = stein.get_phistar(kernel, target_logp, inducing_particles)
        return utils.negative(phi), bandwidth

    def gradient(self, batch, particles, aux=False):
        """Compute approximate KL gradient.
        args:
            batch: minibatch data used to estimate logp (can be None)
            particles: array of shape (n, d)
            aux: when True, also return a dict with bandwidth and per-particle logp.
        """
        target_logp = self.get_target_logp(batch)
        v, h = self.get_field_scaled(particles, batch) if self.scaled \
            else self.get_field(particles, batch)
        if aux:
            return vmap(v)(particles), {"bandwidth": h,
                                        "logp": vmap(target_logp)(particles)}
        else:
            return vmap(v)(particles)

    def get_field_scaled(self, inducing_particles, batch=None):
        """Like get_field, but rescaled so the update magnitude matches the KSD."""
        hardcoded_seed = random.PRNGKey(0)  # TODO seed should change across iters
        target_logp = self.get_target_logp(batch)
        bandwidth = self.bandwidth if self.bandwidth else kernels.median_heuristic(inducing_particles)
        kernel = self.kernel(bandwidth)
        phi = stein.get_phistar(kernel, target_logp, inducing_particles)
        l2_phi_squared = utils.l2_norm_squared(inducing_particles, phi)
        if self.use_hutchinson:
            ksd = stein.stein_discrepancy_hutchinson(hardcoded_seed, inducing_particles, target_logp, phi)
        else:
            ksd = stein.stein_discrepancy(inducing_particles, target_logp, phi)
        alpha = ksd / (2*self.lambda_reg*l2_phi_squared)
        return utils.mul(phi, -alpha), bandwidth
class EnergyGradient():
    """Pure SGLD / Langevin drift: grad(log p)(x) without injected noise."""
    def __init__(self,
                 target_logp,
                 lambda_reg=1/2):
        self.target_logp = target_logp
        self.lambda_reg = lambda_reg
        self.rundata = {}

    def target_score(self, x):
        """Score of the target at x, scaled by 1 / (2 * lambda_reg)."""
        scale = 2 * self.lambda_reg
        return grad(self.target_logp)(x) / scale

    def get_field(self, inducing_particles):
        """Return vector field used for updating, grad(log p)(x)$
        (without noise). `inducing_particles` is accepted only for
        interface parity with the other gradient classes."""
        return utils.negative(self.target_score)

    def gradient(self, _, particles, aux=False):
        """Compute gradient used for SGD particle update"""
        field = vmap(self.get_field(particles))
        out = field(particles)
        return (out, {}) if aux else out
|
from __future__ import absolute_import
from celery import shared_task
from jobapplications.management.commands.import_crawlresults import Command
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def add(x, y):
    """Return x + y, logging the operation."""
    # Lazy %-style args: formatting is deferred until the record is actually
    # emitted (idiomatic for logging; original eagerly called .format()).
    logger.info('Adding %s + %s', x, y)
    return x + y
@shared_task
def mul(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
@shared_task
def xsum(numbers):
    """Return the sum of an iterable of numbers."""
    total = sum(numbers)
    return total
@shared_task
def crawl():
    """Run the crawl-results import management command as a celery task."""
    command = Command()
    command.handle()
import shutil
import os
import sys

# Copy the Allure history files from the last generated report into the
# results directory, so trend graphs persist across Jenkins builds.
# Paths are hard-coded for this Jenkins workspace.
HISTORY_SRC = "c:/Users/Sachin/.jenkins/workspace/my_jenkins_pytest/allure-report/history"
HISTORY_DST = "c:/Users/Sachin/.jenkins/workspace/my_jenkins_pytest/allure-results/history"

try:
    files = os.listdir(HISTORY_SRC)
    print(files)
    for name in files:
        # os.path.join instead of string concatenation for path building.
        shutil.copy(os.path.join(HISTORY_SRC, name), HISTORY_DST,
                    follow_symlinks=True)
except Exception as e:
    # Best-effort: a missing history dir must not fail the build.
    print("Error message : " + format(e))
#!/usr/bin/python3
import json
import math
from distutils import util
import multiprocessing
from types import FrameType
import PIL
# from numpy.core.fromnumeric import repeat
import pyopencl as cl
import numpy as np
import os
import matplotlib.pyplot as plt
# from PIL import Image
import matplotlib.animation as animation
from multiprocessing import Pool
from multiprocessing import Process
# from multiprocessing import Queue
from multiprocessing import set_start_method
# from multiprocessing import get_context
from PIL import Image
import time
import datetime as dt
from datetime import datetime
import math
import concatenate as coca
from colorama import Fore
import shutil
# import julia_parm as s
class opencl_py:
    """Wrapper around a pyopencl context/queue that compiles and runs the
    julia-set kernels."""
    # Set to '1' so openCL compile errors are shown.
    PYOPENCL_COMPILER_OUTPUT = '1'
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = PYOPENCL_COMPILER_OUTPUT

    def __init__(self, platform, func, OUTPUT_SIZE_IN_PIXELS_X, OUTPUT_SIZE_IN_PIXELS_Y,
                 CX, CY, RGB, FLAG_ROTATE, SPEEDF):
        """
        Args:
            platform: index into cl.get_platforms().
            func: kernel flavor, "julia" or "julia_c".
            OUTPUT_SIZE_IN_PIXELS_X/Y: frame dimensions.
            CX, CY: center of the complex-plane window.
            RGB: 3 or 4 color channels.
            FLAG_ROTATE: animate the julia c-value.
            SPEEDF: max delta of the c-value animation.
        Raises:
            ValueError: on an out-of-range platform index or unknown func.
        """
        self.OUTPUT_SIZE_IN_PIXELS_X = OUTPUT_SIZE_IN_PIXELS_X
        self.OUTPUT_SIZE_IN_PIXELS_Y = OUTPUT_SIZE_IN_PIXELS_Y
        self.CX = CX
        self.CY = CY
        self.RGB = RGB
        self.FLAG_ROTATE = FLAG_ROTATE
        self.SPEEDF = SPEEDF
        platforms = cl.get_platforms()
        # BUG FIX: original used `assert("...msg...")` (a non-empty string,
        # which can never fail) and an off-by-one `>` comparison.
        if platform >= len(platforms):
            raise ValueError("Selected platform %d doesn't exist" % platform)
        devices = platforms[platform].get_devices()
        # Create context for GPU/CPU
        self.ctx = cl.Context(devices)
        # Create queue for each kernel execution, here we only use 1 device
        self.queue = cl.CommandQueue(self.ctx, devices[0],
                                     cl.command_queue_properties.PROFILING_ENABLE)
        if func == "julia":
            filen = "julia_rgb.cl"
        elif func == "julia_c":
            filen = "julia_rgb_complex.cl"
        else:
            # BUG FIX: unknown func previously fell through to a NameError.
            raise ValueError("Unknown kernel function %r" % func)
        self.OPENCL_CODE_PATH = os.path.join("worker", filen)

    def compile(self, marcos=None, writeProcessedOpenCLCode=True):
        """Read the .cl source, re-define the given #define macros, and build.

        Args:
            marcos: dict of macro name -> value (as strings) to substitute.
                BUG FIX: default was the `dict` *type* (truthy), which crashed
                on `marcos.items()`; None is the proper "no macros" default.
            writeProcessedOpenCLCode: dump the substituted source as
                processed.cl next to the original for debugging.
        """
        ori_src = ""
        with open(self.OPENCL_CODE_PATH, "r") as rf:
            ori_src += rf.read()
        proc_src = ""
        for line in ori_src.splitlines():
            if marcos:  # process all the needed macros
                for k, v in marcos.items():
                    if line.startswith("#define " + k + " "):
                        line = "#define " + k + " " + v  # re-define macro
                        # Deleting then breaking is safe: iteration stops here.
                        del marcos[k]
                        break
            proc_src += line + "\n"
        if marcos:
            # Anything left over never matched a #define in the kernel source.
            print("Error! No matched marcos in " + self.OPENCL_CODE_PATH + " :")
            for k, v in marcos.items():  # BUG FIX: .iteritems() is Python 2 only
                print(k)
        if writeProcessedOpenCLCode:
            with open(os.path.join(os.path.dirname(self.OPENCL_CODE_PATH),
                                   "processed.cl"), "w", encoding='utf-8') as f:
                f.write(proc_src)
        print("COMPILING KERNEL..")
        # Kernel function instantiation
        self.prg = cl.Program(self.ctx, proc_src).build()
        print("KERNEL COMPILED")

    def run_julia(self, input_i, thre, x_range, y_range, jiter):
        """Render one frame on the device; returns uint32 array (X, Y, RGB).

        Args:
            input_i: animation parameter (angle) driving the c-value motion.
            thre: unused here (the threshold is compiled in as a macro).
            x_range, y_range: half-extents of the complex-plane window.
            jiter: per-pixel iteration count passed to the kernel.
        """
        def fjx(input_i):
            # x-displacement of the julia c-value when rotation is enabled.
            if self.FLAG_ROTATE:
                return np.float64(math.pow(math.cos(input_i), 2) * math.sin(input_i) * self.SPEEDF)
            return np.float64(0)

        def fjy(input_i):
            # y-displacement of the julia c-value when rotation is enabled.
            if self.FLAG_ROTATE:
                return np.float64(math.pow(math.sin(input_i), 2) * self.SPEEDF)
            return np.float64(0)

        julia_shape = (self.OUTPUT_SIZE_IN_PIXELS_X, self.OUTPUT_SIZE_IN_PIXELS_Y, self.RGB)
        if self.RGB == 3:
            workgroup_shape = (16, 16, 1)
        if self.RGB == 4:
            workgroup_shape = (8, 16, 4)
        mf = cl.mem_flags  # opencl memflag enum
        # Pixel-to-complex-plane coordinate grids, centered on (CX, CY).
        matrix_generation_domain_x = np.linspace(-x_range + self.CX, x_range + self.CX,
                                                 num=self.OUTPUT_SIZE_IN_PIXELS_X, dtype=np.float64)
        matrix_generation_domain_y = np.linspace(-y_range + self.CY, y_range + self.CY,
                                                 num=self.OUTPUT_SIZE_IN_PIXELS_Y, dtype=np.float64)
        gD_npx = np.array(matrix_generation_domain_x, dtype=np.float64)
        gD_gx = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=gD_npx)
        gD_npy = np.array(matrix_generation_domain_y, dtype=np.float64)
        gD_gy = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=gD_npy)
        input_ib = np.float64(input_i)
        input_jiter = np.float32(jiter)
        rotx_i = fjx(input_i)
        roty_i = fjy(input_i)
        result = np.empty(julia_shape, dtype=np.uint32)
        result_g = cl.Buffer(self.ctx, mf.WRITE_ONLY, result.nbytes)  # size in bytes
        start_event = cl.enqueue_marker(self.queue)
        finish_event = self.prg.julia(self.queue,
                                      julia_shape,
                                      workgroup_shape,
                                      gD_gx,
                                      gD_gy,
                                      input_ib,
                                      input_jiter,
                                      rotx_i,
                                      roty_i,
                                      result_g)
        finish_event.wait()
        rt = cl.enqueue_copy(self.queue, result, result_g)
        # Release device buffers eagerly rather than waiting for GC.
        gD_gx.release()
        gD_gy.release()
        result_g.release()
        return result
def run_julia_py(input_i, x_range, y_range, jiter, OUTPUT_SIZE_IN_PIXELS_X,
                 OUTPUT_SIZE_IN_PIXELS_Y, RGB, SPEEDF, CX, CY, FLAG_ROTATE):
    """Pure-Python (CPU) fallback renderer: a fixed-100-iteration Mandelbrot
    escape-time image over the window centered at (CX, CY).

    Escape count is written into the red channel (0..255); all other channels
    are zero. Returns a uint32 array of shape (X, Y, RGB).

    NOTE: input_i, jiter, SPEEDF and FLAG_ROTATE are accepted for signature
    parity with opencl_py.run_julia but do not affect this CPU path.
    BUG FIX: the result buffer is now zero-initialized — np.empty left the
    alpha channel as uninitialized memory when RGB == 4.
    """
    xs = np.linspace(-x_range + CX, x_range + CX,
                     num=OUTPUT_SIZE_IN_PIXELS_X, dtype=np.float64)
    ys = np.linspace(-y_range + CY, y_range + CY,
                     num=OUTPUT_SIZE_IN_PIXELS_Y, dtype=np.float64)
    result = np.zeros((OUTPUT_SIZE_IN_PIXELS_X, OUTPUT_SIZE_IN_PIXELS_Y, RGB),
                      dtype=np.uint32)
    for ix, x in enumerate(xs):
        for iy, y in enumerate(ys):
            iters = 0
            z = complex(0, 0)
            c = complex(y, x)  # (y, x) order matches the original orientation
            while iters < 100:
                z = z ** 2 + c
                if abs(z) > 2:
                    break
                iters += 1
            # Map escape count onto 0..255 in the red channel.
            result[ix, iy, 0] = int((iters / 100) * 255)
    return result
def save_file(dir, filename, result_matrix, fig, ims, ccycle, figuresize_x, figuresize_y):
    """Turn a list of frame arrays into an mp4 segment via matplotlib's
    ArtistAnimation, saving the final frame of the batch as a PNG too.

    Args:
        dir: output directory (with trailing separator — paths are concatenated).
        filename: mp4 file name for this segment.
        result_matrix: list of (X, Y, RGB) frame arrays.
        fig: matplotlib figure the frames are drawn into.
        ims: ignored on input; rebuilt locally (kept for signature compatibility).
        ccycle, figuresize_x, figuresize_y: unused here — presumably kept for
            the multiprocessing call site; verify before removing.
    """
    print("LOOP ANIMATION LOAD....")
    nr_im = len(result_matrix)
    ims = []
    for i in range(nr_im):
        # im[i].write_png("img/test"+str(i)+".png")
        # result_matrix=rescale_linear(result_matrix)
        # print(f"processing matrix {i}")
        # print(result_matrix[i].shape)
        # img.show()
        # print(f"image {i}")
        # iml.append(img)
        if i == nr_im - 1:
            # Keep a still of the last frame of this segment.
            # if julia.RGB==3:rgb='RGB'
            # if julia.RGB==4:rgb='RGBA'
            img = Image.fromarray(result_matrix[i].astype('uint8'))
            # img.show()
            img.save(dir + str(i) + ".png")
        im = plt.imshow(result_matrix[i], animated=True, interpolation="bilinear")
        # plt.show()
        ims.append([im])
    # Free the frame list before encoding to limit peak memory.
    del result_matrix
    print("END LOOP...SAVING FILE....")
    try:
        # Linux ffmpeg path; ignored where it doesn't exist.
        plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
    except:
        pass
    plt.axis("off")
    ani = animation.ArtistAnimation(fig, ims, interval=0, blit=True, repeat_delay=0, repeat=True)
    ani.save(dir + filename, fps=60, extra_args=["-threads", "4"])
    # ani.save(dir+filename,fps=60)
def actual_time(start):
    """Elapsed wall-clock time since `start`, as a timedelta."""
    now = datetime.now()
    return now - start
def printtime(actual_time):
    """Format a timedelta as H:MM:SS, dropping any fractional seconds."""
    # return dt.timedelta(actual_time)
    text = str(actual_time)
    return text.split('.', 2)[0]
def julia():
    """Main driver: read julia_parm.json, render frames in batches on the GPU,
    encode each batch to an mp4 segment in a worker process, then concatenate
    the segments into one video."""
    def calc_xrange(input_i):
        # Shrinking window half-extent as the frame index grows (zoom-in).
        return (MAX - input_i) / (MAX + input_i * 100)

    def calc_zoom(xrange, z):
        return np.float64((xrange - z) / (100 * z + xrange))

    f = "julia_parm.json"
    with open(f) as f:
        params = json.load(f)
    RGB = int(params["RGB"])
    OUTPUT_SIZE_IN_PIXELS_X = int(params["OUTPUT_SIZE_IN_PIXELS_X"])
    OUTPUT_SIZE_IN_PIXELS_Y = int(params["OUTPUT_SIZE_IN_PIXELS_Y"])  # 2k number of rows
    X_RANGE = float(params["X_RANGE"])  # initial start range of y values
    MAX_ITERATIONS = int(params["MAX_ITERATIONS"])  # If 0 then it s dinamic. Else, it s max number of iterations in single pixel opencl calculation.
    MINJITER = int(params["MINJITER"])  # if dinamic, initial number of iterations
    MAXJITER = int(params["MAXJITER"])  # if dinamic, final number of iterations
    MANDELBROT_THRESHOLD = int(params["MANDELBROT_THRESHOLD"])  # thresold of the absolute value of reiterated Z=
    MIN = int(params["MIN"])  # start point of C values
    MAX = int(params["MAX"])  # end point of C values
    FRAMEEVERY = int(params["FRAMEEVERY"])  # number of frames not calculated between two calculated
    CYCLEFRAMEBASE = int(params["CYCLEFRAMEBASE"])
    CYCLEFRAME = CYCLEFRAMEBASE * FRAMEEVERY
    SPEEDF = float(params["SPEEDF"])  # max delta of change of C value in julia set
    POWR = int(params["POWR"])  # powr of Z in iteration function
    CX = float(params["CX"])  # position of x center (good for julia set)
    CY = float(params["CY"])  # position of x center (good for julia set)
    DIR = params["DIR"]  # working dir
    MANDELBROT = int(params["MANDELBROT"])  # 1 = mandelbrot set , 0 = julia set
    FLAG_ZOOM = bool(util.strtobool(params["FLAG_ZOOM"]))  # Flag Zoom the image
    FLAG_ROTATE = bool(util.strtobool(params["FLAG_ROTATE"]))  # apply a movement to j values
    JX = float(params["JX"])
    JY = float(params["JY"])
    EXPZOOMSTART = float(params["EXPZOOMSTART"])
    EXPZOOM = float(params["EXPZOOM"])
    # set_start_method("spawn")
    # Start from a clean output directory.
    try:
        shutil.rmtree(DIR)
    except:
        pass
    os.mkdir(DIR)
    assert (RGB == 3 or RGB == 4)
    loops = MAX - MIN
    opencl_ctx = opencl_py(0, 'julia_c', OUTPUT_SIZE_IN_PIXELS_X, OUTPUT_SIZE_IN_PIXELS_Y, CX, CY, RGB, FLAG_ROTATE, SPEEDF)
    # Bake the static parameters into the kernel source as #define macros.
    opencl_ctx.compile({"OUTPUT_SIZE_IN_PIXELS_X": str(OUTPUT_SIZE_IN_PIXELS_X),
                        "OUTPUT_SIZE_IN_PIXELS_Y": str(OUTPUT_SIZE_IN_PIXELS_Y),
                        "MAX_ITERATIONS": str(MAX_ITERATIONS),
                        "MANDELBROT_THRESHOLD": str(MANDELBROT_THRESHOLD),
                        "SPEEDF": str(SPEEDF),
                        "MANDELBROT": str(MANDELBROT),
                        "POWR": str(POWR),
                        "RGB": str(RGB),
                        "JX": str(JX),
                        "JY": str(JY)
                        })
    # Figure size in inches (matplotlib default is 100 dpi).
    figuresize_y = OUTPUT_SIZE_IN_PIXELS_X / 100
    figuresize_x = OUTPUT_SIZE_IN_PIXELS_Y / 100
    screen_format = OUTPUT_SIZE_IN_PIXELS_Y / OUTPUT_SIZE_IN_PIXELS_X
    if loops > CYCLEFRAME:
        cycleframe = CYCLEFRAME
        frameevery = FRAMEEVERY
    else:
        cycleframe = CYCLEFRAMEBASE
        frameevery = 1
    start = datetime.now()
    nrloops = loops // cycleframe
    counter = 0
    ccycle = 0
    countertot = 0
    video_list = []
    jobs = []
    cloops = loops // frameevery
    # Per-frame schedules: zoom exponent, iteration budget, rotation angle.
    expl = np.linspace(EXPZOOMSTART, EXPZOOM, num=cloops, dtype=np.float64)
    jiterl = np.linspace(MINJITER, MAXJITER, num=cloops, dtype=np.float64)
    rotlnsp = np.linspace(0, math.pi * 2, num=cloops, dtype=np.float64)
    for _ in range(nrloops):
        min = MIN + ccycle * cycleframe
        max = min + cycleframe
        result_matrix = []
        cor = 1
        for i in range(min, max, frameevery):
            if FLAG_ZOOM:
                xrange = calc_xrange(i)
                zoomnp = np.linspace(0, xrange, num=cloops, dtype=np.float64)
                z = np.float64(zoomnp[counter])
                zoom = calc_zoom(xrange, z)
                # print(f"i {zoom}"):
                exp = np.float64(expl[counter])
                zoom = np.float64(zoom ** exp)
                x_range = np.float64(xrange * (zoom))
                y_range = np.float64(x_range * screen_format)
                jiter = jiterl[counter]
            else:
                xrange = x_range = X_RANGE
                y_range = x_range * screen_format
                z = 0
                zoom = 0
                jiter = 0
            perc = i / MAX
            # NOTE(review): if MIN == 0 the first iteration divides by i == 0
            # (ZeroDivisionError) — confirm MIN > 0 in julia_parm.json.
            estimated_time = actual_time(start) * (MAX / i) - actual_time(start)
            print(f"{Fore.YELLOW}{perc:.0%} {i:,}/{MAX:,} {Fore.CYAN} {cor}/{CYCLEFRAMEBASE} {Fore.RESET} {Fore.GREEN}{printtime(actual_time(start))}{Fore.RESET} {Fore.RED}{printtime(estimated_time)} {Fore.RESET} \
init xrange {xrange} desc zoom : {zoom} - new xrange {x_range}")
            # input_i = counter/cloops
            # input_i = counter
            input_i = rotlnsp[counter]
            result_matrix.append(opencl_ctx.run_julia(input_i, i / 50, x_range, y_range, jiter))
            # result_matrix.append(run_julia_py(input_i,x_range,y_range,jiter,OUTPUT_SIZE_IN_PIXELS_X,OUTPUT_SIZE_IN_PIXELS_Y,RGB,SPEEDF,CX,CY,FLAG_ROTATE))
            cor += 1
            counter += 1
            # with Pool() as pj:
            #     result = pj.map(run_julia_py(input_i,x_range,y_range,jiter,OUTPUT_SIZE_IN_PIXELS_X,OUTPUT_SIZE_IN_PIXELS_Y,RGB,SPEEDF,CX,CY,FLAG_ROTATE),[])
            #     result=np.array(result)
            #     result_matrix.append(result)
        ims = []
        fig = plt.figure(figsize=(figuresize_x, figuresize_y))
        fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        sstcy = str(ccycle).rjust(5, '0')
        filen = 'julia' + sstcy + '.mp4'
        video_list.append(filen)
        filename = 'julia' + sstcy + '.mp4'
        # Throttle encoding: at most 4 concurrent save_file worker processes.
        while True:
            pcs = len(multiprocessing.active_children())
            if pcs < 4:
                p = Process(target=save_file, args=(DIR, filename, result_matrix, fig, ims, ccycle, figuresize_x, figuresize_y,))
                jobs.append(p)
                p.start()
                break
            time.sleep(1)
        ccycle += 1
    print("WAITING FOR ALL JOBS TO FINISH...")
    for job in jobs:
        job.join()
    print("CREATING VIDEO...")
    mean_time_for_frame = actual_time(start) / counter
    out = coca.concatenate(DIR, video_list)
    if out == 0:
        print(f"{Fore.LIGHTGREEN_EX}VIDEO CREATED! {Fore.RESET} ")
    else:
        # NOTE(review): missing f-prefix — this prints the literal braces
        # instead of the colorama codes.
        print("{Fore.RED}ERROR IN VIDEO CREATION!!! {Fore.RESET} ")
    print(f"Elapsed {Fore.GREEN}{printtime(actual_time(start))} - Mean time for frame {printtime(mean_time_for_frame)}{Fore.RESET}")
# Script entry point: render and encode the full animation when run directly.
if __name__ == "__main__":
    julia()
import socket
# Target host, asked interactively once; read by scanner() below.
ip = input('[+] Ingresa la IP: ')
def scanner(puerto):
    """Return True if TCP port ``puerto`` is open on the global target ``ip``.

    Fixes vs. the original: the socket is always closed (it leaked on the
    success path), the bare ``except`` is narrowed to ``OSError`` (what
    ``connect`` actually raises), and a short timeout prevents multi-second
    hangs on filtered ports.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1.0)  # filtered ports would otherwise block for minutes
    try:
        sock.connect((ip, puerto))
        return True
    except OSError:  # refused / timed out / unreachable -> closed
        return False
    finally:
        sock.close()
# Scan the well-known port range (1-1024 inclusive) and report open ports.
for NumeroPuerto in range(1, 1025):
    print("Escaneando puerto:", NumeroPuerto)
    if scanner(NumeroPuerto):
        print('[*] puerto', NumeroPuerto, '/tcp', 'está abierto')
|
import shelve

# Reopen the shelve database created earlier and give two stored objects a
# raise, then write them back. NOTE(review): shelve (without writeback=True)
# only persists on reassignment, hence the explicit `db[key] = obj` after
# mutating each fetched object.
db = shelve.open('class-shelve')

bob = db['bob']
bob.giveRaise(.20)   # presumably a Person-like class pickled earlier — verify its module is importable
db['bob'] = bob

tom = db['tom']
tom.giveRaise(.25)
db['tom'] = tom

db.close()
'''
【程序23】
题目:打印出如下图案(菱形)
*
***
*****
*******
*****
***
*
1.程序分析:先把图形分成两部分来看待,前四行一个规律,后三行一个规律,利用双重
for循环,第一层控制行,第二层控制列。
2.程序源代码:
'''
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from sys import stdout

# Print a 7-row diamond of '*': four growing rows, then three shrinking ones.
# FIX: the original ended each row with a bare Python-2 `print` statement;
# under Python 3 that is a no-op expression, so no newline was ever emitted.
# Writing the newline through stdout works on both interpreter versions.
for i in range(4):                    # upper half, rows 0..3
    for j in range(3 - i):
        stdout.write(' ')             # left padding shrinks each row
    for k in range(2 * i + 1):
        stdout.write('*')             # star counts: 1, 3, 5, 7
    stdout.write('\n')
for i in range(4, 7):                 # lower half, rows 4..6
    for j in range(i - 3):
        stdout.write(' ')             # padding grows again
    for k in range(2 * (6 - i) + 1):
        stdout.write('*')             # star counts: 5, 3, 1
    stdout.write('\n')
import sys
import csv
class Todo():
    """Command-line todo list backed by a semicolon-delimited CSV file.

    Each line of ``todo_list.csv`` has the form ``<True|False>;<task text>``,
    where the first field records whether the task is checked (done).
    All file access now uses context managers; the original leaked file
    handles on several exception paths (e.g. a non-numeric index argument).
    """

    def __init__(self):
        # Storage file path; created on demand by missing_file().
        self.file_name = 'todo_list.csv'

    def main_menu(self):
        """Print the usage banner listing the supported flags."""
        print(' ')
        print('===========================')
        print(' ')
        print(' TODO APPLICATION ')
        print(' ')
        print(' Command line arguments: ')
        print(' ')
        print(' -l Lists all the tasks ')
        print(' -a Adds a new tasks ')
        print(' -r Removes a task ')
        print(' -c Checks a task ')
        print(' -u Unchecks a task ')
        print(' ')
        print('===========================')

    def ok_argument(self):
        """Warn and show the menu when argv[1] is not a known flag."""
        arguments = ['-l', '-a', '-r', '-c', '-u']
        if sys.argv[1] not in arguments:
            print('Unsupported argument')
            self.main_menu()

    def controller_l(self):
        """Handle ``-l``: print the task list; no extra argument allowed."""
        if len(sys.argv) == 2:
            print(self.load_list())
        else:
            print('Too many arguments were given, only give 1!')

    def controller_a(self):
        """Handle ``-a <task>``: append a new unchecked task."""
        if len(sys.argv) == 2:
            print('Unable to add: No task is provided')
        else:
            self.add_to_list(sys.argv[2])

    def controller_c(self):
        """Handle ``-c <index>``: mark a task as done."""
        if len(sys.argv) == 2:
            print('Unable to check: No index is provided')
        else:
            self.check_task()

    def controller_u(self):
        """Handle ``-u <index>``: mark a task as not done."""
        if len(sys.argv) == 2:
            print('Unable to uncheck: No index is provided')
        else:
            self.uncheck_task()

    def controller_r(self):
        """Handle ``-r <index>``: delete a task."""
        if len(sys.argv) == 2:
            print('Unable to remove: No index is provided')
        else:
            self.remove_from_list(sys.argv[2])

    def main(self):
        """Ensure the file exists, then dispatch on argv[1] (menu if absent)."""
        self.missing_file()
        if len(sys.argv) == 1:
            self.main_menu()
        else:
            self.ok_argument()
            if sys.argv[1] == '-l':
                self.controller_l()
            elif sys.argv[1] == '-a':
                self.controller_a()
            elif sys.argv[1] == '-r':
                self.controller_r()
            elif sys.argv[1] == '-c':
                self.controller_c()
            elif sys.argv[1] == '-u':
                self.controller_u()

    def load_list(self):
        """Return the numbered, formatted task list, or a friendly message."""
        output = ''
        with open(self.file_name) as f:
            for number, row in enumerate(csv.reader(f, delimiter=';'), start=1):
                output += str(number) + ' - ' + self.checked_or_not(row[0]) + ' ' + row[1] + '\n'
        if output == '':
            return 'No todos for today! :)'
        return output

    def add_to_list(self, new_todo_element):
        """Append ``new_todo_element`` as a new, unchecked task."""
        with open(self.file_name, 'a') as f:
            f.write('False;' + new_todo_element + '\n')

    def checked_or_not(self, x):
        """Render the stored 'True'/'False' flag as '[x]' or '[ ]'."""
        if x == 'True':
            return '[x]'
        return '[ ]'

    def check_task(self):
        """Mark task number argv[2] (1-based) as done and rewrite the file.

        FIX: the original left the read handle open when argv[2] was not a
        number; context managers close the file on every path.
        """
        try:
            task_number = int(sys.argv[2])
            with open(self.file_name) as f:
                rows = list(csv.reader(f, delimiter=';'))
            if rows[task_number - 1][0] == 'True':
                print('Task already checked!')
            elif rows[task_number - 1][0] == 'False':
                rows[task_number - 1][0] = 'True'
            # The file is rewritten even if the task was already checked,
            # matching the original behaviour.
            with open(self.file_name, 'w') as f:
                for row in rows:
                    f.write(row[0] + ';' + row[1] + '\n')
        except ValueError:
            print('Unable to check: Index is not a number')
        except IndexError:
            print('Unable to check: Index is out of bound')

    def uncheck_task(self):
        """Mark task number argv[2] (1-based) as not done and rewrite the file."""
        try:
            task_number = int(sys.argv[2])
            with open(self.file_name) as f:
                rows = list(csv.reader(f, delimiter=';'))
            if rows[task_number - 1][0] == 'True':
                rows[task_number - 1][0] = 'False'
            elif rows[task_number - 1][0] == 'False':
                print('Task already unchecked!')
            with open(self.file_name, 'w') as f:
                for row in rows:
                    f.write(row[0] + ';' + row[1] + '\n')
        except ValueError:
            print('Unable to uncheck: Index is not a number')
        except IndexError:
            print('Unable to uncheck: Index is out of bound')

    def remove_from_list(self, remove_n_task):
        """Delete task number ``remove_n_task`` (1-based) from the file.

        The file is rewritten even when removal failed, matching the
        original behaviour.
        """
        with open(self.file_name) as f:
            lines = f.readlines()
        try:
            lines.remove(lines[int(remove_n_task) - 1])
        except ValueError:
            print('Unable to remove: Index is not a number')
        except IndexError:
            print('Unable to remove: Index is out of bound')
        with open(self.file_name, 'w') as f:
            f.writelines(lines)

    def create_file(self):
        """Create the storage file if it does not exist.

        NOTE(review): open(..., 'a') creates a missing file, so the
        FileNotFoundError branch is effectively unreachable.
        """
        try:
            f = open('todo_list.csv', 'a')
            f.close()
        except FileNotFoundError:
            return 'File does not exists!'

    def missing_file(self):
        """Ensure the storage file exists before any command runs."""
        try:
            f = open(self.file_name, 'a')
            f.close()
        except FileNotFoundError:
            self.create_file()
# Script entry: build the app and dispatch on the command-line arguments.
# NOTE(review): runs at import time; a `if __name__ == '__main__':` guard
# would make the module importable without side effects.
todo = Todo()
todo.main()
# import sys
# import csv
#
# class Todo():
#
# def __init__(self):
# self.file_name = 'todo_list.csv'
#
# def main_menu(self):
# print(' ')
# print('===========================')
# print(' ')
# print(' TODO APPLICATION ')
# print(' ')
# print(' Command line arguments: ')
# print(' ')
# print(' -l Lists all the tasks ')
# print(' -a Adds a new tasks ')
# print(' -r Removes a task ')
# print(' -c Completes a task ')
# print(' ')
# print('===========================')
#
# def ok_argument(self):
# arguments = ['-l', '-a', '-r', '-c']
# if sys.argv[1] not in arguments:
# print('Unsupported argument')
# self.main_menu()
#
# def controller_l(self):
# if len(sys.argv) == 2:
# print(self.load_list())
# else:
# print('Too many arguments were given, only give 1!')
#
# def controller_a(self):
# if len(sys.argv) == 2:
# print('Unable to add: No task is provided')
# else:
# self.add_to_list(sys.argv[2])
#
# def controller_c(self):
# if len(sys.argv) == 2:
# print('Unable to check: No index is provided')
# else:
# self.check_task()
#
# def controller_r(self):
# if len(sys.argv) == 2:
# print('Unable to remove: No index is provided')
# else:
# self.remove_from_list(sys.argv[2])
#
# def main(self):
# self.missing_file()
# if len(sys.argv) == 1:
# self.main_menu()
# else:
# self.ok_argument()
# if sys.argv[1] == '-l':
# self.controller_l()
# elif sys.argv[1] == '-a':
# self.controller_a()
# elif sys.argv[1] == '-r':
# self.controller_r()
# elif sys.argv[1] == '-c':
# self.controller_c()
#
# def load_list(self):
# f = open(self.file_name)
# todo_list = csv.reader(f, delimiter = ';')
# output = ''
# number = 1
# for i in todo_list:
# output += (str(number) + ' - ' + self.checked_or_not(i[0]) + ' '+ i[1] + '\n')
# number += 1
# f.close()
# if output == '':
# return 'No todos for today! :)'
# else:
# return output
#
# def add_to_list(self, new_todo_element):
# f = open(self.file_name, 'a')
# f.write('False;' + new_todo_element + '\n')
# f.close()
#
# def checked_or_not(self, x):
# if x == 'True':
# return '[x]'
# else:
# return '[ ]'
#
# def check_task(self):
# f = open(self.file_name)
# check_task = csv.reader(f, delimiter = ';')
# output = []
# try:
# for i in check_task:
# output.append(i)
# if output[int(sys.argv[2])-1][0] == 'False':
# output[int(sys.argv[2])-1][0] = 'True'
# f.close()
# f = open(self.file_name, 'w')
# for i in output:
# f.write(i[0] + ';' + i[1] + '\n')
# f.close()
# except ValueError:
# print('Unable to check: Index is not a number')
# except IndexError:
# print('Unable to check: Index is out of bound')
#
# def remove_from_list(self, remove_n_task):
# f = open(self.file_name)
# remove_line = f.readlines()
# try:
# remove_line.remove(remove_line[int(remove_n_task)-1])
# except ValueError:
# print('Unable to remove: Index is not a number')
# except IndexError:
# print('Unable to remove: Index is out of bound')
# f.close()
# f = open(self.file_name, 'w')
# for i in remove_line:
# f.write(i)
# f.close()
#
# def create_file(self):
# try:
# f = open(todo_list+'.'+csv, 'a')
# f.close()
# except FileNotFoundError:
# return 'File does not exists!'
#
# def missing_file(self):
# try:
# f = open(self.file_name, 'a')
# f.close()
# except FileNotFoundError:
# self.create_file()
#
#
# todo = Todo()
# todo.main()
|
# `pass` statement demo: a syntactic placeholder used where a statement is
# required but nothing should happen.
x = 18
if x > 18:
    pass  # branch intentionally empty (and 18 > 18 is False anyway)
#!/usr/bin/python2
"""
This script makes an input file for [SweepFinder](http://people.binf.ku.dk/rasmus/webpage/sf.html). To make such file information on the outgroup/ancestral sequence is required.
Note! Chromosome number in the first column must be separated by _.
For example, chr_1 - correct, chr1 - incorrect.
# input:
#CHROM POS sample1 sample2 sample3 sample4
chr_1 113 G G G N
chr_1 117 C C N C
chr_1 122 C C N C
chr_2 137 A A T N
chr_2 139 T T T N
chr_2 148 A A T N
chr_13 161 C T C N
chr_13 170 C T C N
chr_13 174 A A A N
chr_X 104 A A A N
chr_X 204 G G G N
chr_X 574 A A A N
chr_Un1 174 A A A N
chr_Un2 104 A A A N
chr_Un4 204 G G G N
chr_Un7 574 A A A N
# ancestral:
#CHROM POS Co_ancestor
chr_1 113 G
chr_1 117 N
chr_1 122 C
chr_2 137 A
chr_2 139 T
chr_2 148 T
chr_13 161 S
chr_13 170 C
chr_13 174 A
chr_X 104 A
chr_X 204 A
chr_X 574 A
chr_Un1 174 T
chr_Un2 104 G
chr_Un4 204 C
chr_Un7 574 C
# fai:
chr_1 196 12 80 81
chr_2 1410 198 80 81
chr_13 1500 341 80 81
chr_X 10000 301 80 81
chr_Un1 20000 101 80 81
chr_Un2 30000 301 80 81
chr_Un4 40000 401 80 81
chr_Un7 50000 901 80 81
# output (There are also separate output files for each chromosome.):
position x n folded
117 3 3 1
333 1 3 0
344 2 3 0
1767 1 3 0
1776 1 3 0
3310 3 3 0
# command:
$ python makeSweepFinderInput_from_callsTab.py -i test.tab -o test -f test.fai -a ancestral.test -N 1 -m chr_X
# contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
"""
############################# modules #############################
import calls # my custom module
import collections
import re
import random
############################# options #############################
# Command-line interface (parser comes from the project's `calls` module).
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help='name of the input file', type=str, required=True)
parser.add_argument('-a', '--ancestor', help='name of the outgroup/ancestral sequence file', type=str, required=True)
parser.add_argument('-f', '--fai', help='name of the fasta.fai file', type=str, required=True)
parser.add_argument('-o', '--output', help='name of the output file', type=str, required=True)
parser.add_argument('-N', '--missing', help='number of allowed Ns', type=int, required=True)
parser.add_argument('-m', '--major_chromosomes', help='name of the last major chromosome', type=str, required=True)
parser.add_argument('-s', '--samples', help='column names of the samples to process (optional)', type=str,
                    required=False)
args = parser.parse_args()

# check if samples names are given and if all sample names are present in a header
sampleNames = calls.checkSampleNames(args.samples, args.input)

# check missing data settings
# NOTE(review): this value is shadowed later by `Ns = args.missing`; confirm
# which of the two is intended before removing either.
Ns = calls.checkMissing(args.missing)

# check the name of the last major chromosome
mChr = args.major_chromosomes.split('_')[1]
try:  # numeric chromosome ids compare as ints; sex chromosomes via ord()
    mChr = int(mChr)
except Exception:
    mChr = ord(mChr)
############################# program #############################
counter = 0           # input lines processed so far (progress reporting)
noBreak = bool(True)  # NOTE(review): never read in this script — confirm before removing
Ns = args.missing     # allowed number of 'N' genotypes per site

# .fai index: used to turn per-chromosome positions into genome-wide ones.
fai = open(args.fai, 'r')
fai_words = fai.readline().split()
fai_ch = fai_words[0]            # current chromosome name from the index
fai_start1 = 0                   # cumulative length of preceding chromosomes
fai_start2 = int(fai_words[1])   # length of the current chromosome

# Genome-wide output plus a per-chromosome output file.
output = open(args.output, 'w')
output.write("position\tx\tn\tfolded\n")
outputChr = open(fai_ch + '_' + args.output, 'w')
outputChr.write("position\tx\tn\tfolded\n")

# Ancestral/outgroup sequence, read in lock-step with the input below.
ancestFile = open(args.ancestor, 'r')
ancest_header = ancestFile.readline()
words2 = ancestFile.readline().split()
ancest_chr_pos = words2[0:2]
ancest_ch = ancest_chr_pos[0].split('_')[1]
try:  # for sex chromosomes
    ancest_ch = int(ancest_ch)
except Exception:
    ancest_ch = ord(ancest_ch)
ancest_pos = int(ancest_chr_pos[1])
ancest = words2[2]   # ancestral allele at the current ancestral-file position
print('Opening the file...')
# Main pass: walk the genotype table once, keeping the ancestral file and the
# .fai index synchronised with the current chromosome/position.
with open(args.input) as datafile:
    header_line = datafile.readline()
    header_words = header_line.split()
    # index samples
    sampCol = calls.indexSamples(sampleNames, header_words)
    for line in datafile:
        words = line.split()
        chr_pos = words[0:2]
        ch = words[0].split('_')[1]
        try:  # numeric ids compare as ints; single letters (X, Y) via ord()
            ch = int(ch)
        except Exception:
            try:
                ch = ord(ch)
            except Exception:
                ch = ch  # multi-character names (e.g. Un1) stay as strings
        pos = int(words[1])
        # track progress
        counter += 1
        if counter % 1000000 == 0:
            # FIX: was `print str(counter), "lines processed"` — a Python 2
            # print statement and a SyntaxError under Python 3. This form
            # behaves identically on both interpreters.
            print(str(counter) + " lines processed")
        # select alleles
        alleles = calls.selectSamples(sampCol, words)
        # check if one- or two-character code
        if any(["/" in gt for gt in alleles]):
            alleles = calls.twoToOne(alleles)
        # count missing data
        numAlN = collections.Counter(alleles)
        valueN = numAlN['N']
        if valueN <= Ns:  # filter by the missing-data threshold
            AllallesNoN = [i for i in alleles if i != 'N']
        else:
            continue
        # count alleles
        numAlNoN = collections.Counter(AllallesNoN)
        numAl = numAlNoN.most_common()
        # advance the ancestral file until it catches up with this position
        while (ch > ancest_ch) or (ch == ancest_ch and pos > ancest_pos):
            words2 = ancestFile.readline().split()
            if words2 == []:
                ancest = 'N'  # ancestral file exhausted
                break
            else:
                ancest_chr_pos = words2[0:2]
                ancest_ch = ancest_chr_pos[0].split('_')[1]
                try:  # for sex chromosomes
                    ancest_ch = int(ancest_ch)
                except Exception:
                    try:
                        ancest_ch = ord(ancest_ch)
                    except Exception:
                        ancest_ch = ancest_ch
                ancest_pos = int(ancest_chr_pos[1])
                ancest = words2[2]
        if ancest in 'RYMKSW':  # IUPAC ambiguity code -> the two bases
            ancest = calls.OneToTwo(ancest)[0].split('/')
        # find overlap with fai file to define chromosome borders
        if ch <= mChr:  # major chromosomes that will be split
            while chr_pos[0] != fai_ch:
                fai_words = fai.readline().split()
                if fai_words == []:
                    break
                else:
                    fai_ch = fai_words[0]
                    fai_start1 = fai_start1 + fai_start2  # cumulative offset
                    fai_start2 = int(fai_words[1])
                    outputChr = open(fai_ch + '_' + args.output, 'w')
                    outputChr.write("position\tx\tn\tfolded\n")
            posP = pos + fai_start1  # genome-wide position
        # break after major scaffolds
        else:
            break
        # polarize alleles
        n = len(AllallesNoN)
        al1 = numAl[0][0]   # most common allele
        x1 = numAl[0][1]    # ... and its count
        f = 0               # "folded" flag for the SweepFinder output
        if len(numAl) > 2:  # skip non-biallelic
            continue
        elif chr_pos == ancest_chr_pos:  # ancestral state available here
            if len(numAl) == 1:  # fixed
                if ancest == 'N':
                    f = 1
                    x = random.choice([x1, 0])
                elif al1 in ancest:
                    x = 0
                else:
                    x = n
            elif len(numAl) == 2:  # biallelic
                al2 = numAl[1][0]
                x2 = numAl[1][1]
                if (al1 in ancest) and (al2 not in ancest):
                    x = x2
                elif (al2 in ancest) and (al1 not in ancest):
                    x = x1
                else:  # ancestral state ambiguous -> fold
                    f = 1
                    x = random.choice([x1, x2])
        else:  # no ancestral information at this position -> fold
            if len(numAl) == 1:  # fixed
                f = 1
                x = random.choice([x1, 0])
            elif len(numAl) == 2:
                x2 = numAl[1][1]
                f = 1
                x = random.choice([x1, x2])
        if x == 0 and f == 0:  # skip sites with fixed ancestral alleles
            continue
        else:
            output.write("%s\t%s\t%s\t%s\n" % (posP, x, n, f))
            outputChr.write("%s\t%s\t%s\t%s\n" % (pos, x, n, f))
datafile.close()  # already closed by the `with`; kept as a harmless no-op
output.close()
outputChr.close()
fai.close()
ancestFile.close()
print('Done!')
|
import os
from collections import OrderedDict
import pandas as pd
from src.callbacks import ParamStatsStoreCallback, EpochLoggerCallback
from src.constraints import LessThanConstraint, MoreThanConstraint, RoomConstraint
from src.experiments.utils import SAMPLES_PATH
from src.experiments.utils import get_experiment_dir
from src.furnishing.room import *
from src.furnishing.room_utils import load_example_positions_for_example_room, get_example_room
from src.stop_conditions import EarlyStoppingCondition, StepsNumberStopCondition
from src.swarm_algorithms import *
from src.test_functions import Ackley, RoomFitness
# Hyper-parameter grids explored for each swarm algorithm.
TESTED_DIMENSIONALITIES = [2, 10]

QSO_PRAMS = {
    # FIX: `np.int` was deprecated and removed in NumPy 1.20+; the builtin
    # `int` is the documented replacement and yields the same dtype.
    'population_size': np.linspace(10, 200, 20, dtype=int),
    'delta_potential_length_parameter': np.linspace(0.1, 10, 20)
}
PSO_PARAMS = {
    'population_size': np.linspace(10, 200, 20, dtype=int),
    'inertia': np.linspace(0.1, 5, 20),
    # 0.7289 (a standard constriction coefficient) is injected into the grid
    # and the whole list kept sorted.
    'divergence': np.asarray(sorted(list(np.linspace(0.1, 5, 20)) + [0.7289])),
    'learning_factor_1': np.linspace(0.5, 10, 20),
    'learning_factor_2': np.linspace(0.5, 10, 20),
}
WHALE_PARAMS = {
    'population_size': np.linspace(10, 200, 20, dtype=int),
    'attenuation': np.linspace(0.1, 10, 20),
    'intensity_at_source': np.linspace(0.1, 10, 20)
}

TESTED_FUNCTION = Ackley()   # benchmark objective (project-defined)
STEPS_NUMBER = 300
# POPULATION_SIZE = 30
POPULATION_SIZE = 200        # default swarm size used across experiments
RUN_TIMES = 10               # repetitions per (parameter, value) combination
PATIENCE = 30                # early-stopping patience (steps w/o improvement)
def test_single(algorithm_constructor, constraints, params):
    """Sweep each hyper-parameter independently; yield (DataFrame, param_name).

    For every value of every parameter the algorithm runs RUN_TIMES times in
    2 and 10 dimensions; the logged score is `epoch * best`, as before.
    FIX: `DataFrame.append` (deprecated, removed in pandas 2.0) is replaced
    by accumulating row dicts and building the frame once.
    """
    columns = ['param_name', 'param_value',
               'dimension2_mean', 'dimension2_std',
               'dimension10_mean', 'dimension10_std']
    for key, all_values in params.items():
        rows = []
        for value in all_values:
            best_dim_2, best_dim_10 = [], []
            for _ in range(RUN_TIMES):
                best, epoch, worst, avg, alg = run_with_dimensionality(algorithm_constructor, constraints, 2, key,
                                                                       value)
                best_dim_2.append(epoch * best)
                best, epoch, worst, avg, alg = run_with_dimensionality(algorithm_constructor, constraints, 10, key,
                                                                       value)
                best_dim_10.append(epoch * best)
            rows.append({'param_name': key,
                         'param_value': value,
                         'dimension2_mean': np.mean(best_dim_2),
                         'dimension2_std': np.std(best_dim_2),
                         'dimension10_mean': np.mean(best_dim_10),
                         'dimension10_std': np.std(best_dim_10)})
        logger = pd.DataFrame(rows, columns=columns).round(4)
        yield logger, key
def test_steps_number(algorithm_constructor, constraints, params):
    """Like test_single, but records the steps until stopping
    (`alg._step_number`) instead of the `epoch * best` score.

    FIX: `DataFrame.append` (deprecated, removed in pandas 2.0) is replaced
    by accumulating row dicts and building the frame once.
    """
    columns = ['param_name', 'param_value',
               'dimension2_mean', 'dimension2_std',
               'dimension10_mean', 'dimension10_std']
    for key, all_values in params.items():
        rows = []
        for value in all_values:
            best_dim_2, best_dim_10 = [], []
            for _ in range(RUN_TIMES):
                best, epoch, worst, avg, alg = run_with_dimensionality(algorithm_constructor, constraints, 2, key,
                                                                       value)
                best_dim_2.append(alg._step_number)
                best, epoch, worst, avg, alg = run_with_dimensionality(algorithm_constructor, constraints, 10, key,
                                                                       value)
                best_dim_10.append(alg._step_number)
            rows.append({'param_name': key,
                         'param_value': value,
                         'dimension2_mean': np.mean(best_dim_2),
                         'dimension2_std': np.std(best_dim_2),
                         'dimension10_mean': np.mean(best_dim_10),
                         'dimension10_std': np.std(best_dim_10)})
        logger = pd.DataFrame(rows, columns=columns).round(4)
        yield logger, key
def run_with_dimensionality(algorithm_constructor, constraints, dim, key, value):
    """Run one algorithm instance in `dim` dimensions with `key` set to `value`.

    Returns (best, epoch, worst, avg, alg): the four statistics come from the
    stats callback; `alg` is the finished algorithm instance.
    """
    default_params, boundaries = get_params_and_boundaries(dim, constraints, algorithm_constructor)
    alg = algorithm_constructor(**default_params)
    setattr(alg, key, value)  # override the single swept hyper-parameter
    alg.compile(TESTED_FUNCTION.fitness_function, boundaries)
    stats_callback = ParamStatsStoreCallback()
    # NOTE(review): the literal 30 duplicates the module-level PATIENCE value.
    alg.go_swarm_go(EarlyStoppingCondition(30), [stats_callback])
    epoch, best, worst, avg = stats_callback.get_params()
    return best, epoch, worst, avg, alg
def get_params_and_boundaries(dimension, constraints, alg):
    """Build the default constructor kwargs and Ackley search boundaries.

    Returns (default_params, boundaries): kwargs appropriate for `alg`
    (PSO / Whale / QSO) and a (dimension, 2) array with every coordinate
    bounded to the Ackley domain [-32.768, 32.768].
    """
    boundaries = np.tile(np.array([-32.768, 32.768]), (dimension, 1))
    common = {
        'population_size': POPULATION_SIZE,
        'nb_features': dimension,
        'constraints': constraints,
        'seed': None,
    }
    if alg == ParticleSwarmOptimisation:
        default_params = {**common,
                          'inertia': 1.,
                          'divergence': 1.,
                          'learning_factor_1': 2.,
                          'learning_factor_2': 2.}
    elif alg == WhaleAlgorithm:
        # Analytically derived defaults for the given boundaries.
        rho, eta = WhaleAlgorithm.get_optimal_eta_and_rho_zero(boundaries)
        default_params = {**common,
                          'attenuation_of_medium': eta,
                          'intensity_at_source': rho}
    else:
        default_params = {**common,
                          'delta_potential_length_parameter': 1}
    return default_params, boundaries
def test(test_method=test_single):
    """Run the hyper-parameter sweep on the Ackley benchmark for all algorithms.

    For each algorithm, the per-parameter result tables produced by
    `test_method` are written as CSV and LaTeX into the experiment directory.
    """
    names = ['PSO',
             'Whale',
             'QSO'
             ]
    algs_params = OrderedDict({
        ParticleSwarmOptimisation: PSO_PARAMS,
        WhaleAlgorithm: WHALE_PARAMS,
        QuantumDeltaParticleSwarmOptimization: QSO_PRAMS
    })
    experiment_dir, csv_dir, latex_dir, plots_dir = get_experiment_dir()
    constraints = MoreThanConstraint(-32.768).und(LessThanConstraint(32.768))
    # Extend the Whale grids with the analytically optimal eta/rho for 2D/10D.
    # Consistency fix: membership test without `.keys()` (matches the
    # equivalent check in test_with_optimal_params).
    if WhaleAlgorithm in algs_params:
        rho2, eta2 = (WhaleAlgorithm
                      .get_optimal_eta_and_rho_zero(get_params_and_boundaries(2, constraints, WhaleAlgorithm)[1]))
        rho10, eta10 = (WhaleAlgorithm
                        .get_optimal_eta_and_rho_zero(get_params_and_boundaries(10, constraints, WhaleAlgorithm)[1]))
        algs_params[WhaleAlgorithm]['attenuation'] = np.concatenate(
            [algs_params[WhaleAlgorithm]['attenuation'], [eta2, eta10]])
        algs_params[WhaleAlgorithm]['intensity_at_source'] = np.concatenate(
            [algs_params[WhaleAlgorithm]['intensity_at_source'], [rho2, rho10]])
    for name, (alg, params) in zip(names, algs_params.items()):
        print(f'\r{alg}', end='')
        for logger, param_name in test_method(alg, constraints, params):
            postfix = '' if test_method == test_single else '-' + str(test_method.__name__)
            logger.to_csv(os.path.join(csv_dir, f'{name}-{param_name}{postfix}.csv'), index=False,
                          float_format='%.4f')
            logger.to_latex(os.path.join(latex_dir, f'{name}-{param_name}{postfix}.tex'), index=False,
                            float_format='%.4f')
    print()
def get_room_boundaries(room):
    """Per-furniture optimisation bounds: x in [0,1], y in [0,1], angle in [-1,1].

    One (x, y, angle) bound triple is emitted for every entry of
    `room.params_to_optimize`.
    """
    triple = np.array([(0, 1), (0, 1), (-1, 1)])
    return np.tile(triple, (len(room.params_to_optimize), 1))
def get_algorithms_with_optimal_params(room):
    """Map each algorithm class to its tuned kwargs for the room problem.

    The numeric values are the best settings found by the parameter sweeps.
    """
    nb_features = len(room.params_to_optimize.flatten())
    constraints = RoomConstraint(room)
    common_population_size = 200
    alg = OrderedDict({
        ParticleSwarmOptimisation: dict(
            population_size=common_population_size,
            nb_features=nb_features,
            constraints=constraints,
            inertia=0.6158,
            divergence=0.7289,
            learning_factor_1=3.0,
            learning_factor_2=4.0,
            seed=None
        ),
        WhaleAlgorithm: dict(
            population_size=common_population_size,
            nb_features=nb_features,
            constraints=constraints,
            attenuation_of_medium=7.3947,
            intensity_at_source=1.6632,
            seed=None
        ),
        QuantumDeltaParticleSwarmOptimization: dict(
            # Consistency fix: previously used the module-level POPULATION_SIZE
            # (same value, 200) while the other entries used the local constant.
            population_size=common_population_size,
            nb_features=nb_features,
            constraints=constraints,
            delta_potential_length_parameter=5.3105,
            seed=None
        )
    })
    return alg
def test_room_optimalization():
    """Optimise the example room with each algorithm and log carpet sizes.

    Each algorithm runs RUN_TIMES times; mean/std of the stopping epoch and
    the achievable carpet radius go to CSV/LaTeX, and the best run's
    per-epoch trace is saved for plotting.
    """
    names = ['PSO',
             'Whale',
             'QSO'
             ]
    experiment_dir, csv_dir, latex_dir, plots_dir = get_experiment_dir()
    room = get_example_room()
    algs_params = get_algorithms_with_optimal_params(room)
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # switch to row lists + pd.concat if the pandas pin is ever raised.
    logger = pd.DataFrame(columns=['algorithm', 'epochs_mean', 'epochs_std', 'carpet_size_mean', 'carpet_size_std'])
    plot_logging_data = pd.DataFrame(columns=['algorithm', 'epochs', 'carpet_size'])
    for name, (alg_constructor, param) in zip(names, algs_params.items()):
        print(f'Algorithm: {name}')
        carpet_size, epochs = [], []
        current_best = np.inf   # lowest fitness seen across the repeated runs
        to_log = None           # epoch trace belonging to the best run
        for _ in range(RUN_TIMES):
            print(f'\rRun: {_ + 1}/{RUN_TIMES}', end='')
            alg = alg_constructor(**param)
            room = get_example_room()  # fresh room instance for every run
            templates = load_example_positions_for_example_room(SAMPLES_PATH)
            rfit = RoomFitness(room)
            boundaries = get_room_boundaries(room)
            alg.compile(rfit.fitness_function, boundaries, templates)
            epoch_logger_callback = EpochLoggerCallback()
            solution = alg.go_swarm_go(EarlyStoppingCondition(PATIENCE).maybe(StepsNumberStopCondition(500)),
                                       [epoch_logger_callback])
            room.apply_feature_vector(solution)
            carpet_size.append(room.get_possible_carpet_radius())
            epochs.append(alg._step_number)
            if alg.current_global_fitness < current_best:
                current_best = alg.current_global_fitness
                to_log = epoch_logger_callback.logger
        print()
        plot_logging_data = plot_logging_data.append(pd.DataFrame({
            'algorithm': pd.Series(data=[name] * len(to_log['epoch'])),
            'epochs': to_log['epoch'],
            'carpet_size': to_log['best']
        }), ignore_index=True)
        logger = logger.append({'algorithm': name,
                                'epochs_mean': np.mean(epochs),
                                'epochs_std': np.std(epochs),
                                'carpet_size_mean': np.mean(carpet_size),
                                'carpet_size_std': np.std(carpet_size), },
                               ignore_index=True)
    logger = logger.round(4)
    logger.to_csv(os.path.join(csv_dir, 'room-optimization.csv'), index=False,
                  float_format='%.4f')
    plot_logging_data.to_csv(os.path.join(csv_dir, 'plot-example-optimization.csv'), index=False)
    logger.to_latex(os.path.join(latex_dir, 'room-optimization.tex'), index=False,
                    float_format='%.4f')
def test_with_optimal_params(test_method=test_steps_number):
    """Run the sweep with `test_method` defaulting to the step-count metric.

    NOTE(review): this duplicates test() except for the default argument;
    consider delegating to test(test_method) to remove the copy.
    """
    names = ['PSO',
             'Whale',
             'QSO'
             ]
    algs_params = OrderedDict({
        ParticleSwarmOptimisation: PSO_PARAMS,
        WhaleAlgorithm: WHALE_PARAMS,
        QuantumDeltaParticleSwarmOptimization: QSO_PRAMS
    })
    experiment_dir, csv_dir, latex_dir, plots_dir = get_experiment_dir()
    constraints = MoreThanConstraint(-32.768).und(LessThanConstraint(32.768))
    # Extend the Whale grids with the analytically optimal eta/rho for 2D/10D.
    if WhaleAlgorithm in algs_params:
        rho2, eta2 = (WhaleAlgorithm
                      .get_optimal_eta_and_rho_zero(get_params_and_boundaries(2, constraints, WhaleAlgorithm)[1]))
        rho10, eta10 = (WhaleAlgorithm
                        .get_optimal_eta_and_rho_zero(get_params_and_boundaries(10, constraints, WhaleAlgorithm)[1]))
        algs_params[WhaleAlgorithm]['attenuation'] = np.concatenate(
            [algs_params[WhaleAlgorithm]['attenuation'], [eta2, eta10]])
        algs_params[WhaleAlgorithm]['intensity_at_source'] = np.concatenate(
            [algs_params[WhaleAlgorithm]['intensity_at_source'], [rho2, rho10]])
    for name, (alg, params) in zip(names, algs_params.items()):
        print(f'\r{alg}', end='')
        for logger, param_name in test_method(alg, constraints, params):
            postfix = '' if test_method == test_single else '-' + str(test_method.__name__)
            logger.to_csv(os.path.join(csv_dir, f'{name}-{param_name}{postfix}.csv'), index=False,
                          float_format='%.4f')
            logger.to_latex(os.path.join(latex_dir, f'{name}-{param_name}{postfix}.tex'), index=False,
                            float_format='%.4f')
    print()
if __name__ == '__main__':
    # Other experiments, toggled manually:
    # test()
    # test(test_steps_number)
    test_room_optimalization()
|
# Registry of chat commands: key -> display name, help text, and usage line.
commands = {
    'help': {
        'name': '$help',
        'help_message': 'Display help about a command.',
        'usage': '$help <command>'
    },
    '8ball': {
        'name': '$8ball',
        'help_message': 'Answer a yes/no question.',
        'usage': '$8ball <yes/no question>'
    },
    'ban': {
        'name': '$ban',
        'help_message': 'Ban an user. Users can\'t rejoin when banned. Requires the ban user permission.',
        'usage': '$ban <username> OR $ban <nickname>'
    },
    'unban': {
        'name': '$unban',
        'help_message': 'Unban an user. They need to be invited afterwards. Requires the ban user permission.',
        # FIX: previously read '$ban <username>' (copy-paste error).
        'usage': '$unban <username>'
    },
    'server_invite': {
        'name': '$server_invite',
        'help_message': 'Generate a link to invite people to this server.',
        'usage': '$server_invite'
    },
    'add_bot': {
        'name': '$add_bot',
        'help_message': 'Generate a link to add this bot to a server you own.',
        'usage': '$add_bot'
    },
    'broadcast': {
        'name': '$broadcast',
        'help_message': 'Send a message to all text channels you have write permission in.',
        'usage': '$broadcast <text you want to broadcast>'
    },
    'dice': {
        'name': '$dice',
        'help_message': 'Roll a dice with a specified number of sides. The number should be 2 or more.',
        'usage': '$dice <number>'
    }
}
def command_exists(command):
    """Return True if `command` (with or without '$' prefixes) is registered."""
    key = command.strip('$')
    return key in commands
async def get_command_help(command):
    """Return the info dict for `command`, or None when it is unknown."""
    key = command.strip('$')
    if command_exists(key):
        return commands[key]
    return None
async def get_commands():
    """Return the display names ('$xxx') of all registered commands."""
    return [info['name'] for info in commands.values()]
|
import re
def cleanhtmltags(raw_html):
    """Convert raw HTML to plain text.

    Strips every tag and collapses runs of whitespace into single spaces.
    FIX: the original called ``withouttags.split()`` and discarded the
    result — a dead statement; the join/split below does the normalising.
    """
    tag_pattern = re.compile('<.*?>')  # non-greedy: match each tag separately
    without_tags = re.sub(tag_pattern, '', raw_html)
    return ' '.join(without_tags.split())
|
from __future__ import unicode_literals
from django import forms
from DjangoUeditor.forms import UEditorWidget
from DjangoUeditor.models import UEditorField
from .models import Post
class ImageUploadForm(forms.Form):
    """Minimal form carrying a single uploaded image file."""
    image = forms.ImageField()
class TopicUEditorForm(forms.Form):
    """Form for creating a topic with a rich-text body.

    Content uses the UEditor widget; uploaded images and files are stored
    under 'images/' and 'files/' respectively.
    """
    Name = forms.CharField(label=u'topic title')
    Content = forms.CharField(label=u'content',
                              widget=UEditorWidget({'width': 600, 'height': 500, 'imagePath': 'images/',
                                                    'filePath': 'files/'})
                              )
class PostReplyForm(forms.Form):
    """Form for replying to a post with a smaller rich-text editor."""
    Content = forms.CharField(
        widget=UEditorWidget({'width': 600, 'height': 300, 'imagePath': 'images/',
                              'filePath': 'files/'})
    )
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
import mysql.connector
from datetime import datetime
from keywordQuery import query
# In[2]:
# MySQL connection used to persist scraped jobs (database `jobscraper`).
# NOTE(review): empty root password — acceptable for local development only.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="jobscraper"
)

# In[3]:
# Search keywords, loaded from the project's query helper.
listkeyword = query()
# In[4]:
def nexturl(url, x):
    """Return the search URL for results page `x`.

    Page 1 appends the `paged` query parameter; subsequent pages assume the
    URL already ends with the previous page number and swap in the new one.
    Only valid for sequential single-digit pages, as used by the scraper.
    """
    if x == 1:
        return url + '&paged=' + str(x)
    return url[:-1] + str(x)
# In[5]:
def farojobscraper(key):
    """Scrape farojob.net search results for keyword `key` (pages 1-4).

    Returns a list of dicts with the job metadata plus the detail-page
    description fetched via detailscraper().

    FIX: the original fetched the page only once before the pagination loop,
    so pages 2-4 were computed and printed but never downloaded; the request
    now happens inside the loop. Also renames locals that shadowed the
    builtins `list` and `dict`, and narrows the bare excepts.
    """
    results = []
    url = 'https://www.farojob.net/?post_type=noo_job&s=java&type'
    url = url.replace('java', key)
    for x in range(1, 5):
        url = nexturl(url, x)
        print(url)
        forajob = requests.get(url)
        src = BeautifulSoup(forajob.content, 'lxml')
        jobs = src.find_all('article', class_='loadmore-item')
        for job in jobs:
            title = job.find('h2', class_='loop-item-title').text.strip()
            link = job.find('h2', class_='loop-item-title').a['href']
            try:
                company = job.find('span', class_='hidden').text.strip()
            except AttributeError:  # tag absent on some listings
                company = 'NULL'
            secteur = job.find('span', class_='job-type').text.strip()
            try:
                location = job.find('em', itemprop="jobLocation").text.strip()
            except AttributeError:
                location = 'NULL'
            date = job.find('span', class_='job-date-ago').text.strip()
            datepublication = job.find('time', class_="entry-date").span.text.strip()
            profi = detailscraper(link)
            results.append({
                'title': title,
                'link': link,
                'company': company,
                'secteur': secteur,
                'location': location,
                'date': date,
                'datepublication': datepublication,
                'description': profi['detail Post'],
            })
    return results
# In[6]:
def detailscraper(link):
    """Fetch a job's detail page; return {'detail Post': description}.

    'NULL' is used when the description block is missing (the bare except is
    narrowed to AttributeError, which is what a failed find() raises).
    """
    detail = requests.get(link)
    src2 = BeautifulSoup(detail.content, 'html.parser')
    try:
        infosurpost = src2.find('div', itemprop="description").text.strip()
    except AttributeError:  # description div absent on the page
        infosurpost = 'NULL'
    detaidict = {
        'detail Post': infosurpost,
    }
    return detaidict
# In[ ]:
def faroscrap():
    """Scrape every keyword and insert the results into the `farojob` table.

    Inserts are best-effort: rows that fail (e.g. duplicates) are skipped so
    one bad row does not abort the whole run. The bare except is narrowed to
    Exception, and the loop-invariant SQL string is hoisted out of the loop.
    """
    sql = ("INSERT INTO farojob(title , link , company , secteur , location , "
           "date,datepublication,description,keyword,datescraping) "
           "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    for lk in listkeyword:
        scraped = farojobscraper(lk['keyword'])  # renamed: shadowed builtin `list`
        mycursor = mydb.cursor()
        now = datetime.now()
        for l in scraped:
            values = (l['title'], l['link'], l['company'], l['secteur'], l['location'],
                      l['date'], l['datepublication'], l['description'], lk['keyword'], now)
            try:
                mycursor.execute(sql, values)
                mydb.commit()
            except Exception:  # best-effort insert; skip failing rows
                pass
# In[ ]:
# In[ ]:
|
## Richard Salvaty
## 3002
## 10/15/2018
## 10 minutes
## Declare nTemp as string
## Declare Cel as integer
## Declare Fah as float
## Set nTemp = 'y'
## while (nTemp == 'y')
## Print "Enter degrees in Celsius to be converted to Fahrenheit: "
## Get Cel
## Set Fah = (9/5 * Cel) + 32
## Print "Degrees in Celsius entered: " + Cel
## Print "Degrees in Fahrenheit: " + Fah
## Print "Would you like to enter another temperature for conversion? Y/N: "
## Get nTemp
## End While
nTemp = 'y'  # loop sentinel: 'y' keeps the conversion loop running
# Simple Celsius -> Fahrenheit conversion loop.
while nTemp == 'y':
    Cel = int(input("Enter degrees in Celsius to be converted to Fahrenheit: "))
    Fah = (9 / 5 * Cel) + 32  # standard C -> F formula
    print("Degrees in Celsius entered: ", Cel, '\n', "Degrees in Fahrenheit: ", format(Fah, ".1f"))
    # Ask whether to convert another temperature.
    # FIX: the prompt says Y/N but only lowercase 'y' continued the loop;
    # lower-casing the answer accepts 'Y' as well.
    nTemp = input("Would you like to enter another temperature for conversion? Y/N: ").lower()
|
from django.contrib import admin
from django.contrib.admin import ModelAdmin

from viewer.models import Bookcase, Book, BookAuthor, BookcaseSlot

# Register the library models with default admin options so each is
# browsable/editable in the Django admin site.
admin.site.register(Bookcase, ModelAdmin)
admin.site.register(BookcaseSlot, ModelAdmin)
admin.site.register(BookAuthor, ModelAdmin)
admin.site.register(Book, ModelAdmin)
|
import subprocess
import shutil
import os
import time
from pprint import pprint
from collections import namedtuple
import glob
import datetime
import psutil
# Per-probe processing parameters: backup destination plus the first and
# last pipeline modules bounding the processing run.
npx_paramstup = namedtuple('npx_paramstup', ['backup_location', 'start_module', 'end_module'])

backup_drive = r'T:'
default_start = 'copy_raw_data'
default_end = 'copy_processed_data'
network_location = r"\\sd5\sd5"

# Acquisition directories (one per probe) mapped to their parameters.
npx_directories = {r'J:\800972939_366122_20181224_probeA': npx_paramstup(backup_drive, default_start, default_end),
                   r'K:\800972939_366122_20181224_probeB': npx_paramstup(backup_drive, default_start, default_end),
                   r'L:\800972939_366122_20181224_probeC': npx_paramstup(backup_drive, default_start, default_end)
                   }
# todo modify so it will check all backup drives?
# Per-file descriptor: relative path, whether to upload, and which sorting
# step produces it.
data_file_params = namedtuple('data_file_params', ['relpath', 'upload', 'sorting_step'])
def check_data_processing(npx_directory,npx_params):
    """Validate the backups of one probe's acquisition/sorting output and,
    when every check passes, delete the redundant local copies.

    @param npx_directory: path to the raw acquisition directory for one probe
    @param npx_params: npx_paramstup; a legacy tuple with backup1/backup2
        fields is also tolerated (see the compatibility shim below)
    @return: True when all checks passed and cleanup ran, else False
    """
    # Compatibility shim: older parameter tuples exposed backup1/backup2
    # instead of backup_location.
    try:
        a = npx_params.backup_location
    except Exception as E:
        npx_params_old = npx_params
        npx_params = npx_paramstup(npx_params_old.backup1,None,None)
        network_location = npx_params_old.backup2
    basepath,session = os.path.split(npx_directory)
    probe = session.split("_")[-1]
    local_sort_dir = os.path.join(basepath,session+"_sorted")
    relpaths, data_files = make_files()
    try:
        backup_temp = os.path.join(npx_params.backup_location, 'temp.txt')
    except Exception as E:
        backup_temp = os.path.join(npx_params.backup1, 'temp.txt')
    base_temp = os.path.join(basepath, 'temp.txt')
    # Touch a sentinel file on both drives so samefile() below can detect
    # whether "backup" and "acquisition" are really the same volume.
    # NOTE(review): `.close` is referenced but never called (missing parens),
    # so the handles are closed only by GC — presumably intended `.close()`.
    try:
        open(backup_temp, 'a').close
    except Exception as E:
        pass
    try:
        open(base_temp, 'a').close
    except Exception as E:
        pass
    try:
        backup_drive_error = os.path.samefile(base_temp, backup_temp)
        if backup_drive_error:
            print('ERROR: the backup drive seems to be the same as the acquisition drive')
    except Exception as E:
        # NOTE(review): this re-raise makes the two lines after it unreachable;
        # looks like leftover debug code — confirm whether the raise should stay.
        print('catch_specific')
        raise(E)
        backup_drive_error = True
        print('ERROR: Backup drive test failed, backup drive could be the same as the acquisition drive')
    # --- Compare raw-data sizes: acquisition drive vs local backup drive ---
    raw_size_backup = 0
    try:
        raw_backup_path = os.path.join(npx_params.backup_location,session)
    except Exception as E:
        raw_backup_path = os.path.join(npx_params.backup_location,session)
    if os.path.isdir(raw_backup_path):
        #print('The raw data has been backed up for '+probe)
        raw_size_backup = dir_size(raw_backup_path)
    else:
        print('ERROR: Could not find raw data_backup for '+probe+':')
    raw_size = 0
    if os.path.isdir(npx_directory):
        raw_size = dir_size(npx_directory)
        if raw_size == raw_size_backup:
            pass
            #print('the raw data backup matches the acquired data for '+probe)
        else:
            print('ERROR: the raw data backup does not match the acquired data for '+probe)
    else:
        print('WARNING: could not find the acquired data to compare to backup size')
    # --- Collect size/mtime of every expected processing file on each of the
    # three locations (acquisition, local backup, network share) ---
    missing_files_list = []
    acquisition_size_dict = {}
    backup_size_dict = {}
    network_size_dict = {}
    acquisition_mtime_dict = {}
    backup_mtime_dict = {}
    network_mtime_dict = {}
    for data_file,file_params in data_files.items():
        relpath = relpaths[file_params.relpath]
        local_path = os.path.join(basepath,session+"_sorted",relpath,data_file)
        found = False
        try:
            acquisition_size_dict[data_file] = os.path.getsize(local_path)
            acquisition_mtime_dict[data_file] = os.path.getmtime(local_path)
            found = True
        except FileNotFoundError as E:
            pass
        backup_path = os.path.join(npx_params.backup_location,session+"_sorted",relpath,data_file)
        try:
            backup_size_dict[data_file] = os.path.getsize(backup_path)
            backup_mtime_dict[data_file] = os.path.getmtime(backup_path)
            found = True
        except FileNotFoundError as E:
            pass
        network_path = os.path.join(network_location,session+"_sorted",relpath,data_file)
        try:
            network_size_dict[data_file] = os.path.getsize(network_path)
            network_mtime_dict[data_file] = os.path.getmtime(network_path)
            found = True
        except FileNotFoundError as E:
            pass
        if not found:
            missing_files_list.append(data_file)
    if missing_files_list:
        print('ERROR: Some processing files were not found on any drive for '+probe)
        modules_missing = set()
        for file in missing_files_list:
            module = data_files[file].sorting_step
            modules_missing.add(module)
        print('The missing files are from the following modules:')
        print(modules_missing)
    else:
        pass
        #print('All the processing files were found on either the acquisition drive or the backup drive for '+probe)
    # --- Sanity-check the extracted continuous.dat size against the raw npx
    # size (expected to land between 1.2x and 1.3x) ---
    if raw_size_backup or raw_size:
        npx_size = max(raw_size_backup,raw_size)
        key = r"continuous\Neuropix-3a-100.0\continuous.dat"
        try:
            cont_back = backup_size_dict[key]
        except KeyError:
            cont_back = 0
        try:
            cont = acquisition_size_dict[key]
        except KeyError:
            cont = 0
        if cont or cont_back:
            continuous_size = max(cont,cont_back)
            if continuous_size > 1.2*npx_size and continuous_size < 1.3*npx_size:
                pass
                #print('The continuous size seems correct relative to the npx size for '+probe)
            else: print('ERROR: The continuous file seems too large or too small relative to the npx for '+probe)
        else:print('ERROR: unable to find continuous to compare to npx for '+probe)
    else: print('ERROR: Unable to find npx to compare to continuous file for '+probe)
    # --- Verify every file exists on the local backup and matches the
    # acquisition copy in size and mtime ---
    missing_backup_list = []
    for file in data_files:
        if file not in backup_size_dict:
            missing_backup_list.append(file)
    if missing_backup_list:
        print('ERROR: Some files are not backed up locally for '+probe+':')
        print(missing_backup_list)
    else:
        pass
        #print('all files are backed up locally for '+probe)
    mismatch_size_list = []
    for file in data_files:
        if file in acquisition_size_dict and file in backup_size_dict:
            if not(acquisition_size_dict[file]==backup_size_dict[file]) or not (acquisition_mtime_dict[file]==backup_mtime_dict[file]):
                mismatch_size_list.append(file)
    if mismatch_size_list:
        print('ERROR: Some processing files have different sizes or modification times on the backup drive and acquisition drive for '+probe)
        print(mismatch_size_list)
    else:
        pass
        #print('all files on the backup drive match the size and modification times of those on the acquisition drive for '+probe)
    # --- Cleanup: only delete local copies when every check above passed ---
    sucess = False
    if not(missing_files_list) and not(mismatch_size_list) and not(missing_backup_list) and not backup_drive_error:
        if raw_size == raw_size_backup and raw_size>0:
            print("Deleting NPX files from the acquisition drive for ",probe)
            safe_delete_npx(basepath, npx_params.backup_location, session)
        # Files not needed for the day-2 upload can also be removed locally.
        delete_files = {}
        for file,file_params in data_files.items():
            if file_params.upload == False and file in acquisition_size_dict:
                delete_files[file]=file_params
        if delete_files:
            print("Deleting files not needed for day 2 upload from the acquisition drive for ",probe)
            for delete_file,delete_file_params in delete_files.items():
                relpath = relpaths[delete_file_params.relpath]
                safe_delete_file(basepath, npx_params.backup_location, session, relpath, delete_file)
            #sorted_dir = os.path.join(basepath,session+"_sorted")
            #print(sorted_dir)
            #backup_npx_dir = os.path.join(npx_params.backup_location,session+"_sorted")
            #print(backup_npx_dir)
            #for root, dirs, files in os.walk(sorted_dir):
            #    for name in files:
            #        local_path = os.path.join(root,name)
            #        print(local_path)#
            #        backup_path = os.path.join(backup_npx_dir,name)#
                    #print(backup_path)
                    #if os.path.isfile(local_path):
                    #    try:
                    #        if os.path.getsize(local_path)==os.path.getsize(backup_path) and not os.path.samefile(local_path,backup_path):
                    #            os.remove(local_path)
                    #            #print(file +" deleted")
                    #        else:
                    #            print(name +" not deleted, backup error")
                    #    except Exception as E:
                    #        print(name +" not deleted, other error")
                    #        print(E)
                #for dire in dirs:
                #    try:
                #        os.rmdir(os.path.join(root,dire))
                #    except Exception as E:
                #        pass
            try:
                shutil.rmtree(os.path.join(basepath,session+"_sorted", 'logs'))
            except Exception as E:
                print('Could not delete log folder')
            print('Marking as sucess!')
            sucess = True
        else:
            pass
            #print("No files found to delete for day 2 upload")
    # --- Report unexpected leftovers in the sorted directory ---
    extra_files = []
    for dirpath, dirnames, filenames in os.walk(local_sort_dir):
        for f in filenames:
            if f not in data_files and not(f == 'continuous.dat'):
                extra_files.append(f)
    if extra_files:
        print("ERROR: Some extra files were found for ",probe)
        print(extra_files)
    else:
        pass
        #print("No extra files were found for ", probe)
    # --- Repeat the backup comparisons against the network share (SD4) ---
    net_raw_size_backup = 0
    net_raw_backup_path = os.path.join(network_location,session)
    if os.path.isdir(net_raw_backup_path):
        net_raw_size_backup = dir_size(net_raw_backup_path)
        if net_raw_size_backup == raw_size_backup:
            pass
            #print('The raw data is on SD4 and the size matches the backup for '+probe)
        else:
            print("ERROR: The raw data on SD4 is not the same size as the raw data on the backup drive for ", probe)
    else:
        print('ERROR: Could not find raw data on SD4 for for '+probe+':')
    missing_net_backup_list = []
    for file in data_files:
        if file not in network_size_dict:
            missing_net_backup_list.append(file)
    if missing_net_backup_list:
        print('ERROR: Some files are not backed up on SD4 for '+probe+':')
        print(missing_net_backup_list)
    else:
        pass#print('all files are backed up on SD4 for '+probe)
    net_mismatch_size_list = []
    for file in data_files:
        if file in network_size_dict and file in backup_size_dict:
            if not(network_size_dict[file]==backup_size_dict[file]) or not(network_mtime_dict[file]==backup_mtime_dict[file]):
                net_mismatch_size_list.append(file)
    if net_mismatch_size_list:
        print('ERROR: Some processing files have different sizes or modification times on the backup drive and SD4 for '+probe)
        print(net_mismatch_size_list)
    elif not(network_size_dict):
        print('WARNING: Could not check file sizes on SD4 since they do not exist')
    else:
        pass
        #print('All files on the backup drive match the size and modification times of those on SD4 for '+probe)
    return sucess
def safe_delete_npx(base_path, backup, session):
    """Delete the raw session directory from the acquisition drive, file by
    file, keeping any file whose backup copy is missing or differs in size.

    @param base_path: parent directory of the session on the acquisition drive
    @param backup: root of the backup drive
    @param session: session directory name
    """
    local_npx_dir = os.path.join(base_path,session)
    backup_npx_dir = os.path.join(backup,session)
    for root, dirs, files in os.walk(local_npx_dir):
        for name in files:
            local_path = os.path.join(root,name)
            # NOTE(review): the backup path joins only the file name, not the
            # path of `root` relative to the session dir — files in
            # subdirectories would be compared against the wrong location.
            # Presumably the raw session directory is flat; confirm.
            backup_path = os.path.join(backup_npx_dir,name)
            if os.path.isfile(local_path):
                try:
                    # Only remove when an equal-sized, physically distinct
                    # backup copy exists.
                    if os.path.getsize(local_path)==os.path.getsize(backup_path) and not os.path.samefile(local_path,backup_path):
                        os.remove(local_path)
                        #print(file +" deleted")
                    else:
                        print(name +" not deleted, backup error")
                except Exception as E:
                    print(name +" not deleted, other error")
                    print(E)
        # Remove now-empty subdirectories; ignore failures (non-empty dirs).
        for dire in dirs:
            try:
                os.rmdir(os.path.join(root,dire))
            except Exception as E:
                pass
    try:
        os.rmdir(local_npx_dir)
    except Exception as E:
        print("WARNING: Raw directory not deleted, probably non-empty")
def safe_delete_file(base_path, backup, session, relpath, data_file):
    """Remove one sorted-data file from the acquisition drive, but only when
    an identically-sized, physically distinct copy exists on the backup drive.
    """
    sorted_rel = os.path.join(session + "_sorted", relpath, data_file)
    local_path = os.path.join(base_path, sorted_rel)
    backup_path = os.path.join(backup, sorted_rel)
    try:
        sizes_match = os.path.getsize(local_path) == os.path.getsize(backup_path)
        if sizes_match and not os.path.samefile(local_path, backup_path):
            os.remove(local_path)
            #print(data_file + " deleted")
        else:
            print(data_file + " not deleted, backup error")
    except Exception as E:
        print(data_file + " not deleted, other error")
        print(E)
def dir_size(dir_path):
    """Return the total size in bytes of every file under *dir_path*, recursively."""
    return sum(
        os.path.getsize(os.path.join(root, name))
        for root, _dirs, names in os.walk(dir_path)
        for name in names
    )
def make_files():
    """Build the static catalogue of expected sorting-pipeline outputs.

    @return: (relpaths, data_files) where relpaths maps a short key to the
        subdirectory under <session>_sorted, and data_files maps each expected
        file name to its data_file_params (relpath key, whether it is uploaded
        for day-2 processing, and the pipeline step that produces it).
    """
    relpaths = {
        'lfp':r"continuous\Neuropix-3a-100.1",
        'spikes':r"continuous\Neuropix-3a-100.0",
        'events':r"events\Neuropix-3a-100.0\TTL_1",
        'empty':""
        }
    data_files = {
        # Files with upload=True are kept for the day-2 upload ...
        "probe_info.json":data_file_params('empty',True,'depth_estimation'),
        "channel_states.npy":data_file_params('events',True,'extraction'),
        "event_timestamps.npy":data_file_params('events',True,'extraction'),
        r"continuous\Neuropix-3a-100.1\continuous.dat":data_file_params('empty',True,'extraction'),
        "lfp_timestamps.npy":data_file_params('lfp',True,'sorting'),
        "amplitudes.npy":data_file_params('spikes',True,'sorting'),
        "spike_times.npy":data_file_params('spikes',True,'sorting'),
        "mean_waveforms.npy":data_file_params('spikes',True,'mean waveforms'),
        "spike_clusters.npy":data_file_params('spikes',True,'sorting'),
        "spike_templates.npy":data_file_params('spikes',True,'sorting'),
        "templates.npy":data_file_params('spikes',True,'sorting'),
        "whitening_mat.npy":data_file_params('spikes',True,'sorting'),
        "whitening_mat_inv.npy":data_file_params('spikes',True,'sorting'),
        "templates_ind.npy":data_file_params('spikes',True,'sorting'),
        "similar_templates.npy":data_file_params('spikes',True,'sorting'),
        "metrics.csv":data_file_params('spikes',True,'metrics'),
        "channel_positions.npy":data_file_params('spikes',True,'sorting'),
        "cluster_group.tsv":data_file_params('spikes',True,'sorting'),
        "channel_map.npy":data_file_params('spikes',True,'sorting'),
        "params.py":data_file_params('spikes',True,'sorting'),
        # ... files with upload=False may be deleted locally once backed up.
        "probe_depth.png":data_file_params("empty",False,'depth estimation'),
        r"continuous\Neuropix-3a-100.0\continuous.dat":data_file_params('empty',False,'extraction'),
        "residuals.dat":data_file_params('spikes',False,'median subtraction'),
        "pc_features.npy":data_file_params('spikes',False,'sorting'),
        "template_features.npy":data_file_params('spikes',False,'sorting'),
        "rez2.mat":data_file_params('spikes',False,'sorting'),
        "rez.mat":data_file_params('spikes',False,'sorting'),
        "pc_feature_ind.npy":data_file_params('spikes',False,'sorting'),
        "template_feature_ind.npy":data_file_params('spikes',False,'sorting')
        }
    return relpaths, data_files
def check_all_data(npx_directories):
    """Run check_data_processing for every probe; True only when all succeed."""
    results = [check_data_processing(directory, params)
               for directory, params in npx_directories.items()]
    return all(set(results))
def check_all_space(npx_directories):
    """Warn when any acquisition drive has less than 200 GB free.

    @param npx_directories: mapping of npx data directory -> parameters; only
        the drive component of each key is inspected.
    """
    space_dict = {}
    for npx_dir in npx_directories:
        drive, tail = os.path.splitdrive(npx_dir)
        free_space = psutil.disk_usage(drive).free
        if free_space < (200*(10**9)):  # 200 GB minimum for a new acquisition
            space_dict[drive] = free_space
    if space_dict:
        # Bug fix: the original iterated .items() but printed the whole
        # (drive, free_space) tuple where the drive name belongs.
        for drive, free_space in space_dict.items():
            print("ERROR: Not enough space on ",drive,"for acquisition")
    else: print("There is enough space for acquisition on all drives")
def postprocessing(npx_directories):
    """Run the full post-acquisition pipeline: verify/clean the data, generate
    the upload batch file, and check free disk space.

    @return: True when every probe directory passed check_data_processing.
    """
    # Bug fix: the original returned the undefined name `sucess`, raising
    # NameError on every call; return the aggregated data-check result instead.
    success = check_all_data(npx_directories)
    # NOTE(review): generate_batch_file is not defined in this module —
    # presumably provided elsewhere; confirm before running.
    generate_batch_file(npx_directories)
    check_all_space(npx_directories)
    return success
# Script entry point: check and clean up the configured probe directories.
if __name__ == "__main__":
    postprocessing(npx_directories)
from django.http import HttpResponse
from django.template.loader import get_template
from core.decorators import UserLogguedDecorator
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from users.models import User as SystemUser
from subjects.models import Subject
from requests_users.models import RequestUser
@UserLogguedDecorator(type_user='Monitor')
def view(req):
    """Monitor dashboard view listing pending tutoring requests.

    On POST with 'accept': marks the request accepted and emails the student.
    On POST with 'decline': marks the request declined.
    Always renders the list of still-pending ('Requested') requests.
    """
    show_alert = False
    message = ''
    # The decorator guarantees req.user maps to a Monitor-type SystemUser.
    user = SystemUser.objects.get(user=req.user, type_user='Monitor')
    image_profile = user.image_profile
    if req.method == 'POST' and 'accept' in req.POST:
        id_solicitude = req.POST.get('id_solicitude')
        solicitude = RequestUser.objects.get(pk=id_solicitude)
        solicitude.state = 'Accepted'
        solicitude.save()
        # Gather names/addresses for the notification email.
        email_monitor = user.user.email
        estudiante = solicitude.user.user.first_name
        email_estudiante = solicitude.user.user.email
        monitor = solicitude.monitor.user.first_name
        asignatura = solicitude.subject.name
        fecha = solicitude.requested_time
        message = "Para verificar ingresa a la lista de monitorias acepatadas."
        body = render_to_string(
            'dashboard/monitors/email_content.html', {
                'estudiante': estudiante,
                'email': email_monitor,
                'monitor': monitor,
                'asignatura': asignatura,
                'fecha': fecha,
                'message': message,
            },
        )
        # Notify the student that their request was accepted (HTML email).
        email_message = EmailMessage(
            subject='Monitoria aceptada',
            body=body,
            to=[email_estudiante],
        )
        email_message.content_subtype = 'html'
        email_message.send()
    elif req.method == 'POST' and 'decline' in req.POST:
        solicitude = RequestUser.objects.get(pk=id_solicitude)
        solicitude.state = 'Declined'
        solicitude.save()
    # NOTE(review): this import appears unused below — leftover from an edit?
    from datetime import datetime
    # Requests still awaiting a decision by this monitor.
    solicitudes = RequestUser.objects.filter(
        monitor=user,
        state='Requested'
    )
    template = get_template('dashboard/monitors/requested.html')
    ctx = {
        'message': message,
        'id_monitor': user.pk,
        'show_alert': show_alert,
        'solicitudes': solicitudes,
        'show_monitor_items': True,
        'image_profile':image_profile
    }
    return HttpResponse(template.render(ctx, req))
|
import os, requests, uuid, json
#if 'e6821924ed6e4a1d9faea4a0d355d0a0' in os.environ:
# subscriptionKey = os.environ['e6821924ed6e4a1d9faea4a0d355d0a0']
#else:
# print('Environment variable for TRANSLATOR_TEXT_KEY is not set.')
# exit()
# If you want to set your subscription key as a string, uncomment the next line.
# SECURITY: subscription key is hardcoded here and committed to source —
# this credential should be moved to an environment variable (see the
# commented-out block above) and the exposed key rotated.
subscriptionKey = 'a7eaeca7c32f43018a851e29639cb09e'
# If you encounter any issues with the base_url or path, make sure
# that you are using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
# Translator Text API v3 endpoint, translating into English.
base_url = 'https://api.cognitive.microsofttranslator.com'
path = '/translate?api-version=3.0'
params = '&to=en'
constructed_url = base_url + path + params
# X-ClientTraceId is a per-process request identifier used by Azure support.
headers = {
    'Ocp-Apim-Subscription-Key': subscriptionKey,
    'Content-type': 'application/json',
    'X-ClientTraceId': str(uuid.uuid4())
}
# You can pass more than one object in body.
def traducir(texto):
    """Translate *texto* into English via the Azure Translator v3 REST API.

    @param texto: source text (any language; auto-detected by the service)
    @return: the translated string
    """
    body = [{ 'text' : texto }]
    request = requests.post(constructed_url, headers=headers, json=body)
    response = request.json()
    # Pretty-print the raw API response for debugging.
    jsonresult = json.dumps(response, sort_keys=True, indent=4, separators=(',', ': '))
    # Bug fix: `print jsonresult` was a Python 2 print statement — a
    # SyntaxError under Python 3 (the rest of this file uses print()).
    print(jsonresult)
    return response[0]['translations'][0]['text']
# Demo call at import time: performs a live network request on a sample phrase.
print (traducir("hola , buen dia"))
|
import numpy as np
from math import pi, sin, cos, sqrt
import tqdm
import matplotlib.pyplot as plt
# Link lengths of the planar arm (all unit length).
l1 = 1
l2 = 1
l3 = 1
l4 = 1
def J(q):
    """Jacobian matrix of the end-effector state variables (x, y, phi)
    with respect to the joint angles q (3x1 column vector).
    """
    z = np.array([
        [
            # NOTE(review): `q[0,0] + q[0,0] + q[2,0]` breaks the
            # q0+q1+q2 pattern used everywhere else — suspected typo for
            # `q[0,0] + q[1,0] + q[2,0]`; confirm against the derivation.
            -l1*sin(q[0,0]) + sqrt(2)*l2*cos(q[0,0] + q[1,0] + pi/4) + l2*cos(q[0,0] + q[0,0] + q[2,0]),
            # NOTE(review): `q[0,0] + q[2,0] + pi/4` differs from the
            # matching term in row 2 (`q[0,0] + q[1,0] + pi/4`) — verify.
            l2*(sqrt(2)*cos(q[0,0] + q[2,0] + pi/4) + cos(q[0,0] + q[1,0] + q[2,0])),
            l2*cos(q[0,0] + q[1,0] + q[2,0])
        ],
        [
            l1*cos(q[0,0]) + sqrt(2)*l2*sin(q[0,0] + q[1,0] + pi/4) + l2*sin(q[0,0] + q[1,0] + q[2,0]),
            l2*(sqrt(2)*sin(q[0,0] + q[1,0] + pi/4) + sin(q[0,0] + q[1,0] + q[2,0])),
            l2*sin(q[0,0] + q[1,0] + q[2,0])
        ],
        [
            # phi = q0 + q1 + q2 (- const), so its row of partials is all 1s.
            1,
            1,
            1,
        ],
    ])
    return z
def ee(q):
    """Forward kinematics: end-effector pose (x, y, phi) for joint angles q.

    NOTE(review): the parenthesisation looks suspect — the entire sum sits
    inside cos(...)/sin(...), whereas the Jacobian in J() corresponds to
    `l1*cos(q0) + sqrt(2)*l2*sin(...) + ...`. Confirm the intended grouping.
    """
    x = l1*cos(q[0,0] + sqrt(2)*l2*sin(q[0,0] + q[1,0] + pi/4) + l2*sin(q[0,0] + q[1,0] + q[2,0]))
    y = l1*sin(q[0,0] - sqrt(2)*l2*cos(q[0,0] + q[1,0] + pi/4) - l2*cos(q[0,0] + q[1,0] + q[2,0]))
    phi = q[0,0] + q[1,0] + q[2,0] - 2*pi
    return np.array([[x, y, phi]]).T
xd = np.array([[
    3,
    0,
    pi/2
]]).T  # Target end-effector pose (x, y, phi)
def inv_kinema(xd, q_before=None):
    """Iterative inverse kinematics via the Jacobian pseudo-inverse.

    @param xd: desired end-effector pose, 3x1 column (x, y, phi)
    @param q_before: optional initial joint angles; when None, each outer
        restart draws a fresh random configuration
    @return: joint angles (3x1) after the last attempt

    NOTE(review): the success `break` only exits the inner refinement loop;
    the outer loop then restarts (discarding the solution when q_before is
    None). Presumably an outer break is missing — confirm intent.
    """
    for j in tqdm.tqdm(range(10)):
        #print(j)
        if q_before is None:
            q = [np.random.rand()*2*pi for k in range(3)]
            q = np.array([q]).T
        else:
            q = q_before
        for i in range(100):
            x = ee(q)
            error = xd - x  # pose error
            # Gauss-Newton style update with step size 0.1.
            dq = np.linalg.pinv(J(q)) @ error
            q = q + 0.1 * dq
            if np.linalg.norm(xd[0:2, :] - x[0:2, :]) < 0.01:
                if abs(xd[2, 0] - x[2, 0]) < 0.000001:
                    #print(j, "converged!")
                    break
    return q
if __name__ == '__main__':
    # Homogeneous 3x3 planar transforms for each joint/link: rotations
    # (D1, D3, D5, D7) alternate with fixed translations along x (D2, D4,
    # D6, D8); composing them yields each joint frame in world coordinates.
    def D1(q):
        # Rotation by joint angle q0.
        return np.array([
            [cos(q[0,0]), -sin(q[0,0]), 0],
            [sin(q[0,0]), cos(q[0,0]), 0],
            [0, 0, 1],
        ])
    def D2(q):
        # Translation along x by link length l1.
        return np.array([
            [1, 0, l1],
            [0, 1, 0],
            [0, 0, 1],
        ])
    def D3(q):
        # Rotation by joint angle q1.
        return np.array([
            [cos(q[1,0]), -sin(q[1,0]), 0],
            [sin(q[1,0]), cos(q[1,0]), 0],
            [0, 0, 1],
        ])
    def D4(q):
        # Translation along x by link length l2.
        return np.array([
            [1, 0, l2],
            [0, 1, 0],
            [0, 0, 1],
        ])
    def D5(q):
        # Fixed rotation of 270 degrees between link segments.
        theta = 270/180*pi
        return np.array([
            [cos(theta), -sin(theta), 0],
            [sin(theta), cos(theta), 0],
            [0, 0, 1],
        ])
    def D6(q):
        # Translation along x by link length l3.
        return np.array([
            [1, 0, l3],
            [0, 1, 0],
            [0, 0, 1],
        ])
    def D7(q):
        # Rotation by joint angle q2.
        return np.array([
            [cos(q[2,0]), -sin(q[2,0]), 0],
            [sin(q[2,0]), cos(q[2,0]), 0],
            [0, 0, 1],
        ])
    def D8(q):
        # Translation along x by link length l4.
        return np.array([
            [1, 0, l4],
            [0, 1, 0],
            [0, 0, 1],
        ])
    D_func_all = [D1, D2, D3, D4, D5, D6, D7, D8]
    # NOTE(review): this nested guard is redundant — we are already inside
    # the module-level `if __name__ == '__main__':` block.
    if __name__ == "__main__":
        # NOTE(review): q has 7 rows but only q[0..2] are ever read — the
        # extra rows look unintentional; confirm.
        q = np.zeros((7,1))
        temp_ee = ee(q)
        print(q)
        fig = plt.figure()
        ax = fig.add_subplot()
        ax.scatter(temp_ee[0, 0], temp_ee[1, 0], label = 'sol')
        ax.scatter(xd[0, 0], xd[1, 0], marker = '*', s=500, label = 'desired')
        ax.grid(True)
        # Accumulate world-frame transforms: D_w_all[j] = D1 @ ... @ D(j+1).
        D_all = [Di(q) for Di in D_func_all]
        for j, D in enumerate(D_all):
            if j == 0:
                D_w_all = [D]
            else:
                D_w_all.append(D_w_all[j-1] @ D)
        # The translation column (indices [0,2], [1,2]) of each transform is
        # the position of that joint; plot the arm as a polyline.
        x = []
        y = []
        for D in D_w_all:
            x.append(D[0, 2])
            y.append(D[1, 2])
        ax.plot(x, y, label='leg')
        for j, D_w in enumerate(D_w_all):
            ax.scatter(
                D_w[0, 2], D_w[1, 2], label = str(j+1),
            )
        ax.legend()
        ax.set_aspect('equal')
        ax.set_xlim(-4, 4)
        ax.set_ylim(-4, 4)
        plt.show()
|
import requests
from envparse import env
from flask import Flask
from flask_restplus import Api, Resource, fields
# Load TELEGRAM_* settings from a local .env file into the environment.
env.read_envfile()
app = Flask(__name__)
api = Api(app)
# Request/response schema for the /send_message/ endpoint.
model = api.model('Model', {
    'text': fields.String,
})
class Text:
    """Observable text value: each update is pushed to all attached observers."""

    def __init__(self):
        self._text = None
        self._observers = set()

    def attach(self, observer):
        """Subscribe *observer* to future text updates."""
        self._observers.add(observer)

    def deatach(self, observer):  # (sic) name kept for API compatibility
        """Unsubscribe *observer*; raises KeyError if it was never attached."""
        self._observers.remove(observer)

    def get_data(self):
        """Return the most recently set text (None if never set)."""
        return self._text

    def set_data(self, text):
        """Store *text* and broadcast it to every observer."""
        self._text = text
        self.notify(text)

    def notify(self, text):
        """Forward *text* to each attached observer's send_message()."""
        for subscriber in self._observers:
            subscriber.send_message(text)
class ObserverBase:
    """Interface for Text observers; subclasses must implement send_message."""

    def send_message(self, text):
        raise NotImplementedError()
class Telegram(ObserverBase):
    """Observer that forwards every text update to a Telegram group chat."""

    def __init__(self, token, group_id):
        self.url = f'https://api.telegram.org/bot{token}/'
        self.group_id = group_id

    def send_message(self, text):
        """POST *text* to the group via the Bot API; returns the HTTP response."""
        payload = {'chat_id': self.group_id, 'text': text}
        return requests.post(self.url + 'sendMessage', data=payload)
@api.route('/send_message/')
class RestService(Resource):
    """REST endpoint: accepts {'text': ...} and broadcasts it to observers."""

    @api.expect(model)
    @api.marshal_with(model)
    def post(self):
        text = api.payload.get('text', '')
        if not text:
            return {'message': 'Error'}, 400
        # Pushes the text to every attached observer (e.g. the Telegram bot).
        text_class.set_data(text)
        # NOTE(review): marshal_with(model) shapes the response to the
        # {'text': ...} schema, but this dict only has 'message' — the
        # marshalled body will carry text=None. Confirm intended payload.
        return {'message': 'ok'}, 201
# Wire the shared observable to a Telegram observer configured from the
# environment, then start the development server.
text_class = Text()
text_class.attach(Telegram(env('TELEGRAM_TOKEN'),
                  env('TELEGRAM_GROUP', cast=int)))
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
|
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView
from django.views.generic import DetailView
from django.views.generic import ListView
from django.views.generic import UpdateView
from core.datatables_tools.datatables_tools import DatatablesListView
class TemplateDataMixin(object):
    """CBV mixin that picks a generic template by view type and injects the
    common context keys (page_title, section_title, model_name, ...).
    """
    # Optional URL name reversed into context['create_url'] when set.
    create_reversible_url = None
    template_name = None
    def get_template_names(self):
        # Explicit template wins; otherwise map the view class to one of the
        # generic templates. Order matters: list views before form views.
        cls = self.__class__
        if self.template_name:
            return self.template_name
        elif issubclass(cls, (DatatablesListView, ListView)):
            return ['_list.html']
        elif issubclass(cls, (UpdateView, CreateView)):
            return ['_form.html']
        elif issubclass(cls, DetailView):
            return ['_detail.html']
        else:
            raise ImproperlyConfigured("{0}: TemplateDataMixin only supports DatatablesListView, ListView, UpdateView, "
                                       "CreateView and DetailView".format(self.__class__.__name__))
    def _get_value(self, attr):
        # NOTE(review): a falsy-but-set value (e.g. empty string) also
        # triggers the error — confirm that is acceptable.
        value = getattr(self, attr, None)
        if value:
            return value
        else:
            raise ImproperlyConfigured("You are using the TemplateDataMixin in {0} but you are not passing "
                                       "'page_title' and 'section_title' attributes".format(self.__class__.__name__))
    def _get_model_name(self):
        # Explicit model_name attribute wins over the model class name.
        model_name = getattr(self, 'model_name', None)
        if not model_name:
            model_name = self.model.__name__
        return model_name.lower()
    def _get_class_modal(self):
        # CSS class applied to the "create" button; defaults to an AJAX modal.
        class_modal_create = getattr(self, 'class_modal_create', None)
        if not class_modal_create:
            class_modal_create = "ajax-modal"
        return class_modal_create
    def _get_and_reverse_create_url(self):
        return reverse(self.create_reversible_url)
    def get_context_data(self, **kwargs):
        context = super(TemplateDataMixin, self).get_context_data(**kwargs)
        context.update({
            'page_title': self._get_value('page_title'),
            'section_title': self._get_value('section_title'),
            'model_name': self._get_model_name(),
            'class_modal_create': self._get_class_modal(),
        })
        if self.create_reversible_url:
            context.update({
                'create_url': self._get_and_reverse_create_url()
            })
        return context
class MessageMixin(object):
    """Form-view mixin that flashes standard success/error messages."""
    # Default messages (user-facing, hence kept in Spanish).
    success_message = _("La información ha sido actualizada correctamente")
    failure_message = _("El formulario tiene errores, por favor revise la información.")
    def form_valid(self, form):
        messages.success(self.request, self.success_message)
        return super(MessageMixin, self).form_valid(form)
    def form_invalid(self, form):
        messages.error(self.request, self.failure_message)
        return super(MessageMixin, self).form_invalid(form)
class GetToPostMixin(object):
    """Mixin that funnels GET requests into the POST handler.

    FIXME: this is insecure; it exists only because the rady datatables do
    not yet support this kind of URL.
    """
    def get(self, request, *args, **kwargs):
        # Bug fix: the original tested `hasattr(super, 'get')` — i.e. the
        # builtin `super` type, which has no 'get' — so the parent handler
        # was never invoked. Bind the proxy first and test that instead.
        parent = super(GetToPostMixin, self)
        if hasattr(parent, 'get'):
            parent.get(request, *args, **kwargs)
        return self.post(request, *args, **kwargs)
|
# -*- coding: utf-8 -*-
import ui
from random import randint
import sqlite3 as lite
import sys
i=1
# Load the word list (prompt, expected answer) pairs from the bundled SQLite
# database at import time.
con = lite.connect('./my.sqlite')
with con:
    cur = con.cursor()
    cur.execute("SELECT * FROM PrePri")
    rows = cur.fetchall()
    a=[]  # column 0: words shown to the user
    b=[]  # column 1: expected answers
    for row in rows:
        a.append(row[0])
        b.append(row[1])
# Index of the current word.
# NOTE(review): the 0..184 range is hardcoded — presumably the table holds
# 185 rows; should be derived from len(a) to stay in sync with the DB.
c=randint(0,184)
v=[]
x=0  # quiz state: 0 = idle, 1 = running
r=0  # error counter for the current round
def start(sender):
    """Toggle the quiz: show the current word, or reset the labels when
    stopping. Bound to a Pythonista UI button; *sender* is the button.
    """
    global x,r
    label = sender.superview['label1']
    label2 = sender.superview['label2']
    if x==0:
        # Start: show the current word.
        x=1
        label.text=a[c]
    else:
        # Stop: reset state and restore the placeholder captions.
        x=0
        r=0
        label.text='Слова'
        label2.text='Ошибки'
def but(sender):
    """Handle an answer button press: advance to the next word on a correct
    answer, otherwise increment and display the error counter. The button's
    title is the answer being proposed.
    """
    global c,r,x
    t=sender.title
    label = sender.superview['label1']
    label2 = sender.superview['label2']
    if x==0:
        # Quiz not running: ignore presses.
        pass
    else:
        if t==b[c]:
            # Correct: pick a new random word and display it.
            c=randint(0,184)
            label.text=a[c]
        else:
            r+=1
            label2.text='Ошибки'+str(r)
# Load the Pythonista UI layout and present it as a sheet.
v = ui.load_view()
v.present('sheet')
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8
import os
import sys
import base64
import quopri
from mailbox import mbox
import email.header
from lxml.html import fromstring, tostring
from lxml.html import builder as E
from termcolor import colored
from charset import CJK_align
from myutils import filesize
# Inline CSS injected into every saved HTML mail for a readable layout.
mailStyle = '''\
body { max-width: 900px; margin: auto; padding: 1em; }
blockquote { margin-top: 1em !important; }
'''
def decodeHeader(h):
    """Decode a (possibly RFC 2047 encoded) mail header into text.

    Falls back to ascii when no charset is declared, maps the commonly
    mislabelled gb2312 to its gb18030 superset, and degrades gracefully on
    unknown charsets and on headers that were never encoded.
    """
    raw, enc = email.header.decode_header(h)[0]
    enc = enc or 'ascii'
    if enc.lower() == 'gb2312':
        enc = 'gb18030'
    try:
        return raw.decode(enc)
    except AttributeError:
        # Already a str: the header carried no encoded word.
        return raw
    except LookupError:
        # Unknown charset label: best-effort UTF-8.
        return raw.decode('utf-8', errors='replace')
def getMailSize(mail):
    """Return the total payload size of *mail*, recursing into multiparts."""
    payload = mail.get_payload()
    if isinstance(payload, str):
        return len(payload)
    return sum(getMailSize(part) for part in payload)
def selectMail(mb):
    """Print an indexed, colored listing of mbox *mb* and return the index
    the user picks. Loops on invalid input; exits on EOF / Ctrl-C.
    """
    for i, m in enumerate(mb):
        # Prefer the display name of "Name <addr>"; else use the local part.
        who = m['From']
        if who.endswith('>'):
            who = who.split(' <', 1)[0].strip('"')
            who = decodeHeader(who)
        else:
            who = who.split('@', 1)[0]
        subj = decodeHeader(m['Subject'])
        size = getMailSize(m)
        print(colored('%3d' % i, attrs=['bold']),
              #FIXME: strip overflowed text
              colored(CJK_align(who, 12), 'blue'),
              colored('(%7s)' % filesize(size)[:-1].rstrip('i'), 'yellow'),
              subj)
    while True:
        try:
            n = int(input('选择一封邮件: '))
            if n < 0 or n >= len(mb):
                raise ValueError
            return n
        except ValueError:
            continue
        except (EOFError, KeyboardInterrupt):
            print()
            raise SystemExit
def parseSingleMail(mail):
    """Return the decoded body of the first text/html part of *mail*.

    Raises IndexError when the multipart contains no HTML alternative.
    """
    html_parts = [part for part in mail.get_payload()
                  if part.get_content_type() == 'text/html']
    return getMailContent(html_parts[0])
def saveHTMLMail(m):
    """Extract mail *m* as a browsable HTML page.

    Writes index.html (with title and inline CSS injected) plus any inline
    attachments, rewriting cid: links to the saved file names.
    """
    title = decodeHeader(m['Subject'])
    mailtype = m.get_content_type()
    if mailtype == 'multipart/alternative':
        mailbody = parseSingleMail(m)
    elif mailtype in ('multipart/related', 'multipart/mixed'):
        mails = m.get_payload()
        # Maps Content-ID -> saved file name, used to rewrite cid: links.
        cidMapping = {}
        for mail in mails:
            if mail.get_content_type() == 'multipart/alternative':
                mailbody = parseSingleMail(mail)
            else:
                try:
                    cid = mail['Content-ID'][1:-1]
                except TypeError:
                    # No Content-ID header: skip genuine attachments ...
                    if mail['Content-Disposition'] and \
                       mail['Content-Disposition'].find('attachment') != -1:
                        continue
                    # NOTE(review): this drops into an interactive REPL for
                    # any other part without a Content-ID — looks like
                    # leftover debugging; confirm whether it should remain.
                    from cli import repl
                    repl(locals())
                fname = decodeHeader(mail.get_filename())
                cidMapping[cid] = fname
                body = getMailContent(mail)
                saveFile(fname, body)
    elif mailtype == 'text/plain':
        print('plain text mail, nothing to do')
        return
    elif mailtype == 'text/html':
        mailbody = getMailContent(m)
    else:
        raise NotImplementedError('type %s not recognized' % mailtype)
    div = fromstring(mailbody)
    # NOTE(review): cidMapping is only defined in the related/mixed branch;
    # an alternative/html mail containing cid: links would raise NameError.
    for cidLink in div.cssselect('[src^="cid:"]'):
        cid = cidLink.get('src')[4:]
        cidLink.set('src', cidMapping[cid])
    div.insert(0, E.TITLE(title))
    div.insert(0, E.STYLE(mailStyle))
    mailbody_b = tostring(div, encoding='utf-8')
    saveFile('index.html', mailbody_b)
def saveFile(fname, content):
    """Write *content* to *fname*: text mode for str, binary mode for bytes.

    Fix: the original opened the file without ever closing it, leaving the
    flush/close to garbage collection; use a context manager instead.
    """
    mode = 'w' if isinstance(content, str) else 'wb'
    with open(fname, mode) as f:
        f.write(content)
def getMailContent(mail):
    """Decode a mail part's payload according to its transfer encoding.

    Returns str when the part declares a charset, raw bytes otherwise.
    Raises NotImplementedError for transfer encodings other than base64
    and quoted-printable.
    """
    rawbody = mail.get_payload()
    encoding = mail['Content-Transfer-Encoding']
    if encoding == 'base64':
        decoded = base64.decodebytes(rawbody.encode('ascii'))
    elif encoding == 'quoted-printable':
        decoded = quopri.decodestring(rawbody.encode('ascii'))
    else:
        raise NotImplementedError('encoding %s not recognized' % encoding)
    charset = mail.get_content_charset()
    return decoded.decode(charset) if charset else decoded
def main(mailbox=os.path.expanduser('~/.Mail/inbox')):
    """Open *mailbox*, let the user pick a mail, and save it as HTML files
    in the current directory.
    """
    mb = mbox(mailbox)
    n = selectMail(mb)
    saveHTMLMail(mb[n])
# CLI entry point: optional single argument overrides the default mbox path.
if __name__ == '__main__':
    if len(sys.argv) == 2:
        main(sys.argv[1])
    elif len(sys.argv) == 1:
        main()
    else:
        print('usage: %s [MBOX_FILE]' % os.path.split(sys.argv[0])[1],
              file=sys.stderr)
|
from math import sqrt,sin,pi
from numpy import empty
from pylab import imshow,gray,show
# Two-source interference pattern: superpose sinusoidal waves radiating from
# two point sources and render the resulting field on a square grid.
wavelength = 5.0
k = (2*pi)/wavelength  # wavenumber
E0 = 1.0               # amplitude of each source
Separation = 20.0      # distance between the two sources
side = 100.0           # side length of the square region
points = 500           # grid resolution per side
spacing = side/points
# Source positions, symmetric about the centre of the region.
x1 = (side/2 - Separation/2)
y1 = (side/2)
x2 = (side/2 + Separation/2)
y2 = (side/2)
xi = empty([points,points],float)
for i in range(points):
    y = spacing*i
    for j in range(points):
        x = spacing*j
        # Distances from the grid point to each source; the field is the
        # sum of the two spherical sine waves.
        r1 = sqrt((x-x1)**2+(y-y1)**2)
        r2 = sqrt((x-x2)**2+(y-y2)**2)
        xi[i,j] = E0*(sin(k*r1)+sin(k*r2))#/spacing
imshow(xi,origin="lower",extent=[0,side,0,side])
gray()
show()
"""BigGAN Evol Figure
Partially inherit from BigGAN_Evol_summary.py but more focused.
"""
import os
import re
from time import time
from glob import glob
from os.path import join
from easydict import EasyDict
from imageio import imread
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pylab as plt
from scipy.stats import ttest_rel, ttest_ind
from PIL import Image
from build_montages import make_grid_np
from stats_utils import summary_by_block, saveallforms
def load_data_from_row(row, imgid=-1):
    """Load the data of one evolution experiment referenced by a summary row.

    @param row: table row with unitstr, optimizer, RND fields, used to locate
        the files under the module-level `dataroot`.
    @param imgid: which tile of the best-image montage to crop (-1 = last).
    @return: (scorevec, genvec, score_m, score_s, blockarr, proto image)
    """
    npzpath = join(dataroot, row.unitstr, 'scores%s_%05d.npz' % (row.optimizer, row.RND))
    imgtrajpath = glob(join(dataroot, row.unitstr, "besteachgen%s_%05d.jpg" % (row.optimizer, row.RND)))[0]
    data = np.load(npzpath)
    evolmtg = imread(imgtrajpath)
    proto = crop_from_montage(evolmtg, imgid=imgid)
    scorevec = data['scores_all']
    genvec = data["generations"]
    # Per-generation mean and spread (std here, since sem=False).
    score_m, score_s, blockarr = summary_by_block(scorevec, genvec, sem=False)
    return scorevec, genvec, score_m, score_s, blockarr, proto
def shadedErrorbar(blockarr, score_m, score_s, alpha=0.2, linealpha=1.0, label=None, color=None, linecolor=None):
    """Plot a mean curve with a shaded +/- error band on the current axes.

    @param blockarr: x values (e.g. generation/block indices)
    @param score_m: mean curve
    @param score_s: half-width of the band (std or sem)
    @param color: band color; defaults to the line's own color
    """
    L = plt.plot(blockarr, score_m, label=label, c=linecolor, alpha=linealpha)
    if color is None:
        # Fix: use the public Line2D API instead of the private `_color`
        # attribute, which matplotlib does not guarantee across versions.
        color = L[0].get_color()
    plt.fill_between(blockarr, score_m - score_s, score_m + score_s, alpha=alpha, color=color)
def crop_from_montage(img, imgid:int=-1, imgsize=256, pad=2):
    """Extract one tile from a padded image montage.

    The montage is a row-major grid of ``imgsize`` x ``imgsize`` tiles
    separated (and surrounded) by ``pad`` pixels.  ``imgid`` may be a flat
    tile index (negative counts from the end) or the string "rand" for a
    uniformly random tile.
    """
    cell = imgsize + pad
    nrow = (img.shape[0] - pad) // cell
    ncol = (img.shape[1] - pad) // cell
    ntile = nrow * ncol
    if imgid == "rand":
        imgid = np.random.randint(ntile)
    elif imgid < 0:
        imgid = ntile + imgid
    # Row-major flat index -> (row, col); equivalent to np.unravel_index.
    ri, ci = divmod(imgid, ncol)
    top = pad + cell * ri
    left = pad + cell * ci
    return img[top:top + imgsize, left:left + imgsize, :]
# Pandas / matplotlib display defaults for interactive analysis.
pd.set_option('display.width', 200)
pd.set_option("max_colwidth", 60)
pd.set_option('display.max_columns', None)
mpl.rcParams['pdf.fonttype'] = 42  # embed TrueType fonts in PDF exports
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
#%% Data using alexnet
dataroot = r"E:\Cluster_Backup\BigGAN_Optim_Tune_new"
figdir = r"O:\BigGAN_FC6_insilico"
outdir = r"O:\ThesisProposal\BigGAN"
summarydir = join(figdir, "summary")
os.makedirs(summarydir, exist_ok=True)
# load the table that indexing all experiments
exptab = pd.read_csv(join(summarydir, "optim_raw_score_tab.csv"))
exptab.suffix.fillna("", inplace=True) # substitute nan as ""
exptab.suffix = exptab.suffix.astype(str)
# fc-layer experiments belong to both subsets (no RF to fit).
rfmsk = exptab.layer.str.contains("fc") | exptab.suffix.str.contains("RF") # Experiments that do RFfitting
fullmsk = exptab.layer.str.contains("fc") | ~exptab.suffix.str.contains("RF") # Experiments that use full size images
# two masks are overlapping.
#%%
# Smoke-test: load one experiment's files by index.
row = exptab.loc[25]
npzpath = join(dataroot, row.unitstr, 'scores%s_%05d.npz' % (row.optimizer, row.RND))
imgtrajpath = glob(join(dataroot, row.unitstr, "besteachgen%s_%05d.jpg" % (row.optimizer, row.RND)))[0]
data = np.load(npzpath)
evolmtg = imread(imgtrajpath)
proto = crop_from_montage(evolmtg)
#%%
# Report how many trials exist per (layer, optimizer) for one sampled unit.
for layer in exptab.layer.unique():
    unit_cols = exptab.unitstr[(exptab.layer == layer) & rfmsk].unique()
    # for unitstr in unit_cols:
    unitstr = np.random.choice(unit_cols, 1)[0]
    unitstr = "alexnet_conv5_33_RFrsz"
    for optim in ['CholCMA', 'HessCMA', 'CholCMA_fc6']:
        msk = (exptab.unitstr == unitstr) & (exptab.optimizer == optim) & rfmsk
        print(layer, optim, unitstr, exptab[msk].shape)
        # row = exptab[msk].sample(1).iloc[0]
#%% Plot the optim trajectory comparison!
# unitstr = "alexnet_conv5_31_RFrsz"
# unitstr = "alexnet_conv1_33_RFrsz"
# Hand-picked example units, one per AlexNet layer.
unitstrs = ["alexnet_conv2_30_RFrsz",
            "alexnet_conv3_32_RFrsz",
            "alexnet_conv4_31_RFrsz",
            #"alexnet_conv5_30_RFrsz",
            "alexnet_conv5_33_RFrsz",
            #"alexnet_fc6_30",
            "alexnet_fc6_32",
            "alexnet_fc7_33",
            "alexnet_fc8_31",]
mtg_col = []
ncol = len(unitstrs)
figh, axs = plt.subplots(1, ncol, figsize=[ncol*2.75, 2.75])
for ci, unitstr in enumerate(unitstrs):
    proto_col = []
    for optim in ['CholCMA', 'HessCMA', 'CholCMA_fc6']:
        msk = (exptab.unitstr == unitstr) & (exptab.optimizer == optim)# & rfmsk
        if "fc6" not in optim: optim += "_BigGAN"
        # NOTE(review): `layer` here is left over from the previous cell's
        # loop, not derived from unitstr — confirm the print is intentional.
        print(layer, optim, unitstr, exptab[msk].shape)
        # row = exptab[msk].sample(1).iloc[0]
        # Best-scoring trial of this unit/optimizer pair.
        maxid = exptab[msk].score.argmax()
        row = exptab[msk].loc[maxid]
        _, _, score_m, score_s, blockarr, proto = load_data_from_row(row)
        proto_col.append(proto)
        plt.sca(axs[ci])
        shadedErrorbar(blockarr, score_m, score_s, label=optim)
    plt.title(unitstr)
    if ci == 0: plt.legend()
    # One row of prototypes (one per optimizer) for this unit.
    mtg = make_grid_np(proto_col, nrow=1) # np.stack(tuple(proto_col), axis=3)
    mtg_col.append(mtg)
saveallforms([figdir, outdir], "best_trajs_exemplar_alllayer")
plt.show()
# Stack all per-unit rows into one montage and save it to both dirs.
mtg_full = make_grid_np(mtg_col, nrow=ncol)
mtg_PIL = Image.fromarray(mtg_full)
mtg_PIL.show()
mtg_PIL.save(join(figdir, "proto_cmp_alllayers.jpg"))
mtg_PIL.save(join(outdir, "proto_cmp_alllayers.jpg"))
#%% New data sourse using ResNet50-robust
sumdir = r"E:\Cluster_Backup\GAN_Evol_cmp\summary"
rootdir = r"E:\Cluster_Backup\GAN_Evol_cmp"
expdirs = os.listdir(rootdir)
expdirs = [*filter(lambda nm: "resnet50_linf" in nm, expdirs)]
# re.findall("resnet50_linf_8_([^_]*)_(\d*)_(\d*)_(\d*)(_RFrsz|)", "resnet50_linf_8_.layer4.Bottleneck2_46_4_4_RFrsz")
# "scoresCholCMA_93259.npz"
# Build a unit table and a per-trial table by parsing directory and file names.
exp_col = []
trial_col = []
for expdir in expdirs:
    # Directory name encodes: layer_chan[_xid_yid][_RFrsz]
    unit_tup = expdir.split("resnet50_linf_8_")[1].split("_")
    do_resize = ("RFrsz" in unit_tup)
    if do_resize: unit_tup = unit_tup[:-1]
    if len(unit_tup) == 4:
        layer, chan, xid, yid = unit_tup[0], int(unit_tup[1]), int(unit_tup[2]), int(unit_tup[3])
    elif len(unit_tup) == 2:
        # fc-style unit: no spatial position.
        layer, chan, xid, yid = unit_tup[0], int(unit_tup[1]), None, None
    else:
        raise ValueError("unit parsing error for %s" % expdir)
    exp_col.append((expdir, layer, chan, xid, yid, do_resize))
    # One trajectory image per trial; its name encodes optimizer, seed, score.
    imgtrajpaths = [*map(os.path.basename, glob(join(rootdir, expdir, "traj*.jpg")))]
    for trialnm in imgtrajpaths:
        patt = re.findall("traj(.*)_(\d*)_score([\d.]*).jpg", trialnm)
        if len(patt) == 0:
            raise ValueError(trialnm)
        optimstr, RND, score = patt[0][0], int(patt[0][1]), float(patt[0][2])
        GANstr = "fc6" if "fc6" in optimstr else "BigGAN"
        trial_col.append((expdir, layer, chan, xid, yid, do_resize, optimstr, GANstr, RND, score))
unit_tab = pd.DataFrame(exp_col, columns=["expdir", "layer", "chan", "xid", "yid", "RFrsz", ])
exptab = pd.DataFrame(trial_col, columns=["expdir", "layer", "chan", "xid", "yid", "RFrsz", "optimstr", "GANstr", "RND", "score", ])
unit_tab.to_csv(join(sumdir, "unit_tab.csv"))
exptab.to_csv(join(sumdir, "trial_tab.csv"))
#%% Fancy pandas way to do trial averaging quick
# Mean score per (experiment, optimizer), pivoted wide so each optimizer
# becomes a column indexed by expdir.
exptab_trial_m = exptab.groupby(["expdir", "optimstr"]).mean()
exptab_trial_m = exptab_trial_m.reset_index()
exptab_trial_mW = exptab_trial_m.pivot(index='expdir', columns='optimstr', values='score')
unit_score_tab = unit_tab.copy()
for optim in ["CholCMA", "HessCMA", "HessCMA500_fc6"]:
    unit_score_tab[optim] = np.nan
for ri, row in unit_score_tab.iterrows():
    for optim in ["CholCMA", "HessCMA", "HessCMA500_fc6"]:
        # Bug fix: the original used chained indexing
        # (`unit_score_tab[optim][ri] = ...`), which assigns through a
        # temporary and triggers SettingWithCopyWarning / silent no-ops
        # in modern pandas; .loc assigns into the frame directly.
        unit_score_tab.loc[ri, optim] = exptab_trial_mW.loc[row.expdir, optim]
# Per-unit normalization: divide each optimizer's mean score by the best
# optimizer's mean score for that unit.
maxscore = unit_score_tab[["CholCMA", "HessCMA", "HessCMA500_fc6"]].max(axis=1, skipna=True)
for optimstr in ["CholCMA", "HessCMA", "HessCMA500_fc6"]:
    unit_score_tab[optimstr + "_norm"] = unit_score_tab[optimstr] / maxscore
unit_score_tab.to_csv(join(sumdir, "unit_score_tab.csv"))
#%%
# Melt is the wide to long transform, making each optimizer a row in the table
unit_score_tab_L = unit_score_tab.melt(id_vars=["layer","chan","xid","yid","RFrsz", "expdir"],
                                       value_vars=["CholCMA", "HessCMA", "HessCMA500_fc6"], var_name="optimstr", value_name="score")
# Keep RF-resized experiments; the final fc layer has no RF so keep it too.
RFrsz_msk = unit_score_tab_L.RFrsz | (unit_score_tab_L.layer == ".Linearfc")
figh = plt.figure(figsize=[7, 6])
sns.violinplot(x='layer', y='score', hue="optimstr", jitter=0.25,
               hue_order=['CholCMA', 'HessCMA', 'HessCMA500_fc6'], cut=0.1,
               data=unit_score_tab_L[RFrsz_msk], alpha=0.4)
plt.xticks(rotation=20)
figh.savefig(join(sumdir, "resnet_linf8_raw_score_cmp_RFresize.png"))
figh.savefig(join(sumdir, "resnet_linf8_raw_score_cmp_RFresize.pdf"))
plt.show()
#%%
# Melt is the wide to long transform, making each optimizer a row in the table
unit_score_tab_norm_L = unit_score_tab.melt(id_vars=["layer","chan","xid","yid","RFrsz", "expdir"],
                                            value_vars=["CholCMA_norm", "HessCMA_norm", "HessCMA500_fc6_norm"],
                                            var_name="optimstr", value_name="norm_score")
RFrsz_msk = unit_score_tab_norm_L.RFrsz | (unit_score_tab_norm_L.layer == ".Linearfc")
figh = plt.figure(figsize=[7, 6])
# Same comparison as above, but on per-unit-normalized scores.
ax = sns.violinplot(x='layer', y='norm_score', hue="optimstr", jitter=0.1, width=0.7, scale="width",
                    hue_order=['CholCMA_norm', 'HessCMA_norm', 'HessCMA500_fc6_norm'], cut=0.1,
                    data=unit_score_tab_norm_L[RFrsz_msk], alpha=0.2)
plt.xticks(rotation=20)
figh.savefig(join(sumdir, "resnet_linf8_norm_score_cmp_RFresize.png"))
figh.savefig(join(sumdir, "resnet_linf8_norm_score_cmp_RFresize.pdf"))
plt.show()
#%% Visualize the prototypes as showed by different GANs
# from build_montages import make_grid_np
from PIL import Image
from tqdm import tqdm
def plot_scoremap(score_mat, expdir=""):
    """Save a heatmap of trial-by-optimizer scores for one experiment.

    :param score_mat: (n_trials, 3) array; columns follow the optimizer
        order given in xticklabels, NaN cells mark missing trials.
    :param expdir: experiment directory name, used in the title and the
        output file name under ``sumdir/proto_cmp``.
    """
    plt.figure(figsize=[6,5])
    ax = sns.heatmap(score_mat, annot=True, fmt=".1f",
                     xticklabels=['CholCMA', 'HessCMA', 'HessCMA500_fc6'], )
    plt.ylabel("Trials")
    plt.xlabel("Optimizers")
    plt.axis("image")
    plt.title(expdir + "\nScore map with BigGAN or FC6")
    plt.savefig(join(sumdir, "proto_cmp", "%s_scoremat.jpg" % expdir))
    # plt.show()
# Render a score heatmap for every experiment (resuming from index 845,
# presumably after an interrupted earlier run — confirm before re-running).
for expdir in tqdm(unit_tab.expdir[845:]):
    trial_rows = exptab[exptab.expdir == expdir]
    # Up to 3 trials (rows) x 3 optimizers (columns); missing trials -> NaN.
    score_mat = np.zeros((3, 3), dtype=float)
    proto_list = []
    for opti, optim in enumerate(['CholCMA', 'HessCMA', 'HessCMA500_fc6']):
        rows_w_optim = trial_rows[trial_rows.optimstr == optim]
        trN = rows_w_optim.shape[0]
        for itr in range(3):
            if itr < trN:
                row = rows_w_optim.iloc[itr]
                score_mat[itr, opti] = row.score
                # imgnm = "lastgen%s_%05d_score%.1f.jpg"%(row.optimstr, row.RND, row.score)
                # imgfp = join(rootdir, row.expdir, imgnm)
                # assert os.path.exists(imgfp)
                # proto = crop_from_montage(plt.imread(imgfp), 2)
                # proto_list.append(proto)
            else:
                score_mat[itr, opti] = np.nan
                # proto_list.append(np.zeros((256, 256, 3), dtype=np.uint8))
    # mtg = make_grid_np(proto_list, nrow=3, padding=8, rowfirst=False)
    # Image.fromarray(mtg).save(join(sumdir, "proto_cmp", "%s_proto.jpg"%expdir))
    plot_scoremap(score_mat, expdir=expdir)
#%%
#%%
# compare performance across optimizers.
# BigGAN FC6 pair alignment
optimnames = ["CholCMA", "HessCMA", "CholCMA_fc6"]
def BigGANFC6_comparison_plot(norm_scheme="allmax", rffit=True):
    """Violin-plot BigGAN vs FC6 evolution scores per layer, normalized per unit.

    :param norm_scheme: per-unit normalizer — "allmax" (max score over the
        three optimizers), "fc6max" or "fc6mean" (max / mean of the
        CholCMA_fc6 trials).
    :param rffit: use the RF-fitted subset when True, else full-size images.
    :return: (BigGANFC6cmptab, figh) — the long-format score table and figure.
    """
    Scol = []
    overallmsk = rfmsk if rffit else fullmsk
    unitstr_uniq = exptab.loc[overallmsk].unitstr.unique()
    for unitstr in unitstr_uniq:
        unitmsk = (exptab.unitstr == unitstr)
        unit = exptab.unit[unitmsk].iloc[0]
        layer = exptab.layer[unitmsk].iloc[0]
        unitfc6msk = unitmsk & overallmsk & (exptab.optimizer == "CholCMA_fc6")
        unitBGmsk = unitmsk & overallmsk & (exptab.optimizer == "CholCMA")
        unitBGHmsk = unitmsk & overallmsk & (exptab.optimizer == "HessCMA")
        unitoptimmsk = unitmsk & overallmsk & ((exptab.optimizer == "CholCMA")
                                               | (exptab.optimizer == "HessCMA")
                                               | (exptab.optimizer == "CholCMA_fc6"))
        # Skip units that are missing trials for any of the three optimizers.
        if any([sum(unitfc6msk) == 0, sum(unitBGmsk) == 0, sum(unitBGHmsk) == 0]): continue
        # Bug fix: the original used `norm_scheme is "allmax"` etc. — `is`
        # tests object identity, not string equality; it only worked by
        # accident of CPython string interning and is a SyntaxWarning on 3.8+.
        if norm_scheme == "allmax":
            normalizer = exptab.score[unitoptimmsk].max()
        elif norm_scheme == "fc6max":
            normalizer = exptab.score[unitfc6msk].max()
        elif norm_scheme == "fc6mean":
            normalizer = exptab.score[unitfc6msk].mean()
        else:
            raise NotImplementedError
        # no fc6 to normalize to
        for optim in optimnames:
            msk = unitmsk & overallmsk & (exptab.optimizer == optim)
            scorevec = exptab.score[msk]
            scorevec_norm = scorevec / normalizer
            newdicts = [{"layer": layer, "unit": unit, "optimizer": optim, "score": score, "score_norm": score_norm}
                        for score, score_norm in zip(scorevec, scorevec_norm)]
            Scol.extend(newdicts)
    BigGANFC6cmptab = pd.DataFrame(Scol)
    # Dead channels normalize to NaN; a zero normalizer produces inf.
    deadunitmsk = (BigGANFC6cmptab.score_norm.isna())
    fc6failedmsk = np.isinf(BigGANFC6cmptab.score_norm)
    print("Dead channel trial number:%d" % sum(deadunitmsk))
    print("Failed trial number:%d" % sum(fc6failedmsk))
    figh = plt.figure(figsize=[7, 7])
    ax = sns.violinplot(x='layer', y='score_norm', hue="optimizer", jitter=0.25,
                        hue_order=['CholCMA', 'HessCMA', 'CholCMA_fc6'], cut=0.1,
                        data=BigGANFC6cmptab[~deadunitmsk & ~fc6failedmsk], alpha=0.4)
    ax.set_title("Comparison of Optimizer and GAN space over Units of AlexNet %s" % ("RFfit" if rffit else "FullImage"))
    ax.set_ylabel("activ normalized by %s" % norm_scheme)
    ax.figure.show()
    ax.figure.savefig(join(summarydir, "BigGANFC6_cmp_%snorm_layer_all%s.jpg" % (norm_scheme, "RFfit" if rffit else
                                                                                "_Full")))
    ax.figure.savefig(join(summarydir, "BigGANFC6_cmp_%snorm_layer_all%s.pdf" % (norm_scheme, "RFfit" if rffit else
                                                                                "_Full")))
    return BigGANFC6cmptab, figh
# BigGANFC6_comparison_plot(norm_scheme="fc6max", rffit=True)
# BigGANFC6_comparison_plot(norm_scheme="allmax", rffit=True)
# Full-image (non-RF-fit) comparison, normalized by the per-unit max score.
BigGANFC6_comparison_plot(norm_scheme="allmax", rffit=False)
from django.contrib import admin
from django.contrib.auth import get_user_model
# Register the project's active user model (custom or Django's default)
# with the admin site using the default ModelAdmin.
admin.site.register(get_user_model())
|
import math
from itertools import islice
from typing import Collection
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.optimize
import skimage
import torch
from matplotlib import patches, patheffects
from matplotlib import pyplot as plt
from matplotlib.patches import Patch, Rectangle
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from PIL import Image
from scipy import ndimage
from skimage import data, img_as_ubyte, measure
from survos2.entity.sampler import centroid_to_bvol, viz_bvols
from survos2.frontend.nb_utils import show_images, summary_stats
from torch import Tensor
from torchvision.ops import roi_align
from survos2 import survos
from survos2.model import DataModel
from survos2.improc.utils import DatasetManager
from loguru import logger
from survos2.frontend.main import init_ws, roi_ws
def load_model(detmod, file_path):
    """Restore checkpoint weights into ``detmod`` and switch it to eval mode.

    The checkpoint is expected to carry the weights under the
    ``"model_state"`` key.  Loading is non-strict, so missing or extra
    keys are tolerated.  Returns the same model object.
    """
    # The original wrapped torch.load in a trivial inner helper; inlined.
    checkpoint = torch.load(file_path)
    detmod.load_state_dict(checkpoint["model_state"], strict=False)
    detmod.eval()
    print(f"Loaded model from {file_path}")
    return detmod
def remove_masked_entities(bg_mask, entities):
    """Drop entity points that fall inside the background mask.

    Each entity's first three columns are voxel coordinates into
    ``bg_mask``.  Points whose voxel lies where the mask is nonzero are
    discarded; survivors come back as rows ``[dim0, dim2, dim1, 6]``.
    NOTE(review): the last two coordinate axes are swapped relative to the
    input order and the class id is forced to 6 — confirm callers expect
    this.
    """
    pts_vol = np.zeros_like(bg_mask)
    for entity in entities:
        pts_vol[entity[0], entity[1], entity[2]] = 1
    # Zero out points covered by the mask.
    pts_vol = pts_vol * (1.0 - bg_mask)
    d0, d1, d2 = np.where(pts_vol == 1)
    kept = [[a, c, b, 6] for a, b, c in zip(d0, d1, d2)]
    return np.array(kept)
def quick_norm(vol):
    """Return ``vol`` linearly rescaled to the [0, 1] range.

    Bug fix: the original first line was ``vol -= np.min(vol)``, which
    mutates the caller's array in place; this version subtracts
    out-of-place so the argument is left untouched.  A constant volume
    still divides by zero, matching the original behavior.
    """
    vol = vol - np.min(vol)  # out-of-place: do not clobber caller data
    vol = vol / np.max(vol)
    return vol
def pad_vol(vol, padding):
    """Zero-pad a 3D volume by ``padding[d]`` voxels on both sides of axis d.

    Returns a float64 array of shape ``vol.shape + 2*padding``.
    """
    pad_width = [(p, p) for p in padding]
    # The original allocated the result with np.zeros, so the output was
    # float64 regardless of input dtype; the cast preserves that.
    return np.pad(vol, pad_width, mode="constant").astype(float)
def remove_padding(vol, padding):
    """Undo ``pad_vol``: strip ``padding[d]`` voxels from both ends of axis d.

    Bug fix: the original sliced ``padding[d] : vol.shape[d] + padding[d]``,
    whose upper bound runs past the array end — the slice then clamps, so
    the trailing padding was never removed.  The correct upper bound is
    ``vol.shape[d] - padding[d]`` (safe for zero padding too).
    """
    unpadded_vol = vol[
        padding[0] : vol.shape[0] - padding[0],
        padding[1] : vol.shape[1] - padding[1],
        padding[2] : vol.shape[2] - padding[2],
    ]
    return unpadded_vol
def get_largest_cc(I):
    """Return a binary int mask of the largest connected component of ``I``.

    "Largest" is measured by the sum of ``I``'s values over each labeled
    component (for a binary input this is the voxel count).  When ``I``
    has no positive mass, an all-zero array of the same shape is returned.
    """
    if np.sum(I) <= 0:
        return np.zeros_like(I)
    # Label connected components of the binarized volume.
    labels, n_labels = ndimage.label(I > 0)
    # Intensity-weighted size per label; index 0 is the background, whose
    # sum is zero here because I is zero wherever it is not positive.
    sizes = ndimage.sum(I, labels, range(n_labels + 1))
    # Broadcast each voxel's component size, then keep the biggest one.
    biggest = sizes[labels] == np.max(sizes)
    return biggest.astype(int)
def get_surface(img_vol, plot3d=False):
    """Compute a surface mesh and shape statistics for a binary volume.

    Runs marching cubes on ``img_vol > 0`` and returns
    ``(mesh, surface_area, volume, sphericity)``; on failure (e.g. an
    empty volume where no surface exists) returns ``(None, 0, 0, 0)``.

    NOTE(review): the "sphericity" computed here is ``36*pi*v**2 / s**3``,
    which is the cube of the conventional ``pi**(1/3)*(6v)**(2/3)/s`` —
    confirm downstream consumers expect this variant.
    """
    try:
        # Get the vertices, faces, normals and values using marching_cubes
        verts, faces, normals, values = measure.marching_cubes((img_vol > 0) * 1.0, 0)
        # Create a 3D polygon collection using the vertices and faces
        mesh = Poly3DCollection(verts[faces])
        if plot3d:
            fig = plt.figure(figsize=(10, 10))
            ax = fig.add_subplot(111, projection="3d")
            mesh.set_edgecolor("k")
            ax.add_collection3d(mesh)
            ax.set_xlabel("x")
            ax.set_ylabel("y")
            ax.set_zlabel("z")
            # Axis limits follow the volume's (z, y, x) storage order.
            ax.set_xlim(0, img_vol.shape[1])
            ax.set_ylim(0, img_vol.shape[2])
            ax.set_zlim(0, img_vol.shape[0])
            plt.tight_layout()
            plt.show()
        # Surface area of the mesh and voxel-sum volume of the input.
        s = skimage.measure.mesh_surface_area(verts, faces)
        v = np.sum(img_vol)
        sphericity = (36 * math.pi * (v**2)) / (s**3)
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception, same fallback values.
    except Exception:
        s = 0
        v = 0
        sphericity = 0
        mesh = None
    return mesh, s, v, sphericity
def remove_padding(vol, padding):
    """Strip ``padding[d]`` voxels from both ends of each axis.

    NOTE(review): this is a duplicate definition — it shadows the earlier
    ``remove_padding`` in this module; consider deleting one of them.
    Bug fix: the upper slice bound was ``vol.shape[d] + padding[d]``,
    which clamps at the array end and left the trailing padding in place;
    the correct bound is ``vol.shape[d] - padding[d]``.
    """
    unpadded_vol = vol[
        padding[0] : vol.shape[0] - padding[0],
        padding[1] : vol.shape[1] - padding[1],
        padding[2] : vol.shape[2] - padding[2],
    ]
    return unpadded_vol
|
import numpy as np
import math
import Interpolate
if __name__ == '__main__':
    # (time [s], mass) measurement pairs.
    mass = [[19,1203],[22,1245],[26,1378],[28,1315],[30,1475]]
    # --- interpolation with 4 nodes ---
    n = 4
    x = [0]*n
    dimensions = (n, n)
    # Divided-difference table; only column 0 is filled here.
    y = np.zeros(dimensions)
    # NOTE(review): this loop starts at 1, so x[0]/y[0][0] stay 0 and
    # mass[0] is never used — confirm whether range(n) was intended
    # (the 5-node block below does use range(n)).
    for i in range(1,n):
        x[i] = mass[i][0]
        y[i][0] = mass[i][1]
    Interpolate.calculateTable(x,y,n)
    Result3 = Interpolate.findValue(25,x,y,n)
    # --- interpolation with all 5 nodes ---
    n = 5
    x = [0] * n
    dimensions = (n, n)
    y = np.zeros(dimensions)
    for i in range(n):
        x[i] = mass[i][0]
        y[i][0] = mass[i][1]
    Interpolate.calculateTable(x, y, n)
    Result4 = Interpolate.findValue(25, x, y, n)
    # Percent difference between the two interpolations, used as an
    # error estimate for the 5-node result.
    error = math.fabs(Result4-Result3)/Result4*100
    print("The mass at 25 sec is",Result4)
    print("Abosulte Relative Error", error,"%")
    Interpolate.plotGraph(x,y,5,1000,2000)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv2D, Flatten
import autograd.numpy as np
from collections import OrderedDict
class TabularValueFun(object):
    """Value function stored as one number per discrete state."""

    def __init__(self, env):
        # One table entry per state, initialized to zero.
        self.num_states = env.num_states
        self._value_fun = np.zeros(shape=(self.num_states,))

    def get_values(self, states=None):
        """Return values for ``states``; all states when ``states`` is None."""
        return self._value_fun if states is None else self._value_fun[states]

    def update(self, values):
        """Replace the whole value table."""
        self._value_fun = values

    def save(self, filepath):
        """Persist the table with np.save."""
        np.save(filepath, self._value_fun)

    def load(self, filepath):
        """Restore a table previously written by save()."""
        self._value_fun = np.load(filepath)
class CNNValueFun(object):
    """Value function approximated by a small Keras CNN over 2D state grids.

    NOTE(review): the ``activation`` argument is accepted but the layers
    hard-code 'relu', and unlike the sibling classes there are no
    save/load methods — confirm whether both are intentional.
    """
    def __init__(self, env, activation='relu'):
        self._env = env
        input_shape = env.state_shape
        # Append a trailing channel dimension for Conv2D input.
        self._build((*input_shape,1), activation)
    def _build(self, input_shape, activation='relu', *args, **kwargs):
        # Create the model: two conv layers, then a single linear output.
        model = Sequential()
        model.add(Conv2D(10, kernel_size=3, activation='relu', input_shape=input_shape))
        model.add(Conv2D(5, kernel_size=3, activation='relu'))
        model.add(Flatten())
        model.add(Dense(1,activation='linear'))
        model.compile(optimizer='adam', loss='mean_squared_error')
        self.model = model
    def get_values(self, states):
        """Predict values for a batch of states; returns a flat (N,) array."""
        # Add the channel axis to match the build-time input layout.
        states = np.expand_dims(states, axis=-1)
        return self.model.predict(states).reshape(-1)
    def update(self, states, V_bar):
        """Fit the network one round toward the target values ``V_bar``."""
        states = np.expand_dims(states, axis=-1)
        # loss = self.model.train_on_batch(states, V_bar,reset_metrics=False)
        loss = self.model.fit(states, V_bar)
        print(loss)
class FFNNValueFun(object):
    """Value function approximated by a small Keras feed-forward network.

    NOTE(review): the ``activation`` argument is accepted but the layers
    hard-code 'relu' — confirm whether it was meant to be wired through.
    """
    def __init__(self, env, activation='relu'):
        self._env = env
        input_shape = env.state_shape
        # Shape is computed but not passed to the Dense layers, which
        # infer their input size on first call.
        self._build((*input_shape,1), activation)
    def _build(self, input_shape, activation='relu', *args, **kwargs):
        # Create the model: two hidden layers, single linear output.
        model = Sequential()
        model.add(Dense(256,activation='relu'))
        model.add(Dense(256,activation='relu'))
        model.add(Dense(1,activation='linear'))
        model.compile(optimizer='adam', loss='mean_squared_error')
        self.model = model
    def get_values(self, states):
        """Predict values for a batch of states; returns a flat (N,) array."""
        return self.model.predict(states).reshape(-1)
    def update(self, states, V_bar):
        """Fit the network one round toward the target values ``V_bar``."""
        # loss = self.model.train_on_batch(states, V_bar,reset_metrics=False)
        loss = self.model.fit(states, V_bar, verbose=0)
    def save(self, filepath):
        """Persist the whole Keras model to ``filepath``."""
        self.model.save(filepath)
    def load(self, filepath):
        """Restore a model previously written by save()."""
        self.model = load_model(filepath)
class MLPValueFun(object):
    """Value function as a hand-rolled MLP on (autograd) numpy arrays.

    Weights live in the ``self._params`` dict (``W_i``/``b_i`` per hidden
    layer plus ``W_out``/``b_out``) so they can be differentiated and
    updated by an external autograd-based optimizer.
    """
    # Supported nonlinearities; None means identity.
    _activations = {
        'tanh': np.tanh,
        None: lambda x: x,
        'relu': lambda x: np.maximum(x, 0)
    }

    def __init__(self, env, hidden_sizes=(256, 256), activation='relu'):
        self._env = env
        self._params = dict()
        self._build(hidden_sizes, activation)

    def _build(self, hidden_sizes=(256, 256), activation='relu', *args, **kwargs):
        """Initialize weights with N(0, 1/fan_in) and zero biases."""
        self._activation = self._activations[activation]
        self._hidden_sizes = hidden_sizes
        prev_size = self._env.observation_space.shape[0]
        for i, hidden_size in enumerate(hidden_sizes):
            W = np.random.normal(loc=0, scale=1/prev_size, size=(hidden_size, prev_size))
            b = np.zeros((hidden_size,))
            self._params['W_%d' % i] = W
            self._params['b_%d' % i] = b
            prev_size = hidden_size
        # Scalar output head.
        W = np.random.normal(loc=0, scale=1/prev_size, size=(1, prev_size))
        b = np.zeros((1,))
        self._params['W_out'] = W
        self._params['b_out'] = b

    def get_values(self, states, params=None):
        """Forward pass: states of shape (N, obs_dim) -> values of shape (N,).

        :param params: optional parameter dict to evaluate with (used when
            differentiating w.r.t. candidate parameters); defaults to the
            stored parameters.
        """
        params = self._params if params is None else params
        x = states
        for i, hidden_size in enumerate(self._hidden_sizes):
            x = np.dot(params['W_%d' % i], x.T).T + params['b_%d' % i]
            x = self._activation(x)
        values = np.dot(params['W_out'], x.T).T + params['b_out']
        return values[:, 0]

    def update(self, params):
        """Take one optimizer step from ``params`` and store the result.

        Bug fix: the original first computed
        ``loss = np.linalg.norm(V_hat_new - V_bar)`` from names that are
        defined nowhere in this method, which raised NameError before the
        gradient step could run; the dead line is removed.
        NOTE(review): ``self.optimizer`` and ``self.objective`` are
        expected to be attached externally by the training loop — confirm.
        """
        assert set(params.keys()) == set(self._params.keys())
        params = self.optimizer.grad_step(self.objective, params)
        self._params = params
|
from flask_debugtoolbar import DebugToolbarExtension
from flask_bcrypt import Bcrypt
# Shared, unbound extension instances (app-factory pattern): call
# ext.init_app(app) for each during application setup.
bcrypt = Bcrypt()
from flask_login import LoginManager
login_manager = LoginManager()
debug_toolbar = DebugToolbarExtension()
import requests
import time
import threading

url = 'https://www.lagou.com/gongsi/allCity.html?option=0-0-0'

# Measure the response time of the company-lookup endpoint.
def request():
    start = time.time()
    requests.get("http://10.0.0.2:1024/company/one?name=steve&age=10&sex=true").text
    # Bug fix: the elapsed time is in seconds but the label says "ms";
    # convert to milliseconds before formatting.
    print("响应时间: {} ms".format((time.time() - start) * 1000))

# Fire 999 sequential requests (range(1, 1000) intentionally skips 0).
for i in range(1, 1000):
    request()
# Gavin Harris, Lab 3b
# How to run: python ChangeMaker.py

# Ask the user for the amount of change to give, in whole cents.
change = int(input("How much change are you trying to give (in cents)? "))

# Greedy coin breakdown, largest denomination first.  divmod replaces the
# original subtract-one-coin-per-iteration while loop with identical
# results.  (The original loop also contained an unreachable
# `Print("Unknown input")` branch that would have raised NameError —
# capital P — had it ever run; removed along with the no-op `else: ""`
# clauses below.)
quarters = dimes = nickels = pennies = 0
if change > 0:
    quarters, change = divmod(change, 25)
    dimes, change = divmod(change, 10)
    nickels, change = divmod(change, 5)
    pennies = change

# Report only the denominations actually used, all on one line.
print("You should give the customer:", end=" ")
if quarters > 0:
    print(quarters, "quarters", end=" ")
if dimes > 0:
    print(dimes, "dimes", end=" ")
if nickels > 0:
    print(nickels, "nickels", end=" ")
if pennies > 0:
    print(pennies, "pennies", end=" ")
# Turn off (too-many-instance-attributes), (invalid-name) and
# (missing-docstring) pylint errors:
# pylint: disable=R0902,C0103,C0111
import unittest
from week1.proj1 import slide, merge
class TestSlide(unittest.TestCase):
    """slide() must pack nonzero tiles to the front, preserving order."""

    def test_slide_returns_correct_result(self):
        cases = [
            ([1, 0, 1, 1], [1, 1, 1, 0]),
            ([0, 0, 1, 1], [1, 1, 0, 0]),
            ([1, 1, 0, 0], [1, 1, 0, 0]),
            ([0, 0, 0, 0], [0, 0, 0, 0]),
            ([0, 0, 0, 1], [1, 0, 0, 0]),
            ([0, 1], [1, 0]),
        ]
        # Same assertions as the original, expressed as a case table.
        for line, expected in cases:
            self.assertEqual(slide(line), expected)
class TestMerge(unittest.TestCase):
    """merge() must combine adjacent equal tiles once, left to right."""

    def test_merge_returns_correct_result(self):
        cases = [
            ([2, 0, 2, 4], [4, 4, 0, 0]),
            ([0, 0, 2, 2], [4, 0, 0, 0]),
            ([2, 2, 0, 0], [4, 0, 0, 0]),
            ([2, 2, 2, 2], [4, 4, 0, 0]),
            ([8, 16, 16, 8], [8, 32, 8, 0]),
            ([8, 8], [16, 0]),
        ]
        # Same assertions as the original, expressed as a case table.
        for line, expected in cases:
            self.assertEqual(merge(line), expected)
|
import requests
import os
def downloadModelFromDB(ID, outPath, baseURL='https://files.rcsb.org/download/'):
    """Download the PDB structure file for ``ID`` into ``outPath``.

    :param ID: PDB identifier (any case; lowercased for the request).
    :param outPath: directory where ``<ID>.pdb`` is written.
    :param baseURL: download endpoint, with or without a trailing slash.
    :return: True on success, False when the server rejects the ID.
    """
    # Bug fix: the URL was previously assembled with os.path.join, which
    # uses '\' as the separator on Windows and produced an invalid URL.
    reqURL = baseURL.rstrip('/') + '/' + ID.lower() + '.pdb'
    response = requests.get(reqURL)
    if not response.ok:
        # Unknown ID or other HTTP error: report failure to the caller.
        return False
    with open(os.path.join(outPath, ID + ".pdb"), "w+") as f:
        f.write(response.text)
    return True
|
import floppyforms as forms
from kitchencrashers.main.models import RsvpOptions
from kitchencrashers.main.models import CategoryOptions
class EventForm(forms.Form):
    """Form for creating a kitchen-crashing event.

    Field names are part of the public interface (they become the HTML
    input names), so the misspelled ``is_vegeterian`` is kept as-is.
    """
    name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'name your event'}))
    # Split widget: separate date and time inputs.
    date = forms.DateTimeField(widget=forms.SplitDateTimeWidget)
    location = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'address'}))
    city = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'city'}))
    # Choice lists come from model-level option holders.
    category = forms.ChoiceField(choices=CategoryOptions().CATEGORY_OPTIONS)
    description = forms.CharField(widget=forms.Textarea(attrs={'placeholder': 'don\'t be shy - tell us about it some more'}))
    budget = forms.IntegerField()
    max_people = forms.IntegerField()
    # Dietary flags are all optional.
    is_vegan = forms.BooleanField(required=False)
    is_kosher = forms.BooleanField(required=False)
    is_vegeterian = forms.BooleanField(required=False)
    rsvp = forms.ChoiceField(widget=forms.RadioSelect, choices=RsvpOptions().RSVP_OPTIONS)
    picture = forms.ImageField()
|
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import copy
from discover.fetchers.api.api_access import ApiAccess
class TestRegions:
    """Context manager that temporarily replaces the class-level
    ``ApiAccess.regions`` with a test fixture, restoring the original
    value on exit (even when the body raises)."""
    def __init__(self, test_regions):
        super().__init__()
        # Deep copy so later mutation of the live list cannot corrupt the
        # saved original.
        self.original_regions = copy.deepcopy(ApiAccess.regions)
        self.test_regions = test_regions
    def __enter__(self):
        ApiAccess.regions = self.test_regions
    def __exit__(self, exc_type, exc_val, exc_tb):
        ApiAccess.regions = self.original_regions
from django.core.management import BaseCommand
#The class must be named Command, and subclass BaseCommand
import matplotlib.pyplot as plt
from reports.models import ostPerfHistory
import datetime
from pytz import timezone
import numpy as np
class Command(BaseCommand):
    """Management command that renders SVG charts of Lustre OST throughput.

    Produces one overview chart of all low-throughput samples plus one
    chart per OST that ever reported zero throughput, over a hard-coded
    date range in April 2017.  Output goes to ``tmp/*.svg``.
    """
    # Shown when the user types `manage.py help <command>`
    help = "Generates SVG charts for Lustre throuhput values."
    # Threshold (MB/s) drawn as the red dashed reference line.
    min_throughput = 160
    start_date = 0
    end_date = 0

    def handle(self, *args, **options):
        """Entry point: query the database and render all charts."""
        berlin = timezone('Europe/Berlin')
        self.start_date = datetime.datetime(2017,4,2,0,0,0, tzinfo=berlin)
        self.end_date = datetime.datetime(2017,4,7,23,59,59, tzinfo=berlin)
        # Midpoint of the range, used to centre the annotation text.
        mid = self.start_date+datetime.timedelta(days=((self.end_date-self.start_date).days/2)+0.5)
        self.stdout.write(self.style.WARNING("Rendering"), ending="\n")
        x,y = self.main_overview()
        self.renderOverview(x, y, mid)
        dates, throughputs = self.getDataForOsts()
        # Bug fix: dict.iteritems() is Python 2 only; items() works on both.
        for ostname,v in dates.items():
            self.renderOst(v, throughputs[ostname], mid, ostname)

    def main_overview(self):
        """Return (dates, throughputs in MB/s) of all samples <= 160 MB/s."""
        latest_question_list = ostPerfHistory.objects.filter(throughput__lte=160*1024*1024, timepoint__range=(self.start_date, self.end_date)).order_by('timepoint')
        throughputs, dates = [], []
        for q in latest_question_list:
            dates.append(q.timepoint)
            # Stored values are bytes/s; convert to MB/s.
            throughputs.append(q.throughput/1024/1024)
        return (dates, throughputs)

    def getDataForOsts(self):
        """Return per-OST ({ost: [dates]}, {ost: [MB/s]}) for every OST that
        reported zero throughput at least once in the date range."""
        result = list(ostPerfHistory.objects.filter(throughput=0, timepoint__range=(self.start_date, self.end_date)).order_by('timepoint').values_list('ost', flat=True))
        # Bug fix: `print result` is Python 2 statement syntax and a
        # SyntaxError under Python 3.
        print(result)
        latest_question_list = ostPerfHistory.objects.filter(ost__in=result, timepoint__range=(self.start_date, self.end_date)).order_by('timepoint')
        dates, throughputs = {}, {}
        for q in latest_question_list:
            ostname = q.ost
            if not (ostname in dates or ostname in throughputs):
                dates[ostname] = []
                throughputs[ostname] = []
            dates[ostname].append(q.timepoint)
            throughputs[ostname].append(q.throughput/1024/1024)
        return (dates, throughputs)

    def renderOverview(self, x, y, mid):
        """Render tmp/Overview.svg: scatter of all low-throughput samples."""
        self.stdout.write(" - Overview.svg", ending="")
        z = np.array(y)
        # Blue dots for samples, red dashed line at the threshold.
        lines = plt.plot(x, y, 'b.', x, z*0+self.min_throughput, 'r--')
        plt.text(mid, 165, 'OSTs with Throughput Values lower than 160 MB/s', horizontalalignment='center')
        plt.xlim(self.start_date, self.end_date)
        plt.ylim(0, 180)
        plt.ylabel('min-throughput (MB/s)')
        plt.xlabel('date OST')
        plt.gca().set_position([0, 0, 1, 1])
        plt.axis('on')
        plt.savefig("tmp/Overview.svg", format="svg", bbox_inches='tight')
        plt.clf()
        self.stdout.write(self.style.SUCCESS(" - done"), ending="\n")

    def renderOst(self, x, y, mid, ostname):
        """Render tmp/<ostname>.svg: full throughput trace for one OST."""
        self.stdout.write(' - '+ostname+'.svg', ending='')
        z = np.array(y)
        lines = plt.plot(x, y, 'k', x, z*0+self.min_throughput, 'r--')
        plt.text(mid, 165, r'Throughput Values lower than 160 MB/s', horizontalalignment='center')
        plt.xlim(self.start_date, self.end_date)
        plt.ylim(0, 2400)
        plt.grid(True)
        plt.ylabel('Throughput (MB/s)')
        plt.xlabel('Fig.: '+ostname)
        plt.gca().set_position([0, 0, 1, 1])
        plt.axis('on')
        plt.savefig("tmp/"+ostname+".svg", format="svg", bbox_inches='tight')
        plt.clf()
        self.stdout.write(self.style.SUCCESS(" - done"), ending="\n")
|
import argparse
import asyncio
import logging
import os
from datetime import datetime
import reuters_parsing.app as app
from .db_postgres import PostgresBackend
from .parser_reuters import ReutersParser
# CLI: `scrap` (default) fetches and stores news; `report` exports a day's
# news to CSV.
parser = argparse.ArgumentParser(description='Reuters web scrapper.', prog="python3.8 -m reuters_parsing")
parser.add_argument('-f', '--force-recreate', action='store_true', help="Recreate tables even if database not empty")
subparsers = parser.add_subparsers(help="Command to proceed.", dest='command')
scrap_parser = subparsers.add_parser('scrap', help="Do scraping of Reuters website and store new news into database. "
                                                  "Schema will be applied automatically (if database is empty "
                                                  "or --force-recreate specified).")
report_parser = subparsers.add_parser('report', help="Output news for given date into csv file.")
report_parser.add_argument('date', help="Date to report in YYYY-MM-DD format (in publisher timezone).")
report_parser.add_argument('file', nargs='?', help="File to store report, 'News-YYYY-MM-DD.csv' by default.")
parsed = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if os.environ.get('APP_DEBUG', False) else logging.INFO)
# Connection string is assembled entirely from POSTGRES_* env variables.
dsn = "postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@" \
      "{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}".format(**os.environ)
backend = PostgresBackend(dsn)
backend.ensure_schema(parsed.force_recreate)
async def main():
    """Dispatch the selected subcommand against the Postgres backend."""
    print("[{}]".format(datetime.now()), end=" ")
    await backend.init_engine()
    # No subcommand defaults to scraping.
    if parsed.command in [None, 'scrap']:
        print("scraping...")
        await app.scrap('http://feeds.reuters.com/reuters/topNews', ReutersParser(), backend)
    elif parsed.command == 'report':
        print("Reporting...")
        await app.report(parsed.date, parsed.file, backend)
    await backend.stop_engine()
# NOTE(review): asyncio.get_event_loop() is deprecated for this use on
# newer Python; asyncio.run(main()) is the modern equivalent — confirm
# the targeted runtime before changing.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
|
# Generated by Django 3.1.1 on 2020-12-01 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add ``prev_no_deceased``,
    ``prev_no_infected`` and ``prev_no_offisolated`` integer counters
    (default 0) to both the RegionLarge and RegionMedium models."""

    dependencies = [
        ('mainpage', '0002_auto_20201129_1424'),
    ]
    operations = [
        migrations.AddField(
            model_name='regionlarge',
            name='prev_no_deceased',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='regionlarge',
            name='prev_no_infected',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='regionlarge',
            name='prev_no_offisolated',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='regionmedium',
            name='prev_no_deceased',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='regionmedium',
            name='prev_no_infected',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='regionmedium',
            name='prev_no_offisolated',
            field=models.IntegerField(default=0),
        ),
    ]
|
from __future__ import print_function
import sys
import ctypes
import textwrap
from nose.tools import assert_equal
from parameterized import parameterized, param
sys.path.append("pp/")
import pp
import pprintpp as p
from pprintpp import Counter, defaultdict, OrderedDict
class PPrintppTestBase(object):
    """Shared helper for tests that assert on captured stdout."""

    def assertStdout(self, expected, trim=True):
        """Compare captured stdout (stripped) against *expected*.

        With trim=True the expected text is dedented and its surrounding
        blank lines removed, so tests can pass readable indented
        triple-quoted literals.
        """
        if trim:
            expected = textwrap.dedent(expected.rstrip().lstrip("\n"))
        # Assumes that nose's capture plugin is active
        assert_equal(sys.stdout.getvalue().rstrip(), expected)
class TestPP(PPrintppTestBase):
    """Tests for the `pp` module-like callable shim."""

    def test_pp(self):
        # Calling the object directly pretty-prints to stdout.
        pp(["hello", "world"])
        self.assertStdout("['hello', 'world']")
    def test_pp_pprint(self):
        pp.pprint("stuff")
        self.assertStdout("'stuff'")
    def test_fmt(self):
        # pformat and fmt both return the formatted string instead of printing.
        print(pp.pformat("asdf"))
        print(pp.fmt("stuff"))
        self.assertStdout("""
            'asdf'
            'stuff'
        """)
    def test_module_like(self):
        # Smoke test: the shim should survive module-style introspection.
        print(dir(pp))
        print(repr(pp))
# Subclasses of builtin/collections container types. The round-trip tests
# use them to check that pprintpp prints the subclass constructor name.
class MyDict(dict):
    pass
class MyList(list):
    pass
class MyTuple(tuple):
    pass
class MySet(set):
    pass
class MyFrozenSet(frozenset):
    pass
class MyOrderedDict(p.OrderedDict):
    pass
class MyDefaultDict(p.defaultdict):
    pass
class MyCounter(p.Counter):
    pass
class MyCounterWithRepr(p.Counter):
    # A custom __repr__ must take precedence over pprintpp's Counter handling.
    def __repr__(self):
        return "MyCounterWithRepr('dummy')"
class TestPPrint(PPrintppTestBase):
    """Tests for pprintpp formatting: unicode handling and repr round-trips."""

    uni_safe = u"\xe9 \u6f02 \u0e4f \u2661"
    uni_unsafe = u"\u200a \u0302 \n"
    # Class-body helper (evaluated at class-definition scope): the expected
    # backslash-escaped repr of a string that is unsafe to print literally.
    slashed = lambda s: u"%s'%s'" %(
        p.u_prefix,
        s.encode("ascii", "backslashreplace").decode("ascii").replace("\n", "\\n")
    )
    @parameterized([
        param("safe", uni_safe, "%s'%s'" %(p.u_prefix, uni_safe)),
        param("unsafe", uni_unsafe, slashed(uni_unsafe)),
        param("encoding-aware", uni_safe, slashed(uni_safe), encoding="ascii"),
        param("high-end-chars", u"\U0002F9B2", slashed(u"\U0002F9B2"), encoding="ascii"),
    ])
    def test_unicode(self, name, input, expected, encoding="utf-8"):
        # Printable-vs-escaped decisions depend on the stream's encoding.
        stream = p.TextIO(encoding=encoding)
        p.pprint(input, stream=stream)
        assert_equal(stream.getvalue().rstrip("\n"), expected)
    @parameterized([
        param(u"'\\'\"'"),
        param(u'"\'"'),
        param(u"'\"'"),
        param("frozenset(['a', 'b', 'c'])"),
        param("set([None, 1, 'a'])"),
        param("[]"),
        param("[1]"),
        param("{}"),
        param("{1: 1}"),
        param("set()"),
        param("set([1])"),
        param("frozenset()"),
        param("frozenset([1])"),
        param("()"),
        param("(1, )"),
        param("MyDict({})"),
        param("MyDict({1: 1})"),
        param("MyList([])"),
        param("MyList([1])"),
        param("MyTuple(())"),
        param("MyTuple((1, ))"),
        param("MySet()"),
        param("MySet([1])"),
        param("MyFrozenSet()"),
        param("MyFrozenSet([1])"),
    ] + ([] if not p._test_has_collections else [
        param("Counter()"),
        param("Counter({1: 1})"),
        param("OrderedDict()"),
        param("OrderedDict([(1, 1), (5, 5), (2, 2)])"),
        param("MyOrderedDict()"),
        param("MyOrderedDict([(1, 1)])"),
        param("MyCounter()"),
        param("MyCounter({1: 1})"),
        param("MyCounterWithRepr('dummy')"),
    ]))
    def test_back_and_forth(self, expected):
        # Round trip: eval the expected repr, pretty-print the result, and
        # require the output to match the source text exactly.
        input = eval(expected)
        stream = p.TextIO()
        p.pprint(input, stream=stream)
        assert_equal(stream.getvalue().rstrip("\n"), expected)
    if p._test_has_collections:
        # defaultdict's repr embeds the factory, so input and expected text
        # must be supplied separately rather than eval'd.
        @parameterized([
            param("defaultdict(%r, {})" %(int, ), defaultdict(int)),
            param("defaultdict(%r, {1: 1})" %(int, ), defaultdict(int, [(1, 1)])),
            param("MyDefaultDict(%r, {})" %(int, ), MyDefaultDict(int)),
            param("MyDefaultDict(%r, {1: 1})" %(int, ), MyDefaultDict(int, [(1, 1)])),
        ])
        def test_expected_input(self, expected, input):
            stream = p.TextIO()
            p.pprint(input, stream=stream)
            assert_equal(stream.getvalue().rstrip("\n"), expected)
    def test_unhashable_repr(self):
        # In Python 3, C extensions can define a __repr__ method which is an
        # instance of `instancemethod`, which is unhashable. It turns out to be
        # spectacularly difficult to create an `instancemethod` and attach it to
        # a type without using C... so we'll simulate it using a more explicitly
        # unhashable type.
        # See also: http://stackoverflow.com/q/40876368/71522
        class UnhashableCallable(object):
            __hash__ = None
            def __call__(self):
                return "some-repr"
        class MyCls(object):
            __repr__ = UnhashableCallable()
        obj = MyCls()
        assert_equal(p.pformat(obj), "some-repr")
if __name__ == "__main__":
import nose
nose.main()
|
from yacs.config import CfgNode as CN
_C = CN()
_C.NAME = ""
_C.TRAIN = CN()
_C.TRAIN.EPOCH_TOTAL = 50
_C.TRAIN.BATCH_SIZE = 8
_C.TRAIN.DATA = ""
_C.TRAIN.OPTIMIZER = "sgd"
_C.TRAIN.TEST_FREQ = 5 #epoch
_C.TRAIN.OUTPUT_FOLDER = ""
_C.TRAIN.INPUT_SIZE = [0,0] #Height Width
_C.SGD = CN()
_C.SGD.LR_START = 0.001
_C.SGD.LR_END = 1e-8
_C.SGD.MOMENTUM = 0.9
_C.SGD.LR_POLICY = "linear"
_C.TEST = CN()
_C.TEST.DATA = ""
_C.TEST.BATCH_SIZE = 1
_C.TEST.OUTPUT_FOLDER = ""
def get_default_cfg():
    """Return a fresh clone of the default config so callers cannot mutate _C."""
    return _C.clone()
|
import keras.backend as K
import numpy as np
from keras.layers import merge, Dense
from keras.models import Input, Model, Sequential
def get_multi_inputs_model():
    """Build a two-input model: elementwise product -> Dense(1, sigmoid).

    NOTE(review): uses the Keras 1.x lowercase `merge` function with
    `mode='mul'`; Keras 2 replaced it with the `Multiply` layer — confirm
    the targeted Keras version.
    """
    a = Input(shape=(10,))
    b = Input(shape=(10,))
    c = merge([a, b], mode='mul')
    # Named so tests can fetch activations for this specific layer.
    c = Dense(1, activation='sigmoid', name='only_this_layer')(c)
    m_multi = Model(inputs=[a, b], outputs=c)
    return m_multi
def get_single_inputs_model():
    """Build a minimal single-input Sequential model: Dense(1, sigmoid)."""
    m_single = Sequential()
    m_single.add(Dense(1, activation='sigmoid', input_shape=(10,)))
    return m_single
if __name__ == '__main__':
    # Train the two-input model on random data, then inspect its activations.
    m = get_multi_inputs_model()
    m.compile(optimizer='adam',
              loss='binary_crossentropy')
    inp_a = np.random.uniform(size=(100, 10))
    inp_b = np.random.uniform(size=(100, 10))
    inp_o = np.random.randint(low=0, high=2, size=(100, 1))
    m.fit([inp_a, inp_b], inp_o)
    from read_activations import *
    get_activations(m, [inp_a[0:1], inp_b[0:1]], print_shape_only=True)
    get_activations(m, [inp_a[0:1], inp_b[0:1]], print_shape_only=True, layer_name='only_this_layer')
    # Same check for the single-input model.
    m2 = get_single_inputs_model()
    m2.compile(optimizer='adam',
               loss='binary_crossentropy')
    m2.fit([inp_a], inp_o)
    # BUG FIX: inp_a[0] has shape (10,), dropping the batch axis the model
    # expects; slice with [0:1] to keep shape (1, 10) like the calls above.
    get_activations(m2, [inp_a[0:1]], print_shape_only=True)
|
import sys, os, re, math
import functools
sys.path.insert(0, os.path.abspath('..'))
from common.DayBase import DayBase
class Substrate:
    """A named chemical together with the quantity of it in play."""

    def __init__(self, name="", amount=0):
        # Defaults allow callers to build an empty Substrate and fill it in.
        self.name, self.amount = name, amount
class Reaction:
    """One recipe: a list of input substrates producing a result substrate."""

    def __init__(self, result, substrates):
        self.result, self.substrates = result, substrates
class Day14(DayBase):
    """AoC 2019 day 14: ORE cost of FUEL (part 1) and max FUEL from 1e12 ORE (part 2)."""

    def __init__(self):
        super(Day14, self).__init__()
        # All parsed Reaction objects from the puzzle input.
        self.reactions = []
        # Log of [name, amount] consumed while reducing one FUEL.
        self._amount_needed_per_one_fuel = []
    def _find_reaction(self, product):
        """Return the (unique) reaction whose result has product's name, else None."""
        for reaction in self.reactions:
            if reaction.result.name == product.name:
                return reaction
    @staticmethod
    def add_substrate(substrates, new_substrate):
        """Merge new_substrate into the list, summing amounts on a name match."""
        inserted = False
        for substrate in substrates:
            if substrate.name == new_substrate.name:
                substrate.amount = substrate.amount + new_substrate.amount
                inserted = True
        if not inserted:
            substrates.append(new_substrate)
        return substrates
    def process_input(self):
        """Parse lines of the form '7 A, 1 B => 2 C' into Reaction objects."""
        for line in self._file.readlines():
            sides = re.split(r"=>", line)
            result = Substrate()
            result.amount = int(re.findall(r"\d+", sides[1])[0])
            # Strip whitespace and digits, leaving only the chemical name.
            filtered = re.sub(r"\s*", "", sides[1])
            filtered = re.sub(r"\d+", "", filtered)
            result.name = filtered
            substrates = []
            for substrate in re.split(r",", sides[0]):
                element = Substrate()
                element.amount = int(re.findall(r"\d+", substrate)[0])
                filtered = re.sub(r"\s*", "", substrate)
                filtered = re.sub(r"\d+", "", filtered)
                element.name = filtered
                substrates.append(element)
            self.reactions.append(Reaction(result, substrates))
    def is_sub_needed_somewhere_else(self, substrate, substrates):
        """Return True if `substrate` is still an ingredient of any pending substrate.

        NOTE(review): `other_substrate.name != substrate` compares a str name
        against a Substrate instance, so it is always True and never skips
        `substrate` itself — presumably `substrate.name` was intended; with
        that fix the final reduce() could also see an empty list. Confirm
        before changing, since the current form produces accepted answers.
        """
        needed = []
        for other_substrate in substrates:
            if other_substrate.name != substrate:
                reaction_needed = self._find_reaction(other_substrate)
                if reaction_needed:
                    for substrate_needed in reaction_needed.substrates:
                        if substrate_needed.name == substrate.name:
                            return True
                    needed.append(self.is_sub_needed_somewhere_else(substrate, reaction_needed.substrates))
                else:
                    needed.append(False)
        return functools.reduce(lambda x, y: x or y, needed)
    def _solve(self, what, how_much):
        """Reduce `how_much` of `what` down to raw ORE; return the ORE amount."""
        substrates_needed = [Substrate(what, how_much)]
        only_needed_ore = False
        while not only_needed_ore:
            only_needed_ore = True
            for substrate_needed in substrates_needed:
                if substrate_needed.name != "ORE":
                    reaction_needed = self._find_reaction(substrate_needed)
                    # Only expand a substrate once nothing else still needs it,
                    # so reaction outputs are batched (ceil) correctly.
                    if not self.is_sub_needed_somewhere_else(substrate_needed, substrates_needed):
                        only_needed_ore = False
                        reaction_result_amount = reaction_needed.result.amount
                        needed_amount = substrate_needed.amount
                        times_reaction = math.ceil(needed_amount / reaction_result_amount)
                        self._amount_needed_per_one_fuel.append([substrate_needed.name,
                                                                 substrate_needed.amount * times_reaction])
                        for ingredient in reaction_needed.substrates:
                            sub_to_add = Substrate(ingredient.name, ingredient.amount * times_reaction)
                            substrates_needed = self.add_substrate(substrates_needed, sub_to_add)
                        substrates_needed.remove(substrate_needed)
        return substrates_needed[0].amount
    def solve1(self):
        """Part 1: ORE needed for exactly one FUEL."""
        self.process_input()
        ans = self._solve("FUEL", 1)
        print(f"part1: {ans}")
    def solve2(self):
        """Part 2: binary-search the largest FUEL amount costing <= 1e12 ORE."""
        self.process_input()
        target = int(1e12)
        lower_bound = 0
        upper_bound = target
        changed = True
        center = 0
        while changed:
            changed = False
            center = (lower_bound + upper_bound) // 2
            result = self._solve("FUEL", center)
            if result >= target:
                if upper_bound != center:
                    upper_bound = center
                    changed = True
            if result <= target:
                if lower_bound != center:
                    changed = True
                    lower_bound = center
            print (lower_bound, upper_bound)
        print(f"part2: {center}")
if __name__ == "__main__":
dayPart1 = Day14()
dayPart1.solve1()
dayPart2 = Day14()
dayPart2.solve2()
|
from __future__ import print_function
from setuptools import setup
# from setuptools.command.test import test as TestCommand
import io
# import sys
import wikigrouth
def read(*filenames, **kwargs):
    """Read the given files and return their contents joined by a separator.

    Keyword arguments:
        encoding: text encoding used for every file (default 'utf-8').
        sep: string inserted between file contents (default newline).
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')

    def _slurp(filename):
        # Each file is opened and closed individually.
        with io.open(filename, encoding=encoding) as handle:
            return handle.read()

    return sep.join(_slurp(filename) for filename in filenames)
# long_description = read('README.rst')
# class PyTest(TestCommand):
# def finalize_options(self):
# TestCommand.finalize_options(self)
# self.test_args = []
# self.test_suite = True
# def run_tests(self):
# import pytest
# errcode = pytest.main(self.test_args)
# sys.exit(errcode)
setup(
# Basic info
name='wikigrouth',
version=wikigrouth.__version__,
url='https://github.com/behas/wikigrouth.git',
# Author
author='Bernhard Haslhofer',
author_email='bernhard.haslhofer@ait.ac.at',
# Description
description="""A Python tool for creating coreference resolution
ground truths from Wikipedia.""",
# long_description=long_description,
# Package information
packages=['wikigrouth'],
scripts=['scripts/wikigrouth'],
platforms='any',
install_requires=['requests', 'beautifulsoup4'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
],
# Testing
# test_suite='tests',
# tests_require=['pytest'],
# cmdclass={'test': PyTest},
# extras_require={
# 'testing': ['pytest'],
# },
# Legal info
license='MIT License',
)
|
import sys
# generates the reverse complement of a codon
# generates the reverse complement of a codon
def generate_reverse_complement(codon):
    """Return the reverse complement of a DNA codon string.

    The sequence is reversed and each base swapped with its partner
    (A<->T, G<->C); any other character maps to "G", matching the
    original chained-conditional behaviour.
    """
    partner = {"A": "T", "T": "A", "G": "C"}
    return "".join(partner.get(base, "G") for base in reversed(codon))
# `replacement_codon` will replace `bad_codon` (for normal genes)
# `rev_comp_replacement_codon` will replace `rev_comp_bad_codon` (for complement genes)
bad_codon = "TTG"
rev_comp_bad_codon = generate_reverse_complement(bad_codon)
replacement_codon = "CTA"
rev_comp_replacement_codon = generate_reverse_complement(replacement_codon)
# input paths and files
genome_path = "./genome.txt"
genome_file = open(genome_path)
genome_bank_path = "./genome_bank.txt"
genome_bank_file = open(genome_bank_path)
# log file
log_path = "./log.txt"
log = open(log_path, "w")
# output file
output_path = "./modified_genome.txt"
output = open(output_path, "w")
# get all of the lines in the genome
gene_lines = []
for line in genome_file:
gene_lines.append(line.replace("\n",""))
gene_lines = gene_lines[1:]
# get all of the lines in the genome bank
bank_lines = []
for line in genome_bank_file:
bank_lines.append(line)
# get all of the start/stop points
# Each gene annotation line yields a 0-based inclusive (start, stop, kind)
# tuple, where kind is "complement" or "normal".
bounds = []
for line in bank_lines:
    if " gene " in line:
        if "complement" in line:
            # e.g. "complement(123..456)" -> start=122, stop=455
            s = line.split(" ")[-1].split(".")
            start = int(s[0].replace("complement(","")) - 1
            stop = int(s[-1].replace("\n","").replace(")","")) - 1
            bounds.append((start,stop,"complement"))
        else:
            # e.g. "123..456" -> start=122, stop=455
            s = line.split(" ")[-1].split(".")
            start = int(s[0]) - 1
            stop = int(s[-1].replace("\n", "")) - 1
            bounds.append((start,stop,"normal"))
# generate entire genome
genome = "".join(gene_lines)
# create new genome
# glb: gene lower bound
# gub: gene upper bound
# bt: bound type (either "normal" or "complement")
count = 1
for (glb,gub,bt) in bounds:
status = "\rModifying gene %d of %d" % (count, len(bounds))
count += 1
sys.stdout.write(status)
sys.stdout.flush()
log.write("[Gene Bounds]: (%d,%d)\n\n" % (glb+1,gub+1)) # log
substring = genome[glb:gub + 1]
codons = [substring[i:i+3] for i in range(0,len(substring),3)]
log_codons = [s.replace(bad_codon, "--(("+ bad_codon + "))--") for s in codons] if bt == "normal" else [s.replace(rev_comp_bad_codon, "--((" + rev_comp_bad_codon + "))--") for s in codons] # log
log.write("[Original Gene]: " + "".join(log_codons) + "\n\n") # log
if bt == "normal":
tmp = codons # log
codons = [s.replace(bad_codon,replacement_codon) for s in codons]
log_codons = [s.replace(bad_codon, "--((" + replacement_codon + "))--") for s in tmp] # log
elif bt == "complement":
tmp = codons # log
codons = [s.replace(rev_comp_bad_codon,rev_comp_replacement_codon) for s in codons]
log_codons = [s.replace(rev_comp_bad_codon, "--((" + rev_comp_replacement_codon + "))--") for s in tmp] # log
substring = "".join(codons)
genome = genome[:glb] + substring + genome[gub + 1:]
log.write("[Modified Gene]: " + "".join(log_codons) + "\n") # log
log.write("**********************************************\n") # log
# write modified genome to output
genome_lines = [genome[i:i+70] for i in range(0,len(genome),70)]
for line in genome_lines:
output.write(line + "\n")
# clean up
# BUG FIX: `print "\nDone!"` is Python 2 statement syntax and a SyntaxError
# under Python 3; the call form works on both interpreters.
print("\nDone!")
genome_file.close()
genome_bank_file.close()
log.close()
output.close()
|
from tkinter import *
def drawCell(canvas, x, y, size, text):
    # Draw one square cell with its value centred inside.
    canvas.create_rectangle(x, y, x + size, y + size)
    canvas.create_text(x + size / 2, y + size / 2 - 1, text = str(text))
def drawCellLine(canvas, x, y, cellSize, elements, xBitCount):
    # Draw one row of 2**xBitCount cells starting at (x, y).
    for column in range(0, 1 << xBitCount):
        drawCell(canvas, x + column * cellSize, y, cellSize, elements[column])
def drawTable(canvas, x, y, cellSize, table, xBitCount, yBitCount):
    # Draw the full 2**yBitCount x 2**xBitCount grid of values.
    for row in range(0, 1 << yBitCount):
        drawCellLine(canvas, x, y + row * cellSize, cellSize, table[row], xBitCount)
def generateGrayCode(bitCount):
    """Return the ordered list of bitCount-bit Gray code strings.

    Uses the reflect-and-prefix construction: the (n-1)-bit sequence with a
    leading "0", followed by its mirror image with a leading "1".
    """
    if bitCount == 1:
        return ["0", "1"]
    previous = generateGrayCode(bitCount - 1)
    return ["0" + code for code in previous] + \
           ["1" + code for code in reversed(previous)]
def drawGrayCodeHorizontal(canvas, x, y, cellSize, xBitCount):
    # Label each column with its Gray-code bit string.
    grayCode = generateGrayCode(xBitCount)
    for column in range(0, len(grayCode)):
        canvas.create_text(x + column * cellSize + cellSize / 2, y, text = grayCode[column])
def drawGrayCodeVertical(canvas, x, y, cellSize, yBitCount):
    # Label each row with its Gray-code bit string.
    grayCode = generateGrayCode(yBitCount)
    for row in range(0, len(grayCode)):
        canvas.create_text(x, y + row * cellSize + cellSize / 2, text = grayCode[row])
def drawTableWithGrayCode(canvas, x, y, cellSize, table, xBitCount, yBitCount):
    # Fixed pixel offsets leave room for the Gray-code labels around the grid.
    drawTable(canvas, x + 30, y + 16, cellSize, table, xBitCount, yBitCount)
    drawGrayCodeHorizontal(canvas, x + 30, y + 10, cellSize, xBitCount)
    drawGrayCodeVertical(canvas, x + 16, y + 15, cellSize, yBitCount)
class KarnaughMap:
    """Model of a Karnaugh-map grid plus its drawing position on the canvas."""

    def __init__(self, xBitCount = 3, yBitCount = 3, x = 0, y = 0, cellSize = 40):
        self.xBitCount = xBitCount
        self.yBitCount = yBitCount
        # BUG FIX: the original `[[0] * w] * h` replicated ONE row object h
        # times, so setting any cell set that column in every row. Build each
        # row independently instead.
        self.elements = [[0] * (1 << xBitCount) for _ in range(1 << yBitCount)]
        self.x = x
        self.y = y
        self.cellSize = cellSize

    def draw(self, canvas, cellSize = 40):
        """Render the grid and its Gray-code labels on the given canvas."""
        drawTableWithGrayCode(canvas, self.x, self.y, cellSize, self.elements, self.xBitCount, self.yBitCount)

    def handleClick(self, event):
        """Set the clicked cell to 1, translating canvas pixels to grid indices."""
        # The +30/+16 offsets match drawTableWithGrayCode's label margins.
        column = int((event.x - (self.x + 30)) / self.cellSize)
        row = int((event.y - (self.y + 16)) / self.cellSize)
        self.elements[row][column] = 1
def click(event):
    # Canvas <Button-1> callback: forward to the global KarnaughMap instance.
    map.handleClick(event)
graphWidth = 400
graphHeight = 400
backgroundFill = "#FFFFFF"
root = Tk()
root.title("Karnaugh map drawer")
root.resizable(height = False, width = False)
#root.wm_iconbitmap("favicon.ico")
root.geometry(str(graphWidth) + 'x' + str(graphHeight+60))
canvas = Canvas(root, width = graphWidth, height = graphHeight, bg = backgroundFill)
canvas.bind("<Button-1>", click)
map = KarnaughMap()
map.draw(canvas)
#print(elements)
#drawTableWithGrayCode(canvas, 20, 20, 40, elements, xBitCount, yBitCount)
canvas.place(bordermode = OUTSIDE)
root.mainloop() |
import pygame
import Components
import MainPage
from Pages import Page
class DriveTrainPage(Page.Page):
    """Info page describing the robot's swerve drive train.

    Builds its widget tree once in __init__: title labels, a container with
    descriptive text and navigation buttons, and a chassis photo.
    """

    def __init__(self):
        Page.Page.__init__(self)
        self.backgroundColor = pygame.Color("dark red")
        self.components = [
            Components.Label.Label([150, 150], 150, "Drive", ["red", "black"]),
            Components.Label.Label([200, 270], 150, "Train", ["red", "black"]),
            Components.Container.Container(512, 0, 1280, 512, [
                Components.Label.Label([100, 600], 60, "has a set of four swerve wheels which allow for fluid \n" +
                                       "and fast movement across the field. Our chassis was \n" +
                                       "custom-built to the short robot dimensions, as \n" +
                                       "this gives more length to our intake.", ["red", "black"]),
                Components.Button.Button([300, 900], 235, 60, "Main Page", ["black", "white"],
                                         textHeight=68,
                                         function=MainPage.MainPage),
                # NOTE(review): this button is labelled "Software Information"
                # but navigates to MainPage — confirm intended target.
                Components.Button.Button([600, 900], 520, 60, "Software Information", ["black", "white"],
                                         textHeight=68,
                                         function=MainPage.MainPage),
            ], "grey"),
            # "BiuldingChassis" is presumably the actual (misspelled) asset
            # name on disk — do not "fix" without renaming the file.
            Components.PictureHolder.PictureHolder([600, 50], "Images/BiuldingChassis.JPG", scale=.12)
        ]
|
import cv2
import copy
# Capture one photo from the default webcam and save it under the user's name.
cap = cv2.VideoCapture(0)
name=input("Please Enter Your First Name : ")
while(True):
    ret,frame = cap.read()
    # Overlay the instruction on a copy so the saved frame stays clean.
    frame1=copy.deepcopy(frame)
    frame1 = cv2.putText(frame1, 'Press \'k\' to click photo', (200,200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 3, cv2.LINE_AA)
    cv2.imshow('img1',frame1)
    # NOTE(review): assumes the 'faceRegister/' directory already exists;
    # cv2.imwrite fails silently otherwise — confirm.
    if cv2.waitKey(1) & 0xFF == ord('k'):
        cv2.imwrite('faceRegister/%s.png'%name,frame)
        cv2.destroyAllWindows()
        break
cap.release()
__author__ = 'Louis-Pan'
import pytest
"""
使用场景:有的用例需要登录执行,有些用例不需要登录执行
用例在执行时需要登录的用例,都需要执行login函数,相当于unittest中setUp()函数,每条用例运行前都需要执行
步骤:
1. 导⼊pytest
2. 在登陆的函数上⾯加@pytest.fixture()
3. 在要使⽤的测试⽅法中传⼊(登陆函数名称),就先登陆
4. 不传⼊的就不登陆直接执⾏测试⽅法。
"""
@pytest.fixture(scope="function")
def login():
    # Function-scoped fixture: runs before every test that requests it,
    # like unittest's setUp. (Prints "login succeeded".)
    print("\n登录成功")
def test_update_person_info(login):
    # Declares the `login` fixture, so the login step runs first.
    print("登录成功后修改个人信息")
def test_search():
    # No fixture argument: runs without logging in.
    print("无需登录,查询商品")
def test_add_to_car(login):
    # Also requires login before adding items to the cart.
    print("登录成功后加入商品至购物车")
if __name__=="__main__":
pytest.main(["-s",'test_2_fixture_scope_function.py']) |
from django.contrib import admin
from django.urls import path
from multiply_app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.calculator, name='calculator'),
path('CalculateResult/', views.CalculateResult, name='CalculateResult'),
]
|
import falcon
from ldframe.utils.logs import app_logger
from ldframe.api.healthcheck import HealthCheck
api_version = "0.2" # TO DO: Figure out a better way to do versioning
def init_api():
    """Create and return the falcon API with its routes registered.

    Currently exposes only the '/health' endpoint; `api_version` above is
    not yet wired into the routes.
    """
    ldframe = falcon.API()
    ldframe.add_route('/health', HealthCheck())
    app_logger.info('FramingService REST API is running.')
    return ldframe
# if __name__ == '__main__':
# init_api()
|
'''
Created on 28/05/2015
@author: mangeli
'''
import GameQualityAssessment.code_pac.dataBaseAdapter as db
import os,sys
sys.path.insert(1, os.path.abspath(os.pardir))
print (sys.path)
db.setValues("desafio_simples", "mangeli", "localhost", "agoravai", 5432)
conn = db.getConnection()
c = db.getCursor(conn)
db.cursorExecute(c, "SELECT * FROM tournament;")
l = db.fetchAll(c)
print (l[:])
for retorno in l : print (retorno["country"])
print (c, conn)
db.closeCursor(c)
db.closeConnection(conn)
print (c, conn)
|
from store import RedisClient
import aiohttp
import asyncio
import time
VALID_STATUS_CODES=[200]
TEST_URL='http://baidu.com'
BATCH_TEST_SIZE=100
class TEST:
    """Validate proxies stored in Redis by requesting TEST_URL through each one."""

    def __init__(self):
        self.redis=RedisClient()
    async def test_single_proxy(self,proxy):
        """Test one proxy: raise its score on success, decrease it on failure.

        :param proxy: proxy address, as str or bytes (decoded if needed)
        """
        conn=aiohttp.TCPConnector(verify_ssl=False)
        async with aiohttp.ClientSession(connector=conn) as session:
            try:
                if isinstance(proxy,bytes):
                    proxy=proxy.decode('utf-8')
                real_proxy='http://'+proxy
                print('testing ',proxy)
                async with session.get(TEST_URL,proxy=real_proxy,timeout=15) as response:
                    if response.status in VALID_STATUS_CODES:
                        self.redis.max(proxy)
                        print('proxy is ok :',proxy)
                    else:
                        self.redis.decrease(proxy)
                        print('proxy is no :',proxy)
            # BUG FIX: aiohttp.ClientTimeout is a configuration class, not an
            # exception — listing it here made the except clause itself raise
            # TypeError whenever an error was matched. Timeouts from the
            # `timeout=` argument surface as asyncio.TimeoutError, and
            # ClientConnectionError is already a ClientError subclass.
            except (aiohttp.ClientError, asyncio.TimeoutError, AttributeError):
                self.redis.decrease(proxy)
                print('proxy request failuer ',proxy)
    def run(self):
        """Check all stored proxies in batches of BATCH_TEST_SIZE."""
        print('run....')
        try:
            proxies=self.redis.all()
            loop=asyncio.get_event_loop()
            for i in range(0,len(proxies),BATCH_TEST_SIZE):
                test_proxies=proxies[i:i+BATCH_TEST_SIZE]
                tasks=[self.test_single_proxy(proxy) for proxy in test_proxies]
                loop.run_until_complete(asyncio.wait(tasks))
                # Pause between batches to avoid hammering the test target.
                time.sleep(5)
        except Exception as e:
            print('testing error :',e)
|
#!/usr/bin/env python
import rospy
import math
from robot_messages.msg import LandmarkDistance
from std_msgs.msg import String
from kobuki_msgs.msg import DockInfraRed
from kobuki_msgs.msg import SensorState
from tf.transformations import euler_from_quaternion
class RobotController(object):
    """Translates landmark-distance messages into simulated dock IR readings.

    For each incoming LandmarkDistance, publishes a DockInfraRed message
    (three bytes: left/center/right receiver) and a SensorState message,
    emulating a Kobuki docking station's IR field based on which sensor is
    closest, how far it is, and the robot's yaw.
    """

    def __init__(self, publisher, core_pub):
        # publisher: DockInfraRed topic; core_pub: SensorState (core) topic.
        self._publisher = publisher
        self._core_pub = core_pub
        # Last sensor name seen; used to infer turn direction at center.
        self._previous_sensor = None
    def distanceCallback(self, msg):
        """Handle one LandmarkDistance message and dispatch by sensor name."""
        #extract message data
        sensor_name = msg.name
        sensor_distance = msg.distance
        roll = None
        pitch = None
        yaw = None
        orientation_list = [msg.quat_orientation_x, msg.quat_orientation_y, msg.quat_orientation_z, msg.quat_orientation_w]
        (roll, pitch, yaw) = euler_from_quaternion (orientation_list)
        #set _previous_sensor initial value
        if self._previous_sensor is None:
            self._previous_sensor = sensor_name
        #create publishing dock_ir message and populate header
        dock_ir_msg = DockInfraRed()
        dock_ir_msg.header.frame_id = 'base_link'
        dock_ir_msg.header.stamp = rospy.Time.now()
        sensor_state_msg = SensorState()
        sensor_state_msg.header.frame_id = 'base_link'
        sensor_state_msg.header.stamp = rospy.Time.now()
        sensor_state_msg.charger = SensorState.DISCHARGING
        #populate pub msg data based on sensor and distance
        if sensor_name == 'Right Sensor':
            self.rightSensorDetected(dock_ir_msg, sensor_distance, sensor_state_msg, math.degrees(yaw))
        elif sensor_name == 'Left Sensor':
            self.leftSensorDetected(dock_ir_msg, sensor_distance, sensor_state_msg, math.degrees(yaw))
        else: #sensor_name == 'Center Sensor'
            self.centerSensorDetected(dock_ir_msg, sensor_distance, sensor_state_msg, math.degrees(yaw))
    def rightSensorDetected(self, dock_ir_msg, sensor_distance, sensor_state_msg, robot_orientation):
        """Publish IR data for the right dock sensor.

        robot_orientation is yaw in degrees; when roughly facing -90 deg the
        right receiver alone reports, otherwise all three receivers do.
        The value 50 marks a receiver with no meaningful signal.
        """
        data = []
        if sensor_distance <= 2.5 and sensor_distance > 1:
            if robot_orientation >= -95 and robot_orientation <= -65:
                data.append(50)
                data.append(50)
                data.append((DockInfraRed.FAR_RIGHT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Right Sensor"
            else:
                data.append((DockInfraRed.FAR_RIGHT))
                data.append((DockInfraRed.FAR_RIGHT))
                data.append((DockInfraRed.FAR_RIGHT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Right Sensor"
        elif sensor_distance <= 1:
            # Tighter yaw window when close to the dock.
            if robot_orientation >= -95 and robot_orientation <= -80:
                data.append(50)
                data.append(50)
                data.append((DockInfraRed.NEAR_RIGHT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Right Sensor"
            else:
                data.append((DockInfraRed.NEAR_RIGHT))
                data.append((DockInfraRed.NEAR_RIGHT))
                data.append((DockInfraRed.NEAR_RIGHT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Right Sensor"
    def leftSensorDetected(self, dock_ir_msg, sensor_distance, sensor_state_msg, robot_orientation):
        """Publish IR data for the left dock sensor (mirror of the right case)."""
        data = []
        if sensor_distance <= 2.5 and sensor_distance > 1:
            if robot_orientation >= 65 and robot_orientation <= 95:
                data.append((DockInfraRed.FAR_LEFT))
                data.append(50)
                data.append(50)
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Left Sensor"
            else:
                data.append((DockInfraRed.FAR_LEFT))
                data.append((DockInfraRed.FAR_LEFT))
                data.append((DockInfraRed.FAR_LEFT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Left Sensor"
        elif sensor_distance <= 1:
            if robot_orientation >= 80 and robot_orientation <= 95:
                data.append((DockInfraRed.NEAR_LEFT))
                data.append(50)
                data.append(50)
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Left Sensor"
            else:
                data.append((DockInfraRed.NEAR_LEFT))
                data.append((DockInfraRed.NEAR_LEFT))
                data.append((DockInfraRed.NEAR_LEFT))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Left Sensor"
    def centerSensorDetected(self, dock_ir_msg, sensor_distance, sensor_state_msg, robot_orientation):
        """Publish IR data for the center dock sensor.

        When misaligned, uses _previous_sensor to steer back toward center.
        Within 0.5 units the robot is considered docked and only the charger
        state is published.
        """
        data = []
        if sensor_distance <= 4 and sensor_distance > 1:
            if robot_orientation >= -10 and robot_orientation <= 10:
                data.append((DockInfraRed.FAR_CENTER))
                data.append((DockInfraRed.FAR_CENTER))
                data.append((DockInfraRed.FAR_CENTER))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Center Sensor"
            else:
                # Misaligned: report the side opposite the last side sensor
                # so the docking behaviour turns back toward center.
                if self._previous_sensor == "Right Sensor":
                    data.append((DockInfraRed.FAR_LEFT))
                    data.append((DockInfraRed.FAR_LEFT))
                    data.append((DockInfraRed.FAR_LEFT))
                    dock_ir_msg.data = data
                    self._publisher.publish(dock_ir_msg)
                    self._core_pub.publish(sensor_state_msg)
                elif self._previous_sensor == "Left Sensor":
                    data.append((DockInfraRed.FAR_RIGHT))
                    data.append((DockInfraRed.FAR_RIGHT))
                    data.append((DockInfraRed.FAR_RIGHT))
                    dock_ir_msg.data = data
                    self._publisher.publish(dock_ir_msg)
                    self._core_pub.publish(sensor_state_msg)
        elif sensor_distance <= 1.0 and sensor_distance > 0.5:
            if robot_orientation >= -5 and robot_orientation <= 5:
                data.append((DockInfraRed.NEAR_CENTER))
                data.append((DockInfraRed.NEAR_CENTER))
                data.append((DockInfraRed.NEAR_CENTER))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Center Sensor"
            else:
                data.append((DockInfraRed.NEAR_CENTER))
                data.append((DockInfraRed.NEAR_CENTER))
                data.append((DockInfraRed.NEAR_CENTER))
                dock_ir_msg.data = data
                self._publisher.publish(dock_ir_msg)
                self._core_pub.publish(sensor_state_msg)
                self._previous_sensor = "Center Sensor"
        elif sensor_distance <= 0.5:
            # Docked: flip the charger state; no IR message is published.
            sensor_state_msg.charger = SensorState.DOCKING_CHARGING
            self._core_pub.publish(sensor_state_msg)
def main():
    """Start the dock-controller node: wire publishers to the landmark topic."""
    rospy.init_node('robot_dock_controller')
    #publish DockInfraRed msgs based on sensor and distance
    publisher = rospy.Publisher("/mobile_base/sensors/dock_ir", DockInfraRed, queue_size = 10)
    core = rospy.Publisher("/mobile_base/sensors/core", SensorState, queue_size = 10)
    #create RobotController object
    controller = RobotController(publisher, core)
    #subscribe to /closest_landmark topic and get distance to the closest sensor
    rospy.Subscriber("/closest_landmark", LandmarkDistance, controller.distanceCallback)
    rospy.spin()
if __name__ == '__main__':
main()
|
# Your key for the Google Speech API which is used to transcribe your speech to text. See this for more info: https://developers.google.com/api-client-library/python/guide/aaa_apikeys
GOOGLE_SPEECH_API_KEY = ""
# Your key for the Trello API: https://trello.com/app-key
TRELLO_API_KEY = ""
# You also need a Trello API token. You can get the token URL by running this function:
#tokenUrl = trello.get_token_url('Hello Trello', expires='30days', write_access=True)
# Once you have a URL, paste it in your browser and approve. It will return a token for you.
TRELLO_API_TOKEN = ""
# This is the ID of the Trello list where you want your card to be added. To get it, follow these steps:
# 1. Use this URL to retrieve the Board ID where your list is located. Replace <your_username>, <your_trello_api_key> and <your_trello_token> with the values you got above.
# https://api.trello.com/1/members/<your_username>/boards?key=<your_trello_api_key>&token=<your_trello_token>
# 2. Use this function to get all the lists from your board, the ID is included there:
# lists = trello.boards.get_list('<your_board_ID>')
TRELLO_LIST_ID = ""
# This is the command that will make the computer prepare to send a note to Trello. Anything you say after this command will be sent to Trello.
INIT_COMMAND = "add to trello"
import speech_recognition as sr
from trello import TrelloApi
r = sr.Recognizer(key = GOOGLE_SPEECH_API_KEY)
r.pause_threshold = 0.5
from dragonfly.all import Grammar, CompoundRule
import pythoncom
import time
# Voice command rule combining spoken form and recognition processing.
class ExampleRule(CompoundRule):
    """Voice command: on hearing INIT_COMMAND, record a phrase and post it to Trello.

    NOTE(review): `r = sr.Recognizer(key=...)` and `r.recognize(audio)` match
    an old speech_recognition API; current versions use
    `recognize_google(audio, key=...)` — confirm the pinned library version.
    """
    spec = INIT_COMMAND # Spoken form of command.
    def _process_recognition(self, node, extras): # Callback when command is spoken.
        # use the default microphone as the audio source
        with sr.Microphone() as source:
            # listen for the first phrase and extract it into audio data
            audio = r.listen(source)
            try:
                # recognize speech using Google Speech Recognition then send to Trello
                text = r.recognize(audio)
                print(text)
                trello = TrelloApi(apikey=TRELLO_API_KEY, token=TRELLO_API_TOKEN)
                a = trello.lists.new_card(TRELLO_LIST_ID, text, '')
            except LookupError:
                print("Could not understand audio")
# Create a grammar which contains and loads the command rule.
grammar = Grammar("example grammar") # Create a grammar to contain the command rule.
grammar.add_rule(ExampleRule()) # Add the command rule to the grammar.
grammar.load() # Load the grammar.
while True:
pythoncom.PumpWaitingMessages()
time.sleep(.1)
|
from software_v2 import Ui_MainWindow # importing our generated file
from PyQt5 import QtCore,QtGui,QtWidgets,uic, QtTest
from PyQt5.QtWidgets import QApplication,QMainWindow,QLabel,QDesktopWidget,QFileDialog
from PyQt5.QtGui import QPixmap,QImage
from PyQt5.QtCore import QThread ,pyqtSlot,pyqtSignal,Qt
import thread
import sys
import time
import numpy as np
sys.path.remove(sys.path[1])
import cv2
class mywindow(QtWidgets.QMainWindow):
    """Frameless main window driving the measurement UI and its worker thread.

    Emits `status` to start/stop the video worker and `switch` to toggle
    between radius and height measurement modes.
    """
    status = pyqtSignal(bool)
    switch = pyqtSignal(bool)
    def __init__(self):
        super(mywindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.th = thread.Thread(self)
        self.link()
        self.running = False
        self.th.ThreadBool = True
        # Frameless window, manually centred on the available screen area.
        self.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.qtRectangle = self.frameGeometry()
        self.centerPoint = QDesktopWidget().availableGeometry().center()
        self.qtRectangle.moveCenter(self.centerPoint)
        self.move(self.qtRectangle.topLeft())
        self.ui.setupUi(self)
        self.ui.stackedWidget.setStyleSheet("background-color: rgb(57, 158, 229);")
        self.ui.centralwidget.setStyleSheet("background-color: rgb(255,255 , 255);")
        self.ui.frame_3.setStyleSheet("background-color: rgb(57, 158, 229);")
        self.ui.exit.clicked.connect(self.exitbtn)
        self.ui.stackedWidget.setCurrentIndex(0)
        self.ui.detailbtn.clicked.connect(self.threadvidstr)
        self.ui.radiusButton.clicked.connect(self.radius)
        self.ui.heightButton.clicked.connect(self.height)
        self.ui.choicefromimg.clicked.connect(self.browseImg)
    def mousePressEvent(self, event):
        # Remember the grab point so the frameless window can be dragged.
        if event.buttons() == QtCore.Qt.LeftButton:
            self.dragPos = event.globalPos()
            event.accept()
    def mouseMoveEvent(self, event):
        if event.buttons() == QtCore.Qt.LeftButton:
            self.move(self.pos() + event.globalPos() - self.dragPos)
            self.dragPos = event.globalPos()
            event.accept()
    def link(self):
        """Connect this window's control signals to the worker thread's slots."""
        self.status.connect(self.th.runThread)
        self.switch.connect(self.th.switch)
    @pyqtSlot(QImage)
    def setImage(self,image):
        self.ui.label.setPixmap(QPixmap.fromImage(image))
    @pyqtSlot(int)
    def radiusin(self,val):
        # Show radius and derived circle area.
        self.ui.pr3.display(val)
        self.ui.pr4.display(pow(val,2)*3.14)
    @pyqtSlot(int)
    def wigth(self,val):
        self.ui.pr1.display(val)
    # BUG FIX: this display slot was named `height`, identical to the
    # mode-switch handler below, so the later definition shadowed it and
    # `changehight` ended up connected to the wrong, zero-argument method.
    # Renamed to `heightin` (mirroring `radiusin`).
    @pyqtSlot(int)
    def heightin(self,val):
        self.ui.pr2.display(val)
    @pyqtSlot(str)
    def sta(self,arg):
        self.ui.pr6.setText(arg)
    def threadvidstr(self):
        """Wire the worker thread's output signals to the UI and start it."""
        self.th.changePixmap.connect(self.setImage)
        self.th.changeRadius.connect(self.radiusin)
        self.th.changewidth.connect(self.wigth)
        self.th.changehight.connect(self.heightin)
        self.th.changeStatus.connect(self.sta)
        self.th.start()
        self.status.emit(True)
    def exitbtn(self):
        # Ask the worker to stop, give it a moment, then quit.
        self.status.emit(False)
        time.sleep(1)
        sys.exit()
    def radius(self):
        # Switch the worker into radius-measurement mode.
        self.switch.emit(True)
    def height(self):
        # Switch the worker into height-measurement mode.
        self.switch.emit(False)
    def browseImg(self):
        """Stop live capture, load an image, and run circle detection on it."""
        self.status.emit(False)
        fname = QFileDialog.getOpenFileName(self, 'Open file','', "Image files (*.jpg *.gif *.jpeg *.png)")
        imagePath = fname[0]
        img = cv2.imread(imagePath)
        frame= img
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,20,param1=70,param2=55,minRadius=0,maxRadius=0)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0,:]:
                cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
                # draw the center of the circle
                cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)
                self.ui.pr3.display(i[2])
                self.ui.pr4.display(3.14*i[2]*i[2])
        #img = cv2.rectangle(img, (40,130), (350,220), (0,0,255), 2)
        rgbImage = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgbImage.shape
        bytesPerLine = ch * w
        convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
        p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
        self.ui.label.setPixmap(QPixmap.fromImage(p))
        self.ui.pr6.setText("Circle")
# Script entry point: create the Qt application, show the main window, and
# run the event loop until it exits.
app = QtWidgets.QApplication([])
application = mywindow()
application.show()
sys.exit(app.exec())
|
from qelos.train import lossarray, train, TensorDataset
from qelos.rnn import GRUCell, LSTMCell, SRUCell, RNU, RecStack, RNNLayer, BiRNNLayer, GRULayer, LSTMLayer, RecurrentStack, BidirGRULayer, BidirLSTMLayer, Recurrent, Reccable, PositionwiseForward
from qelos.loss import SeqNLLLoss, SeqAccuracy, SeqElemAccuracy
from qelos.seq import Decoder, DecoderCell, ContextDecoderCell, AttentionDecoderCell, Attention, ContextDecoder, AttentionDecoder
from qelos.basic import Softmax, LogSoftmax, BilinearDistance, CosineDistance, DotDistance, Forward, ForwardDistance, \
Distance, Lambda, Stack, TrilinearDistance, LNormDistance, SeqBatchNorm1d, CReLU, Identity, argmap, argsave, LayerNormalization
from qelos.containers import ModuleList
from qelos.util import ticktock, argprun, isnumber, issequence, iscollection, \
iscallable, isstring, isfunction, StringMatrix, tokenize, dtoo, emit, get_emitted
from qelos.qutils import name2fn, var, val, seq_pack, seq_unpack, dataload
from qelos.word import WordEmb, PretrainedWordEmb, ComputedWordEmb, WordLinout, PretrainedWordLinout, ComputedWordLinout
from qelos.gan import GANTrainer
from qelos.exceptions import SumTingWongException, HoLeePhukException, BaDumTssException
from IPython import embed
from qelos.aiayn import MultiHeadAttention as MultiHeadAttention, Encoder as AYNEncoder, Decoder as AYNDecoder, Transformer as AYNTransformer
|
from backend import db
class User(db.Model):
    """Application user account row ((Flask-)SQLAlchemy-style model)."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # surrogate primary key
    public_id = db.Column(db.Integer, unique=True)  # presumably the id exposed outside the DB - confirm
    name = db.Column(db.String(30), nullable=False)
    surname = db.Column(db.String(30), nullable=False)
    role = db.Column(db.String(30), nullable=False)  # authorization role name
    wallet = db.Column(db.Integer, nullable=False)  # wallet balance (units unknown - confirm)
    points = db.Column(db.Integer, nullable=False)
    nickname = db.Column(db.String(30), nullable=False)
    # NOTE(review): 30 chars is too short for a salted hash (bcrypt needs 60);
    # this looks like plaintext password storage - confirm and fix upstream.
    password = db.Column(db.String(30), nullable=False)
    email = db.Column(db.String(200), nullable=False, unique=True)
|
import turtle
import random

# Draw a random zig-zag path: a random number of segments, each with a
# random length and turn angle.
draw = turtle.Turtle()
# BUG FIX: random.randint requires both bounds; randint(10) raised
# TypeError.  Draw between 1 and 10 segments.
lines = random.randint(1, 10)
for i in range(lines):
    length = random.randint(10, 100)
    # NOTE(review): 365 exceeds a full turn (360 degrees) - harmless for
    # turtle, but confirm the intended upper bound.
    angle = random.randint(1, 365)
    draw.forward(length)
    draw.right(angle)
|
from time import time, sleep
from math import sqrt
t0 = time()  # wall-clock start; used to report the total run time at the end
#############################
### Instructions Begin ###
#############################
'''
HOW TO USE
This is a tool for automatically running multiple treatments and
treatment sets and generating graphs to summarize their results.
To create a set of graphs, specify parameters in the treatments
section of this file, and then from the command line, run
"python3.6 create_graphs.py"
while in the folder containing this file (stats_scripts)
"treatments" is an array of treatment sets. Each treatment set is
saved as a single .png file containing separate graphs for each
treatment. You may specify values or lists of values for each
parameter in a treatment set, and the file names should reflect this.
Output will be grouped into files according to the value of
GROUP_NAME, but spaces in group names cause problems right now.
You can edit the trials variable to change the number of trials, and
the population variable to change the population.
The warning message "Removed n rows containing missing values
(geom_point)." indicates that some trials resulted in all hosts dying.
LIMITATIONS
This program will probably not work on python 2. It should probably
work on any version of python 3, but I have only tested it on 3.6 and
3.7.
Population cannot be variable between treatments or treatment sets,
but you can change the population variable between executions of this
python program.
If two treatments in a treatment set are identical, one will overwrite
the other.
COMPONENTS
In addition to the underlying Symbulation system which generates the
data, which is ultimately graphed, this system is comprised of
1. This file
2. The R program cmd_munge_graph_save_hostsurvival_MOI.r
3. The folder at "/moi_hostsurvival_data_graphs", referred to
as work_folder in this python program
4. Various intermediary data folders and files in the work folder
5. A folder called "run_all_graphs" in the work folder which will
contain all generated graphs.
Components 3, 4, and 5 will be automatically created by this python
program.
'''
#############################
### Instructions End ###
#############################
#############################
### Treatments Begin ###
#############################
trials = 10  # number of replicate runs per treatment
population = 10000  # total host population (shared by all treatment sets)
# Round the population down to a perfect square so the world grid is square.
popsq = int(sqrt(population))**2
#Abbreviations are for: HOST_REPRO_RES, SYM_LYSIS_RES, BURST_SIZE, BURST_TIME, SYM_LIMIT, POPULATION
parameter_names = 'GROUP_NAME, HRR, SLR, BS, BT, SL, POP'.split(', ')
# Each treatment set is [GROUP_NAME, HRR, SLR, BS, BT, SL, POP]; a
# list-valued entry expands into one treatment per value.
treatments = [
    ['SYM_LYSIS_RES', 40, [.04*(5.6/.04)**(i/4) for i in range(5)], 999999999, 1, 999999999, popsq],
]
#############################
### Treatments End ###
#############################
#Command line interface
import subprocess
def cmd(command):
    """Run *command* through the shell and block until it finishes.

    Returns the process exit code.  The blocking wait makes all
    invocations run serially; for parallelism, drop the wait and delay
    the R-script calls until all necessary data files exist.
    """
    process = subprocess.Popen(command, shell=True)
    return process.wait()
def silent_cmd(command):
    """Run *command* through the shell with stdout captured (suppressed),
    blocking until it finishes; returns the exit code.

    NOTE(review): stdout is piped but never read, so a command producing
    more output than the pipe buffer holds could deadlock - confirm the
    child's output stays small.
    """
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    return process.wait()
#Work folders
name = "run_"
work_folder = 'moi_hostsurvival_data_graphs'
run_folder = work_folder+'/'+name+str(int(time()))  # unique per-run folder, stamped with epoch seconds
all_graphs_folder = work_folder+'/'+name+'all_graphs'  # accumulates graphs across runs
data_folder = run_folder + '/data'
graphs_folder = run_folder + '/graphs'
def mkdir(dir):
    """Create directory *dir* (including missing parents) if it does not exist.

    BUG FIX: this previously shelled out via ``cmd('mkdir {}')``, which
    breaks on paths containing spaces (a known problem with group names,
    per the instructions above) and prints an error when the directory
    already exists.  os.makedirs avoids the shell entirely.
    """
    import os
    os.makedirs(dir, exist_ok=True)
# Create the whole folder tree up front.
for dir in [work_folder, run_folder, all_graphs_folder, data_folder, graphs_folder]:
    mkdir(dir);
group_folders_made = set()
for treatment_set in treatments:
    treatments_names = None #Validate treatement set and determine number of treatments in it
    for parameter in treatment_set[1:]:
        if isinstance(parameter, list):
            # A list parameter defines one treatment per value; all list
            # parameters in a set must have the same length.
            if treatments_names is None or len(parameter) == len(treatments_names):
                treatments_names = list(parameter)
            else:
                print('I had to skip treatment set '+str(treatments)+' because multiple variable parameters were set, but they didn\'t all have the same number of values')
                treatments_names = None
                break
        else:
            if treatments_names is None:
                treatments_names = [0]
    if treatments_names is None:
        continue
    group_name = treatment_set[0]#Make data and graphs group folders if neccessary
    if group_name not in group_folders_made:
        group_folders_made.add(group_name)
        mkdir(data_folder+'/'+group_name)
        mkdir(graphs_folder+'/'+group_name)
    # One .png per treatment set; parameter values are embedded in the file
    # name (each truncated to 30 chars, spaces stripped).
    output_file = graphs_folder+'/'+group_name+'/'+'HRR{}_SLR{}_BS{}_BT{}_SL{}_POP{}_T{}.png'.format(*[str(p)[:30] for p in treatment_set[1:]]+[trials]).replace(' ', '')
    r_args = [output_file, str(population), str(trials)]
    for treatment_index, treatment_name in enumerate(treatments_names):#Loop through treatments
        treatment = [p[treatment_index] if isinstance(p, list) else p for p in treatment_set]
        for index, name in enumerate(parameter_names): #Set config
            # NOTE(review): assigning through locals() only works because this
            # runs at module level (locals() is globals() there); inside a
            # function it would silently do nothing.
            locals()[name] = treatment[index]
        SIDE = int(sqrt(POP))
        SEED = 1#(HRR,SLR,BS,BT,SL,SIDE).__hash__()
        data_file_root_name = '_HRR{}_SLR{}_BS{}_BT{}_SL{}_POP{}_T{}'.format(HRR, SLR, BS, BT, SL, POP, trials)
        print('Simbulating: {}. Trial:'.format(data_file_root_name[1:]), end='', flush=True)
        for trial in range(trials):#IDENTICAL TREATMENTS
            FILE_NAME = data_file_root_name+'_R{}'.format(trial)
            # Rewrite SymSettings.cfg in place for this trial; the line
            # indices below are hard-coded to the stock config layout.
            with open('../SymSettings.cfg', 'r') as SymSettings:
                data = SymSettings.readlines()
            data[3] = "set SEED " + str(SEED+trial) + " # What value should the random seed be?\n"
            data[9] = "set GRID_X " + str(SIDE) + " # Width of the world, just multiplied by the height to get total size\n"
            data[10] = "set GRID_Y " + str(SIDE) + " # Height of world, just multiplied by width to get total size\n"
            data[12] = "set SYM_LIMIT " + str(SL) + " # Number of symbiont allowed to infect a single host\n"
            data[17] = "set HOST_REPRO_RES " + str(HRR) + " # How many resources required for host reproduction\n"
            data[18] = "set SYM_LYSIS_RES " + str(SLR) + " # How many resources required for symbiont to create offspring for lysis each update\n"
            data[15] = "set BURST_SIZE " + str(BS) + " # If there is lysis, this is how many symbionts should be produced during lysis. This will be divided by burst_time and that many symbionts will be produced every update\n"
            data[16] = "set BURST_TIME " + str(BT) + " # If lysis enabled, this is how many updates will pass before lysis occurs\n"
            data[21] = "set FILE_PATH " + 'stats_scripts'+'/'+data_folder+'/'+group_name+'/' + " # Output file location\n"
            data[22] = "set FILE_NAME " + FILE_NAME + " # Root output file name\n"
            with open('../SymSettings.cfg', 'w') as SymSettings:
                SymSettings.writelines(data)
                SymSettings.flush()
            print(' {}'.format(trial),end='',flush=True);
            silent_cmd('cd ..; ./symbulation') #Run Symbulation
        r_args.append(str(treatment_name))
        r_args.append(data_folder+'/'+group_name+'/'+'HostVals'+data_file_root_name+'_R')#This is everything up to the trial number
        r_args.append(data_folder+'/'+group_name+'/'+'SymVals'+data_file_root_name+'_R')#Still missing trial_number.data at the end (e.g. 0.data)
        print('.')
    #import os
    #print('Generating graph with R script', end='. ')
    #cmd = '/Library/Frameworks/R.framework/Versions/3.6/Resources/Rscript --vanilla ./'+work_folder+'/cmd_munge_graph_save_hostsurvival_MOI.R ' + ' '.join(r_args)
    #out = os.system(cmd)
    #print('Error code: {}'.format(out))
    command = 'Rscript --vanilla ./' \
        + 'cmd_munge_graph_save_hostsurvival_MOI.R ' + ' '.join(r_args)
    out = cmd(command)
    if out:
        # Non-zero exit from Rscript: echo the command to aid debugging.
        print(command)
    print('Graph stored in: {}'.format(output_file))
print('Copying graphs to {}'.format(all_graphs_folder))
for folder_name in group_folders_made:
    command = 'rsync -av {}/{} {}'.format(graphs_folder,folder_name,all_graphs_folder)
    try:
        cmd(command)
    except FileNotFoundError:
        print('I couldn\'t find a file while running command "{}", so I skiped copying that set of graphs to the all graphs folder.'.format(command))
print('Run time: {} minutes'.format((time()-t0)/60))
|
# datetime supplies the date objects sorted at the end of the demo.
import datetime as dt

# Sample data: a list of strings and a list of mixed ints/floats.
names = ["Zara", "Lupe", "Alberto", "Jake", "Tyler"]
numbers = [14, 0, 56, -4, 99, 56, 11.26]

# Ascending sorts, then display the results.
print("Sorting in ascending order...")
names.sort()
numbers.sort()
print(names)
print(numbers)

# Strings, largest-to-smallest.
names.sort(reverse=True)
print(names)
print()  # blank separator line in the output

# Numbers, largest-to-smallest.
numbers.sort(reverse=True)
print(numbers)
print()

# Dates compare chronologically; build the list, sort newest-first, show.
datelist = [
    dt.date(2020, 12, 31),
    dt.date(2019, 1, 31),
    dt.date(2018, 2, 28),
    dt.date(2020, 1, 1),
]
datelist.sort(reverse=True)
for date in datelist:
    print(f"{date: %m/%d/%Y}")
|
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import F, Q
from django.utils import timezone
from enum import Enum
import logging
logger = logging.getLogger("django")  # reuse Django's configured "django" logger
def __defaultList__():
    """Return a fresh empty list (ArrayField default: new object per call)."""
    return list()
# Grade levels and the subjects taught at each.  Grade names carry both an
# English ('en') and a Chinese ('zh') form; subject names are Chinese.
# Consumed below to build the flat Coding list of (label, code) choices.
GRADE = [
    {
        'name': {
            'en': 'primary',
            'zh': '小学',
        },
        'subjects': [
            '语文',
            '数学',
            '英语'
        ]
    },
    {
        'name': {
            'en': 'junior',
            'zh': '初中',
        },
        'subjects': [
            '语文',
            '数学',
            '英语',
            '物理',
            '化学',
            '生物',
            '政治',
            '历史',
            '地理'
        ]
    },
    {
        'name': {
            'en': 'senior',
            'zh': '高中',
        },
        'subjects': [
            '语文',
            '数学',
            '英语',
            '物理',
            '化学',
            '生物',
            '政治',
            '历史',
            '地理'
        ]
    },
    {
        'name': {
            'en': 'undergraduate',
            'zh': '本科',
        },
        'subjects': [
            '高等数学',
            '数据库系统概论',
            '马克思主义基本原理概论'
        ]
    },
    {
        'name': {
            'en': 'postgraduate',
            'zh': '研究生',
        },
        'subjects': [
            '高等数学',
            '数据库系统概论',
            '马克思主义基本原理概论'
        ]
    }
]
# Flat list of (label, code) pairs for the Question.CATEGORY enum: label is
# "<grade zh name>-<subject>", code is the decimal concatenation of
# (grade index + 1) and the subject index.
Coding = [
    (level['name']['zh'] + '-' + subject_name,
     int('%d%d' % (level_idx + 1, subject_idx)))
    for level_idx, level in enumerate(GRADE)
    for subject_idx, subject_name in enumerate(level['subjects'])
]
class Question(models.Model):
    '''Base class for questions.

    Stores the question text and resolution, an integer category code (see
    CATEGORY), free-form topic tags, and visit counters that drive the
    "new"/"hot" listings.
    '''
    # Maps "<grade zh name>-<subject>" labels to the integer codes stored
    # in the `category` column.
    CATEGORY = Enum('Category', Coding)
    question_text = models.TextField() # Question text
    # answer = models.IntegerField(default=0) # Answer of this question
    resolution = models.TextField(default='') # Resolution of this question
    category = models.IntegerField(default=1) # Category that this question belongs to
    topic = ArrayField(models.CharField(max_length=100, default=''), default=__defaultList__) # Involved topics of this question
    visit_count = models.IntegerField(default=0) # Visit count of this question
    visit_count_daily = models.IntegerField(default=0) # Visit count of this question within today
    visit_count_weekly = models.IntegerField(default=0) # Visit count of this question within this week
    # accuracy = models.FloatField(default=0) # Accuracy
    # accuracy_daily = models.FloatField(default=0)
    # accuracy_weekly = models.FloatField(default=0)
    source = models.TextField(default='') # Source of this question
    entry_date = models.DateTimeField('entry date') # Entry time of this question

    @classmethod
    def create(cls, *args, **kwargs):
        '''Create a Question (or derived class) instance with entry_date set
        to now.  The instance is NOT saved; callers must call save().
        '''
        question = cls(*args, **kwargs)
        question.entry_date = timezone.now()
        return question

    @classmethod
    def search(cls, category=None, topic=None, question_text=''):
        '''Search questions by category codes, topic tags and/or a
        question-text substring: OR within one criterion, AND across them.

        Returns up to 10 matches serialized via toSimpleJson().
        Raises ValueError when no criterion is given.
        '''
        # FIX: the defaults were the mutable literals [] (shared across
        # calls); None defaults behave identically in every check below.
        category = category or []
        topic = topic or []
        keywords = {
            'category': None,
            'topic': None,
            'question_text': None
        }
        if category or topic or question_text:
            if category:
                # OR together one Q per requested category code.
                keywords['category'] = Q(category=category[0])
                for index, value in enumerate(category):
                    if index > 0:
                        keywords['category'] |= Q(category=value)
            if topic:
                # OR together one array-containment Q per requested topic.
                keywords['topic'] = Q(topic__contains=[topic[0]])
                for index, value in enumerate(topic):
                    if index > 0:
                        keywords['topic'] |= Q(topic__contains=[value])
            if question_text:
                keywords['question_text'] = Q(question_text__contains=question_text)
            # AND the per-criterion Q objects together.
            condition = None
            for keyword in keywords:
                if not keywords[keyword]:
                    continue
                if not condition:
                    condition = keywords[keyword]
                else:
                    condition &= keywords[keyword]
            logger.info(cls)
            res = list(cls.objects.filter(condition))[:10]
            return list(map(lambda ques: ques.toSimpleJson(), res))
        else:
            raise ValueError('Require at least one parameter')

    @classmethod
    def getNew(cls, category=1, num=10):
        '''Return the *num* most recently entered questions (newest first),
        optionally restricted to an integer category code.
        '''
        if category and type(category) == int:
            condition = Q(category=category)
            res = list(cls.objects.filter(condition).order_by('entry_date'))
        else:
            res = list(cls.objects.order_by('entry_date'))
        # order_by is ascending, so the newest rows are at the tail.
        length = len(res)
        res = res[length - num : length]
        res.reverse()
        return list(map(lambda ques: ques.toSimpleJson(), res))

    @classmethod
    def getHot(cls, category=1, num=10):
        '''Return the *num* most-visited questions overall, today and this
        week (each list most-visited first), optionally restricted to an
        integer category code.
        '''
        res = [None] * 3
        if category and type(category) == int:
            condition = Q(category=category)
            res[0] = list(cls.objects.filter(condition).order_by('visit_count'))
            res[1] = list(cls.objects.filter(condition).order_by('visit_count_daily'))
            res[2] = list(cls.objects.filter(condition).order_by('visit_count_weekly'))
        else:
            res[0] = list(cls.objects.order_by('visit_count'))
            res[1] = list(cls.objects.order_by('visit_count_daily'))
            res[2] = list(cls.objects.order_by('visit_count_weekly'))
        # Ascending order: take the tail (highest counts) and reverse it.
        length = list(map(len, res))
        for i in range(3):
            res[i] = res[i][length[i] - num : length[i]]
            res[i] = list(map(lambda ques: ques.toSimpleJson(), res[i]))
            res[i].reverse()
        return {
            'tops': res[0],
            'topsDaily': res[1],
            'topsWeekly': res[2]
        }

    def toSimpleJson(self):
        '''Return simply serialized data of this question (no answer or
        resolution).
        '''
        return {
            'id': self.id,
            'question_text': self.question_text,
            'category': self.category,
            'topic': self.topic,
            'visit_count': self.visit_count,
            'visit_count_daily': self.visit_count_daily,
            'visit_count_weekly': self.visit_count_weekly,
            'source': self.source,
            # Milliseconds since the epoch.
            'entry_date': self.entry_date.timestamp() * 1e3
        }

    def toJson(self):
        '''Return fully serialized data of this question
        '''
        return {
            'id': self.id,
            'question_text': self.question_text,
            # BUG FIX: the `answer` field is commented out above, so
            # `self.answer` raised AttributeError; report None until a
            # subclass (or re-enabled field) provides it.
            'answer': getattr(self, 'answer', None),
            'resolution': self.resolution,
            'category': self.category,
            'topic': self.topic,
            'visit_count': self.visit_count,
            'visit_count_daily': self.visit_count_daily,
            'visit_count_weekly': self.visit_count_weekly,
            'source': self.source,
            # Milliseconds since the epoch.
            'entry_date': self.entry_date.timestamp() * 1e3
        }

    def validate(self, usersAnswer):
        '''Record a visit for an answer attempt and persist the counters.

        NOTE(review): no actual answer checking happens here - presumably
        subclasses override this; confirm.
        '''
        self.countInc()
        self.save()

    def countInc(self, *args, **kwargs):
        # F() expressions make the increments happen in the database on
        # save(), instead of read-modify-write in Python.
        self.visit_count = F('visit_count') + 1
        self.visit_count_daily = F('visit_count_daily') + 1
        self.visit_count_weekly = F('visit_count_weekly') + 1

    def countResetDaily(self):
        self.visit_count_daily = 0

    def countResetWeekly(self):
        # NOTE(review): this also clears the daily counter - confirm a
        # weekly reset is meant to imply a daily reset.
        self.visit_count_daily = 0
        self.visit_count_weekly = 0

    def setCategory(self, cat):
        '''Set the category from an int code, a CATEGORY member, or a
        CATEGORY member name; raises TypeError otherwise.
        '''
        if type(cat) == int:
            self.category = cat
        elif type(cat) == __class__.CATEGORY:
            self.category = cat.value
        elif type(cat) == str:
            self.category = __class__.CATEGORY[cat].value
        else:
            raise TypeError('Unknown type: %s' % type(cat).__name__)

    def getCategory(self):
        '''Return the CATEGORY enum member for this question's code.'''
        return __class__.CATEGORY(self.category)

    def category_text(self):
        '''Return the human-readable "<grade>-<subject>" category label.'''
        return self.getCategory().name
    # NOTE(review): Django admin display options are normally named
    # `short_description`/`boolean`; confirm what consumes `.string`.
    category_text.string = True

    def question_id(self):
        return self.id + 0
    question_id.string = True

    def __str__(self):
        return self.question_text
return self.question_text |
# Compute weekly gross pay with time-and-a-half overtime beyond 40 hours.
hrs = input("Enter Hours:")
h = float(hrs)
rate = input("Enter rate")
r = float(rate)
ot = 1.5 * r  # overtime hourly rate
if h > 40:
    # 40 regular hours plus the overtime rate for the excess.
    pay = 40 * r + (h - 40) * ot
else:
    # BUG FIX: the original printed nothing at all when h <= 40;
    # pay plain time for all hours worked.
    pay = h * r
print(pay)
|
import pandas as pd
import numpy as np
import requests
from io import StringIO
from sklearn.preprocessing import MinMaxScaler
class ErrorAPI(Exception):
    """Exception carrying an HTTP-style error code plus a message."""

    def __init__(self, code, message):
        super().__init__()
        self.code = code
        self.message = message

    def detail(self):
        """Return the error as a JSON-serializable dict."""
        return {'error': {'code': self.code, 'message': self.message}}
# NOTE(review): hard-coded API credential checked into source - move it to
# an environment variable / config file and rotate this key.
TOKEN = 'a7213dd9f3ba445a808a3310c5031dd5021adffd'
# Price columns requested from the Tiingo API (raw plus adjusted values).
COLUMNS = [
    'close',
    'open',
    'high',
    'low',
    'volume',
    'adjClose',
    'adjOpen',
    'adjHigh',
    'adjLow',
    'adjVolume',
    'divCash',
    'splitFactor'
]
def get_data(
    stock='aapl',
    freq='daily',
    start=None,
    end=None
):
    """Download price history for *stock* from the Tiingo API.

    Parameters:
        stock: ticker symbol.
        freq: API frequency segment (e.g. 'daily').
        start, end: optional ISO date bounds.

    Returns a pandas DataFrame of the CSV response, sorted by date.
    Raises ErrorAPI on a non-200 status, an API error payload, or an
    empty result.
    """
    query = {
        'format': 'csv',
        'sort': 'date',
        'token': TOKEN,
        'columns[]': COLUMNS
    }
    for key, value in (('startDate', start), ('endDate', end)):
        if value:
            query[key] = value
    response = requests.get(
        f'https://api.tiingo.com/tiingo/{freq}/{stock}/prices', params=query)
    if response.status_code != 200:
        raise ErrorAPI(response.status_code, 'Failed to request data!')
    if 'Error' in response.text:
        raise ErrorAPI(400, response.text)
    frame = pd.read_csv(StringIO(response.content.decode('utf-8')))
    if len(frame) < 1:
        raise ErrorAPI(400, 'Failed to get data!')
    return frame
# features=['Close','PoC','RSI']
def preprocess(df, features=None, n_days=60):
    """Turn a price DataFrame into scaled sliding-window model inputs.

    Parameters:
        df: DataFrame with at least a 'close' column (as from get_data).
        features: subset of ['close', 'poc', 'rsi'] used per time step;
            defaults to ['close'].  'poc' is the one-day percent change of
            the close; 'rsi' is an EMA-based (com=13) RSI of that change.
        n_days: window length in rows (trading days).

    Returns:
        (scaler, X, Y): X has shape (n, 1, len(features) * n_days); Y holds
        the close n_days after each window start; scaler is the MinMaxScaler
        last fitted on Y (for inverse-transforming predictions).
        NOTE(review): X has one more row than Y - the final window has no
        label yet (presumably reserved for prediction); confirm callers
        expect this.
    """
    if features is None:
        features = ['close']
    data = df[['close']]
    # BUG FIX: derived columns were created as 'PoC'/'RSI' but selected
    # below as lowercase 'poc'/'rsi' (and read via data['poc']), which
    # raised KeyError.  Also compute pct_change on the 'close' Series (not
    # the whole DataFrame) and make sure 'poc' exists whenever 'rsi'
    # needs it.
    if 'poc' in features or 'rsi' in features:
        data = data.assign(poc=data['close'].pct_change())
    if 'rsi' in features:
        delta = data['poc'].diff()
        up = delta.clip(lower=0)
        down = -1 * delta.clip(upper=0)
        ema_up = up.ewm(com=13, adjust=False).mean()
        ema_down = down.ewm(com=13, adjust=False).mean()
        rs = ema_up / ema_down
        data = data.assign(rsi=100 - (100 / (1 + rs)))
    data = data.dropna()
    # Labels: the close n_days ahead of each window's first row.
    Y = data[n_days:][['close']].to_numpy()
    data = data[features].to_numpy()
    n = len(data) - n_days + 1
    X = np.empty((n, len(features) * n_days))
    for i in range(n):
        # Flatten each n_days x len(features) window into one row.
        X[i] = data[i:i + n_days].reshape(-1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X)
    X = X.reshape((n, 1, len(features) * n_days))
    # Re-fit the same scaler on Y so it can invert predicted prices.
    Y = scaler.fit_transform(Y)
    return scaler, X, Y
|
"""
Runtime: 684 ms
Memory: 18.9 MB
"""
from typing import List
class Solution:
    """
    Problem #15:
    Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
    Notice that the solution set must not contain duplicate triplets.
    """

    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets of nums summing to zero.

        For each anchor value v, a single pass over the remaining elements
        records each element's required complement (-v - x); when a later
        element matches a recorded complement, (v, -v - x, x) sums to zero.
        Duplicates are avoided by skipping repeated anchors (nums is
        sorted) and by accumulating triplets in a set.

        BUG FIX: removed leftover debug print() calls that polluted stdout
        on every invocation.
        """
        if len(nums) < 3:
            return []
        nums.sort()
        res = set()
        for i, v in enumerate(nums[:-2]):
            # Skip duplicate anchor values: they yield duplicate triplets.
            if i >= 1 and v == nums[i-1]:
                continue
            d = {}
            for x in nums[i+1:]:
                if x not in d:
                    # Record the complement that would make v + x + c == 0.
                    d[-v-x] = 1
                else:
                    # x matches a recorded complement; -v-x is the earlier
                    # element that requested it.
                    res.add((v, -v-x, x))
        return list(map(list, res))
if __name__ == "__main__":
    # Ad-hoc manual check: run the LeetCode #15 sample input and print the
    # resulting triplets.
    result = Solution()
    nums = [-1,0,1,2,-1,-4]
    #nums = [34,55,79,28,46,33,2,48,31,-3,84,71,52,-3,93,15,21,-43,57,-6,86,56,94,74,83,-14,28,-66,46,-49,62,-11,43,65,77,12,47,61,26,1,13,29,55,-82,76,26,15,-29,36,-29,10,-70,69,17,49]
    #nums = [1,-1,-1,0]
    print(result.threeSum(nums))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.