blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5233091305b44640cd97581d32e8076ff35c614c | c4c81058dd9fa111f706a5db7ee80064873271ba | /HLTrigger/btau/hltDisplacedmumumuVtxProducer_cfi.py | f0548d1fb7727e06c833cf979e4fa57f865861ab | [] | no_license | fwyzard/cmssw-cfipython | e142c3a3e707c599dae491333ec48522de3f2f34 | cae55b22a46433b55ea6ff5b36aecc043792d16c | refs/heads/master | 2021-07-25T21:04:42.950199 | 2017-10-24T06:29:00 | 2017-10-24T06:29:00 | 109,701,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import FWCore.ParameterSet.Config as cms
# CMSSW auto-generated cfi: HLT producer building vertices from displaced
# tri-muon candidates. Parameter meanings below are inferred from their
# names — confirm against the HLTDisplacedmumumuVtxProducer plugin docs.
hltDisplacedmumumuVtxProducer = cms.EDProducer('HLTDisplacedmumumuVtxProducer',
    Src = cms.InputTag('hltL3MuonCandidates'),  # input L3 muon candidates
    PreviousCandTag = cms.InputTag(''),  # empty: no previous-filter requirement
    MaxEta = cms.double(2.5),  # pseudorapidity acceptance cut
    MinPt = cms.double(0),  # minimum pt (0 = disabled)
    MinPtTriplet = cms.double(0),  # minimum triplet pt (0 = disabled)
    MinInvMass = cms.double(1),  # invariant-mass window, lower edge
    MaxInvMass = cms.double(20),  # invariant-mass window, upper edge
    ChargeOpt = cms.int32(-1)  # NOTE(review): charge-selection flag; semantics not visible here
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
e8641f526333e9b9fbc46bb83deeefe383cd9bd1 | 069984c09a4cf60eaef8ebc78ea87242539ef5d2 | /data/data_loader.py | d7eca4f22b07ab70031ef9efe5cea8b6bcf0348d | [] | no_license | yanwengong/GWAS_SNP_analysis | be35baa73ae760c7ccaa3b7e8f8b94d2d3ecc17c | 1e95c1f04f34b749a05db861402df9ada63918ac | refs/heads/master | 2023-07-16T01:07:07.491231 | 2021-08-20T05:11:23 | 2021-08-20T05:11:23 | 385,704,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import numpy as np
import torch
from torch.utils.data import Dataset
from datetime import datetime
class Data(Dataset):
    """DNA-sequence dataset: doubles the input with reverse complements in
    __init__ and one-hot encodes each sequence lazily in __getitem__.

    NOTE(review): only the A/T/G/C/N alphabet is handled — any other base
    raises KeyError while reverse-complementing in __init__.
    """
    # The __init__ function is run once when instantiating the Dataset object.
    def __init__(self, data):
        """data: 1-D numpy object array of sequence strings (forward strand)."""
        # (n,) array, each element is string, dtype=object
        self.data = data # fasta of forward, no chr title, 1d np.array, shape is n
        print("-----------------shape before add RC -------------")
        print(self.data.shape)
        # add reverse complement
        temp = []
        complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N' : 'N'}
        print("reverse complement start time: ", datetime.now())
        # Prepending each complemented base reverses and complements in one pass.
        for seq in self.data:
            complement_seq = ''
            for base in seq: ## need to check here, what is seq's shape?? why do they have seq[0] here
                complement_seq = complement[base] + complement_seq
            temp.append(complement_seq)# 0301 indented, TODO: check why before it could still train, even x and y have different n
        print("reverse complement end time: ", datetime.now())
        temp = np.array(temp, dtype=object)
        # Forward sequences first, then their reverse complements (2n total).
        self.data = np.append(self.data, temp, axis=0)
        print("-----------------shape after subset and add RC-------------")
        print(self.data.shape)
    # The __len__ function returns the number of samples in our dataset.
    def __len__(self):
        return self.data.shape[0] ## check
    # The __getitem__ function loads and returns a sample from the dataset at the given index idx.
    def __getitem__(self, index):
        # Returns the one-hot tensor for sample `index`, shape (4, seq_len).
        seq = self.data[index]
        row_index = 0
        # One row per base; columns ordered A, T, G, C. 'N' (or any other
        # character) leaves its row all-zero.
        temp = np.zeros((len(seq), 4))
        for base in seq: ## seq[0]??
            if base == 'A':
                temp[row_index, 0] = 1
            elif base == 'T':
                temp[row_index, 1] = 1
            elif base == 'G':
                temp[row_index, 2] = 1
            elif base == 'C':
                temp[row_index, 3] = 1
            row_index += 1
        X = torch.tensor(temp).float().permute(1,0) # change the dim to 4, 1000
        # y = torch.tensor(self.label[index]).float()
return X | [
"yanwengong7@gmail.com"
] | yanwengong7@gmail.com |
faeb284b5d4bbd6269cb6e66053c0cf5ead89327 | 5cf022e1659902521f1f04510db40176c511a35e | /lstm_segment_v1.py | f567ce49c3df5d9e02a55af888a03f71ae80bd7b | [] | no_license | lengyuewuyazui/cws | ff037806be84d2dbc156e2f1b611e14876ecaae4 | ea2775820c8b0599a2a6485972b2d2034509744e | refs/heads/master | 2021-07-09T13:22:17.145855 | 2017-10-09T08:02:55 | 2017-10-09T08:02:55 | 106,253,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,157 | py | # encoding: utf-8
'''
LSTM 中文分词
Refer: https://mp.weixin.qq.com/s?__biz=MzA4OTk5OTQzMg==&mid=2449231335&idx=1&sn=d3ba98841e85b7cea0049cc43b3c16ca
设窗口长度为 7,那么从训练样本中切分 xxxoxxx,根据前三个 x,后三个 x 和 o 本身,训练 o 的 tag (B/E/M/S)
为了能够处理开头和结尾的字符,要在训练文本和测试文本的开头和结尾分别 padding 三个 '\01' 不可见字符,作为 START/END
要求训练文本和测试文本没有空格;这里默认已经处理过了,不需要本脚本再去做处理
重要:算法中默认测试集中没有训练文本中不存在的汉字,也就是说,要求训练文本中的汉字空间是完全覆盖的!!!!
这里没有把整个训练和测试流程化,只是提供了函数接口 pretraining, training, run_test
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
import numpy as np
def pretraining(train_text, train_tags, maxlen, step):
    """
    Build one-hot (X, y) training tensors for window-based CWS tagging.

    train_text: text to train on
    train_tags: tag string (B/E/M/S), same length as train_text
    maxlen    : window size, must be odd (e.g. 7 -> 3 context chars per side)
    step      : stride between consecutive windows

    Returns (X, y, chars, tags, char_indices); X has shape
    (n_windows, maxlen, n_chars), y has shape (n_windows, 4).
    """
    print('corpus length:', len(train_text))
    # Floor division: on Python 3 `maxlen / 2` is a float, which breaks the
    # string repetition, slicing and range() calls below.
    half_window = maxlen // 2
    # Pad with the unprintable '\01' sentinel so the first/last characters
    # also get a full window (acts as START/END markers).
    train_text = u'\01' * half_window + train_text + u'\01' * half_window
    chars = sorted(set(train_text))
    print('chars length:', len(chars))
    char_indices = dict((c, i) for i, c in enumerate(chars))
    # Segmentation tags: word Begin, End, Middle, Single-character word.
    tags = ['B', 'E', 'M', 'S']
    # Padding sentinels are tagged as single-character words.
    train_tags = 'S' * half_window + train_tags + 'S' * half_window
    print('total tags:', len(tags))
    tag_indices = dict((c, i) for i, c in enumerate(tags))
    # Slide a window over the text; the center character's tag is the label.
    windows = []
    next_tags = []
    end_pos = len(train_text) - half_window
    for i in range(half_window, end_pos, step):
        windows.append(train_text[i - half_window: i + half_window + 1])
        next_tags.append(train_tags[i])
    print('nb sequences:', len(windows))
    # Vectorize: one-hot characters (X) and tags (y).
    # `bool` instead of the removed `np.bool` alias (NumPy >= 1.24).
    print('Vectorization...')
    X = np.zeros((len(windows), maxlen, len(chars)), dtype=bool)
    y = np.zeros((len(windows), len(tags)), dtype=bool)
    for i, sentence in enumerate(windows):
        for t, char in enumerate(sentence):
            X[i, t, char_indices[char]] = 1
        y[i, tag_indices[next_tags[i]]] = 1
    return X, y, chars, tags, char_indices
def training(X, y, maxlen, len_of_chars, len_of_tags, hidden_nodes=128, batch_size=128, nb_epoch=1):
    """Generator: build the LSTM tagger, then yield it after each of 60 fit passes."""
    net = get_lstm_model(hidden_nodes, (maxlen, len_of_chars), len_of_tags)
    iteration = 1
    while iteration <= 60:
        print_iteration_sign(iteration)
        net.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch)
        yield net
        iteration += 1
def run_test(model, test_text, diversity, maxlen, len_of_chars, char_indices, tags):
    """
    Predict a B/E/M/S tag for every character of *test_text*.

    model: model used for prediction (must expose .predict)
    test_text: string to segment
    diversity: sampling temperature forwarded to sample()
    maxlen: window size (odd)
    len_of_chars: size of the one-hot character dimension
    char_indices: char -> one-hot index mapping (must cover every char)
    tags: index -> tag character mapping

    Returns the predicted tag string, one tag per input character.
    """
    print()
    print('>>>>> diversity: ', diversity)
    # Floor division: on Python 3 `maxlen / 2` is a float, which breaks the
    # string repetition, slicing and range() calls below.
    half_window = maxlen // 2
    # padding with '\01' so edge characters get a full window
    test_text = u'\01' * half_window + test_text + u'\01' * half_window
    next_tags = ''
    end_pos = len(test_text) - half_window
    # Every character must be tagged, hence stride 1 (no step parameter).
    for i in range(half_window, end_pos):
        window = test_text[i - half_window: i + half_window + 1]
        x = np.zeros((1, maxlen, len_of_chars))
        for t, char in enumerate(window):
            x[0, t, char_indices[char]] = 1.
        preds = model.predict(x, verbose=0)[0]
        next_index = sample(preds, diversity)
        next_tags += tags[next_index]
    return next_tags
def get_lstm_model(hidden_nodes, input_shape, output_nodes):
    """Build and compile a single-layer LSTM classifier with a softmax head."""
    print('Build model...')
    net = Sequential()
    net.add(LSTM(hidden_nodes, input_shape=input_shape))
    # One output unit per tag.
    net.add(Dense(output_nodes))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.01))
    return net
def sample(preds, temperature=1.0):
    """Draw one index from the distribution *preds* after temperature scaling.

    temperature == 1 leaves the distribution unchanged; values below 1
    sharpen it toward the mode, values above 1 flatten it toward uniform.
    """
    weights = np.asarray(preds).astype('float64')
    weights = np.exp(np.log(weights) / temperature)
    distribution = weights / np.sum(weights)
    # Single multinomial draw; the 1 landing in a slot selects that index.
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
def print_iteration_sign(i):
    """Print a blank line plus a dashed separator announcing iteration *i*."""
    print('\n' + '-' * 50)
    print('Iteration: ', i)
| [
"jinchen@jinchen-PC.lan"
] | jinchen@jinchen-PC.lan |
8456af16f4d0a33f448185e56d9a45f9cb8b797f | a9ec22bcf8bbc4c73d21691795d400b3cdd8e12e | /demo.py | 9911b3b271a0f313ed3b9d231d5cb63a6df6ae1b | [] | no_license | nehasmore/ManualTestSync | 6dce48979a062bf12eb4f693d1b4eb8284c5e3b3 | 3da9ddcfe516527606b5b7bf82dc8a531125d7fc | refs/heads/master | 2021-04-01T04:28:55.478541 | 2020-04-01T07:20:22 | 2020-04-01T07:20:22 | 248,155,959 | 0 | 1 | null | 2020-04-01T13:00:29 | 2020-03-18T06:31:54 | Python | UTF-8 | Python | false | false | 772 | py | import os
import requests
def main():
    """Smoke-test the Testpad API: POST loadScripts and assert a script id
    comes back. All connection parameters come from environment variables.
    """
    # NOTE(review): echoing credentials to stdout leaks them into CI logs —
    # consider dropping these three prints.
    print(os.getenv('url'))
    print(os.getenv('username'))
    print(os.getenv('password'))
    csrftoken = os.getenv('csrftoken')
    sessionid = os.getenv('sessionid')
    testpadurl = os.getenv('testpadurl')
    project = os.getenv('project')
    targetfolder = os.getenv('targetfolder')
    # Session auth travels in the cookie; the x-csrftoken header must match
    # the csrftoken cookie value.
    headers = {
        'x-csrftoken' : ('%s' % csrftoken),
        'referer' : ('%s%s/%s' % (testpadurl, project, targetfolder)),
        'cookie': ('csrftoken=%s; sessionid=%s;' % (csrftoken, sessionid))
    }
    a = requests.post('%s/a%s/%sloadScripts' % (testpadurl, project, targetfolder), headers=headers, json='{"data":null}')
    print(a.status_code)
    # AssertionError if the response carries no scripts.
    assert a.json()['data'][0]['_id']
if __name__ == '__main__':
main() | [
"neha.more@csaver.co.uk"
] | neha.more@csaver.co.uk |
ea84dc4184836dbcd834d26e7ab57d1afb8567a4 | a34457fc344ffe48ebd44d026c19ec336e5828c2 | /administration_system/api/exceptions.py | f41c2dab8afba4abf1c43a406da3bd9307536e38 | [] | no_license | fiiniefi/ProjectDB | 5ba9753f6136581eb215248d5451b0a9e38f2e9c | 2ad1c3af2ccffbd2596501e80111f3677dac5b5f | refs/heads/master | 2020-06-03T16:06:13.656105 | 2019-06-13T17:05:31 | 2019-06-13T17:05:31 | 191,643,040 | 0 | 0 | null | 2019-06-13T17:05:33 | 2019-06-12T20:53:02 | Python | UTF-8 | Python | false | false | 205 | py | import psycopg2
from abc import ABC
class DatabaseException(psycopg2.InternalError, ABC):
    """Abstract base class for errors raised from database-side checks."""
    pass
class InvalidMember(DatabaseException):
    """Raised when an operation references an invalid member."""
    pass
class InvalidRowCount(DatabaseException):
    """Raised when a statement affects an unexpected number of rows."""
    pass
| [
"marcin.drewniak1@vp.pl"
] | marcin.drewniak1@vp.pl |
8ea52b9d50b1d1209d63b9bd3634ae313ca554b2 | 15843fb76c292007ce9c8fdebff559e2439bb6d6 | /queryMatcher.py | 0bdc6699f722367ce4e1275b4dfeb4dd2732722b | [] | no_license | virajprabhu/Newsline | b17e815aa9efc09e13b80fa1d4d9b75b89ad3cf7 | daa347f3e09a51d1cd0e69175f8145a64c5c34a5 | refs/heads/master | 2020-05-30T14:43:23.811730 | 2015-03-16T19:36:38 | 2015-03-16T19:36:38 | 32,348,304 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | # Author : Animesh Das , Sujay Narumanchi , Shruti Rijhwani , Viraj Prabhu
# Information Retrieval Project - Group 6
# Libraries Used :
# python sklearn
# python scrapy
# python selenium
from __future__ import print_function
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import linear_kernel
from nltk.corpus import stopwords
import os
import string
import numpy as np
import nltk
import sys
import logging
import numpy.linalg as LA
import math
import json
token_dict = {}
stemmer = nltk.PorterStemmer()
def stem_tokens(tokens, stemmer):
    """Apply *stemmer* to every token and return the stemmed list."""
    return [stemmer.stem(token) for token in tokens]
def tokenize(text):
    """Tokenize *text* with NLTK; stemming is intentionally disabled."""
    return nltk.word_tokenize(text)
def queryMatcher(query, path):
    """Find the document under *path* most similar to *query* (TF-IDF cosine).

    Side effects: fills the module-level token_dict and reads
    'finalDictionary.json' from the working directory. Returns
    (filename, valueList) for the best match, or (None, None) when nothing
    matches.
    NOTE(review): Python 2 only — `str.translate(None, ...)` and indexing
    `dict.keys()` both fail on Python 3.
    """
    print ("Entered query Matcher")
    # NOTE(review): stopWords is computed but never used below.
    stopWords = stopwords.words('english')
    # Read every file under `path`, lower-case it and strip punctuation.
    for subdir, dirs, files in os.walk(path):
        for file in files:
            file_path = subdir + os.path.sep + file
            #print("Filepath:" + str(file_path))
            document = open(file_path, 'r')
            text = document.read()
            lowers = text.lower()
            no_punctuation = lowers.translate(None, string.punctuation)
            token_dict[file] = no_punctuation
            document.close()
    print ("Query is " + query)
    query = [query]
    # Fit TF-IDF on the corpus, then project the query into the same space.
    tfidfTrain = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
    trainVectorizer = tfidfTrain.fit_transform(token_dict.values ())
    trainVectorizerArray = trainVectorizer.toarray()
    testVectorizer = tfidfTrain.transform(query);
    testVectorizerArray = testVectorizer.toarray()
    print ("Fit Vectorizer to train set\n", trainVectorizer)
    print ("Transform Vectorizer to test set\n", testVectorizer)
    # Cosine similarity, rounded to 3 decimals.
    cx = lambda a, b : round(np.inner(a, b)/(LA.norm(a)*LA.norm(b)), 3)
    maxcos = 0.0
    #print("Test vector is:" + str(testVectorizerArray[0]))
    print("Fetching search results...")
##    if(LA.norm(testVectorizerArray[0]) == 0):
##        return (None, None)
    # Linear scan for the document with the highest cosine similarity.
    # NOTE(review): `maxpos` stays unbound if every cosine is NaN or <= 0;
    # the maxcos == 0 early return below guards the common case.
    for index, vector in enumerate(trainVectorizerArray):
        #print("Vector is:" + str(vector))
        cosine = cx(vector, testVectorizerArray[0])
        if((not math.isnan(cosine)) and cosine > maxcos):
            maxpos = index
            maxcos = cosine
            print("Cosine is:" + str(cosine))
    if(maxcos == 0):
        print ("No matches found!\n")
    else:
        print("Closest match:" + str(token_dict.keys()[maxpos]) + " with cosine:" + str(maxcos))
    if(maxcos == 0):
        print ("NO MATCH")
        return(None, None)
    # Map the winning file back to its article group in the dictionary.
    finalDictionary = open('finalDictionary.json', 'r')
    finalDict= json.load(finalDictionary)
    values = finalDict.values()
    for valueList in values:
        if str(token_dict.keys()[maxpos]) in valueList:
            return (str(token_dict.keys()[maxpos]), valueList)
| [
"prabhuviraj@gmail.com"
] | prabhuviraj@gmail.com |
423c1fd41217a2eb61094e5d27b3dcbec28a38fc | 98a5cf2f5579342e2018bdff2524d98079ef1566 | /0x08-python-more_classes/0-rectangle.py | 7d0d881aafc096350ea30bd2032ca9409793cf00 | [] | no_license | ja95aricapa/holbertonschool-higher_level_programming | 8b557b217ab45c33fb2f5439fbaa253c1901b1ea | 19a9e5d2d06cfcee349fdd64b5354e76c2a52e82 | refs/heads/main | 2023-08-05T12:54:32.953284 | 2021-09-22T14:53:58 | 2021-09-22T14:53:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/python3
'''
Define a class: Rectangle
'''
class Rectangle():
    '''A class to represent a rectangle (intentionally empty placeholder).'''
    pass
| [
"2241@holbertonschool.com"
] | 2241@holbertonschool.com |
f827e9c01715a4a59c84f252e6e838591e327d1d | 3e09ddb5bc1b540b19720c713f21e7566dbaee2a | /utils/subtree_util.py | 6c342e9e67b34f1e7d631e908d97286aff2351ca | [] | no_license | little-pikachu/infercode | ee699b3262dd367e54fa307e61d7bbc9091504e7 | 9063131e61bbe37128b034798bf80709ae2ec744 | refs/heads/master | 2023-03-22T04:33:51.957772 | 2021-03-11T10:18:35 | 2021-03-11T10:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import argparse
from os.path import exists
import re
from os import path
from tree_sitter import Language, Parser
from pathlib import Path
def print_tree_line(id, data, root_node, reports, selected_node_types):
    """Serialize the subtree rooted at *root_node* into a flat string.

    Returns (last_assigned_id, subtree_depth, serialization) and records each
    node's serialization with its depth appended in *reports*, keyed by the
    node's id.
    """
    node_id = id
    kind = root_node.type
    children = root_node.children
    if children:
        serialized = "{}-{},".format(node_id, kind)
    else:
        # Leaves additionally carry their decoded source text.
        text = data[root_node.start_byte:root_node.end_byte].decode("utf-8")
        serialized = "{}-{}-{},".format(node_id, kind, text)
    depth = 1
    for child in children:
        id, child_depth, child_repr = print_tree_line(id + 1, data, child, reports, selected_node_types)
        depth = max(depth, child_depth + 1)
        serialized += child_repr
    reports[node_id] = "{}{}".format(serialized, depth)
    return (id, depth, serialized)
def print_subtree(data, root_node, reports, selected_node_types):
    """Serialize the whole tree and return it with the tree depth appended."""
    _, depth, serialized = print_tree_line(1, data, root_node, reports, selected_node_types)
    return "{}{}".format(serialized, depth)
| [
"bdqnghi@gmail.com"
] | bdqnghi@gmail.com |
a98977fc8b89c5f5d215ed261a70c21aa885a849 | 5cf7b8e028a4f4d88fc6e57563632780f9490d67 | /utils/http.py | 519d6a37090bde18163240962d10706ac048fcad | [
"MIT"
] | permissive | QuirkyDevil/alex-boat-old | 75dff27ab020299982dfaa80e2bc567f4de87254 | 6ca1f883a13a49b0377d434e22bb25366ff64b26 | refs/heads/main | 2023-07-08T19:10:58.340517 | 2021-08-23T17:05:31 | 2021-08-23T17:05:31 | 399,185,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | import asyncio
import aiohttp
from utils import cache
# Removes the aiohttp ClientSession instance warning.
class HTTPSession(aiohttp.ClientSession):
    """ Abstract class for aiohttp. """
    def __init__(self, loop=None):
        # Falls back to the running loop when none is supplied.
        super().__init__(loop=loop or asyncio.get_event_loop())
    def __del__(self):
        """
        Closes the ClientSession instance
        cleanly when the instance is deleted.
        Useful for things like when the interpreter closes.
        This would be perfect if discord.py had this as well. :thinking:
        """
        if not self.closed:
            # NOTE(review): in modern aiohttp ClientSession.close() is a
            # coroutine; calling it without awaiting only creates a coroutine
            # object — confirm against the pinned aiohttp version.
            self.close()
session = HTTPSession()
@cache.async_cache()
async def query(url, method="get", res_method="text", *args, **kwargs):
    """Issue an HTTP request via the shared session and return the decoded body."""
    request = getattr(session, method.lower())
    async with request(url, *args, **kwargs) as resp:
        return await getattr(resp, res_method)()
async def get(url, *args, **kwargs):
    """Convenience wrapper: HTTP GET via query()."""
    return await query(url, "get", *args, **kwargs)
async def post(url, *args, **kwargs):
return await query(url, "post", *args, **kwargs) | [
"81952913+QuirkyDevil@users.noreply.github.com"
] | 81952913+QuirkyDevil@users.noreply.github.com |
40fb34df46839112e06182347eb450f560b5c972 | 28bb42797cf3caa1dead53e6ffc7e719a3148d25 | /pareto_test.py | 5dad3fd45c76793744e5547c81e7cc7108a81e3b | [] | no_license | glkuzi/MonteCarlo | 3f5590cc5f3eee35621a1f847e40fa8263cd10bf | 4fac99656d82e9b210bc2e53e4cb613326ee0f1f | refs/heads/master | 2020-04-09T00:33:29.091721 | 2018-12-03T14:56:52 | 2018-12-03T14:56:52 | 159,872,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,165 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 01:19:35 2018
@author: User
"""
import numpy as np
import scipy.stats
import random
import pandas as pd
def csv_to_arrays(filename):
    """Convert a .csv table into two numpy arrays.

    Input:
        filename - string, path to the table
    Output:
        X - array_like, x coordinates
        Y - array_like, y coordinates
    """
    table = pd.read_csv(filename)
    headers = table.columns.tolist()
    # The first two data rows hold no numbers and are skipped.
    raw_x = np.array(table[headers[0]][2:])
    raw_y = np.array(table[headers[1]][2:])
    # Values use decimal commas; swap them for dots, then parse as float.
    X = np.array([float(value.replace(',', '.')) for value in raw_x])
    Y = np.array([float(value.replace(',', '.')) for value in raw_y])
    return X, Y
def detrending(X, Y, power):
    """Remove a polynomial trend of degree *power* from the sequence.

    Input:
        X - array_like, x coordinates
        Y - array_like, y coordinates
        power - int, degree of the polynomial
    Output:
        array_like, y values with the fitted trend subtracted
    """
    coefficients = np.polyfit(X, Y, power)
    trend = np.polyval(coefficients, X)
    return Y - trend
def distribution_creating(X, bins=10):
    """Turn a sample of random values into an empirical pdf (histogram).

    Input:
        X - array_like, sample of random values
        bins - int, number of histogram bars
    Output:
        x - np.ndarray, bin edges (bins + 1 values spanning min(X)..max(X))
        Y - list, probability mass of each bin (sums to 1)
    """
    size = len(X)
    # Bin edges span the sample range.
    x = np.linspace(min(X), max(X), bins + 1)
    Y = []
    for i in range(1, bins + 1):
        if i < bins:
            # Half-open bin [x[i-1], x[i]).
            count = sum(1 for v in X if x[i - 1] <= v < x[i])
        else:
            # The last bin is closed so every sample equal to the maximum is
            # counted. (The original added a single 1/size instead, which
            # undercounted whenever the maximum value occurred more than once.)
            count = sum(1 for v in X if x[i - 1] <= v <= x[i])
        Y.append(count / size)
    return x, Y
def tail(X, Y, bins):
    '''Extract the tail of the distribution.

    Determines the tail of the probability density function by "cutting" it
    off at the one-sigma level.
    Input:
    X - array_like, x coordinates
        NOTE(review): this parameter is never used — it is immediately
        rebound below; confirm the intended signature.
    Y - array_like, y coordinates
    bins - int, number of histogram bars
    Output:
    X_tail - array_like, edge positions of the tail's histogram bars
    Y_tail - array_like, probabilities of the tail's histogram bars
    '''
    # sample mean
    M = np.mean(Y)
    # sample standard deviation
    sigma = np.sqrt(sum((Y - M) ** 2) / len(Y))
    # build the empirical probability density function
    X, Y = distribution_creating(Y, bins)
    # "cut off" the tail at one sigma above the mean
    X_tail = [x for x in X if x > sigma + M]
    index = list(X).index(X_tail[0])
    # renormalize the tail so its probabilities sum to 1
    Y_tail = np.array(Y[index:]) / sum(Y[index:])
    return X_tail, Y_tail
def pareto_pdf(x, alpha, sigma):
    """Theoretical Pareto probability mass per histogram bin.

    Input:
        x - array_like, bin edges
        alpha - float, shape parameter
        sigma - float, scale parameter
    Output:
        list of probabilities, one per bin (len(x) - 1 entries)
    """
    return [sigma ** alpha * (x[i] ** (-alpha) - x[i + 1] ** (-alpha))
            for i in range(len(x) - 1)]
def pareto_pdf_random(alpha, sigma, size, seed, bins):
    """Empirical Pareto pdf built from a simulated sample.

    Draws *size* Pareto-distributed values (shape *alpha*) with the given
    random *seed*, scales them by sigma ** alpha, then bins them into an
    empirical probability density function.

    Output:
        x - bin edges
        Y - probability of each bin
    """
    draws = scipy.stats.pareto.rvs(alpha, size=size, random_state=seed) * sigma ** alpha
    return distribution_creating(draws, bins)
def main():
    '''The given sequences are assumed to have a heavy-tailed probability
    density function. To assess this assumption the following algorithm
    is used:
    1. The trend is removed from the sequence
    2. An empirical pdf is built, and its sample mean and sample variance
       are computed
    3. The pdf's tail is "cut off" at the one-sigma level
    4. The tail's alpha and sigma parameters are estimated by a
       Monte Carlo search
    5. The Hurst exponent is computed from alpha
    Notes:
    1. The bin count `bins` directly affects accuracy — there should be as
       many bins as possible while keeping the number of empty
       (zero-probability) bins as low as possible
    '''
    eps = 1e-16 # target accuracy
    delta = 1 # initial accuracy value, to enter the loop
    bins = 1000 # number of histogram bins
    alpha = 0
    sigma = 0 # alpha, sigma - the parameters being estimated
    alpha_boundary = [1e-5, 3] # search bounds for alpha and sigma
    sigma_boundary = [1e-5, 2]
    # build an empirical pdf used as the search target
    x, Y = pareto_pdf_random(alpha=1.8, sigma=0.8,
                             size=100000, seed=200, bins=bins)
    steps = 0 # step counter, guards against an endless loop
    while (delta > eps) and (steps < 1000000):
        # draw random candidate alp and sig on every step
        alp = random.uniform(alpha_boundary[0], alpha_boundary[1])
        sig = random.uniform(sigma_boundary[0], sigma_boundary[1])
        # theoretical bin probabilities for the candidate
        # parameters
        t = pareto_pdf(x, alp, sig)
        # squared error against the empirical pdf
        delta0 = sum((t - np.array(Y)) ** 2)
        # keep alp and sig whenever the fit improves
        if delta0 < delta:
            delta = delta0
            sigma = sig
            alpha = alp
        steps += 1 # one more step done
    # report the best alpha and sigma found
    print('sigma* = ', sigma)
    print('alpha* = ', alpha)
    print('steps = ', steps)
    print('delta = ', delta)
    # report the Hurst exponent derived from alpha
    print('H = ', (3 - alpha) / 2)
    return 0
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
fe58fe961797ab457ef2a590d71b62b7a4043775 | 13fdfd03d975c2b94d08a84f05f452c697186a44 | /atcoder/ARC/88/arc88c.py | 03e682083af6389256ee811627b3ebe1d4142096 | [] | no_license | poponzu/atcoder1 | 7243da9250d56eb80b03f1a8f4a3edb9df9e5515 | 64a52bac4cf83842167ca1ce1229c562dabd92a3 | refs/heads/master | 2023-08-22T02:10:52.639566 | 2021-10-09T14:23:46 | 2021-10-09T14:23:46 | 385,467,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | x, y = map(int, input().split())
ans = 1
# (translated) I had messed up the logarithm calculation here earlier.
# (translated) Next time write a quick Python check to verify the math.
# (translated) The underlying idea itself was not wrong.
# Count doublings: `ans` is the largest i + 1 with x * 2**i <= y (min 1).
for i in range(60):
    result = x * (2 ** i)
    if result <= y:
        ans = max(ans, i + 1)
print(ans)
| [
"grape.daiki.sora@icloud.com"
] | grape.daiki.sora@icloud.com |
2c0a397eb8da8ec105b36f31c12c3f6ea5877481 | 12358e534084cc00452329c9fed2fb943d6bb066 | /chat/filters.py | f149157c438308807a6fc0825fa04ab20d890a2d | [
"MIT"
] | permissive | purviljain/DJ-Comps-Book-Exchange | a83bae2937dc5609a911f0e9f064f8a6019ad373 | f33425999b3d3b5f8dfcd4d6365723b9a4cd1a97 | refs/heads/master | 2018-12-16T05:03:16.611801 | 2018-09-14T07:15:59 | 2018-09-14T07:15:59 | 116,996,975 | 0 | 0 | null | 2018-01-10T18:45:52 | 2018-01-10T18:45:51 | null | UTF-8 | Python | false | false | 399 | py | import django_filters
from django.contrib.auth.models import User
from django import forms
class UserFilter(django_filters.FilterSet):
    """FilterSet matching users by case-insensitive username substring."""
    # `icontains` -> case-insensitive substring match; the rendered input
    # element is named 'receiver' for the chat form.
    username = django_filters.CharFilter(lookup_expr='icontains', label='Whom you want to message?',
                                         widget=forms.TextInput(attrs={'name': 'receiver'}))
    class Meta:
        model = User
        fields = ['username']
| [
"pujan2222@gmail.com"
] | pujan2222@gmail.com |
00faf752817b6df3eafe44e118e429961fda8078 | f24eeca35a0a94ebdcaa33895b1550d42e31a1d1 | /profile/-10/scriptTest.py | 110525d77a05fc3455345db8a1081d06c2fd24fe | [] | no_license | facupaisanito/SCAS4.0 | db9524e8872e9debc6884c13afd3a46d34b15959 | 9db11894ef37454c782da8a846fc902c30c1ea3e | refs/heads/master | 2020-03-28T06:52:06.354588 | 2018-09-07T19:28:34 | 2018-09-07T19:28:34 | 147,865,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,141 | py | #-----------------------------------------------------------------------
# ScriptTest for BA
# Version: 12
# Compatible with HW:
# Developed by Ignacio Cazzasa and company for CWG
#-----------------------------------------------------------------------
try:
import sys,os
except:
print "import sys,os in scriptSys Not found!!"
sys.exit()
try:
sys.argv.append('--Param-scriptSys')
sys.argv.append(sys.argv[1])
import scriptSys
except:
print "ERROR file scriptSys Not found in scriptTest!!"
sys.exit()
try:
sys.argv.append('--Param-scriptInc')
sys.argv.append(sys.argv[1])
import scriptInc
except:
print "ERROR file scriptInc Not found in scriptTest!!"
sys.exit()
for px in sys.argv:
if px == '-d':
scriptSys.DEBUG_MODE = True
try:
sys.argv.append('--Param-scriptDebug')
sys.argv.append(sys.argv[1])
import scriptDebug
except:
print "ERROR file scriptDebug Not found in scriptTest!!"
sys.exit()
################################################################
########## SETUP ##########
################################################################
umbralVoltTarget = 4100
umbralCurrentTarget = 300
umbralVoltHigh = umbralVoltTarget
umbralVoltLow = 3800
umbralVolt = umbralVoltTarget * 0.03
# maxTimeInit = 2 * 60 # 2min
maxTimeInit = 20 # 2min
maxTimeTest = 2 * 60 * 60 # hr
# maxTimeMeasure = 5 * 60
maxTimeMeasure = 30
# maxTimeTest = 2000 # hr
maxTimeDischarge = 30 * 60 # 30 min
minTimeDischarge = 60
maxTimeChargeHig = 1 * 60 * 60 # hr
maxTimeChargeMed = 2 * 60 * 60 # hr
maxTimeChargeLow = 4 * 60 * 60 # hr
minTimeCharge = 5 * 60
maxTimeCond = 45 # 10 seg
tMargin = 3
vMargin = 16
iMargin = 20
iCharge1 = '0.5'
iCharge2 = '1.8'
iCharge3 = '1.3'
iCharge4 = '1.0'
vCharge1 = '4.1'
vCharge2 = '4.2'
vCharge3 = '4.1'
vCharge4 = '4.2'
iDischarge1 = '1.6'
iDischarge2 = '1.3'
iDischarge3 = '1.0'
iDischarge4 = '0.5'
VALTA = 3800
VBAJA = 3200
################################################################
########## INIT ##########
################################################################
def init_state() :
    """INIT state: after maxTimeInit, fail fast on low current, else start measuring."""
    try:
        if int(scriptSys.TIME) >= maxTimeInit :
            #initial conditions:
            # Current below the margin at startup -> report failure code 3.
            if scriptSys.CURRENT < (iMargin):
                scriptSys.AUX['failcode'] = 3
                scriptSys.final_report('FAIL_A',int(scriptSys.AUX['failcode']))
                return
            ##############################
            measure_state()
            return
            ##############################
        print "RUN"
        return
    except Exception as e:
        scriptSys.error_report(e,"init_state()")
################################################################
########## MEASURE ##########
################################################################
def measure_state() :
    """MEASURE state: run a measurement; branch to ERASE on failure, else ANALYSIS."""
    try:
        # NOTE(review): once TIME reaches maxTimeMeasure this state silently
        # does nothing each tick - confirm that is intended.
        if scriptSys.TIME < maxTimeMeasure:
            if scriptInc.measure() == 'FAIL':
                erase_state()
            else:
                analysis_state()
        # print "RUN"
        return
    except Exception as e:
        # NOTE(review): report tag "zmeasure_state()" does not match this
        # function's name - looks like a copy/paste leftover.
        scriptSys.error_report(e,"zmeasure_state()")
################################################################
########## analysis ##########
################################################################
def analysis_state() :
    """ANALYSIS state: advance to ERASE once scriptInc.analysis() reports DONE."""
    try:
        if scriptInc.analysis() == 'DONE':
            erase_state()
        return
    except Exception as e:
        # NOTE(review): report tag "zmeasure2_state()" does not match this
        # function's name - looks like a copy/paste leftover.
        scriptSys.error_report(e,"zmeasure2_state()")
################################################################
########## erase ##########
################################################################
def erase_state():
    """ERASE state: advance to INFORM once scriptInc.erase() reports DONE."""
    try:
        if scriptInc.erase() == 'DONE':
            inform_state()
        return
    except Exception as e:
        # NOTE(review): report tag "stress_state()" does not match this
        # function's name - looks like a copy/paste leftover.
        scriptSys.error_report(e,"stress_state()")
################################################################
########## PAinform ##########
################################################################
def inform_state():
    """INFORM state: emit the report via scriptInc.inform()."""
    try:
        scriptInc.inform()
        return
    except Exception as e:
        # NOTE(review): report tag "pause_state()" does not match this
        # function's name - looks like a copy/paste leftover.
        scriptSys.error_report(e,"pause_state()")
################################################################
########## END ##########
################################################################
def end_state():
    """END state: switch the script mode to STOP and reset the start time."""
    try:
        scriptSys.SCRIPT['mode']= "STOP"
        scriptSys.TIME_INIT = scriptSys.TIME
        print "STOP"
        # scriptSys.copy_report()
        return
    except Exception as e:
        scriptSys.error_report(e,"end_state()")
################################################################
################################################################
########## MAIN ##########
################################################################
################################################################
scriptSys.openini()
scriptSys.opencsv()
if scriptSys.SCRIPT['mode'] == "INIT":
init_state()
elif scriptSys.SCRIPT['mode'] == "MEASURE":
measure_state()
elif scriptSys.SCRIPT['mode'] == "ANALYSIS":
analysis_state()
elif scriptSys.SCRIPT['mode'] == "ERASE":
erase_state()
elif scriptSys.SCRIPT['mode'] == "INFORM":
inform_state()
elif scriptSys.SCRIPT['mode'] == "PAUSE":
pause_state()
elif scriptSys.SCRIPT['mode'] == "END":
end_state()
scriptSys.ini_Update()
if (scriptSys.TIME - scriptSys.TIME_INIT) > maxTimeTest:
scriptSys.AUX['F21'] =scriptSys.CURRENT
scriptSys.AUX['F04t'] =scriptSys.TIME
scriptSys.final_report("F21",0)
sys.exit()
| [
"facu.higuera@gmail.com"
] | facu.higuera@gmail.com |
5baaf7f41da07336b45dd3e44764bd17d91174bd | 222c9d5f9bd0b0f262c30b485a4b966aaed0bd9b | /006.py | f7dd8252ae9fab9818eb9ab3409e7e4c6e26a5dc | [
"MIT"
] | permissive | fresky/ProjectEulerSolution | 598ae65d83999ae3ac569ca1e27aea3b306f8b75 | eed4e4e9965908d1047a61fcd0a4d1346c395548 | refs/heads/master | 2016-09-02T03:20:59.441285 | 2014-05-10T09:51:15 | 2014-05-10T09:51:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | sum = 0
# Project Euler 6: accumulate i*j over all ordered pairs (i, j), i != j,
# which equals (sum of 0..100)**2 minus the sum of squares.
# NOTE(review): the accumulator defined just above shadows the builtin
# `sum`; the final print relies on that name, so it is kept as-is.
for i in range(101):
    for j in range(101):
        if i!=j:
            sum += i*j
print(sum) | [
"dawei.xu@gmail.com"
] | dawei.xu@gmail.com |
06a7d1cc33297ae4a3dde990c52105eb76b0a7a4 | 46890f9bbd0af1102ce5cf2c98019295a76f67fb | /the3ballsoft/users/migrations/0004_auto_20161004_1312.py | 65eb880217c5da867833fc6aed0125717994ea46 | [] | no_license | the3ballsoft/the3ballsoft-website | 1a870cec2816dedfcc30e366faca84d162db4f83 | 96a01c58b2a079e14d922c24bb0feea4357d7b40 | refs/heads/master | 2021-01-13T08:22:48.922675 | 2016-10-24T07:21:23 | 2016-10-24T07:21:23 | 69,994,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-04 18:12
from __future__ import unicode_literals
from django.db import migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20161004_1304'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar',
field=versatileimagefield.fields.VersatileImageField(blank=True, max_length=500, null=True, upload_to='img/avatars'),
),
]
| [
"genesisdaft@gmail.com"
] | genesisdaft@gmail.com |
c1fa15ad37bca2c90b9187310de36d12c4ba189d | 0b7d67816c876f000519c4fda688362e33a7abe3 | /Training.py | 22d8c082d589a6d330093c7721deaa405f3306c1 | [] | no_license | braikoff/TrialAndError | 0c6ff8e455e6848bf16bbea6bd76a1586d9fa218 | 0e15239b6989143bf9647fe4db631c2356a6d3e9 | refs/heads/master | 2021-05-15T10:22:45.913809 | 2017-10-24T20:56:17 | 2017-10-24T20:56:17 | 108,182,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,802 | py | #%1 line: Output%
print ('Hello, world!')
#%2 lines: Input, assignment%
name = input('What is your name?\n')
print ('Hi, %s.' % name)
#%3 lines: For loop, built-in enumerate function, new style formatting%
friends = ['john', 'pat', 'gary', 'michael']
for i, name in enumerate(friends):
print ("iteration {iteration} is {name}".format(iteration=i, name=name))
#%4 lines: Fibonacci, tuple assignment%
parents, babies = (1, 1)
while babies < 100:
print ('This generation has {0} babies'.format(babies))
parents, babies = (babies, parents + babies)
#%5 lines: Functions%
def greet(name):
print ('Hello', name)
greet('Jack')
greet('Jill')
greet('Bob')
#%6 lines: Import, regular expressions%
import re
for test_string in ['555-1212', 'ILL-EGAL']:
if re.match(r'^\d{3}-\d{4}$', test_string):
print (test_string, 'is a valid US local phone number')
else:
print (test_string, 'rejected')
#%7 lines: Dictionaries, generator expressions%
prices = {'apple': 0.40, 'banana': 0.50}
my_purchase = {'apple': 1,'banana': 6}
grocery_bill = sum(prices[fruit] * my_purchase[fruit] for fruit in my_purchase)
print ('I owe the grocer $%.2f' % grocery_bill)
#%8 lines: Command line arguments, exception handling%
# This program adds up integers in the command line
import sys
try:
total = sum(int(arg) for arg in sys.argv[1:])
print ('sum =', total)
except ValueError:
print ('Please supply integer arguments')
#%9 lines: Opening files%
# indent your Python code to put into an email
import glob
# glob supports Unix style pathname extensions
python_files = glob.glob('*.py')
for file_name in sorted(python_files):
print (' ------' + file_name)
with open(file_name) as f:
for line in f:
print (' ' + line.rstrip())
print()
#%10 lines: Time, conditionals, from..import, for..else%
from time import localtime
activities = {8: 'Sleeping',
9: 'Commuting',
17: 'Working',
18: 'Commuting',
20: 'Eating',
22: 'Resting' }
time_now = localtime()
hour = time_now.tm_hour
for activity_time in sorted(activities.keys()):
if hour < activity_time:
print (activities[activity_time])
break
else:
print ('Unknown, AFK or sleeping!')
#%11 lines: Triple-quoted strings, while loop%
REFRAIN = '''
%d bottles of beer on the wall,
%d bottles of beer,
take one down, pass it around,
%d bottles of beer on the wall!
'''
bottles_of_beer = 99
while bottles_of_beer > 1:
print (REFRAIN % (bottles_of_beer, bottles_of_beer,
bottles_of_beer - 1))
bottles_of_beer -= 1
#%12 lines: Classes%
class BankAccount(object):
def __init__(self, initial_balance=0):
self.balance = initial_balance
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
self.balance -= amount
def overdrawn(self):
return self.balance < 0
my_account = BankAccount(15)
my_account.withdraw(5)
print (my_account.balance)
#%13 lines: Unit testing with unittest%
import unittest
def median(pool):
copy = sorted(pool)
size = len(copy)
if size % 2 == 1:
return copy[(size - 1) / 2]
else:
return (copy[size/2 - 1] + copy[size/2]) / 2
class TestMedian(unittest.TestCase):
def testMedian(self):
self.failUnlessEqual(median([2, 9, 9, 7, 9, 2, 4, 5, 8]), 7)
if __name__ == '__main__':
unittest.main()
#%14 lines: Doctest-based testing%
def median(pool):
'''Statistical median to demonstrate doctest.
>>> median([2, 9, 9, 7, 9, 2, 4, 5, 8])
7
'''
copy = sorted(pool)
size = len(copy)
if size % 2 == 1:
return copy[(size - 1) / 2]
else:
return (copy[size/2 - 1] + copy[size/2]) / 2
if __name__ == '__main__':
import doctest
doctest.testmod()
#%15 lines: itertools%
from itertools import groupby
lines = '''
This is the
first paragraph.
This is the second.
'''.splitlines()
# Use itertools.groupby and bool to return groups of
# consecutive lines that either have content or don't.
for has_chars, frags in groupby(lines, bool):
if has_chars:
print (' '.join(frags))
# PRINTS:
# This is the first paragraph.
# This is the second.
#%16 lines: csv module, tuple unpacking, cmp() built-in%
import csv
def cmp(a, b):
return (a > b) - (a < b)
# write stocks data as comma-separated values
writer = csv.writer(open('stocks.csv', 'wb', buffering=0))
writer.writerows([
('GOOG', 'Google, Inc.', 505.24, 0.47, 0.09),
('YHOO', 'Yahoo! Inc.', 27.38, 0.33, 1.22),
('CNET', 'CNET Networks, Inc.', 8.62, -0.13, -1.49)
])
# read stocks data, print (status messages
stocks = csv.reader(open('stocks.csv', 'rb'))
status_labels = {-1: 'down', 0: 'unchanged', 1: 'up'}
for ticker, name, price, change, pct in stocks:
status = status_labels[cmp(float(change), 0.0)]
print ('%s is %s (%s%%)' % (name, status, pct))
#%17 lines: 8-Queens Problem (recursion)%
BOARD_SIZE = 8
def under_attack(col, queens):
left = right = col
for r, c in reversed(queens):
left, right = left - 1, right + 1
if c in (left, col, right):
return True
return False
def solve(n):
if n == 0:
return [[]]
smaller_solutions = solve(n - 1)
return [solution+[(n,i+1)]
for i in range(BOARD_SIZE)
for solution in smaller_solutions
if not under_attack(i+1, solution)]
for answer in solve(BOARD_SIZE):
print (answer)
#%18 lines: Prime numbers sieve w/fancy generators%
import itertools
def iter_primes():
# an iterator of all numbers between 2 and +infinity
numbers = itertools.count(2)
# generate primes forever
while True:
# get the first number from the iterator (always a prime)
prime = numbers.next()
yield prime
# this code iteratively builds up a chain of
# filters...slightly tricky, but ponder it a bit
numbers = itertools.ifilter(prime.__rmod__, numbers)
for p in iter_primes():
if p > 1000:
break
print (p)
#%19 lines: XML/HTML parsing (using Python 2.5 or third-party library)%
dinner_recipe = '''<html><body><table>
<tr><th>amt</th><th>unit</th><th>item</th></tr>
<tr><td>24</td><td>slices</td><td>baguette</td></tr>
<tr><td>2+</td><td>tbsp</td><td>olive oil</td></tr>
<tr><td>1</td><td>cup</td><td>tomatoes</td></tr>
<tr><td>1</td><td>jar</td><td>pesto</td></tr>
</table></body></html>'''
# In Python 2.5 or from http://effbot.org/zone/element-index.htm
import xml.etree.ElementTree as etree
tree = etree.fromstring(dinner_recipe)
# For invalid HTML use http://effbot.org/zone/element-soup.htm
# import ElementSoup, StringIO
# tree = ElementSoup.parse(StringIO.StringIO(dinner_recipe))
pantry = set(['olive oil', 'pesto'])
for ingredient in tree.getiterator('tr'):
amt, unit, item = ingredient
if item.tag == "td" and item.text not in pantry:
print ("%s: %s %s" % (item.text, amt.text, unit.text))
#%20 lines: 8-Queens Problem (define your own exceptions)%
BOARD_SIZE = 8
class BailOut(Exception):
pass
def validate(queens):
left = right = col = queens[-1]
for r in reversed(queens[:-1]):
left, right = left-1, right+1
if r in (left, col, right):
raise BailOut
def add_queen(queens):
for i in range(BOARD_SIZE):
test_queens = queens + [i]
try:
validate(test_queens)
if len(test_queens) == BOARD_SIZE:
return test_queens
else:
return add_queen(test_queens)
except BailOut:
pass
raise BailOut
queens = add_queen([])
print (queens)
print ("\n".join(". "*q + "Q " + ". "*(BOARD_SIZE-q-1) for q in queens))
#%21 lines: "Guess the Number" Game (edited) from http://inventwithpython.com%
import random
guesses_made = 0
name = input('Hello! What is your name?\n')
number = random.randint(1, 20)
print ('Well, {0}, I am thinking of a number between 1 and 20.'.format(name))
while guesses_made < 6:
guess = int(input('Take a guess: '))
guesses_made += 1
if guess < number:
print ('Your guess is too low.')
if guess > number:
print ('Your guess is too high.')
if guess == number:
break
if guess == number:
print ('Good job, {0}! You guessed my number in {1} guesses!'.format(name, guesses_made))
else:
print ('Nope. The number I was thinking of was {0}'.format(number)) | [
"noreply@github.com"
] | noreply@github.com |
c26f7634772cb749ee3a6b5784f4753109bafbe5 | 6f74c608167c36d5473903fe754ee09a3f02c118 | /venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | a8bb3b0c2a60e16a440a11684a0c4c1baed170d1 | [] | no_license | riadnwu/SparkConnection | d44545f81c32c88e20789cbd9f678be52510e169 | e52e08ef15a60e07f48f53f3521d46191862d33a | refs/heads/master | 2020-03-24T21:37:21.218839 | 2018-07-31T16:54:46 | 2018-07-31T16:54:46 | 143,042,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: special with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| [
"riadnwu@gmail.com"
] | riadnwu@gmail.com |
3f7ad64f6a7c833d05a8e07a162c6c8de8dacb21 | 4c9cfa4570d034c3e21944d3c53348153a14cd3f | /flux_runtime.py | 92115be3fabce6f581d1edc211b3717cdfbe993d | [] | no_license | michaelmathen/TrajectoryScanningExperiments | 4d2b3f17850245a5b8a975cc4c756395daeeacac | d6c6e39c21e153fdfdb617921b1d7db8e02b3e3a | refs/heads/master | 2020-05-18T03:38:01.681728 | 2019-04-29T22:22:06 | 2019-04-29T22:22:06 | 184,149,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | import flux_testing
import paths
import utils
import pyscan
if __name__ == "__main__":
#trajectories = paths.read_geolife_files(100)
trajectories = paths.read_dong_csv("/data/Dong_sets/Trajectory_Sets/samples/osm_eu_sample_100k_nw.tsv")
st_pts, end_pts = pyscan.trajectories_to_flux(trajectories)
st_pts = [pyscan.WPoint(1.0, float(p[0]), float(p[1]), 1.0) for p in st_pts]
end_pts = [pyscan.WPoint(1.0, float(p[0]), float(p[1]), 1.0) for p in end_pts]
r = .0025
q = .2
p = .5
eps_r = .001
disc = utils.disc_to_func("disc")
#red, blue, _, _ = pyscan.plant_partial_disk(trajectories, r, p, q, eps_r, disc)
for region_name, two_level_sample, ham_sand in [ ("halfplane", True, False), ("disk", True, False),("halfplane", True, True), ("rectangle", True, False)]:
# if region_name == "halfplane":
# scan = pyscan.max_halfplane
# elif region_name == "disk":
# scan = pyscan.max_disk
# elif region_name == "rectangle":
# def scan(n_s, m_s, b_s, disc):
# grid = pyscan.Grid(len(n_s), m_s, b_s)
# s1 = pyscan.max_subgrid_linear(grid, -1.0, 1.0)
# s2 = pyscan.max_subgrid_linear(grid, 1.0, -1.0)
# if s1.fValue() > s2.fValue():
# reg = grid.toRectangle(s1)
# mx = s1.fValue()
# else:
# reg = grid.toRectangle(s2)
# mx = s2.fValue()
# return reg, mx
output_file = "flux_runtime_{}_{}_{}.csv".format(region_name, "2" if two_level_sample else "1", "ham" if ham_sand else "rand")
flux_testing.testing_flux_framework(output_file, st_pts, end_pts, -1, -4, 80,
region_name=region_name,
two_level_sample=two_level_sample,
ham_sample=ham_sand,
max_time=100)
| [
"michaelmathen@gmail.com"
] | michaelmathen@gmail.com |
bc84a9d59661a0626a5b7ef6f63bc252e9820fe9 | be6ebc62c50e2db47b749cc0d0deb49a5f723a9c | /python/pyp2pstrmsim/ppss.py | 64ccb31c341bb8540d96408ee3cb7bce37cfeb62 | [] | no_license | jollywing/jolly-code-snippets | ebeb9fd1d4fe0891ec081cf6db663a60abef58e0 | f453474f59e4384ae37295bf706fb6288f55c89f | refs/heads/master | 2020-12-24T16:31:52.023140 | 2018-10-08T10:15:30 | 2018-10-08T10:15:30 | 17,929,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,579 | py | #!/usr/bin/python2
"""
PPSS (Python based P2P Streaming Simulator)
my first python problem: a event-driven p2p streaming simulator
author: jiqing
update: 2012-07-21
version: 1.0.2
===============================================================
Copyright (C) 2009 jiqing (jiqingwu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
===============================================================
"""
"""
TODO:
Why does the performance decrease as time passes?
"""
import random
import time
""" There are some constants below,
you can change it to setup your simulation."""
""" You can set SAMPLE_ID to -1,
to forbid the output of debug information.
If you set DETAIL to 1, the information of packet level will output """
SAMPLE_ID = -1
DETAIL = 0
""" You can set which kind of application
you are simulating here """
FILE_SHARING = 1
STREAMING = 2
APP = 1
""" Set the duration (mseconds) of your simulation here. """
SIM_DURATION = 600000
""" For file sharing application, set the number of chunks."""
SIM_CHUNKS = 500
""" For distribution delay of single chunk """
SAMPLE_SEQ_TIME = 0
SAMPLE_SEQ_PEERS = 0
SAMPLE_SEQ = 10
SAMPLE_SEQ_RATIO = 0.9
""" All available scheduling algorithms. """
RANDOM_PULL = 1
RF_PULL = 2
GREEDY_PULL = 3
# The fewest block to the peer with the fewest available block
F2F_OPTIMAL = 4
F2F_PULL = 5
# The scheduling algorithm in our internet model
ENDURE_PULL = 6
""" Specify your scheduling algorithm here """
SCHEDULE = 6
""" Specify the on schedule algorithm on serving peer side here """
BEST_EFFORT_SERV = 0
FAIR_SERV = 1
FAST_SERV = 2
DEADLINE_SERV = 3
DEADLINE_FAST_SERV = 4
SRV_SIDE_SCHEDULE = 1
""" All available overlay construction algorithms,
add your own overlay construction algorithm here. """
RANDOM_OVERLAY = 1
DELAY_OVERLAY = 2
""" Specify your overlay construction algorithm here """
OVERLAY = RANDOM_OVERLAY
""" Set the number of peers in your simulation,
the source peer is not included. """
NODE_CNT = 100
""" Set the duration (mseconds) that all new peers join the system. """
JOIN_DURATION = 5000
""" To simulate flash crowds, set FLASH to True. """
FLASH_CROWD = False
""" Set the scale of flash crowd here. """
FLASH_CNT = 50
""" Set when the flash crowd happends. """
FLASH_TIME = SIM_DURATION/2
""" Set the duration (mseconds) of your flash crowd. """
FLASH_DURATION = 5000
""" How long a chunk plays back? """
FORWARD_INTERVAL = 500
""" How long between two Adding neighbors """
FIND_NBR_INTERVAL = 200
""" How many neighbors a peer has """
NBR_CNT = 5
""" The cycle of exchanging buffermaps """
NOTIFY_INTERVAL = 2000
""" The cycle of requesting chunks """
REQ_INTERVAL = 1000
""" The cycle of statistics """
STAT_INTERVAL = 20000
""" How many packets the buffer can hold. """
BUF_CAP = 40
""" When a peer reaches the BUF_RATIO, it can start playback. """
BUF_RATIO = 0.4
""" In average, How long to send a data chunk """
SEND_INTERVAL = 100
SEND_QUEUE_LEN = 15
""" Set the max delay of sending a packet,
When you use delay matrix, this may be useless. """
MAX_DELAY = 150
USE_DELAY_MATRIX = False
MATRIX_FILE = 'simplified-matrix.dat'
""" !!! YOU NEED NOT MODIFY THE FOLLOWING CONSTANTS !!! """
""" THERE are some events in your simulation,
such as joining and leaving of a peer,
receiving some packet of a peer."""
EVENT_PEER_JOIN = 0
EVENT_PEER_LEAVE = 1
EVENT_RECEIVE = 2
EVENT_FORWARD = 3
EVENT_SEND = 5
""" For my F2F algorithm """
EVENT_UPLOAD = 6
""" The event that a peer look for its neighbors."""
EVENT_FIND_NBR = 7
""" The event that a peer broadcast its data avalability to
its neighbors. """
EVENT_NOTIFY = 8
""" The event that a peer request data from its neighbors """
EVENT_SCHEDULE = 9
""" The event that you gather statistics. """
EVENT_STAT = 10
""" The event that a peer evaluate the qos of its neighbors. """
EVENT_EVAL = 11
""" All packet types are list here. """
PACKET_LEAVE = 100
PACKET_ADD_NBR = 101
""" When its request for adding neighbor is accepted,
it will receive a PACKET_ACK. """
PACKET_ACK = 102
PACKET_DATA = 103
PACKET_REQ = 104
""" A peer inform its neighbors which packets it holds
by sending PACKET_NOTIFY. """
PACKET_NOTIFY = 105
############################################################
class FileChunk(object):
""" structure of file chunk in cache """
def __init__(self, state, rareness):
self.m_state = state # 0: miss; 1: downloading; 2: have
self.m_rareness = rareness
class BufferElement(object):
""" structure of packet in buffer """
def __init__(self, state, seq, rareness):
self.m_state = state
self.m_seq = seq
self.m_rareness = rareness
self.m_req_time = -1
self.m_response_time = -1
class Peer(object):
""" peer in overlay: include server and normal peer """
new_seq = 0
def __init__ (self, peer_id,topology, event_engine):
""" peer class constructor """
self.m_topology = topology # for accessing other peers
self.m_engine = event_engine # for scheduling event
self.m_peer_id = peer_id # for peer ID
self.m_start_seq = 0 # for stating load, initial pointer
self.m_seq_num = 0 # for play pointer
self.m_played = 0 # for stating quality
self.m_should_played = 0 # for stating quality
self.m_late = 0 # for stating late packets
self.m_sent = 0 # for bandwidth utility
self.m_rejected = 0 # for rejected ratio
self.m_total_reqs = 0 # for rejected ratio
self.m_avail_bw = 0 # for bw aware schedule
self.m_buffer = [] # for streaming buffer
for i in range(BUF_CAP):
e = BufferElement(0, -1, 0)
self.m_buffer.append(e)
self.m_buffering = True # for buffering state
self.m_buffer_time = 0 # for time of buffering
self.m_recved = 0 # for buffering finish
self.m_good_rate_cycles = 0 # for buffering finish?
self.m_file_cache = [] # for file cache
for i in range(SIM_CHUNKS):
c = FileChunk(0, 0)
self.m_file_cache.append(c)
self.m_nbrs = [] # for neighbors
self.m_finding_nbrs = False # for finding nbr state
self.m_cycle_recved = 0 # for switching nbr?
self.m_low_qos_cycles = 0 # for switching nbr?
self.last_nbr_send = 0 # for cycle sending
self.m_join_time = 0 # for online duration
self.m_leave_time = 0 # for online duration
self.m_online = False # for online state, neccessary?
def set_life_time(self, join_time, leave_time):
""" for peer churn """
self.m_join_time = join_time
self.m_leave_time = leave_time
def set_bandwidth(self, out_bw, in_bw):
""" To set according to the real model,
the outbound bandwidth decides the sending interval. """
self.m_outbound_bw = out_bw
self.m_inbound_bw = in_bw
def bw_utility(self):
""" return the bandwidth utility of this peer. """
should_sent = float(self.m_engine.m_current_time -\
self.m_join_time) / SEND_INTERVAL
return self.m_sent / should_sent
def find_nbrs(self):
""" normal peers find their neighbours,
you can describe your algorithm of overlay
construction here."""
if len(self.m_nbrs) >= NBR_CNT:
self.m_finding_nbrs = False
return
if self.m_finding_nbrs == True:
self.m_engine.schedule_event(FIND_NBR_INTERVAL,\
EVENT_FIND_NBR, self)
""" if all other online peers are all nbrs of this peer,
return """
if len(self.m_nbrs) == len(self.m_topology.online_peers) - 1:
return
""" find a object peer """
candidate_peer = None
if OVERLAY == RANDOM_OVERLAY:
candidate_peer = self.find_nbr_random()
elif OVERLAY == DELAY_OVERLAY:
candidate_peer = self.find_nbr_delay()
""" send a packet of adding neighbor to the peer """
if not candidate_peer is None:
packet = Packet(self, candidate_peer, PACKET_ADD_NBR)
self.send_pkt(packet)
def find_nbr_random(self):
"""find a neighbor candidate randomly"""
while True:
index = random.randrange( \
len(self.m_topology.online_peers) )
p = self.m_topology.online_peers[index]
if self != p and self.nbr_index(p) == -1:
return p
def find_nbr_delay(self):
""" find a neighbor candidate with a little RTT """
candidate_peer = None
min_delay = 1000
for p in self.m_topology.online_peers:
if p.m_peer_id == self.m_peer_id:
continue
if self.nbr_index(p) != -1:
continue
if len(p.m_nbrs) >= NBR_CNT:
continue
latency = self.m_topology.latency_matrix[self.m_peer_id]\
[p.m_peer_id]
if latency < min_delay:
min_delay = latency
candidate_peer = p
return candidate_peer
def add_nbr(self, p):
""" when receive an ACK or a ADD_NBR packet """
nbr = Neighbour(p)
self.m_nbrs.append(nbr)
if len(self.m_nbrs) == NBR_CNT:
self.m_finding_nbrs = False
# print debug information
if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
self.print_nbrs()
def del_nbr(self, n):
""" when receive a LEAVE packet """
self.m_nbrs.remove(n)
self.m_finding_nbrs = True
self.find_nbrs()
# print debug information
if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
print "%d: %d remove neighbor %d." % \
(self.m_engine.m_current_time,\
SAMPLE_ID, packet.m_src_peer.m_peer_id)
self.print_nbrs()
def print_nbrs(self):
print "nbr list: ",
nbrs = []
for n in self.m_nbrs:
nbrs.append(n.m_peer.m_peer_id)
print nbrs
def maintain_nbrs(self):
# If this peer is server or it has enough neighbors, return
if self.m_finding_nbrs == True or \
self.m_peer_id == 0: return
# if aggregate rate is less than the playback rate
# Q: why divided by 2?
if self.m_cycle_recved < REQ_INTERVAL / FORWARD_INTERVAL/2:
self.m_low_qos_cycles += 1
else:
self.m_low_qos_cycles = 0
self.m_cycle_recved = 0
if self.m_low_qos_cycles >= 3:
bad_nbr = self.m_nbrs[0]
recv_cnt = self.m_nbrs[0].cycle_recved
for n in self.m_nbrs:
if n.cycle_recved < recv_cnt:
recv_cnt = n.cycle_recved
bad_nbr = n
self.del_nbr(bad_nbr)
def schedule(self):
""" scheduling periodically, decide requesting which
packets from which nbrs, or pushing wich packets to
which nbrs. """
self.m_engine.schedule_event(REQ_INTERVAL,\
EVENT_SCHEDULE, self)
# if this peer has no neighbor, do nothing
if len(self.m_nbrs) == 0: return
if APP == STREAMING and self.m_buffering == True:
# find the neighbor peer with the most available blocks
chunk_num = 0
obj_nbr = None
for nbr in self.m_nbrs:
if nbr.m_peer.avail_items_absolute() > chunk_num:
chunk_num = nbr.m_peer.avail_items_absolute()
obj_nbr = nbr
# reset the requesting pointer
if obj_nbr:
self.m_seq_num = obj_nbr.m_peer.min_seq()
if SCHEDULE == RANDOM_PULL:
self.random_pull()
elif SCHEDULE == RF_PULL:
self.rf_pull()
elif SCHEDULE == GREEDY_PULL:
self.greedy_pull()
elif SCHEDULE == F2F_OPTIMAL:
if APP == FILE_SHARING:
self.f2f_fs_optimal()
else: self.f2f_stream_optimal()
elif SCHEDULE == F2F_PULL:
if APP == FILE_SHARING:
self.f2f_fs_pull()
else: self.f2f_stream_pull()
elif SCHEDULE == ENDURE_PULL:
if APP == STREAMING:
self.endurable_pull()
self.maintain_nbrs()
def random_pull(self):
""" select packet in order, select peer randomly """
# If this is a source peer, do nothing
if self.m_peer_id == 0: return
cycle_reqed = 0
if APP == FILE_SHARING:
for seq in range(SIM_CHUNKS):
if self.want(seq):
self.m_total_reqs += 1
holders = []
for nbr in self.m_nbrs:
if nbr.m_peer.have(seq):
holders.append(nbr)
if len(holders) > 0:
i = random.randrange(len(holders))
holders[i].req_queue.append(seq)
self.m_file_cache[seq].m_state = 1
cycle_reqed += 1
if cycle_reqed > REQ_INTERVAL / FORWARD_INTERVAL:
break
# add the times that this peer is rejected
else:
self.m_rejected += 1
else:
seqs_reqed = []
for seq in range(self.m_seq_num + 1,\
self.m_seq_num + BUF_CAP):
if self.have(seq): continue
self.m_total_reqs += 1
self.m_buffer[seq% BUF_CAP].m_req_time = \
self.m_engine.m_current_time
holders = []
for nbr in self.m_nbrs:
if nbr.m_peer.have(seq):
holders.append(nbr)
if len(holders) > 0:
seqs_reqed.append(seq)
i = random.randrange(len(holders))
holders[i].req_queue.append(seq)
# cycle_reqed += 1
# if cycle_reqed > REQ_INTERVAL / FORWARD_INTERVAL:
# break
else:
self.m_rejected += 1
if self.m_peer_id == SAMPLE_ID:
print "Scheduled: ",
print seqs_reqed
self.send_req_pkts()
def greedy_pull(self):
if APP == FILE_SHARING:
seq_begin = 0
seq_end = SIM_CHUNKS
else:
seq_begin = self.m_seq_num + 1
seq_end = self.m_seq_num + BUF_CAP
reqed_this_cycle = 0
for seq in range(seq_begin, seq_end):
if not self.want(seq): continue
self.m_total_reqs += 1
holders = []
for nbr in self.m_nbrs:
if nbr.m_peer.have(seq):
holders.append(nbr)
if len(holders) > 0:
short_queue_len = 10000
fast_holder = None
for holder in holders:
queue_len = holder.m_peer.total_send_queue_len()
if queue_len <= short_queue_len:
short_queue_len = queue_len
fast_holder = holder
if fast_holder != None:
fast_holder.req_queue.append(seq)
reqed_this_cycle += 1;
if APP == FILE_SHARING:
self.m_file_cache[seq].m_state = 1
if reqed_this_cycle >\
(REQ_INTERVAL / SEND_INTERVAL) *\
len(self.m_nbrs):
break
else:
self.m_buffer[seq% BUF_CAP].m_req_time = \
self.m_engine.m_current_time
else:
self.m_rejected += 1
self.send_req_pkts()
def rf_pull(self):
""" select packet in order of rareness,
select peer randomly. """
if self.m_peer_id == 0: return
for nbr in self.m_nbrs:
nbr.req_queue = []
seqs_vs_holdernum = [None,]*NBR_CNT
for i in range(len(seqs_vs_holdernum)):
seqs_vs_holdernum[i] = []
seqs_to_req = {}
if APP == FILE_SHARING:
for seq in range(SIM_CHUNKS):
if self.want(seq):
holders = []
for nbr in self.m_nbrs:
if nbr.m_peer.have(seq):
holders.append(nbr)
if len(holders) > 0:
seqs_vs_holdernum[len(holders)-1].append(seq)
seqs_to_req[seq] = holders
else:
cycle_reqed = 0
for seq in range(self.m_seq_num,\
self.m_seq_num + int(3 * BUF_CAP/4)):
if self.have(seq): continue
self.m_total_reqs += 1
holders = []
for nbr in self.m_nbrs:
if nbr.m_peer.have(seq):
holders.append(nbr)
if len(holders) > 0:
seqs_vs_holdernum[len(holders)-1].append(seq)
seqs_to_req[seq] = holders
cycle_reqed += 1
if cycle_reqed > REQ_INTERVAL / FORWARD_INTERVAL:
break
else:
self.m_rejected += 1
for seqs in seqs_vs_holdernum:
for seq in seqs:
i = random.randrange(len(seqs_to_req[seq]))
seqs_to_req[seq][i].req_queue.append(seq)
if APP == FILE_SHARING:
self.m_file_cache[seq].m_state = 1
else:
self.m_buffer[seq % BUF_CAP].m_req_time = \
self.m_engine.m_current_time
self.send_req_pkts()
def endurable_peer(self, seq):
chunk_num = 0
obj_peer = None
for p in self.m_topology.online_peers:
if p.have(seq) and p.m_avail_bw > 0:
if p.avail_items_absolute() > chunk_num:
chunk_num = p.avail_items_absolute()
obj_peer = p
return obj_peer
def endurable_pull(self):
""" select serving peer in order of endurability """
if self.m_peer_id == 0: return
for p in self.m_topology.online_peers:
if p.m_peer_id != self.m_peer_id:
p.m_avail_bw = REQ_INTERVAL / SEND_INTERVAL - \
p.total_send_queue_len()
cycle_reqed = 0
if APP == FILE_SHARING:
req_begin = 0
req_end = SIM_CHUNKS
cycle_reqed_max = REQ_INTERVAL / SEND_INTERVAL * NODE_CNT
else:
req_begin = self.m_seq_num + 1
req_end = self.m_seq_num + int(BUF_CAP*3/4)
cycle_reqed_max = int(REQ_INTERVAL / FORWARD_INTERVAL * 1.5)
for seq in range(req_begin, req_end):
if self.want(seq):
self.m_total_reqs += 1
if APP == STREAMING:
self.m_buffer[seq % BUF_CAP].m_req_time =\
self.m_engine.m_current_time
else:
self.m_file_cache[seq].m_state = 1
obj_peer = self.endurable_peer(seq)
if obj_peer:
obj_peer.m_avail_bw -= 1
packet = PacketRequest(self, obj_peer, PACKET_REQ)
packet.append_seq(seq)
self.send_pkt(packet)
cycle_reqed += 1
if cycle_reqed >= cycle_reqed_max:
break
else:
self.m_rejected += 1
def send_req_pkts(self):
for nbr in self.m_nbrs:
if len(nbr.req_queue) > 0:
packet = PacketRequest(self, nbr.m_peer, PACKET_REQ)
packet.set_pkts_list(nbr.req_queue)
self.send_pkt(packet)
if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
print "%d: %d request data pkts from %d" %\
(self.m_engine.m_current_time, SAMPLE_ID,
nbr.m_peer.m_peer_id)
nbr.req_queue =[]
def buffer_finish(self):
""" judge whether the buffering does finish."""
if self.m_buffering == True and \
self.m_recved / float(BUF_CAP) >= BUF_RATIO:
self.m_buffering = False
self.m_buffer_time = self.m_engine.m_current_time\
- self.m_join_time
self.m_start_seq = self.m_seq_num
if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
print "%d: %d finishes buffering, buffer time: %d"\
%(self.m_engine.m_current_time, SAMPLE_ID,\
self.m_buffer_time)
def want(self, seq):
""" whether this peer is interested in segment with given seq
@param: int seq
@return: True or False """
if APP == FILE_SHARING:
if self.m_file_cache[seq].m_state == 0:
return True
else:
if seq > self.m_seq_num and\
seq < self.m_seq_num + BUF_CAP and \
(not self.have(seq)):
return True
return False
def have(self, seq):
""" whether segment with given seq is in this peer's buffer"""
if APP == STREAMING:
if self.m_buffer[seq % BUF_CAP].m_seq == seq and\
self.m_buffer[seq %BUF_CAP].m_state == 2:
return True
else:
if self.m_file_cache[seq].m_state == 2:
return True
return False
def print_buffer(self):
print "Seq num: %d" %(self.m_seq_num)
print "[",
for i in range(BUF_CAP):
print "%d:%d "%(self.m_buffer[i].m_seq, self.m_buffer[i].m_state),
print "]"
def print_info(self):
print "ID: %d" %(self.m_peer_id)
print "join time: %d" %(self.m_join_time)
self.print_nbrs()
print "Buffering: ",
print self.m_buffering
print "Recved: %d" % (self.m_recved)
print "Buffer ratio: %.3f" % (float(self.m_recved) / BUF_CAP)
self.print_buffer()
print "want to req: %d" %(self.m_total_reqs)
print "rejected: %d" % (self.m_rejected)
print "avg send queue length: %d" %( self.avg_send_queue_len() )
def nbr_index(self, peer):
""" Judge whether a given peer is a neighbor,
if it is, renturn its index, else return -1. """
for i in range(len(self.m_nbrs)):
if self.m_nbrs[i].m_peer.m_peer_id == peer.m_peer_id:
return i
return -1
def join(self):
    """Bring this peer online and bootstrap its activity.

    The source peer (id 0) never buffers and, for file sharing, owns
    every chunk from the start; every other peer enters the buffering
    state and starts looking for neighbors.  Periodic scheduling and
    sending timers are armed for all peers.
    """
    if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
        print "%d: %d joined" % (self.m_engine.m_current_time,\
                self.m_peer_id)
        self.m_topology.print_online_peers(self.m_engine)
    self.m_online = True
    if APP == STREAMING:
        # arm the periodic playback / segment-generation timer
        self.forward()
    else:
        if self.m_peer_id == 0:
            # the seed owns the complete file
            for i in range(SIM_CHUNKS):
                self.m_file_cache[i].m_state = 2
    if APP == STREAMING:
        if self.m_peer_id == 0:
            self.m_buffering = False
        else:
            self.m_buffering = True
    if self.m_peer_id != 0:
        self.m_finding_nbrs = True
        self.find_nbrs()
    # arm the periodic request-scheduling and send timers
    self.m_engine.schedule_event(REQ_INTERVAL,\
            EVENT_SCHEDULE, self)
    self.m_engine.schedule_event(SEND_INTERVAL,\
            EVENT_SEND, self)
def leave(self):
    """Leave the system elegantly: send a leave packet to every
    neighbor, then mark this peer offline."""
    for nbr in self.m_nbrs:
        self.send_pkt(Packet(self, nbr.m_peer, PACKET_LEAVE))
    self.m_online = False
def forward(self):
    """Advance the stream by one tick.

    For the source peer, create a new segment (Peer.new_seq) in its
    buffer; for a normal peer that finished buffering, play (or miss)
    the segment at the playback position and advance it.
    """
    self.m_engine.schedule_event(FORWARD_INTERVAL,\
            EVENT_FORWARD, self)
    if self.m_peer_id == 0:
        # mint the next live segment directly into the source buffer
        self.m_buffer[Peer.new_seq % BUF_CAP].m_seq =\
                Peer.new_seq
        self.m_buffer[Peer.new_seq % BUF_CAP].m_state = 2
        self.m_buffer[Peer.new_seq % BUF_CAP].m_rareness = 1
        Peer.new_seq += 1
        """ In fact, the m_seq_num of source is nonsense. """
        if Peer.new_seq >= BUF_CAP:
            self.m_seq_num += 1
    else:
        if self.m_buffering == False:
            if self.have(self.m_seq_num): # state quality
                self.m_played += 1
            else:
                self.m_late +=1
            self.m_should_played +=1
            self.m_seq_num += 1 # playback
            if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
                print "%d: %d forward, seq num: %d. played: %d"\
                        % (self.m_engine.m_current_time, SAMPLE_ID,\
                        self.m_seq_num, self.m_played)
def quality(self):
    """Playback quality: fraction of due segments actually played.

    The source peer (id 0) is perfect by definition; a peer still
    buffering, or with nothing due yet, reports 0.
    """
    if self.m_peer_id == 0:
        return 1
    if self.m_buffering or self.m_should_played == 0:
        return 0
    return float(self.m_played) / self.m_should_played
def broadcast(self):
    """Periodically push this peer's buffer map to every neighbor."""
    self.m_engine.schedule_event(NOTIFY_INTERVAL,\
            EVENT_NOTIFY, self)
    for nbr in self.m_nbrs:
        self.send_pkt(PacketNotify(self, nbr.m_peer, PACKET_NOTIFY))
def fair_serv(self):
    """Send packets to each neighbor fairly (round robin).

    self.last_nbr_send is the rotating cursor over m_nbrs; at most one
    data packet is sent per call.
    @input: send_queue of each neighbor
    """
    # find a nbr in turn: first neighbor at/after the cursor with work
    nbr_idx = -1
    for i in range( len(self.m_nbrs) ):
        self.last_nbr_send %= len(self.m_nbrs)
        if len(self.m_nbrs[ self.last_nbr_send ].send_queue)\
                > 0:
            nbr_idx = self.last_nbr_send
            break
        self.last_nbr_send += 1
    if nbr_idx < 0: return
    sended = False
    for seq in self.m_nbrs[nbr_idx].send_queue:
        if self.have(seq):
            packet = PacketData(self, self.m_nbrs[nbr_idx].\
                    m_peer, PACKET_DATA, seq)
            self.send_pkt(packet)
            # mutating while iterating is safe here: we break right
            # after the single removal
            self.m_nbrs[nbr_idx].send_queue.remove(seq)
            self.m_sent += 1
            sended = True
        if sended == True:
            break
    # advance the cursor so the next call serves the next neighbor
    self.last_nbr_send += 1
def best_effort_serv(self):
    """Serve neighbor peers one by one: scan each neighbor's queue in
    order and send the first queued chunk we actually own, then stop
    (at most one data packet per call)."""
    for nbr in self.m_nbrs:
        for seq in nbr.send_queue:
            if not self.have(seq):
                continue
            self.send_pkt(PacketData(self, nbr.m_peer,
                                     PACKET_DATA, seq))
            nbr.send_queue.remove(seq)
            self.m_sent += 1
            return
def fast_serv(self):
    """Serve the globally rarest requested chunk (rarest-first) to the
    requesting neighbor holding the fewest useful items."""
    # find the rarest chunk among all requested chunks we own
    nbr_list = []
    rarest_seq = -1
    min_copy_num = NODE_CNT
    for n in self.m_nbrs:
        for seq in n.send_queue:
            if seq == rarest_seq:
                # another neighbor also requested the current rarest
                nbr_list.append(n)
            elif self.have(seq):
                # count system-wide replicas (oracle/global knowledge)
                copy_num = 0
                for p in self.m_topology.online_peers:
                    if p.have(seq):
                        copy_num += 1
                if copy_num <= min_copy_num:
                    nbr_list = []
                    min_copy_num = copy_num
                    rarest_seq = seq
                    nbr_list.append(n)
    if rarest_seq == -1: return
    #find the requestor with largest rbw (fewest useful items held)
    idlest_nbr = None
    avail_chunks = SIM_CHUNKS
    for n in nbr_list:
        chunk_num = n.m_peer.avail_items_relative()
        if chunk_num <= avail_chunks:
            avail_chunks = chunk_num
            idlest_nbr = n
    if idlest_nbr == None: return
    #send chunk to the requestor
    packet = PacketData(self, idlest_nbr.m_peer,\
            PACKET_DATA, rarest_seq)
    self.send_pkt(packet)
    idlest_nbr.send_queue.remove(rarest_seq)
    self.m_sent += 1
def deadline_pure_serv(self):
    """Serve the requested chunk with the most urgent (smallest mean)
    playback deadline to the requester furthest ahead in playback."""
    # construct request queue: seq -> list of neighbors requesting it
    req_queue = {}
    for n in self.m_nbrs:
        for seq in n.send_queue:
            if seq not in req_queue:
                req_queue[seq] = []
            req_queue[seq].append(n)
    # find the least dealine: smallest mean distance to playback point
    snd_seq = -1
    Delta = float(SIM_DURATION) / FORWARD_INTERVAL
    for req_seq in req_queue.keys():
        total_delta = 0
        for n in req_queue[req_seq]:
            total_delta += req_seq - n.m_peer.m_seq_num
        delta = float(total_delta) / len(req_queue[req_seq])
        if delta <= Delta:
            Delta = delta
            snd_seq = req_seq
    if snd_seq == -1:
        return
    # find the nbr with fast process (largest playback position)
    fast_nbr = None
    seq_num = 0
    for n in req_queue[snd_seq]:
        if n.m_peer.m_seq_num >= seq_num:
            seq_num = n.m_peer.m_seq_num
            fast_nbr = n
    if fast_nbr == None: return
    # send
    packet = PacketData(self, fast_nbr.m_peer,\
            PACKET_DATA, snd_seq)
    self.send_pkt(packet)
    fast_nbr.send_queue.remove(snd_seq)
    self.m_sent += 1
def deadline_hybrid_serv(self):
    """Deadline-first chunk selection combined with serving the
    requester holding the fewest relatively-available items."""
    # construct request queue: seq -> list of neighbors requesting it
    req_queue = {}
    for n in self.m_nbrs:
        for seq in n.send_queue:
            if seq not in req_queue:
                req_queue[seq] = []
            req_queue[seq].append(n)
    # find the least dealine: smallest mean distance to playback point
    snd_seq = -1
    Delta = float(SIM_DURATION) / FORWARD_INTERVAL
    for req_seq in req_queue.keys():
        total_delta = 0
        for n in req_queue[req_seq]:
            total_delta += req_seq - n.m_peer.m_seq_num
        delta = float(total_delta) / len(req_queue[req_seq])
        if delta <= Delta:
            Delta = delta
            snd_seq = req_seq
    if snd_seq == -1:
        return
    # find the max relative bandwidth (fewest useful items held)
    idlest_nbr = None
    avail_chunks = SIM_CHUNKS
    for n in req_queue[snd_seq]:
        chunk_num = n.m_peer.avail_items_relative()
        if chunk_num <= avail_chunks:
            avail_chunks = chunk_num
            idlest_nbr = n
    if idlest_nbr == None: return
    # send
    packet = PacketData(self, idlest_nbr.m_peer,\
            PACKET_DATA, snd_seq)
    self.send_pkt(packet)
    idlest_nbr.send_queue.remove(snd_seq)
    self.m_sent += 1
def send(self):
    """Pick one packet to send according to the configured server-side
    scheduling policy, then re-arm the periodic send timer."""
    # TODO: adaptive SEND_INTERVAL for different bandwidth
    self.m_engine.schedule_event(SEND_INTERVAL,\
            EVENT_SEND, self)
    if not self.m_nbrs:
        return
    policies = {
        FAIR_SERV: self.fair_serv,
        FAST_SERV: self.fast_serv,
        DEADLINE_SERV: self.deadline_pure_serv,
        DEADLINE_FAST_SERV: self.deadline_hybrid_serv,
    }
    # any unknown policy value falls back to best-effort service
    policies.get(SRV_SIDE_SCHEDULE, self.best_effort_serv)()
def avg_send_queue_len(self):
    """Mean send-queue length over all neighbors (0 when there are none)."""
    if not self.m_nbrs:
        return 0
    queued = sum(len(nbr.send_queue) for nbr in self.m_nbrs)
    return float(queued) / len(self.m_nbrs)
def total_send_queue_len(self):
    """Total number of queued chunks across all neighbor send queues."""
    return sum(len(nbr.send_queue) for nbr in self.m_nbrs)
def min_seq(self):
    """Smallest sequence number among fully-received buffer slots,
    or -1 when nothing has been received yet."""
    smallest = -1
    for i in range(BUF_CAP):
        slot = self.m_buffer[i]
        if slot.m_state == 2 and (smallest < 0 or slot.m_seq < smallest):
            smallest = slot.m_seq
    return smallest
def avail_items_absolute(self):
    """Count of items this peer holds, regardless of neighbor demand."""
    if APP == STREAMING:
        return sum(1 for i in range(BUF_CAP)
                   if self.m_buffer[i].m_state == 2)
    return sum(1 for i in range(SIM_CHUNKS) if self.have(i))
def avail_items_relative(self):
    """Count of items relevant to current neighbor demand.

    Streaming: buffer slots whose seq at least one neighbor wants.
    File sharing: simply the number of chunks this peer owns (neighbor
    demand is not consulted in that branch).
    """
    if APP == STREAMING:
        count = 0
        for i in range(BUF_CAP):
            seq = self.m_buffer[i].m_seq
            if any(nbr.m_peer.want(seq) for nbr in self.m_nbrs):
                count += 1
        return count
    return sum(1 for i in range(SIM_CHUNKS) if self.have(i))
def avg_response_time(self):
    """Mean request-to-receive latency over buffer slots that recorded
    a positive response time; -1 when no slot has one."""
    samples = [self.m_buffer[i].m_response_time
               for i in range(BUF_CAP)
               if self.m_buffer[i].m_response_time > 0]
    if not samples:
        return -1
    return float(sum(samples)) / len(samples)
def f2f_fs_optimal(self):
    """ optimal distribution for file sharing.

    Using global (oracle) knowledge, rank the chunks we own by
    system-wide rareness and push each to the interested neighbor
    holding the fewest relatively-available chunks.
    """
    # rare_dict: owned chunk -> number of replicas in the whole system
    rare_dict = {}
    for chunk in range(SIM_CHUNKS):
        copy_num = 0
        for p in self.m_topology.online_peers:
            if p.have(chunk):
                copy_num += 1
        if self.have(chunk)\
                and copy_num < NODE_CNT + 1:
            rare_dict[chunk] = copy_num
    # want_dict: chunk -> neighbors that still want it
    chunk_none_want = []
    want_dict = {}
    for chunk in rare_dict:
        want_nbrs = []
        for nbr in self.m_nbrs:
            if nbr.m_peer.want(chunk):
                want_nbrs.append(nbr)
        if len(want_nbrs) == 0:
            chunk_none_want.append(chunk)
        else:
            want_dict[chunk] = want_nbrs
    for chunk in chunk_none_want:
        del rare_dict[chunk]
    if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
        print "rare_dict: %d" % (len(rare_dict))
    # times = 0
    while len(rare_dict) > 0:
        # rarest chunk first
        valid_chunk = -1
        copy_num = NODE_CNT + 1
        for chunk in rare_dict:
            if rare_dict[chunk] < copy_num:
                valid_chunk = chunk
                copy_num = rare_dict[chunk]
        if valid_chunk == -1: break
        # push it to the neediest interested neighbor
        obj_nbr = None
        avail_chunks = SIM_CHUNKS
        for n in want_dict[valid_chunk]:
            if n.m_peer.avail_items_relative() < avail_chunks:
                obj_nbr = n
                avail_chunks = n.m_peer.avail_items_relative()
        if obj_nbr != None:
            obj_nbr.send_queue.append(valid_chunk)
            # times += 1
            obj_nbr.m_peer.m_file_cache[valid_chunk].\
                    m_state = 1 #downloading
            if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
                print "%d will upload %d to %d." %\
                        (SAMPLE_ID, valid_chunk, \
                        obj_nbr.m_peer.m_peer_id)
        del rare_dict[valid_chunk]
def f2f_fs_pull(self):
    """Pull-based chunk scheduling for file sharing (local view only).

    Rank missing chunks by local rareness (replicas among this peer
    and its neighbors) and request each from the owner neighbor with
    the fewest relatively-available chunks; queued requests are then
    flushed via send_req_pkts().  The seed (id 0) never pulls.
    """
    if self.m_peer_id == 0: return
    rare_dict = {}
    for i in range(SIM_CHUNKS):
        # rareness = replicas among self plus direct neighbors
        if self.m_file_cache[i].m_state == 2:
            self.m_file_cache[i].m_rareness = 1
        else:
            self.m_file_cache[i].m_rareness = 0
        for nbr in self.m_nbrs:
            if nbr.m_peer.m_file_cache[i].m_state == 2:
                self.m_file_cache[i].m_rareness += 1
        if self.m_file_cache[i].m_state == 0:
            rare_dict[i] = self.m_file_cache[i].m_rareness
        #if self.m_peer_id == SAMPLE_ID:
        #print rare_dict[i],
    # owner_dict: missing chunk -> neighbors able to supply it
    chunk_no_owner = []
    owner_dict = {}
    for chunk in rare_dict:
        have_nbrs = []
        for nbr in self.m_nbrs:
            if nbr.m_peer.have(chunk):
                have_nbrs.append(nbr)
        if len(have_nbrs) == 0:
            chunk_no_owner.append(chunk)
        else:
            owner_dict[chunk] = have_nbrs
    for chunk in chunk_no_owner:
        del rare_dict[chunk]
    if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
        print "rare_dict: %d"%(len(rare_dict))
    while len(rare_dict) > 0:
        # rarest missing chunk first
        chunk_to_req = -1
        min_rareness = SIM_CHUNKS
        for chunk in rare_dict:
            if rare_dict[chunk] <= min_rareness:
                chunk_to_req = chunk
                min_rareness = rare_dict[chunk]
        if chunk_to_req < 0: break
        # ask the least-loaded owner
        obj_nbr = None
        avail_chunks = SIM_CHUNKS
        for n in owner_dict[chunk_to_req]:
            if n.m_peer.avail_items_relative()\
                    <= avail_chunks:
                obj_nbr = n
                avail_chunks = n.m_peer.avail_items_relative()
        if obj_nbr != None:
            obj_nbr.req_queue.append(chunk_to_req)
            # mark as in flight so we do not re-request it
            self.m_file_cache[chunk_to_req].m_state = 1
            if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
                print "%d request %d from %d." %\
                        (SAMPLE_ID, chunk_to_req, \
                        obj_nbr.m_peer.m_peer_id)
        del rare_dict[chunk_to_req]
    self.send_req_pkts()
def f2f_stream_optimal(self):
    """Optimal (oracle) push scheduling for streaming.

    Rank our buffered segments by system-wide rareness and assign
    each to the interested neighbor holding the fewest useful items;
    at most REQ_INTERVAL/SEND_INTERVAL assignments per round.
    """
    want_dict = {}
    for i in range(BUF_CAP):
        # rareness = replicas among all online peers (global view)
        self.m_buffer[i].m_rareness = 0
        for p in self.m_topology.online_peers:
            if p.have(self.m_buffer[i].m_seq):
                self.m_buffer[i].m_rareness += 1
        want_dict[i] = []
        for nbr in self.m_nbrs:
            if nbr.m_peer.want(self.m_buffer[i].m_seq):
                want_dict[i].append(nbr)
    # drop slots nobody wants
    pkts_no_wanter = []
    for idx in want_dict:
        if want_dict[idx] == []:
            pkts_no_wanter.append(idx)
    for idx in pkts_no_wanter:
        del want_dict[idx]
    # send queues are rebuilt from scratch every round
    for n in self.m_nbrs:
        n.send_queue = []
    sended = 0
    while len(want_dict) > 0:
        # rarest buffered segment first
        obj_idx = -1
        obj_rareness = NODE_CNT + 1
        for idx in want_dict:
            if self.m_buffer[idx].m_rareness < obj_rareness:
                obj_rareness = self.m_buffer[idx].m_rareness
                obj_idx = idx
        if obj_idx == -1: break
        # assign to the neediest interested neighbor
        obj_nbr = None
        avail_items = BUF_CAP
        for nbr in want_dict[obj_idx]:
            if nbr.m_peer.avail_items_relative() <= avail_items:
                obj_nbr = nbr
                avail_items = nbr.m_peer.avail_items_relative()
        if obj_nbr != None:
            sended += 1
            obj_nbr.send_queue.append(\
                    self.m_buffer[obj_idx].m_seq)
            # mark the slot as in flight at the receiver
            obj_nbr.m_peer.m_buffer[obj_idx].m_seq = \
                    self.m_buffer[obj_idx].m_seq
            obj_nbr.m_peer.m_buffer[obj_idx].m_state = 1
            self.m_buffer[obj_idx].m_rareness += 1
            want_dict[obj_idx].remove(obj_nbr)
            if want_dict[obj_idx] == []:
                del want_dict[obj_idx]
        if sended > REQ_INTERVAL / SEND_INTERVAL:
            break
def f2f_stream_pull(self):
    """Pull-based scheduling for streaming (local view only).

    Refresh per-slot rareness, then for every missing segment in the
    playback window pick the owner neighbor with the fewest useful
    items and queue a request; requests are flushed via
    send_req_pkts().  The source (id 0) never pulls.
    """
    if self.m_peer_id == 0: return
    for i in range(BUF_CAP):
        # rareness = replicas among self plus direct neighbors
        self.m_buffer[i].m_rareness = 0
        if self.m_buffer[i].m_seq >= 0:
            if self.m_buffer[i].m_state == 2:
                self.m_buffer[i].m_rareness += 1
            for n in self.m_nbrs:
                if n.m_peer.have(self.m_buffer[i].m_seq):
                    self.m_buffer[i].m_rareness += 1
    # have_dict: missing seq in the window -> neighbors that own it
    have_dict = {}
    for seq in range(self.m_seq_num + 1, \
            self.m_seq_num + BUF_CAP):
        if not self.have(seq):
            have_dict[seq] = []
            for n in self.m_nbrs:
                if n.m_peer.have(seq):
                    have_dict[seq].append(n)
    seq_no_owner = []
    for seq in have_dict:
        if have_dict[seq] == []:
            seq_no_owner.append(seq)
    for seq in seq_no_owner:
        del have_dict[seq]
    while len(have_dict) > 0:
        # fewest owners first
        obj_seq = -1
        owner_num = NBR_CNT
        for seq in have_dict:
            if len(have_dict[seq]) <= owner_num:
                obj_seq = seq
                owner_num = len(have_dict[seq])
        # NOTE(review): assumes every entry has <= NBR_CNT owners so
        # obj_seq is always found; otherwise the del below would raise
        # KeyError -- confirm NBR_CNT bounds len(self.m_nbrs).
        obj_nbr = None
        avail_item = BUF_CAP
        for n in have_dict[obj_seq]:
            if n.m_peer.avail_items_relative() < avail_item:
                avail_item = n.m_peer.avail_items_relative()
                obj_nbr = n
        if obj_nbr != None:
            obj_nbr.req_queue.append(obj_seq)
        del have_dict[obj_seq]
    self.send_req_pkts()
def send_pkt(self, packet):
#TODO: delay Matrix
if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
print "%d send packet to %d, type: %d. "%\
(SAMPLE_ID, packet.m_dest_peer.m_peer_id, packet.m_type)
matrix_delay = self.m_topology.latency_matrix[self.m_peer_id]\
[packet.m_dest_peer.m_peer_id]
time = self.m_engine.m_current_time + matrix_delay
e = Event(time, EVENT_RECEIVE, packet.m_dest_peer)
e.packet(packet)
self.m_engine.add_event(e)
def receive_pkt(self,packet):
    """ handle all kinds of packets received.

    Dispatches on packet.m_type: neighbor management (LEAVE, ADD_NBR,
    ACK), data delivery (DATA), chunk requests (REQ) and buffer-map
    notifications (NOTIFY).
    """
    if packet == None:
        return
    if packet.m_type == PACKET_LEAVE:
        # sender left: drop it from our neighbor list
        for n in self.m_nbrs:
            if n.m_peer== packet.m_src_peer:
                self.del_nbr(n)
                break
        return
    elif packet.m_type == PACKET_ADD_NBR:
        if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
            print "%d: %d receive packet of adding nbr from %d."%\
                    (self.m_engine.m_current_time, SAMPLE_ID,\
                    packet.m_src_peer.m_peer_id)
        # accept only if we have room and it is not already a neighbor
        if len(self.m_nbrs) < NBR_CNT and \
                self.nbr_index(packet.m_src_peer) == -1:
            self.add_nbr(packet.m_src_peer)
            p = Packet(self, packet.m_src_peer, PACKET_ACK)
            self.send_pkt(p)
        del packet
    elif packet.m_type == PACKET_ACK:
        # the other side accepted us; complete the symmetric link
        if len(self.m_nbrs) < NBR_CNT and \
                self.nbr_index(packet.m_src_peer) == -1:
            self.add_nbr(packet.m_src_peer)
    elif packet.m_type == PACKET_DATA:
        if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
            print "%d: %d receive data pkt from %d, seq num: %d"%\
                    (self.m_engine.m_current_time, self.m_peer_id,\
                    packet.m_src_peer.m_peer_id, packet.m_seq)
        # NOTE(review): m_recved is incremented here and again in the
        # streaming branch below -- looks like a double count for
        # streaming; confirm whether that is intentional.
        self.m_recved += 1
        self.m_cycle_recved += 1
        for n in self.m_nbrs:
            if n.m_peer == packet.m_src_peer:
                n.cycle_recved += 1
                break
        if APP == FILE_SHARING:
            self.m_file_cache[packet.m_seq].m_state = 2
        else:
            self.m_recved += 1
            self.buffer_finish()
            # store the segment and record its response latency
            buf_idx = packet.m_seq % BUF_CAP
            self.m_buffer[buf_idx].m_seq = packet.m_seq
            self.m_buffer[buf_idx].m_state = 2
            self.m_buffer[buf_idx].m_response_time =\
                    self.m_engine.m_current_time - \
                    self.m_buffer[buf_idx].m_req_time
            # track when SAMPLE_SEQ has reached SAMPLE_SEQ_RATIO of
            # the online peers (global dissemination-time metric)
            global SAMPLE_SEQ_TIME, SAMPLE_SEQ_PEERS,\
                    SAMPLE_SEQ_RATIO
            if SAMPLE_SEQ_TIME == 0:
                if packet.m_seq == SAMPLE_SEQ:
                    SAMPLE_SEQ_PEERS += 1
                    if (float(SAMPLE_SEQ_PEERS) / \
                            len(self.m_topology.online_peers)) >= \
                            SAMPLE_SEQ_RATIO:
                        SAMPLE_SEQ_TIME = self.m_engine.m_current_time
        del packet
    elif packet.m_type == PACKET_REQ:
        # merge the neighbor's requested seqs into its send queue
        idx = self.nbr_index(packet.m_src_peer)
        if idx >= 0:
            self.handle_request(idx, packet)
            if self.m_peer_id == SAMPLE_ID and DETAIL==1:
                print "%d: %d requested by %d:" %\
                        (self.m_engine.m_current_time,\
                        SAMPLE_ID,\
                        self.m_nbrs[idx].m_peer.m_peer_id),
                print self.m_nbrs[idx].send_queue
                self.print_buffer()
        del packet
    elif packet.m_type == PACKET_NOTIFY:
        # buffer-map broadcast; currently only logged
        if self.m_peer_id == SAMPLE_ID and DETAIL == 1:
            print "%d: %d received broadcast message from %d." %\
                    (self.m_engine.m_current_time, SAMPLE_ID,\
                    packet.m_src_peer.m_peer_id)
        del packet
    else:
        # unknown packet type: log and drop
        print " what is fucking this packet ???"
        del packet
def handle_request(self, nbr_idx, pkt_req):
    """According to the request packet, rearrange the send queue.

    Each requested sequence number is appended to the requesting
    neighbor's send queue if it is not already there, and the queue is
    kept sorted so older (smaller) seqs are served first.

    @param nbr_idx: index of the requesting neighbor in self.m_nbrs
    @param pkt_req: PacketRequest carrying the list m_pkts_to_req
    """
    queue = self.m_nbrs[nbr_idx].send_queue
    for seq_new in pkt_req.m_pkts_to_req:
        # direct membership test replaces the original O(n) scan per seq
        if seq_new not in queue:
            queue.append(seq_new)
    self.m_nbrs[nbr_idx].send_queue = sorted(queue)
    # for file sharing, need not drop the head
    #if len(self.m_nbrs[nbr_idx].send_queue) > SEND_QUEUE_LEN:
    #del self.m_nbrs[nbr_idx].send_queue[0]
class Neighbour(object):
    """Per-neighbor bookkeeping kept by a Peer."""
    def __init__(self, peer):
        self.m_peer = peer          # the neighboring Peer object
        self.req_queue = []         # chunks we plan to request from it
        self.send_queue = []        # chunks it asked us to upload
        self.recv_cnt = 0           # receive from this neighbour
        self.req_cnt = 0            # request from this neighbour
        self.serv_time = -1
        self.cycle_recved = 0       # packets received in the current cycle
        self.last_send_sidx = 0     # for fair bandwidth alloc
class Packet(object):
    """Base class for every message exchanged between peers."""
    def __init__(self, src_peer, dest_peer, type):
        self.m_src_peer = src_peer
        self.m_dest_peer = dest_peer
        self.m_type = type
    def src_peer(self, src_peer):
        """Overwrite the sender."""
        self.m_src_peer = src_peer
    def dest_peer(self, dest_peer):
        """Overwrite the receiver."""
        self.m_dest_peer = dest_peer
class PacketData(Packet):
    """Data packet carrying one segment/chunk, for streaming and
    file sharing alike."""
    def __init__(self, src_peer, dest_peer, type, seq_num):
        super(PacketData, self).__init__(src_peer, dest_peer, type)
        self.m_seq = seq_num        # sequence number of the payload
class PacketRequest(Packet):
    """Request packet carrying the sequence numbers a peer wants."""
    def __init__(self, src_peer, dest_peer, type):
        super(PacketRequest, self).__init__(src_peer, dest_peer, type)
        self.m_pkts_to_req = []     # requested sequence numbers
    def set_pkts_list(self, list):
        """Bulk-add sequence numbers to the request list."""
        self.m_pkts_to_req.extend(list)
    def append_seq(self, seq):
        """Add one sequence number to the request list."""
        self.m_pkts_to_req.append(seq)
class PacketSubscribe(Packet):
    """Subscription packet announcing the starting sequence number."""
    def __init__(self, src_peer, dest_peer, type, seq):
        super(PacketSubscribe, self).__init__(src_peer, dest_peer, type)
        self.m_start_seq = seq
class PacketUnSub(Packet):
    """Unsubscribe packet identifying the subscription slot to drop."""
    def __init__(self, src_peer, dest_peer, type, ss_idx):
        super(PacketUnSub, self).__init__(src_peer, dest_peer, type)
        self.m_idx = ss_idx
class PacketNotify(Packet):
    """Buffer map notification broadcast to neighbors."""
    def __init__(self, src_peer, dest_peer, type):
        super(PacketNotify, self).__init__(src_peer, dest_peer, type)
class Event(object):
    """A timestamped simulator event, optionally carrying a packet."""
    def __init__(self, time, event_type, peer):
        self.m_time = time          # virtual time at which it fires
        self.m_type = event_type    # one of the EVENT_* constants
        self.m_peer = peer          # peer the event is bound to (or None)
        self.m_packet = None        # payload, set only for EVENT_RECEIVE
    def packet(self, p):
        """Attach payload *p* to this event."""
        self.m_packet = p
class EventEngine(object):
    """Discrete-event simulation core.

    Maintains a time-ordered event queue; run() pops events in order,
    advances the virtual clock and dispatches each one to its handler.
    """
    def __init__(self, topology):
        self.m_current_time = 0     # virtual simulation clock
        self.m_event_queue = []     # events kept sorted by m_time
        self.m_topology = topology
        self.m_monitor = None       # GlobalMonitor, created in run()
    """ You can add new event and its handling here """
    def handle_event(self,e):
        """Advance the clock to e.m_time and dispatch on e.m_type.
        Events bound to a peer are ignored once that peer is offline."""
        self.m_current_time = e.m_time
        if e.m_type == EVENT_FORWARD:
            if e.m_peer.m_online:
                e.m_peer.forward()
        elif e.m_type == EVENT_SEND:
            if e.m_peer.m_online:
                e.m_peer.send()
        elif e.m_type == EVENT_RECEIVE:
            if e.m_peer.m_online:
                e.m_peer.receive_pkt(e.m_packet)
        elif e.m_type == EVENT_FIND_NBR:
            if e.m_peer.m_online:
                e.m_peer.find_nbrs()
        elif e.m_type == EVENT_NOTIFY:
            if e.m_peer.m_online:
                e.m_peer.broadcast()
        elif e.m_type == EVENT_STAT:
            # periodic global statistics dump; re-arms itself
            self.m_monitor.runtime_print(self)
            self.schedule_event(STAT_INTERVAL, EVENT_STAT, None)
        elif e.m_type == EVENT_SCHEDULE:
            if e.m_peer.m_online:
                e.m_peer.schedule()
        elif e.m_type == EVENT_PEER_JOIN:
            self.m_topology.online_peers.append(e.m_peer)
            e.m_peer.join()
        elif e.m_type == EVENT_PEER_LEAVE:
            self.m_topology.online_peers.remove(e.m_peer)
            e.m_peer.leave()
        elif e.m_type == EVENT_EVAL:
            pass
            # if e.m_peer.m_online:
            # e.m_peer.eval_nbrs()
        else:
            print "I don't know what happened ..."
    def run(self,duration):
        """Main loop: drain the queue until it is empty or the clock
        passes *duration*.  Runtime statistics are written to the
        'quality' file; a startup-delay CDF is dumped at the end."""
        self.m_records = open('quality', 'w')
        self.m_monitor = GlobalMonitor(self.m_topology, \
                self.m_records)
        self.schedule_event(STAT_INTERVAL, EVENT_STAT, None)
        while self.m_event_queue != []:
            e = self.m_event_queue[0]
            del self.m_event_queue[0]
            self.handle_event(e)
            if self.m_current_time > duration:
                break
        self.m_records.close()
        self.m_monitor.delay_cdf()
    def add_event(self,e):
        """Insert *e* keeping the queue sorted by time (O(n) scan).
        Ties keep FIFO order: equal timestamps go after existing ones."""
        index = 0
        for event in self.m_event_queue:
            if e.m_time < event.m_time:
                self.m_event_queue.insert(index,e)
                return
            index +=1
        self.m_event_queue.append(e)
    def schedule_event(self, interval, event_type, peer):
        """Queue a new event *interval* time units from now."""
        time = self.m_current_time + interval
        e = Event(time, event_type, peer)
        self.add_event(e)
class UnderTopo(object):
"""underlying topology """
def __init__(self):
self.all_peers = []
self.online_peers = []
self.latency_matrix = []
def random_latency_matrix(self):
if FLASH_CROWD:
peer_count = NODE_CNT + FLASH_CNT + 1
else:
peer_count = NODE_CNT + 1
for k in range(peer_count):
self.latency_matrix.append([])
for n in range(peer_count):
if n == k:
self.latency_matrix[k].append(0)
else:
self.latency_matrix[k].append(\
random.randrange(MAX_DELAY))
def load_latency_matrix(self,filename):
""" load latency matrix from filename into matrix """
self.random_latency_matrix()
if FLASH_CROWD:
max_peer_id = NODE_CNT + FLASH_CNT
else:
max_peer_id = NODE_CNT
f = open(filename,'r')
while 1:
s = f.readline()
fields = s.split()
peer1 = int(fields[0])
peer2 = int(fields[1])
latency = int(float(fields[2])/2000)
if peer1 > max_peer_id:
break
if peer2 <= max_peer_id:
self.latency_matrix[peer1][peer2] = latency
self.latency_matrix[peer2][peer1] = latency
f.close()
def join_server(self, event_engine):
peer = Peer(0, self, event_engine)
self.all_peers.append(peer)
e = Event(0, EVENT_PEER_JOIN,peer)
event_engine.add_event(e)
leave_time = SIM_DURATION + 1000
e = Event(leave_time, EVENT_PEER_LEAVE,peer)
event_engine.add_event(e)
def join_peers(self,event_engine, begin_time, duration, peer_cnt):
"""schedule all peers join and leave """
begin_id = len(self.all_peers)
for i in range(peer_cnt):
peer = Peer(begin_id + i,self,event_engine)
self.all_peers.append(peer)
""" random join model and there is no churn """
peer.set_life_time(\
begin_time + random.randrange(duration),\
SIM_DURATION + 1000)
e = Event(peer.m_join_time, EVENT_PEER_JOIN,peer)
event_engine.add_event(e)
e = Event(peer.m_leave_time, EVENT_PEER_LEAVE, peer)
event_engine.add_event(e)
def schedule_all_peers(self, engine):
""" schedule join and leave of all peers
@param: EventEngine engine
@return: None"""
self.join_server(engine)
self.join_peers(engine, 0, JOIN_DURATION, NODE_CNT)
if FLASH_CROWD:
self.join_peers(engine, FLASH_TIME, FLASH_DURATION,\
FLASH_CNT)
def print_online_peers(self, engine):
print "%d: online peers: " % (engine.m_current_time),
for p in self.online_peers:
print "%d, " % (p.m_peer_id),
print
class GlobalMonitor(object):
    """Aggregates system-wide statistics over all online peers and
    writes them both to stdout and to the results file."""
    def __init__(self, topo, file):
        self.m_topo = topo
        self.m_file = file
    def avg_quality(self):
        """Mean playback quality over non-source peers that have
        finished buffering; 0 if none have started yet."""
        started = 0
        avg_quality = 0
        for p in self.m_topo.online_peers:
            if p.m_peer_id != 0 and p.m_buffering == False:
                started += 1
                avg_quality += p.quality()
        if started > 0:
            return float(avg_quality) / started
        else:
            return 0
    def avg_late(self):
        """Mean number of segments that missed their deadline, over
        non-source peers that finished buffering."""
        started = 0
        total_late = 0
        for p in self.m_topo.online_peers:
            if p.m_peer_id != 0 and p.m_buffering == False:
                started += 1
                total_late += p.m_late
        if started > 0:
            return int( float(total_late) / started )
        else:
            return 0
    def avg_resp_time(self):
        """Mean of per-peer average request-response times, counting
        only peers reporting a positive value."""
        avg_resp_time_sum = 0
        valid_peer_cnt = 0
        for p in self.m_topo.online_peers:
            if p.avg_response_time() > 0:
                avg_resp_time_sum += p.avg_response_time()
                valid_peer_cnt += 1
        if valid_peer_cnt > 0:
            return avg_resp_time_sum / valid_peer_cnt
        else:
            return 0
    def avg_throughput(self):
        """Mean number of data packets received per online peer.

        NOTE(review): unlike the other metrics this one does not guard
        against an empty online_peers list -- confirm it can only run
        after the source peer has joined.
        """
        throughput = 0
        for p in self.m_topo.online_peers:
            throughput += p.m_recved
        return int( float(throughput) / \
                len(self.m_topo.online_peers))
    def rejected_ratio(self):
        """Mean per-peer fraction of requests that were rejected."""
        if len(self.m_topo.online_peers) == 0:
            return 0
        rejected = 0
        for p in self.m_topo.online_peers:
            if p.m_total_reqs > 0:
                rejected += float(p.m_rejected)/ \
                        p.m_total_reqs
        return float(rejected) / len(self.m_topo.online_peers)
    def complete_ratio(self):
        """Mean download-completion ratio (relative available items
        over SIM_CHUNKS) -- meaningful for the file-sharing mode."""
        if len(self.m_topo.online_peers) == 0:
            return 0
        comp_ratio = 0
        for p in self.m_topo.online_peers:
            comp_ratio += float(p.avail_items_relative())/ \
                    SIM_CHUNKS
        return float(comp_ratio) / \
                len(self.m_topo.online_peers)
    def distrib_load(self):
        """Total number of items still to be distributed system-wide."""
        load = 0
        for p in self.m_topo.online_peers:
            if APP == FILE_SHARING:
                load += SIM_CHUNKS - p.avail_items_relative()
            else:
                load += (Peer.new_seq - p.m_start_seq + 1\
                        - p.m_recved)
        return load
    def load_balance(self):
        """ stat deviation of load: sample variance of the per-peer
        upload counts (m_sent); 0 with fewer than two peers."""
        if len(self.m_topo.online_peers) < 2:
            return 0
        avg_sent = 0
        for p in self.m_topo.online_peers:
            avg_sent += p.m_sent
        avg_sent = float(avg_sent)/len(self.m_topo.online_peers)
        deviation = 0
        for p in self.m_topo.online_peers:
            deviation += (p.m_sent - avg_sent) ** 2
        deviation = float(deviation) /\
                (len(self.m_topo.online_peers) - 1)
        return deviation
    def runtime_print(self, engine):
        """Print one statistics row (stdout + results file).
        Streaming columns: time, rejected, quality, late, throughput,
        load balance, response time.  File sharing columns: time,
        remaining load, completion ratio."""
        if APP == STREAMING:
            print "%d\t%.3f\t%.3f\t%d\t%d\t%.3f\t%.2f" %\
                    (engine.m_current_time / 1000, \
                    self.rejected_ratio(), self.avg_quality(),\
                    self.avg_late(), self.avg_throughput(),\
                    self.load_balance(), self.avg_resp_time())
            self.m_file.write(
                    "%d\t%.3f\t%.3f\t%d\t%d\t%.3f\t%.2f\n" %\
                    (engine.m_current_time / 1000, \
                    self.rejected_ratio(), self.avg_quality(),\
                    self.avg_late(), self.avg_throughput(),\
                    self.load_balance(), self.avg_resp_time() ) )
        else:
            print "%d\t%d\t%.3f" %\
                    (engine.m_current_time / 1000,\
                    self.distrib_load(),\
                    self.complete_ratio())
            self.m_file.write( "%d\t%d\t%.3f\n" %\
                    (engine.m_current_time / 1000,\
                    self.distrib_load(),\
                    self.complete_ratio()) )
    def delay_cdf(self):
        """Write the CDF of per-peer startup (buffering) delays, in
        seconds, to the 'delay' file.  Relies on Python 2 integer
        division for the per-second bucketing."""
        max_delay = 0
        for p in self.m_topo.all_peers:
            if p.m_buffering == False and \
                    p.m_buffer_time > max_delay:
                max_delay = p.m_buffer_time
        print "the max delay:", max_delay
        if max_delay == 0: return
        points = [0,]*(max_delay/1000 + 1)
        for p in self.m_topo.online_peers:
            delay = p.m_buffer_time
            # a peer with delay d counts toward every bucket > d
            for v in range(delay/1000 + 1, max_delay/1000 + 1):
                points[v] += 1
        print "writing delay cdf to file ... "
        fp = open('delay', 'w')
        fp.write('SAMPLE_SEQ:%d, SAMPLE_SEQ_TIME:%d\n\n' % \
                (SAMPLE_SEQ, SAMPLE_SEQ_TIME))
        fp.write('0\t0.00\n')
        for v in range(0, max_delay/1000 + 1):
            fp.write('%d\t%.2f\n' % (v+1, float(points[v])/\
                    len(self.m_topo.online_peers)) )
        fp.close()
#################################################################
# Simulation entry point: build the topology, schedule every peer's
# join/leave, load or randomize the latency matrix, then run the
# event engine for SIM_DURATION virtual time units.
topology = UnderTopo()
engine = EventEngine(topology)
print "wait please! loading data ..."
# all join/leave events are queued before the clock starts
topology.schedule_all_peers(engine)
if USE_DELAY_MATRIX:
    topology.load_latency_matrix(MATRIX_FILE)
else:
    topology.random_latency_matrix()
print "simulation start ..."
engine.run(SIM_DURATION)
print "simulation finish at %d."%(engine.m_current_time)
# print "time cost: ", time.clock()
# vim: foldmethod=indent
| [
"jiqingwu@gmail.com"
] | jiqingwu@gmail.com |
fe7c36c87cbcd95b486451126b116d1e7cdd26ec | f4ef5247ad75394272a66bca5cfb68c55581cb7c | /0x16-api_advanced/1-top_ten.py | 871adf90de6f108ab79f5ec38def973f8880bd77 | [] | no_license | AugustineArthur/alx-system_engineering-devops-1 | d6e48bc1e4d3990c340dab318ad71064a04e0b34 | 9ce83db502232e834522c1a49172277e778bbc0d | refs/heads/main | 2023-08-23T04:08:08.225245 | 2021-10-09T21:11:05 | 2021-10-09T21:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | #!/usr/bin/python3
"""
Contains the top_ten function
"""
import requests
def top_ten(subreddit):
"""prints the titles of the top ten hot posts for a given subreddit"""
if subreddit is None or type(subreddit) is not str:
print(None)
r = requests.get('http://www.reddit.com/r/{}/hot.json'.format(subreddit),
headers={'User-Agent': 'Python/requests:APIproject:\
v1.0.0 (by /u/fraol21)'},
params={'limit': 10}).json()
posts = r.get('data', {}).get('children', None)
if posts is None or (len(posts) > 0 and posts[0].get('kind') != 't3'):
print(None)
else:
for post in posts:
print(post.get('data', {}).get('title', None))
| [
"fraoltesfaye.ft@gmail.com"
] | fraoltesfaye.ft@gmail.com |
269751f7432c6b3011370bc9f329c9432a8f265b | 9830360802428854384d6b27a172102de0e59c8f | /2902.py | d4a02794d38f68d3e4dc41bae137bb11e4388beb | [] | no_license | banje/acmicpc | d4009535ec31892f706333d812c92fddead08aa1 | 69d44a3b60d2a559563b5a1055bcc2290090e35c | refs/heads/master | 2022-07-20T20:01:56.623346 | 2020-05-16T11:30:17 | 2020-05-16T11:30:17 | 260,843,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | a=input()
b=""
for i in range(len(a)):
if ord(a[i])<91:
if ord(a[i])!=45:
b=b+a[i]
print(b) | [
"pak2t@naver.com"
] | pak2t@naver.com |
c0f2c54b513cda8972f4c3fd726ff8b8a2123fec | 829ff11164821195035406417865bbaec05c3dfc | /cifar_utils.py | 950809e8dbec7e3ee6cbe8d964de261756d3a842 | [
"MIT"
] | permissive | yukimasano/self-label | d45262771f05e2cf188cc7190b4b84fb7cd3f2b0 | 3891e376a82d1e4ac6de6d39505b692b8885c971 | refs/heads/master | 2023-07-21T06:41:17.966575 | 2023-07-16T21:46:54 | 2023-07-16T21:46:54 | 229,603,018 | 533 | 59 | MIT | 2023-07-16T21:46:55 | 2019-12-22T17:14:27 | Python | UTF-8 | Python | false | false | 8,042 | py | import torch
import torchvision
from PIL import Image
import torch.nn as nn
class CIFAR10Instance(torchvision.datasets.CIFAR10):
"""CIFAR10Instance Dataset.
"""
def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
super(CIFAR10Instance, self).__init__(root=root,
train=train,
transform=transform,
target_transform=target_transform)
def __getitem__(self, index):
#if self.train:
# img, target = self.data[index], self.targets[index]
# else:
image, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
image = Image.fromarray(image)
if self.transform is not None:
img = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
class CIFAR100Instance(CIFAR10Instance):
"""CIFAR100Instance Dataset.
This is a subclass of the `CIFAR10Instance` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
def kNN(net, trainloader, testloader, K, sigma=0.1, dim=128,use_pca=False):
net.eval()
# this part is ugly but made to be backwards-compatible. there was a change in cifar dataset's structure.
if hasattr(trainloader.dataset, 'imgs'):
trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]) # .cuda()
elif hasattr(trainloader.dataset, 'indices'):
trainLabels = torch.LongTensor([k for path,k in trainloader.dataset.dataset.dt.imgs])[trainloader.dataset.indices]
elif hasattr(trainloader.dataset, 'train_labels'):
trainLabels = torch.LongTensor(trainloader.dataset.train_labels) # .cuda()
if hasattr(trainloader.dataset, 'dt'):
if hasattr(trainloader.dataset.dt, 'targets'):
trainLabels = torch.LongTensor(trainloader.dataset.dt.targets) # .cuda()
else: # hasattr(trainloader.dataset.dt, 'imgs'):
trainLabels = torch.LongTensor([k for path,k in trainloader.dataset.dt.imgs]) # .cuda()
else:
trainLabels = torch.LongTensor(trainloader.dataset.targets) # .cuda()
C = trainLabels.max() + 1
if hasattr(trainloader.dataset, 'transform'):
transform_bak = trainloader.dataset.transform
trainloader.dataset.transform = testloader.dataset.transform
elif hasattr(trainloader.dataset.dataset.dt, 'transform'):
transform_bak = trainloader.dataset.dataset.dt.transform
trainloader.dataset.dataset.dt.transform = testloader.dataset.dt.transform
else:
transform_bak = trainloader.dataset.dt.transform
trainloader.dataset.dt.transform = testloader.dataset.dt.transform
temploader = torch.utils.data.DataLoader(trainloader.dataset,
batch_size=64, num_workers=1)
if hasattr(trainloader.dataset, 'indices'):
LEN = len(trainloader.dataset.indices)
else:
LEN = len(trainloader.dataset)
trainFeatures = torch.zeros((dim, LEN)) # , device='cuda:0')
normalize = Normalize()
for batch_idx, (inputs, targets, _) in enumerate(temploader):
batchSize = inputs.size(0)
inputs = inputs.cuda()
features = net(inputs)
if not use_pca:
features = normalize(features)
trainFeatures[:, batch_idx * batchSize:batch_idx * batchSize + batchSize] = features.data.t().cpu()
if hasattr(temploader.dataset, 'imgs'):
trainLabels = torch.LongTensor(temploader.dataset.train_labels) # .cuda()
elif hasattr(temploader.dataset, 'indices'):
trainLabels = torch.LongTensor([k for path,k in temploader.dataset.dataset.dt.imgs])[temploader.dataset.indices]
elif hasattr(temploader.dataset, 'train_labels'):
trainLabels = torch.LongTensor(temploader.dataset.train_labels) # .cuda()
elif hasattr(temploader.dataset, 'targets'):
trainLabels = torch.LongTensor(temploader.dataset.targets) # .cuda()
elif hasattr(temploader.dataset.dt, 'imgs'):
trainLabels = torch.LongTensor([k for path,k in temploader.dataset.dt.imgs]) #.cuda()
elif hasattr(temploader.dataset.dt, 'targets'):
trainLabels = torch.LongTensor(temploader.dataset.dt.targets) #.cuda()
else:
trainLabels = torch.LongTensor(temploader.dataset.labels) #.cuda()
trainLabels = trainLabels.cpu()
if hasattr(trainloader.dataset, 'transform'):
trainloader.dataset.transform = transform_bak
elif hasattr(trainloader.dataset, 'indices'):
trainloader.dataset.dataset.dt.transform = transform_bak
else:
trainloader.dataset.dt.transform = transform_bak
if use_pca:
comps = 128
print('doing PCA with %s components'%comps, end=' ')
from sklearn.decomposition import PCA
pca = PCA(n_components=comps, whiten=False)
trainFeatures = pca.fit_transform(trainFeatures.numpy().T)
trainFeatures = torch.Tensor(trainFeatures)
trainFeatures = normalize(trainFeatures).t()
print('..done')
def eval_k_s(K_,sigma_):
    """Weighted k-NN evaluation for one (K, sigma) setting.

    Uses closure variables from the enclosing function: net, testloader,
    trainFeatures (dim x N), trainLabels, C (number of classes), use_pca,
    pca and normalize.  Returns top-1 accuracy as a fraction in [0, 1].
    NOTE(review): indentation reconstructed from a flattened source;
    confirm loop nesting against the original repository.
    """
    total = 0
    top1 = 0.
    top5 = 0.
    with torch.no_grad():
        # One-hot scratch buffer, resized per batch below.
        retrieval_one_hot = torch.zeros(K_, C)# .cuda()
        for batch_idx, (inputs, targets, _) in enumerate(testloader):
            targets = targets # .cuda(async=True) # or without async for py3.7
            inputs = inputs.cuda()
            batchSize = inputs.size(0)
            features = net(inputs)
            if use_pca:
                # Project test features into the PCA space fitted on train features.
                features = pca.transform(features.cpu().numpy())
                features = torch.Tensor(features).cuda()
            features = normalize(features).cpu()
            # Cosine-style similarity against every training feature.
            dist = torch.mm(features, trainFeatures)
            yd, yi = dist.topk(K_, dim=1, largest=True, sorted=True)
            candidates = trainLabels.view(1, -1).expand(batchSize, -1)
            retrieval = torch.gather(candidates, 1, yi)
            retrieval_one_hot.resize_(batchSize * K_, C).zero_()
            retrieval_one_hot.scatter_(1, retrieval.view(-1, 1), 1)
            # Similarity-weighted votes: exp(yd / sigma) per neighbor.
            yd_transform = yd.clone().div_(sigma_).exp_()
            probs = torch.sum(torch.mul(retrieval_one_hot.view(batchSize, -1, C),
                                        yd_transform.view(batchSize, -1, 1)),
                              1)
            _, predictions = probs.sort(1, True)
            # Find which predictions match the target
            correct = predictions.eq(targets.data.view(-1, 1))
            top1 = top1 + correct.narrow(1, 0, 1).sum().item()
            top5 = top5 + correct.narrow(1, 0, 5).sum().item()
            total += targets.size(0)
    print(f"{K_}-NN,s={sigma_}: TOP1: ", top1 * 100. / total)
    return top1 / total
if isinstance(K, list):
res = []
for K_ in K:
for sigma_ in sigma:
res.append(eval_k_s(K_, sigma_))
return res
else:
res = eval_k_s(K, sigma)
return res
| [
"yuki.m.asano@gmail.com"
] | yuki.m.asano@gmail.com |
247f055462626c6b6a9df14e8daaeaa654c1b657 | 9ef6dde178e3bc8ff9c7c5c9c7888451f1133feb | /jl6583/assignment5.py | 31660f54685c25f7572138d1a622854d6cf8acda | [] | no_license | xz1082/assignment5 | 2d568f9f063da65804c10f8939f140c264a3ab4a | aca54cb9445de38021b5a92fc91c5ce0ccf879fc | refs/heads/master | 2020-12-02T15:03:31.305458 | 2014-10-18T00:01:57 | 2014-10-18T00:01:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | '''
Created on Oct 9, 2014
@author: jiayi lu(jl6583)
'''
import math as mt
class interval:
    """Integer interval with inclusive or exclusive bounds.

    Exclusive bounds are encoded by shifting the stored bound by 0.9
    toward the interior (e.g. '(1,5]' stores l_bound=1.9), so that
    integer comparisons with round() elsewhere still behave sensibly
    and __repr__ can recover the original bracket style.
    """
    def __init__(self,init_str = ''):
        """Build from a string like '[1,5)' or from a [lo, hi] list."""
        if type(init_str) == str:
            bound_list = init_str.strip(' []()').split(',')
            self.l_bound = int(bound_list[0])
            self.u_bound = int(bound_list[-1])
            #a 0.9 shift was added to represent exclusive bounds
            if init_str.strip(' ')[0] == '(':
                self.l_bound = self.l_bound+0.9
            if init_str.strip(' ')[-1] == ')':
                self.u_bound = self.u_bound-0.9
        #receiving list input for initialization
        elif type(init_str) == list:
            self.l_bound = init_str[0]
            self.u_bound = init_str[-1]
        if self.l_bound > self.u_bound:
            raise Exception('Invalid Interval!\n')
    def __repr__(self):
        """Render as '[lo,hi]' with '('/')' where the 0.9 shift marks an
        exclusive bound (detected by a non-zero fractional part)."""
        l = mt.floor(float(self.l_bound))
        u = mt.ceil(float(self.u_bound))
        if self.l_bound - l != 0.0:
            l_bracket = '('
        else:
            l_bracket = '['
        if self.u_bound - u != 0.0:
            u_bracket = ')'
        else:
            u_bracket = ']'
        output_str = '%s%d,%d%s'%(l_bracket,l,u,u_bracket)
        return output_str
def mergeIntervals(int1, int2):
    """Return a new interval covering both inputs.

    Raises Exception('Intervals not overlapping') when the two intervals
    neither overlap nor touch within one integer step (the rounded bounds
    are compared with a +/-1 tolerance so adjacent intervals merge too).
    """
    lo1, hi1 = round(int1.l_bound), round(int1.u_bound)
    lo2, hi2 = round(int2.l_bound), round(int2.u_bound)
    disjoint = lo1 > hi2 + 1 or hi1 + 1 < lo2
    if disjoint:
        raise Exception('Intervals not overlapping')
    # Overlapping/adjacent: span from the smaller lower bound to the
    # larger upper bound, preserving any 0.9 exclusive-bound shifts.
    return interval([min(int1.l_bound, int2.l_bound),
                     max(int1.u_bound, int2.u_bound)])
def mergeOverlapping(intlist=None):
    """Merge every run of overlapping/adjacent intervals in `intlist`.

    Sorts `intlist` in place by lower bound, then greedily folds each
    interval into the current run via mergeIntervals(); when that call
    raises (a gap was found), the finished run is emitted and a new run
    starts.  Returns the merged intervals in ascending order.

    Fixes over the original:
    - mutable default argument ([]) replaced by the None sentinel, so the
      default list is no longer shared across calls;
    - empty input returns [] instead of raising IndexError;
    - the bare ``except:`` is narrowed to ``except Exception`` (that is
      what mergeIntervals raises).
    """
    if intlist is None:
        intlist = []
    if not intlist:
        return []
    intlist.sort(key=lambda x: x.l_bound)
    temp_itvl = intlist[0]
    output_list = []
    for nxt in intlist[1:]:
        try:
            temp_itvl = mergeIntervals(temp_itvl, nxt)
        except Exception:
            # Gap found: the current run is complete; start a new one.
            output_list.append(temp_itvl)
            temp_itvl = nxt
    output_list.append(temp_itvl)
    return output_list
def insert(intlist, newint):
    """Append `newint` to `intlist` (in place) and return the merged result."""
    intlist.append(newint)
    merged = mergeOverlapping(intlist)
    return merged
def main():
    """Interactive driver (Python 2).

    Reads a comma-space separated list of intervals, then repeatedly
    prompts for one more interval, inserting and re-merging each time,
    until the user types 'quit'.
    """
    input_list = raw_input('List of intervals? ').split(', ')
    itvl_list = []
    for itvl_str in input_list:
        itvl_list.append(interval(itvl_str))
    while True:
        insert_input = raw_input('Interval? ')
        if insert_input == 'quit':
            break
        # initializing the interval from the input string
        try:
            insert_itvl = interval(insert_input)
            itvl_list = insert(itvl_list,insert_itvl)
            print itvl_list
        except:
            # interval() raises on malformed input or lo > hi
            print 'Invalid interval\n'
if __name__ == '__main__' :
main()
| [
"jiayi.lu@nyu.edu"
] | jiayi.lu@nyu.edu |
96f3d6b6b5992dd3ad311167dbd5f7757d1aa977 | 786de89be635eb21295070a6a3452f3a7fe6712c | /pytopsana/trunk/examples/ex_cspad.py | c3e4a63c747ba8ffe656b7d27f458dd554177fdd | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | #!/usr/bin/env python
##-----------------------------
import sys
from psana import *
import pytopsana
##-----------------------------
# --- Data source / detector setup (psana, Python 2) ---
ds = DataSource('exp=cxif5315:run=169')
evt = ds.events().next()
env = ds.env()
src = Source('DetInfo(CxiDs2.0:Cspad.0)')
#src = Source('Camp.0:pnCCD.1')
det = pytopsana.Detector(src,0) # , 0xffff)
# src)
#print evt.keys()
##-----------------------------
# Calibration constants: dump the leading values of each array.
peds = det.pedestals(evt,env)
print '\npedestals:\n', peds[0:20]
prms = det.pixel_rms(evt,env)
print '\npixel_rms:\n', prms[0:20]
pgain = det.pixel_gain(evt,env)
print '\npixel_gain:\n', pgain[0:20]
pmask = det.pixel_mask(evt,env)
print '\npixel_mask:\n', pmask[0:20]
pbkgd = det.pixel_bkgd(evt,env)
print '\npixel_bkgd:\n', pbkgd[0:20]
pstat = det.pixel_status(evt,env)
print '\npixel_status:\n', pstat[0:20]
pcmod = det.common_mode(evt,env)
print '\ncommon_mode:\n', pcmod
print '\nInstrument: ', det.inst(env)
##-----------------------------
# Raw data and pixel geometry arrays.
#det.set_print_bits(255);
det.set_def_value(-5.);
det.set_mode(1);
raw_data = det.data_int16_3(evt,env)
print '\nraw_data:\n', raw_data
print 'raw_data type: %s shape: %s' % (raw_data.dtype, raw_data.shape)
pixel_x = det.pixel_coords_x(evt,env)
print '\npixel_x:\n', pixel_x
print 'pixel_x type: %s shape: %s' % (pixel_x.dtype, pixel_x.shape)
pixel_y = det.pixel_coords_y(evt,env)
print '\npixel_y:\n', pixel_y
print 'pixel_y type: %s shape: %s' % (pixel_y.dtype, pixel_y.shape)
pixel_a = det.pixel_areas(evt,env)
print '\npixel_a:\n', pixel_a
print 'pixel_a type: %s shape: %s' % (pixel_a.dtype, pixel_a.shape)
pixel_m = det.pixel_mask_geo(evt,env)
print '\npixel_m:\n', pixel_m
print 'pixel_m type: %s shape: %s' % (pixel_m.dtype, pixel_m.shape)
print '\npixel_scale_size: ', det.pixel_scale_size(evt,env)
pixel_ix = det.pixel_indexes_x(evt,env)
print '\npixel_ix:\n', pixel_ix
print 'pixel_ix type: %s shape: %s' % (pixel_ix.dtype, pixel_ix.shape)
pixel_iy = det.pixel_indexes_y(evt,env)
print '\npixel_iy:\n', pixel_iy
print 'pixel_iy type: %s shape: %s' % (pixel_iy.dtype, pixel_iy.shape)
##-----------------------------
# Pedestal-subtract the raw data and assemble a 2-D image.
import numpy as np
nda_img = np.array(raw_data.flatten()-peds, dtype=np.double)
print '\nnda_img:\n', nda_img
print 'nda_img type: %s shape: %s' % (nda_img.dtype, nda_img.shape)
img = det.get_image(evt, env, nda_img)
print '\nimg:\n', img
print 'img type: %s shape: %s' % (img.dtype, img.shape)
##-----------------------------
# Display with an intensity window of [mean - 1*rms, mean + 6*rms].
import pyimgalgos.GlobalGraphics as gg
ave, rms = img.mean(), img.std()
gg.plotImageLarge(img, amp_range=(ave-1*rms, ave+6*rms))
gg.show()
sys.exit(0)
##-----------------------------
| [
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
c07a4ecf6c85750a174f044756d9b18802c940e6 | f45a90c83c365e2d3b7d6a7d6007f274a7e79df7 | /week 7/Authentication&CachingCodes/Fib.py | 9b712e440a953f05ccb44a494ab8a37df2ccf6d4 | [] | no_license | yiicao/final_project | a35f93eda46271c60e4b2168fdeda6eabf442df6 | 8c07c544cf6a94ba466349106d2c29268eda93d9 | refs/heads/main | 2023-04-14T16:40:35.153686 | 2021-03-29T20:59:37 | 2021-03-29T20:59:37 | 358,947,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | def fib(n):
fib_seq = [0, 1]
for i in range(2, n):
fib_seq.append(fib_seq[i - 2] + fib_seq[i - 1])
return fib_seq[-1]
print (fib(300000)) # start with 100000 and increase until it's annoying | [
"yiicao@umich.edu"
] | yiicao@umich.edu |
6f63f6b8c63983776f1eaed0b04a879f053db854 | 6a3639751ceda99b7a72eede2a75a52ac619c87b | /Stanford_CS224n NLP with Deep Learning/assignment1/.env/lib/python3.6/rlcompleter.py | e634e2a82930d55035b69c4cd76959312ef6a1f1 | [
"MIT"
] | permissive | db12138/Online_Courses_and_Materials | 3a988edf53e035a26fbf1d9cab0559382f228970 | 6a113056f4fd2667556942b3bcc9608bdf9c2968 | refs/heads/master | 2020-03-18T01:14:28.291109 | 2018-05-14T14:54:21 | 2018-05-14T14:54:21 | 134,133,889 | 1 | 3 | null | 2018-05-20T08:45:48 | 2018-05-20T08:45:48 | null | UTF-8 | Python | false | false | 52 | py | /Users/Hansen/anaconda3/lib/python3.6/rlcompleter.py | [
"hc.wang96@gmail.com"
] | hc.wang96@gmail.com |
5f10840891363c3feddf05032065e4fad8886c28 | 43900caca1bb4d22586b0ddb686510fae80afd2f | /singh-code-book/subst_cipher.py | 3287a84ea9a67a6e7786c263d39419e160106e37 | [] | no_license | almehj/crypto-learn | b97cdeb738bc983d44b407772fd0507204feb6c2 | 7b637bf02278b20d8de6450d719921428ec6993f | refs/heads/master | 2023-07-20T06:48:27.477081 | 2023-07-06T22:27:12 | 2023-07-06T22:27:12 | 185,260,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,159 | py | #!/usr/bin/env python3
import sys
from string import ascii_uppercase
class subst_cipher:
    """Monoalphabetic substitution cipher over the uppercase ASCII alphabet.

    Keeps a pair of mutually consistent dicts (clear->cipher and
    cipher->clear).  Mappings may be partial; letters with no mapping
    render as `unknown_char`.
    """
    # Placeholder emitted for alphabet letters that have no mapping yet.
    unknown_char = '.'

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset to an empty mapping over the uppercase alphabet."""
        self.clear2cipher = {}
        self.cipher2clear = {}
        self.base_alphabet = ascii_uppercase

    def read_from_cipher_map(self,cipher_map):
        """Load a full-alphabet map: cipher_map[i] enciphers base_alphabet[i].

        Must be exactly as long as the alphabet; positions holding
        non-alphabet characters leave that clear letter unmapped.
        """
        if len(cipher_map) != len(self.base_alphabet):
            raise RuntimeError(
                "Cipher map must contain %d characters, % dprovided"%
                (len(self.base_alphabet), len(cipher_map))
                )
        for cl,ci in zip(self.base_alphabet,cipher_map):
            if ci in self.base_alphabet:
                self.add_mapping(cl,ci)

    def add_mapping(self,cl,ci):
        """Map clear `cl` to cipher `ci`, first displacing any existing
        mapping touching either letter (keeps the two dicts bijective)."""
        self.remove_forward_mapping(cl)
        self.remove_reverse_mapping(ci)
        self.clear2cipher[cl] = ci
        self.cipher2clear[ci] = cl

    def remove_forward_mapping(self,cl):
        # Drop cl's mapping (and its inverse entry), if present.
        if cl in self.clear2cipher:
            ci = self.clear2cipher[cl]
            self.cipher2clear.pop(ci)
            self.clear2cipher.pop(cl)

    def remove_reverse_mapping(self,ci):
        # Drop whichever mapping has ci as its cipher letter, if any.
        if ci in self.cipher2clear:
            self.remove_forward_mapping(self.cipher2clear[ci])

    def transform(self,s,char_map):
        """Translate `s` through `char_map`; unmapped alphabet letters
        become `unknown_char`.

        NOTE(review): indentation reconstructed from a flattened source --
        this assumes non-alphabet characters (spaces, punctuation) pass
        through unchanged; confirm against the original file.
        """
        answer = []
        for cl in s:
            if cl in self.base_alphabet:
                cl = char_map.get(cl,subst_cipher.unknown_char)
            answer.append(cl)
        return ''.join(answer)

    def encipher(self,s):
        """Clear text -> cipher text."""
        return self.transform(s,self.clear2cipher)

    def decipher(self,s):
        """Cipher text -> clear text."""
        return self.transform(s,self.cipher2clear)

    def show_mapping(self,**kwargs):
        """Write the alphabet and its current cipher image, one per line.

        kwargs: outfile (default sys.stdout), prefix (default "").
        """
        outfile = kwargs.get('outfile',sys.stdout)
        prefix = kwargs.get('prefix',"")
        outfile.write("%s%s\n"%(prefix,self.base_alphabet))
        outfile.write("%s%s\n"%(prefix,self.encipher(self.base_alphabet)))

    def find_chains(self):
        """Return the mapping's letter chains (A -> f(A) -> f(f(A)) ...),
        longest first, dropping chains contained in a longer chain."""
        chains = []
        seen = []
        for cl in self.clear2cipher:
            if cl in seen:
                continue
            chain = self.chain_from_letter(cl)
            for c in chain:
                seen.append(c)
            chains.append(chain)
        chains.sort(key=len, reverse=True)
        answer = []
        for new_chain in chains:
            is_substring = False
            for old_chain in answer:
                if new_chain in old_chain:
                    is_substring = True
            if not is_substring:
                answer.append(new_chain)
        return answer

    def chain_from_letter(self,cl):
        """Follow `cl` through clear2cipher until an unmapped or repeated
        letter is reached; return the visited letters as one string."""
        answer = []
        ci = cl
        while ci in self.clear2cipher:
            if ci in answer:
                break
            answer.append(ci)
            ci = self.clear2cipher[ci]
        if ci not in answer:
            answer.append(ci)
        return ''.join(answer)

    def __str__(self):
        # The cipher image of the whole alphabet is the canonical form.
        return self.encipher(self.base_alphabet)
def read_ciphers_from_file(cipher_file, **kwargs):
    """Build one subst_cipher per valid line of `cipher_file`.

    Lines starting with '#' are skipped; lines whose length differs from
    the base alphabet are reported on stderr and skipped.  kwargs:
    base (default ascii_uppercase) sets the expected alphabet.
    """
    base_alphabet = kwargs.get('base', ascii_uppercase)
    ciphers = []
    line_num = 0
    for raw_line in cipher_file:
        line_num += 1
        line = raw_line.strip()
        if line[0] == '#':
            continue
        if len(line) != len(base_alphabet):
            sys.stderr.write("Error: Line %d has %d characters, %d expected\n"
                             % (line_num, len(line), len(base_alphabet)))
            sys.stderr.write(" Skipping line.\n")
            continue
        cipher = subst_cipher()
        cipher.read_from_cipher_map(line)
        ciphers.append(cipher)
    return ciphers
def main():
    """CLI driver: argv[1] = cipher-map file, argv[2] = cipher-text file.

    Prints the cipher text, then every candidate decipherment.
    """
    cipher_filename = sys.argv[1]
    text_filename = sys.argv[2]
    ciphers = []
    with open(cipher_filename) as cipher_file:
        ciphers = read_ciphers_from_file(cipher_file)
    with open(text_filename) as infile:
        text = infile.read()
    print("Cipher text:\n")
    print(text)
    print("\n")
    for i,C in enumerate(ciphers):
        print("Cipher %d:\n"%i)
        print(C.decipher(text))
if __name__ == "__main__":
main()
| [
"almehj@alumni.rice.edu"
] | almehj@alumni.rice.edu |
ea31bba5cc1cc85a88f67befc489f877ee9cfe15 | f10c63ee43e9e42988f36581c3dc5989eb0061ae | /Gluon_Code/gluon_conv.py | 97ba8e99114a6e828760dc99e530297daf4e61eb | [] | no_license | xcszbdnl/Toy | 1305e3d71fa357546996257cd9ea4911478ba141 | 16424d45f57b6f6868205be5acf0310fa85fb999 | refs/heads/master | 2021-09-13T21:40:15.423727 | 2018-05-04T14:39:14 | 2018-05-04T14:39:14 | 108,120,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from __future__ import print_function
from mxnet.gluon import nn, loss, Trainer
from mxnet import ndarray as nd
from mxnet import autograd
import utils
def get_net():
    """Build a LeNet-style CNN for 10-class classification (mxnet/gluon).

    Two conv+maxpool stages (20 then 50 channels), flatten, a 128-unit
    dense layer, and a 10-way linear output (logits; the softmax is
    applied by the loss).
    """
    net = nn.Sequential()
    with net.name_scope():
        net.add(
            nn.Conv2D(channels=20, kernel_size=5, activation='relu'),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Conv2D(channels=50, kernel_size=3, activation='relu'),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Flatten(),
            nn.Dense(128, activation='relu'),
            nn.Dense(10)
        )
    return net
# --- Training script: MNIST with SGD, 5 epochs -------------------------
# NOTE(review): indentation reconstructed from a flattened source;
# backward()/step() are assumed to sit outside autograd.record() -- confirm.
net = get_net()
net.initialize()
batch_size = 128
train_iter, test_iter = utils.loadMnistData(batch_size)
# Debug peek at the first batch, then stop iterating.
for data, label in train_iter:
    print(data)
    break
softmax_loss = loss.SoftmaxCrossEntropyLoss()
trainer = Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
epochs = 5
for epoch in range(epochs):
    total_loss = .0
    total_acc = .0
    for data, label in train_iter:
        # Record the forward pass so gradients can be computed.
        with autograd.record():
            output = net(data)
            losses = softmax_loss(output, label)
        losses.backward()
        trainer.step(batch_size)
        total_loss += nd.mean(losses).asscalar()
        total_acc += utils.accuracy(output, label)
    test_acc = utils.evaluate_accuracy(test_iter, net)
    print('Epoch %d, Train Loss: %f, Train acc: %f, Test acc: %f\n' % (
        epoch, total_loss / len(train_iter), total_acc / len(train_iter), test_acc
    ))
| [
"xcszbdnl@gmail.com"
] | xcszbdnl@gmail.com |
bca4bd718ec274aba0bcf10d62c76a0deca3585f | 13e8fe5585975e7f969ecb38ec1219a40c078b7b | /packages/vorbis/build.py | ebf3e52250de0f65728075305e32fe58f60bbccc | [] | no_license | handsomegui/mpd-win32-build | dbfe2387e5babc7d7d99f7b21bbe45dafc6ea54c | 5a614e30d0dad73fa46cf2e5d36f4ffd58be2864 | refs/heads/master | 2020-05-27T19:38:43.439781 | 2013-02-02T17:10:02 | 2013-02-02T17:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | build(static_lib=True)
collect_licenses('COPYING AUTHORS')
| [
"denis@crazydev.net"
] | denis@crazydev.net |
42a31cb2215dcd7cc3cea56f2a5b30c0e7771e4f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /DGpxmRkADuZaWHJxZ_14.py | 180a83ff26de40951119e0535277ed8ec34d08b4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | """
Steve and Maurice have racing snails. They each have three, a slow `s`, medium
`m` and fast `f` one. Although Steve's snails are all a bit stronger than
Maurice's, Maurice has a trick up his sleeve. His plan is:
1. Round 1: `[s, f]` Sacrifice his slowest snail against Steve's fastest.
2. Round 2: `[m, s]` Use his middle snail against Steve's slowest.
3. Round 3: `[f, m]` Use his fastest snail against Steve's middle.
Create a function that determines whether Maurice's plan will work by
outputting `True` if Maurice wins 2/3 games.
The function inputs:
1. List 1: `[s, m, f]` for Maurice.
2. List 2: `[s, m, f]` for Steve.
### Examples
maurice_wins([3, 5, 10], [4, 7, 11]) ➞ True
# Since the matches are (3, 11), (5, 4) and (10, 7), Maurice wins 2 out of 3.
maurice_wins([6, 8, 9], [7, 12, 14]) ➞ False
# Since the matches are (6, 14), (8, 7) and (9, 12), Steve wins 2 out of 3.
maurice_wins([1, 8, 20], [2, 9, 100]) ➞ True
### Notes
* Maurice wins if his competing snail's speed **strictly** exceeds Steve's snail's speed.
* Steve will always play in this order: `[f, s, m]`.
* The order you'll get the snails is always in ascending order.
"""
def maurice_wins(m_snails, s_snails):
    """Return True if Maurice's plan wins the best-of-three match.

    Inputs are [slow, medium, fast] speed lists in ascending order.
    Match-ups per the plan (Maurice vs Steve): (s, f), (m, s), (f, m);
    Maurice wins a race only when his snail's speed strictly exceeds
    Steve's.

    Fix: the original tested ``mscore == 2`` and therefore returned
    False when Maurice swept all three races; winning 3/3 also means the
    plan worked, so the test is now ``>= 2``.
    """
    wins = sum([
        m_snails[0] > s_snails[2],  # slowest sacrificed vs Steve's fastest
        m_snails[1] > s_snails[0],  # medium vs Steve's slowest
        m_snails[2] > s_snails[1],  # fastest vs Steve's medium
    ])
    return wins >= 2
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0f13a3d51fb6d6c6d66b40c54ee6da40367dc232 | d8b13203c39e68e459638decc44a8bf9b3a3d925 | /content/migrations/0004_form_to_page_back_relation.py | eb34793083050b1cd3acb1f88296d45156f2254e | [
"0BSD"
] | permissive | tbrlpld/headless-wagtail-form-backend | 26266afbbf41cb53cad691b37ac82254dd201ce6 | b6ba81db8ea705fbda2c75b77a0075fb20d67beb | refs/heads/master | 2022-12-24T01:14:39.185345 | 2020-10-02T22:09:48 | 2020-10-02T22:09:48 | 298,130,570 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.0.10 on 2020-09-29 02:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forms', '0001_initial'),
('content', '0003_auto_20200929_0125'),
]
operations = [
migrations.AlterField(
model_name='somepage',
name='contact_form',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='used_on_page', to='forms.FormPage'),
),
]
| [
"tibor@lpld.io"
] | tibor@lpld.io |
d27de3ae06c82ca21feafe92b90698f9254ec67c | 7c5da9f7299c5f5080fb5f7416caede5b4d92d6f | /0x01-python-if_else_loops_functions/101-remove_char_at.py | 5b08ff0d3120debe562c3e8771f2524182cd09e7 | [] | no_license | stefansilverio/holbertonschool-higher_level_programming | eb0b9415047eb089d69e4099ff00d1f9ed529a4d | f47fc1817245fa41e597c9b03707687c78bc80e6 | refs/heads/master | 2020-04-09T10:20:45.203061 | 2019-05-17T00:36:42 | 2019-05-17T00:36:42 | 160,268,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #!/usr/bin/python3
def remove_char_at(str, n):
    """Print `str` with the character at index `n` removed (no newline).

    If `n` is out of range (negative or >= len(str)), the string is
    printed unchanged.  Always returns '' (checker convention).
    Note: the parameter deliberately keeps its original name `str`
    (shadowing the builtin) to preserve the public interface.

    Fix: the original used ``str.replace(str[n], "")``, which removed
    EVERY occurrence of that character instead of only the one at
    index n (e.g. "hello", 2 -> "heo" instead of "helo").  Slicing
    removes exactly one character.
    """
    length = len(str)
    if n >= 0 and n < length:
        str2 = str[:n] + str[n + 1:]
        print("{0}".format(str2), end='')
    else:
        print("{}".format(str), end='')
    return ('')
| [
"494@holbertonschool.com"
] | 494@holbertonschool.com |
227ded274134c5beab9559db115e8a1a32a60a62 | d1603e14ccc9ff17518468f447d7dab17d82193a | /supercar/views.py | c4e366d0c728b62952537f1c01a4f951b662395c | [] | no_license | therogue/supercar | b91b007a6892bba6504a69665dbfbfa10a176d61 | 65910b358a9d949175fc5b2d55b4d51353bf241b | refs/heads/master | 2022-02-19T22:39:49.512360 | 2019-05-26T18:40:30 | 2019-05-26T18:40:30 | 188,715,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Placeholder landing view: returns a static greeting HttpResponse."""
    return HttpResponse("Hello, world. You're about to start a race!")
| [
"alex@roguemark.com"
] | alex@roguemark.com |
cffde11e5499da3323f62f5f2144021b04c466e4 | 7dc5b51bfa7083b6ce31ccbd387607fef605028b | /ellyscraper/ellyscraper.py | 6498262e225f01c145d4638503164a952dc96717 | [] | no_license | igorecarrasco/Elly | 22e920660554d271128e87e6338e46340f191391 | 514a7aa6f35fe820a6bbaec45d0eea8cbb3fd7be | refs/heads/master | 2021-01-11T05:58:44.848327 | 2017-04-04T19:52:27 | 2017-04-04T19:52:27 | 72,024,282 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,268 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PARSE.LY API SCRAPER
"""
import urllib2
import oauth2
import json
import psycopg2
import os
import datetime
from os.path import join,dirname
from dotenv import load_dotenv
from BeautifulSoup import BeautifulSoup
#load env path and file
dotenv_path = join(dirname(__file__),'..','.env')
load_dotenv(dotenv_path)
#load parameters from env file
token = os.getenv('parselytoken')
apikey= os.getenv('parselyapikey')
db=os.getenv('db')
dbuser=os.getenv('dbuser')
dbpassword=os.getenv('dbpassword')
dbhost=os.getenv('dbhost')
#connect to db
conn = psycopg2.connect(database=db, user=dbuser, password=dbpassword, host=dbhost)
cur = conn.cursor()
#Make API calls for top 20 posts in the last 24h and 48h
umdia = 'http://api.parsely.com/v2/analytics/posts?apikey='+apikey+'&secret='+token+'&days=1&limit=15'
doisdia = 'http://api.parsely.com/v2/analytics/posts?apikey='+apikey+'&secret='+token+'&days=2&limit=15'
tresdia = 'http://api.parsely.com/v2/analytics/posts?apikey='+apikey+'&secret='+token+'&days=3&limit=15'
#capture 24h stats
response = urllib2.urlopen(umdia)
dados = json.load(response)['data']
#create clean list out of 24h data with only the things we need
#this was done to be able to compare lists as "Hits" field would yield different results
#thus making comparison impossible
listalimpa = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
listalimpa.append([title, tags, pubdate, link, thumb, authors, section])
i=i+1
#repeat for 48h stats
response = urllib2.urlopen(doisdia)
dados = json.load(response)['data']
listalimpa2 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
listalimpa2.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#repeat for 72h stats
response = urllib2.urlopen(tresdia)
dados = json.load(response)['data']
listalimpa3 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
listalimpa3.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#compare lists and return only posts that were present in both 24h, 48h and 72h calls
novalista =[]
for element in listalimpa:
if element in listalimpa2:
if element in listalimpa3:
novalista.append(element)
#start of section-specific calls
#Life, 72h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life/detail?apikey='+apikey+'&secret='+token+'&days=3&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa1 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa1.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#life - 48h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life/detail?apikey='+apikey+'&secret='+token+'&days=2&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa2 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa2.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#for 24h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life/detail?apikey='+apikey+'&secret='+token+'&days=1&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa3 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa3.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
lifelista =[]
for element in lifelimpa1:
if element in lifelimpa2:
if element in lifelimpa3:
lifelista.append(element)
for element in lifelista:
novalista.append(element)
#Life and Style -72h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life_and_Style/detail?apikey='+apikey+'&secret='+token+'&days=3&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa1 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa1.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#life - 48h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life_and_Style/detail?apikey='+apikey+'&secret='+token+'&days=2&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa2 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa2.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#for 24h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Life_and_Style/detail?apikey='+apikey+'&secret='+token+'&days=1&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa3 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa3.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
lifelista =[]
for element in lifelimpa1:
if element in lifelimpa2:
if element in lifelimpa3:
lifelista.append(element)
for element in lifelista:
novalista.append(element)
#Arts section
sectioncall = 'http://api.parsely.com/v2/analytics/section/Arts/detail?apikey='+apikey+'&secret='+token+'&days=3&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa1 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa1.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#life - 48h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Arts/detail?apikey='+apikey+'&secret='+token+'&days=2&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa2 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa2.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
#for 24h
sectioncall = 'http://api.parsely.com/v2/analytics/section/Arts/detail?apikey='+apikey+'&secret='+token+'&days=1&limit=10'
response = urllib2.urlopen(sectioncall)
dados = json.load(response)['data']
lifelimpa3 = []
i=0
while i<len(dados):
title=dados[i]['title']
tags=[]
a=0
while a<len(dados[i]['tags']):
tags.append(dados[i]['tags'][a])
a=a+1
pubdate=dados[i]['pub_date']
link=dados[i]['link']
thumb=dados[i]['image_url']
authors=[]
b=0
while b<len(dados[i]['authors']):
authors.append(dados[i]['authors'][b])
b=b+1
section=dados[i]['section']
lifelimpa3.append([title , tags , pubdate , link, thumb, authors, section])
i=i+1
lifelista =[]
for element in lifelimpa1:
if element in lifelimpa2:
if element in lifelimpa3:
lifelista.append(element)
for element in lifelista:
novalista.append(element)
i=0
while i<len(novalista):
urllink=novalista[i][3]
soup = BeautifulSoup(urllib2.urlopen(urllink))
result = soup.findAll('meta',{'name' : 'article.headline'})
try:
result = result[0]['content']
except IndexError:
result = ''
novalista[i].append(result)
i=i+1
# write to the database title, tag list, published date, link, thumbnail url, author
# in the corresponding fields
i=0
while i<len(novalista):
title=novalista[i][0]
tags=novalista[i][1]
timedate=novalista[i][2].replace("T", " ")
try:
timedate=datetime.datetime.strptime(timedate,"%Y-%m-%d %H:%M:%S")
except ValueError:
timedate=datetime.datetime.strptime(timedate,"%Y-%m-%d %H-%M-%S")
if timedate == None:
timedate = datetime.datetime.now()
link=novalista[i][3]
thumb=novalista[i][4]
author=novalista[i][5]
section=novalista[i][6]
socialhed=novalista[i][7]
replace=["u'","'","[","]",'"']
for a in replace:
tags=str(tags).replace(a,"")
author=str(author).replace(a,"")
title=title.replace(a,"")
section=section.replace(a,"")
author = author.encode('utf_8')
author = author.replace("’","'")
title = title.encode('utf_8')
title = title.replace("’","'")
cur.execute("INSERT INTO elly_elly (title, tags, pubdate, link, thumb, author, section, socialhed) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",(title,tags,timedate,link,thumb,author,section,socialhed))
i=i+1
cur.execute("DELETE FROM elly_elly WHERE id NOT IN (SELECT min(id) FROM elly_elly GROUP BY link)")
#commit
conn.commit()
#close cursor and connection with db
cur.close()
conn.close()
# it worked!
print "SUCESS" | [
"igor.ecarrasco@gmail.com"
] | igor.ecarrasco@gmail.com |
936edd4149e8ba555aa390d32ec21c3c05266e6e | a6bcacbbb366774182bc7d0529ed205b4cccd19d | /bert/model.py | 81f2d9ee8f03b633109c18c8a9ffbb79524a3b19 | [] | no_license | yk287/NLP_TF2 | be0e9d346930a3ea28687d4ff3760b837e955868 | 67ef13effd03cf9033a5facb3168ea9d4970b323 | refs/heads/master | 2022-11-24T11:29:40.184738 | 2020-07-26T03:49:56 | 2020-07-26T03:49:56 | 276,225,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub
#run wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py
import tokenization
from official.modeling import tf_utils
from official import nlp
from official.nlp import bert
# Load the required submodules
import official.nlp.optimization
import official.nlp.bert.bert_models
import official.nlp.bert.configs
import official.nlp.bert.run_classifier
import official.nlp.bert.tokenization
import official.nlp.data.classifier_data_lib
import official.nlp.modeling.losses
import official.nlp.modeling.models
import official.nlp.modeling.networks
class bertModel():
def __init__(self, opts):
self.opts = opts
self.bert = hub.KerasLayer(opts.model_loc, trainable=True)
#build the model
self.build_model()
def build_model(self):
input_word_ids = Input(shape=(self.opts.max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(self.opts.max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(self.opts.max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = self.bert([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = Dense(self.opts.num_classes, activation='sigmoid')(clf_output)
self.model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
class bertTrainer():
def __init__(self, data, bert, opts):
self.opts = opts
self.data = data
self.bert = bert.bert
self.model = bert.model
def initTrainer(self):
self.init_optimizer()
self.build_model()
if self.opts.print_model:
self.model.summary()
def build_model(self):
self.model.compile(self.optimizer, loss='binary_crossentropy', metrics=['accuracy'])
def init_optimizer(self):
train_data_size = len(self.data.X_train)
steps_per_epoch = int(train_data_size / self.opts.batch_size)
num_train_steps = steps_per_epoch * self.opts.epochs
warmup_steps = int(self.opts.epochs * train_data_size * 0.1 / self.opts.batch_size)
# creates an optimizer with learning rate schedule
self.optimizer = nlp.optimization.create_optimizer(
self.opts.lr, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)
def train_model(self):
self.initTrainer()
checkpoint = ModelCheckpoint('1model.h5', monitor='val_loss', save_best_only=True)
self.train_history = self.model.fit(
self.data.train_input, self.data.y_train,
validation_split=self.opts.validation_split,
epochs=self.opts.epochs,
callbacks=[checkpoint],
batch_size=self.opts.batch_size
)
| [
"noreply@github.com"
] | noreply@github.com |
e38fa81deaa2096641268b1622e18201366f2511 | 43374981d652806124c721b527c1ce2c00909f7b | /Ironman.py | 1a00c674f602ac111c36ea27d91a07895b7c4b92 | [] | no_license | Sovicheanhim/Python | 37f059a817d5b02c00a65b8d55562060d27032d6 | 8743daa0130e540eda947314754338657834a44f | refs/heads/master | 2020-09-24T15:29:44.898572 | 2019-12-28T11:57:36 | 2019-12-28T11:57:36 | 225,790,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | def i_tri(s):
return "Starting Line... Good Luck!" if s == 0 else "You're done! Stop running!" if s >= 140.8 else ({"Swim":"{:0.2f}".format(140.6-s)+" to go!"} if s <= 2.4 else {"Bike":"{:0.2f}".format(140.6-s)+ " to go!"} if s <= 114.4 else {"Run": "{:0.2f}".format(140.6-s)+" to go!"} if s<= 130.6 else {"Run":"Nearly there!"})
print(i_tri(36))
# print("{:0.2f}".format(1.5)) | [
"nhimsovichea18@kit.edu.kh"
] | nhimsovichea18@kit.edu.kh |
48a2f29b6dd4ea6ec1887f15ba6a5a590bcccbe1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2871/60825/301237.py | bb1e542bda489a7e94ef28cefb577359aa3faa8d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | t=""
while True:
try:
ts=input()
t+=ts
t+="#"
except:
break
if t=='3#1 1 1#':
print('''1''')
elif t=='2#2 2#':
print('''0''')
elif t.startswith('57#2 1 2 2 1 2 2 1 1 1 2 1 1'):
print('''28''')
elif t.startswith('47#2 1 1 1 1 2 2 1 2 1 1 1 1 2') or t.startswith('49#1 1 2 1 1 2 2 1 2 1 1'):
print('''22''')
elif t.startswith('95#2 1 1 1 1 1 2 1 2 2 2 2 1 1 1 2') or t.startswith('99#1 2 1 1 2 1 2 2 1 1 2 2 1 1 1 1 1 1 1 2'):
print('''46''')
elif t.startswith('4#1 1 2 1#'):
print('''1''')
elif t.startswith('47#1 2 1 2 2 1 1 2 2 1 2 2 2 1'):
print('''22''')
elif t.startswith('7#2 2 2 1 1 1 1#'):
print('''3''')
else:
print(t) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
09d24ef5412e862756f211e6f6befbfa45e97875 | 468de14f263ee6d8dbb5123164055592d83d57d0 | /practice/Serialization/memo_clear.py | b61bf72790c373629625171e74fc4c7af81ad043 | [] | no_license | harry-fan/spider | baa6272c11132d38f683fd56cd6fc581f6e24aff | ce896d9f544a10aa2de65ebfafe22fb6ffd38385 | refs/heads/master | 2020-03-26T22:56:32.980615 | 2018-09-08T08:28:07 | 2018-09-08T08:28:07 | 145,497,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | # coding:utf-8
import StringIO
import pickle
| [
"326946939@qq.com"
] | 326946939@qq.com |
613f9e86bb1cc1e5e293a3bf622ccede43168ac7 | 929687769c681173427917213ae87a8cef5ea8bc | /extract_bar_data.py | d23ecccc7c7667b465735815a3ceb9dc2ec581d1 | [] | no_license | alextryonpdx/blazerbar_project | 91afb60edd2d646d523062f18b4dbc74ef74d0f0 | 1613569d072fef82fe888aef5d990cb62c472415 | refs/heads/master | 2020-03-30T17:42:59.529999 | 2015-03-31T23:03:53 | 2015-03-31T23:03:53 | 32,547,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | import json
import os
from django.template.defaultfilters import slugify
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blazerbar_project.settings')
import django
django.setup()
from blazerbar.models import Bar
def add_bar(name, location, website, image_url, tvsize, sound, happyhour):
print name
b = Bar.objects.get_or_create(name=name)[0]
b.location = location
b.website = website
b.image_url = image_url
#b.neighborhood = neighborhood
b.bar_slug = slugify(name)
b.tv_size = tvsize
b.sound = sound
b.happyhour_hour = happyhour
b.save()
return b
for i in range(30):
f = open('blazerbar/yelp-data/results'+ str(i+1) +'.txt', 'r')
getback = json.load(f)
for biz in getback:
name = biz['name']
website = biz['url']
location = biz['location']['address'][0]
image_url = biz['image_url']
image_url = biz['image_url']
add_bar(name, location, website, image_url, "","","")
| [
"alextryonpdx@gmail.com"
] | alextryonpdx@gmail.com |
9bff6e51cc95919783c4e18271253272f45e1d0a | 9071dd25a64a93bb542312ccbaf04f39bdd27a84 | /500. 键盘行.py | 10d6a814d2c4d436f609a76bdbda8ccbdebe0685 | [] | no_license | JancisWang/leetcode_python | c00c927c507b67f3191e7f1a5f37c7b9d87bf5ce | c843234ba8e64a53239c6e1110bd083d5bcc9e1c | refs/heads/master | 2021-01-02T19:07:21.811156 | 2020-02-11T13:43:59 | 2020-02-11T13:43:59 | 239,757,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | '''
给定一个单词列表,只返回可以使用在键盘同一行的字母打印出来的单词。键盘如下图所示。
示例:
输入: ["Hello", "Alaska", "Dad", "Peace"]
输出: ["Alaska", "Dad"]
注意:
你可以重复使用键盘上同一字符。
你可以假设输入的字符串将只包含字母。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/keyboard-row
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution:
def findWords(self, words: List[str]) -> List[str]:
row_1 = {'Q', 'W', 'E','R', 'T', 'Y', 'U', 'I', 'O', 'P'}
row_2 = {'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L'}
row_3 = {'Z', 'X', 'C', 'V', 'B', 'N', 'M'}
def match(row, string):
string = set(string.upper())
for s in string:
if s not in row:
return False
return True
result = []
for word in words:
if match(row_1, word) or match(row_2, word) or match(row_3, word):
result.append(word)
return result | [
"18810172610@163.com"
] | 18810172610@163.com |
3d9b5a2a55009890b0767a2d1fb14ef055274979 | 88d57abfad4b863abbdb798ff80c4baf1432b966 | /hisplot_for_data.py | de9dfce4c7517a0c84c6d7b3641bcf8c31089fa8 | [] | no_license | MarkovAnton/First-project | 9b8a007e6a482bb744e2ba4145914722b8bbb72f | 6a111242142bf87437ff2b4e414f900de1144a5b | refs/heads/main | 2023-06-04T13:48:26.037143 | 2021-06-17T13:39:13 | 2021-06-17T13:39:13 | 374,131,062 | 0 | 0 | null | 2021-06-17T13:39:14 | 2021-06-05T14:13:16 | null | UTF-8 | Python | false | false | 266 | py | import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
def his_plot(train):
pl = pd.DataFrame()
pl['sales'] = train['sales']
sns.histplot(data=pl, x='sales', bins=40)
plt.title('Sales bar chart')
plt.show()
| [
"markov_anton2001@mail.ru"
] | markov_anton2001@mail.ru |
39c3cac1154d8010727d17fdc16c8cdeca1b9c8c | fcd744030cce61eb0ee709995e5b008e89f222f0 | /docs/conf.py | 47135000f73b7aac67a5b31e0deec7010296e328 | [
"ISC"
] | permissive | usingnamespace/pyramid_authsanity | 20223d7f6812707a2423a44f0eeebb34d2f08dce | 98795f37e89a6cb06701d8d70fe54f94beec6ae8 | refs/heads/main | 2023-01-13T06:10:40.332856 | 2022-12-29T13:06:49 | 2022-12-29T13:06:49 | 42,696,878 | 19 | 6 | ISC | 2023-09-09T04:21:59 | 2015-09-18T03:15:55 | Python | UTF-8 | Python | false | false | 4,539 | py | import pkg_resources
import sys
import os
import shlex
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"repoze.sphinx.autointerface",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyramid_authsanity"
copyright = "2015, Bert JW Regeer"
author = "Bert JW Regeer"
version = release = pkg_resources.get_distribution("pyramid_authsanity").version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
modindex_common_prefix = ["pyramid_authsanity."]
# -- Options for HTML output ----------------------------------------------
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "pyramid_authsanitydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pyramid_authsanity.tex",
"pyramid\\_authsanity Documentation",
"Bert JW Regeer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "pyramid_authsanity", "pyramid_authsanity Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyramid_authsanity",
"pyramid_authsanity Documentation",
author,
"pyramid_authsanity",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
| [
"bertjw@regeer.org"
] | bertjw@regeer.org |
f32722a2c71fb650e88c9b9b7145ce1590beb399 | 109b24ada597936b60e7750ad6a7ed226a469081 | /blockchain_main.py | c4f6e5ceead7aeaaa5a6ca343ca25473048a4a5d | [] | no_license | Fukuhara-Kichinosuke/myblockchain | 272df55e123e21a5c543c4d4a6b83606ddf6e3cc | c805504a5b6222440ee0b76c308d11e6bbe41fe0 | refs/heads/master | 2020-05-22T18:51:18.145805 | 2017-03-12T06:48:05 | 2017-03-12T06:48:05 | 84,715,977 | 0 | 0 | null | 2017-03-12T10:01:33 | 2017-03-12T10:01:33 | null | UTF-8 | Python | false | false | 1,291 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 2017
@author: fukuharakichinosuke
"""
from blockchain_mod import blockDict
from blockchain import blockexplorer
class_00 = blockDict # blockDict class
def Main():
print("***block data components***")
hashValue = '000000000000000016f9a2c3e0f4c1245ff24856a79c34806969f5084f410680'
blockDict = class_00.getBlockDict(hashValue)
class_00.printDictvalue(blockDict)
print("***transaction data components***")
transactionDict = class_00.getTransactionDict(blockDict['transactions'])
class_00.printDictvalue(transactionDict)
print("***transaction output data***")
outputDict = class_00.getOutputDict(transactionDict['outputs'])
class_00.printDictvalue(outputDict)
print("***address data components***")
address = blockexplorer.get_address('1HS9RLmKvJ7D1ZYgfPExJZQZA1DMU3DEVd')
addressDict = class_00.getAddressDict(address)
class_00.printDictvalue(addressDict)
print("***unspentoutputs data components***")
outs = blockexplorer.get_unspent_outputs('1HS9RLmKvJ7D1ZYgfPExJZQZA1DMU3DEVd')
unspentoutputDict = class_00.getUnspentoutputsDict(outs[0])
class_00.printDictvalue(unspentoutputDict)
if __name__ == "__main__":
Main()
| [
"noreply@github.com"
] | noreply@github.com |
3a3e79842b58a156777a8d896a0f049e1397d0a8 | 3b7a061e406e072de15af4e92216b4800da0edba | /mysite/settings.py | 94405729b97cfb1e0171340d7af42abfb26c1211 | [] | no_license | juliakastrup/1stG | 45df5278f441f2fab88324282225003f8f18c7af | 57a08eec627ddd860f3e5ba1785f9f51c87976c3 | refs/heads/master | 2021-09-23T22:33:19.845219 | 2018-09-28T14:23:09 | 2018-09-28T14:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p@jbmh6d4gs^)^z13qyp_homh5wc^noskvp)6&z4(sz&j8^7nh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"jkastrupb@gmail.com"
] | jkastrupb@gmail.com |
56c0f5b4ad712dcd53d029b39fa44127f8f31119 | 571e885363ba484e6f6df6544c2ad11e0640695d | /ratings/views.py | 7a25aec28722bb645a075010ee86cfb2db1bb0e9 | [] | no_license | extreme1337/django-netflix-clone-backend | 99860c0e973a1120c2460e712782eed211e276eb | b3a6900120d65d6c604bc12f7124136d94a43ab1 | refs/heads/main | 2023-05-25T01:00:48.713179 | 2021-06-08T07:00:59 | 2021-06-08T07:00:59 | 370,954,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | from django import forms
from django.shortcuts import render
from .forms import RatingForm
from django.http import HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from .models import Rating
# Create your views here.
def rate_object_view(request):
if not request.user.is_authenticated:
return HttpResponseRedirect('/')
if request.method == "POST":
form = RatingForm(request.POST)
if form.is_valid():
object_id = form.cleaned_data.get('object_id')
rating = form.cleaned_data.get('rating')
content_type_id = form.cleaned_data.get('content_type_id')
c_type = ContentType.objects.get_for_id(content_type_id)
obj = Rating.objects.create(
content_type=type,
object_id=object_id,
value=rating,
user=request.user
)
next_path = form.cleaned_data.get('next')
return HttpResponseRedirect(next_path)
return HttpResponseRedirect('/')
| [
"marko.miseljic.14@gmail.com"
] | marko.miseljic.14@gmail.com |
a981eb8f728c98e588854c450929dd46f0c4a2aa | 090957b375ecff951d87caea1e644ac4b17616f6 | /tests/year_test.py | e53a4d2e08f6c97c40c8e360f9c260726c0454b2 | [
"MIT"
] | permissive | minishrink/calendargen | d3635f304c8eee70e162e1861e65db1dbc1d007b | 0c509544e1d50f3348b4e1e8725808c978e3a358 | refs/heads/master | 2020-04-17T09:54:26.770600 | 2019-11-30T20:42:41 | 2019-11-30T20:42:41 | 166,478,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py |
from typing import Callable
from calendar_lib.year import leap_years_between, year_starts_on, Year
def unit_test_leap_years_between(x: int, y: int, expected: int) -> None:
assert leap_years_between(x,y) == expected
def test_leap_years_between() -> None:
unit_test_leap_years_between(1600, 1600, 0)
unit_test_leap_years_between(1600, 1601, 1)
unit_test_leap_years_between(1600, 1604, 1)
unit_test_leap_years_between(1600, 1605, 2)
# final year should not be included
unit_test_leap_years_between(1600, 1696, 24)
# ensure multiples of 4 between 100s are counted regardless
unit_test_leap_years_between(1600, 1697, 25)
# ensure multiples of 100 only count if they are multiples of 400
unit_test_leap_years_between(1600, 1700, 25)
# check for normal behaviour after a century
unit_test_leap_years_between(1600, 1701, 25)
unit_test_leap_years_between(1600, 1703, 25)
unit_test_leap_years_between(1600, 1704, 25)
unit_test_leap_years_between(1600, 1705, 26)
def unit_test_year_starts_on(y: int, expected: int) -> None:
assert year_starts_on(y) == expected
def test_year_starts_on() -> None:
# hardcoded dates as verified by Wikipedia
unit_test_year_starts_on(1583, 5)
unit_test_year_starts_on(1600, 5)
unit_test_year_starts_on(1700, 4)
unit_test_year_starts_on(1800, 2)
unit_test_year_starts_on(1900, 0)
def unit_test_year_getters(Y: Year, y: int, first: int, start: int = 0, end: int = 11) -> None:
assert Y.get_first_day == first
assert Y.get_start_month == start
assert Y.get_end_month == end
assert Y.get_year_num == y
def test_year_getters() -> None:
unit_test_year_getters(Year(1582, 9, 11), 1582, 4, 9, 11)
unit_test_year_getters(Year(1583), 1583, 5)
unit_test_year_getters(Year(1599), 1599, 4)
unit_test_year_getters(Year(1600), 1600, 5)
unit_test_year_getters(Year(1601), 1601, 0)
| [
"mathur_akanksha@hotmail.com"
] | mathur_akanksha@hotmail.com |
534f975c66b89dfcafb6544c9604ea1c70c0e8f3 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9Q5nsEy2E2apYHwX8_20.py | 8618e9090acf49e54f8506ead8a5fe3d1c58dd78 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py |
class programmer:
def __init__(self, sal, hours):
# Can't not spell salary properly..
self._salary = sal
self._hours = hours
@property
def salary(self): return self._salary
@property
def work_hours(self): return self._hours
def __del__(self):
return 'oof, {_salary}, {_hours}'.format(**vars(self))
# Also programmers..
def compare(*programmers):
return min(programmers, key=lambda p: (p._salary, p._hours))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
017f5cdcb41d925e5a6ea44a5a663019403fbae6 | 6c24fd8a3a59112aa295ae4b5148e9494f533547 | /soilMoisture.py | f55aee2649fd848ae8e0b2e5252066527e108f9c | [] | no_license | ChinSinclair/soil-moisture | b8aae5e0a26c200de0aa9595df6ad6e3c8131084 | afe68db1841d1b104e906e05817940a18b11e942 | refs/heads/master | 2020-03-24T02:50:23.525713 | 2018-07-26T05:26:58 | 2018-07-26T05:26:58 | 142,392,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import RPi.GPIO as GPIO
import time
import csv
# GPIO 21 is used in this case
channel = 21
GPIO.setmode(GPIO.BCM)
GPIO.setup(channel, GPIO.IN)
# initialise string output to empty string
output = “”
# filename is temp.csv in CSV format
csvfile = “temp.csv”
GPIO.add_event_detect(channel, GPIO.BOTH, bouncetime=500)
while True:
# read data ever 1 second
time.sleep(1)
if GPIO.input(channel):
output = “Moisture: No water detected”
else:
output =”Moisture: Water detected”
# print string output upon execution
print(output)
# open file to write string output, still have bugs where characters are being stored in columns separately
with open(csvfile, “a”) as output1:
write = csv.writer(output1)
writer.writerow(output)
| [
"sinclair_chin@hotmail.com"
] | sinclair_chin@hotmail.com |
939411a9681aa337509476fe32732080cfa22c81 | 2f4aff3236a958610982082114d8c9ce13063a79 | /kdayCalcData.py | 2337c77408b2de23bedb7c377fe1a4c805a6eae5 | [] | no_license | bxgh/stock | 126575c663c093828db8d9763ec56770ad778374 | 2199c6450cc821ccde9c4e941f81dd1b18ab3086 | refs/heads/master | 2020-04-11T18:33:04.732445 | 2019-04-22T09:05:40 | 2019-04-22T09:05:40 | 162,002,968 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | import time
from datetime import datetime as dt
import os
import pymysql
import pandas as pd
import numpy as np
import tushare as ts
from io import StringIO
from sqlalchemy import create_engine
import logging
from time import sleep
from queue import LifoQueue
import queue
import threading
import random
import basewin
import timeit
class CALCDATA:
def __init__(self,host,user,pwd,db):
ts.set_token('38bb3cd1b6af2d75a7d7e506db8fd60354168642b400fa2104af81c5') #设置tushare.token
self.pro = ts.pro_api() #连接tushare
self.host=host #获取数据库连接字符串
self.user=user
self.pwd=pwd
self.db=db
self.hisDate_queue = LifoQueue() #股票历史日期数据,用于分期获取数据
self.trade_cal_queue = LifoQueue() #初始化交易日队列
self.stockBasic_queue = LifoQueue() #初始化股票代码队列
self.file_queue = queue.Queue() #kday文件列表队列,用于读取hdf5数据转存到sqlserver
self.statustotal=0 #初始化进度条
self.isTradeDay=1 #是否交易日
self.isKdayClosed=0 #当天是否执行日线收盘作业
self.allKdayDir='./kday/'
#股票交易代码list
self.stockBasic = self.pro.stock_basic(exchange='',fields='ts_code,symbol,name,area,industry,fullname,enname,market,exchange,curr_type,list_status,list_date,delist_date,is_hs')
def GetWriteConnect(self):
# connectStr1 = "mssql+pymssql://"+self.user + ":" + self.pwd + "@" + self.host+ ":1433/" + self.db+"?charset=utf8"
connectStr = "mysql+pymysql://"+self.user + ":" + self.pwd + "@" + self.host+ "/"+self.db+"?charset=utf8"
# engine=create_engine("mysql+pymysql://toshare1:toshare1@192.168.151.213:3306/kday?charset=utf8",echo=True)
engine=create_engine(connectStr,echo=True)
return engine
def GetConnect(self):
# self.connect=pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset='utf8')
self.connect=pymysql.connect(host=self.host,port=3306,user=self.user,password=self.pwd,database=self.db,charset='utf8')
cur=self.connect.cursor()
return cur
def ExecSql(self,sql):
cur=self.GetConnect()
cur.execute(sql)
self.connect.commit()
self.connect.close()
def ExecQuery(self,sql):
cur=self.GetConnect()
cur.execute(sql)
resList = cur.fetchall()
self.connect.close()
return resList
| [
"°ËÏÉ@DESKTOP-J47RKBK.yyychis.com"
] | °ËÏÉ@DESKTOP-J47RKBK.yyychis.com |
fbe30e999056a1d6e842aedc1d813c0d9b63abe9 | 0ecf2d067e8fe6cdec12b79bfd68fe79ec222ffd | /ui/aura/test/DEPS | 7b065fad58a282d77af7d76a45babcbe24f021e0 | [
"BSD-3-Clause"
] | permissive | yachtcaptain23/browser-android-tabs | e5144cee9141890590d6d6faeb1bdc5d58a6cbf1 | a016aade8f8333c822d00d62738a922671a52b85 | refs/heads/master | 2021-04-28T17:07:06.955483 | 2018-09-26T06:22:11 | 2018-09-26T06:22:11 | 122,005,560 | 0 | 0 | NOASSERTION | 2019-05-17T19:37:59 | 2018-02-19T01:00:10 | null | UTF-8 | Python | false | false | 179 | include_rules = [
"+cc/test",
"+components/viz/test",
"+mojo/core/embedder/embedder.h",
"+services/ui/public/cpp/input_devices",
"+ui/gl",
"+ui/wm/core/wm_state.h",
]
| [
"artem@brave.com"
] | artem@brave.com | |
abdfb315e9df8bc8217098192c8d34fa6221d2ba | 0f925b9444eee676be57c540e8aba43bb207d6a6 | /preparation/resources/phraseological/__init__.py | b05f58a74108b45237b2b9b123e0d2ac10456c40 | [
"MIT"
] | permissive | hatbot-team/hatbot_resources | e1abd0df1d4f2fa48de1fb2da7c47174983809a3 | 0547de84ea917cde093ad64c643679817d0082db | refs/heads/master | 2021-01-10T18:45:15.582674 | 2015-04-01T23:50:43 | 2015-04-01T23:50:43 | 25,030,035 | 0 | 0 | null | 2015-04-01T23:09:02 | 2014-10-10T09:56:35 | Python | UTF-8 | Python | false | false | 210 | py | __author__ = 'shkiper'
import os
PREFIX = os.path.dirname(os.path.abspath(__file__))
_raw_data = PREFIX + '/raw_data/phraseologism.txt'
__all__ = ['parse_phraseological']
from . import parse_phraseological | [
"aa_Zhur@mail.ru"
] | aa_Zhur@mail.ru |
c90d288b4b59233f12071b908f959ed607002bb4 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/viz/counter.py | d827b22070ecb3416754b03a878a22e00231ed25 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,162 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Counter(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.viz.Counter")
meta.moClassName = "vizCounter"
meta.rnFormat = "counter-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Represents a statistical counter"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.viz.Sample")
meta.childNamesAndRnPrefix.append(("cobra.model.viz.Sample", "sample-"))
meta.parentClasses.add("cobra.model.viz.TimeSeries")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('counter-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 21893, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
    """Create this MO under ``parentMoOrDn``; ``name`` is its naming property."""
    namingVals = [name]
    Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
dc92d1e059fb44eb34040d451f18b840721f66d3 | 779994eac5afa01829c20309ba860374cc2dd8ea | /task2_OAuth2/photo_manager/app/main/views.py | 9d74444a953b5b2a1d17c7a7fbdeb1be76583a1c | [] | no_license | colinaaa/tasks_uniquestudio | f27defc2665b8113b370f4d78408ce16b37d0638 | 9e8f8844e89aa057323b7263ef7c48e05b0b9b48 | refs/heads/master | 2020-04-03T20:41:31.458630 | 2019-02-15T13:34:17 | 2019-02-15T13:34:17 | 155,554,266 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | from flask import render_template, redirect, url_for, request,make_response
from . import main
from .forms import Login,Signup
from .. import db
from ..models import User, Photo
import os
import base64
#from flask_login import login_required, current_use
basedir=os.path.abspath(os.path.dirname(__file__))
@main.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@main.route('/user/<username>', methods=['GET', 'POST'])
def user(username):
    """Show the photo gallery page for ``username``.

    Responds with 404 for an unknown username; the original crashed with
    AttributeError because ``.first()`` can return None.
    """
    account = User.query.filter_by(username=username).first_or_404()
    photos = list(account.photos)
    return render_template('user.html', photos=photos, username=username)
@main.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to their gallery page on success."""
    form = Login()
    if form.validate_on_submit():
        username = form.username.data
        account = User.query.filter_by(username=username).first()
        # NOTE(review): passwords are stored and compared in plain text; they
        # should be hashed (e.g. werkzeug.security) — requires a model change.
        if account is not None and form.password.data == account.password:
            return redirect(url_for('.user', username=username))
        # Clear the field on failure; the original set it to a single space.
        form.username.data = ''
    return render_template('login.html', form=form)
@main.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new account, then send the visitor to the login page."""
    form = Signup()
    if not form.validate_on_submit():
        return render_template('signup.html', form=form)
    new_account = User(username=form.username.data, password=form.password.data)
    db.session.add(new_account)
    db.session.commit()
    return redirect(url_for('main.login'))
@main.route('/upload/<username>')
#@login_required
def upload(username):
    """Render the photo-upload form for ``username``."""
    return render_template('up.html', username=username)
@main.route('/up/<username>', methods=['POST'])
#@login_required
def up(username):
    """Persist an uploaded photo for ``username`` and return to their page."""
    photo_file = request.files.get('photo')
    user = User.query.filter_by(username=username).first()
    # basename() strips any directory components from the client-supplied
    # filename, so a crafted name cannot escape the images directory.
    filename = os.path.basename(photo_file.filename)
    upload_path = os.path.join(basedir, 'static/images', filename)
    photo = Photo(name=filename, user_id=user.id, path=upload_path)
    db.session.add(photo)
    db.session.commit()
    photo_file.save(upload_path)
    return redirect(url_for('.user', username=user.username))
@main.route('/download/<int:id>')
def download(id):
    """Serve the stored image bytes for photo ``id`` as a PNG response."""
    photo = Photo.query.filter_by(id=id).first()
    if photo is None:
        return 'no such photo'
    upload_path = os.path.join(basedir, 'static/images', photo.name)
    # Use a context manager: the original `open(...).read()` leaked the handle.
    with open(upload_path, 'rb') as fh:
        payload = fh.read()
    response = make_response(payload)
    response.headers['Content-Type'] = 'image/png'
    return response
@main.route('/delete/<int:id>')
def delete(id):
    """Remove photo ``id`` and go back to its owner's gallery page."""
    photo = Photo.query.filter_by(id=id).first()
    owner = User.query.filter_by(id=photo.user_id).first()
    db.session.delete(photo)
    db.session.commit()
    return redirect(url_for('.user', username=owner.username))
| [
"qingyu.wang@aliyun.com"
] | qingyu.wang@aliyun.com |
89dd0d4ffc177fcfe2e1660239afa11bee516eda | 5d1cef076cdf3f0fde6ac2b1112784a08613375d | /Open3D/examples/Python/Advanced/remove_geometry.py | 04c9c3e8a2e29c000152602f72b04f87585d31da | [
"MIT"
] | permissive | RohanChacko/CVIT-Summer-School | f3875f3c3923dd75e04360e0b12da70d241584f0 | 1c4ee753977be2def44d8081f29d3c325f179859 | refs/heads/master | 2022-12-21T13:36:15.098962 | 2019-06-17T09:43:29 | 2019-06-17T09:43:29 | 188,479,058 | 2 | 1 | MIT | 2022-12-11T14:41:07 | 2019-05-24T19:47:14 | Jupyter Notebook | UTF-8 | Python | false | false | 1,394 | py | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/remove_geometry.py
import open3d as o3d
import numpy as np
import time
import copy
def visualize_non_blocking(vis):
    """Run one iteration of the visualizer's update/event/render cycle.

    Call order matters: push geometry changes, process window events,
    then redraw.
    """
    vis.update_geometry()
    vis.poll_events()
    vis.update_renderer()
# Build the scene: one source cloud and n_pcd translated copies that are
# periodically added to / removed from the viewer.
pcd_orig = o3d.io.read_point_cloud("../../TestData/fragment.pcd")
# Flip Y and Z so the fragment appears upright in the GL view.
flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
pcd_orig.transform(flip_transform)
n_pcd = 5
pcds = []
for i in range(n_pcd):
    pcds.append(copy.deepcopy(pcd_orig))
    trans = np.identity(4)
    trans[:3, 3] = [3 * i, 0, 0]  # space the copies 3 units apart along x
    pcds[i].transform(trans)
vis = o3d.visualization.Visualizer()
vis.create_window()
start_time = time.time()
added = [False] * n_pcd  # which copies are currently shown in the viewer
curr_sec = int(time.time() - start_time)
prev_sec = curr_sec - 1
# Main loop: once per elapsed second, add cloud i at second i (mod 2*n_pcd)
# and remove it n_pcd seconds later; render continuously in between.
# NOTE: runs forever — terminate the process to stop.
while True:
    curr_sec = int(time.time() - start_time)
    if curr_sec - prev_sec == 1:
        prev_sec = curr_sec
        for i in range(n_pcd):
            if curr_sec % (n_pcd * 2) == i and not added[i]:
                vis.add_geometry(pcds[i])
                added[i] = True
                print("Adding %d" % i)
            if curr_sec % (n_pcd * 2) == (i + n_pcd) and added[i]:
                vis.remove_geometry(pcds[i])
                added[i] = False
                print("Removing %d" % i)
    visualize_non_blocking(vis)
| [
"rohanchacko007@gitlab.com"
] | rohanchacko007@gitlab.com |
6345b57545727b994dba5288eb691d685262eaf1 | 48fafa568ccc6a81eb5fba4f7a15ddc2c0cfd6a8 | /android_env/components/adb_controller.py | a95cb630ae7a4b7223efc98421223c2cd628832c | [
"Apache-2.0"
] | permissive | yizhangliu/android_env_for_windows | fb14b9d35bb242fcad2f1b4f5ae055a662a72496 | 55d33f2406c9f31d952de2bf080abfe42ad7586c | refs/heads/main | 2023-08-22T17:23:19.510215 | 2021-10-25T07:18:49 | 2021-10-25T07:18:49 | 378,130,008 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,785 | py | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to manage and control an external ADB process."""
import os
import platform
import pathlib
import re
import subprocess
import threading
import time
from typing import List, Optional, Sequence, Tuple
from absl import logging
from android_env.components import errors
from android_env.proto import task_pb2
import pexpect
_MAX_INIT_RETRIES = 20
_INIT_RETRY_SLEEP_SEC = 2.0
_DEFAULT_TIMEOUT_SECONDS = 10.0 # 120.0
class AdbController():
"""Manages communication with adb."""
def __init__(self,
adb_path: str = 'adb',
adb_server_port: int = 5037,
apk_path: str = '',
device_name: str = '',
shell_prompt: str = r'generic_x86:/ \$',
default_timeout: float = _DEFAULT_TIMEOUT_SECONDS):
self.platform_sys = platform.system()
self._adb_path = adb_path
self._adb_server_port = str(adb_server_port)
self._apk_path = apk_path
self._device_name = device_name
self._prompt = shell_prompt
self._default_timeout = default_timeout
self._execute_command_lock = threading.Lock()
self._adb_shell = None
self._shell_is_ready = False
# Unset problematic environment variables. ADB commands will fail if these
# are set. They are normally exported by AndroidStudio.
if 'ANDROID_HOME' in os.environ:
del os.environ['ANDROID_HOME']
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
del os.environ['ANDROID_ADB_SERVER_PORT']
def command_prefix(self) -> List[str]:
    """The command for instantiating an adb client to this server."""
    prefix = [self._adb_path, '-P', self._adb_server_port]
    if self._device_name:
        return prefix + ['-s', self._device_name]
    return prefix
def init_server(self, timeout: Optional[float] = None):
"""Initialize the ADB server deamon on the given port.
This function should be called immediately after initializing the first
adb_controller, and before launching the simulator.
Args:
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
"""
# Make an initial device-independent call to ADB to start the deamon.
logging.info(f'Initialize the ADB server__{self._device_name} / {self._adb_server_port}')
device_name_tmp = self._device_name
self._device_name = ''
self._execute_command(['devices'], timeout=120.0) # timeout)
time.sleep(0.2)
# Subsequent calls will use the device name.
self._device_name = device_name_tmp
def close(self) -> None:
    """Closes internal threads and processes."""
    logging.info('Closing ADB controller...')
    if self._adb_shell is not None:
        logging.info('Killing ADB shell')
        # force=True terminates the child even if it ignores a polite close.
        self._adb_shell.close(force=True)
        self._adb_shell = None
        self._shell_is_ready = False
    logging.info('Done closing ADB controller.')
def _execute_command(self,
args: List[str],
timeout: Optional[float] = None,
checkStr = '',
) -> Optional[bytes]:
"""Executes an adb command.
Args:
args: A list of strings representing each adb argument.
For example: ['install', '/my/app.apk']
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
Returns:
The output of running such command as a string, None if it fails.
"""
# The lock here prevents commands from multiple threads from messing up the
# output from this AdbController object.
logging.info(f'_execute_command: {args}')
with self._execute_command_lock:
if args and args[0] == 'shell':
adb_output = self._execute_shell_command(args[1:], timeout=timeout, checkStr=checkStr)
else:
adb_output = self._execute_normal_command(args, timeout=timeout, checkStr=checkStr)
# logging.info('ADB output: %s', adb_output)
return adb_output
def _execute_normal_command(
self,
args: List[str],
timeout: Optional[float] = None,
checkStr = ''
) -> Optional[bytes]:
"""Executes `adb args` and returns its output."""
timeout = self._resolve_timeout(timeout)
command = self.command_prefix() + args
logging.info('Executing ADB command: %s', command)
try:
cmd_output = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
logging.info('Done executing ADB command: %s', command)
return cmd_output
except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as error:
logging.exception('Failed to execute ADB command %s', command)
raise error
def _execute_shell_command(self,
args: List[str],
timeout: Optional[float] = None,
max_num_retries: int = 3,
checkStr = '',
) -> Optional[bytes]:
"""Execute shell command."""
timeout = self._resolve_timeout(timeout)
if not self._shell_is_ready:
self._init_shell(timeout=timeout)
shell_args = ' '.join(args)
logging.info('Executing ADB shell command(%d): %s', timeout, shell_args)
num_tries = 0
while num_tries < max_num_retries:
num_tries += 1
try:
self._adb_shell.sendline(shell_args)
if self.platform_sys == 'Windows' and checkStr != '':
self._adb_shell.expect(checkStr, timeout=timeout)
else:
self._adb_shell.expect(self._prompt, timeout=timeout)
logging.info('Done executing ADB shell command: %s', shell_args)
if self.platform_sys == 'Windows':
output = self._adb_shell.after
else:
shell_ret = self._adb_shell.before.partition('\n'.encode('utf-8'))
# logging.info(f'__shell_ret: {shell_ret} ')
output = shell_ret[2] # Consume command.
return output
except pexpect.exceptions.EOF:
logging.exception('Shell command failed_EOF. Reinitializing the shell.')
logging.warning('self._adb_shell.before: %r', self._adb_shell.before)
self._init_shell(timeout=timeout)
except pexpect.exceptions.TIMEOUT:
logging.warning(f'pexpect.exceptions.TIMEOUT__1_')
if self.platform_sys == 'Windows' and self._adb_shell.before != b'':
output = self._adb_shell.before
self._adb_shell._set_buffer(b'')
self._adb_shell.before = b''
self._adb_shell.after = b''
if checkStr != '':
output = str.encode(checkStr)
return output
else:
logging.exception('Shell command failed_TIMEOUT. Reinitializing the shell.')
self._init_shell(timeout=timeout)
logging.exception('Reinitializing the shell did not solve the issue.')
raise errors.AdbControllerPexpectError()
def _init_shell(self, timeout: Optional[float] = None) -> None:
"""Starts an ADB shell process.
Args:
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
Raises:
errors.AdbControllerShellInitError when adb shell cannot be initialized.
"""
timeout = self._resolve_timeout(timeout)
command = ' '.join(self.command_prefix() + ['shell'])
logging.info('Initialising ADB shell with command: %s', command)
num_tries = 0
while num_tries < _MAX_INIT_RETRIES:
num_tries += 1
try:
logging.info(f'Spawning ADB shell...{self.platform_sys}')
self._adb_shell = pexpect.popen_spawn.PopenSpawn(command, timeout=timeout)
# Setting this to None prevents a 50ms wait for each sendline.
self._adb_shell.delaybeforesend = None
self._adb_shell.delayafterread = None
logging.info(f'Done spawning ADB shell. Consuming first prompt ({timeout} / {num_tries})...')
if self.platform_sys == 'Windows':
self._adb_shell.expect(self._prompt, timeout=timeout, async_=True)
else:
self._adb_shell.expect(self._prompt, timeout=timeout)
logging.info(f'Done consuming first prompt.')
self._shell_is_ready = True
return
except (pexpect.ExceptionPexpect, ValueError) as e:
logging.exception(e)
logging.error('self._adb_shell.before: %r', self._adb_shell.before)
logging.error('Could not start ADB shell. Try %r of %r.', num_tries, _MAX_INIT_RETRIES)
time.sleep(_INIT_RETRY_SLEEP_SEC)
raise errors.AdbControllerShellInitError(
'Failed to start ADB shell. Max number of retries reached.')
def _resolve_timeout(self, timeout: Optional[float]) -> float:
    """Pick the effective timeout: the explicit one if given, else the default."""
    if timeout is None:
        return self._default_timeout
    return timeout
def _wait_for_device(self,
max_tries: int = 20,
sleep_time: float = 1.0,
timeout: Optional[float] = None) -> None:
"""Waits for the device to be ready.
Args:
max_tries: Number of times to check if device is ready.
sleep_time: Sleep time between checks, in seconds.
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
Returns:
True if the device is ready, False if the device timed out.
Raises:
errors.AdbControllerDeviceTimeoutError when the device is not ready after
exhausting `max_tries`.
"""
num_tries = 0
while num_tries < max_tries:
ready = self._check_device_is_ready(timeout=timeout)
if ready:
logging.info('Device is ready.')
return
time.sleep(sleep_time)
logging.error('Device is not ready.')
raise errors.AdbControllerDeviceTimeoutError('Device timed out.')
def _check_device_is_ready(self, timeout: Optional[float] = None) -> bool:
"""Checks if the device is ready."""
logging.info(f'__________________checking device_______________')
required_services = ['window', 'package', 'input', 'display']
timeout = 120
for service in required_services:
checkStr = f'Service {service}: found'
check_output = self._execute_command(['shell', 'service', 'check', service],
timeout=timeout,
checkStr = checkStr
)
if not check_output:
logging.error('Check for service "%s" failed.', service)
return False
check_output = check_output.decode('utf-8').strip()
if check_output != checkStr:
logging.error(check_output)
return False
return True
# ===== SPECIFIC COMMANDS =====
def install_binary(self,
src: str,
dest_dir: str,
timeout: Optional[float] = None):
"""Installs the specified binary on the device."""
self._execute_command(['shell', 'su', '0', 'mkdir', '-p', dest_dir],
timeout=timeout)
self._execute_command(
['shell', 'su', '0', 'chown', '-R', 'shell:', dest_dir],
timeout=timeout)
bin_name = pathlib.PurePath(src).name
dest = pathlib.PurePath(dest_dir) / bin_name
self.push_file(src, str(dest), timeout=timeout)
def install_apk(self,
local_apk_path: str,
timeout: Optional[float] = None) -> None:
"""Installs an app given a `local_apk_path` in the filesystem.
This function checks that `local_apk_path` exists in the file system, and
will raise an exception in case it doesn't.
Args:
local_apk_path: Path to .apk file in the local filesystem.
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
"""
apk_file = self._apk_path + os.sep + local_apk_path
assert os.path.exists(apk_file), ('Could not find local_apk_path :%r' % apk_file)
timeout = 60
self._execute_command(['install', '-r', '-t', '-g', apk_file], timeout=timeout)
def is_package_installed(self,
                         package_name: str,
                         timeout: Optional[float] = None) -> bool:
    """Checks that the given package is installed."""
    raw = self._execute_command(['shell', 'pm', 'list', 'packages'],
                                timeout=timeout)
    if not raw:
        return False
    # Each entry looks like "package:com.example.app"; strip the prefix.
    installed = [
        entry[len('package:'):]
        for entry in raw.decode('utf-8').split()
        if entry.startswith('package:')
    ]
    if package_name in installed:
        logging.info('Package %s found.', package_name)
        return True
    return False
def start_activity(self,
full_activity: str,
extra_args: Optional[List[str]],
timeout: Optional[float] = None):
if extra_args is None:
extra_args = []
self._execute_command(
['shell', 'am', 'start', '-S', '-n', full_activity] + extra_args,
timeout=timeout)
def start_intent(self,
action: str,
data_uri: str,
package_name: str,
timeout: Optional[float] = None):
self._execute_command(
['shell', 'am', 'start', '-a', action, '-d', data_uri, package_name],
timeout=timeout)
def start_accessibility_service(self,
accessibility_service_full_name,
timeout: Optional[float] = None):
self._execute_command([
'shell', 'settings', 'put', 'secure', 'enabled_accessibility_services',
accessibility_service_full_name
],
timeout=timeout)
def broadcast(self,
receiver: str,
action: str,
extra_args: Optional[List[str]],
timeout: Optional[float] = None):
if extra_args is None:
extra_args = []
self._execute_command(
['shell', 'am', 'broadcast', '-n', receiver, '-a', action] + extra_args,
timeout=timeout)
def setprop(self,
            prop_name: str,
            value: str,
            timeout: Optional[float] = None):
    """Sets an Android system property via `adb shell setprop`."""
    self._execute_command(['shell', 'setprop', prop_name, value],
                          timeout=timeout)
def push_file(self, src: str, dest: str, timeout: Optional[float] = None):
    """Copies local file `src` onto the device at path `dest` (`adb push`)."""
    self._execute_command(['push', src, dest], timeout=timeout)
def force_stop(self, package: str, timeout: Optional[float] = None):
    """Force-stops all processes of `package` (`am force-stop`)."""
    self._execute_command(['shell', 'am', 'force-stop', package], timeout=timeout)
def clear_cache(self, package: str, timeout: Optional[float] = None):
    """Resets `package` via `pm clear`.

    NOTE(review): `pm clear` appears to wipe the app's data, not just its
    cache — confirm that callers expect a full reset.
    """
    self._execute_command(['shell', 'pm', 'clear', package], timeout=timeout)
def grant_permissions(self,
                      package: str,
                      permissions: Sequence[str],
                      timeout: Optional[float] = None):
    """Grants each runtime permission in `permissions` to `package`."""
    for permission in permissions:
        logging.info('Granting permission: %r', permission)
        self._execute_command(['shell', 'pm', 'grant', package, permission], timeout=timeout)
def get_activity_dumpsys(self,
package_name: str,
timeout: Optional[float] = None) -> Optional[str]:
"""Returns the activity's dumpsys output in a UTF-8 string."""
dumpsys_activity_output = self._execute_command(['shell', 'dumpsys', 'activity', package_name, package_name], timeout=timeout)
if dumpsys_activity_output:
return dumpsys_activity_output.decode('utf-8')
def get_current_activity(self,
timeout: Optional[float] = None) -> Optional[str]:
"""Returns the full activity name that is currently opened to the user.
The format of the output is `package/package.ActivityName', for example:
"com.example.vokram/com.example.vokram.MainActivity"
Args:
timeout: A timeout to use for this operation. If not set the default
timeout set on the constructor will be used.
Returns:
None if no current activity can be extracted.
"""
if self.platform_sys == 'Windows':
visible_task = self._execute_command(['shell', 'am', 'stack', 'list'], timeout=timeout)
else:
visible_task = self._execute_command(['shell', 'am', 'stack', 'list', '|', 'grep', '-E', 'visible=true'], timeout=timeout)
if not visible_task:
am_stack_list = self._execute_command(['shell', 'am', 'stack', 'list'], timeout=timeout)
logging.error('Empty visible_task. `am stack list`: %r', am_stack_list)
return None
visible_task = visible_task.decode('utf-8')
if self.platform_sys == 'Windows':
visible_task_list = re.findall(r"visible=true topActivity=ComponentInfo{(.+?)}", visible_task)
if visible_task_list == []:
visible_task = ''
else:
visible_task = 'ComponentInfo{' + visible_task_list[0] + '}'
p = re.compile(r'.*\{(.*)\}')
matches = p.search(visible_task)
if matches is None:
logging.error(
'Could not extract current activity. Will return nothing. '
'`am stack list`: %r',
self._execute_command(['shell', 'am', 'stack', 'list'], timeout=timeout))
return None
return matches.group(1)
def start_screen_pinning(self,
full_activity: str,
timeout: Optional[float] = None):
current_task_id = self._fetch_current_task_id(full_activity, timeout)
if current_task_id == -1:
logging.info('Could not find task ID for activity [%r]', full_activity)
return # Don't execute anything if the task ID can't be found.
self._execute_command(['shell', 'am', 'task', 'lock', str(current_task_id)],
timeout=timeout)
def _fetch_current_task_id(self,
full_activity_name: str,
timeout: Optional[float] = None) -> int:
"""Returns the task ID of the given `full_activity_name`."""
stack = self._execute_command(['shell', 'am', 'stack', 'list'], timeout=timeout)
stack_utf8 = stack.decode('utf-8')
lines = stack_utf8.splitlines()
regex = re.compile(r'^\ *taskId=(?P<id>[0-9]*): %s.*visible=true.*$' % full_activity_name)
if self.platform_sys == 'Windows':
regex = re.compile(r'^\ *taskId=(?P<id>[0-9]*): .* visible=true .*{%s}.*' % full_activity_name)
matches = [regex.search(line) for line in lines]
# print(f'___matches={matches}___')
for match in matches:
if match is None:
continue
current_task_id_str = match.group('id')
try:
current_task_id = int(current_task_id_str)
return current_task_id
except ValueError:
logging.info('Failed to parse task ID [%r].', current_task_id_str)
logging.error('Could not find current activity in stack list: %r', stack_utf8)
# At this point if we could not find a task ID, there's nothing we can do.
return -1
def get_screen_dimensions(self,
timeout: Optional[float] = None) -> Tuple[int, int]:
"""Returns a (height, width)-tuple representing a screen size in pixels."""
logging.info(f'Fetching screen dimensions({timeout})...')
self._wait_for_device(timeout=timeout)
adb_output = self._execute_command(['shell', 'wm', 'size'], timeout=timeout) #, checkStr='Physical\ssize:\s([0-9]+x[0-9]+)')
assert adb_output, 'Empty response from ADB for screen size.'
adb_output = adb_output.decode('utf-8')
adb_output = adb_output.replace('\r\n', '')
# adb_output should be of the form "Physical size: 320x480".
dims_match = re.match(r'.*Physical\ssize:\s([0-9]+x[0-9]+).*', adb_output)
assert dims_match, ('Failed to match the screen dimensions. %s' % adb_output)
dims = dims_match.group(1)
logging.info('width x height: %s', dims)
width, height = tuple(map(int, dims.split('x'))) # Split between W & H
logging.info('Done fetching screen dimensions: (H x W) = (%r, %r)', height, width)
return (height, width)
def get_orientation(self, timeout: Optional[float] = None) -> Optional[str]:
"""Returns the device orientation."""
logging.info('Getting orientation...')
dumpsys = self._execute_command(['shell', 'dumpsys', 'input'],
timeout=timeout)
# logging.info('dumpsys: %r', dumpsys)
if not dumpsys:
logging.error('Empty dumpsys.')
return None
dumpsys = dumpsys.decode('utf-8')
lines = dumpsys.split('\n') # Split by lines.
skip_next = False
for line in lines:
# There may be multiple devices in dumpsys. An invalid device can be
# identified by negative PhysicalWidth.
physical_width = re.match(r'\s+PhysicalWidth:\s+(-?\d+)px', line)
if physical_width:
skip_next = int(physical_width.group(1)) < 0
surface_orientation = re.match(r'\s+SurfaceOrientation:\s+(\d)', line)
if surface_orientation is not None:
if skip_next:
continue
orientation = surface_orientation.group(1)
logging.info('Done getting orientation: %r', orientation)
return orientation
logging.error('Could not get the orientation. Returning None.')
return None
def rotate_device(self,
orientation: task_pb2.AdbCall.Rotate.Orientation,
timeout: Optional[float] = None) -> None:
"""Sets the device to the given `orientation`."""
self._execute_command(['shell', 'settings', 'put', 'system', 'user_rotation', str(orientation)],
timeout=timeout)
def set_touch_indicators(self,
show_touches: bool = True,
pointer_location: bool = True,
timeout: Optional[float] = None) -> None:
"""Sends command to turn touch indicators on/off."""
logging.info('Setting show_touches indicator to %r', show_touches)
logging.info('Setting pointer_location indicator to %r', pointer_location)
show_touches = 1 if show_touches else 0
pointer_location = 1 if pointer_location else 0
self._wait_for_device(timeout=timeout)
self._execute_command(['shell', 'settings', 'put', 'system', 'show_touches', str(show_touches)],
timeout=timeout)
self._execute_command(['shell', 'settings', 'put', 'system', 'pointer_location', str(pointer_location)],
timeout=timeout)
def set_bar_visibility(self,
                       navigation: bool = False,
                       status: bool = False,
                       timeout: Optional[float] = None) -> Optional[bytes]:
    """Show or hide navigation and status bars."""
    # policy_control value keyed by (status shown?, navigation shown?).
    policy = {
        (True, True): 'null*',                    # show both bars
        (False, True): 'immersive.status=*',      # hide status (top) bar
        (True, False): 'immersive.navigation=*',  # hide navigation (bottom) bar
        (False, False): 'immersive.full=*',       # hide both bars
    }[(bool(status), bool(navigation))]
    command = ['shell', 'settings', 'put', 'global', 'policy_control', policy]
    return self._execute_command(command, timeout=timeout)
def disable_animations(self, timeout: Optional[float] = None):
    """Sets all three system animation scales to zero (no animations)."""
    for setting in ('window_animation_scale',
                    'transition_animation_scale',
                    'animator_duration_scale'):
        self._execute_command(
            ['shell', 'settings put global %s 0.0' % setting],
            timeout=timeout)
def input_tap(self, x: int, y: int, timeout: Optional[float] = None) -> None:
    """Issues a tap at screen coordinates (`x`, `y`) in pixels."""
    self._execute_command(['shell', 'input', 'tap',
                           str(x), str(y)],
                          timeout=timeout)
def input_text(self,
               input_text: str,
               timeout: Optional[float] = None) -> Optional[bytes]:
    """Types `input_text` on the device; returns the adb command's output."""
    return self._execute_command(['shell', 'input', 'text', input_text],
                                 timeout=timeout)
def input_key(self,
              key_code: str,
              timeout: Optional[float] = None) -> Optional[bytes]:
    """Presses a keyboard key.

    Please see https://developer.android.com/reference/android/view/KeyEvent for
    values of `key_code`.

    We currently only accept:
      KEYCODE_HOME (constant 3)
      KEYCODE_BACK (constant 4)
      KEYCODE_ENTER (constant 66)

    Args:
      key_code: The keyboard key to press.
      timeout: Optional time limit in seconds.

    Returns:
      The output of running such command as a string, None if it fails.
    """
    accepted_key_codes = ['KEYCODE_HOME', 'KEYCODE_BACK', 'KEYCODE_ENTER']
    # Whitelist guard; rejects arbitrary keycodes with an AssertionError.
    assert key_code in accepted_key_codes, ('Rejected keycode: %r' % key_code)
    return self._execute_command(['shell', 'input', 'keyevent', key_code], timeout=timeout)
| [
"liuyizhang@loveorange.com"
] | liuyizhang@loveorange.com |
0d81887fc7d8ba45d14f40ea4076f76365ea8404 | 736d095bcca970283eeee34f2c87cfbf0247176e | /PyqtGraph_Selftest.py | dd28a17b2520d5a3dcac6b62254ff75d077766e1 | [] | no_license | XPlay1990/Python_Graph-libs-trys | e8693c8094f34bdbeb27f4f208211637b6fcdeab | fcbf3598f856b6546a59ebee79fbb65491d06e9e | refs/heads/master | 2020-03-26T16:15:20.760300 | 2018-08-17T07:58:15 | 2018-08-17T07:58:15 | 145,090,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | # -*- coding: utf-8 -*-
"""
This example demonstrates the use of GLSurfacePlotItem.
"""
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
from collections import deque
import pyqtgraph.opengl as gl
import numpy as np
import datetime
numberOfData = 1000
widthOfData = 500
## Create a GL View widget to display data
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
w.setWindowTitle('PAS Surfaceplot')
w.setGeometry(100, 100, 1500, 800) # distance && resolution
w.setCameraPosition(distance=50)
## Create axis
#axis = pg.AxisItem('left', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True)
#axis.show()
#axis = pg.AxisItem('left', pen = None)
# xAxis.paint()
#Axis.setSize(self.valueNumber, self.valueNumber, self.valueNumber)
#axis.setStyle(showValues = True)
#axis.show()
#--------------------
axis = gl.GLAxisItem()
# xAxis.paint()
#axis.setSize(self.valueNumber, self.valueNumber, self.valueNumber)
w.addItem(axis)
## Add a grid to the view
g = gl.GLGridItem()
g.scale(2,2,1)
g.setDepthValue(10) # draw grid after surfaces since they may be translucent
w.addItem(g)
## Animated example
## compute surface vertex data
x = np.linspace(0, widthOfData, widthOfData)
y = np.linspace(0, numberOfData, numberOfData)
## precompute height values for all frames
data = np.random.randint(5, size=(widthOfData, numberOfData))
## create a surface plot, tell it to use the 'heightColor' shader
## since this does not require normal vectors to render (thus we
## can set computeNormals=False to save time when the mesh updates)
p4 = gl.GLSurfacePlotItem(x, y, shader='heightColor', computeNormals=False, smooth=False)
p4.shader()['colorMap'] = np.array([0.2, 2, 0.5, 0.2, 1, 1, 0.2, 0, 2])
p4.translate(10, 10, 0)
w.addItem(p4)
index = 0
def update():
    """Advance the surface plot one step: drop the oldest data row, append a
    fresh random row, and push the new height field to the plot item."""
    global p4, data, index
    time_before = datetime.datetime.now()
    data = np.delete(data, 0, 0)  # drop the oldest row (FIFO window)
    new_values = np.random.randint(5, size=(1, numberOfData))
    data = np.concatenate((data, new_values))
    p4.setData(z=data)
    time_diff = datetime.datetime.now() - time_before
    elapsed_ms = (time_diff.days * 86400000) + (time_diff.seconds * 1000) + (time_diff.microseconds / 1000)
    # Original did `print(elapsed_ms + ' ms')`, a TypeError (float + str).
    print('%s ms' % elapsed_ms)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(20)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| [
"35451726+PASDeutschland@users.noreply.github.com"
] | 35451726+PASDeutschland@users.noreply.github.com |
3c97ee56f65b16d69e856715a947b01f4497c14d | cff68cf6523f29efcf7e50df48a9d85ac6912934 | /qa/data/models.py | 4a9396b0ac450b6504238c0f88edc7635a536127 | [] | no_license | sabithpocker/narratorqa | f29513262a61276f851580b96e77826767a5ab1d | c45ba3d89d8a3da930e632e4620bf480dc205312 | refs/heads/master | 2021-01-19T20:34:17.773066 | 2017-09-02T15:12:56 | 2017-09-02T15:12:56 | 101,228,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from django.db import models
# Create your models here.
class Catalog(models.Model):
    """A data catalog record (one portal listing of datasets)."""
    title = models.CharField(max_length=500)
    description = models.CharField(max_length=1000)
    # Owning department, when known; both fields are optional.
    ministry_department = models.CharField(max_length=150, null=True, blank=True)
    state_department = models.CharField(max_length=150, null=True, blank=True)
    data_sets_actual_count = models.IntegerField(default=0)
    data_sets_count = models.IntegerField(default=0)
    last_updated = models.DateTimeField('last updated')
    url = models.URLField(max_length=200)

    def __str__(self):
        """Human-readable representation used by the admin and shell."""
        return self.title
class Node(models.Model):
    """A node belonging to a Catalog; deleted along with its catalog."""
    catalog = models.ForeignKey(Catalog, on_delete=models.CASCADE)
    title = models.CharField(max_length=500)
    node = models.CharField(max_length=100)
    url = models.URLField(max_length=200)

    def __str__(self):
        """Human-readable representation used by the admin and shell."""
        return self.title
| [
"sabith@techolution.com"
] | sabith@techolution.com |
ef600d8b85e4fca937d76f4660c45917052aaf5f | dd998619977a5aeebda40bb7d690a26f40815263 | /learningpython/forloop2.py | 35337dd9f81ec92d22ede46f15f59ae8a24f7dad | [] | no_license | skolte/python | 791156afb78f2b87194e601f78f1375356c71069 | dfea80f5eb340a375f31a961f3315790105437d9 | refs/heads/master | 2021-01-10T03:21:39.313897 | 2016-04-07T04:43:02 | 2016-04-07T04:43:02 | 54,427,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | __author__ = 'sandeep'
# Find the smallest number in the list.
smallest_so_far = -1
for the_num in [9, 41, 12, 3, 74, 15] :
if the_num < smallest_so_far :
smallest_so_far = the_num
print (smallest_so_far) | [
"sandeep.kolte@gmail.com"
] | sandeep.kolte@gmail.com |
5f3f95c4fee66520ba6a1cc88ff46ec144020720 | 88fc0fa2f51f40faac15b42f7b5eb6529718d029 | /problems/dtlz/dtlz2.py | b29556cbe741119a159de893ea30a90867d26c46 | [
"MIT"
] | permissive | bigfatnoob/optima | d68a2bcc3d411a44eb38c8625177d0017514845a | dcd4a13f08f8c1bd68740e81f472ff6ac76addca | refs/heads/master | 2016-09-15T15:01:08.455136 | 2016-03-14T00:09:32 | 2016-03-14T00:09:32 | 35,189,485 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from problems.problem import *
__author__ = 'panzer'
class DTLZ2(Problem):
  """
  DTLZ2 benchmark problem with "m" objectives over "n" decisions in [0, 1].
  """
  k = 10
  def __init__(self, m, n=None):
    """
    Build a DTLZ2 instance.
    :param m: Number of objectives
    :param n: Number of decisions (defaults to m + k - 1)
    """
    Problem.__init__(self)
    self.name = DTLZ2.__name__
    if n is None:
      n = DTLZ2.default_decision_count(m)
    self.decisions = [Decision("x"+str(i+1), 0, 1) for i in range(n)]
    self.objectives = [Objective("f"+str(i+1), True, 0, 1000) for i in range(m)]
  @staticmethod
  def default_decision_count(m):
    """Conventional decision count: n = m + k - 1."""
    return DTLZ2.k + m - 1
  def evaluate(self, decisions):
    """Return the m objective values for one decision vector."""
    num_objs = len(self.objectives)
    # The last k decisions only contribute through the distance function g;
    # their range starts at index n - k == m - 1.
    g = sum((d - 0.5)**2 for d in decisions[num_objs - 1:])
    values = []
    for idx in range(num_objs):
      value = 1 + g
      for j in range(num_objs - idx - 1):
        value *= cos(decisions[j] * PI / 2)
      if idx != 0:
        value *= sin(decisions[num_objs - idx - 1] * PI / 2)
      values.append(value)
    return values
  def get_pareto_front(self):
    """Load the tabulated Pareto front for this objective count."""
    file_name = "problems/dtlz/PF/dtlz2_"+str(len(self.objectives))+"_objectives.txt"
    with open(file_name) as f:
      return [map(float, line.strip().replace("\n","").split(" ")) for line in f.readlines()]
"panzer@Georges-MacBook-Pro-2.local"
] | panzer@Georges-MacBook-Pro-2.local |
e489bd2d840667373619b3b7a58eb5ce8d826d5c | 86568070ba7718af241cb0bdabf5dba0243b5d89 | /5. 卷积神经网络/5.7 使用重复结构的网络/VGG_Func.py | 3cab9146970d61d98083486621d5683b0e05f68e | [] | no_license | kaitokuroba7/pytorch_learning | 560ec952a7c3471129b38d66a6a2ff6dae9909fc | f1c90c533b0475d15df44811d3ef870bb3d3e3d4 | refs/heads/master | 2023-04-14T08:48:07.409902 | 2021-04-20T11:24:49 | 2021-04-20T11:24:49 | 351,309,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: J.Zhang
@contact: 1027380683@qq.com
@site: https://github.com/kaitokuroba7
@software: PyCharm
@file: VGG_Func.py
@time: 2021/3/16 15:37
"""
import time
import torch
from torch import nn, optim
import Function.utils as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def vgg_block(num_convs, in_channels, out_channels):
    """Build one VGG stage: num_convs (3x3 conv + ReLU) pairs then a 2x2 max-pool.

    The first convolution maps in_channels -> out_channels; subsequent ones
    keep out_channels. The pooling layer halves the spatial height and width.
    """
    layers = []
    channels = in_channels
    for _ in range(num_convs):
        layers.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)
def vgg(conv_arch, fc_features, fc_hidden_utils=4096):
    """Assemble a VGG network from a stage description.

    conv_arch is an iterable of (num_convs, in_channels, out_channels)
    triples, one per vgg_block stage; fc_features is the flattened feature
    size feeding the classifier head (which outputs 10 classes).
    """
    net = nn.Sequential()
    # Convolutional stages; each vgg_block halves the spatial resolution.
    for stage, (num_convs, in_channels, out_channels) in enumerate(conv_arch, start=1):
        net.add_module('vgg_block_' + str(stage), vgg_block(num_convs, in_channels, out_channels))
    # Fully connected classifier head.
    classifier = nn.Sequential(
        d2l.FlattenLayer(),
        nn.Linear(fc_features, fc_hidden_utils),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_utils, fc_hidden_utils),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_utils, 10),
    )
    net.add_module("fc", classifier)
    return net
if __name__ == "__main__":
    # Smoke check: print the layer structure of a two-conv VGG stage.
    demo_block = vgg_block(2, 32, 32)
    print(demo_block)
| [
"zhangjie0209@zju.edu.cn"
] | zhangjie0209@zju.edu.cn |
68876b760fd8caeed66ef8e05f73700fad04321c | df87cea4e024ba6350625f4e479cd18984c39bba | /qualia2/rl/envs/__init__.py | aea2eb025e3070f4f4833dd757e60f848f653758 | [
"MIT"
] | permissive | Kashu7100/Qualia2.0 | 71428f643f52a97c0d0670d0c7d7db14b96a18e5 | 6f059237d0baa364fcb521f9e0cca61d4e6e5d84 | refs/heads/master | 2021-06-15T09:06:31.663708 | 2021-04-06T16:59:47 | 2021-04-06T16:59:47 | 172,283,390 | 51 | 12 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
from .atari import *
from .box2d import *
#from .mujoco import *
from .toy_text import *
from .roboschool import *
from .classic_control import *
#from pybullet import *
| [
"noreply@github.com"
] | noreply@github.com |
69f32e38775c2f66ff284189680c3d7a613569d3 | 225ba370ecd1052f2c9551d9882be126036a2bd1 | /kNN/handwriting_kNN.py | 062a9315627d749a8006f788ed11f35a375a644f | [] | no_license | zsm982202/MechineLearning_python3 | 43eb1ddefffdfbf5760b479bbd9c74c82e62b03d | 6afd87c1c7bf6a988b1a72d26a5eb4ffe99198e6 | refs/heads/master | 2022-12-15T12:33:36.830904 | 2020-09-21T09:32:22 | 2020-09-21T09:32:22 | 282,404,877 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | import kNN
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager
from os import listdir
def img2vector(filename):
    """Flatten a 32x32 text image of digit characters into a 1x1024 row vector.

    Each of the first 32 lines of the file must contain at least 32
    characters, each a single digit; character (i, j) is stored at
    column 32*i + j of the returned numpy array.
    """
    returnVect = np.zeros([1, 1024])
    # Use a context manager so the handle is closed even on error
    # (the original opened the file and never closed it).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def handwritingClassTest():
    """Classify the handwriting test digits with kNN (k=3) and report errors.

    Training vectors come from digits/trainingDigits and test vectors from
    digits/testDigits; each file name encodes its label as '<digit>_<n>.txt'.
    """
    training_files = listdir('digits/trainingDigits')
    m = len(training_files)
    hwLabels = []
    trainingMat = np.zeros([m, 1024])
    for row, file_name in enumerate(training_files):
        # The label is the digit before the first underscore.
        label = int(file_name.split('.')[0].split('_')[0])
        hwLabels.append(label)
        trainingMat[row, :] = img2vector('digits/trainingDigits/' + file_name)
    test_files = listdir('digits/testDigits')
    mTest = len(test_files)
    errorCount = 0.0
    for file_name in test_files:
        expected = int(file_name.split('.')[0].split('_')[0])
        vectorUnderTest = img2vector('digits/testDigits/' + file_name)
        predicted = kNN.classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print('the classifier came back with: ' + str(predicted) + ', the real answer is: ' + str(expected))
        if predicted != expected:
            errorCount += 1.0
    print('\nthe total number of error is: ' + str(errorCount) + '\nthe total error rate is: ' + str(errorCount / float(mTest)))
if __name__ == "__main__":
    # Ad-hoc check of the vector loader, kept for reference:
    # img2vector('digits/testDigits/0_13.txt')
    handwritingClassTest()
| [
"zsm982202@Outlook.com"
] | zsm982202@Outlook.com |
4dd552079273443466cfa2d4f347e85fedbcd48c | 012cbbe582a76749e043f6d135c1bbcf8acaabfb | /urls_from_txt_file/program.py | 54f64ee8ef77ab53ab03eafa42dd47908657a7d6 | [] | no_license | juancarl0s/crawl_outputHTTPstatusCodes | fc31c2dae4e21d25cac35c8f0c30619398610eb4 | 5375ea0fdfcd9b0aec0ff8f4ec5eaef4895629f6 | refs/heads/master | 2016-09-01T22:09:43.378301 | 2015-02-23T22:36:46 | 2015-02-23T22:36:46 | 28,617,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from bs4 import BeautifulSoup
import urllib
import re
import csv
def write_url_and_statusCodes_ToFile(txtBoolean, csvBoolean):
urls_set = set()
# urls_file = open("urls.txt", "r")
# for line in urls_file:
# if line.endswith("/"):
# line = line[:-1]
# urls_set.add(line)
# print line
with open("urls.txt", "r") as urls_file:
for line in urls_file:
if line.endswith("/"):
line = line[:-1]
urls_set.add(line)
#write text file
print "Writing to file(s)"
if txtBoolean:
statusCodesTXT = open("urls_and_statusCodes.txt", "w")
if csvBoolean:
statusCodesCSV = csv.writer(open("urls_and_statusCodes.csv", "wb"))
for url in urls_set:
print "Checking %s" % url
if txtBoolean:
statusCodesTXT.write(url + "Status code: %d" % urllib.urlopen(url).code + "\n\n")
if csvBoolean:
statusCodesCSV.writerow([url, urllib.urlopen(url).code])
if txtBoolean:
statusCodesTXT.close()
print "Done with file(s)"
return
# Map each menu choice onto the (write_txt, write_csv) flag pair.
_OUTPUT_FLAGS = {
    "1": (True, False),
    "2": (False, True),
    "3": (True, True),
}
selected_option = raw_input("Do you want the results to be writte as a:\n 1)Text file (status_codes.txt).\n 2)CSV file (status_codes.csv).\n 3)Both.\nSelection: ")
if selected_option in _OUTPUT_FLAGS:
    txt_flag, csv_flag = _OUTPUT_FLAGS[selected_option]
    write_url_and_statusCodes_ToFile(txt_flag, csv_flag)
"juancarlos.hal0@gmail.com"
] | juancarlos.hal0@gmail.com |
6379d81b97e6d5382544005543d31ea1c8b3b723 | 41d2a7ce007953a57c4c3fa4bd2d8d033f7a956c | /Runs/frame-rotation-investigation/system.py | a901ecd475600f38469f914c9563c3ebada0f488 | [] | no_license | gcross/Practicum | 3251791ffb05f4d3d4426728582b03f67869f027 | b4e667855a33c8d8889b94f057abd45913ab80e6 | refs/heads/master | 2021-01-23T13:29:26.188630 | 2010-02-27T01:16:15 | 2010-02-27T01:16:15 | 256,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,250 | py | #@+leo-ver=4-thin
#@+node:gcross.20090827130017.1614:@thin system.py
#@<< Imports >>
#@+node:gcross.20090827130017.1615:<< Imports >>
import vpi
from numpy import *
from numpy.random import rand
import os
import os.path
import itertools
from itertools import izip
#@-node:gcross.20090827130017.1615:<< Imports >>
#@nl
#@<< MPI Initialization >>
#@+node:gcross.20090827130017.1616:<< MPI Initialization >>
from mpi4py import MPI
comm = MPI.COMM_WORLD
number_of_processors = comm.Get_size()
my_rank = comm.Get_rank()
#@-node:gcross.20090827130017.1616:<< MPI Initialization >>
#@nl
#@+others
#@+node:gcross.20090827130017.1895:Observable classes
#@+others
#@+node:gcross.20090827130017.1896:Base classes
#@+node:gcross.20090827130017.1897:class Observable
class Observable(object):
    """Base class for measurements accumulated on every MPI processor and
    reduced to the root rank before being written out."""
    #@    @+others
    #@+node:gcross.20090827130017.2016:total_and_write
    def total_and_write(self):
        """Reduce this observable over all processors; only rank 0 writes."""
        totals = self.compute_total()
        if my_rank == 0:
            self.write_out_totals(totals)
    #@-node:gcross.20090827130017.2016:total_and_write
    #@-others
#@-node:gcross.20090827130017.1897:class Observable
#@+node:gcross.20090827130017.1899:class AverageValuesEstimate
class AverageValuesEstimate(Observable):
    """Observable that accumulates running sums of estimates and their
    squares, so means and statistical errors can be computed on reduction."""
    #@    @+others
    #@+node:gcross.20090828095451.2961:(fields)
    # Number of times add() has been called on this processor.
    total_number_of_observations = 0
    #@-node:gcross.20090828095451.2961:(fields)
    #@+node:gcross.20090827130017.2017:__init__
    def __init__(self,*shape):
        """Allocate zeroed accumulators of the given shape."""
        self.estimate = zeros(shape,dtype=double)
        self.estimate_squared = zeros(shape,dtype=double)
    #@-node:gcross.20090827130017.2017:__init__
    #@+node:gcross.20090827130017.1898:total_and_write
    def write_out_totals(self,totals_and_errors):
        """Split the reduced (mean, mean-of-squares) pair, turn the second
        moment into a standard error of the mean, and delegate to write_out."""
        totals, totals_squared = totals_and_errors
        errors = sqrt((totals_squared-totals**2)/(self.total_number_of_observations * number_of_processors))
        self.write_out(totals,errors)
    #@-node:gcross.20090827130017.1898:total_and_write
    #@+node:gcross.20090827130017.1900:compute_total
    def compute_total(self):
        """MPI-reduce the packed (estimate, estimate**2) accumulators and
        normalize by the global observation count.

        Returns an array of shape (2,) + estimate.shape: means then
        mean-squares."""
        total_estimate_and_square = zeros(2*prod(self.estimate.shape),dtype='d',order='Fortran')
        comm.Reduce((array([self.estimate,self.estimate_squared]).ravel(),MPI.DOUBLE),(total_estimate_and_square,MPI.DOUBLE))
        total_estimate_and_square /= (self.total_number_of_observations * number_of_processors)
        return total_estimate_and_square.reshape((2,)+self.estimate.shape)
    #@-node:gcross.20090827130017.1900:compute_total
    #@+node:gcross.20090827130017.2014:add
    def add(self,estimate):
        """Accumulate one observation and its square."""
        self.total_number_of_observations += 1
        self.estimate += estimate
        self.estimate_squared += estimate**2
    #@-node:gcross.20090827130017.2014:add
    #@-others
#@-node:gcross.20090827130017.1899:class AverageValuesEstimate
#@+node:gcross.20090827130017.1901:class SingleAverageValueEstimate
class SingleAverageValueEstimate(AverageValuesEstimate):
    """Scalar specialization of AverageValuesEstimate (length-1 accumulators)."""
    #@    @+others
    #@+node:gcross.20090827130017.1902:__init__
    def __init__(self):
        AverageValuesEstimate.__init__(self,1)
    #@-node:gcross.20090827130017.1902:__init__
    #@-others
#@-node:gcross.20090827130017.1901:class SingleAverageValueEstimate
#@+node:gcross.20090827130017.1903:class EstimatesAppendedToFile
class EstimatesAppendedToFile(Observable):
    """Mixin that appends labelled (value, error) pairs to a text file."""
    #@    @+others
    #@+node:gcross.20090827130017.1904:__init__
    def __init__(self,filename,label):
        self.filename = filename
        self.label = label
    #@-node:gcross.20090827130017.1904:__init__
    #@+node:gcross.20090827130017.1905:write_out
    def write_out(self,total_estimate,total_estimate_error):
        """Append one line: the label followed by each value/error pair."""
        ensure_path_to_file_exists(self.filename)
        with open(self.filename,"a") as f:
            print >> f, self.label,
            for value, error in izip(total_estimate,total_estimate_error):
                print >> f, value, error
            print >> f
    #@-node:gcross.20090827130017.1905:write_out
    #@-others
#@-node:gcross.20090827130017.1903:class EstimatesAppendedToFile
#@+node:gcross.20090827130017.1906:class SingleAverageValueEstimateAppendedToFile
class SingleAverageValueEstimateAppendedToFile(SingleAverageValueEstimate,EstimatesAppendedToFile):
    """Scalar running average whose (value, error) pair is appended to a file."""
    #@    @+others
    #@+node:gcross.20090827130017.1907:__init__
    def __init__(self,filename,label):
        SingleAverageValueEstimate.__init__(self)
        EstimatesAppendedToFile.__init__(self,filename,label)
    #@-node:gcross.20090827130017.1907:__init__
    #@-others
#@-node:gcross.20090827130017.1906:class SingleAverageValueEstimateAppendedToFile
#@+node:gcross.20090827130017.1908:class SingleAverageValueAtSliceEstimateAppendedToFile
class SingleAverageValueAtSliceEstimateAppendedToFile(SingleAverageValueEstimateAppendedToFile):
    """A scalar estimate observed at one imaginary-time slice, appended to a file."""
    #@    @+others
    #@+node:gcross.20090827130017.1909:__init__
    def __init__(self,slice_number,label,filename):
        """Record the slice to observe and configure the output file.

        BUG FIX: the parent constructor signature is (filename, label), but
        the original call passed (label, filename), silently swapping the
        output filename and the line label (compare SliceEnergyEstimate,
        which passes them in the correct order).
        """
        SingleAverageValueEstimateAppendedToFile.__init__(self,filename,label)
        self.slice_number = slice_number
    #@-node:gcross.20090827130017.1909:__init__
    #@-others
#@-node:gcross.20090827130017.1908:class SingleAverageValueAtSliceEstimateAppendedToFile
#@-node:gcross.20090827130017.1896:Base classes
#@+node:gcross.20090827130017.1910:Histograms
#@+node:gcross.20090827130017.1911:class Histogram
class Histogram(Observable):
    """Base class for binned observables: per-processor integer counts are
    MPI-summed and written out as a normalized density, one bin per line."""
    #@    @+others
    #@+node:gcross.20090827130017.1912:compute_total
    def compute_total(self):
        """Sum the per-processor integer histograms into one total histogram."""
        total_histogram = zeros(self.histogram.shape,dtype='i',order='Fortran')
        comm.Reduce((self.histogram,MPI.INT),(total_histogram,MPI.INT))
        return total_histogram
    #@-node:gcross.20090827130017.1912:compute_total
    #@+node:gcross.20090827130017.1913:write_out_totals
    def write_out_totals(self,histogram):
        """Write one "bin-center fraction" line per bin to self.filename."""
        ensure_path_to_file_exists(self.filename)
        total_counts = float(sum(histogram))
        bin_width = float(self.right-self.left)/self.number_of_bins
        # Start at the center of the first bin.
        current = float(self.left)+bin_width/2
        with open(self.filename,"w") as f:
            for count in histogram:
                print >> f, "{0} {1}".format(current,count/total_counts)
                current += bin_width
    #@-node:gcross.20090827130017.1913:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1911:class Histogram
#@+node:gcross.20090827130017.1914:class PositionDensity1DHistogram
class PositionDensity1DHistogram(Histogram):
    """Per-axis 1D position densities: one histogram and output file per
    spatial dimension."""
    #@    @+others
    #@+node:gcross.20090827130017.1915:__init__
    def __init__(self,slice_number,left,right,number_of_bins,filenames):
        """left/right give per-dimension bounds; filenames has one entry per dimension."""
        assert len(left) == len(right)
        self.left = left
        self.right = right
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((len(left),number_of_bins),dtype='i',order='Fortran')
        self.filenames = filenames
    #@-node:gcross.20090827130017.1915:__init__
    #@+node:gcross.20090827130017.1916:update
    def update(self):
        """Bin the particle coordinates of the observed slice (in-place Fortran call)."""
        vpi.histograms.accumulate_1d_densities(
            self.system.x[self.slice_number],
            self.left,self.right,
            self.histogram
        )
    #@-node:gcross.20090827130017.1916:update
    #@+node:gcross.20090827130017.1917:write_out_totals
    def write_out_totals(self,histograms):
        """Write one normalized density file per dimension."""
        for filename, histogram, left, right in izip(self.filenames,histograms,self.left,self.right):
            ensure_path_to_file_exists(filename)
            with open(filename,"w") as f:
                total_counts = float(sum(histogram))
                bin_width = float(right-left)/self.number_of_bins
                current = float(left)+bin_width/2
                for count in histogram:
                    print >> f, "{0} {1}".format(current,count/total_counts)
                    current += bin_width
    #@-node:gcross.20090827130017.1917:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1914:class PositionDensity1DHistogram
#@+node:gcross.20090827130017.1918:class RadialDensityHistogram
class RadialDensityHistogram(Histogram):
    """Histogram of particle distances from the origin at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1919:(fields)
    # Radial bins always start at zero.
    left = 0
    #@-node:gcross.20090827130017.1919:(fields)
    #@+node:gcross.20090827130017.1920:__init__
    def __init__(self,slice_number,maximum_radius,number_of_bins,filename):
        self.right = self.maximum_radius = maximum_radius
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1920:__init__
    #@+node:gcross.20090827130017.1921:update
    def update(self):
        """Bin radial distances for the observed slice (in-place Fortran call)."""
        vpi.histograms.accumulate_radial_densities(
            self.system.x[self.slice_number],
            self.maximum_radius,
            self.histogram
        )
    #@-node:gcross.20090827130017.1921:update
    #@-others
#@-node:gcross.20090827130017.1918:class RadialDensityHistogram
#@+node:gcross.20090827130017.1922:class PlaneRadialDensityHistogram
class PlaneRadialDensityHistogram(Histogram):
    """Histogram of particle distances from the rotation axis (radius measured
    within the rotation plane)."""
    #@    @+others
    #@+node:gcross.20090827130017.1923:(fields)
    # Radial bins always start at zero.
    left = 0
    #@-node:gcross.20090827130017.1923:(fields)
    #@+node:gcross.20090827130017.1924:__init__
    def __init__(self,slice_number,maximum_radius,number_of_bins,filename):
        self.right = self.maximum_radius = maximum_radius
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1924:__init__
    #@+node:gcross.20090827130017.1925:update
    def update(self):
        """Bin in-plane radii for the observed slice (in-place Fortran call)."""
        vpi.histograms.accumulate_plane_radial_densities(
            self.system.x[self.slice_number],
            self.maximum_radius,
            self.system.rotation_plane_axis_1,
            self.system.rotation_plane_axis_2,
            self.histogram
        )
    #@-node:gcross.20090827130017.1925:update
    #@-others
#@-node:gcross.20090827130017.1922:class PlaneRadialDensityHistogram
#@+node:gcross.20090827130017.1926:class RecipricalRadiusSquaredDensityHistogram
class RecipricalRadiusSquaredDensityHistogram(Histogram):
    """Histogram of 1/r**2 values (reciprocal squared distance from origin)."""
    #@    @+others
    #@+node:gcross.20090827130017.1927:(fields)
    # Bins start at zero.
    left = 0
    #@-node:gcross.20090827130017.1927:(fields)
    #@+node:gcross.20090827130017.1928:__init__
    def __init__(self,slice_number,maximum_value,number_of_bins,filename):
        self.right = self.maximum_value = maximum_value
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1928:__init__
    #@+node:gcross.20090827130017.1929:update
    def update(self):
        """Bin 1/r**2 for the observed slice (in-place Fortran call)."""
        vpi.histograms.accumulate_reciprical_radius_squared_densities(
            self.system.x[self.slice_number],
            self.maximum_value,
            self.histogram
        )
    #@-node:gcross.20090827130017.1929:update
    #@-others
#@-node:gcross.20090827130017.1926:class RecipricalRadiusSquaredDensityHistogram
#@+node:gcross.20090827130017.1930:class RecipricalPlaneRadiusSquaredDensityHistogram
class RecipricalPlaneRadiusSquaredDensityHistogram(Histogram):
    """Histogram of 1/r**2 where r is measured within the rotation plane."""
    #@    @+others
    #@+node:gcross.20090827130017.1931:(fields)
    # Bins start at zero.
    left = 0
    #@-node:gcross.20090827130017.1931:(fields)
    #@+node:gcross.20090827130017.1932:__init__
    def __init__(self,slice_number,maximum_value,number_of_bins,filename):
        self.right = self.maximum_value = maximum_value
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1932:__init__
    #@+node:gcross.20090827130017.1933:update
    def update(self):
        """Bin in-plane 1/r**2 for the observed slice (in-place Fortran call)."""
        vpi.histograms.accumulate_recip_plane_r_sq_densities(
            self.system.x[self.slice_number],
            self.maximum_value,
            self.system.rotation_plane_axis_1,
            self.system.rotation_plane_axis_2,
            self.histogram
        )
    #@-node:gcross.20090827130017.1933:update
    #@+node:gcross.20090827130017.1934:write_out_totals
    def write_out_totals(self,histogram):
        """Write the normalized density of the MPI-reduced totals.

        BUG FIX: iterate the reduced `histogram` argument. The original looped
        over self.histogram (this processor's local counts) while dividing by
        the *global* total, skewing every bin whenever more than one processor
        contributed (the base class Histogram does this correctly).
        """
        ensure_path_to_file_exists(self.filename)
        with open(self.filename,"w") as f:
            total_counts = float(sum(histogram))
            bin_width = float(self.maximum_value)/self.number_of_bins
            current = bin_width/2
            for count in histogram:
                print >> f, "{0} {1}".format(current,count/total_counts)
                current += bin_width
    #@-node:gcross.20090827130017.1934:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1930:class RecipricalPlaneRadiusSquaredDensityHistogram
#@+node:gcross.20090827130017.1935:class AngularSeparationDensityHistogram
class AngularSeparationDensityHistogram(Histogram):
    """Histogram of pairwise angular separations (0..2*pi) in the rotation plane."""
    #@    @+others
    #@+node:gcross.20090827130017.1936:(fields)
    left = 0
    right = 2*pi
    #@-node:gcross.20090827130017.1936:(fields)
    #@+node:gcross.20090827130017.1937:__init__
    def __init__(self,slice_number,number_of_bins,filename):
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1937:__init__
    #@+node:gcross.20090827130017.1938:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        # NOTE(review): sibling classes (e.g. RotationQuadraticTermHistogram)
        # index the plane axes with a -1 offset; confirm whether the axis
        # attributes are 0- or 1-based here.
        angles = arctan2(x[:,system.rotation_plane_axis_2],x[:,system.rotation_plane_axis_1])
        vpi.histograms.accumulate_angular_separation_densities(
            angles,
            self.histogram
        )
    #@-node:gcross.20090827130017.1938:update
    #@+node:gcross.20090827130017.1939:write_out_totals
    def write_out_totals(self,histogram):
        """Write the normalized density of the MPI-reduced totals.

        BUG FIX: iterate the reduced `histogram` argument. The original looped
        over self.histogram (local counts) while normalizing by the global
        total, skewing every bin under MPI.
        """
        ensure_path_to_file_exists(self.filename)
        with open(self.filename,"w") as f:
            total_counts = float(sum(histogram))
            bin_width = float(2*pi)/self.number_of_bins
            current = bin_width/2
            for count in histogram:
                print >> f, "{0} {1}".format(current,count/total_counts)
                current += bin_width
    #@-node:gcross.20090827130017.1939:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1935:class AngularSeparationDensityHistogram
#@+node:gcross.20090827130017.1940:class NeighborAngularSeparationDensityHistogram
class NeighborAngularSeparationDensityHistogram(Histogram):
    """Histogram of angular separations between neighboring particles (0..2*pi)."""
    #@    @+others
    #@+node:gcross.20090827130017.1941:(fields)
    left = 0
    right = 2*pi
    #@-node:gcross.20090827130017.1941:(fields)
    #@+node:gcross.20090827130017.1942:__init__
    def __init__(self,slice_number,number_of_bins,filename):
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1942:__init__
    #@+node:gcross.20090827130017.1943:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        # NOTE(review): no -1 offset on the axis attributes here, unlike
        # RotationQuadraticTermHistogram — confirm the intended index base.
        angles = arctan2(x[:,system.rotation_plane_axis_2],x[:,system.rotation_plane_axis_1])
        vpi.histograms.accumulate_neighbor_angular_separation_densities(
            angles,
            self.histogram
        )
    #@-node:gcross.20090827130017.1943:update
    #@-others
#@-node:gcross.20090827130017.1940:class NeighborAngularSeparationDensityHistogram
#@+node:gcross.20090827130017.1944:class AngularVelocityHistogram
class AngularVelocityHistogram(Histogram):
    """Histogram of per-particle angular velocities at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1945:__init__
    def __init__(self,slice_number,left,right,number_of_bins,filename):
        self.left = left
        self.right = right
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1945:__init__
    #@+node:gcross.20090827130017.1946:update
    def update(self):
        """Bin the first angular derivatives computed by the Fortran kernel."""
        system = self.system
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            system.x[self.slice_number],
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        vpi.histograms.accumulate(first_derivatives,self.left,self.right,self.histogram)
    #@-node:gcross.20090827130017.1946:update
    #@-others
#@-node:gcross.20090827130017.1944:class AngularVelocityHistogram
#@+node:gcross.20090827130017.1947:class AngularVelocitySquaredHistogram
class AngularVelocitySquaredHistogram(Histogram):
    """Histogram of squared per-particle angular velocities at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1948:__init__
    def __init__(self,slice_number,left,right,number_of_bins,filename):
        self.left = left
        self.right = right
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1948:__init__
    #@+node:gcross.20090827130017.1949:update
    def update(self):
        """Square the angular derivatives (in place; the array is local) and bin them."""
        system = self.system
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            system.x[self.slice_number],
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        first_derivatives **= 2
        vpi.histograms.accumulate(first_derivatives,self.left,self.right,self.histogram)
    #@-node:gcross.20090827130017.1949:update
    #@-others
#@-node:gcross.20090827130017.1947:class AngularVelocitySquaredHistogram
#@+node:gcross.20090827130017.1950:class RotationQuadraticTermHistogram
class RotationQuadraticTermHistogram(Histogram):
    """Histogram of the quadratic rotation term: (angular velocity)**2 divided
    by the squared in-plane radius, per particle at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1951:__init__
    def __init__(self,slice_number,left,right,number_of_bins,filename):
        self.left = left
        self.right = right
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,),dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1951:__init__
    #@+node:gcross.20090827130017.1952:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            x,
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        # NOTE(review): the plane axes are indexed with a -1 offset here but
        # without one in AngularSeparationDensityHistogram — confirm which
        # index base (0- or 1-based, Fortran-style) is intended.
        term = (first_derivatives ** 2) / (x[:,system.rotation_plane_axis_2-1]**2+x[:,system.rotation_plane_axis_1-1]**2)
        vpi.histograms.accumulate(term,self.left,self.right,self.histogram)
    #@-node:gcross.20090827130017.1952:update
    #@-others
#@-node:gcross.20090827130017.1950:class RotationQuadraticTermHistogram
#@+node:gcross.20090827130017.1953:class AngularVelocityAndRadiusHistogram
class AngularVelocityAndRadiusHistogram(Histogram):
    """Joint 2D histogram of each particle's angular velocity and its radius
    within the rotation plane."""
    #@    @+others
    #@+node:gcross.20090827130017.1954:__init__
    def __init__(self,slice_number,maximum_angular_velocity,maximum_radius,number_of_bins,filename):
        self.maximum_angular_velocity = maximum_angular_velocity
        self.maximum_radius = maximum_radius
        self.number_of_bins = number_of_bins
        self.slice_number = slice_number
        self.histogram = zeros((number_of_bins,)*2,dtype='i',order='Fortran')
        self.filename = filename
    #@-node:gcross.20090827130017.1954:__init__
    #@+node:gcross.20090827130017.1955:update
    def update(self):
        """Bin (angular velocity, in-plane radius) pairs for the observed slice."""
        system = self.system
        x = system.x[self.slice_number]
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            x,
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        # BUG FIX: select the rotation-plane *columns* for every particle
        # (x[:,axis-1]), matching RotationQuadraticTermHistogram; the original
        # x[axis-1] picked out whole particle rows instead.
        radii = sqrt(x[:,system.rotation_plane_axis_2-1]**2+x[:,system.rotation_plane_axis_1-1]**2)
        # Cast the floored bin coordinates to integers so they can be used as
        # array indices (floor() returns floats, which numpy rejects as indices).
        i_values = floor(first_derivatives/self.maximum_angular_velocity*self.number_of_bins).astype(int)
        j_values = floor(radii/self.maximum_radius*self.number_of_bins).astype(int)
        for (i,j) in izip(i_values,j_values):
            if (i >= 0) and (i < self.number_of_bins) and (j >= 0) and (j < self.number_of_bins):
                self.histogram[i,j] += 1
    #@-node:gcross.20090827130017.1955:update
    #@+node:gcross.20090827130017.1956:write_out_totals
    def write_out_totals(self,histogram):
        """Write the normalized 2D density as 'velocity radius fraction' rows,
        with a blank line between velocity bins (gnuplot splot layout)."""
        ensure_path_to_file_exists(self.filename)
        total_counts = float(sum(histogram))
        with open(self.filename,"w") as f:
            for i in xrange(self.number_of_bins):
                for j in xrange(self.number_of_bins):
                    angular_velocity = (i+0.5)/self.number_of_bins*self.maximum_angular_velocity
                    radius = (j+0.5)/self.number_of_bins*self.maximum_radius
                    print >> f, "{0} {1} {2}".format(angular_velocity,radius,histogram[i,j]/total_counts)
                print >> f, ""
    #@-node:gcross.20090827130017.1956:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1953:class AngularVelocityAndRadiusHistogram
#@-node:gcross.20090827130017.1910:Histograms
#@+node:gcross.20090827130017.1957:Energy estimates
#@+node:gcross.20090827130017.1958:class TotalEnergyEstimate
class TotalEnergyEstimate(SingleAverageValueEstimateAppendedToFile):
    """Local-energy estimator evaluated at both ends of the path (slices 0 and -1)."""
    #@    @+others
    #@+node:gcross.20090827130017.1960:update
    def update(self):
        """Add the local energy estimate from the first and last slices."""
        system = self.system
        for slice_number in [0,-1]:
            gradient_of_log_trial_fn, laplacian_of_log_trial_fn = system.compute_trial_derivatives(system.x[slice_number],system.xij2[slice_number])
            self.add(
                vpi.observables.compute_local_energy_estimate(
                    system.U[slice_number],
                    gradient_of_log_trial_fn, laplacian_of_log_trial_fn,
                    system.lambda_,
                )
            )
    #@-node:gcross.20090827130017.1960:update
    #@-others
#@-node:gcross.20090827130017.1958:class TotalEnergyEstimate
#@+node:gcross.20090827130017.1962:Slice estimates
#@+node:gcross.20090827130017.1963:class SliceEnergyEstimate
class SliceEnergyEstimate(SingleAverageValueEstimateAppendedToFile):
    """Base class for scalar energy estimates observed at one imaginary-time slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1964:__init__
    def __init__(self,slice_number,filename,label):
        SingleAverageValueEstimateAppendedToFile.__init__(self,filename,label)
        self.slice_number = slice_number
    #@-node:gcross.20090827130017.1964:__init__
    #@-others
#@-node:gcross.20090827130017.1963:class SliceEnergyEstimate
#@+node:gcross.20090827130017.1965:class EffectivePotentialSliceEnergyEstimate
class EffectivePotentialSliceEnergyEstimate(SliceEnergyEstimate):
    """Effective rotational potential energy, summed over particles at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1966:update
    def update(self):
        system = self.system
        # Scratch arrays (single slice, Fortran-ordered) filled in place by
        # the Fortran routine below.
        U = zeros((1,system.number_of_particles),dtype=double,order='Fortran')
        gradU = zeros((1,system.number_of_particles,system.number_of_dimensions),dtype=double,order='Fortran')
        vpi.angular_momentum.compute_effective_rotational_potential(
            system.x[self.slice_number:self.slice_number+1],system.lambda_,
            system.rotation_plane_axis_1,system.rotation_plane_axis_2,
            system.frame_angular_velocity,system.number_of_rotating_particles,
            U, gradU
        )
        self.add(sum(U))
    #@-node:gcross.20090827130017.1966:update
    #@-others
#@-node:gcross.20090827130017.1965:class EffectivePotentialSliceEnergyEstimate
#@+node:gcross.20090827130017.1967:class PhysicalPotentialSliceEnergyEstimate
class PhysicalPotentialSliceEnergyEstimate(SliceEnergyEstimate):
    """Harmonic trap potential (1/2) * sum_i sum_d c_d * x_{i,d}**2 at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1968:update
    def update(self):
        self.add(sum(dot(self.system.x[self.slice_number]**2,self.system.harmonic_oscillator_coefficients))/2.0)
    #@-node:gcross.20090827130017.1968:update
    #@-others
#@-node:gcross.20090827130017.1967:class PhysicalPotentialSliceEnergyEstimate
#@+node:gcross.20090827130017.1969:class TotalPotentialSliceEnergyEstimate
class TotalPotentialSliceEnergyEstimate(SliceEnergyEstimate):
    """Total potential energy (all terms already accumulated in system.U) at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1970:update
    def update(self):
        self.add(sum(self.system.U[self.slice_number]))
    #@-node:gcross.20090827130017.1970:update
    #@-others
#@-node:gcross.20090827130017.1969:class TotalPotentialSliceEnergyEstimate
#@-node:gcross.20090827130017.1962:Slice estimates
#@+node:gcross.20090827130017.1971:Path estimates
#@+node:gcross.20090827130017.1972:class PathEnergyEstimates
class PathEnergyEstimates(AverageValuesEstimate):
    """Per-slice energy estimates accumulated along the whole path."""
    #@    @+others
    #@+node:gcross.20090827130017.1973:(fields)
    # Starts as the scalar 0; the first `self.estimates += <array>` in a
    # subclass update() rebinds it to a per-slice array on the instance.
    estimates = 0
    #@-node:gcross.20090827130017.1973:(fields)
    #@+node:gcross.20090827130017.1974:__init__
    def __init__(self,filename):
        # NOTE(review): AverageValuesEstimate.__init__ is not called, so
        # self.estimate / self.estimate_squared are never allocated — verify
        # that compute_total is provided elsewhere for this family.
        self.filename = filename
    #@-node:gcross.20090827130017.1974:__init__
    #@+node:gcross.20090827130017.1975:write_out_totals
    def write_out_totals(self,total_estimates):
        """Write one line per slice: folded distance from the center slice,
        the estimate, and the raw slice index."""
        ensure_path_to_file_exists(self.filename)
        center_slice_number = self.system.center_slice_number
        with open(self.filename,"w") as f:
            for slice_number, estimate in enumerate(total_estimates):
                print >> f, center_slice_number-abs(center_slice_number-slice_number), estimate, slice_number
    #@-node:gcross.20090827130017.1975:write_out_totals
    #@-others
#@-node:gcross.20090827130017.1972:class PathEnergyEstimates
#@+node:gcross.20090827130017.1976:class EffectivePotentialPathEnergyEstimates
class EffectivePotentialPathEnergyEstimates(PathEnergyEstimates):
    """Accumulates, per slice, the effective rotational potential energy."""
    #@    @+others
    #@+node:gcross.20090827130017.1977:update
    def update(self):
        system = self.system
        # Scratch arrays for the Fortran kernel; it accumulates into U in place.
        U = zeros((system.number_of_slices,system.number_of_particles),dtype=double,order='Fortran')
        gradU = zeros((system.number_of_slices,system.number_of_particles,system.number_of_dimensions),dtype=double,order='Fortran')
        vpi.angular_momentum.compute_effective_rotational_potential(
            system.x,system.lambda_,
            system.rotation_plane_axis_1,system.rotation_plane_axis_2,
            system.frame_angular_velocity,system.number_of_rotating_particles,
            U, gradU
        )
        # Sum over particles, keeping the per-slice resolution.
        self.estimates += sum(U,axis=-1)
    #@-node:gcross.20090827130017.1977:update
    #@-others
#@-node:gcross.20090827130017.1976:class EffectivePotentialPathEnergyEstimates
#@+node:gcross.20090827130017.1978:class PhysicalPotentialPathEnergyEstimates
class PhysicalPotentialPathEnergyEstimates(PathEnergyEstimates):
    """Accumulates, per slice, the bare harmonic-trap potential energy."""
    #@    @+others
    #@+node:gcross.20090827130017.1979:update
    def update(self):
        # Per-slice (1/2) c . x^2 summed over particles.
        self.estimates += sum(dot(self.system.x**2,self.system.harmonic_oscillator_coefficients),axis=-1)/2.0
    #@-node:gcross.20090827130017.1979:update
    #@-others
#@-node:gcross.20090827130017.1978:class PhysicalPotentialPathEnergyEstimates
#@+node:gcross.20090827130017.1980:class TotalPotentialPathEnergyEstimates
class TotalPotentialPathEnergyEstimates(PathEnergyEstimates):
    """Accumulates, per slice, the full potential energy stored in system.U."""
    #@    @+others
    #@+node:gcross.20090827130017.1981:update
    def update(self):
        self.estimates += sum(self.system.U,axis=-1)
    #@-node:gcross.20090827130017.1981:update
    #@-others
#@-node:gcross.20090827130017.1980:class TotalPotentialPathEnergyEstimates
#@-node:gcross.20090827130017.1971:Path estimates
#@-node:gcross.20090827130017.1957:Energy estimates
#@+node:gcross.20090827130017.1982:Position estimates
#@+node:gcross.20090827130017.1983:class AveragePositionEstimate
class AveragePositionEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Marker base class for single-slice position observables."""
    pass
#@-node:gcross.20090827130017.1983:class AveragePositionEstimate
#@+node:gcross.20090827130017.1984:class AverageAxialCoordinateEstimate
class AverageAxialCoordinateEstimate(AveragePositionEstimate):
    """Average of one Cartesian coordinate over all particles at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1985:__init__
    def __init__(self,axis,slice_number,filename,label):
        AveragePositionEstimate.__init__(self,slice_number,filename,label)
        # 0-based index into the coordinate (last) dimension of system.x.
        self.axis = axis
    #@-node:gcross.20090827130017.1985:__init__
    #@+node:gcross.20090827130017.1986:update
    def update(self):
        self.add(average(self.system.x[self.slice_number,:,self.axis]))
    #@-node:gcross.20090827130017.1986:update
    #@-others
#@-node:gcross.20090827130017.1984:class AverageAxialCoordinateEstimate
#@+node:gcross.20090827130017.1987:class AverageAxialDistanceEstimate
class AverageAxialDistanceEstimate(AveragePositionEstimate):
    """Average absolute value of one Cartesian coordinate at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1988:__init__
    def __init__(self,axis,slice_number,filename,label):
        AveragePositionEstimate.__init__(self,slice_number,filename,label)
        # 0-based index into the coordinate (last) dimension of system.x.
        self.axis = axis
    #@-node:gcross.20090827130017.1988:__init__
    #@+node:gcross.20090827130017.1989:update
    def update(self):
        self.add(average(abs(self.system.x[self.slice_number,:,self.axis])))
    #@-node:gcross.20090827130017.1989:update
    #@-others
#@-node:gcross.20090827130017.1987:class AverageAxialDistanceEstimate
#@+node:gcross.20090827130017.1990:class AverageRadiusEstimate
class AverageRadiusEstimate(AveragePositionEstimate):
    """Average distance of the particles from the origin at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.1991:update
    def update(self):
        self.add(vpi.observables.compute_radius_average(self.system.x[self.slice_number]))
    #@-node:gcross.20090827130017.1991:update
    #@-others
#@-node:gcross.20090827130017.1990:class AverageRadiusEstimate
#@+node:gcross.20090827130017.1992:class AveragePlaneRadiusEstimate
class AveragePlaneRadiusEstimate(AveragePositionEstimate):
    """Average in-plane radius at one slice for the plane spanned by two axes.

    Axis arguments are 1-based (Fortran convention of the vpi kernels).
    """
    #@    @+others
    #@+node:gcross.20090827130017.1993:__init__
    def __init__(self,plane_axis_1,plane_axis_2,slice_number,filename,label):
        AveragePositionEstimate.__init__(self,slice_number,filename,label)
        # The two plane axes must be distinct and 1-based.
        assert plane_axis_1 >= 1
        assert plane_axis_2 >= 1
        assert not (plane_axis_1 == plane_axis_2)
        self.plane_axis_1 = plane_axis_1
        self.plane_axis_2 = plane_axis_2
    #@-node:gcross.20090827130017.1993:__init__
    #@+node:gcross.20090827130017.1994:update
    def update(self):
        self.add( vpi.observables.compute_plane_radius_average(self.system.x[self.slice_number],self.plane_axis_1,self.plane_axis_2))
    #@-node:gcross.20090827130017.1994:update
    #@-others
#@-node:gcross.20090827130017.1992:class AveragePlaneRadiusEstimate
#@+node:gcross.20090827130017.1995:class AverageRecipricalPlaneRadiusSquaredEstimate
class AverageRecipricalPlaneRadiusSquaredEstimate(AveragePositionEstimate):
    """Average reciprocal of the squared in-plane radius.

    NOTE(review): unlike the other slice estimates, update() passes the whole
    path self.system.x rather than x[self.slice_number] — confirm this is
    intentional.
    """
    #@    @+others
    #@+node:gcross.20090827130017.1996:__init__
    def __init__(self,plane_axis_1,plane_axis_2,slice_number,filename,label):
        AveragePositionEstimate.__init__(self,slice_number,filename,label)
        # The two plane axes must be distinct and 1-based.
        assert plane_axis_1 >= 1
        assert plane_axis_2 >= 1
        assert not (plane_axis_1 == plane_axis_2)
        self.plane_axis_1 = plane_axis_1
        self.plane_axis_2 = plane_axis_2
    #@-node:gcross.20090827130017.1996:__init__
    #@+node:gcross.20090827130017.1997:update
    def update(self):
        self.add(vpi.observables.compute_recip_plane_r_sq_average(self.system.x,self.plane_axis_1,self.plane_axis_2))
    #@-node:gcross.20090827130017.1997:update
    #@-others
#@-node:gcross.20090827130017.1995:class AverageRecipricalPlaneRadiusSquaredEstimate
#@-node:gcross.20090827130017.1982:Position estimates
#@+node:gcross.20090827130017.1998:Rotation related estimates
#@+node:gcross.20090827130017.1999:class AverageAngularVelocityEstimate
class AverageAngularVelocityEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Average first angular derivative (angular velocity) at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.2000:update
    def update(self):
        system = self.system
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            system.x[self.slice_number],
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        self.add(average(first_derivatives))
    #@-node:gcross.20090827130017.2000:update
    #@-others
#@-node:gcross.20090827130017.1999:class AverageAngularVelocityEstimate
#@+node:gcross.20090827130017.2001:class AverageAngularVelocitySquaredEstimate
class AverageAngularVelocitySquaredEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Average squared angular velocity at one slice."""
    #@    @+others
    #@+node:gcross.20090827130017.2002:update
    def update(self):
        system = self.system
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            system.x[self.slice_number],
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        # Square in place before averaging.
        first_derivatives **= 2
        self.add(average(first_derivatives))
    #@-node:gcross.20090827130017.2002:update
    #@-others
#@-node:gcross.20090827130017.2001:class AverageAngularVelocitySquaredEstimate
#@+node:gcross.20090827130017.2003:class AverageRotationQuadraticTermEstimate
class AverageAngularVelocitySquaredEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """NOTE(review): dead code.  This is a byte-for-byte duplicate of
    AverageAngularVelocitySquaredEstimate above and shadows it; the enclosing
    Leo node is titled "AverageRotationQuadraticTermEstimate", which is
    defined (differently) further below.  Kept unchanged to avoid altering
    which name resolves where."""
    #@    @+others
    #@+node:gcross.20090827130017.2004:update
    def update(self):
        system = self.system
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            system.x[self.slice_number],
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        first_derivatives **= 2
        self.add(average(first_derivatives))
    #@-node:gcross.20090827130017.2004:update
    #@-others
#@-node:gcross.20090827130017.2003:class AverageRotationQuadraticTermEstimate
#@+node:gcross.20090827130017.2008:class AverageAngularSeparationEstimate
class AverageAngularSeparationEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Average pairwise angular separation of particles in the rotation plane."""
    #@    @+others
    #@+node:gcross.20090827130017.2009:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        # NOTE(review): the plane axes are used unshifted here, while
        # AverageRotationQuadraticTermEstimate subtracts 1 — verify indexing.
        angles = arctan2(x[:,system.rotation_plane_axis_2],x[:,system.rotation_plane_axis_1])
        self.add(vpi.observables.compute_average_angular_separation(angles))
    #@-node:gcross.20090827130017.2009:update
    #@-others
#@-node:gcross.20090827130017.2008:class AverageAngularSeparationEstimate
#@+node:gcross.20090827130017.2010:class AverageNeighborAngularSeparationEstimate
class AverageNeighborAngularSeparationEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Average angular separation between neighboring particles in the rotation plane."""
    #@    @+others
    #@+node:gcross.20090827130017.2011:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        angles = arctan2(x[:,system.rotation_plane_axis_2],x[:,system.rotation_plane_axis_1])
        self.add(vpi.observables.compute_avg_neighbor_angular_sep(angles))
    #@-node:gcross.20090827130017.2011:update
    #@-others
#@-node:gcross.20090827130017.2010:class AverageNeighborAngularSeparationEstimate
#@+node:gcross.20090827130017.2012:class AverageRotationQuadraticTermEstimate
class AverageRotationQuadraticTermEstimate(SingleAverageValueAtSliceEstimateAppendedToFile):
    """Average of (angular velocity)^2 / r_plane^2 — the quadratic rotation term."""
    #@    @+others
    #@+node:gcross.20090827130017.2013:update
    def update(self):
        system = self.system
        x = system.x[self.slice_number]
        first_derivatives, _ = vpi.angular_momentum.compute_angular_derivatives(
            x,
            system.rotation_plane_axis_1, system.rotation_plane_axis_2,
            system.number_of_rotating_particles
        )
        # Plane axes are 1-based, hence the -1 when indexing the numpy array.
        term = (first_derivatives ** 2) / (x[:,system.rotation_plane_axis_2-1]**2+x[:,system.rotation_plane_axis_1-1]**2)
        self.add(average(term))
    #@-node:gcross.20090827130017.2013:update
    #@-others
#@-node:gcross.20090827130017.2012:class AverageRotationQuadraticTermEstimate
#@-node:gcross.20090827130017.1998:Rotation related estimates
#@-others
#@-node:gcross.20090827130017.1895:Observable classes
#@+node:gcross.20090827130017.1734:Functions
#@+node:gcross.20090827130017.1735:ensure_path_to_file_exists
def ensure_path_to_file_exists(path):
    """Create the directory portion of *path* if it is not already present."""
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
#@-node:gcross.20090827130017.1735:ensure_path_to_file_exists
#@-node:gcross.20090827130017.1734:Functions
#@+node:gcross.20090827130017.1736:class System
class System:
    """Driver for one path-integral Monte Carlo run of harmonically trapped,
    (possibly) rotating particles.

    All configuration (slice/particle/dimension counts, trap coefficients,
    move parameters, ...) is supplied as keyword arguments to __init__ and
    stored directly on the instance.
    """
    #@    @+others
    #@+node:gcross.20090827130017.1737:Physics Functions
    #@+node:gcross.20090827130017.1738:compute_potential
    def compute_potential(self,x,_):
        # Harmonic trap energy per slice/particle, plus the effective
        # rotational potential accumulated into U in place by the kernel.
        # Returns (U, gradU2, reject_flag); gradU2 is all zeros here.
        x_sq = x**2
        U = array(dot(x_sq,self.harmonic_oscillator_coefficients)/2.0,dtype=double,order='Fortran')
        number_of_slices = x.shape[0]
        vpi.angular_momentum.accumulate_effective_potential2(
            x,self.lambda_,
            self.rotation_plane_axis_1,self.rotation_plane_axis_2,
            self.frame_angular_velocity,self.number_of_rotating_particles,
            U
        )
        gradU2 = zeros((number_of_slices,),dtype=double,order='Fortran')
        return U, gradU2, False
    #@-node:gcross.20090827130017.1738:compute_potential
    #@+node:gcross.20090827130017.1739:compute_trial
    def compute_trial(self,x,_):
        # Log of the gaussian trial wavefunction at configuration x.
        return -sum(dot(x**2,self.harmonic_oscillator_coefficients))/2
    #@-node:gcross.20090827130017.1739:compute_trial
    #@+node:gcross.20090827130017.1740:compute_trial_derivatives
    def compute_trial_derivatives(self,x,xij2):
        # Gradient and laplacian of the log trial function; xij2 is unused
        # because this trial function has no pair correlations.
        gradient_of_log_trial_fn = -x*self.harmonic_oscillator_coefficients
        laplacian_of_log_trial_fn = -sum(self.harmonic_oscillator_coefficients)*x.shape[0]
        return gradient_of_log_trial_fn, laplacian_of_log_trial_fn
    #@-node:gcross.20090827130017.1740:compute_trial_derivatives
    #@+node:gcross.20090828171041.1863:compute_greens_function
    def compute_greens_function(self,x,xij2,U,gradU2,lam,dt,slice_start,slice_end,particle_number):
        # Short-time Green's function over the given slice range.
        return vpi.gfn.gfn2_sp(slice_start,slice_end,particle_number,U,dt)
    #@-node:gcross.20090828171041.1863:compute_greens_function
    #@-node:gcross.20090827130017.1737:Physics Functions
    #@+node:gcross.20090827130017.1741:Observable management
    #@+node:gcross.20090827130017.1742:add_observable
    def add_observable(self,observable):
        # Register an observable and hand it a back-reference to this system.
        self.observables.append(observable)
        observable.system = self
    #@-node:gcross.20090827130017.1742:add_observable
    #@+node:gcross.20090827130017.1743:total_and_write_observables
    def total_and_write_observables(self):
        # Finalize every registered observable (totals + file output).
        for observable in self.observables:
            observable.total_and_write()
    #@-node:gcross.20090827130017.1743:total_and_write_observables
    #@-node:gcross.20090827130017.1741:Observable management
    #@+node:gcross.20090827130017.1744:__init__
    def __init__(self,**keywords):
        # All run parameters arrive as keywords and become attributes verbatim.
        self.__dict__.update(keywords)
        number_of_slices = self.number_of_slices
        number_of_particles = self.number_of_particles
        number_of_dimensions = self.number_of_dimensions
        # Seed the per-process RNG with the MPI rank so streams differ.
        vpi.rand_utils.init_seed(my_rank)
        self.x = vpi.lattice.make_lattice(1.0,number_of_slices,number_of_particles,number_of_dimensions)
        self.xij2 = zeros((number_of_slices,number_of_particles,number_of_particles),dtype=double,order='Fortran')
        vpi.xij.update_xij(self.xij2,self.x)
        self.U = zeros((number_of_slices,number_of_particles),dtype=double,order='Fortran')
        self.gradU2 = zeros((number_of_slices),dtype=double,order='Fortran')
        # Per-slice / per-move-type acceptance bookkeeping.
        self.slice_move_attempted_counts = zeros((number_of_slices,),'i')
        self.slice_move_accepted_counts = zeros((number_of_slices,),'i')
        self.move_type_attempted_counts = zeros((3,),'i')
        self.move_type_accepted_counts = zeros((3,),'i')
        # Split the requested global observation count over the processes,
        # rounding the global total up to a multiple of the process count.
        self.number_of_observations = self.total_number_of_observations // number_of_processors + 1
        self.total_number_of_observations = self.number_of_observations * number_of_processors
        self.number_of_thermalizations_per_observation = number_of_particles * number_of_slices // self.dM
        # Requires number_of_slices = 2 (mod 4) so a center slice is well defined.
        assert (number_of_slices % 2 == 0 and number_of_slices % 4 == 2)
        self.center_slice_number = number_of_slices // 2
        self.observables = []
    #@-node:gcross.20090827130017.1744:__init__
    #@+node:gcross.20090827130017.1745:run
    def run(self):
        # Prethermalize, then alternate MC sweeps with observable updates.
        #@    << Stash properties into local variables >>
        #@+node:gcross.20090827130017.1746:<< Stash properties into local variables >>
        x = self.x
        xij2 = self.xij2
        U = self.U
        gradU2 = self.gradU2
        number_of_prethermalization_steps = self.number_of_prethermalization_steps
        number_of_thermalizations_per_observation = self.number_of_thermalizations_per_observation
        move_type_probabilities = self.move_type_probabilities
        move_type_differentials = self.move_type_differentials
        dM = self.dM
        lambda_ = self.lambda_
        low_swap_dimension = self.low_swap_dimension
        high_swap_dimension = self.high_swap_dimension
        slice_move_attempted_counts = self.slice_move_attempted_counts
        move_type_attempted_counts = self.move_type_attempted_counts
        slice_move_accepted_counts = self.slice_move_accepted_counts
        move_type_accepted_counts = self.move_type_accepted_counts
        compute_potential = self.compute_potential
        compute_trial = self.compute_trial
        compute_greens_function = self.compute_greens_function
        observables = self.observables
        #@-node:gcross.20090827130017.1746:<< Stash properties into local variables >>
        #@nl
        #@    << Prethermalize the system >>
        #@+node:gcross.20090827130017.1747:<< Prethermalize the system >>
        vpi.thermalize.thermalize_path(
            x,xij2,
            U,gradU2,
            number_of_prethermalization_steps,
            move_type_probabilities,move_type_differentials,
            dM,
            lambda_,
            low_swap_dimension,high_swap_dimension,
            slice_move_attempted_counts,move_type_attempted_counts,
            slice_move_accepted_counts,move_type_accepted_counts,
            compute_potential,compute_trial,compute_greens_function
        )
        #@nonl
        #@-node:gcross.20090827130017.1747:<< Prethermalize the system >>
        #@nl
        #@    << Main iteration >>
        #@+node:gcross.20090827130017.1748:<< Main iteration >>
        decile = self.number_of_observations // 10
        for number_completed in xrange(self.number_of_observations):
            vpi.thermalize.thermalize_path(
                x,xij2,
                U,gradU2,
                number_of_thermalizations_per_observation,
                move_type_probabilities,move_type_differentials,
                dM,
                lambda_,
                low_swap_dimension,high_swap_dimension,
                slice_move_attempted_counts,move_type_attempted_counts,
                slice_move_accepted_counts,move_type_accepted_counts,
                compute_potential,compute_trial,compute_greens_function,
            )
            # Let every registered observable sample the current path.
            for observable in observables:
                observable.update()
            # Progress report (root process only), once per decile.
            if (number_completed % decile == 0) and (my_rank == 0):
                print "{0:.0%} complete; local bridge move acceptance rate = {1:.0%}, local rigid move acceptance rate = {2:.0%}".format(
                    float(number_completed)/self.number_of_observations,
                    float(move_type_accepted_counts[0])/move_type_attempted_counts[0],
                    float(move_type_accepted_counts[1])/move_type_attempted_counts[1],
                )
        #@-node:gcross.20090827130017.1748:<< Main iteration >>
        #@nl
    #@-node:gcross.20090827130017.1745:run
    #@-others
#@-node:gcross.20090827130017.1736:class System
#@-others
#@-node:gcross.20090827130017.1614:@thin system.py
#@-leo
| [
"gcross@phys.washington.edu"
] | gcross@phys.washington.edu |
35c6e6afcf266be4f22ea078c86e88bb4c047b20 | cb1e4c175fa498778669ceee62dae642cd9efaf3 | /Deep_Learning_A_Z/Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 4 - Building an ANN/ann.py | 99e806d5f60ff981c533c6d44e6072d52ee9aa40 | [] | no_license | RakibulRanak/Deep-Learning | 3ec7ec5b5654174fd50cb99d61367c02a29e4e11 | 86c5e537ab05170ef775e2f3da31f4da61db15be | refs/heads/master | 2022-08-25T09:30:49.182700 | 2020-05-27T18:53:09 | 2020-05-27T18:53:09 | 266,579,349 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | # Part-1 -> data preprocessing
# Part 1 - Data preprocessing

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values  # predictor columns (credit score ... salary)
y = dataset.iloc[:, 13].values    # target: 'Exited' (1 = customer left)

# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer

# Gender column is binary, so a plain label encoding is enough.
labelencoder_X = LabelEncoder()
X[:, 2] = labelencoder_X.fit_transform(X[:, 2])

# Country column has more than two values: one-hot encode it, then drop the
# first dummy column to avoid the dummy-variable trap (k-1 dummies suffice).
ct = ColumnTransformer([("Country", OneHotEncoder(), [1])], remainder='passthrough')
X = ct.fit_transform(X)
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature Scaling (fit on the training set only to avoid data leakage)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Part 2 - Building the ANN
import keras
from keras.models import Sequential
from keras.layers import Dense

# Initializing the ANN
classifier = Sequential()
# Input layer + first hidden layer: 11 features -> 6 rectified units
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
# Second hidden layer
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
# Output layer: one sigmoid unit -> churn probability
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
# Compiling the ANN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Fitting the ANN to the Training set.
# BUG FIX: Keras' fit() takes `epochs`, not `nb_epochs` (which is not a valid
# keyword and raises a TypeError at runtime).
classifier.fit(X_train, y_train, batch_size=10, epochs=100)

# Part 3 - Making predictions and evaluating the model

# Predicting the Test set results: threshold the sigmoid output at 0.5
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
| [
"rakibulhasanranak1@gmail.com"
] | rakibulhasanranak1@gmail.com |
8f773f2400f616bab03e24d342fb8893ba99722c | 278efb37d60e1bdfc5e36ee5fbc8b0dd69239f41 | /projectimmo/streaming_app/migrations/0005_video_user.py | af7e8dee5992709932d0fb4fee576e313b4e6bae | [] | no_license | ChamHostile/streaming_christo | 469465ded163206198d4b600a1a323e947e2971f | f9af556f011ab8c876fffc7181ec508fbd627268 | refs/heads/main | 2023-07-15T08:11:25.098937 | 2021-09-03T13:39:41 | 2021-09-03T13:39:41 | 402,782,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # Generated by Django 3.1.6 on 2021-08-13 14:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add a nullable ``user`` foreign key to the Video model."""
    # Must run after the swappable user model exists and after the previous
    # streaming_app migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('streaming_app', '0004_video_file_name'),
    ]
    # null=True keeps existing Video rows valid; CASCADE removes a user's
    # videos when the user is deleted.
    operations = [
        migrations.AddField(
            model_name='video',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"hamza.aboudou@gmail.com"
] | hamza.aboudou@gmail.com |
db3ea413448855aaad55aeb94371debf6adb76ed | 9d0fa09b8b6e1754d4ec00d2c0fed02425beed77 | /classify_images.py | ec833b6d76211312aacf1f72f7240a72a4920c54 | [] | no_license | hardik632/pre-trained-image-classifer | 2b084c825cdff79bc2f8e19c78e0ee6c8fc9c371 | a367422363455282a947c0b927ef3c782fa2c1a1 | refs/heads/master | 2020-05-15T17:47:42.347030 | 2019-04-20T13:44:26 | 2019-04-20T13:44:26 | 182,410,878 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/classify_images.py
#
# PROGRAMMER: hardik dhiman
# DATE CREATED: 28-jan-2019
# REVISED DATE:
# PURPOSE: Create a function classify_images that uses the classifier function
# to create the classifier labels and then compares the classifier
# labels to the pet image labels. This function inputs:
# -The Image Folder as image_dir within classify_images and function
# and as in_arg.dir for function call within main.
# -The results dictionary as results_dic within classify_images
# function and results for the functin call within main.
# -The CNN model architecture as model wihtin classify_images function
# and in_arg.arch for the function call within main.
# This function uses the extend function to add items to the list
# that's the 'value' of the results dictionary. You will be adding the
# classifier label as the item at index 1 of the list and the comparison
# of the pet and classifier labels as the item at index 2 of the list.
#
##
# Imports classifier function for using CNN to classify images
from classifier import classifier
# TODO 3: Define classify_images function below, specifically replace the None
# below by the function definition of the classify_images function.
# Notice that this function doesn't return anything because the
# results_dic dictionary that is passed into the function is a mutable
# data type so no return is needed.
#
def classify_images(images_dir, results_dic, model):
    """Run the CNN classifier over every pet image and record the outcome.

    For each filename key in results_dic, the classifier label (lower-cased
    and whitespace-stripped) is appended at index 1 of the value list,
    followed at index 2 by 1 when the pet label (index 0) appears inside the
    classifier label and 0 otherwise.  Classifier labels may contain several
    comma-separated dog names for a single breed, which is why a substring
    test is used.

    Parameters:
      images_dir - path (including trailing separator) to the image folder
      results_dic - dict mapping image filename -> [pet_label, ...] list;
                    mutated in place
      model - CNN architecture for classifier(): 'resnet', 'alexnet' or 'vgg'

    Returns:
      results_dic - the same (mutated) dictionary
    """
    for filename, record in results_dic.items():
        raw_label = classifier(images_dir + filename, model)
        normalized = raw_label.strip().lower()
        pet_label = record[0]
        is_match = 1 if pet_label in normalized else 0
        record.extend((normalized, is_match))
    return results_dic
| [
"noreply@github.com"
] | noreply@github.com |
8db666da06657c0966f6541b5b98a80860b1b13b | c7eb5c81a4581f6bd7be32b23b14f3e1a36d51fd | /db_migrate.py | d757c37ff03068d8ee079bbe24f25a07ef437aa1 | [] | no_license | mchenco/blog | 5d9988bf3906a8a4559db0eaf33134be75a35602 | fd39544db86f9d45168e6c25b1771a7edaf51a34 | refs/heads/master | 2016-09-13T09:44:31.567629 | 2016-05-18T03:48:00 | 2016-05-18T03:48:00 | 58,906,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | #!flask/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old model')
old_model= api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
| [
"chen.michelle15@gmail.com"
] | chen.michelle15@gmail.com |
e311a5f20fb1dbca7de12fdfcb7920fccbcd889a | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /renmas/materials/specular_sampling.py | 95d9d6e4cdf41f2cad8e48e97a02b9cddb8e55ba | [] | no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py |
import math
import renmas.maths
import renmas.utils as util
class SpecularSampling:
    """Perfect-mirror (specular) BRDF sampling, with both a pure-Python path
    and a generator for the equivalent runtime-assembled machine code."""
    def __init__(self):
        pass
    def get_sample(self, hitpoint):
        """Reflect wo about the surface normal; store wi and n.wi on hitpoint."""
        hp = hitpoint
        ndotwo = hp.normal.dot(hp.wo)
        # Mirror reflection: r = 2 (n . wo) n - wo
        r = hp.normal * ndotwo * 2.0 - hp.wo
        hp.wi = r
        hp.ndotwi = hp.normal.dot(r)
        hp.specular = True #special case
    def get_sample_asm(self, runtime):
        """Assemble and load the machine-code version of get_sample.

        Calling convention: eax holds a pointer to the hitpoint struct.
        NOTE(review): the asm stores 14 into hitpoint.specular (any non-zero
        value counts as "specular" for pdf/pdf_asm); the Python path uses True.
        """
        # eax - pointer to hitpoint
        asm_structs = renmas.utils.structs("hitpoint")
        ASM = """
        #DATA
        float two[4] = 2.0, 2.0, 2.0, 0.0
        """
        ASM += asm_structs + """
        #CODE
        macro dot xmm0 = eax.hitpoint.normal * eax.hitpoint.wo
        macro broadcast xmm1 = xmm0[0]
        macro eq128 xmm1 = xmm1 * two
        macro eq128 xmm1 = xmm1 * eax.hitpoint.normal
        macro eq128 xmm1 = xmm1 - eax.hitpoint.wo
        macro dot xmm4 = xmm1 * eax.hitpoint.normal
        macro eq128 eax.hitpoint.wi = xmm1
        macro eq32 eax.hitpoint.ndotwi = xmm4
        mov dword [eax + hitpoint.specular], 14
        ret
        """
        assembler = util.get_asm()
        mc = assembler.assemble(ASM, True)
        #mc.print_machine_code()
        # Load under a unique name so repeated calls don't clobber each other.
        name = "brdf_specular" + str(util.unique())
        self.ds = runtime.load(name, mc)
        self.func_ptr = runtime.address_module(name)
    def pdf(self, hitpoint):
        """Probability density of the sampled direction: 1.0 for the specular
        (delta) direction, 0.0 otherwise; stored on hitpoint.pdf."""
        if hitpoint.specular:
            hitpoint.pdf = 1.0
        else:
            hitpoint.pdf = 0.0
    def pdf_asm(self):
        """Return asm source that computes the pdf into xmm0 (1.0 when the
        last sample was specular, else 0.0).  Labels are unique per instance."""
        prefix = "_" + str(hash(self)) + "_"
        # eax - pointer to hitpoint
        ASM = "#CODE \n"
        ASM += "mov ebx, dword [eax + hitpoint.specular] \n"
        ASM += "cmp ebx, 0 \n" #0-no specular sample
        ASM += "jne " + prefix + "spec_sample\n"
        ASM += "pxor xmm0, xmm0 \n" # put 0.0 in xmm0
        ASM += "jmp " + prefix + "end_spec \n"
        ASM += prefix + "spec_sample: \n"
        ASM += "pcmpeqw xmm0, xmm0 \n" # generate 1.0 in xmm0
        ASM += "pslld xmm0, 25 \n"
        ASM += "psrld xmm0, 2 \n"
        ASM += prefix + "end_spec: \n"
        return ASM
| [
"mvidov@yahoo.com"
] | mvidov@yahoo.com |
1259ebebbd710c3ca40da7838c78b8380800f990 | 5ff14cf2f759100e17308f37bf4d1a525c7ca882 | /data_processing/data_loader_old.py | 18913bad765ab0b2e8d42a7aa7fce21b3be9ee3d | [] | no_license | ThiasTux/TemplateMatching | ad0d54ab52418c10a290a7ea2307c19d84808ad9 | 8d28ce26049b12bed9718c95a0acb0de242b29f0 | refs/heads/master | 2021-06-21T07:17:43.542849 | 2021-01-21T16:25:58 | 2021-01-21T16:25:58 | 168,568,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,972 | py | import pickle
import random
import numpy as np
from utils import distance_measures as dtm
def load_dataset(dataset_choice=100, classes=None, num_gestures=None, user=None):
    """Load isolated-gesture instances from one of the pickled datasets.

    Parameters:
        dataset_choice: numeric dataset id (100/101 Skoda, 200/201/211
            Opportunity, 300 HCI, 700-704 synthetic, 800 Unilever drinking).
        classes: iterable of class labels to keep.  If None, every label
            found in the dataset is used (the original crashed in this case).
        num_gestures: if given, randomly sample at most this many instances
            per class; if ANY class has fewer instances, sampling is skipped
            entirely and all instances are kept (original behaviour).
        user: if given, keep only instances whose user id (column -1) matches.

    Returns:
        A flat list of instance arrays; column -2 holds the class label and
        column -1 the user id (as the selection code below relies on).

    Raises:
        ValueError: if dataset_choice is not a known dataset id.
    """
    dataset_paths = {
        # Skoda
        100: "outputs/datasets/skoda/all_data_isolated.pickle",
        101: "outputs/datasets/skoda/all_old_data_isolated.pickle",
        # Opportunity
        200: "outputs/datasets/opportunity/all_data_isolated.pickle",
        201: "outputs/datasets/opportunity/all_quant_accy_data_isolated.pickle",
        211: "outputs/datasets/opportunity/all_old_data_isolated.pickle",
        # HCI guided
        300: "outputs/datasets/hci/all_data_isolated.pickle",
        # Synthetic
        700: "outputs/datasets/synthetic/all_data_isolated.pickle",
        701: "outputs/datasets/synthetic2/all_data_isolated.pickle",
        702: "outputs/datasets/synthetic3/all_data_isolated.pickle",
        704: "outputs/datasets/synthetic4/all_data_isolated.pickle",
        # Unilever drinking
        800: "outputs/datasets/unilever_drinking/all_data_isolated.pickle",
    }
    try:
        path = dataset_paths[dataset_choice]
    except KeyError:
        raise ValueError("Unknown dataset_choice: {}".format(dataset_choice))
    # Fixes the original's leaked file handle (open() without close).
    with open(path, "rb") as f:
        data = pickle.load(f)
    if classes is None:
        # Generalization: default to every class label present in the data.
        classes = sorted({d[0, -2] for d in data})
    if user is None:
        selected_data = [[d for d in data if d[0, -2] == c] for c in classes]
    else:
        selected_data = [[d for d in data if d[0, -2] == c and d[0, -1] == user]
                         for c in classes]
    if num_gestures is not None:
        try:
            selected_data = [random.sample(sel_data, num_gestures)
                             for sel_data in selected_data]
        except ValueError:
            # Deliberate best effort: if any class has fewer than num_gestures
            # instances, keep every class's full set (matches the original).
            pass
    return [instance for class_data in selected_data for instance in class_data]
def load_training_dataset(dataset_choice=704, classes=None, num_gestures=None, user=None, extract_null=False,
                          null_class_percentage=0.5, template_choice_method=1, seed=2):
    """Load isolated gesture instances for training.

    :param dataset_choice: numeric id of the dataset to load (see mapping below)
    :param classes: iterable of class labels to select
    :param num_gestures: if given, randomly sample this many instances per class
    :param user: if given, keep only instances recorded by this user
    :param extract_null: if True, synthesise null-class instances from label-0 data
    :param null_class_percentage: fraction of returned instances that are null
    :param template_choice_method: 0 = no template selection, 1 = best-LCS
                                   template per class, 2 = random template
    :param seed: RNG seed used when synthesising null instances
    :returns: (chosen_templates, templates, labels) when template_choice_method != 0,
              otherwise (templates, labels)
    :raises ValueError: for an unknown dataset_choice (the original code fell
        through and crashed later with a NameError)
    """
    # dataset id -> pickled isolated-instance file
    dataset_paths = {
        100: "outputs/datasets/skoda/all_data_isolated.pickle",            # Skoda
        101: "outputs/datasets/skoda/all_old_data_isolated.pickle",        # Skoda (old)
        200: "outputs/datasets/opportunity/all_data_isolated.pickle",      # Opportunity
        201: "outputs/datasets/opportunity/all_quant_accy_data_isolated.pickle",
        300: "outputs/datasets/hci/all_data_isolated.pickle",              # HCI guided
        700: "outputs/datasets/synthetic/all_data_isolated.pickle",        # Synthetic
        701: "outputs/datasets/synthetic2/all_data_isolated.pickle",
        702: "outputs/datasets/synthetic3/all_data_isolated.pickle",
        704: "outputs/datasets/synthetic4/all_data_isolated.pickle",
        800: "outputs/datasets/unilever_drinking/all_data_isolated.pickle",
    }
    try:
        dataset_file = dataset_paths[dataset_choice]
    except KeyError:
        raise ValueError("Unknown dataset_choice: {}".format(dataset_choice))
    with open(dataset_file, "rb") as pickle_file:
        data = pickle.load(pickle_file)
    # each instance is an array whose column -2 holds the class and -1 the user
    if user is None:
        selected_data = [[d for d in data if d[0, -2] == c] for c in classes]
    else:
        selected_data = [[d for d in data if d[0, -2] == c and d[0, -1] == user] for c in classes]
    if num_gestures is not None:
        try:
            selected_data = [random.sample(class_data, num_gestures) for class_data in selected_data]
        except ValueError:
            # a class has fewer than num_gestures instances: keep everything
            pass
    labels = [instance[0, -2] for class_data in selected_data for instance in class_data]

    def _make_null_instances(num_templates):
        """Synthesise null-class instances cut from the label-0 data stream."""
        null_streams = [d for d in data if d[0, -2] == 0]
        null_samples = [item for d in null_streams for item in d[:, 1]]
        num_null_instances = int((num_templates * null_class_percentage) / (1 - null_class_percentage))
        # NOTE(review): this averages the number of instances per class, not
        # the instance lengths; preserved from the original implementation.
        avg_length = int(np.average([len(d) for d in selected_data]))
        # BUG FIX: seed once. The original reseeded inside the loop, which made
        # every synthesised null instance start at the same offset.
        np.random.seed(seed)
        null_instances = []
        for _ in range(num_null_instances):
            tmp_null_data = np.zeros((avg_length, 4))
            tmp_null_data[:, 0] = np.arange(avg_length)
            start_idx = np.random.randint(0, len(null_samples) - avg_length)
            tmp_null_data[:, 1] = null_samples[start_idx:start_idx + avg_length]
            null_instances.append(tmp_null_data)
        return null_instances

    templates = [instance for class_data in selected_data for instance in class_data]
    if template_choice_method != 0:
        chosen_templates = [np.array([]) for _ in classes]
        if template_choice_method == 1:
            # per class, pick the instance with the highest summed LCS score
            # (normalised by its own length) against all other instances
            for k, _cls in enumerate(classes):
                class_templates = selected_data[k]
                matching_scores = np.zeros((len(class_templates), len(class_templates)), dtype=int)
                for i in range(len(class_templates)):
                    for j in range(i + 1, len(class_templates)):
                        score, _ = dtm.LCS(class_templates[i][:, 1], class_templates[j][:, 1])
                        matching_scores[i][j] = score
                        matching_scores[j][i] = score
                matching_scores_sums = np.sum(matching_scores, axis=0)
                matching_scores_perc = np.array(
                    [matching_scores_sums[i] / len(class_templates[i]) for i in range(len(class_templates))])
                ordered_indexes = np.argsort(matching_scores_perc)
                chosen_templates[k] = np.array(class_templates[ordered_indexes[-1]])
        elif template_choice_method == 2:
            # BUG FIX: the original iterated the list of per-class lists as if
            # it held instances, and indexed with a float from
            # np.random.uniform (TypeError). Pick a random instance per class.
            for k, _cls in enumerate(classes):
                class_templates = selected_data[k]
                chosen_templates[k] = class_templates[np.random.randint(0, len(class_templates))]
        if extract_null:
            null_instances = _make_null_instances(len(templates))
            labels += [0 for _ in null_instances]
            templates += null_instances
        return chosen_templates, templates, labels
    else:
        if extract_null:
            null_instances = _make_null_instances(len(templates))
            labels += [0 for _ in null_instances]
            templates += null_instances
        return templates, labels
def load_evolved_templates(es_results_file, classes, use_evolved_thresholds=False):
    """Load the final evolved template (last line) for each class.

    Reads "<es_results_file>_00_<class>_templates.txt" per class; the last
    line holds space-separated integer samples, optionally followed by a
    decision threshold. Returns a list of (index, value) column-stacked
    arrays, plus the thresholds when use_evolved_thresholds is True.
    """
    chosen_templates = []
    thresholds = []
    for cls in classes:
        result_path = "{}_00_{}_templates.txt".format(es_results_file, cls)
        with open(result_path, "r") as result_file:
            final_line = result_file.readlines()[-1]
        tokens = final_line.split(" ")
        if use_evolved_thresholds:
            # the trailing token is the evolved decision threshold
            sample_tokens = tokens[:-1]
            thresholds.append(int(tokens[-1]))
        else:
            sample_tokens = tokens
        template = np.array([int(token) for token in sample_tokens])
        chosen_templates.append(np.stack((np.arange(len(template)), template), axis=-1))
    if use_evolved_thresholds:
        return chosen_templates, thresholds
    return chosen_templates
def load_continuous_dataset(dataset_choice='skoda', user=1, template_choice_method=1, seed=2):
    """Load a pickled dataset for continuous recognition.

    :param dataset_choice: dataset key (see mapping below)
    :param user: user id; used only by the per-user datasets
        ('opp', 'opp_quant', 'hci')
    :param template_choice_method: unused; kept for interface compatibility
    :param seed: unused; kept for interface compatibility
    :returns: the unpickled dataset
    :raises ValueError: for an unknown dataset_choice (the original code
        raised a NameError in that case)
    """
    dataset_paths = {
        # Skoda dataset (isolated instances)
        'skoda': "outputs/datasets/skoda/all_data_isolated.pickle",
        'skoda_old': "outputs/datasets/skoda/all_old_data_isolated.pickle",
        # Opportunity dataset (per-user continuous streams)
        'opp': "outputs/datasets/opportunity/user_{:02d}_accx_data_continuous.pickle".format(user),
        'opp_quant': "outputs/datasets/opportunity/user_{:02d}_quant_accy_data_continuous.pickle".format(user),
        # HCI guided (per-user continuous stream)
        'hci': "outputs/datasets/hci/user_{:02d}_data_continuous.pickle".format(user),
        # Synthetic datasets (NOTE: these point at isolated-instance files)
        'synt_1': "outputs/datasets/synthetic/all_data_isolated.pickle",
        'synt_2': "outputs/datasets/synthetic2/all_data_isolated.pickle",
        'unil': "outputs/datasets/unilever_drinking/all_data_isolated.pickle",
    }
    try:
        dataset_file = dataset_paths[dataset_choice]
    except KeyError:
        raise ValueError("Unknown dataset_choice: {}".format(dataset_choice))
    # close the file handle explicitly (the original left it open)
    with open(dataset_file, "rb") as pickle_file:
        return pickle.load(pickle_file)
def enc_data_loader(input_path):
    """Read a whitespace-delimited numeric text file into a numpy array."""
    return np.loadtxt(input_path)
| [
"mathias.ciliberto@gmail.com"
] | mathias.ciliberto@gmail.com |
3b01692f4601f5435f8efd2fd3532e1a63d29fa9 | 48ddaeb3eb7b127ccf6ce362b2a42307ecacc875 | /src/filters/filter_time_to_space.py | ed224805af288dbf31e6d71f0f856bbf33a2f2b7 | [
"BSD-3-Clause"
] | permissive | aashish24/ParaViewGeophysics | f720cdf2555f2a81801dfa4132189a040c980b8b | d9a71ffd21a57fa0eb704c5f6893ec9b1ddf6da6 | refs/heads/master | 2022-03-05T15:23:56.166299 | 2017-11-10T22:28:44 | 2017-11-10T22:28:44 | 110,368,572 | 0 | 0 | BSD-3-Clause | 2020-01-12T00:22:22 | 2017-11-11T18:12:25 | Python | UTF-8 | Python | false | false | 4,291 | py | Name = 'ProjectShotRecordToSpace'
# ParaView programmable-filter metadata, consumed by the plugin wrapper that
# turns this module into a GUI filter.
Label = 'Project Shot Record To Space'
FilterCategory = 'CSM Geophysics Filters'
Help = ''
NumberOfInputs = 2            # a trace-data table and a coordinates table
InputDataType = 'vtkTable'
OutputDataType = 'vtkPolyData'
ExtraXml = ''
# User-editable parameters exposed in the ParaView GUI (injected into
# RequestData's scope by the wrapper):
Properties = dict(
    ns=126,    # number of traces -- presumably shots/receivers; verify
    nt=1500,   # number of time samples per trace
    ds=1.0,    # spatial sample spacing -- unused in current RequestData
    dt=0.001,  # time sample spacing -- unused in current RequestData
    )
def RequestData():
    """ParaView programmable-filter body: project each trace of a shot record
    onto a 3D line running from its receiver towards its source, producing a
    vtkPolyData polyline whose points carry the original trace values.

    NOTE(review): `self`, `inputs`, `vtk` and the parameters `ns`/`nt` are
    injected by the ParaView plugin wrapper; `math` is assumed to be available
    in that environment as well -- confirm it is imported there.
    """
    from vtk.util import numpy_support as nps
    import numpy as np
    pdo = self.GetOutput()
    # work out which input port holds the coordinates table (its first column
    # name contains 'coord') and which holds the trace data
    idata = 0
    icoord = 1
    if 'coord' in inputs[0].GetColumn(0).GetName():
        idata = 1
        icoord = 0
    # connect to input ports
    data_in = inputs[idata]
    coords_in = inputs[icoord]
    #print('Data: ', data_in.GetColumn(0).GetName())
    #print('Coords: ', coords_in.GetColumn(0).GetName())
    # Put arrays from inout to numpy arrays
    data = nps.vtk_to_numpy(data_in.GetColumn(0))
    coords = nps.vtk_to_numpy(coords_in.GetColumn(0))
    # Reshape arrays
    #data = np.reshape(data, (ns,nt,1)) # NOT NEEDED!!!!!!
    coords = np.reshape(coords, (ns,6))
    # Coordinate indices in the ns x 6 matrix:
    # (g* columns feed the receiver position below, s* the source position)
    gx = 3
    gy = 2
    gz = 5
    sx = 1
    sy = 0
    sz = 4
    vtk_pts = vtk.vtkPoints()
    traces_as_points = np.empty((nt,4))  # currently unused (see commented code below)
    # For each trace (essentially columns in both structures/arrays)
    for i in range(ns):
        # Grab source and receiver coords
        pts = coords[i]
        # source:
        s = [pts[sx], pts[sy], pts[sz]]
        # Receiver:
        g = [pts[gx], pts[gy], pts[gz]]
        # Direction Vector: Vector points from receiver to source
        vec = [s[0] - g[0], s[1] - g[1], s[2] - g[2]]
        # Total spatial distance:
        dist = math.sqrt(vec[0]**2 + vec[1]**2) # + vec[2]**2
        # Get unit vector for direction
        vec = [vec[0]/dist, vec[1]/dist, vec[2]] # /dist
        # Determine spacing factor from distance of 3D line and total data to fit on that 3D line
        #ds = math.floor(dist) / nt
        # Generate an array of coords for that whole line at that spacing and associate trace data
        line_coords = np.empty((nt,3))
        for j in range(nt):
            # sample j sits (nt-j) units from the receiver along the unit
            # direction; z is held constant at the receiver depth
            x = g[0] + (vec[0] * (nt-j) ) #* dt
            y = g[1] + (vec[1] * (nt-j) ) #* dt
            z = g[2]#s[2] + (vec[2] * j * ds)
            #line_coords = np.append(line_coords, [x,y,z])
            #line_coords[j] = [x,y,z]
            vtk_pts.InsertNextPoint(x,y,z)
        # Add each trace one after another (x,y,z,data) to 4D array
        #temp = np.append(line_coords, data[i], axis=1)
        #traces_as_points = np.append(traces_as_points, temp, axis=0)
    #pdo.SetPoints(vtk_pts)
    #insert = nps.numpy_to_vtk(num_array=data, deep=True, array_type=vtk.VTK_FLOAT)
    # attach the raw trace values as point data (same order as the points)
    pdo.GetPointData().AddArray(data_in.GetColumn(0))
    #pdo.AddArray(data)
    # Add the points to the vtkPolyData object
    # Right now the points are not associated with a line -
    # it is just a set of unconnected points. We need to
    # create a 'cell' object that ties points together
    # to make a curve (in this case). This is done below.
    # A 'cell' is just an object that tells how points are
    # connected to make a 1D, 2D, or 3D object.
    pdo.SetPoints(vtk_pts)
    # Make a vtkPolyLine which holds the info necessary
    # to create a curve composed of line segments. This
    # really just hold constructor data that will be passed
    # to vtkPolyData to add a new line.
    aPolyLine = vtk.vtkPolyLine()
    #Indicate the number of points along the line
    numPts = ns*nt
    aPolyLine.GetPointIds().SetNumberOfIds(numPts)
    for i in range(0,numPts):
        # Add the points to the line. The first value indicates
        # the order of the point on the line. The second value
        # is a reference to a point in a vtkPoints object. Depends
        # on the order that Points were added to vtkPoints object.
        # Note that this will not be associated with actual points
        # until it is added to a vtkPolyData object which holds a
        # vtkPoints object.
        aPolyLine.GetPointIds().SetId(i, i)
    # Allocate the number of 'cells' that will be added. We are just
    # adding one vtkPolyLine 'cell' to the vtkPolyData object.
    pdo.Allocate(1, 1)
    # Add the poly line 'cell' to the vtkPolyData object.
    pdo.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())
| [
"chrsulli@mines.edu"
] | chrsulli@mines.edu |
63d840a4e9086763b14e0fc3229eb897db7931ef | 955e99e0f46a8578562853fdb2cb9237923dcdd7 | /submission/tasks.py | 38e5e592ddcd58d9f712267eef81801226332d06 | [] | no_license | joeyac/WebServer | 7d7ccc3df3092f923e52248c15e5dbb3ad5b866b | c856ed5570712887c61df9f563a9c028c27a8367 | refs/heads/master | 2021-06-16T16:04:02.847217 | 2017-05-19T04:42:23 | 2017-05-19T04:42:23 | 81,619,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from judger.tasks import JudgeDispatcher
@shared_task
def p_judge(submission_id, language_name, src_code,
            time_limit=None, memory_limit=None,
            test_case_id=None, spj_code=None,
            oj=None, problem_id=None):
    """Celery task: judge a submission asynchronously.

    All arguments are forwarded unchanged to JudgeDispatcher, which performs
    the actual compilation/execution and persists the verdict.
    """
    dispatcher = JudgeDispatcher(submission_id, language_name, src_code,
                                 time_limit, memory_limit,
                                 test_case_id, spj_code,
                                 oj, problem_id)
    dispatcher.judge()
"623353308@qq.com"
] | 623353308@qq.com |
1300eb74b39e37aa12c11ab90b55b2f14bb5b104 | 061c9850fe1d8085f9b04ee541eb9dd7b389ea48 | /backend/home/migrations/0002_load_initial_data.py | ac672869ea320e1bcfcb77501628434b0faf52fa | [] | no_license | crowdbotics-apps/tony-stg-app-7-dev-14211 | 1245fab608661791618c21efff0dc5e3d536b94b | ba6c52b243a6bd99d721233b9b7ab9f90b2228f8 | refs/heads/master | 2023-01-07T07:48:10.718703 | 2020-11-11T03:44:25 | 2020-11-11T03:44:25 | 308,393,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "tony-stg-app-7"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">tony-stg-app-7</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tony-stg-app-7-dev-14211.botics.co"
site_params = {
"name": "tony-stg-app-7",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
877269ec91daed7055f3dc47068189e5aeaf2c5d | c60abf0b0850bc09f5871637203360a5e6983988 | /attack_models/tools/eval_roc_dd.py | 05c091fd037b2523b28005aa587e036a722b1c10 | [
"MIT",
"Python-2.0"
] | permissive | sanixa/gan-leaks-custom | 87780a0f792acc1c3e49c9820ffc4870d267f748 | f2efd8c8f4d267dd728bf00c8936d6f04a63736e | refs/heads/main | 2023-07-27T14:54:01.075339 | 2021-09-09T04:59:01 | 2021-09-09T04:59:01 | 404,181,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | import numpy as np
import os
import argparse
import matplotlib.pyplot as plt
from sklearn import metrics
############################################################################
# visualization functions
############################################################################
def plot_roc(pos_results, neg_results):
    """Score a membership-inference attack from positive/negative results.

    Positives (members) are labelled 1, negatives 0; a larger score is
    treated as more member-like. Returns
    (fpr, tpr, threshold, auc, ap, acc, tpr_thres, fpr_thres).
    """
    truth = np.concatenate((np.zeros((len(neg_results),)), np.ones((len(pos_results),))))
    scores = np.concatenate((neg_results, pos_results))
    # threshold-free curve metrics
    fpr, tpr, threshold = metrics.roc_curve(truth, scores, pos_label=1)
    auc = metrics.roc_auc_score(truth, scores)
    ap = metrics.average_precision_score(truth, scores)
    # hard predictions at the median score
    median_score = np.sort(scores)[int(len(scores) / 2)]
    hard_preds = [1 if score > median_score else 0 for score in scores]
    acc = metrics.accuracy_score(truth, hard_preds)
    tn, fp, fn, tp = metrics.confusion_matrix(truth, hard_preds).ravel()
    tpr_thres = tp / (tp + fn)
    fpr_thres = fp / (fp + tn)
    return fpr, tpr, threshold, auc, ap, acc, tpr_thres, fpr_thres
def plot_hist(pos_dist, neg_dist, save_file):
    """Save an overlaid, per-sample-normalised histogram of the positive and
    negative distance distributions to save_file."""
    plt.figure()
    for dist, tag in ((pos_dist, 'positive'), (neg_dist, 'negative')):
        # weight each sample by 1/N so the bars show relative frequency
        weights = np.zeros_like(dist) + 1. / dist.size
        plt.hist(dist, bins=100, alpha=0.5, weights=weights, label=tag)
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.xlabel('distance')
    plt.ylabel('normalized frequency')
    plt.savefig(save_file)
    plt.close()
#############################################################################################################
# get the arguments
#############################################################################################################
def parse_arguments():
    """Build the command-line interface for the evaluation script and return
    the parsed options (from sys.argv)."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--result_load_dir', '-ldir',
                     type=str, default=None,
                     help='directory of the attack result')
    cli.add_argument('--attack_type',
                     type=str, choices=['fbb', 'pbb', 'wb'],
                     help='type of the attack')
    cli.add_argument('--reference_load_dir', '-rdir',
                     default=None,
                     help='directory for the reference model result (optional)')
    cli.add_argument('--save_dir', '-sdir',
                     type=str, default=None,
                     help='directory for saving the evaluation results (optional)')
    return cli.parse_args()
#############################################################################################################
# main
#############################################################################################################
def main():
    """Evaluate a membership-inference attack from saved per-sample losses.

    Loads pos/neg loss arrays from the attack result directory, plots the ROC
    curve (losses are negated so that a larger score means "member"), and, if
    a reference model result is supplied, additionally plots the calibrated
    attack (attack loss minus reference loss).
    """
    args = parse_arguments()
    attack_type = args.attack_type
    result_load_dir = args.result_load_dir
    reference_load_dir = args.reference_load_dir
    save_dir = args.save_dir
    # guard against stray carriage returns from Windows-edited shell scripts
    result_load_dir = result_load_dir.strip("\r")
    if attack_type == 'fbb':
        # fbb results are 2-D; keep only the first (distance) column
        pos_loss = np.load(os.path.join(result_load_dir, 'pos_loss.npy'))[:, 0]
        neg_loss = np.load(os.path.join(result_load_dir, 'neg_loss.npy'))[:, 0]
    else:
        pos_loss = np.load(os.path.join(result_load_dir, 'pos_loss.npy')).flatten()
        neg_loss = np.load(os.path.join(result_load_dir, 'neg_loss.npy')).flatten()
    ### plot roc curve
    fpr, tpr, threshold, auc, ap, acc, tpr_thres, fpr_thres = plot_roc(-pos_loss, -neg_loss)
    plt.plot(fpr, tpr, label='%s attack, auc=%.3f, ap=%.3f' % (attack_type, auc, ap))
    print("The accuracy value of %s attack is: %.3f " % (attack_type, acc))
    print("The tpr_thres/fpr_thres value of %s attack is: %.3f and %.3f" % (attack_type, tpr_thres, fpr_thres))
    print("The AUC ROC value of %s attack is: %.3f " % (attack_type, auc))
    ################################################################
    # attack calibration
    ################################################################
    if reference_load_dir is not None:
        pos_ref = np.load(os.path.join(reference_load_dir, 'pos_loss.npy'))
        neg_ref = np.load(os.path.join(reference_load_dir, 'neg_loss.npy'))
        num_pos_samples = np.minimum(len(pos_loss), len(pos_ref))
        num_neg_samples = np.minimum(len(neg_loss), len(neg_ref))
        try:
            pos_calibrate = pos_loss[:num_pos_samples] - pos_ref[:num_pos_samples]
            neg_calibrate = neg_loss[:num_neg_samples] - neg_ref[:num_neg_samples]
        except (ValueError, IndexError):
            # reference arrays may be 2-D (value, index); use the value column
            pos_calibrate = pos_loss[:num_pos_samples] - pos_ref[:num_pos_samples, 0]
            neg_calibrate = neg_loss[:num_neg_samples] - neg_ref[:num_neg_samples, 0]
        # BUG FIX: plot_roc returns 8 values; the original unpacked only 5
        # here, so every calibrated run crashed with a ValueError.
        fpr, tpr, threshold, auc, ap, _, _, _ = plot_roc(-pos_calibrate, -neg_calibrate)
        plt.plot(fpr, tpr, label='calibrated %s attack, auc=%.3f, ap=%.3f' % (attack_type, auc, ap))
        print("The AUC ROC value of calibrated %s attack is: %.3f " % (attack_type, auc))
    plt.legend(loc='lower right')
    plt.xlabel('false positive')
    plt.ylabel('true positive')
    plt.title('ROC curve')
    if save_dir is not None:
        plt.savefig(os.path.join(save_dir, 'roc.png'))
    plt.show()

if __name__ == '__main__':
    main()
| [
"cegg12345678@yahoo.com.tw"
] | cegg12345678@yahoo.com.tw |
2781abb2571ce6222079aaeec64e43050fc8c7dd | 04f83aab47940b739f13c1ba102c230372966c43 | /SHyFTFitter/scripts/configTemplateInfo.py | ec70fe88e67072237e3cf70d7d4f78a0d8a603d1 | [] | no_license | PerilousApricot/SUSHyFT-Analyzer | 5a11909963d30c8ad7f19f499253a6753e78608a | 9f5ba528a96203459c52a0434b32311a16e2ff3b | refs/heads/master | 2016-09-15T15:31:30.617286 | 2016-03-14T20:32:09 | 2016-03-14T21:02:28 | 21,915,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | #! /usr/bin/env python
import ROOT
import optparse, sys, re, pprint, os
from FitterConfig import FitterConfig
# global variables to be filled
# (module-level state populated while the configuration file is parsed)
histNames = []    # template histogram names -- presumably from 'names' lines
groupNames = []   # sample group names -- presumably from 'groupNames' lines
fileName = ''     # ROOT template file path -- presumably from 'templateFile'
lumi = 1.         # integrated luminosity -- presumably from 'intLumi'
# number of jet and tag bins
totalDict = {}
histList = []
# REs
# regular expressions for parsing "+ key = value" configuration lines and
# for decoding bin information encoded in histogram names
commentRE = re.compile (r'\#.+$')   # strips trailing '#' comments
trailingRE = re.compile (r'\s*$')   # strips trailing whitespace
sampleRE = re.compile (r'^\s*\+\s+names\s*=\s*(.+)', re.IGNORECASE)
groupRE = re.compile (r'^\s*\+\s+groupNames\s*=\s*(.+)', re.IGNORECASE)
fileRE = re.compile (r'^\s*\+\s+templateFile\s*=\s*(.+)', re.IGNORECASE)
lumiRE = re.compile (r'^\s*\+\s+intLumi\s*=\s*(.+)', re.IGNORECASE)
commaRE = re.compile (r'\s*,\s*')   # splits comma-separated option values
jetRE = re.compile (r'_(\d+)j')     # jet multiplicity embedded in a name
tagRE = re.compile (r'_(\d+)t')     # tag multiplicity embedded in a name
htRE = re.compile (r'_hT', re.IGNORECASE)  # marks HT-binned histograms
# per-group fill colors -- presumably ROOT TColor indices; verify
colorDict = {
    'Top' : 2,
    'sing' : 93,
    'Wbb' : 56,
    'Wcc' : 62,
    'Wc' : 65,
    'Wqq' : 69,
    'EW' : 89,
    'QCD' : 33,
    }
if __name__ == "__main__":
    # Command-line driver: reads a fitter configuration file (plus template
    # ROOT file) and prints template yield information, optionally as LaTeX.
    # NOTE: legacy Python 2 script (print statements, optparse).
    # Setup options parser
    parser = optparse.OptionParser \
             ("usage: %prog [options] templates.root" \
              "Prints out info on templates.")
    parser.add_option ('--lum', dest = 'lum', type='float', default=0.,
                       help='Override integrated luminosity in config file');
    parser.add_option ("--latex", dest='latex',
                       action='store_true',
                       help="Formats output as latex table")
    parser.add_option ("--debug", dest='debug',
                       action='store_true',
                       help="Print out FitterConfig object")
    parser.add_option ('--noData', dest='noData', action='store_true',
                       default=True,
                       help='Do not display data counts')
    parser.add_option ('--Data', dest='noData', action='store_false',
                       help='Display data counts')
    parser.add_option ('--totalMC', dest='totalMC', action='store_true',
                       default=False,
                       help='Display total MC prediction counts')
    parser.add_option ('--file', dest = 'file', type='string',
                       help='Override root file to use');
    parser.add_option ('--combineGroups', dest = 'combineGroups',
                       action='append', type='string', default=[],
                       help='Groups to combine');
    parser.add_option ('--combineSamples', dest = 'combineSamples',
                       action='append', type='string', default=[],
                       help='Samples to combine');
    parser.add_option ("--groups", dest='groups', action="append",
                       type="string", default=[],
                       help="Which groups to use")
    parser.add_option ("--samples", dest='samples', action="append",
                       type="string", default=[],
                       help="Which samples to use")
    ## saveGroup = optparse.OptionGroup (parser, "Save Stacks Options")
    ## saveGroup.add_option ("--saveStacks", dest='saveStacks',
    ##                       action='store_true',
    ##                       help="Saves images of stack of templates")
    ## saveGroup.add_option ("--cms", dest='cms', action='store_true',
    ##                       help="Use CMS titles, etc for plots")
    ## saveGroup.add_option ("--big", dest='big', action='store_true',
    ##                       help="Make big plots")
    ## saveGroup.add_option ("--eps", dest='eps', action='store_true',
    ##                       help='Save .eps files')
    ## parser.add_option_group (saveGroup)
    options, args = parser.parse_args()
    # batch/plain mode so ROOT does not open GUI windows
    ROOT.gROOT.SetBatch()
    ROOT.gROOT.SetStyle('Plain')
    if len (args) < 1:
        print "Need to provide configuration file. Aborting."
        sys.exit(1)
    configName = args[0]
    config = FitterConfig (configName, ignoreBinString=True)
    config.noData = options.noData
    config.setValuesFromArgs (args)
    #config.readConfig (configName)
    config.printMCtotal = options.totalMC
    config.latex = options.latex
    config.setCombineGroups (options.combineGroups)
    config.setCombineSamples (options.combineSamples)
    # flatten possibly comma-separated --samples values
    samples = []
    for sample in options.samples:
        samples.extend (commaRE.split (sample))
    if samples:
        config.setSamples (samples)
    # flatten possibly comma-separated --groups values
    groups = []
    for group in options.groups:
        groups.extend (commaRE.split (group))
    if groups:
        config.setGroups (groups)
    # command-line overrides for file name and luminosity
    if options.file:
        config.fileName = options.file
    if options.lum:
        config.lumi = options.lum
    print "info for %s:" % config.fileName
    config.printInfo()
    if options.debug:
        print "%s" % config
| [
"andrew.m.melo@vanderbilt.edu"
] | andrew.m.melo@vanderbilt.edu |
178a300a703710946bedd0c3206460881d295d52 | 8495dad5d79e666cd4e96945b9bf7c90f722acba | /server_stream.py | aee642239ad4160a1197123feb90879f8fcc829c | [] | no_license | ike091/powder-ndn | 58472f950aca4d63ae25656414cf23f7746ea53d | 515dfdfc32fcd20cdf47bf2dc2e634450275d443 | refs/heads/master | 2022-12-02T11:51:30.997782 | 2020-08-07T20:13:35 | 2020-08-07T20:13:35 | 264,268,348 | 1 | 2 | null | 2020-06-02T16:45:59 | 2020-05-15T18:23:31 | Python | UTF-8 | Python | false | false | 5,492 | py | import time
import argparse
import traceback
import random
import asyncio
from pyndn import Name
from pyndn import Data
from pyndn import Face
from pyndn.security import KeyChain
from pyndn.threadsafe_face import ThreadsafeFace
import numpy as np
import pandas as pd
def dump(*items):
    """Print all parameters on one line, separated by spaces.

    Non-string arguments are converted with str(). A trailing space is kept
    before the newline to match the historical output format.
    """
    # join at C speed instead of repeated string concatenation; the parameter
    # was also renamed so it no longer shadows the builtin `list`
    print("".join("{} ".format(item) for item in items))
class Producer():
    """Hosts data under a certain namespace.

    Registers an NDN prefix with the local forwarder and answers every
    incoming interest with a zero-filled payload of `data_size` bytes,
    while tracking simple count/throughput statistics.
    """
    def __init__(self, data_size, verbose=False):
        # create a KeyChain for signing data packets
        self._key_chain = KeyChain()
        self._is_done = False
        self._num_interests = 0
        # self._keyChain.createIdentityV2(Name("/ndn/identity"))
        # host data at the local forwarder
        self._face = Face()
        # immutable byte array to use as data
        self._byte_array = bytes(data_size)
        # the number of bytes contained in each data packet
        self._data_size = data_size
        # the verbosity of diagnostic information
        self._verbose = verbose
        # keep track of if the first interest has been recieved (for timing)
        # NOTE(review): this (typo'd) flag is never read anywhere
        self._is_first_interst = True
        # keep track of various performance metrics:
        self._interests_satisfied = 0
        self._interests_recieved = 0
        self._data_sent = 0
        self._elapsed_time = {}
        self._initial_time = {}
        self._final_time = {}
        print("Producer instance created.")
    def run(self, namespace):
        """Starts listening for interest packets in the given namespace.

        Blocks in a polling loop until shutdown() flips _is_done."""
        prefix = Name(namespace)
        # Use the system default key chain and certificate name to sign commands.
        self._face.setCommandSigningInfo(self._key_chain, self._key_chain.getDefaultCertificateName())
        # Also use the default certificate name to sign Data packets.
        self._face.registerPrefix(prefix, self.onInterest, self.onRegisterFailed)
        dump("Registering prefix", prefix.toUri())
        print(f"Listening for interests under {namespace}...")
        # Run the event loop forever. Use a short sleep to
        # prevent the Producer from using 100% of the CPU.
        while not self._is_done:
            self._face.processEvents()
            time.sleep(0.01)
        # shutdown this face - TODO: figure out why this can't be done in the self.shutdown() method
        self._face.shutdown()
    def onInterest(self, prefix, interest, transport, registeredPrefixId):
        """Called when an interest for the specified name is recieved"""
        # keep track of when first interest was recieved
        # NOTE(review): this timestamp is overwritten on EVERY interest, so
        # the elapsed time reported later spans only from the LAST interest
        # to shutdown -- confirm whether first-interest timing was intended.
        self._initial_time['download_time'] = time.time()
        # set data to a byte array of a specified size
        interestName = interest.getName()
        data = Data(interestName)
        data.setContent(self._byte_array)
        # sign and send data
        data.getMetaInfo().setFreshnessPeriod(3600 * 1000)
        self._key_chain.sign(data, self._key_chain.getDefaultCertificateName())
        transport.send(data.wireEncode().toBuffer())
        # print additional information if verobse flag is set
        if self._verbose:
            dump("Replied to:", interestName.toUri())
        # increment appropriate variables
        self._interests_recieved += 1
        self._interests_satisfied += 1
        self._num_interests += 1
    def onRegisterFailed(self, prefix):
        """Called when forwarder can't register prefix."""
        dump("Register failed for prefix", prefix.toUri())
        self.shutdown()
    def shutdown(self):
        # record the end timestamp, stop the run() loop and print statistics
        self._final_time['download_time'] = time.time()
        self._is_done = True
        self.print_status_report()
    def print_status_report(self):
        """Prints performance metrics for this producer."""
        # compute total data sent (in bytes)
        self._data_sent = self._interests_satisfied * self._data_size
        # compute timing
        for key, value in self._initial_time.items():
            self._elapsed_time[key] = self._final_time[key] - self._initial_time[key]
        # calculate bitrate of interests sent
        # NOTE(review): raises KeyError if no interest was ever received
        download_kbps = ((self._data_sent * 8) / 1000) / self._elapsed_time['download_time']
        print("\n----------------------------------")
        print(f"Number of interests recieved: {self._interests_recieved}")
        print(f"Number of interests satisfied: {self._interests_satisfied}")
        print("----------------------------------")
        # this probably isn't a useful metric, as the output interface will throttle this
        # print(f"{self._data_sent / 1000} kilobytes sent for a bitrate of {download_kbps} kbps")
        print(f"{self._data_size * self._interests_satisfied} bytes of data sent.")
        print("----------------------------------\n")
def main():
    """Parse command-line options and serve data under the requested NDN prefix."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--prefix", help="the prefix to host data under", default="/ndn/external/test")
    parser.add_argument("-v", "--verbosity", help="increase output verbosity", action="store_true")
    parser.add_argument("-s", "--data_size", help="set the per-packet data size in bytes", type=int, default=1000)
    options = parser.parse_args()
    # serve content under the prefix chosen on the command line
    producer = Producer(options.data_size, verbose=options.verbosity)
    producer.run(options.prefix)

main()
| [
"ike091@gmail.com"
] | ike091@gmail.com |
ee6c17019d4216d285873c680eee31afc64e67f1 | 0c4c0cdcd597e64f79ebdcb137042e30dbb4a8bd | /ImageDownloader.py | 55e1a2e2563ba22ff53322c4f16e58e19a00d9b3 | [
"MIT"
] | permissive | skulbrane/image-scraper | 78f3356b5595f880d3b562eb59c6eb432efab8a6 | ff1257dd6f95a2a9ecce87af042da75e90efa5c1 | refs/heads/master | 2021-01-21T01:57:53.644869 | 2015-09-13T13:10:14 | 2015-09-13T13:10:14 | 42,398,550 | 0 | 0 | null | 2015-09-13T14:19:15 | 2015-09-13T14:19:15 | null | UTF-8 | Python | false | false | 1,118 | py | #!/usr/bin/python
import urllib
import os
class ImagesDownload(object):
    """Reads image URLs from a text file and downloads each one as a .jpg.

    NOTE: legacy Python 2 code (urllib.urlretrieve; the __main__ block below
    uses Python 2 print statements).
    """
    def __init__(self, fileName = None):
        # fileName: name of the URL-list text file, relative to this module
        self.fileName = fileName
    # relative file path
    def filePath(self):
        """Return (module dir, path to the URL file), or only the module dir
        when no fileName was given -- note the inconsistent return arity."""
        dirPath = os.path.dirname(__file__)
        if self.fileName != None:
            relFilePath = os.path.join(dirPath, self.fileName)
            return dirPath, relFilePath
        else:
            return dirPath
    # read file content
    def readFile(self, filePath):
        """Return the file's lines (newlines preserved)."""
        with open(filePath) as textFile:
            textFileContent = textFile.readlines()
        return textFileContent
    # download images
    def downloadImages(self, dirName, urlData):
        """Download every URL in urlData into <dirName>/pictures as <i>.jpg.

        NOTE(review): the existence check uses the relative path 'pictures'
        (current working directory) while files are written under dirName;
        these differ unless the script is run from dirName -- verify.
        """
        if not os.path.exists('pictures'): os.makedirs('pictures')
        dirName = dirName+'/pictures'
        for idx, val in enumerate(urlData):
            urllib.urlretrieve(val, dirName+"/"+str(idx)+".jpg")
if __name__ == '__main__':
    # Read 'ImageCollection.txt' next to this module and download every URL
    # listed in it (one URL per line).
    try:
        images = ImagesDownload('ImageCollection.txt')
        dirName, imageBookPath = images.filePath()
        imageBookData = images.readFile(imageBookPath)
        images.downloadImages(dirName, imageBookData)
    except:
        # NOTE(review): bare except hides the real error (missing file,
        # network failure, bad URL); Python 2 print statement below.
        print "Error: unable to download"
| [
"naveenranjankarippai@gmail.com"
] | naveenranjankarippai@gmail.com |
877c2d0eb3274c9286722d217a3cb4d2b2fbe11f | e6f3f8aa78ff2560903b3d5e4beff97c83c048e3 | /src/predict.py | d0d54e5aa7cb4b34cc4448b5ed5beef68ac62fd5 | [] | no_license | chris010970/eurosat | 9fad0a11fb2c0804f10dadae9bc9226a937af2e3 | 25e455db68fd13d359956aa11172935ce2841909 | refs/heads/master | 2022-10-10T17:08:26.476974 | 2020-06-04T16:50:01 | 2020-06-04T16:50:01 | 266,398,939 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,001 | py | import os
import time
import argparse
import numpy as np
import pandas as pd
# metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# graphics
import seaborn as sn
import matplotlib.pyplot as plt
# local imports
from eurosat import Eurosat
from model import loadFromFile
from generator import MultiChannelImageDataGenerator
class Predict:
    """Evaluates a trained EuroSAT classifier on the train and test splits and
    plots a normalised confusion matrix for each split."""
    def __init__( self, args ):
        """
        constructor: load the trained model and the EuroSAT class catalogue
        """
        # initialise members
        self._model, self._architecture = loadFromFile( args.model_path )
        self._eurosat = Eurosat()
        return
    def process( self, args ):
        """
        main path of execution: load stats and data splits, predict each
        split, then plot the resulting confusion matrices
        """
        # get stats dataframe -- presumably per-channel normalisation
        # statistics consumed by the generator; verify against its caller
        stats = pd.read_csv( os.path.join( args.data_path, 'stats.csv' ) )
        # single-sample batches so every row is predicted exactly once
        args.batch_size = 1
        # get train and test dataframes
        df = { 'train' : pd.read_csv( os.path.join( args.data_path, 'train.csv' ) ),
                'test' : pd.read_csv( os.path.join( args.data_path, 'test.csv' ) ) }
        # add OHE target column to subset data frames
        for subset in [ 'train', 'test']:
            df[ subset ] = self._eurosat.updateDataFrame( df[ subset ] )
        # generate actual vs prediction
        for subset in [ 'train', 'test' ]:
            actual = np.asarray( df[ subset ][ 'id' ].tolist(), dtype=int )
            predict = self.getPrediction( df[ subset ], stats, args )
            # get confusion matrix
            cm = self.getConfusionMatrix( actual,
                                          predict,
                                          self._eurosat._classes.keys() )
            # plot confusion matrix
            self.plotConfusionMatrix( cm, subset )
        return
    def getPrediction( self, df, stats, args ):
        """
        generate prediction for images referenced in data frame; returns the
        predicted class index per row
        """
        # create generator -- shuffle=False keeps predictions aligned with df
        generator = MultiChannelImageDataGenerator( [ df ],
                                                    args.batch_size,
                                                    stats=stats,
                                                    shuffle=False )
        # initiate prediction
        steps = len( df ) // args.batch_size
        y_pred = self._model.predict_generator( generator, steps=steps )
        # return index of maximum softmax value
        return np.argmax( y_pred, axis=1 )
    def getConfusionMatrix( self, actual, predict, labels ):
        """
        compute confusion matrix for prediction; rows are normalised so each
        sums to 1, and the result is returned as a labelled DataFrame
        """
        # compute normalised confusion matrix
        cm = confusion_matrix( actual, predict )
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # parse normalised confusion matrix into dataframe
        return pd.DataFrame( cm, index=labels, columns=labels )
    def plotConfusionMatrix( self, cm, subset ):
        """
        plot train and test confusion matrix as an annotated heatmap
        """
        # create figure
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
        # plot heatmap - adjust font and label size
        sn.set(font_scale=0.8)
        sn.heatmap(cm, annot=True, annot_kws={"size": 12}, fmt='.2f', ax=axes )
        axes.set_title( 'Normalised Confusion Matrix: {}'.format( subset ) )
        plt.show()
        return
def parseArguments(args=None):
    """Parse the two required positional arguments (data_path, model_path).

    When args is None, sys.argv is parsed; otherwise the given list is used.
    """
    parser = argparse.ArgumentParser(description='eurosat train')
    for positional in ('data_path', 'model_path'):
        parser.add_argument(positional, action='store')
    return parser.parse_args(args)
def main():
    """Entry point: parse command-line arguments, build a Predict instance
    and run the evaluation."""
    cli_args = parseArguments()
    predictor = Predict( cli_args )
    predictor.process( cli_args )
    return

# execute main
if __name__ == '__main__':
    main()
| [
"c.r.williams0109@gmail.com"
] | c.r.williams0109@gmail.com |
6d48b59e64f7bb87c4865c674526f93c44ab1e21 | c598e71fd1fdd9469b9616178cc4daab9a509841 | /4. Leader&Team Dynamics - from simulation to experimentation (with Simn Karg)/oTree/__temp_migrations/LCG/0046_auto_20190117_1131.py | 98d72b7688f322525ef0f90791ab4b01e4fa40ea | [
"MIT"
] | permissive | Rnault/SCDC-archives | 0e8f7612c59ccb07b9708963481f9c5c7f3530b5 | 4de173bf807a235a3529d6e19ecc70de583173ab | refs/heads/master | 2020-07-05T19:15:27.241907 | 2019-11-30T20:37:04 | 2019-11-30T20:37:04 | 202,743,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-17 10:31
from __future__ import unicode_literals
from django.db import migrations
import otree.db.models
class Migration(migrations.Migration):
    """Reapply a six-point agreement scale to all 20 questionnaire fields.

    Originally auto-generated by Django as 20 near-identical AlterField
    literals; rewritten so the shared choices list and the question texts
    are each stated exactly once and the operations are built in a loop.
    """

    dependencies = [
        ('LCG', '0045_auto_20190117_1130'),
    ]

    # Six-point Likert agreement scale shared by every questionnaire item.
    _CHOICES = [
        [1, 'strongly agree'], [2, 'agree'], [3, 'mildly agree'],
        [4, 'mildly disagree'], [5, 'disagree'], [6, 'strongly disagree'],
    ]

    # (field name, question text) pairs, kept in the original operation order.
    _QUESTIONS = [
        ('q1', 'I try hard to act honestly in most things I do.'),
        ('q10', 'It is ok to do something you know is wrong if the rewards for doing it are great.'),
        ('q11', 'If no one is watching or will know it does not matter if I do the right thing.'),
        ('q12', 'It is more important that people think you are honest than being honest.'),
        ('q13', 'If no one could find out, it is okay to steal a small amount of money or other things that no one will miss.'),
        ('q14', 'There is no point in going out of my way to do something good if no one is around to appreciate it.'),
        ('q15', 'If a cashier accidentally gives me kr 10 extra change, I usually act as if I did not notice it.'),
        ('q16', 'Lying and cheating are just things you have to do in this world. '),
        ('q17', 'Doing things that some people might view as not honest does not bother me.'),
        ('q18', 'If people treat me badly, I will treat them in the same manner.'),
        ('q19', 'I will go along with a group decision, even if I know it is morally wrong.'),
        ('q2', 'Not hurting other people is one of the rules I live by.'),
        ('q20', "Having moral values is worthless in today's society."),
        ('q3', 'It is important for me to treat other people fairly.'),
        ('q4', 'I want other people to know they can rely on me.'),
        ('q5', 'I always act in ways that do the most good and least harm to other people.'),
        ('q6', 'If doing something will hurt another person, I try to avoid it even if no one would know.'),
        ('q7', 'One of the most important things in life is to do what you know is right.'),
        ('q8', 'Once I′ve made up my mind about what is the right thing to do, I make sure I do it.'),
        ('q9', 'As long as I make a decision to do something that helps me, it does not matter much if other people are harmed.'),
    ]

    # A plain loop (not a comprehension) is required here: names defined in a
    # class body, such as _CHOICES, are not visible inside comprehension scopes.
    operations = []
    for _field, _question in _QUESTIONS:
        operations.append(
            migrations.AlterField(
                model_name='player',
                name=_field,
                field=otree.db.models.IntegerField(
                    choices=_CHOICES, null=True, verbose_name=_question),
            )
        )
    del _field, _question
| [
"32672200+Rnault@users.noreply.github.com"
] | 32672200+Rnault@users.noreply.github.com |
fc07286660f08ae78ec2f050069a02a7ff21a844 | d59996dc9d343c78a8749437f24a6b8722b243cf | /Python-Files/Fish-NoFish/evaluation/roc_curve.py | b231875061d5e0afef572cb69de03e00bd69eb3c | [
"Apache-2.0"
] | permissive | jcgeo9/ML-For-Fish-Recognition | 70ff0c18c026f2a7e9d33345999a77c919fd0788 | 0b5faba77d0b2c5452950637f047882c80fa6fb7 | refs/heads/main | 2023-07-20T07:28:57.115057 | 2021-08-21T20:19:50 | 2021-08-21T20:19:50 | 389,645,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | # =============================================================================
# Created By : Giannis Kostas Georgiou
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File to load the saved model, predict class of images
# of its test set and plot and save its roc curve
# To be used after test set is converted to .pickle files and
# model is trained and saved
# How to use : Replace variables in CAPS according to needs of the test set
# =============================================================================
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(fix): the original file contained '%matplotlib inline' here. That is an
# IPython/Jupyter magic, not Python, and raises a SyntaxError when this script
# is run as a plain .py file, so it has been removed.
import seaborn as sns
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix
import os
import cv2
import tensorflow as tf
import pickle
import sys

# Output locations and file names for the generated ROC figures.
save_roc_loc = 'PATH TO WHERE THE ROC SHOULD BE SAVED'
name_roc = 'NAME OF THE ROC'
name_roc_zoomed = 'NAME OF THE ZOOMED ROC'
# Input locations: pickled test set and the trained Keras model.
data_dir = 'PATH TO TEST SET DIRECTORY'
model_path = 'PATH TO SAVED MODEL'

# Load the test-set feature matrix (X) and class labels (y).
X = pickle.load(open(data_dir + "X_combined_test_data.pickle", "rb"))
y = pickle.load(open(data_dir + "y_combined_test_data.pickle", "rb"))

# Scale pixel values from [0, 255] to [0, 1], matching the training pipeline.
X = X / 255.0

# Load the trained binary fish / no-fish classifier.
model = tf.keras.models.load_model(
    model_path + "Binary_Filters_32,32,64,64,64-Dense_64_BEST")

# Predict hard class labels for the test set.
# NOTE(review): Sequential.predict_classes() was removed in TensorFlow 2.6;
# on newer versions use (model.predict(X) > 0.5).astype(int) instead --
# confirm the installed TF version before changing this call.
y_pred = model.predict_classes(X)

# Compute the ROC curve and the area under it.
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y, y_pred)
auc_keras = auc(fpr_keras, tpr_keras)

# Plot and save the full ROC curve (both axes span [0, 1]).
plt.figure(figsize=(12, 12))
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label='(area = {:.3f})'.format(auc_keras))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend()
plt.savefig(save_roc_loc + name_roc, dpi=400)
plt.show()

# Plot and save a zoomed view of the top-left corner (x in [0, 0.1], y in [0.9, 1]).
plt.figure(figsize=(12, 12))
plt.xlim(0, 0.1)
plt.ylim(0.9, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.savefig(save_roc_loc + name_roc_zoomed, dpi=400)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
57599d880bfc8ab69bb071aed7da865fd03fc465 | 80220579951046b30e5873ec42d8a31770a06b71 | /product/serializers.py | 45d157811c1bd74a6c578e154e5476d03e4c5bb5 | [] | no_license | Mohamed2011-bit/hdya-api | 676e39e6ab8a2e50b26383ba06acc43274ef326e | fa213c36c6f88702cc6afd8d7f63c1d7bfc19956 | refs/heads/master | 2023-02-09T04:07:02.144404 | 2021-01-05T12:18:51 | 2021-01-05T12:18:51 | 326,989,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | from .models import * # Product , Category , Occassion , RelationShip
from rest_framework import serializers, status, validators
from rest_framework.response import Response
from django.utils import timezone
class CategorySerializer(serializers.ModelSerializer):
    """Expose a Category's id, title and description."""
    class Meta:
        model = Category
        fields = ('id', 'title', 'description')
class OccassionSerializer(serializers.ModelSerializer):
    """Expose an Occassion's id, name and description."""
    class Meta:
        model = Occassion
        fields = ('id', 'name', 'description')
class RelationShipSerializer(serializers.ModelSerializer):
    """Expose a RelationShip's id, name and description."""
    class Meta:
        model = RelationShip
        fields = ('id', 'name', 'description')
class ProductPictureSerializer(serializers.ModelSerializer):
    """Expose a product picture's id, image file and owning product id."""
    class Meta:
        model = ProductPicture
        fields = ('id', 'image', 'product')
class ProductSerializer(serializers.ModelSerializer):
    """Full product serializer with nested images and related-id fields.

    The authenticated request user is attached during validation, so the
    (read-only) ``user`` field is never supplied by the client.
    """

    category = serializers.PrimaryKeyRelatedField(queryset=Category.objects.all())
    occassions = serializers.PrimaryKeyRelatedField(
        many=True, queryset=Occassion.objects.all(), required=False)
    relationships = serializers.PrimaryKeyRelatedField(
        many=True, queryset=RelationShip.objects.all(), required=False)
    images = ProductPictureSerializer(many=True, required=False)

    class Meta:
        model = Product
        fields = ('id',
                  'name',
                  'details',
                  'category',
                  'price',
                  'age_from',
                  'age_to',
                  'gender',
                  'is_featured',
                  'user',
                  'occassions',
                  'relationships',
                  'images',
                  'number_of_user_rated_product',
                  'avg_rate',
                  'created_at',
                  'updated_at',
                  )
        read_only_fields = ('is_featured', 'created_at', 'updated_at', 'user')

    def validate(self, attrs):
        """Reject inverted age ranges and attach the requesting user."""
        if attrs['age_from'] > attrs['age_to']:
            # Fixed typos in the original message ("valud", "mus", "greather").
            raise serializers.ValidationError(
                'invalid value for age_to. It must be greater than or equal to age_from')
        attrs['user'] = self.context['request'].user
        return attrs

    def create(self, validated_data):
        """Create the product.

        A leftover debug ``print(validated_data)`` was removed: it could leak
        user-submitted data to stdout/logs on every create.
        """
        return super(ProductSerializer, self).create(validated_data)
class ReviewSerializer(serializers.ModelSerializer):
    """Serialize every Review field; user and created_at are server-set."""
    class Meta:
        model = Review
        fields = '__all__'
        read_only_fields = ('user', 'created_at')
class OrderSerializer(serializers.ModelSerializer):
    """Serialize every Order field; user and timestamps are server-set."""
    class Meta:
        model = Order
        fields = '__all__'
        read_only_fields = ('user', 'created_at', 'updated_at')
class ProductReportSerializer(serializers.ModelSerializer):
    """Serialize every ProductReport field with no restrictions."""
    class Meta:
        model = ProductReport
        fields = '__all__'
class ReviewReportSerializer(serializers.ModelSerializer):
    """Serialize every ReviewReport field with no restrictions."""
    class Meta:
        model = ReviewReport
        fields = '__all__'
| [
"egyria2011@gmail.com"
] | egyria2011@gmail.com |
95bb386cc14b99e28952fb65f32afe14f29c9620 | e6b4f7a3721c9f0c59de2623165b6967fa48a095 | /gispot/crcpy/raw/ejpg.py | 6ea332206b053c9830b21ceda779745b33c4b506 | [] | no_license | hygnic/Gispot | 8a3db18e4348597990793968d502c4619afdd523 | 440d168fd84bd98d2d9f2bc27b34ac9d7816a4e1 | refs/heads/master | 2023-04-29T15:39:09.876858 | 2023-04-16T08:17:55 | 2023-04-16T08:17:55 | 220,610,954 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 987 | py | # -*- coding:cp936 -*-
# lcc
"""
批量将导出MXD文档导出为JPEG图片
"""
#
# import sys
# sys.path.append("../../GUIs")
# print sys.path
import arcpy,os
# import tooltk
# tooltk.Tooltk().rootwindow.mainloop()
# 设置需要出图mxd文档文件目录
# path = ur"G:\正安县\正安县公示图\400"
# 设置分辨率
# res = 300
arcpy.env.overwriteOutput = True
def export(path, res):
    """
    Batch-export every MXD map document in a folder to a JPEG image.
    :param path: directory that contains the .mxd files (string)
    :param res: output resolution in DPI (int)
    :return: None
    """
    for afile in os.listdir(path):
        if afile[-3:].lower() == 'mxd':
            mxd1 = arcpy.mapping.MapDocument(os.path.join(path, afile))
            # Progress message ("exporting..."); user-facing text kept in Chinese.
            print u"正在出图..."
            arcpy.mapping.ExportToJPEG(mxd1,
            os.path.join(path, afile[:-3] + 'jpg'), resolution = res)
            # Release the map document reference before the next iteration.
            del mxd1
            print 'Done'
        else:
            # Skip message ("not an MXD file, skipping").
            print u"\n非MXD文件,跳过"
if __name__ == '__main__':
    # "path" is a placeholder; point it at a real folder of .mxd files.
    export("path", 300)
    # app = tooltk.Tooltk()
    # app.GUIexport()
    #
    # app.window.mainloop()
| [
"hygnic@outlook.com"
] | hygnic@outlook.com |
d9ac6aaaeeaf79aa22f03653a341b038974aaff2 | 2804432fba5a4fe639d07a207bb01f71e03d9189 | /test/cts/tool/CTSConverter/src/nn/specs/V1_0/space_to_depth_float_2.mod.py | df557f6dc777e190bcc08907f42fa96d78c54f38 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | intel/webml-polyfill | 5685299e1b6d91a010c5e057685bf010d5646e4f | bd014955c5bcc9dc5465aea06721072f45ab4a75 | refs/heads/master | 2023-09-01T17:30:55.961667 | 2023-04-14T01:18:47 | 2023-04-14T01:18:47 | 126,892,425 | 168 | 75 | Apache-2.0 | 2023-04-14T05:16:41 | 2018-03-26T21:31:32 | Python | UTF-8 | Python | false | false | 541 | py | model = Model()
# SPACE_TO_DEPTH with block_size 2: each 2x2 spatial block of the 1x4x4x1
# input is folded into the channel dimension, producing a 1x2x2x4 output.
i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
block = Int32Scalar("block_size", 2)
output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]}
output0 = {output: # output 0
           [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.]}
# Instantiate an example that pairs the concrete input with its expected output.
Example((input0, output0))
| [
"feng.dai@intel.com"
] | feng.dai@intel.com |
7983688468b4d5fa8642db2796bd417ead2836df | 6929d8bbe467fc44f6cad50a549b3ad55a112061 | /sentimental_analysis/naive_bayes_census.py | 8b6f0991d4e2ed4e5b5ec87725037bebe7219dbd | [] | no_license | douglasdcm/machine-learning | 8ca54e8f57c15d7de4ff007e6ae81007492ac5e9 | 0374959baa2f7bb43973c82792d2be329d87cd23 | refs/heads/master | 2023-01-13T05:07:58.641325 | 2020-11-06T01:07:45 | 2020-11-06T01:07:45 | 285,450,117 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 16:59:59 2019
@author: Douglas
"""
import pandas as pd
base = pd.read_csv('census.csv')
# separate the attribute matrix (columns 0-13) from the target class (column 14)
previsores = base.iloc[:, 0:14].values
classe = base.iloc[:, 14].values
# encode categorical attribute columns as integer labels
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_previsores = LabelEncoder()
previsores[:, 1] = labelencoder_previsores.fit_transform(previsores[:, 1])
previsores[:, 3] = labelencoder_previsores.fit_transform(previsores[:, 3])
previsores[:, 5] = labelencoder_previsores.fit_transform(previsores[:, 5])
previsores[:, 6] = labelencoder_previsores.fit_transform(previsores[:, 6])
previsores[:, 7] = labelencoder_previsores.fit_transform(previsores[:, 7])
previsores[:, 8] = labelencoder_previsores.fit_transform(previsores[:, 8])
previsores[:, 9] = labelencoder_previsores.fit_transform(previsores[:, 9])
previsores[:, 13] = labelencoder_previsores.fit_transform(previsores[:, 13])
# one-hot encode the categorical columns so no artificial ordering is implied
# NOTE(review): categorical_features was removed in scikit-learn 0.22; this
# script requires an older scikit-learn (or a ColumnTransformer rewrite).
onehotencoder = OneHotEncoder(categorical_features = [1,3,5,6,7,8,9,13])
previsores = onehotencoder.fit_transform(previsores).toarray()
# encode the target class as integers
labelencoder_classe = LabelEncoder()
classe = labelencoder_classe.fit_transform(classe)
# put all attributes on the same scale (zero mean, unit variance)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
previsores = scaler.fit_transform(previsores)
# split the data into training and test subsets (15% held out)
from sklearn.model_selection import train_test_split
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=0)
# fit the Gaussian Naive Bayes model (builds the per-class statistics)
from sklearn.naive_bayes import GaussianNB
classificador = GaussianNB()
classificador.fit(previsores_treinamento, classe_treinamento)
# predict classes for the held-out test set
previsoes = classificador.predict(previsores_teste)
# measure prediction accuracy on the test set
from sklearn.metrics import confusion_matrix, accuracy_score
precisao = accuracy_score(classe_teste, previsoes)
# confusion matrix of correct vs. incorrect predictions
matriz = confusion_matrix(classe_teste, previsoes)
| [
"douglas.dcm@gmail.com"
] | douglas.dcm@gmail.com |
75b6a08bfc8195f65cc88df6c92417abca540564 | 758466928597639a81c74ac013a07dbd66266b2f | /design4green/home_page/migrations/0005_consommation_c_date.py | 97abf7cfb7b3dbe7541820ecc362c1b2a89dd669 | [] | no_license | Odrann/team11-design4green | 3dffcd1997e01fb2e871a222064b668f8ca66e9d | 7d446bd9680e3ed65eea42dc876504bedd93ec81 | refs/heads/master | 2020-09-13T21:51:25.266801 | 2019-11-22T05:37:13 | 2019-11-22T05:37:13 | 222,913,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # Generated by Django 2.2 on 2019-11-21 10:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the 'c_date' field to Consommation.

    The default is the *callable* datetime.date.today, so the date is
    evaluated when each row is created, not when the migration runs.
    """
    dependencies = [
        ('home_page', '0004_auto_20191121_1026'),
    ]
    operations = [
        migrations.AddField(
            model_name='consommation',
            name='c_date',
            field=models.DateField(default=datetime.date.today),
        ),
    ]
| [
"root@vps753512.ovh.net"
] | root@vps753512.ovh.net |
a8e832012a3c3327109eb9d3599d47e1af0658d0 | ec110cb38223acf0fa1f32e043b74edd3e321b2d | /函数/数学运算.py | be4fea690c1a1df75af9540c0ac5a2cfd238ed2a | [] | no_license | liuweihai/python_3 | c55dc03012213637ddfcf3ef8256845af395423c | 452ab91328928aabf9b834e866accbc8264c48f9 | refs/heads/master | 2020-03-30T00:54:58.235151 | 2018-10-15T12:12:03 | 2018-10-15T12:12:03 | 150,550,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import math
# abs -- absolute value of a number
print(abs(-33.2))
# build a complex number (use case unclear): complex([real[, imag]])
print(complex('2'))
# convert an integer to a float
print(float(2))
# convert a float to an int (truncates toward zero)
print(int(2.7))
# pow -- exponentiation, pow(x, y)
print(pow(2,-10))
# build up a sequence of numbers
obj = []
for x in range(3):
    obj.append(x)
    print(obj,x)
# u'obj总和:' means "sum of obj"; runtime text intentionally left in Chinese
print(u'obj总和:',sum(obj))
# 9x9 multiplication table
for x in range(1,10):
    for y in range(1,x+1):
        print(y,'*',x,'=',x*y)
| [
"liugangqwe@sohu.com"
] | liugangqwe@sohu.com |
da4b2bb2dded2719a601563b647c1ecb6d568f76 | 850e1ef3007236da868444a97edb230e98b6aa6b | /test.py | 9927d9e0d4c97f00cb422e7ff9dd888b8e212d43 | [] | no_license | wubiao239/python | 343bf1f6449606e3c9695d40eb072f028f879670 | d0d59405dfb4c46f6e55f7e95eac4fa2c3822984 | refs/heads/master | 2021-01-18T23:02:44.052493 | 2016-04-21T12:25:38 | 2016-04-21T12:25:38 | 42,394,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import urllib.request
import urllib.parse
# NOTE(review): real-looking account credentials are hard-coded below and
# should be moved out of source control.
email="domainindia@163.com" # account user name
password="wb19900321" # account password
ry=1 # registration period in years
rd="2015-10-24" # registration date
cuid="13392391" # customer id
coid="46297369" # contact id
# request parameters ==========================================================
param={"user_email":email,"user_pw":password,"domain_name":"test.in","reg_years":ry, "reg_date":rd, "customer_id":cuid, "contact_id":coid ,"contact_submit":"立即加入"}
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
params = urllib.parse.urlencode(param)
binary_data = params.encode()
# POST the form; "立即加入" is the submit-button label ("join now") and is
# part of the payload, so it must stay in Chinese.
res=urllib.request.Request("http://tools.crusherexporters.com/post-domain.php",data=binary_data,headers=headers)
f = urllib.request.urlopen(res)
print(f.read().decode())
| [
"wubiao239@163.com"
] | wubiao239@163.com |
9a0a6ee353a2d8e0a58603081ad649422122d6fa | 4f57d03df135822a63c4f00f2b5e6dcb3c9a3cdc | /setup.py | aa22d008c02e69c578c9b1e5cbdbdfcae5e6c2c1 | [] | no_license | exantech/monero-wallet-service | 059c437e261f4d14a89a7786d1152d735d66f181 | 720477c30e7f14936d530f635d7fa09fc516ee54 | refs/heads/master | 2022-12-10T11:09:10.747734 | 2018-03-19T15:55:28 | 2019-06-03T11:38:19 | 189,993,281 | 2 | 0 | null | 2022-12-08T01:04:04 | 2019-06-03T11:35:45 | Python | UTF-8 | Python | false | false | 2,392 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Absolute path to this file's directory (currently unused because the
# long_description read below is commented out).
here = path.abspath(path.dirname(__file__))
setup(
    name='monero-wallet-service',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.0.1',
    zip_safe=False,
    description='Monero Wallet Service backend',
    # long_description=long_description,
    # Author details
    author='Denis Voskvitsov',
    author_email='dv@exante.eu',
    # Choose your license
    license='EULA',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    package_index='http://ci2-pypi.ghcg.com/simple/',
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # NOTE(review): every dependency is pinned exactly (==), which makes the
    # package brittle to co-installation; consider compatible-release ranges.
    install_requires=[
        'aiohttp==3.0.9',
        'aiohttp-swagger==1.0.5',
        'aioredis==1.1.0',
        'async-timeout==2.0.1',
        'attrs==17.4.0',
        'boto3==1.9.90',
        'chardet==3.0.4',
        'hiredis==0.2.0',
        'idna==2.6',
        'idna-ssl==1.0.1',
        'Jinja2==2.10',
        'MarkupSafe==1.0',
        'multidict==4.1.0',
        'PyYAML==3.12',
        'yarl==1.1.1',
        'peewee==2.10.2',
        'peewee-async==0.5.12',
        'peewee-db-evolve==0.6.8',
        'psycopg2==2.7.4',
        'psycopg2-binary==2.7.4',
        'aiopg==0.13.2',
        'python-slugify==1.2.5',
        'urllib3==1.22',
        'ujson==1.35',
        'Flask==0.12.2',
        'flask-peewee==3.0.0',
        'flask-swagger-ui==3.6.0',
        'uwsgi==2.0.17',
        'redis==2.10.6',
        'cryptonote==0.1',
    ],
    include_package_data=True,
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
| [
"denis.voskvitsov@gmail.com"
] | denis.voskvitsov@gmail.com |
9a2b8cc4e98ef53d1b0387daf377c46dea6c8bfd | 37d026d9ca3809327585b96dbb086e23187688ff | /WannaEat/views.py | 38575c631a8dcca5afb07db3596aa4f9144e7292 | [] | no_license | x1Dman/PracticeWork2020 | b918a817d4ab21e8104fb0ec95cb83f7200c1066 | 8f152e987d2d01d54f42ff5c35e5cfbfa0ca8825 | refs/heads/master | 2022-12-22T11:57:59.518115 | 2020-10-01T21:05:37 | 2020-10-01T21:05:37 | 282,449,303 | 0 | 1 | null | 2020-10-01T21:05:38 | 2020-07-25T13:27:48 | Python | UTF-8 | Python | false | false | 2,742 | py | from django.contrib.auth import authenticate
from django.shortcuts import render, redirect
from django.views.decorators.csrf import ensure_csrf_cookie
# Create your views here.
from django.views.generic.base import View
from django.views.generic import ListView, DetailView
from rest_framework import status, generics
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Receipt
from .forms import ReviewForm, UploadFileForm
from .serializers import ReceiptListSerializer, UserSerializer
class ReceiptView(ListView):
    """Render the full receipt collection with the receipts_list template."""

    def get(self, request):
        context = {"receipts": Receipt.objects.all()}
        return render(request, "receipts/receipts_list.html", context)
class ReceiptDetailView(DetailView):
    """Render a single receipt looked up by its video_ulr slug."""

    def get(self, request, slug):
        # NOTE(review): 'video_ulr' mirrors the (misspelled) model field name.
        selected = Receipt.objects.get(video_ulr=slug)
        return render(request, "receipts/receipt_detail.html", {"receipt": selected})
class AddReview(View):
    """Accept a POSTed review for a receipt, optionally threaded to a parent.

    Changes from the original: leftover debug print() calls were removed, and
    the user-supplied "parent" id is parsed defensively instead of letting a
    malformed value raise an unhandled ValueError (HTTP 500).
    """

    def post(self, request, pk):
        form = ReviewForm(request.POST)
        if form.is_valid():
            review = form.save(commit=False)
            parent = request.POST.get("parent")
            if parent:
                try:
                    review.parent_id = int(parent)
                except (TypeError, ValueError):
                    # Malformed parent id: store as a top-level review.
                    pass
            review.receipt_id = pk
            review.save()
        # Invalid forms are silently dropped (as before); always send the
        # user back to the receipt's page.
        return redirect(Receipt.objects.get(id=pk).get_absolute_url())
class UserCreate(generics.CreateAPIView):
    """Open (unauthenticated) endpoint for registering a new user."""
    authentication_classes = ()
    permission_classes = ()
    serializer_class = UserSerializer
class LoginView(APIView):
    """Exchange a username/password pair for the user's DRF auth token."""

    permission_classes = ()

    def post(self, request):
        credentials = {
            "username": request.data.get("username"),
            "password": request.data.get("password"),
        }
        user = authenticate(**credentials)
        if not user:
            return Response({"error": "Wrong Credentials"},
                            status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): assumes an auth token already exists for the user
        # (e.g. created by a post-save signal) -- confirm before relying on it.
        return Response({"token": user.auth_token.key})
class ReceiptListView(APIView):
    """List every receipt (GET) or create a new one (POST)."""

    def get(self, request):
        serializer = ReceiptListSerializer(Receipt.objects, many=True)
        return Response(serializer.data)

    def post(self, request):
        serializer = ReceiptListSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
"glaider102@mail.ru"
] | glaider102@mail.ru |
85ffcabb6a037ada824120d70ad122cef05babc1 | cfc68ffbc9b16c24368a53ebca44cd2180a164fb | /sklearn/_min_dependencies.py | 7c8042b8b14350c674d8d88b24e279bd8b8d1f6c | [
"BSD-3-Clause"
] | permissive | cuicaihao/scikit-learn | b21b15473ffc90ed8688ec17b0eb77dd2f3f7ac3 | 9c9c8582dff9f4563aa130ef89f155bad0051493 | refs/heads/master | 2022-11-19T14:08:45.737980 | 2022-11-18T18:29:04 | 2022-11-18T18:29:04 | 56,463,897 | 2 | 0 | BSD-3-Clause | 2023-09-07T04:13:18 | 2016-04-17T23:35:29 | Python | UTF-8 | Python | false | false | 2,648 | py | """All minimum dependencies for scikit-learn."""
from collections import defaultdict
import platform
import argparse
# scipy and cython should be in sync with pyproject.toml
# NumPy version should match oldest-supported-numpy for the minimum supported
# Python version.
# see: https://github.com/scipy/oldest-supported-numpy/blob/main/setup.cfg
# PyPy needs a newer NumPy than the oldest release supported on CPython.
if platform.python_implementation() == "PyPy":
    NUMPY_MIN_VERSION = "1.19.2"
else:
    NUMPY_MIN_VERSION = "1.17.3"
# Minimum supported versions of the remaining core build/runtime dependencies.
SCIPY_MIN_VERSION = "1.3.2"
JOBLIB_MIN_VERSION = "1.1.1"
THREADPOOLCTL_MIN_VERSION = "2.0.0"
PYTEST_MIN_VERSION = "5.3.1"
CYTHON_MIN_VERSION = "0.29.24"
# 'build' and 'install' is included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
# Maps package name -> (minimum supported version, comma-separated extra tags).
dependent_packages = {
    "numpy": (NUMPY_MIN_VERSION, "build, install"),
    "scipy": (SCIPY_MIN_VERSION, "build, install"),
    "joblib": (JOBLIB_MIN_VERSION, "install"),
    "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
    "cython": (CYTHON_MIN_VERSION, "build"),
    "matplotlib": ("3.1.3", "benchmark, docs, examples, tests"),
    "scikit-image": ("0.16.2", "docs, examples, tests"),
    "pandas": ("1.0.5", "benchmark, docs, examples, tests"),
    "seaborn": ("0.9.0", "docs, examples"),
    "memory_profiler": ("0.57.0", "benchmark, docs"),
    "pytest": (PYTEST_MIN_VERSION, "tests"),
    "pytest-cov": ("2.9.0", "tests"),
    "flake8": ("3.8.2", "tests"),
    "black": ("22.3.0", "tests"),
    "mypy": ("0.961", "tests"),
    "pyamg": ("4.0.0", "tests"),
    "sphinx": ("4.0.1", "docs"),
    "sphinx-gallery": ("0.7.0", "docs"),
    "numpydoc": ("1.2.0", "docs, tests"),
    "Pillow": ("7.1.2", "docs"),
    "pooch": ("1.6.0", "docs, examples, tests"),
    "sphinx-prompt": ("1.3.0", "docs"),
    "sphinxext-opengraph": ("0.4.2", "docs"),
    "plotly": ("5.10.0", "docs, examples"),
    # XXX: Pin conda-lock to the latest released version (needs manual update
    # from time to time)
    "conda-lock": ("1.1.3", "maintenance"),
}
# Inverse mapping for setuptools: extra tag -> list of ">=min" requirements.
tag_to_packages: dict = defaultdict(list)
for pkg_name, (min_ver, extra_tags) in dependent_packages.items():
    for tag in extra_tags.split(", "):
        tag_to_packages[tag].append(f"{pkg_name}>={min_ver}")
# Used by CI to get the min dependencies
# CLI helper: print the minimum supported version of a single dependency.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Get min dependencies for a package")
    parser.add_argument("package", choices=dependent_packages)
    args = parser.parse_args()
    min_version = dependent_packages[args.package][0]
    print(min_version)
| [
"noreply@github.com"
] | noreply@github.com |
2c044c0ffc8be7367fa298dac5d0de87e27c6dcb | d51efec85349f0ae1ef5b61e143b70fa0480e451 | /single inheritance.py | 07163f65348cab51cf85cb87e73b9464eec46dba | [] | no_license | subhamkrverma/pythonprog | 73b616b7f2df54a81b24f8edee96c441e60c7cdc | 557471a6c75eedf4c359a36c12bf257ef26429e0 | refs/heads/master | 2020-12-30T04:11:20.188184 | 2020-02-07T06:14:10 | 2020-02-07T06:14:10 | 238,855,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | class Student:
college="Aditya"
def __init__(self,name,rollno,fee):
    """Store the student's name, roll number and fee on the instance."""
    self.name=name
    self.rollno=rollno
    self.fee=fee
class HostelStudent(Student):
    """A student who lives in the hostel; adds room number and hostel fee."""

    def __init__(self, room, hfee, name, rollno, fee):
        """Initialise the base Student part first, then the hostel fields."""
        super().__init__(name, rollno, fee)
        self.room = room
        self.hfee = hfee

    def display_data(self):
        """Print every hostel and base-student field on a single line."""
        print(self.room, self.hfee, self.name, self.rollno, self.fee, self.college)
# Demo: build one hostel student and print all of its fields.
s1=HostelStudent(207,"80k","subham","1248","70k")
s1.display_data()
| [
"noreply@github.com"
] | noreply@github.com |
ce8252c5e46cdff5af7de4c0567b2517223e86ef | e7795ee2b99cbad79210f54b85532a5575ed10ea | /ctf/icec.tf/numb3rs/n.py | 870845e1a7dfb75422e6e8325346bdd36e7f523d | [] | no_license | duracell/challenges | 9c8106606262770393d17a5c46502b53beb1a9cd | cb748a72cced69b3ff3894b93778af0ab533a57e | refs/heads/master | 2021-06-01T02:04:46.139286 | 2016-06-29T19:18:39 | 2016-06-29T19:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | #!/usr/bin/env python3
from string import ascii_lowercase as a
def chunks(line, n=2):
for i in range(0, len(line), n):
yield line[i:i+n]
c = "05110006_00111507000104190802_08130308020418".replace("_", "")
print("".join((a[int(x)] for x in chunks(c))))
| [
"wehrmeyer.martin@web.de"
] | wehrmeyer.martin@web.de |
bd76088d4ae1dc4f81258e126d1f7f191b466add | cf1b3312af6395c0f8cc7d3ef7d6310a125816bf | /examples/text_to_sql/RAT-SQL/evaluation/utils.py | 455e5a391481ce269d9afbc4625a17a98e566448 | [
"Apache-2.0"
] | permissive | thomas-yanxin/PaddleNLP | 92db7b4c5eef4494f6e770eaebd80001e66494d2 | 1ddc5bbeeb587a20c10629d17b030214aba77990 | refs/heads/develop | 2023-06-22T18:00:34.532679 | 2021-07-21T06:12:58 | 2021-07-21T06:12:58 | 388,380,705 | 1 | 0 | Apache-2.0 | 2021-07-22T08:11:12 | 2021-07-22T08:11:11 | null | UTF-8 | Python | false | false | 14,829 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import logging
import re
import copy
import json
op_sql_dict = {0: ">", 1: "<", 2: "==", 3: "!="}
agg_sql_dict = {0: "", 1: "AVG", 2: "MAX", 3: "MIN", 4: "COUNT", 5: "SUM"}
conn_sql_dict = {0: "", 1: "and", 2: "or"}
### from IRNet keywords, need to be simplify
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit',
'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
COND_OPS = ('not_in', 'between', '==', '>', '<', '>=', '<=', '!=', 'in', 'like')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
LOGIC_AND_OR = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
CONST_COLUMN = set(['time_now'])
EXPECT_BRACKET_PRE_TOKENS = set(AGG_OPS + SQL_OPS + COND_OPS + CLAUSE_KEYWORDS +
('from', ','))
g_empty_sql = {
"select": [],
"from": {
"conds": [],
"table_units": []
},
"where": [],
"groupBy": [],
"having": [],
"orderBy": [],
"limit": None,
"except": None,
"intersect": None,
"union": None
}
def is_float(value):
"""is float"""
try:
float(value)
return True
except ValueError:
return False
except TypeError:
return False
def get_scores(count, pred_total, gold_total):
"""
Args:
Returns:
"""
if pred_total != gold_total:
return 0, 0, 0
elif count == pred_total:
return 1, 1, 1
return 0, 0, 0
def tokenize_NL2SQL(string, cols, single_equal=False, math=True):
"""
Args:
Returns:
"""
string = string.replace("\'", "\"").lower()
assert string.count('"') % 2 == 0, "Unexpected quote"
re_cols = [i.lower() for i in cols]
def _extract_value(string):
"""extract values in sql"""
fields = string.split('"')
for idx, tok in enumerate(fields):
if idx % 2 == 1:
fields[idx] = '"%s"' % (tok)
return fields
def _resplit(tmp_tokens, fn_split, fn_omit):
"""resplit"""
new_tokens = []
for token in tmp_tokens:
token = token.strip()
if fn_omit(token):
new_tokens.append(token)
elif re.match(r'\d\d\d\d-\d\d(-\d\d)?', token):
new_tokens.append('"%s"' % (token))
else:
new_tokens.extend(fn_split(token))
return new_tokens
def _split_aggs(tmp_tokens):
"""split aggs in select"""
new_toks = []
for i, tok in enumerate(tmp_tokens):
if tok in ('from', 'where'):
new_toks.extend(tmp_tokens[i:])
break
if not ((tok.endswith(')') or tok.endswith('),')) and len(tok) > 5):
new_toks.extend(tok.split(','))
continue
extra = ''
if tok.endswith(','):
extra = ','
tok = tok[:-1]
if tok[:4] in ('sum(', 'avg(', 'max(', 'min('):
new_toks.extend([tok[:3], '(', tok[4:-1], ')'])
elif tok[:6] == 'count(':
new_toks.extend(['count', '(', tok[6:-1], ')'])
else:
new_toks.append(tok)
if extra:
new_toks.append(extra)
return new_toks
def join_by_col(toks, cols):
new_toks = []
_len = len(toks)
i = 0
while i < _len - 1:
merge = False
for j in range(10):
if ''.join(toks[i:i + j]) in cols:
new_toks.append(''.join(toks[i:i + j]))
i += j
merge = True
if not merge:
new_toks.append(toks[i])
i += 1
new_toks.append(toks[-1])
return new_toks
tokens_tmp = _extract_value(string)
two_bytes_op = ['==', '!=', '>=', '<=', '<>', '<in>']
if single_equal:
if math:
sep1 = re.compile(r'([ \+\-\*/\(\)=,><;])') # 单字节运算符
else:
sep1 = re.compile(r'([ \(\)=,><;])')
else:
if math:
sep1 = re.compile(r'([ \+\-\*/\(\),><;])') # 单字节运算符
else:
sep1 = re.compile(r'([ \(\),><;])')
sep2 = re.compile('(' + '|'.join(two_bytes_op) + ')') # 多字节运算符
tokens_tmp = _resplit(tokens_tmp, lambda x: x.split(' '),
lambda x: x.startswith('"'))
tokens_tmp = _resplit(tokens_tmp, lambda x: re.split(sep2, x),
lambda x: x.startswith('"'))
tokens_tmp = _split_aggs(tokens_tmp)
tokens = list(filter(lambda x: x.strip() != '', tokens_tmp))
tokens = join_by_col(tokens, re_cols)
def _post_merge(tokens):
"""merge:
* col name with "(", ")"
* values with +/-
"""
idx = 1
while idx < len(tokens):
if tokens[idx] == '(' and tokens[
idx - 1] not in EXPECT_BRACKET_PRE_TOKENS and tokens[
idx - 1] != '=':
while idx < len(tokens):
tmp_tok = tokens.pop(idx)
tokens[idx - 1] += tmp_tok
if tmp_tok == ')':
break
elif tokens[idx] in (
'+', '-'
) and tokens[idx - 1] in COND_OPS and idx + 1 < len(tokens):
tokens[idx] += tokens[idx + 1]
tokens.pop(idx + 1)
idx += 1
else:
idx += 1
return tokens
tokens = _post_merge(tokens)
if single_equal:
tokens = [i if i != '=' else '==' for i in tokens]
return tokens
def sql2query(sql, cols):
"""
transform sql json to sql query, this is only for NL2SQL, eg. select a, b where a op val1
"""
sels = sql['sel']
aggs = sql['agg']
op = sql["cond_conn_op"]
conds = sql["conds"]
condstrs = [
f'{cols[cond[0]]} {op_sql_dict[cond[1]]} "{cond[2]}"' for cond in conds
]
cond_str = f" {conn_sql_dict[op]} ".join(condstrs)
def agg_col(agg, col):
if agg == 0:
return cols[col]
else:
return f"{agg_sql_dict[agg]} ( {cols[col]} )"
selstrs = [agg_col(i, j) for i, j in zip(aggs, sels)]
sel_str = ' , '.join(selstrs)
return f"SELECT {sel_str} WHERE {cond_str}"
def query2sql(query, cols, single_equal=False, with_value=True):
cols = [i.lower() for i in cols]
sql_op_dict = {}
sql_agg_dict = {}
sql_conn_dict = {}
for k, v in op_sql_dict.items():
sql_op_dict[v] = k
sql_op_dict[v.lower()] = k
for k, v in agg_sql_dict.items():
sql_agg_dict[v] = k
sql_agg_dict[v.lower()] = k
for k, v in conn_sql_dict.items():
sql_conn_dict[v] = k
sql_conn_dict[v.lower()] = k
query = tokenize_NL2SQL(query, cols, single_equal=single_equal, math=False)
assert query[0] == 'select'
def parse_cols(toks, start_idx):
"""
:returns next idx, (agg, col)
"""
if 'from' in toks:
toks = toks[:toks.index('from')]
idx = start_idx
len_ = len(toks)
outs = []
while idx < len_:
if toks[idx] in AGG_OPS:
agg_id = sql_agg_dict[toks[idx]]
idx += 1
assert idx < len_ and toks[idx] == '(', toks[idx]
idx += 1
agg, col = toks[start_idx], toks[idx]
idx += 1
assert idx < len_ and toks[idx] == ')', toks[idx] + ''.join(
toks)
idx += 1
outs.append((agg, col))
elif toks[idx] == ',':
idx += 1
else:
agg, col = '', toks[idx]
idx += 1
outs.append(('', col))
return outs
def _format_col(old_col):
"""format"""
if old_col.lower().startswith('table_'):
return old_col.split('.', 1)[1]
else:
return old_col
if 'where' not in query:
cond_index = len(query)
conn = ''
conds = []
else:
cond_index = query.index("where")
condstr = query[cond_index + 1:]
conn = [i for i in condstr[3::4]]
assert len(set(conn)) < 2, conn
conn = list(set(conn))[0] if conn else ''
conds = [condstr[i:i + 3] for i in range(len(condstr))[::4]]
sels = parse_cols(query[:cond_index], 1)
sql = {}
sql["agg"] = [sql_agg_dict[i[0]] for i in sels]
sql["cond_conn_op"] = sql_conn_dict[conn]
sql["sel"] = [cols.index(_format_col(i[1])) for i in sels]
if with_value:
sql["conds"] = [[
cols.index(_format_col(c[0])), sql_op_dict[c[1]],
'"' + c[2].strip('\"') + '"'
] for c in conds]
else:
sql["conds"] = [[
cols.index(_format_col(c[0])), sql_op_dict[c[1]], "1"
] for c in conds]
sql_sels = [(sql_agg_dict[i[0]], cols.index(_format_col(i[1])))
for i in sels]
return sql, sql_sels
def evaluate_NL2SQL(table, gold, predict, single_equal=False, mode=None):
scores = {}
scores_novalue = {}
# load db
with open(table) as ifs:
table_list = json.load(ifs)
table_dict = {}
for table in table_list:
table_dict[table['db_id']] = table
# load qa
with open(
gold, 'r', encoding='utf-8') as f1, open(
predict, 'r', encoding='utf-8') as f2:
gold_list = [l.strip().split('\t') for l in f1 if len(l.strip()) > 0]
gold_dict = dict([(x[0], x[1:]) for x in gold_list])
pred_list = [l.strip().split('\t') for l in f2 if len(l.strip()) > 0]
pred_dict = dict([(x[0], x[1]) for x in pred_list])
right = total = 0
cnt_sel = 0
cnt_cond = cnt_conn = 0
def compare_set(gold, pred):
_pred = copy.deepcopy(pred)
_gold = copy.deepcopy(gold)
pred_total = len(_pred)
gold_total = len(_gold)
cnt = 0
for unit in _pred:
if unit in _gold:
cnt += 1
_gold.remove(unit)
return cnt, pred_total, gold_total
for qid, item in gold_dict.items():
total += 1
if qid not in pred_dict:
continue
sql_gold, db_id = ''.join(item[0:-1]), item[-1]
db = table_dict[db_id]
cols = [i[1] for i in db["column_names"]]
sql_pred = pred_dict[qid]
try:
sql_gold = sql_gold.replace('==', '=')
sql_pred = sql_pred.replace('==', '=')
components_gold, sels_gold = query2sql(
sql_gold, cols, single_equal=single_equal)
components_pred, sels_pred = query2sql(
sql_pred, cols, single_equal=single_equal)
cnt, pred_total, gold_total = compare_set(sels_gold, sels_pred)
score_sels, _, _ = get_scores(cnt, pred_total, gold_total)
cnt, pred_total, gold_total = compare_set(components_gold["conds"],
components_pred["conds"])
score_conds, _, _ = get_scores(cnt, pred_total, gold_total)
score_conn = components_gold["cond_conn_op"] == components_pred[
"cond_conn_op"]
if score_sels:
cnt_sel += 1
if score_conds:
cnt_cond += 1
if score_conn:
cnt_conn += 1
if score_sels and score_conds and score_conn:
right += 1
else:
logging.debug("error instance %s:\npred: %s\ngold: %s" %
(qid, sql_pred, sql_gold))
except Exception as e:
##traceback.print_exc()
logging.warning('parse sql error, error sql:')
logging.warning(sql_gold + '|||' + sql_pred)
##raise e
continue
scores["all"] = dict(
[("count", total), ("exact", right), ("acc", right * 1.0 / total)])
scores["select"] = dict(
[("count", total), ("exact", cnt_sel), ("acc", cnt_sel * 1.0 / total)])
scores["condition"] = dict([("count", total), ("exact", cnt_cond),
("acc", cnt_cond * 1.0 / total)])
scores["connection"] = dict([("count", total), ("exact", cnt_conn),
("acc", cnt_conn * 1.0 / total)])
return scores, scores_novalue
if __name__ == '__main__':
print(query2sql("SELECT 所在省份 , 产线名称 WHERE 日熔量(吨) < 600", []))
print(
query2sql(
"SELECT MAX ( 货币资金(亿元) ) WHERE 总资产(亿元) > 100 or 净资产(亿元) > 100", []))
print(
query2sql("SELECT 股价 , EPS17A WHERE 铁路公司 = 广深铁路",
["股价", "铁路公司", "EPS17A"], True))
cols = ["公司", "2014(亿元)", "2015(亿元)", "2016(亿元)"]
print(
query2sql(
"SELECT COUNT ( 公司 ) WHERE 2014(亿元) > 20 and 2015(亿元) > 20 and 2016(亿元) > 20",
cols))
# print(query2sql("SELECT 书名/Title WHERE 索书号/CallNo. == BF637.U53C555=12010 or ISBN == 9.78142212482e+12", ["书名/Title","索书号/CallNo.",'ISBN']))
# print(tokenize("SELECT 标称生产企业名称 WHERE 规格(包装规格) == 187.2g/盒 and 标称产品名称 == 富兰克牌西洋参含片", math=False))
# print(tokenize("SELECT 设备型号 WHERE 生产企业 == AISINAWCO.,LTD. or 设备名称 == WCDMA无线数据终端", math=False))
# print(tokenize("SELECT sum(t1.amount_claimed) FROM claim_headers AS t1 JOIN claims_documents AS t2 ON t1.claim_header_id = t2.claim_id WHERE t2.created_date = ( SELECT created_date FROM claims_documents ORDER BY created_date LIMIT 1 )"))
# print(query2sql("SELECT 书号(ISBN) WHERE 教材名称 == 线性代数 or 教材名称 == 中级有机化学", ["书号(ISBN)", "教材名称" ]))
| [
"noreply@github.com"
] | noreply@github.com |
64f5e208e79a59bba5aeaf436b162032a26a176d | 7318faadb7a3eefbdd114169b49d25c14d5e952e | /MinMaxScaler.py | 3e928686f544b380aedba20dbd98c51d9ba6ff39 | [] | no_license | lukesun7612/Panel-data-regression | 8a166963eecbe1bcac920244531ea87b2c79a793 | 8775ad2087f44ad0e83ddfe22f2372c2c94feb62 | refs/heads/master | 2023-04-18T11:59:23.080164 | 2021-04-30T11:11:20 | 2021-04-30T11:11:20 | 339,061,896 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: lukes
@project: JA3
@file: MinMaxScaler.py
@time: 2020/12/31 18:28
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from scipy.stats.mstats import winsorize
import matplotlib.pyplot as plt
# set input path and output path
inputpath = 'D:/result/coefresult1.csv'
outputpath = 'D:/博士/论文/JA3/Table/result3.xlsx'
usecol = ['overspeed','highspeedbrake','harshacceleration','harshdeceleration']
# Read the data and organize it
data = pd.read_csv(inputpath, skiprows=2, index_col=0)
data.columns = usecol
data.index.names = ['id']
data = data.drop(data.tail(1).index)
data.loc['id1'] = [0, 0, 0, 0]
data = data.iloc[np.arange(-1, len(data)-1)]
print(data)
# winsorize the data with Package<winsorize>
data_w = winsorize(data.values, limits=[0.01, 0.01], axis=0, inplace=True)
# Normalize the winsorized data, map into [0,5]
scaler1 = MinMaxScaler(feature_range=(0,5))
result1 = scaler1.fit_transform(data_w)
df = pd.DataFrame(result1, index=data.index, columns=usecol)
# #Standardize the winsorized data
# scaler2 = StandardScaler()
# result2 = scaler2.fit_transform(data_w)
# result2 = scaler1.fit_transform(result2)
print(df)
# plot risk level figure
fig, ax = plt.subplots()
plot = ax.scatter(np.ones(182)+0.001*np.arange(1,183), df['overspeed'].values, c=df['overspeed'].values, cmap='rainbow', alpha=0.5)
ax.scatter(2*np.ones(182)+0.001*np.arange(1,183), df['highspeedbrake'].values, c=df['highspeedbrake'].values, cmap='rainbow', alpha=0.5)
ax.scatter(3*np.ones(182)+0.001*np.arange(1,183), df['harshacceleration'].values, c =df['harshacceleration'].values, cmap='rainbow', alpha=0.5)
ax.scatter(4*np.ones(182)+0.001*np.arange(1,183), df['harshdeceleration'].values, c=df['harshdeceleration'].values, cmap='rainbow', alpha=0.5)
plt.xlim(0.2,5)
plt.ylim(-0.05,5.05)
plt.xticks([1.091,2.091,3.091,4.091],["Over-speed","High-speed-brake","Harsh-acceleration","Harsh-deceleration"])
plt.yticks([1,2,3,4,5.05],["level 1","level 2","level 3","level 4","level 5"])
plt.xlabel('Near-miss Event')
plt.ylabel('Driving Risk Level')
plt.grid(axis='y', ls='--')
# Add annotate
plt.scatter(1.125,df.overspeed[124], edgecolors='k', c='')
plt.annotate("id125(score=%s)"%df.overspeed[124].round(3), xy=(1.125, df.overspeed[124]), xytext=(1.3,4.1), arrowprops=dict(arrowstyle='->', connectionstyle="arc3"), bbox=dict(boxstyle='Round,pad=0.5', fc='white', lw=1, ec='k', alpha=0.5))
plt.scatter(2.125,df.highspeedbrake[124], edgecolors='k', c='')
plt.annotate("id125(score=%s)"%df.highspeedbrake[124].round(3), xy=(2.125, df.highspeedbrake[124]), xytext=(2.3,1.2), arrowprops=dict(arrowstyle='->', connectionstyle="arc3"), bbox=dict(boxstyle='Round,pad=0.5', fc='white', lw=1, ec='k', alpha=0.5))
plt.scatter(3.125,df.harshacceleration[124], edgecolors='k', c='')
plt.annotate("id125(score=%s)"%df.harshacceleration[124].round(3), xy=(3.125, df.harshacceleration[124]), xytext=(3.3,2.8), arrowprops=dict(arrowstyle='->', connectionstyle="arc3"), bbox=dict(boxstyle='Round,pad=0.5', fc='white', lw=1, ec='k', alpha=0.5))
plt.scatter(4.125,df.harshdeceleration[124], edgecolors='k', c='')
plt.annotate("id125(score=%s)"%df.harshdeceleration[124].round(3), xy=(4.125, df.harshdeceleration[124]), xytext=(4.3,3.2), arrowprops=dict(arrowstyle='->', connectionstyle="arc3"), bbox=dict(boxstyle='Round,pad=0.5', fc='white', lw=1, ec='k', alpha=0.5))
# Add colorbar, make sure to specify tick locations to match desired ticklabels
cbar = fig.colorbar(plot, ticks=[-1, 0, 1])
cbar.set_ticks([0,1.3,2.5,3.8,5])
cbar.set_ticklabels(["Excellent","Good","Medium","Bad","Terrible"])
plt.show()
if __name__ == '__main__':
df.to_excel(outputpath)
| [
"lukesun7612@gmail.com"
] | lukesun7612@gmail.com |
97c488e5ad90e0f2906fd430de44698e972b15b5 | 53ba0b6f172abcade631ae1f52852c400302559e | /test/cv/bases/activates/DynamicReLUdemo.py | b99525743213c9b3e245292809f8a30322dc5698 | [
"Apache-2.0"
] | permissive | sssssshf/python_developer_tools | f97c64ee0aa0a7e9d31d173192805771c83abb7f | 44d2e67a2e2495a12d6b32da12c76cf0010ac7ea | refs/heads/main | 2023-08-19T02:44:53.536200 | 2021-10-13T02:10:19 | 2021-10-13T02:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,507 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/14/2021 3:19 PM
# @File:demo
import os
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from tqdm import tqdm
from python_developer_tools.cv.bases.activates.DynamicReLU import DyReLUA, DyReLUB, DyReLUC, convert_relu_to_DyReLU
from python_developer_tools.cv.utils.torch_utils import init_seeds
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class shufflenet_v2_x0_5M(nn.Module):
def __init__(self,nc,pretrained=True):
super(shufflenet_v2_x0_5M, self).__init__()
self.model_ft = torchvision.models.shufflenet_v2_x0_5(pretrained=pretrained)
# 将relu替换为DyReLUA
self.model_ft = convert_relu_to_DyReLU(self.model_ft,"A")
num_ftrs = self.model_ft.fc.in_features
self.model_ft.fc = nn.Linear(num_ftrs, nc)
def forward(self,x):
x = self.model_ft.conv1(x)
x = self.model_ft.maxpool(x)
x = self.model_ft.stage2(x)
x = self.model_ft.stage3(x)
x = self.model_ft.stage4(x)
x = self.model_ft.conv5(x)
x = x.mean([2, 3]) # globalpool
out = self.model_ft.fc(x)
return out
if __name__ == '__main__':
"""
ReLU 41%
DyReLUA 42 %
DyReLUB 41 %
DyReLUC 40 %
"""
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
epochs = 50
batch_size = 1024
num_workers = 8
classes = 10
init_seeds(1024)
trainset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True)
testset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
model = shufflenet_v2_x0_5M(classes, True)
model.cuda()
model.train()
criterion = nn.CrossEntropyLoss()
# SGD with momentum
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
for epoch in range(epochs):
train_loss = 0.0
for i, (inputs, labels) in tqdm(enumerate(trainloader)):
inputs, labels = inputs.cuda(), labels.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
# loss
loss = criterion(outputs, labels)
# backward
loss.backward()
# update weights
optimizer.step()
# print statistics
train_loss += loss
scheduler.step()
print('%d/%d loss: %.6f' % (epochs, epoch + 1, train_loss / len(trainset)))
correct = 0
model.eval()
for j, (images, labels) in tqdm(enumerate(testloader)):
outputs = model(images.cuda())
_, predicted = torch.max(outputs.data, 1)
correct += (predicted.cpu() == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / len(testset))) | [
"zengxh@chint.com"
] | zengxh@chint.com |
d842dfe74d0122a04a9b59ab7061ca9f3ce55684 | adc8b9b0266b81e1170c91b1799078d41d88e5bc | /uw/bin/irunner | 2323bdc697fdd9b6695e7a61d3f4c31d515e90fa | [
"Apache-2.0"
] | permissive | noslenfa/tdjangorest | 21c620a3f0b23abba5ec426f760fb490dec61519 | d26d3bd10f9ac509814530b0a066e2ac7b4f05a6 | refs/heads/master | 2021-01-23T07:20:52.699844 | 2015-07-23T20:24:43 | 2015-07-23T20:24:43 | 39,590,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/Users/NELSON/Desktop/UW/uw/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.lib.irunner import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"noslenfa@gmail.com"
] | noslenfa@gmail.com | |
0d96baf22073ad043d5f79d347ed98cec15fe659 | 283b368f358f2f205dbcd6a61bc2ebcf427906e6 | /src/main.py | 7e8306ae55058a2c3c431fa4a641e37cf62eb767 | [] | no_license | Lennaayy/fyp | 430249cca155997acbfa5cd916798d5eac384d28 | 4a65d3e05266e2272d9c43f2a8e31f0db9e49275 | refs/heads/main | 2023-04-11T06:24:43.132751 | 2021-04-25T21:35:43 | 2021-04-25T21:35:43 | 341,686,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,318 | py | import readers.coord_read as cr
import readers.block_read as br
import logic.legal_moves as lg
import logic.possible_moves as pm
import logic.environment_step as es
import logic.max_arg as ma
import logic.complete_level as cl
from PIL import ImageGrab
import random
import numpy as np
import keyboard
# Initial Hyperparameters and copy values for decay
alpha = 0.6
alpha_copy = 0.6
gamma = 0.2
gamma_copy = 0.2
epsilon = 0.4
epsilon_copy = 0.4
epochs = 1
observation_space = 1
state = 0
# Levels that can possibly take longer to complete
long_levels = ["27", "34", "39","41","43", "44", "45", "46", "47", "48"]
# The bounding box coordinates of the game
tlx, tly, brx, bry = cr.window_coords()
reset_x, reset_y = cr.reset_coords(tlx, tly, brx, bry)
random.seed()
try:
while True:
if keyboard.is_pressed('q'): # if key 'q' is pressed/held the program will quit
break
# Grab the game state, this is only needed once per level as we can return to the starting state if a reset is needed
img = ImageGrab.grab(bbox=(tlx, tly, brx, bry))
img.save("game_state.png", "PNG")
# Get the required grouping value and level number
group_val = br.group_requirement_value(tlx, tly, brx, bry)
level = br.get_level(tlx, tly, brx, bry)
if level == "50":
print("\nReached Level 50, try solve the final level by yourself :)")
break
# Get all blocks on screen
all_block_coords = br.find_blocks("game_state.png", tlx, tly)
all_possible_block_groupings = pm.possible_moves(all_block_coords, group_val)
blocks_left = all_block_coords
# Find the legal moves
legal_block_groupings = lg.legal_moves(blocks_left, group_val)
# Create the Q-Table
action_space = len(all_possible_block_groupings)
q_table = np.zeros([observation_space, action_space])
groups = []
# Reset all Parameters
alpha = 0.6
alpha_copy = 0.6
gamma = 0.2
gamma_copy = 0.2
epsilon = 0.4
epsilon_copy = 0.4
epochs = 1
move_count = 0
max_moves = 0
# Start the Level until it is solved
print(f"\nLevel {level} Started")
if level in long_levels:
print("This level completion time can vary, (1-2 minutes maximum)")
doing_level = True
while doing_level:
if keyboard.is_pressed('q'): # if key 'q' is pressed/held the program will quit
break
# Natural decay of parameters
if epochs % 500 == 0:
epsilon_copy *= 0.99
alpha_copy *= 0.95
gamma_copy *= 0.99
# Extra decay for early moves to be exploited more heavily
if move_count < (epochs//1500):
alpha = 1
gamma = 0.2
epsilon = 0.01
else:
alpha = alpha_copy
gamma = gamma_copy
epsilon = epsilon_copy
# increment move and set a max value for maximum moves without resetting
move_count += 1
if move_count > max_moves:
max_moves = move_count
# Choose to exploit or explore the state space
if random.uniform(0, 1) < epsilon:
move = random.choice(legal_block_groupings)
action = all_possible_block_groupings.index(move) # Explore action space
else:
action = ma.max_argument(q_table, all_possible_block_groupings, legal_block_groupings) # Exploit learned values
# Take the action in the environment while retrieving the information
next_state, reward, doing_level, blocks_left = es.environment_step(all_possible_block_groupings, state, action, blocks_left, all_block_coords, group_val, level)
groups.append(all_possible_block_groupings[action])
# Find the legal moves
legal_block_groupings = lg.legal_moves(blocks_left, group_val)
# Retrieve necessary q-table parameters and update values accodingly
old_value = q_table[state, action]
next_max = ma.max_q_value(q_table, all_possible_block_groupings, legal_block_groupings)
new_value = old_value + alpha * (reward + (gamma * next_max) - old_value) # Formula here
q_table[state, action] = new_value
# this reward indicates the state space has reset, return to move 0 with no moves made
if reward == -20:
groups = []
move_count = 0
# If the level has been completed, finish it and move on to the next
if doing_level == False:
print("Solved on epoch:", epochs)
cl.complete_level(groups)
# Iterate state and epoch
state = next_state
epochs += 1
# Reset q-learning if trapped in local optimum
if max_moves < (epochs // 1500)+1:
q_table = np.zeros([observation_space, action_space])
groups = []
move_count = 0
max_moves = 0
epochs = 0
epsilon = 0.4
except KeyboardInterrupt:
pass
| [
"lennyeum@gmail.com"
] | lennyeum@gmail.com |
6c197618046ee0d263b2e3a1c9afa8a75a232a6f | 628643508ebe023e3a310bfea2a48676fb230504 | /packages/dumbo/dumbo/backends/common.py | c04d464a33e5fd16d05937e2a187335a671695f1 | [] | no_license | wuyingminhui/input-lib | e89b317721e86ba9e4aec5934689eb9a90d7acea | 90e59e457c59ece98c26a3dc41d1119ae4fb599d | refs/heads/master | 2021-01-20T17:37:34.464335 | 2013-02-19T23:53:46 | 2013-02-19T23:53:46 | 19,423,999 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,628 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from dumbo.util import incrcounter, setstatus, getopts, configopts
class Params(object):
def get(self, name):
try:
return os.environ[name]
except KeyError:
return None
def __getitem__(self, key):
return self.get(str(key))
def __contains__(self, key):
return self.get(str(key)) != None
class Counter(object):
def __init__(self, name, group='Program'):
self.group = group
self.name = name
def incr(self, amount):
incrcounter(self.group, self.name, amount)
return self
__iadd__ = incr
class Counters(object):
def __init__(self):
self.counters = {}
def __getitem__(self, key):
try:
return self.counters[key]
except KeyError:
counter = Counter(str(key))
self.counters[key] = counter
return counter
def __setitem__(self, key, value):
pass
class MapRedBase(object):
params = Params()
counters = Counters()
def setstatus(self, msg):
setstatus(msg)
status = property(fset=setstatus)
class JoinKey(object):
def __init__(self, body, isprimary=False):
self.body = body
self.isprimary = isprimary
def __cmp__(self, other):
bodycmp = cmp(self.body, other.body)
if bodycmp:
return bodycmp
else:
return cmp(self.isprimary, other.isprimary)
@classmethod
def fromjoinkey(cls, jk):
return cls(jk.body, jk.isprimary)
@classmethod
def fromdump(cls, dump):
return cls(dump[0], dump[1] == 1)
def dump(self):
return (self.body, 2 - int(self.isprimary))
def __repr__(self):
return repr(self.dump())
class RunInfo(object):
def get_input_path(self):
return 'unknown'
class Iteration(object):
def __init__(self, prog, opts):
(self.prog, self.opts) = (prog, opts)
def run(self):
addedopts = getopts(self.opts, ['fake',
'debug',
'python',
'iteration',
'itercount',
'hadoop',
'starter',
'name',
'memlimit',
'param',
'parser',
'record',
'joinkeys',
'hadoopconf',
'mapper',
'reducer'])
if addedopts['fake'] and addedopts['fake'][0] == 'yes':
def dummysystem(*args, **kwargs):
return 0
global system
system = dummysystem # not very clean, but it works...
if addedopts['debug'] and addedopts['debug'][0] == 'yes':
self.opts.append(('cmdenv', 'dumbo_debug=yes'))
if not addedopts['python']:
python = 'python'
else:
python = addedopts['python'][0]
self.opts.append(('python', python))
if not addedopts['iteration']:
iter = 0
else:
iter = int(addedopts['iteration'][0])
if not addedopts['itercount']:
itercnt = 0
else:
itercnt = int(addedopts['itercount'][0])
if addedopts['name']:
name = addedopts['name'][0]
else:
name = self.prog.split('/')[-1]
self.opts.append(('name', '%s (%s/%s)' % (name, iter + 1,
itercnt)))
if not addedopts['hadoop']:
pypath = '/'.join(self.prog.split('/')[:-1])
if pypath: self.opts.append(('pypath', pypath))
else:
self.opts.append(('hadoop', addedopts['hadoop'][0]))
progmod = self.prog.split('/')[-1]
progmod = progmod[:-3] if progmod.endswith('.py') else progmod
memlim = ' 262144000' # 250MB limit by default
if addedopts['memlimit']:
# Limit amount of memory. This supports syntax
# of the form '256m', '12g' etc.
try:
_memlim = int(addedopts['memlimit'][0][:-1])
memlim = ' %i' % {
'g': 1073741824 * _memlim,
'm': 1048576 * _memlim,
'k': 1024 * _memlim,
'b': 1 * _memlim,
}[addedopts['memlimit'][0][-1].lower()]
except KeyError:
# Assume specified in bytes by default
memlim = ' ' + addedopts['memlimit'][0]
if addedopts['mapper']:
self.opts.append(('mapper', addedopts['mapper'][0]))
else:
self.opts.append(('mapper', '%s -m %s map %i%s' % (python,
progmod, iter, memlim)))
if addedopts['reducer']:
self.opts.append(('reducer', addedopts['reducer'][0]))
else:
self.opts.append(('reducer', '%s -m %s red %i%s' % (python,
progmod, iter, memlim)))
for param in addedopts['param']:
self.opts.append(('cmdenv', param))
if addedopts['parser'] and iter == 0:
parser = addedopts['parser'][0]
shortcuts = dict(configopts('parsers', self.prog))
if parser in shortcuts:
parser = shortcuts[parser]
self.opts.append(('cmdenv', 'dumbo_parser=' + parser))
if addedopts['record'] and iter == 0:
record = addedopts['record'][0]
shortcuts = dict(configopts('records', self.prog))
if record in shortcuts:
record = shortcuts[record]
self.opts.append(('cmdenv', 'dumbo_record=' + record))
if addedopts['joinkeys'] and addedopts['joinkeys'][0] == 'yes':
self.opts.append(('cmdenv', 'dumbo_joinkeys=yes'))
self.opts.append(('partitioner',
'org.apache.hadoop.mapred.lib.BinaryPartitioner'))
self.opts.append(('jobconf',
'mapred.binary.partitioner.right.offset=-6'))
for hadoopconf in addedopts['hadoopconf']:
self.opts.append(('jobconf', hadoopconf))
self.opts.append(('libegg', re.sub('\.egg.*$', '.egg', __file__)))
return 0
class FileSystem(object):
def cat(self, path, opts):
return 1 # fail by default
def ls(self, path, opts):
return 1 # fail by default
def exists(self, path, opts):
return 1 # fail by default
def rm(self, path, opts):
return 1 # fail by default
def put(self, path1, path2, opts):
return 1 # fail by default
def get(self, path1, path2, opts):
return 1 # fail by default
class Backend(object):
def matches(self, opts):
""" Returns True if the backend matches with the given opts """
return True
#abstractmethod
def create_iteration(self, opts):
""" Creates a suitable Iteration object """
pass
#abstractmethod
def create_filesystem(self, opts):
""" Creates a suitable FileSystem object """
pass
def get_mapredbase_class(self, opts):
""" Returns a suitable MapRedBase class """
return MapRedBase
def get_joinkey_class(self, opts):
""" Returns a suitable JoinKey class """
return JoinKey
def get_runinfo_class(self, opts):
""" Returns a suitable RunInfo class """
return RunInfo
| [
"fwenzel@mozilla.com"
] | fwenzel@mozilla.com |
cd81605d730a296a3ab6be9f41e8971e5e43ffb8 | de7cf0bdc4c96f2693e998f041be37b609483957 | /database_OOP.py | 79ba4ab25b62e3e276fc2de2a60c9a297ee8c7a1 | [] | no_license | ugneokmanaite/Python_Database_Connections | 7030abdb86ff6e40bf20dc84332dc9d320658255 | 4c1f3b9f851785df0fc69141824f9bd357adaf58 | refs/heads/master | 2022-11-27T16:10:49.713362 | 2020-07-14T07:07:40 | 2020-07-14T07:07:40 | 278,344,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | import pyodbc
class database_OOP:
    """Thin wrapper around a pyodbc SQL Server connection.

    Stores connection parameters and provides helpers to connect,
    create cursors, and display query results from a Northwind-style
    ``Products`` table in a few different ways.
    """

    def __init__(self, server, database, username, password):
        # Connection parameters; only combined into a connection string
        # inside establish_connection().
        self.server = server
        self.database = database
        self.username = username
        self.password = password

    def establish_connection(self):
        """Open a connection to the configured database.

        Prints whether the connection attempt succeeded and returns the
        open pyodbc connection, or None when connecting failed.
        """
        connection_string = (
            'DRIVER={ODBC Driver 17 for SQL Server}'
            ';SERVER=' + self.server +
            ';DATABASE=' + self.database +
            ';UID=' + self.username +
            ';PWD=' + self.password
        )
        try:
            # FIX: the original opened the connection inside a `with` block
            # and returned it afterwards; returning it directly keeps the
            # connection usable by the caller without relying on pyodbc's
            # context-manager commit semantics.
            connection = pyodbc.connect(connection_string, timeout=5)
        except pyodbc.Error:
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
            # and typos; catch only driver/connection errors.
            print("Connection Timed Out")
            return None
        print("Connection did not time out")
        return connection

    def create_cursor(self, connection):
        """Return a new cursor for the given open connection."""
        return connection.cursor()

    def execute_sql(self, sql_command, connection, user_input):
        """Execute *sql_command* and display its results.

        user_input selects how the result set is consumed:
        1 -> fetchone, 2 -> fetchmany(30), 3 -> fetchall,
        4 -> average unit price. Any other value prints an error message
        (the original raised ValueError and caught it itself; the net
        behavior — printing the message — is preserved).
        """
        cursor = self.create_cursor(connection)
        query_result = cursor.execute(sql_command)
        # Dispatch table makes the menu-choice -> handler mapping explicit
        # and removes the stray line-continuation artifacts of the original.
        handlers = {
            1: self.workingWith_fetchone,
            2: self.workingWith_fetchmany,
            3: self.workingWith_fetchall,
            4: self.avg_unit_price,
        }
        handler = handlers.get(user_input)
        if handler is None:
            print("This is incorrect user_input")
        else:
            handler(query_result)

    def workingWith_fetchone(self, query_result):
        """Print the ProductName of the first row of the result set."""
        row = query_result.fetchone()
        print(row.ProductName)

    def workingWith_fetchmany(self, query_result):
        """Print name and unit price for the first 30 rows."""
        rows = query_result.fetchmany(30)
        for row in rows:
            print("Product Name::" + row.ProductName, "Costs::", row.UnitPrice)

    def workingWith_fetchall(self, query_result):
        """Print every remaining row of the result set."""
        print(query_result.fetchall())

    def avg_unit_price(self, query_result):
        """Print the single aggregate value from the result set.

        FIX: the original tried to call the cursor result object as a
        function (`query_result("SELECT ...")`), which would raise
        TypeError. Here we simply fetch the one aggregate row; this
        assumes the SQL already executed was something like
        ``SELECT AVG(UnitPrice) FROM Products`` — TODO confirm callers.
        """
        row = query_result.fetchone()
        print(row[0])
| [
"uokmanaite@spartaglobal.com"
] | uokmanaite@spartaglobal.com |
6f5439d715a6d9fd4af2012e1594c857ebaf59cc | 8e65928ef06e0c3392d9fa59b7b716f941e933c3 | /python/hackerrank/30-days-of-code/6-review-loop/solution.py | 94600b266256e8d7f30a7421e7c00d1ec9a4b659 | [] | no_license | KoryHunter37/code-mastery | 0c79aed687347bfd54b4d17fc28dc110212c6dd1 | 6610261f7354d35bde2411da8a2f4b9dfc238dea | refs/heads/master | 2022-02-24T10:34:03.074957 | 2019-09-21T19:24:57 | 2019-09-21T19:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # https://www.hackerrank.com/challenges/30-review-loop/problem
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input())
strings = []
for i in range(n):
strings.append(str(input()))
for string in strings:
left, right = [], []
for i in range(len(string)):
if i % 2 == 0:
left.append(string[i])
else:
right.append(string[i])
print("".join(left) + " " + "".join(right))
| [
"koryhunter@gatech.edu"
] | koryhunter@gatech.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.