seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5138543046 | import Queue
class FifoBuffer(object):
    '''
    A FIFO byte buffer backed by a thread-safe Queue (Python 2: `Queue`
    module, `str` chunks).  Writers enqueue string chunks; readers consume
    exactly n bytes at a time, spanning chunk boundaries as needed.

    >>> f = FifoBuffer()
    >>> f.write('01')
    >>> f.write('234')
    >>> f.read(3)
    '012'
    >>> f.read(0)
    ''
    >>> f.read(2)
    '34'
    '''
    def __init__(self, empty_cb=None):
        '''
        Make an empty fifo buffer.

        empty_cb, if given, is called whenever a read drains the queue,
        instead of letting Queue.Empty propagate (e.g. to pump in data).
        '''
        # _first is the chunk currently being consumed; _index is the read
        # offset within it.  _rest holds not-yet-started chunks.
        self._first = ''
        self._index = 0
        self._rest = Queue.Queue()
        self._empty_cb = empty_cb
    def read(self, n):
        '''
        Block and read the next n bytes.

        Raises Queue.Empty when the buffer runs dry and no empty_cb was
        supplied (get_nowait is used, so the queue itself never blocks).
        '''
        ret = ''
        # While we need more bytes than remain in the current chunk.
        while len(ret) + len(self._first) - self._index < n:
            assert 0 <= self._index <= len(self._first)
            ret += self._first[self._index:]
            # Poison the index so any accidental use before the reset below
            # fails loudly rather than reading stale data.
            self._index = None # try to throw an error
            while True:
                try:
                    self._first = self._rest.get_nowait()
                except Queue.Empty:
                    if not self._empty_cb:
                        raise
                    # Give the callback a chance to refill, then retry.
                    self._empty_cb()
                else:
                    break
            self._index = 0
        # Pad it: take the remaining bytes needed from the current chunk.
        new_index = n - len(ret) + self._index
        ret += self._first[self._index:new_index]
        self._index = new_index
        assert len(ret) == n
        return ret
    def write(self, data):
        '''
        Write some data to the back of the fifo buffer.
        '''
        assert isinstance(data, str)
        self._rest.put(data)
| yingted/wai-fi | src/server/fifobuffer.py | fifobuffer.py | py | 1,124 | python | en | code | 1 | github-code | 13 |
33266670700 | # -*- coding: utf-8 -*-
from django.db import models
from Users.models import CustomUser
class HostGroup(models.Model):
    """A named grouping of hosts (name is unique)."""
    name = models.CharField(u'主机组', max_length=128, blank=False, null=False, unique=True)
    def __unicode__(self):
        # Display name in the Django admin / shell (Python 2 model).
        return self.name
    class Meta:
        db_table = 'hosts_groups'
class HostIDC(models.Model):
    """A data-center / machine room (机房) with an optional street address."""
    name = models.CharField(u'机房', max_length=128, blank=False, null=False, unique=True)
    address = models.CharField(u'地址', max_length=128, blank=True, null=True)
    def __unicode__(self):
        return self.name
    class Meta:
        db_table = 'hosts_idc'
class HostCabinet(models.Model):
    """A rack cabinet (机柜) located inside one HostIDC."""
    name = models.CharField(u'机柜', max_length=128, blank=False, null=False, unique=True)
    # BooleanField with an integer default 0 (False); semantics of the
    # status flag are not defined here -- TODO confirm with callers.
    status = models.BooleanField(u'机柜状态', default=0)
    idc = models.ForeignKey('HostIDC', verbose_name=u'机房地址')
    def __unicode__(self):
        return self.name
    class Meta:
        db_table = 'hosts_cabinet'
class Manufactory(models.Model):
    """A hardware vendor and its support phone number."""
    manufactory = models.CharField(u'厂商名称', max_length=64, unique=True)
    support_num = models.CharField(u'支持电话', max_length=30, blank=True)
    def __unicode__(self):
        return self.manufactory
    class Meta:
        db_table = 'hosts_manufactory'
class Server(models.Model):
    """A server asset: network identity, location, hardware and ownership."""
    sn = models.CharField(u'SN号', max_length=128, null=True, blank=True)
    hostname = models.CharField(u'主机名称', max_length=128, blank=False, null=False, unique=True)
    ip_address = models.GenericIPAddressField(u'公网地址', max_length=45, null=True, blank=True, unique=True)
    nip_address = models.GenericIPAddressField(u'内网地址', max_length=45, null=False, blank=False, unique=True)
    group = models.ManyToManyField(HostGroup, related_name='group_name')
    idc = models.ForeignKey(HostIDC, related_name='idc_name')
    cabinet = models.ForeignKey(HostCabinet, related_name='cabinet_name')
    manufactory = models.ForeignKey(Manufactory, related_name='manufactory_name')
    server_model = models.CharField(u'服务器型号', max_length=128, null=True, blank=True)
    system_distribution = models.CharField(u'发行版本', max_length=64, blank=True, null=True)
    system_type = models.CharField(u'系统类型', max_length=64, blank=True, null=True)
    system_release = models.CharField(u'系统版本', max_length=64, blank=True, null=True)
    kernel_release = models.CharField(u'内核版本', max_length=64, blank=True, null=True)
    # BUGFIX: auto_now / auto_now_add were swapped -- the creation stamp was
    # rewritten on every save while the update stamp never changed.  Now
    # consistent with CPU/Disk/NIC/RAM below (create: set once; update: each save).
    create_datetime = models.DateTimeField(u'创建时间', auto_now_add=True)
    update_datetime = models.DateTimeField(u'更新时间', auto_now=True)
    trade_datetime = models.DateTimeField(u'购买时间', null=True, blank=True)
    expire_datetime = models.DateTimeField(u'过保修期', null=True, blank=True)
    manager = models.ForeignKey(CustomUser, related_name='business_manager', null=True, blank=True)
    admin = models.ForeignKey(CustomUser, related_name='business_admin', null=True, blank=True)
    def __unicode__(self):
        return self.hostname
    class Meta:
        db_table = u'hosts_servers'
class CPU(models.Model):
    """CPU inventory attached to one Server."""
    hostname = models.ForeignKey(u'Server', null=True, blank=True)
    cpu_model = models.CharField(u'CPU型号', max_length=128, blank=True)
    cpu_count = models.SmallIntegerField(u'物理CPU个数')
    cpu_core_count = models.SmallIntegerField(u'逻辑CPU核数')
    # create: set once on insert; update: refreshed on every save.
    create_datetime = models.DateTimeField(u'创建时间', blank=True, auto_now_add=True)
    update_datetime = models.DateTimeField(u'修改时间', blank=True, auto_now=True)
    memo = models.TextField(u'备注', null=True, blank=True)
    class Meta:
        db_table = u'hosts_cpu'
class Disk(models.Model):
    """Physical disk inventory attached to one Server."""
    sn = models.CharField(u'SN号', max_length=128, blank=True, null=True)
    hostname = models.ForeignKey(u'Server', null=True, blank=True)
    slot = models.CharField(u'插槽位', max_length=64)
    manufactory = models.CharField(u'制造商', max_length=64, blank=True, null=True)
    model = models.CharField(u'磁盘型号', max_length=128, blank=True, null=True)
    capacity = models.FloatField(u'磁盘容量GB')
    # Closed set of interface types offered in forms/admin.
    disk_iface_choice = (
        ('SATA', 'SATA'),
        ('SAS', 'SAS'),
        ('SCSI', 'SCSI'),
        ('SSD', 'SSD'),
    )
    iface_type = models.CharField(u'接口类型', max_length=64, choices=disk_iface_choice, default='SAS')
    create_datetime = models.DateTimeField(u'创建时间', blank=True, auto_now_add=True)
    update_datetime = models.DateTimeField(u'修改时间', blank=True, auto_now=True)
    memo = models.TextField(u'备注', blank=True, null=True)
    # NOTE(review): unlike the sibling models, Disk defines neither
    # Meta.db_table (table name falls back to Django's default) nor
    # __unicode__ -- confirm whether that is intended.
class NIC(models.Model):
    """Network interface card inventory attached to one Server."""
    name = models.CharField(u'网卡名', max_length=64, blank=True, null=True)
    sn = models.CharField(u'SN号', max_length=128, blank=True, null=True)
    hostname = models.ForeignKey(u'Server', null=True, blank=True)
    model = models.CharField(u'网卡型号', max_length=128, blank=True, null=True)
    # MAC address is the unique key for a NIC row.
    macaddress = models.CharField(u'MAC', max_length=64, unique=True)
    ipaddress = models.GenericIPAddressField(u'IP', blank=True, null=True)
    netmask = models.CharField(max_length=64, blank=True, null=True)
    bonding = models.CharField(max_length=64, blank=True, null=True)
    memo = models.CharField(u'备注', max_length=128, blank=True, null=True)
    create_datetime = models.DateTimeField(u'创建时间', blank=True, auto_now_add=True)
    update_datetime = models.DateTimeField(u'修改时间', blank=True, auto_now=True)
    class Meta:
        db_table = u'hosts_network'
class RAM(models.Model):
    """Memory module inventory attached to one Server."""
    sn = models.CharField(u'SN号', max_length=128, blank=True, null=True)
    hostname = models.ForeignKey(u'Server', null=True, blank=True)
    model = models.CharField(u'内存型号', max_length=128)
    slot = models.CharField(u'插槽', max_length=64)
    capacity = models.IntegerField(u'内存大小(MB)')
    memo = models.CharField(u'备注', max_length=128, blank=True, null=True)
    create_datetime = models.DateTimeField(u'创建时间', blank=True, auto_now_add=True)
    update_datetime = models.DateTimeField(u'修改时间', blank=True, auto_now=True)
    class Meta:
        db_table = u'hosts_memory' | Donyintao/SoilServer | assets/models.py | models.py | py | 6,207 | python | en | code | 7 | github-code | 13 |
3583833233 | import logging
from pdf2image import convert_from_bytes
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
import io
import os
import azure.functions as func
def pdf_to_png(company_name, document_name, pdf_file_blob):
    '''
    Render every page of a PDF blob to PNG, upload each page to the
    "companies-doc-images" container under <company>/<document>/pageN.png,
    then upload a complete.txt manifest listing the uploaded blob names.

    company_name / document_name form the blob path prefix; pdf_file_blob
    is a readable stream of the PDF bytes.  Temporary local files are
    removed as soon as each upload finishes.
    '''
    pages = convert_from_bytes(pdf_file_blob.read(), fmt='png')
    # SECURITY FIX: the storage connection string (including the account
    # key) used to be hard-coded here, i.e. a credential committed to
    # source control.  Read it from the environment instead -- and rotate
    # the previously exposed account key.
    connect_str = os.environ['AZURE_STORAGE_CONNECTION_STRING']
    # Create the BlobServiceClient object which will be used to create a container client
    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    page_number = 0
    container_name = 'companies-doc-images'
    local_file_list = company_name + document_name + '.txt'
    with open(local_file_list, 'w') as f:
        # Write uploaded blob names to the manifest in page order.
        for page in pages:
            # Save pages as images in the pdf
            target_image_name = company_name + '/' + document_name + '/page' + str(page_number) + '.png'
            local_image_name = company_name + document_name + str(page_number) + '.png'
            page.save(local_image_name, 'PNG')
            blob_client = blob_service_client.get_blob_client(container=container_name, blob=target_image_name)
            with open(local_image_name, "rb") as data:
                blob_client.upload_blob(data)
            f.write(target_image_name)
            f.write('\n')
            os.remove(local_image_name)
            page_number = page_number + 1
    # Upload the manifest last so readers can treat its presence as
    # "conversion complete".
    target_file_name = company_name + '/' + document_name + '/complete.txt'
    blob_client = blob_service_client.get_blob_client(container=container_name, blob=target_file_name)
    with open(local_file_list, "rb") as data:
        blob_client.upload_blob(data)
    os.remove(local_file_list)
def main(inputblob: func.InputStream):
    # Azure Functions blob-trigger entry point: convert the uploaded PDF
    # blob into per-page PNG images via pdf_to_png.
    logging.info(f"Python blob trigger function processed blob \n"
                 f"Name: {inputblob.name}\n"
                 f"Blob Size: {inputblob.length} bytes")
    #logging.info(f'Python Queue trigger function processed {len(inputblob)} bytes')
    #outputblob.set(inputblob)
    # Slice off a fixed 15-char container prefix and the ".pdf" suffix to
    # recover the company name -- assumes that exact path shape; TODO confirm.
    company = inputblob.name[15:-4]
    filename = 'test'  # NOTE(review): unused local, likely leftover.
    pdf_to_png(company, 'testdocname', inputblob)
    """
    logging.info(f"Converting blob bytestream to PNG")
    image_list = pdf_to_png(inputblob.name[15:-4], inputblob)
    logging.info(f"Converting blob bytestream to PNG complete")
    """
| eonduplessis/pdf-to-png-function | new_pdf_file/__init__.py | __init__.py | py | 2,555 | python | en | code | 0 | github-code | 13 |
2322312191 | import logging
import boto
from boto.ec2 import cloudwatch
import time
import datetime
from services import aws_ec2
from utils import aws_utils
from utils.cw_classes import EnvMetric, InstanceMetric
def get_start_end_statistics_time(config):
    """Return the (start, end) window for CloudWatch statistics queries.

    end is "now" adjusted by aws_utils.apply_time_difference (presumably
    aligning local time with AWS server time -- TODO confirm), truncated
    to the whole minute; start lies monitoring_period_minutes earlier,
    with 5 extra seconds of slack to avoid missing the boundary sample.
    """
    end = aws_utils.apply_time_difference(datetime.datetime.now())
    # Truncate to the minute so samples align on minute boundaries.
    delta = datetime.timedelta(seconds = end.second, microseconds = end.microsecond)
    end -= delta
    duration = datetime.timedelta(minutes=int(config.get('monitoring_period_minutes')), seconds=5)
    start = end - duration
    return start,end
def get_avg_cpu_utilization_percentage_for_environment(config):
    """Return average CPU utilization for the given environment within number of minutes specified in config.

    Queries CloudWatch per running instance, logs each instance's average,
    prints/logs a CSV summary line, and returns the populated EnvMetric.
    """
    logger = logging.getLogger(__name__)
    instances = aws_ec2.get_running_instances(config)
    cw = boto.ec2.cloudwatch.connect_to_region(config.get('region'))
    env_metric = EnvMetric()
    for instance in instances:
        list_metrics = cw.list_metrics(dimensions={'InstanceId': instance.id}, metric_name='CPUUtilization')
        #Newly added instances do not have recorded data, thus the query returns an empty list
        if len(list_metrics) > 0:
            inst_metric = InstanceMetric(instance,list_metrics[0])
            start,end = get_start_end_statistics_time(config)
            inst_metric.query = list_metrics[0].query(start, end, ['Average'])
            percent, num = inst_metric.average_percentage()
            rec = str(inst_metric.metric_records())
            logger.info('%s: CPU %.0f%% for %d min. %s' %(inst_metric.instance.id, percent, num,rec))
            env_metric.instance_metrics.append(inst_metric)
    # Summary line: human time, epoch seconds, avg %, running count, stopped count.
    now = str(time.time()).split('.')[0]
    now_human = str(datetime.datetime.now())
    percent, num = env_metric.get_average_percentage()
    data = '%s, %s, %.2f, %d, %d' %(now_human, now, percent, len(config.get_list('instances')), len(config.get_list('stopped_instances')))
    logger.info(data)
    print(data)
    return env_metric
| dzzh/IN4392 | aws/services/aws_cw.py | aws_cw.py | py | 2,024 | python | en | code | 0 | github-code | 13 |
34855241945 | import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from matplotlib import cm
s=20  # marker size for the iris scatter plots
# Iris petal length/width for the last two classes (Versicolor, Virginica),
# relabelled 0/1 for binary classification.
X, y = load_iris(return_X_y=True)
X = X[:, [2, 3]][50:]
y = y[50:] - 1
f, ax = plt.subplots(figsize=(4, 2.2))
ax.set_xlim(2.5, 7)
ax.set_ylim(0.7, 2.7)
x_ = ax.set_xlabel('Petal length')
y_ = ax.set_ylabel('Petal width')
for i, name in enumerate(['Versicolor', 'Virginica']):
    loc = np.where(y == i)[0]
    plt.scatter(X[loc, 0], X[loc, 1], s=s, label=name)
plt.legend(loc='upper left')
plt.savefig('images/linear_model1.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Fit a logistic regression and overlay its decision boundary:
# the line where c[0]*x + c[1]*y + b == 0.
lr = LogisticRegression().fit(X, y)
c = lr.coef_[0]
b = lr.intercept_
print(c, b)
x = np.linspace(2.5, 7)
pred = (- c[0] * x - b) / c[1]
plt.plot(x, pred, c='k', label='limit', linewidth=3)
plt.legend(loc='upper left')
plt.savefig('images/linear_model2.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Shade the two predicted regions over a dense grid.
xx, yy = np.meshgrid(np.linspace(2.5, 7),
                     np.linspace(0.7, 2.7))
Z = lr.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
colors = ['b', 'orange']
plt.contourf(xx, yy, Z, levels=1, alpha=0.3, colors=colors)
plt.savefig('images/linear_model3.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Second figure: a synthetic XOR-style layout (not linearly separable).
# NOTE: `c` is reused here for the cluster centres (it previously held
# the logistic-regression coefficients), as are X, y and s.
f, ax = plt.subplots(figsize=(3.5, 3.5))
n = 200
c = np.array([(0, 0), (0, 1), (1, 1), (1, 0)])
y = [0, 1, 0, 1]
X = np.concatenate([0.1 * np.random.randn(n, 2) + c_ for c_ in c])
y = np.concatenate([y_ * np.ones(n) for y_ in y])
xm, xM = -.5, 1.5
ax.set_xlim(xm, xM)
ax.set_ylim(xm, xM)
s = 3
for i, name in enumerate(['class 1', 'class 2']):
    loc = np.where(y == i)[0]
    plt.scatter(X[loc, 0], X[loc, 1], s=s, label=name)
plt.legend()
ax.set_xticks([])
ax.set_yticks([])
plt.savefig('images/linear_model4.png', dpi=200)
plt.close('all')
# f, ax = plt.subplots(figsize=(4, 2.2))
#
# scores = []
# train = []
# n_f = 30
# n_features_list = np.arange(1, n_f)
# n_repeat = 100
# for n_features in n_features_list:
# print(n_features)
# sc = []
# tr = []
# for i in range(n_repeat):
# X, y = make_regression(n_samples=10, n_features=n_f, n_informative=2)
# X_train, X_test, y_train, y_test = train_test_split(X, y)
# lr = LinearRegression().fit(X_train[:, :n_features], y_train)
# p = lr.predict(X_train[:, :n_features])
# pred = lr.predict(X_test[:, :n_features])
# score = np.sqrt(np.mean((y_test - pred) ** 2))
# sc.append(score)
# tr.append(np.sqrt(np.mean((p - y_train) ** 2)))
# scores.append(np.mean(sc))
# train.append(np.mean(tr))
#
# plt.plot(n_features_list, scores)
# plt.plot(n_features_list, train)
# plt.show()
| data-psl/lectures2020 | slides/03_machine_learning_models/linear_models.py | linear_models.py | py | 2,904 | python | en | code | 42 | github-code | 13 |
7579962732 | #!/usr/bin/python3
'''reads stdin line by line and computes metrics:'''
import sys
def print_metrics(status_codes, total_size):
    '''
    Print the accumulated metrics: the total file size first, then every
    status code with a non-zero count, in ascending code order.
    '''
    print(f'File size: {total_size}')
    for code, count in sorted(status_codes.items()):
        if count:
            print(f'{code}: {count}')
def parse_line(line):
    '''
    Extract the status code and body size from one access-log line.

    The code is the next-to-last whitespace-separated field and the size
    is the final field; returned as a (str, int) pair.
    '''
    fields = line.rsplit(None, 2)
    return fields[-2], int(fields[-1])
def main():
    '''Read log lines from stdin and track total size and status-code counts.

    Prints the metrics after every 10th input line and once more when the
    reader is interrupted with Ctrl-C (the KeyboardInterrupt is re-raised).
    Malformed lines no longer crash the reader: they are counted toward the
    10-line cadence but contribute nothing to the metrics.
    '''
    total_size = 0
    # Only these status codes are reported; anything else is ignored.
    status_codes = {
        "200": 0,
        "301": 0,
        "400": 0,
        "401": 0,
        "403": 0,
        "404": 0,
        "405": 0,
        "500": 0
    }
    try:
        for i, line in enumerate(sys.stdin, start=1):
            try:
                code, size = parse_line(line)
            except (IndexError, ValueError):
                # Line does not match "<...> <status> <size>"; skip it.
                code, size = None, 0
            total_size += size
            if code in status_codes:
                status_codes[code] += 1
            if i % 10 == 0:
                print_metrics(status_codes, total_size)
    except KeyboardInterrupt:
        print_metrics(status_codes, total_size)
        raise
if __name__ == '__main__':
    main()
| janymuong/alx-higher_level_programming | 0x0B-python-input_output/test_files/101-stats.py | 101-stats.py | py | 1,177 | python | en | code | 0 | github-code | 13 |
34226927884 | import string, praw, OAuth2Util, time, datetime
from operator import itemgetter
#Variables to Change
# [subreddit to analyse, subreddit to cross-post the report to ('' = none)].
subs = [
    ['korbendallas', '']
    #['photoshopbattles', 'battletalk'],
    #['cfb', ''],
    #['Yogscast', 'Fonjask'],
    #['MilitaryGfys', ''],
    #['conspiracy', ''],
    #['OutOfTheLoop', ''],
    #['shouldibuythisgame', 'emnii'],
    #['runescape', ''],
    #['2007scape', ''],
    #['RSDarkscape', ''],
    #['drama', ''],
    #['WarshipPorn', ''],
    #['Overwatch', 'OverwatchMeta'],
    #['mma', 'xniklasx'],
    #['anxiety', ''],
    #['femalehairadvice', ''],
    #['sewing', '']
]
subname = '' #Current Subreddit
post_to_sub = '' #Subreddit to post report to
username = '_korbendallas_'
user_agent = '_korbendallas_'
# --- Global submission accumulators (filled by gather_data, consumed by
# --- process_submission_data / submit_report, cleared by reset_variables).
submission_data = [] #Submission_Title, Submission_Author, Submission_Short_Link, Submission_Score, Submission_Short_Link, Submission_Created_Epoch, Submission_Created_GMT
top_submissions = ['Score|Author|Post Title', ':---|:---|:---'] # markdown table header rows
gilded_submissions = ['Score|Author|Post Title|Gilded', ':---|:---|:---|:---']
submission_authors = [] #Total_Score, Author, Count
total_submission_count = 0
top_submission_authors = ['Author|Total Score|Submission Count|Submission Average', ':---|:---|:---|:---']
total_submission_authors = 0
# --- Global comment accumulators (same lifecycle as above).
comment_data = [] #Comment_Author, Comment_Score, Comment_Link, Submission_Title
top_comments = ['Score|Author|Comment', ':---|:---|:---']
gilded_comments = ['Score|Author|Comment|Gilded', ':---|:---|:---|:---']
comment_authors = [] #Total_Score, Author, Count
total_comment_count = 0
top_comment_authors = ['Author|Total Score|Comment Count|Comment Average', ':---|:---|:---|:---']
total_comment_authors = 0
def Main():
    '''Log in to reddit and run the weekly report for every configured sub.'''
    global subs
    global subname
    global post_to_sub
    #Login
    r = praw.Reddit(user_agent)
    # SECURITY NOTE(review): credentials are hard-coded (placeholders here);
    # they should come from config/environment, never from source control.
    r.login('MYUSERNAME', 'MYPASSWORD', disable_warning=True)
    #Loop over each [subreddit, report destination] pair.
    for s in subs:
        subname = s[0]
        post_to_sub = s[1]
        run_report(r)
    return
def run_report(r):
    '''
    Run the full pipeline for the current subreddit: gather, process
    (submissions then comments), submit, then reset the accumulators.
    Each stage aborts the whole report -- printing the stage name and the
    error -- if it raises; note the globals are NOT reset in that case.
    '''
    global subname
    sub = r.get_subreddit(subname)
    print('Running Report for ' + subname)
    try:
        gather_data(r, sub)
    except (Exception) as e:
        print('gather')
        print(e)
        return
    try:
        process_submission_data()
    except (Exception) as e:
        print('submissions')
        print(e)
        return
    try:
        process_comment_data()
    except (Exception) as e:
        print('comments')
        print(e)
        return
    try:
        submit_report(r)
    except (Exception) as e:
        print('submit')
        print(e)
        return
    reset_variables()
    return
def gather_data(r, sub):
    '''
    Collect the past week's submissions and every comment on them from
    *sub* into the module-level submission_data / comment_data lists,
    formatting gilded items into gilded_submissions / gilded_comments.
    '''
    print('Gathering Data')
    global submission_data
    global gilded_submissions
    global comment_data
    global gilded_comments
    #Gather submissions from the week (cloudsearch epoch-range query; 604800s = 7 days)
    epoch_today = time.time()
    epoch_a_week_ago = epoch_today - 604800
    search_string = 'timestamp:' + str(int(epoch_a_week_ago)) + '..' + str(int(epoch_today))
    submissions = sub.search(search_string, syntax='cloudsearch', limit=None)
    #Go through each submission
    for submission in submissions:
        try:
            #Disregard deleted or removed posts (author is None when deleted)
            if submission.author:
                submission_data_row = []
                submission_data_row.append(submission.title) #Submission_Title
                submission_data_row.append('/u/' + submission.author.name) #Submission_Author
                submission_data_row.append(submission.short_link) #Submission_Short_Link
                submission_data_row.append(int(submission.score)) #Submission_Score
                submission_data_row.append(submission.short_link) #Submission_Short_Link
                submission_data_row.append(float(submission.created_utc)) #Submission_Created_Epoch
                submission_data_row.append(str(time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(float(submission.created_utc))))) #Submission_Created_GMT
                submission_data.append(submission_data_row)
                #Add gilded submissions to list (pre-formatted markdown row)
                if submission.gilded > 0:
                    gilded_submissions.append(str(submission.score) + '|/u/' + submission.author.name + '|[' + submission.title + '](' + submission.short_link + ')|' + str(submission.gilded) + 'X')
                #Get the comments (fully expand "load more comments" stubs first)
                submission.replace_more_comments(limit=None, threshold=0)
                comments = praw.helpers.flatten_tree(submission.comments)
                #Disregard submissions with no comments
                if comments:
                    #Go through each comment
                    for comment in comments:
                        try:
                            #Disregard deleted/removed comments
                            if comment.author and comment.banned_by == None:
                                comment_data_row = []
                                comment_data_row.append('/u/' + comment.author.name) #Comment_Author
                                comment_data_row.append(int(comment.score)) #Comment_Score
                                comment_data_row.append(comment.permalink) #Comment_Link
                                comment_data_row.append(submission.title) #Submission_Title
                                comment_data.append(comment_data_row)
                                #Add gilded comments to list
                                if comment.gilded > 0:
                                    gilded_comments.append(str(comment.score) + '|/u/' + comment.author.name + '|[' + submission.title + '](' + submission.short_link + ')|' + str(comment.gilded) + 'X')
                        except (Exception) as e:
                            print(e)
        except (Exception) as e:
            print(e)
    return
def process_submission_data():
    '''
    Rank the gathered submissions by score: build the top-25 submissions
    markdown table, tally per-author totals, and build the top-25
    submitters table.  Operates entirely on the module-level accumulators.
    (A leftover no-op bare `total_submission_count` expression was removed.)
    '''
    print('Processing Submissions')
    global submission_data
    global top_submissions
    global submission_authors
    global total_submission_count
    global top_submission_authors
    global total_submission_authors
    # Highest score first (index 3 is Submission_Score).
    submission_data = reversed(sorted(submission_data, key=itemgetter(3)))
    for submission_data_row in submission_data:
        try:
            total_submission_count = total_submission_count + 1
            # Top-25 table: 2 header rows + 25 entries = 27, hence "< 28".
            if len(top_submissions) < 28:
                top_submissions.append(str(submission_data_row[3]) + '|' + str(submission_data_row[1]) + '|[' + submission_data_row[0] + '](' + submission_data_row[2] + ')')
            # Accumulate per-author score totals and submission counts.
            if submission_authors:
                submission_author_exists = False
                for submission_author in submission_authors:
                    if submission_data_row[1] == submission_author[1]:
                        submission_author[0] = submission_author[0] + submission_data_row[3]
                        submission_author[2] = submission_author[2] + 1
                        submission_author_exists = True
                        break
                if not submission_author_exists:
                    submission_authors.append([submission_data_row[3], submission_data_row[1], 1])
            else:
                submission_authors.append([submission_data_row[3], submission_data_row[1], 1])
        except (Exception) as e:
            print(e)
    # Compile the top-25 submitters table, ordered by total score.
    submission_authors = reversed(sorted(submission_authors, key=itemgetter(0)))
    for submission_author in submission_authors:
        try:
            total_submission_authors = total_submission_authors + 1
            if len(top_submission_authors) < 28:
                top_submission_authors.append(submission_author[1] + '|' + str(submission_author[0]) + '|' + str(submission_author[2]) + '|' + str(int(float(submission_author[0]) / float(submission_author[2]))))
            else:
                # NOTE(review): breaking here means total_submission_authors
                # stops at 26, unlike the comment-side counterpart -- confirm.
                break
        except (Exception) as e:
            print(e)
    return
def process_comment_data():
    '''
    Rank the gathered comments by score: build the top-25 comments
    markdown table, tally per-author totals, and build the top-25
    commenters table.  Operates on the module-level accumulators.
    '''
    print('Processing Comments')
    global comment_data#Comment_Author, Comment_Score, Comment_Link, Submission_Title
    global top_comments
    global comment_authors#Total_Score, Author, Count
    global total_comment_count
    global top_comment_authors
    global total_comment_authors
    # Highest score first (index 1 is Comment_Score).
    comment_data = reversed(sorted(comment_data, key=itemgetter(1)))
    for comment_data_row in comment_data:
        try:
            total_comment_count = total_comment_count + 1
            #Create Top 25 Comments Table (2 header rows + 25 entries, hence < 28)
            if len(top_comments) < 28:
                top_comments.append(str(comment_data_row[1]) + '|' + str(comment_data_row[0]) + '|[' + comment_data_row[3] + '](' + comment_data_row[2] + '?context=1000)')
            #Compile Top Comment Author Scores
            if comment_authors:
                comment_author_exists = False
                for comment_author in comment_authors:
                    if comment_data_row[0] == comment_author[1]:
                        comment_author[0] = comment_author[0] + comment_data_row[1]
                        comment_author[2] = comment_author[2] + 1
                        comment_author_exists = True
                        break
                if not comment_author_exists:
                    comment_authors.append([comment_data_row[1], comment_data_row[0], 1])
            else:
                comment_authors.append([comment_data_row[1], comment_data_row[0], 1])
        except (Exception) as e:
            print(e)
    #Compile Top Comment Author Table, ordered by total score
    comment_authors = reversed(sorted(comment_authors, key=itemgetter(0)))
    for comment_author in comment_authors:
        try:
            total_comment_authors = total_comment_authors + 1
            if len(top_comment_authors) < 28:
                top_comment_authors.append(str(comment_author[1]) + '|' + str(comment_author[0]) + '|' + str(comment_author[2]) + '|' + str(int(float(comment_author[0]) / float(comment_author[2]))))
        except (Exception) as e:
            print(e)
    return
def submit_report(r):
    '''
    Assemble the markdown report from the processed accumulators and
    submit it: always to /r/WeeklyReport, and additionally to
    post_to_sub when one is configured for the current subreddit.
    '''
    print('Compiling and Submitting Report')
    global subname
    global post_to_sub
    global submission_data
    global top_submissions
    global gilded_submissions
    global submission_authors
    global total_submission_count
    global top_submission_authors
    global total_submission_authors
    global comment_data
    global top_comments
    global gilded_comments
    global comment_authors
    global total_comment_count
    global top_comment_authors
    global total_comment_authors
    report_text = ['#Weekly Report for /r/' + subname]
    try:
        # Date range covered: seven days ago through today.
        report_text.append(str(time.strftime('%A, %B %d, %Y', (datetime.datetime.now() + datetime.timedelta(days=-7)).timetuple())) + ' - ' + str(time.strftime('%A, %B %d, %Y', time.gmtime())))
        report_text.append('---')
        report_text.append('---')
        report_text.append('#Submissions')
        report_text.append('---')
        report_text.append('---')
        report_text.append('Total Submissions: ' + str(total_submission_count))
        report_text.append('Total Submission Authors: ' + str(total_submission_authors))
        report_text.append('---')
        report_text.append('##Top 25 Submissions')
        report_text.append('\r\n'.join(top_submissions))
        report_text.append('---')
        report_text.append('##Top 25 Submitters')
        report_text.append('\r\n'.join(top_submission_authors))
        report_text.append('---')
        report_text.append('---')
        # The two header rows are subtracted from the gilded counts.
        report_text.append(str(len(gilded_submissions) - 2) + ' Gilded Submissions')
        if len(gilded_submissions) > 2:
            report_text.append('\r\n'.join(gilded_submissions))
        report_text.append('---')
        report_text.append('---')
        report_text.append('#Comments')
        report_text.append('---')
        report_text.append('---')
        report_text.append('Total Comments: ' + str(total_comment_count))
        report_text.append('Total Comment Authors: ' + str(total_comment_authors))
        report_text.append('---')
        report_text.append('##Top 25 Comments')
        report_text.append('\r\n'.join(top_comments))
        report_text.append('---')
        report_text.append('##Top 25 Commenters')
        report_text.append('\r\n'.join(top_comment_authors))
        report_text.append('---')
        report_text.append('---')
        report_text.append(str(len(gilded_comments) - 2) + ' Gilded Comments')
        if len(gilded_comments) > 2:
            report_text.append('\r\n'.join(gilded_comments))
        report_text.append('---')
        report_text.append('---')
        report_text.append('^(created by /u/_korbendallas_)')
        report_text.append('---')
    except (Exception) as e:
        print(e)
    #Submit Report (failures are printed but do not abort the run)
    post_title = 'Weekly Report for /r/' + subname + ' - ' + str(time.strftime('%A, %B %d, %Y', time.gmtime()))
    try:
        r.submit('WeeklyReport', post_title, text='\r\n'.join(report_text))
    except:
        print('Error submitting post to WeeklyReport :', post_title)
    try:
        if not post_to_sub == '':
            r.submit(post_to_sub, post_title, text='\r\n'.join(report_text))
    except:
        print('Error submitting post to', post_to_sub, ':', post_title)
    return
def reset_variables():
    '''
    Restore every module-level accumulator (and the markdown table header
    rows) to its initial state so the next subreddit starts clean.
    '''
    global subname
    global post_to_sub
    global submission_data
    global top_submissions
    global gilded_submissions
    global submission_authors
    global total_submission_count
    global top_submission_authors
    global total_submission_authors
    global comment_data
    global top_comments
    global gilded_comments
    global comment_authors
    global total_comment_count
    global top_comment_authors
    global total_comment_authors
    subname = ''
    post_to_sub = ''
    submission_data = []
    top_submissions = ['Score|Author|Post Title', ':---|:---|:---']
    gilded_submissions = ['Score|Author|Post Title|Gilded', ':---|:---|:---|:---']
    submission_authors = []
    total_submission_count = 0
    top_submission_authors = ['Author|Total Score|Submission Count|Submission Average', ':---|:---|:---|:---']
    total_submission_authors = 0
    comment_data = []
    top_comments = ['Score|Author|Comment', ':---|:---|:---']
    gilded_comments = ['Score|Author|Comment|Gilded', ':---|:---|:---|:---']
    comment_authors = []
    total_comment_count = 0
    top_comment_authors = ['Author|Total Score|Comment Count|Comment Average', ':---|:---|:---|:---']
    total_comment_authors = 0
    return
Main()
| korbendallas-reddit/WeeklyReport | WeeklyReport.py | WeeklyReport.py | py | 15,203 | python | en | code | 4 | github-code | 13 |
71404260497 | # pylint: disable=W0621
"""Asynchronous Python client for GLIMMR."""
import asyncio
from glimmr import Glimmr
async def main():
    """Show example on controlling your GLIMMR device.

    Connects to the device at the hard-coded IP; the async context
    manager tears down the underlying session on exit.  Pauses for
    operator confirmation between each state change.
    """
    async with Glimmr("192.168.1.34") as led:
        await led.update()
        print(led.system_data)
        await led.set_mode(0)
        input("Press Enter to continue set device to ambient mode...")
        await led.set_ambient_scene(3)
        input("Press Enter to continue set device to solid ambient color...")
        await led.set_ambient_color("ff0000")
        input("Press Enter to turn device off...")
        await led.set_mode(0)
if __name__ == "__main__":
    # asyncio.run() creates, runs, and closes the event loop for us
    # (Python 3.7+), replacing the deprecated
    # get_event_loop()/run_until_complete() pattern.
    asyncio.run(main())
| d8ahazard/glimmr-python | examples/control_example.py | control_example.py | py | 740 | python | en | code | 0 | github-code | 13 |
37998229868 | import TruthD3PDMaker
import D3PDMakerCoreComps
from D3PDMakerCoreComps.D3PDObject import D3PDObject
from D3PDMakerCoreComps.IndexMultiAssociation import IndexMultiAssociation
from D3PDMakerCoreComps.IndexAssociation import IndexAssociation
from D3PDMakerConfig.D3PDMakerFlags import D3PDMakerFlags
from TruthD3PDAnalysis.AllTruthFilterTool import AllTruthFilterTool
from TruthD3PDMaker.TruthD3PDMakerKeys import TruthD3PDKeys
from TruthD3PDMaker.TruthD3PDMakerFlags import TruthD3PDFlags
def make_GenVertex_D3PDObject( default_prefix, default_sgkey,
                               default_object_name = "",
                               default_filter = AllTruthFilterTool(),
                               default_label = None, **other_defaults ):
    """Factory for a D3PDObject over truth GenVertex collections.

    The defaults given here (StoreGate key, filter tool, getter label,
    extra VectorFillerTool keywords) can each be overridden per
    instantiation via the inner maker's keyword arguments.
    """
    def make_obj( name, prefix, object_name,
                  getter = None, sgkey = None, filter = default_filter,
                  label = default_label, **kw ):
        # Fall back to the factory-level defaults when not supplied.
        if sgkey == None: sgkey = default_sgkey
        if label == None: label = TruthD3PDKeys.GenVertexGetterLabel()
        if getter == None:
            getter = TruthD3PDMaker.GenVertexGetterTool( name + '_Getter',
                                                         Label = label,
                                                         Selector = filter,
                                                         SGKey = sgkey )
        # Per-call keywords win over the factory-wide extra defaults.
        defs = other_defaults.copy()
        defs.update( kw )
        from D3PDMakerConfig.D3PDMakerFlags import D3PDMakerFlags
        return D3PDMakerCoreComps.VectorFillerTool( name,
                                                    Prefix = prefix,
                                                    Getter = getter,
                                                    ObjectName = object_name,
                                                    SaveMetadata = \
                                                    D3PDMakerFlags.SaveObjectMetadata(),
                                                    **defs )
    return D3PDObject( make_obj, default_prefix, default_object_name )
# Default GenVertex D3PDObject instance plus its fillers and (flag-gated)
# index associations to particles and to the parent GenEvent.
GenVertexD3PDObject = make_GenVertex_D3PDObject( TruthD3PDKeys.GenVertexPrefix(),
                                                 D3PDMakerFlags.TruthSGKey(),
                                                 "GenVertexD3PDObject" )
GenVertexD3PDObject.defineBlock( 0,
                                 'GenVertex',
                                 TruthD3PDMaker.GenVertexFillerTool,
                                 WriteID=TruthD3PDFlags.WriteTruthVertexIDs() )
# Particle associations are only wired up when a particle getter label is
# configured; incoming and outgoing particle links are separately gated.
if TruthD3PDFlags.GenParticleAssocLabel() != None and TruthD3PDFlags.GenParticleAssocLabel() != "":
    if TruthD3PDFlags.GenVertexInPartAssoc():
        GenVertexPartInAssoc = \
            IndexMultiAssociation( GenVertexD3PDObject,
                                   TruthD3PDMaker.GenVertexParticleAssociationTool,
                                   TruthD3PDFlags.GenParticleAssocLabel(),
                                   blockname = "GenVertexPartInAssoc",
                                   prefix = 'inpart_',
                                   InParticles = True )
    if TruthD3PDFlags.GenVertexOutPartAssoc():
        GenVertexPartOutAssoc = \
            IndexMultiAssociation( GenVertexD3PDObject,
                                   TruthD3PDMaker.GenVertexParticleAssociationTool,
                                   TruthD3PDFlags.GenParticleAssocLabel(),
                                   blockname = "GenVertexPartOutAssoc",
                                   prefix = 'outpart_',
                                   InParticles = False )
# Association back to the owning GenEvent, gated the same way.
if TruthD3PDFlags.GenEventAssocLabel() != None and TruthD3PDFlags.GenEventAssocLabel() != "":
    GenVertexEventAssoc = IndexAssociation( GenVertexD3PDObject,
                                            TruthD3PDMaker.GenVertexEventAssociationTool,
                                            TruthD3PDFlags.GenEventAssocLabel(),
                                            level = 1,
                                            blockname = "GenVertexEventAssoc",
                                            prefix = 'mcevt_' )
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/D3PDMaker/TruthD3PDMaker/python/GenVertexD3PDObject.py | GenVertexD3PDObject.py | py | 4,104 | python | en | code | 1 | github-code | 13 |
7722492485 | #! /usr/bin/env python
import os
import sys
import getopt
import filecmp
def strict_comparator(argv):
    """Compare two files for an exact, byte-for-byte match.

    Parses ``-s <standard file>`` and ``-n <new file>`` from *argv*
    (``-h`` prints usage), compares the two files and writes ``passed``
    or ``FAILED`` to a local file named ``TestResult_summary``.
    Exits the process when the arguments cannot be parsed.
    """
    print()
    print("---------------------------------------------")
    print("|                                           |")
    print("|   S T R I C T   C O M P A R A T O R       |")
    print("|                                           |")
    print("|     Performs a strict, exact match        |")
    print("|      comparison of two files              |")
    print("|                                           |")
    print("---------------------------------------------")
    print()
    try:
        # "hs:n:": '-h' is a flag, '-s' and '-n' each take an argument.
        # (The original option string "h s: n:" accidentally declared the
        # space character as an additional option.)
        opts, args = getopt.getopt(argv, "hs:n:")
    except getopt.GetoptError:
        print("Error in user inputs.")
        sys.exit(0)

    new_result = 'NULL'
    std_result = 'NULL'
    for opt, arg in opts:
        if opt == '-h':
            print('-s for standard file -n new output file')
        elif opt == '-n':
            new_result = arg
        elif opt == '-s':
            std_result = arg

    print('(o) Comparing files ' + new_result + ' and ' + std_result)

    # The result is True only when both files exist and their contents match
    # exactly.  shallow=False forces a byte-by-byte comparison; the default
    # shallow mode only compares os.stat() signatures, which could wrongly
    # report a match for same-sized files -- not acceptable for a *strict*
    # comparator.
    if not os.path.isfile(new_result):
        print('new result does not exist')
        result = False
    elif not os.path.isfile(std_result):
        print('std result does not exist')
        result = False
    else:
        result = filecmp.cmp(new_result, std_result, shallow=False)

    # Record the verdict in a local file called "TestResult_summary".
    with open("TestResult_summary", "w") as f:
        print("passed" if result else "FAILED", file=f)
if __name__=='__main__':
args = sys.argv[1:]
strict_comparator(args)
| johnportiz14/pyDiffusionFDM | testingPackage/strict_comparator.py | strict_comparator.py | py | 1,797 | python | en | code | 0 | github-code | 13 |
40963718152 | # # -*- coding: UTF-8 -*-
#
# """
# # @Time : 2019-09-17 17:40
# # @Author : yanlei
# # @FileName: test.py
# """
# import sys
# import collections
#
#
# def func(s):
# count = collections.Counter(s)
# stack = []
# visited = collections.defaultdict(bool)
# for num in s:
# count[num] -= 1
# if visited[num]:
# continue
# while stack and count[stack[-1]] and stack[-1] > num:
# visited[stack[-1]] = False
# stack.pop()
# visited[num] = True
# stack.append(num)
# return "".join(stack)
#
#
# for line in sys.stdin:
# print(func(line))
import sys
import math
# def func(num):
# if -10 < num < 10:
# return num
# str_num = str(num)
# if str_num[0] != "-":
# str_num = str_num[::-1]
# num = int(str_num)
# else:
# str_num = str_num[1:][::-1]
# num = int(str_num)
# num = -num
# return num if -(2 ** 31) < num < (2 ** 31) - 1 else 0
#
#
# for line in sys.stdin:
# print(func(int(line)))
# s = 'string'
# s[0] = 'a'
# print(s[0])
# tu = ('123', 122)
# # t2 = tu*2
# # print(type(t2))
# a = {'tom', 'jack'}
# print(a)
# print(type(a))
# dic = {}
# print(dic)
# print(type(dic))
# dic[('a',)] = 123
# print(dic)
# print(type(dic))
import copy
# a = {1: [1,2,3]}
# b = a
# b[0] = 456
# print(a)
# print(b)
# b = a.copy()
# print(id(a))
# print(id(b))
# x = a[1]
# x.append(4)
# print(a)
# print(b)
# Character-frequency demo: count every character of `string`, invert the
# mapping (count -> characters sharing that count), then report the three
# highest counts and the characters carrying the top count.
string = 'afasfhajifasfbfhaskfnqwihweruytuinzcvbnvzbvbxagsghlewtlqourpqurqpohbbzkziahiaiq'
alp_dict = {}
for ch in string:
    if ch in alp_dict:
        alp_dict[ch] += 1
    else:
        alp_dict[ch] = 1
print(alp_dict)
# Invert the frequency table: keys become counts, values concatenate every
# character that occurs that many times.
new_dict = {}
for letter, count in alp_dict.items():
    print(letter, count)
    new_dict[count] = new_dict.get(count, '') + letter
print(new_dict)
top10 = sorted(new_dict.keys(), reverse=True)[:3]
print(top10)
print(new_dict[top10[0]])
| Yanl05/FullStack | 笔试/test.py | test.py | py | 2,079 | python | en | code | 0 | github-code | 13 |
983695480 | with open('2016/2.txt') as f:
input = f.read().strip()
# 3x3 keypad for part 1; each row is a string, addressed pad[row][col].
first = '''
123
456
789
'''.strip().split('\n')
# Diamond-shaped keypad for part 2; '0' cells mark positions that do not
# exist on the pad (getCode refuses to move onto them).
second = '''
00100
02340
56789
0ABC0
00D00
'''.strip('\n').split('\n')
# Move deltas as [dx, dy]: L/R change x (column), U/D change y (row).
commands = {'U': [0, -1], 'R': [1, 0], 'D': [0, 1], 'L': [-1, 0]}
def getCode(pad, pos):
    """Trace the moves in the module-level ``input`` across *pad* and print the code.

    Each line of ``input`` yields one code character: the U/R/D/L moves are
    applied from *pos* (given as ``[x, y]`` = [column, row]); a move is
    rejected when it would leave the pad or land on a '0' placeholder cell.
    """
    for line in input.split('\n'):
        for command in line:
            # Clamp x by the column count and y by the row count.  (The
            # original clamped x with len(pad) -- the row count -- which is
            # only correct for square pads.)
            newX = max(min(pos[0] + commands[command][0], len(pad[0]) - 1), 0)
            newY = max(min(pos[1] + commands[command][1], len(pad) - 1), 0)
            # pad is row-major, so the cell at (x, y) is pad[y][x].  The
            # original tested pad[newX][newY] (transposed); it only worked
            # because both keypads' '0' masks happen to be symmetric.
            if pad[newY][newX] != '0':
                pos = [newX, newY]
        print(pad[pos[1]][pos[0]], end='')
    print('')
getCode(first, [1, 1])
getCode(second, [0, 2])
| andrewgreenh/advent-of-code | Python/2016/2.py | 2.py | py | 670 | python | en | code | 2 | github-code | 13 |
73944520659 |
def find_line(binary_warped):
    """Locate left/right lane-line pixels in a warped binary image via sliding windows.

    Starts from the two histogram peaks of the bottom half of the image,
    tracks each line upward through 9 search windows, then fits a
    second-order polynomial x = f(y) to each side.

    Returns a tuple ``(left_fitx, right_fitx, ploty, left_fit, right_fit,
    left_lane_inds, right_lane_inds, A)`` where ``A`` is True when the
    detection looks unreliable (too few pixels on either side, or a lane
    width outside [300, 800] pixels).  ``left_fit``/``right_fit`` are None
    when no pixels were found on that side.

    Note: ``np.int`` (removed in NumPy 1.24) has been replaced with the
    built-in ``int``; the unused visualization buffers were dropped.
    """
    # Column histogram of the bottom half: lane lines show up as peaks.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9
    window_height = int(binary_warped.shape[0] // nwindows)
    # Coordinates of every nonzero (candidate lane) pixel.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100   # half-width of each search window
    minpix = 50    # minimum pixels required to re-center the next window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # Window bounds, scanning from the bottom of the image upward.
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-center the next window on the mean x of the pixels just found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Sanity check: flag the frame when either line is sparse or the lane
    # width (mean x separation) is implausible.
    A = False
    left_x_mean = np.mean(leftx, axis=0)
    right_x_mean = np.mean(rightx, axis=0)
    lane_width = np.subtract(right_x_mean, left_x_mean)
    if len(leftx) <= 1000 or len(rightx) <= 1000 or lane_width < 300 or lane_width > 800:
        A = True
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    if len(leftx) != 0:
        left_fit = np.polyfit(lefty, leftx, 2)
        left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    else:
        left_fit = None
        left_fitx = 1 * ploty ** 2 + 1 * ploty
    if len(rightx) != 0:
        right_fit = np.polyfit(righty, rightx, 2)
        right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    else:
        right_fit = None
        right_fitx = 1 * ploty ** 2 + 1 * ploty
    return left_fitx, right_fitx, ploty, left_fit, right_fit, left_lane_inds, right_lane_inds, A
class Line():
    """Tracks the state of one lane line across consecutive video frames."""
    def __init__(self):
        # Detection status of the most recent frame.
        self.detected = False
        # x values of the last n fits of the line.
        self.recent_xfitted = []
        # Average x values of the fitted line over the last n iterations.
        self.bestx = np.zeros(720)
        # Polynomial coefficients averaged over the last n iterations.
        self.best_fit = None
        # Polynomial coefficients of the most recent fits (seeded with a
        # placeholder entry).
        self.current_fit = [np.array([False])]
        self.line_base_pos = np.zeros(1)
        # Coefficient-wise difference between the last and the new fit.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # Pixel coordinates of the detected line.
        self.allx = None
        self.ally = None
        self.pre = False
        # Number of frames used for smoothing.
        self.smoothen_nframes = 10
        # Whether this is the first processed frame.
        self.first_frame = True

    def add_best_fit(self, lane_fit, lane_inds):
        """Fold a new polynomial fit into the tracked state (or back off on None)."""
        if lane_fit is None:
            # Detection failed: drop the newest stored fit (when we have
            # spares) and fall back to the average of what remains.
            self.detected = False
            if len(self.current_fit) > 2:
                self.current_fit = self.current_fit[:-1]
            else:
                self.current_fit = list(self.current_fit)
            self.best_fit = np.average(self.current_fit, axis=0)
            return
        if self.best_fit is None:
            # First usable fit: adopt it directly.
            self.best_fit = lane_fit
            return
        self.diffs = abs(lane_fit - self.best_fit)
        drifted = self.diffs[0] > 0.001 or self.diffs[1] > 1.0 or self.diffs[2] > 100.
        if drifted and self.current_fit:
            # New fit disagrees too much with the running best -- reject it.
            self.detected = False
            return
        self.detected = True
        self.px_count = np.count_nonzero(lane_inds)
        self.current_fit.append(lane_fit)
        if len(self.current_fit) > 5:
            self.current_fit = self.current_fit[-5:]
        self.best_fit = lane_fit
def previouslane(binary_warped, prev_left_fit, prev_right_fit):
    """Re-detect lane lines by searching a margin around the previous frame's fits.

    Falls back to the previous polynomial when no pixels are found on a
    side.  Returns (left_fitx, right_fitx, ploty, left_fit, right_fit,
    left_lane_inds, right_lane_inds).
    """
    search_margin = 100
    nz_rows, nz_cols = binary_warped.nonzero()
    nz_y = np.array(nz_rows)
    nz_x = np.array(nz_cols)

    def _inds_near(fit):
        # Indices of nonzero pixels within +/- search_margin of the curve
        # x = fit[0]*y^2 + fit[1]*y + fit[2].
        centre = fit[0] * (nz_y ** 2) + fit[1] * nz_y + fit[2]
        return ((nz_x > centre - search_margin) &
                (nz_x < centre + search_margin)).nonzero()[0]

    left_lane_inds = _inds_near(prev_left_fit)
    right_lane_inds = _inds_near(prev_right_fit)
    leftx, lefty = nz_x[left_lane_inds], nz_y[left_lane_inds]
    rightx, righty = nz_x[right_lane_inds], nz_y[right_lane_inds]
    # Refit each side when pixels were found; otherwise reuse the old fit.
    left_fit = np.polyfit(lefty, leftx, 2) if len(leftx) != 0 else prev_left_fit
    right_fit = np.polyfit(righty, rightx, 2) if len(rightx) != 0 else prev_right_fit
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    return left_fitx, right_fitx, ploty, left_fit, right_fit, left_lane_inds, right_lane_inds
# Module-level lane-state trackers shared across frames by the pipeline.
left_line = Line()
right_line = Line()
def draw_on_original(undist, left_fit, right_fit, ploty, Minv):
    """Project the fitted lane polygon back onto the undistorted camera image.

    Evaluates both lane polynomials over *ploty*, fills the area between
    them in green on a blank warped canvas, unwarps it with *Minv* and
    alpha-blends it onto *undist*.
    """
    overlay = np.zeros_like(undist).astype(np.uint8)
    left_x = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_x = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    # Left edge top-to-bottom, right edge bottom-to-top, so the points form
    # a closed polygon.
    left_pts = np.array([np.transpose(np.vstack([left_x, ploty]))])
    right_pts = np.array([np.flipud(np.transpose(np.vstack([right_x, ploty])))])
    lane_polygon = np.hstack((left_pts, right_pts))
    cv2.fillPoly(overlay, np.int_([lane_polygon]), (0, 255, 0))
    unwarped = cv2.warpPerspective(overlay, Minv, (undist.shape[1], undist.shape[0]))
    return cv2.addWeighted(undist, 1, unwarped, 0.4, 0)
| QuyenPham1131998/self-driving-car | Sliding Window.py | Sliding Window.py | py | 8,535 | python | en | code | 0 | github-code | 13 |
26260452533 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 20:34:21 2020
@author: vinmue
"""
import numpy as np
from tensorflow.keras.layers import Dense,LSTM,Input,Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model, clone_model
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import os
#tf.compat.v1.disable_eager_execution()
class DQNModel(object):
    """Experience-replay buffer plus the policy / target Q-networks of a DQN.

    The replay buffer is a fixed-size ring: once ``mem_size`` transitions
    have been stored, new transitions overwrite the oldest ones.
    """

    def __init__(self, input_shape, action_len, mem_size=1000000):
        self.input_shape = input_shape
        self.action_len = action_len
        self.mem_size = mem_size
        self.mem_counter = 0       # next write slot in the ring buffer
        self.total_counter = 0     # lifetime number of stored transitions
        self.states = np.zeros((mem_size, *input_shape), dtype=np.int32)
        self.actions = np.zeros(mem_size, dtype=np.int32)
        self.rewards = np.zeros(mem_size, dtype=float)
        self.new_states = np.zeros((mem_size, *input_shape), dtype=np.int32)
        self.dones = np.zeros(mem_size, dtype=np.int32)

    def store_transition(self, state, action, reward, new_state, done):
        """Store one (s, a, r, s', done) transition, wrapping when the buffer is full."""
        self.mem_counter = 0 if self.mem_counter >= self.mem_size else self.mem_counter
        self.states[self.mem_counter] = state
        self.actions[self.mem_counter] = action
        self.rewards[self.mem_counter] = reward
        self.new_states[self.mem_counter] = new_state
        self.dones[self.mem_counter] = done
        self.mem_counter += 1
        self.total_counter += 1

    def sample_memory(self, batch_size):
        """Sample ``batch_size`` distinct transitions uniformly at random.

        Returns (states, actions, rewards, new_states, dones) arrays.
        """
        max_memory = min(self.mem_size, self.total_counter)
        batch = np.random.choice(np.arange(max_memory), batch_size, replace=False)
        return (self.states[batch], self.actions[batch], self.rewards[batch],
                self.new_states[batch], self.dones[batch])

    def build_model(self, n_layers, n_neurons, learning_rate):
        """Create the policy network and an identically-structured target network.

        Bug fix: when stacking more than one LSTM layer, every layer except
        the last must emit the full sequence (``return_sequences=True``).
        The original stacked plain LSTMs, which fails to build for
        ``n_layers >= 2`` because the second LSTM receives a 2-D tensor.
        """
        self.q_policy = Sequential()
        for i in range(n_layers):
            self.q_policy.add(LSTM(n_neurons, return_sequences=i < n_layers - 1))
            self.q_policy.add(Dropout(0.05))
        self.q_policy.add(Dense(self.action_len))
        self.q_policy.compile(optimizer=Adam(learning_rate=learning_rate),
                              loss='mean_squared_error')
        self.q_target = clone_model(self.q_policy)

    def load_model(self, model_file):
        """Load the policy network from disk and derive a fresh target network.

        Bug fix: ``clone_model`` expects a model instance; the original
        passed the file path string.
        """
        self.q_policy = load_model(model_file)
        self.q_target = clone_model(self.q_policy)

    def save_model(self, model_file):
        """Persist the policy network to ``model_file``."""
        self.q_policy.save(model_file)
class DQNAgent(object):
    """Epsilon-greedy DQN agent: replay memory, policy/target networks and the learning loop."""
    def __init__(self,learning_rate,gamma,batch_size,input_shape, action_len,\
                 min_memory_for_training, epsilon,epsilon_min = 0.01,epsilon_dec = 1e-3,\
                 mem_size=1000000, model_file = "dqn_model.h5", frozen_iterations=1):
        """Configure hyper-parameters and build (or resume) the Q-networks.

        ``frozen_iterations`` controls how many learn() calls pass between
        synchronisations of the target network with the policy network.
        """
        #input arguments
        self.it_counter =0
        self.gamma = gamma
        self.batch_size = batch_size
        self.input_shape = input_shape
        self.action_len = action_len
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_dec = epsilon_dec
        self.mem_size = mem_size
        self.model_file = model_file
        self.min_memory_for_training = min_memory_for_training
        #new attributes
        self.action_space = np.arange(action_len)
        self.dqn = DQNModel(input_shape = input_shape, action_len=action_len,mem_size= mem_size)
        self.frozen_iterations = frozen_iterations
        # Resume from a previously saved model when one exists on disk;
        # otherwise build a fresh 2-layer LSTM network.
        if os.path.exists(model_file):
            self.dqn.load_model(model_file)
        else:
            self.dqn.build_model(n_layers = 2, n_neurons = 64, learning_rate=learning_rate)
    def save_model(self):
        """Persist the policy network to ``self.model_file``."""
        self.dqn.save_model(self.model_file)
    def store_transition(self, state, action, reward, new_state, done):
        """Forward one transition to the underlying replay memory."""
        self.dqn.store_transition(state,action,reward,new_state,done)
    def choose_action(self,state):
        """Epsilon-greedy action: random with probability epsilon, else argmax Q(state)."""
        if np.random.random()<self.epsilon:
            action = np.random.choice(self.action_space)
        else:
            # Network expects a batch dimension, so wrap the single state.
            state = np.array([state])
            q_st = self.dqn.q_policy.predict(state)
            action = np.argmax(q_st)
        return action
    def learn(self):
        """One DQN update on a sampled minibatch (no-op until enough memory is stored)."""
        if self.dqn.mem_counter < self.min_memory_for_training:
            return
        states, actions, rewards, new_states, dones = self.dqn.sample_memory(self.batch_size)
        q_target = self.dqn.q_policy.predict(states)
        q_next= self.dqn.q_target.predict(new_states)
        batch_index = np.arange(self.batch_size)
        # NOTE(review): the bootstrap term is multiplied by `dones`, so the
        # discounted next-state value is added only when `dones` is truthy.
        # That matches the standard DQN target only if callers store the
        # flag inverted (done = 0 at terminal states) -- confirm against the
        # environment loop that feeds store_transition().
        q_target[batch_index,actions] = rewards + self.gamma * np.max(q_next, axis = 1)*dones
        self.dqn.q_policy.fit(states, q_target, batch_size = self.batch_size, verbose =0)
        # NOTE(review): multiplicative decay with the default epsilon_dec=1e-3
        # collapses epsilon to epsilon_min after a single step; the original
        # commented-out subtractive rule suggests the default was tuned for
        # subtraction -- confirm the intended decay schedule.
        self.epsilon = self.epsilon * self.epsilon_dec if self.epsilon * self.epsilon_dec > self.epsilon_min else self.epsilon_min
        self.it_counter += 1
        # Periodically freeze a copy of the policy weights into the target net.
        if self.it_counter % self.frozen_iterations ==0:
            self.dqn.q_target.set_weights(self.dqn.q_policy.get_weights())
return | ViniTheSwan/ReinforcementTrading | parent/Trading/RL/DeepQLearning.py | DeepQLearning.py | py | 5,489 | python | en | code | 1 | github-code | 13 |
1083577863 | import MapReduce
import sys
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    # record is one row of the join input: record[0] is the source table
    # name ("order" or a line-item table) and record[1] is the join key
    # shared by both tables (presumably the order id -- confirm against
    # the input data).  The original "document identifier / document
    # contents" comments were stale word-count template text.
    key = record[1]
    value = record
    # Group every record by its join key; the reducer pairs the order
    # record with its line items.
    mr.emit_intermediate(key, record)
def reducer(key, list_of_values):
    """Emit the order record joined with each of its line-item records.

    ``list_of_values`` holds every record sharing *key*; the record whose
    first field is "order" is concatenated with each remaining (line-item)
    record and emitted.
    """
    order_records = [rec for rec in list_of_values if rec[0] == "order"]
    line_items = [rec for rec in list_of_values if rec[0] != "order"]
    for item in line_items:
        mr.emit(order_records[0] + item)
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper,reducer)
| fawadrashid/coursera | assignments/datasci-002/assignment3/join.py | join.py | py | 813 | python | en | code | 0 | github-code | 13 |
7183809230 | #!/usr/bin/Python3.9
import json
import openpyxl
import os
from JsonToExcel import *
import platform
base_path = getattr(sys, '_MEIPASS', os.path.dirname(
os.path.abspath(__file__)))
ACCOUNT_HISTORY_PATH = os.path.join(base_path, 'accountHistory.json')
def updateMemory(filePATH):
    """Merge account assignments from an Excel sheet into the account-history JSON.

    Reads the workbook at *filePATH*, collects the description, account
    ("compte") and tax-regime columns, and records every row that has a
    non-empty account into the persisted account-history file.
    """
    with open(ACCOUNT_HISTORY_PATH, "r") as f:
        accountHistory = json.load(f)
    workbook = openpyxl.load_workbook(filePATH)
    worksheet = workbook.active
    descriptions = []
    comptes = []
    taxRegime = []
    # Data rows start at 2 (openpyxl is 1-based and row 1 is presumably a
    # header); max_row-3 suggests the sheet ends with footer/summary rows
    # that must be skipped -- TODO confirm against the sheet layout.
    for i in range(worksheet.max_row-3):
        descriptions.append(worksheet.cell(
            i+2, COLUMNS["DESCRIPTION"]+1).value)
        comptes.append(worksheet.cell(i+2, COLUMNS["COMPTE"]+1).value)
        taxRegime.append(worksheet.cell(
            i+2, COLUMNS["REGIME DE TAXE"]+1).value)
    # Keep only rows with an assigned account; a later duplicate of the
    # same description overwrites the earlier entry.
    for i in range(len(descriptions)):
        if comptes[i] is not None:
            accountHistory[descriptions[i]] = {
                "COMPTE": comptes[i],
                "TAX REGIME": taxRegime[i]
            }
    with open(ACCOUNT_HISTORY_PATH, "w+") as f:
        json.dump(accountHistory, f)
| Gwendalda/BankDocParser | updateAccountHistory.py | updateAccountHistory.py | py | 1,115 | python | en | code | 0 | github-code | 13 |
def safe_int_input(text):
    """Prompt with *text* until the user enters a valid integer.

    Re-prompts on non-numeric input.  Uses a loop instead of the original
    unbounded recursion (which could hit the recursion limit on repeated
    bad input) and catches only ValueError instead of a bare except, so
    real errors such as EOFError/KeyboardInterrupt propagate.
    """
    while True:
        try:
            return int(input(text).strip())
        except ValueError:
            continue
print("#"*50)
print(" Is Leap Year ".center(50,"#"))
print("#"*50)
print("")
current_year = safe_int_input("What year do you want to check? ")
if(current_year % 4 == 0):
if(current_year % 100 == 0):
if(current_year % 400 == 0):
print("Is a Leap Year")
else:
print("Not a leap year")
else:
print("Is a leap year")
else:
print("Not a leap year")
| GameMill/100DaysOfPython | day003/05.is_leap_year.py | 05.is_leap_year.py | py | 553 | python | en | code | 1 | github-code | 13 |
19691607224 | # @encoding: utf-8
# @author : wissingcc
# @contact : chen867820261@gmail.com
# @software: PyCharm
# @file : _svm.py
# @time : 4/2/2022 下午8:11
import numpy as np
from utils.param import init_params
from utils.metric.binary import acc_v2
from utils.metric.regression import mse
from .kernel_func import RBFKernel, LinearKernel, PolynomialKernel, LaprasKernel, SigmoidKernel
class BaseSVM(object):
"""
:param kernel: {'linear', 'poly', 'rbf', 'sigmoid', 'lapras'}, default='rbf'
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'lapras' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
:param degree: int, default=3
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
:param gamma: {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features.
:param C: float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive.
The penalty is a squared l2 penalty.
"""
def __init__(self, C=1., kernel='rbf', degree=3., gamma='scale'):
self.C = C
self.w = None
self.xi = None
self.yi = None
self.b = 0
self.alpha = None
self.degree = degree
self.gamma = gamma
self.kernel_fn = kernel
self.kernel = None
self._score = None
def fit(self, x, y, tol=1.e-3):
pass
def get_gamma(self, x):
_, n_features = x.shape
return 1 / (n_features * x.var()) if self.gamma == 'scale' else 1 / n_features
def get_kernel(self, x):
if self.kernel_fn == 'linear':
return LinearKernel()
elif self.kernel_fn == 'rbf':
return RBFKernel(self.get_gamma(x))
elif self.kernel_fn == 'poly':
return PolynomialKernel(self.degree, self.get_gamma(x))
elif self.kernel_fn == 'lapras':
return LaprasKernel(self.get_gamma(x))
elif self.kernel_fn == 'sigmoid':
return SigmoidKernel(self.get_gamma(x))
def _predict(self, x):
pre = np.sum(self.kernel(x, self.xi) * (self.alpha * self.yi).T, axis=1) + self.b
# pre = x.dot(self.w) + self.b
return pre
def predict(self, x):
return self._predict(x)
def score(self):
return self._score
class SVC(BaseSVM):
"""
only for binary classification
TODO multi classification
"""
def __init__(self, C=1., kernel='linear', degree=3., gamma='scale'):
super(SVC, self).__init__(C=C, kernel=kernel, degree=degree, gamma=gamma)
self.turn = 1
def fit(self, x, y, tol=1.e-3):
m, n = x.shape
self.xi = x.copy()
self.yi = y.copy()
self.yi[self.yi == 0] = -1
self.kernel = self.get_kernel(self.xi)
self.alpha = init_params(m, 1)
for _ in range(m):
old_a = self.alpha
ai = np.argmax(self.alpha)
aj = np.argmin(self.alpha)
while True:
old_ai = self.alpha[ai]
a_y = np.sum(self.alpha * self.yi)
a_i = self.alpha[ai] * self.yi[ai]
a_j = self.alpha[aj] * self.yi[aj]
c = -a_y + a_i + a_j
self.alpha[ai] = (c - 2 / (self.yi[ai] * self.kernel(self.xi[ai], self.xi[aj]))) / (2 * self.yi[ai])
# self.alpha[ai] = c / (2 * y_hat[ai] * y_hat[aj] * x[ai].dot(x[aj].T)) ????
self.alpha[aj] = (c - self.alpha[ai] * self.yi[ai]) / self.yi[aj]
if 0 <= self.alpha[ai] <= self.C or old_ai == self.alpha[ai]:
break
a_updates = np.abs(self.alpha - old_a)
if np.max(a_updates) < tol:
break
b = self.yi - np.sum(self.kernel(self.xi, self.xi) * self.alpha * self.yi, axis=1)
self.b = np.mean(b[self.alpha.reshape(-1) > 0])
self._score = acc_v2(self.predict(self.xi).reshape(-1), y.reshape(-1))
if self._score <= .45:
self.turn = -1
self._score = 1 - self._score
def _predict(self, x):
pre = np.sum(self.kernel(x, self.xi) * (self.alpha * self.yi).T, axis=1) + self.b
pre = np.sign(pre * self.turn)
pre[pre == -1] = 0
return pre
class SVR(BaseSVM):
"""
TODO multi-output regression
:param C: float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive.
The penalty is a squared l2 penalty.
:param epsilon: float, default=0.1
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
:param kernel: {'linear', 'poly', 'rbf', 'sigmoid', 'lapras'}, default='rbf'
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'lapras' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
:param degree: int, default=3
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
:param gamma: {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features.
"""
def __init__(self, C=1., epsilon=0.1, kernel='rbf', degree=3., gamma='scale'):
super(SVR, self).__init__(C=C, kernel=kernel, degree=degree, gamma=gamma)
self.epsilon = epsilon
def fit(self, x, y, tol=1.e-3):
"""
fit Support Vector Regression
:param x: [n_sample, n_feature]
:param y: [n_sample, n_output]
:param tol: float, default=1e-3
Tolerance for stopping criterion.
:return:
"""
m, n = x.shape
self.xi = x.copy()
self.yi = y.copy()
self.kernel = self.get_kernel(x)
self.alpha = init_params(m, 2) # [a, a_hat]
for _ in range(m):
old_a = self.alpha
ai = np.argmax(self.alpha[:, 0])
aj = np.argmin(self.alpha[:, 0])
# optimize alpha
while True:
old_ai = self.alpha[ai, 0]
a_y = np.sum(self.alpha[:, 1] - self.alpha[:, 0])
c = -a_y + (self.alpha[ai, 1] - self.alpha[ai, 0]) + (self.alpha[aj, 1] - self.alpha[aj, 0])
self.alpha[ai: 0] = self.alpha[ai: 1] - c / 2. + (self.epsilon + self.yi[ai]) / self.kernel(self.xi[ai], self.xi[aj])
self.alpha[aj: 0] = self.alpha[aj: 1] - c + self.alpha[ai: 1] - self.alpha[ai: 0]
t_a = self.alpha[ai, 0]
if 0 <= self.alpha[ai, 0] <= self.C or old_ai == self.alpha[ai, 0]:
break
# optimize alpha_hat
ai = np.argmax(self.alpha[:, 1])
aj = np.argmin(self.alpha[:, 1])
while True:
old_ai = self.alpha[ai, 1]
a_y = np.sum(self.alpha[:, 1] - self.alpha[:, 0])
c = -a_y + (self.alpha[ai, 1] - self.alpha[ai, 0]) + (self.alpha[aj, 1] - self.alpha[aj, 0])
self.alpha[ai: 1] = self.alpha[ai: 0] + c / 2. + (self.epsilon - self.yi[ai]) / self.kernel(self.xi[ai], self.xi[aj])
self.alpha[aj: 1] = self.alpha[aj: 0] + c - self.alpha[ai: 1] + self.alpha[ai: 0]
if 0 <= self.alpha[ai, 1] <= self.C or old_ai == self.alpha[ai, 1]:
break
a_updates = np.abs(self.alpha - old_a)
if np.max(a_updates) < tol:
break
b = self.yi + self.epsilon - np.sum(self.kernel(self.xi, self.xi) * (self.alpha[:, 1:] - self.alpha[:, :1]), axis=0)
self.b = np.mean(b[self.alpha[:, 1] != self.alpha[:, 0]])
self._score = mse(self.predict(x), y)
def _predict(self, x):
pre = np.sum(self.kernel(x, self.xi) * (self.alpha[:, 1:] - self.alpha[:, :1]).T, axis=1) + self.b
return pre
| WissingChen/Machine-Learning-Algorithms | model/svm/_svm.py | _svm.py | py | 8,658 | python | en | code | 0 | github-code | 13 |
70138178898 | ######################## IMPORTS ########################
import dataclasses
import re
from enum import Enum
from ecom.datatypes import TypeInfo
# ------------------- PyQt Modules -------------------- #
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from sources.common.widgets.Widgets import ValueWidget, TypeSelector
# --------------------- Sources ----------------------- #
from sources.common.widgets.Widgets import SquareIconButton, ValueWidget, SquareIconButton, TypeSelector
from sources.databases.balloondata import BalloonPackageDatabase, serializeTypedValue
from ecom.datatypes import TypeInfo, StructType, EnumType, ArrayType, DynamicSizeError
######################## CLASSES ########################
# SHARED DATA TYPES ------------------------------------------------------------------------------
class SharedTypesEditorWidget(QWidget):
change = pyqtSignal()
    def __init__(self, database):
        """Build the shared-type browser for *database*.

        Page 0 of the stacked widget is a three-column table (name,
        edit button, description); nested editors are pushed on top of
        it by goToEditor().
        """
        super().__init__()
        self.currentDataType = None
        self.database = database
        # Parallel lists: serialized base-type values and their enum names,
        # used to translate between the two representations.
        self.baseTypesValues = [baseType.value for baseType in TypeInfo.BaseType]
        self.baseTypeNames = [baseType.name for baseType in TypeInfo.BaseType]
        # Stack of currently open nested editors (enum/structure pages).
        self.editorCategories = []
        # DATATYPES TABLE
        self.table = QTableWidget()
        sharedTypesContainer, sharedTypesLayout = QWidget(), QVBoxLayout()
        self.table.setColumnCount(3) # Added a new column for 'Edit' buttons
        self.table.setHorizontalHeaderLabels(['Shared Type', '', 'Description'])
        self.table.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)
        self.table.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
        self.table.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)
        self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
        sharedTypesLayout.addWidget(self.table)
        sharedTypesContainer.setLayout(sharedTypesLayout)
        # STACKED WIDGET
        self.stackedWidget = QStackedWidget()
        self.stackedWidget.addWidget(sharedTypesContainer)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.stackedWidget)
        self.setLayout(mainLayout)
        self.populateDataTypes()
def populateDataTypes(self):
autogeneratedTypes = ['ConfigurationId', 'Configuration', 'TelecommandType', 'TelemetryType', ]
originalTypes = ['TelecommandMessageHeader', 'TelemetryMessageHeader']
for name, typInfo in self.database.dataTypes.items():
if name not in autogeneratedTypes and name not in originalTypes:
rowPosition = self.table.rowCount()
self.table.insertRow(rowPosition)
itemName = QTableWidgetItem(name)
self.table.setItem(rowPosition, 0, itemName)
category = self.getDataTypeCategory(typInfo)
if category in ['Enum', 'Structure']:
editButton = QPushButton(category)
else:
buttonName = typInfo.baseTypeName
if buttonName in self.baseTypesValues:
buttonName = self.baseTypeNames[self.baseTypesValues.index(buttonName)]
editButton = QPushButton(buttonName)
editButton.clicked.connect(self.editDataTypeClicked)
self.table.setCellWidget(rowPosition, 1, editButton)
descriptionItem = QTableWidgetItem(typInfo.description if typInfo.description else '')
self.table.setItem(rowPosition, 2, descriptionItem)
self.table.itemSelectionChanged.connect(self.change.emit)
self.table.itemChanged.connect(self.changingNameOrDescription)
@staticmethod
def getDataTypeCategory(typeInfo):
if issubclass(typeInfo.type, Enum):
return 'Enum'
elif issubclass(typeInfo.type, StructType):
return 'Structure'
elif issubclass(typeInfo.type, ArrayType):
return 'Array'
elif typeInfo.description is None:
return 'Simple'
else:
return 'Advanced'
    def editDataTypeClicked(self):
        """Handle a click on a row's edit button.

        Enums and structures open their dedicated editor page; every other
        category opens a TypeSelector dialog and, on acceptance, relabels
        the button with the chosen type (optionally as an array
        ``Name[size]``).
        """
        senderWidget = self.sender()
        if isinstance(senderWidget, QPushButton):
            # Locate the table row the clicked button lives in.
            row = self.table.indexAt(senderWidget.pos()).row()
            name = self.table.item(row, 0).text()
            baseType, dataType = senderWidget.text(), self.database.dataTypes[name]
            category = self.getDataTypeCategory(dataType)
            if category == 'Enum':
                editor = EnumEditorWidget(self.database, name)
                self.goToEditor(editor)
            elif category == 'Structure':
                editor = StructureEditorWidget(self.database, name)
                self.goToEditor(editor)
            else:
                dialog = TypeSelector(self.database, baseType)
                result = dialog.exec_()
                if result == QDialog.Accepted:
                    # selectedType is (name, isArray, arraySize); base-type
                    # values are upper-cased for display -- presumably to
                    # match the enum-name spelling (confirm with TypeSelector).
                    selectedType = dialog.selectedType
                    selectedTypeName = selectedType[0].upper() if selectedType[0] in self.baseTypesValues else selectedType[0]
                    configType = f'{selectedTypeName}[{selectedType[2]}]' if selectedType[1] else f'{selectedTypeName}'
                    senderWidget.setText(configType)
                    self.change.emit()
def goToEditor(self, editor):
editorContainer = QWidget()
editorLayout = QVBoxLayout(editorContainer)
goBackButton = QPushButton('Go Back', editorContainer)
goBackButton.clicked.connect(self.goBackToPreviousEditor)
if isinstance(editor, StructureEditorWidget):
editor.elementEditCreation.connect(self.goToEditor)
editor.change.connect(self.change.emit)
self.editorCategories.append(editor)
editorLayout.addWidget(goBackButton)
editorLayout.addWidget(editor)
self.stackedWidget.addWidget(editorContainer)
self.stackedWidget.setCurrentWidget(editorContainer)
self.change.emit()
def goBackToPreviousEditor(self):
currentIndex = self.stackedWidget.currentIndex()
self.stackedWidget.setCurrentIndex(currentIndex - 1)
removingEditor = self.stackedWidget.widget(currentIndex)
self.stackedWidget.removeWidget(removingEditor)
removingEditor.deleteLater()
self.editorCategories.pop(-1)
self.change.emit()
    def addDataType(self):
        """Add a new shared data type, or a new element of the open editor.

        With no nested editor open, shows an ElementAdditionDialog and, on
        acceptance, appends a table row mirroring populateDataTypes().
        With a structure editor open, the request is forwarded to it.
        """
        if len(self.editorCategories) == 0:
            dialog = ElementAdditionDialog(self.database, None)
            result = dialog.exec_()
            if result == QDialog.Accepted:
                # Page index in the dialog's stacked widget encodes the chosen
                # category; page 0 is presumably the selection page, hence
                # the -1 offset -- confirm against ElementAdditionDialog.
                elementCategoryIndex = dialog.stackedWidget.currentIndex()
                category = ['Enum', 'Structure', 'Other'][elementCategoryIndex - 1]
                rowPosition = self.table.rowCount()
                self.table.insertRow(rowPosition)
                itemName = QTableWidgetItem(dialog.elementName)
                self.table.setItem(rowPosition, 0, itemName)
                if category in ['Enum', 'Structure']:
                    editButton = QPushButton(category)
                else:
                    # Show the display name for known base types.
                    buttonName = dialog.elementType
                    if buttonName in self.baseTypesValues:
                        buttonName = self.baseTypeNames[self.baseTypesValues.index(buttonName)]
                    editButton = QPushButton(buttonName)
                editButton.clicked.connect(self.editDataTypeClicked)
                self.table.setCellWidget(rowPosition, 1, editButton)
                descriptionItem = QTableWidgetItem('')
                self.table.setItem(rowPosition, 2, descriptionItem)
                # TODO : Add code to add sharedDataType to database
                self.change.emit()
        else:
            if isinstance(self.editorCategories[-1], StructureEditorWidget):
                self.editorCategories[-1].addElement()
def changeDataTypeCategory(self):
if len(self.editorCategories) == 0:
pass
else:
if isinstance(self.editorCategories[-1], StructureEditorWidget):
self.editorCategories[-1].changeElementCategory()
def removeDataType(self):
    """Forward an element-removal request to the structure editor on top, if any."""
    if self.editorCategories and isinstance(self.editorCategories[-1], StructureEditorWidget):
        self.editorCategories[-1].removeElement()
def changingNameOrDescription(self, item):
    """React to edits of a row's name (column 0) or description (column 2).

    Bug fix: ``QTableWidgetItem.row``/``column``/``text`` are methods; the
    previous code bound the methods themselves instead of calling them, so
    the column comparisons below could never be true.
    """
    row, column, text = item.row(), item.column(), item.text()
    if column == 0:
        # TODO : Add name change for struct elements
        pass
    if column == 2:
        # TODO : Add description change for struct elements
        pass
class ElementAdditionDialog(QDialog):
    """Modal dialog that asks for a new element's name and category.

    Used both for new top-level shared data-types (``dataType is None``)
    and for new fields inside a structure (``dataType`` is the parent
    struct name, or a ``[root, child, ...]`` path list for nested structs).
    On accept, ``elementName`` and ``elementType`` hold the user's choice.
    """

    def __init__(self, database, dataType=None):
        super().__init__()
        # Filled in by verifyElementName() just before accept().
        self.elementName, self.elementType = None, None
        self.database, self.dataType = database, dataType
        self.setWindowTitle('Add New Element')
        self.setWindowIcon(QIcon('sources/icons/PyStrato.png'))
        self.setModal(True)
        self.baseTypesValues = [baseType.value for baseType in TypeInfo.BaseType]
        self.baseTypeNames = [baseType.name for baseType in TypeInfo.BaseType]
        # Enums may only be backed by signed/unsigned integer base types.
        self.intTypeNames = [baseType for baseType in self.baseTypeNames if baseType.startswith('INT') or baseType.startswith('UINT')]
        # Names already taken at the insertion point; used to reject duplicates.
        if dataType is None:
            self.elementList = [name for name, typInfo in self.database.dataTypes.items()]
        elif isinstance(dataType, list):
            # Walk the nested-structure path down to the parent of the new element.
            structInfo = self.database.getTypeInfo(self.dataType[0])
            for element in self.dataType[1:]:
                for name, child in structInfo.type:
                    if name == element:
                        structInfo = structInfo.type[name]
            self.elementList = [name for name, child in structInfo.type]
        else:
            structInfo = self.database.getTypeInfo(self.dataType)
            self.elementList = [name for name, child in structInfo.type]
        # CATEGORIES SELECTION
        categoryLayout = QHBoxLayout()
        categoryButtonGroup = QButtonGroup(self)
        self.enumButton = QPushButton('ENUM', self)
        self.enumButton.setCheckable(True)
        categoryButtonGroup.addButton(self.enumButton)
        self.structButton = QPushButton('STRUCT', self)
        self.structButton.setCheckable(True)
        categoryButtonGroup.addButton(self.structButton)
        self.otherButton = QPushButton('OTHER', self)
        self.otherButton.setCheckable(True)
        categoryButtonGroup.addButton(self.otherButton)
        categoryLayout.addWidget(self.enumButton)
        categoryLayout.addWidget(self.structButton)
        categoryLayout.addWidget(self.otherButton)
        categoryButtonGroup.buttonClicked.connect(self.categoryChosen)
        # ENTRIES & BUTTONS
        # Stack pages: 0 = placeholder, 1 = enum, 2 = structure, 3 = other.
        self.stackedWidget = QStackedWidget()
        chooseLabel = QLabel('Choose a Category ...')
        self.stackedWidget.addWidget(chooseLabel)
        self.enumLineEdit = QLineEdit()
        self.enumTypeComboBox = QComboBox()
        self.enumTypeComboBox.addItems(self.intTypeNames)
        enumSelector = QWidget()
        enumLayout = QVBoxLayout(enumSelector)
        enumLayout.addWidget(self.enumLineEdit)
        enumLayout.addWidget(self.enumTypeComboBox)
        self.stackedWidget.addWidget(enumSelector)
        self.structureLineEdit = QLineEdit()
        structureSelector = QWidget()
        structureLayout = QVBoxLayout(structureSelector)
        structureLayout.addWidget(self.structureLineEdit)
        self.stackedWidget.addWidget(structureSelector)
        self.otherLineEdit = QLineEdit()
        self.otherTypeButton = QPushButton(self.baseTypeNames[0])
        self.otherTypeButton.clicked.connect(self.changeOtherType)
        otherSelector = QWidget()
        otherLayout = QVBoxLayout(otherSelector)
        otherLayout.addWidget(self.otherLineEdit)
        otherLayout.addWidget(self.otherTypeButton)
        self.stackedWidget.addWidget(otherSelector)
        self.stackedWidget.setCurrentIndex(0)
        self.okButton = QPushButton('OK')
        self.cancelButton = QPushButton('Cancel')
        self.okButton.clicked.connect(self.verifyElementName)
        self.cancelButton.clicked.connect(self.reject)
        # MAIN LAYOUT
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(self.okButton)
        buttonLayout.addWidget(self.cancelButton)
        mainLayout = QVBoxLayout()
        mainLayout.addLayout(categoryLayout)
        mainLayout.addWidget(self.stackedWidget)
        mainLayout.addLayout(buttonLayout)
        self.setLayout(mainLayout)

    def categoryChosen(self, button):
        """Switch the stacked page to the chosen category, carrying over the
        name already typed so the user does not lose it."""
        categoryName = button.text()
        currentElementName = self.getCurrentElementName()
        if categoryName == 'ENUM':
            self.stackedWidget.setCurrentIndex(1)
            self.enumLineEdit.setText(currentElementName)
        elif categoryName == 'STRUCT':
            self.stackedWidget.setCurrentIndex(2)
            self.structureLineEdit.setText(currentElementName)
        elif categoryName == 'OTHER':
            self.stackedWidget.setCurrentIndex(3)
            self.otherLineEdit.setText(currentElementName)
        else:
            self.stackedWidget.setCurrentIndex(0)

    def changeOtherType(self):
        """Open the type selector for the 'OTHER' category and update the
        type button with the selection (array types render as NAME[len])."""
        if self.dataType is None:
            dialog = TypeSelector(self.database, typeName=self.otherTypeButton.text())
        elif isinstance(self.dataType, list):
            dialog = TypeSelector(self.database, typeName=self.otherTypeButton.text(), dataType=self.dataType[0], haveDataTypes=True)
        else:
            dialog = TypeSelector(self.database, typeName=self.otherTypeButton.text(), dataType=self.dataType, haveDataTypes=True)
        result = dialog.exec_()
        if result == QDialog.Accepted:
            selectedType = dialog.selectedType
            # selectedType is (name, isArray, arrayLength); raw base values are upper-cased.
            selectedTypeName = selectedType[0].upper() if selectedType[0] in self.baseTypesValues else selectedType[0]
            configType = f'{selectedTypeName}[{selectedType[2]}]' if selectedType[1] else f'{selectedTypeName}'
            self.otherTypeButton.setText(configType)

    def getCurrentElementName(self):
        """Return the name typed on the currently visible page ('' on the placeholder)."""
        currentIndex = self.stackedWidget.currentIndex()
        if currentIndex == 1:
            return self.enumLineEdit.text()
        elif currentIndex == 2:
            return self.structureLineEdit.text()
        elif currentIndex == 3:
            return self.otherLineEdit.text()
        else:
            return ''

    def verifyElementName(self):
        """Accept the dialog only when the entered name is non-empty and unused."""
        name = self.getCurrentElementName()
        if name in self.elementList:
            if self.dataType is None:
                QMessageBox.warning(self, 'Used Name', 'This data-type name is already in use.')
            else:
                QMessageBox.warning(self, 'Used Name', 'This element name is already in use.')
        elif len(name) == 0:
            QMessageBox.warning(self, 'No Name Entered', 'No name was entered.')
        else:
            self.elementName = name
            # NOTE(review): for the STRUCT page this falls back to the 'other'
            # type button's text; callers appear to read elementType only for
            # enum/other categories - confirm.
            self.elementType = self.enumTypeComboBox.currentText() if self.stackedWidget.currentIndex() == 1 else self.otherTypeButton.text()
            self.accept()
# ENUMERATORS -----------------------------------------------------------------------------
class EnumEditorWidget(QWidget):
    """Editor page showing an enum's members (name, value, description).

    Bug fixes: ``changeEnumValue`` tested ``column == 0`` twice (second
    branch now correctly handles column 1); ``addEnumValue`` read the
    value column through ``cellWidget`` although values are stored with
    ``setItem`` (see ``addValueRow``), which made ``.text()`` fail on None.
    """
    change = pyqtSignal()

    def __init__(self, database, dataType):
        super().__init__()
        # UI ELEMENTS
        self.enumTypeInfo = None
        self.database, self.dataType = database, dataType
        self.baseTypesValues = [baseType.value for baseType in TypeInfo.BaseType]
        self.baseTypeNames = [baseType.name for baseType in TypeInfo.BaseType]
        self.valuesTableWidget = QTableWidget()
        self.valuesTableWidget.setColumnCount(3)
        self.valuesTableWidget.setHorizontalHeaderLabels(['Name', 'Value', 'Description'])
        self.valuesTableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)
        self.valuesTableWidget.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
        self.valuesTableWidget.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)
        self.valuesTableWidget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.populateValues()
        # MAIN LAYOUT
        layout = QVBoxLayout(self)
        layout.addWidget(self.valuesTableWidget)

    def populateValues(self):
        """Resolve the enum's TypeInfo (walking a nested path when dataType
        is a list) and fill one table row per enum member."""
        if isinstance(self.dataType, list):
            enumTypeInfo = self.database.getTypeInfo(self.dataType[0])
            for element in self.dataType[1:]:
                for name, child in enumTypeInfo.type:
                    if name == element:
                        enumTypeInfo = enumTypeInfo.type[name]
                        break
        else:
            enumTypeInfo = self.database.getTypeInfo(self.dataType)
        self.enumTypeInfo = enumTypeInfo
        enumValues = self.enumTypeInfo.type.__members__
        for row, (name, value) in enumerate(enumValues.items()):
            self.addValueRow(name, str(value.value), value.__doc__ if value.__doc__ else '')
        self.valuesTableWidget.resizeColumnsToContents()
        self.valuesTableWidget.itemChanged.connect(self.changeEnumValue)
        self.valuesTableWidget.itemSelectionChanged.connect(self.change.emit)

    def addValueRow(self, name, value, description):
        """Append one (name, value, description) row to the values table."""
        rowPosition = self.valuesTableWidget.rowCount()
        self.valuesTableWidget.insertRow(rowPosition)
        nameItem = QTableWidgetItem(name)
        self.valuesTableWidget.setItem(rowPosition, 0, nameItem)
        valueItem = QTableWidgetItem(value)
        self.valuesTableWidget.setItem(rowPosition, 1, valueItem)
        descriptionItem = QTableWidgetItem(description)
        self.valuesTableWidget.setItem(rowPosition, 2, descriptionItem)

    def changeEnumValue(self, item):
        """React to an edited cell (name / value / description)."""
        row, column = item.row(), item.column()
        if column == 0:
            pass
        elif column == 1:
            # Fixed: this branch previously re-tested column == 0 and was dead.
            pass
        elif column == 2:
            pass
        # TODO : Add code to change Enum Value as well as generated values below it
        self.change.emit()

    def addEnumValue(self):
        """Ask for a new member name and append it with the next free value."""
        dialog = EnumValueAdditionDialog(self.enumTypeInfo)
        result = dialog.exec_()
        if result == QDialog.Accepted:
            valueName = dialog.nameLineEdit.text()
            # Fixed: values live in table items, not cell widgets.
            values = [self.valuesTableWidget.item(row, 1).text() for row in
                      range(self.valuesTableWidget.rowCount())]
            digits = [s for s in reversed(values) if s.isdigit()]
            self.addValueRow(valueName, str(int(digits[0]) + 1), '')
            # TODO : Add code for enum value addition
            self.change.emit()

    def deleteEnumValue(self):
        """Confirm and remove the selected member rows (highest index first)."""
        selectedRows = [item.row() for item in self.valuesTableWidget.selectedItems()]
        if len(selectedRows):
            selectedRows = sorted(list(set(selectedRows)))
            dialog = EnumValueDeletionMessageBox(selectedRows)
            result = dialog.exec_()
            if result == QMessageBox.Yes:
                for row in reversed(selectedRows):
                    self.valuesTableWidget.removeRow(row)
                # TODO : Add enum value deletion
                # TODO : Add code to update other enum values based on the changes
                self.change.emit()
class EnumValueAdditionDialog(QDialog):
    """Modal prompt asking for a unique name for a new enum member."""

    def __init__(self, enumTypeInfo):
        super().__init__()
        self.setWindowTitle('Add Enum Value')
        self.setWindowIcon(QIcon('sources/icons/PyStrato.png'))
        self.setModal(True)
        members = enumTypeInfo.type.__members__
        # Existing member names/values, used to reject duplicate names.
        self.names, self.values = zip(*[(memberName, member.value) for memberName, member in members.items()])
        # ENTRIES & BUTTONS
        self.nameLabel = QLabel('Name:')
        self.nameLineEdit = QLineEdit()
        self.okButton = QPushButton('OK')
        self.cancelButton = QPushButton('Cancel')
        self.okButton.clicked.connect(self.verifyEnumValueName)
        self.cancelButton.clicked.connect(self.reject)
        # LAYOUT
        buttonRow = QHBoxLayout()
        buttonRow.addWidget(self.okButton)
        buttonRow.addWidget(self.cancelButton)
        mainLayout = QVBoxLayout(self)
        mainLayout.addWidget(self.nameLabel)
        mainLayout.addWidget(self.nameLineEdit)
        mainLayout.addLayout(buttonRow)
        self.setLayout(mainLayout)

    def verifyEnumValueName(self):
        """Accept only a non-empty name that is not already a member."""
        enteredName = self.nameLineEdit.text()
        if enteredName in self.names:
            QMessageBox.warning(self, 'Used Name', 'This enum value name is already in use.')
        elif len(enteredName) == 0:
            QMessageBox.warning(self, 'No Name Entered', 'No name was entered.')
        else:
            self.accept()
class EnumValueDeletionMessageBox(QMessageBox):
    """Yes/No confirmation shown before deleting the selected enum values."""

    def __init__(self, selectedRows):
        super().__init__()
        self.setModal(True)
        self.setWindowIcon(QIcon('sources/icons/PyStrato.png'))
        self.setIcon(QMessageBox.Question)
        self.setWindowTitle('Confirmation')
        self.setText(f'You are going to delete {len(selectedRows)} values(s).\n Do you want to proceed?')
        for standardButton in (QMessageBox.Yes, QMessageBox.No):
            self.addButton(standardButton)
        self.setDefaultButton(QMessageBox.No)
# STRUCTURES -----------------------------------------------------------------------------
class StructureEditorWidget(QWidget):
    """Editor page listing a structure's fields (name, type button, description).

    Bug fixes applied:
    - ``removeElement`` referenced a non-existent ``self.unitsTable``
      (the table attribute is ``elementTable``).
    - ``addElement`` indexed the category list without the -1 offset,
      although page 0 of the addition dialog's stack is the placeholder
      (the top-level ``addDataType`` uses ``elementCategoryIndex - 1``).
    - ``addElement`` connected to a non-existent ``self.editDataTypeClicked``;
      this class's handler is ``typeButtonClicked``.
    - ``changingNameOrDescription`` bound ``item.row``/``column``/``text``
      without calling them, so the column tests could never match.
    - Removed a leftover debug ``print`` in ``typeButtonClicked``.
    """
    elementEditCreation = pyqtSignal(QWidget)
    change = pyqtSignal()

    def __init__(self, database, dataType):
        super().__init__()
        self.structureInfo = None
        self.database, self.dataType = database, dataType
        self.baseTypesValues = [baseType.value for baseType in TypeInfo.BaseType]
        self.baseTypeNames = [baseType.name for baseType in TypeInfo.BaseType]
        # ELEMENT TABLE & BUTTON
        self.elementTable = QTableWidget()
        self.elementTable.setColumnCount(3)
        self.elementTable.setHorizontalHeaderLabels(['Element', 'Type', 'Description'])
        self.elementTable.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)
        self.elementTable.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
        self.elementTable.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)
        self.populateElements()
        # MAIN LAYOUT
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.elementTable)
        self.setLayout(mainLayout)

    def populateElements(self):
        """Resolve the structure's TypeInfo (walking a nested path when
        dataType is a list) and fill one table row per field."""
        if isinstance(self.dataType, list):
            structInfo = self.database.getTypeInfo(self.dataType[0])
            for element in self.dataType[1:]:
                for name, child in structInfo.type:
                    if name == element:
                        structInfo = structInfo.type[name]
                        break
        else:
            structInfo = self.database.getTypeInfo(self.dataType)
        self.structureInfo = structInfo
        for row, (name, child) in enumerate(structInfo.type):
            rowPosition = self.elementTable.rowCount()
            self.elementTable.insertRow(rowPosition)
            nameItem = QTableWidgetItem(name)
            self.elementTable.setItem(row, 0, nameItem)
            category = self.getTypeCategory(child)
            # BUTTON NAME: concrete type for simple fields, category for nested ones.
            if category in ['Simple', 'Array', 'Advanced'] or child.baseTypeName in list(
                    self.database.dataTypes.keys()):
                buttonName = child.baseTypeName
                if buttonName in self.baseTypesValues:
                    buttonName = self.baseTypeNames[self.baseTypesValues.index(buttonName)]
            else:
                buttonName = category
            editButton = QPushButton(buttonName)
            editButton.clicked.connect(self.typeButtonClicked)
            self.elementTable.setCellWidget(rowPosition, 1, editButton)
            descriptionItem = QTableWidgetItem(child.description)
            self.elementTable.setItem(row, 2, descriptionItem)
        self.elementTable.itemSelectionChanged.connect(self.change.emit)
        self.elementTable.itemChanged.connect(self.changingNameOrDescription)

    def addElement(self):
        """Ask for a new field through the addition dialog and append its row."""
        dialog = ElementAdditionDialog(self.database, self.dataType)
        result = dialog.exec_()
        if result == QDialog.Accepted:
            elementCategoryIndex = dialog.stackedWidget.currentIndex()
            # Fixed off-by-one: page 0 of the dialog stack is the placeholder.
            category = ['Enum', 'Structure', 'Other'][elementCategoryIndex - 1]
            rowPosition = self.elementTable.rowCount()
            self.elementTable.insertRow(rowPosition)
            itemName = QTableWidgetItem(dialog.elementName)
            self.elementTable.setItem(rowPosition, 0, itemName)
            if category in ['Enum', 'Structure']:
                editButton = QPushButton(category)
            else:
                buttonName = dialog.elementType
                if buttonName in self.baseTypesValues:
                    buttonName = self.baseTypeNames[self.baseTypesValues.index(buttonName)]
                editButton = QPushButton(buttonName)
            # Fixed: this class's handler is typeButtonClicked.
            editButton.clicked.connect(self.typeButtonClicked)
            self.elementTable.setCellWidget(rowPosition, 1, editButton)
            descriptionItem = QTableWidgetItem('')
            self.elementTable.setItem(rowPosition, 2, descriptionItem)
            # TODO : Add code to add element in struct
            self.change.emit()

    def changeElementCategory(self):
        # TODO : Add element category change
        pass

    def removeElement(self):
        """Confirm and remove the selected field rows (highest index first)."""
        # Fixed: the table attribute is elementTable (unitsTable never existed).
        selectedRows = [item.row() for item in self.elementTable.selectedItems()]
        if len(selectedRows):
            selectedRows = sorted(list(set(selectedRows)))
            dialog = ElementDeletionMessageBox(selectedRows)
            result = dialog.exec_()
            if result == QMessageBox.Yes:
                for row in reversed(selectedRows):
                    self.elementTable.removeRow(row)
                # TODO : Add element deletion
                self.change.emit()

    def changingNameOrDescription(self, item):
        """React to edits of a field's name (column 0) or description (column 2)."""
        row, column, text = item.row(), item.column(), item.text()
        if column == 0:
            # TODO : Add name change for struct elements
            pass
        if column == 2:
            # TODO : Add description change for struct elements
            pass

    def typeButtonClicked(self):
        """Open the right editor (enum/struct page, or type selector) for the
        field whose type button was clicked."""
        senderWidget = self.sender()
        if isinstance(senderWidget, QPushButton):
            row = self.elementTable.indexAt(senderWidget.pos()).row()
            name = self.elementTable.item(row, 0).text()
            baseType, dataType = senderWidget.text(), self.structureInfo.type[name]
            dataTypes = [self.dataType, name] if not isinstance(self.dataType, list) else self.dataType + [name]
            if baseType == 'Enum':
                editor = EnumEditorWidget(self.database, dataTypes)
                self.elementEditCreation.emit(editor)
            elif baseType == 'Structure':
                editor = StructureEditorWidget(self.database, dataTypes)
                self.elementEditCreation.emit(editor)
            else:
                dialog = TypeSelector(self.database, typeName=baseType, dataType=dataTypes[0], haveDataTypes=True)
                result = dialog.exec_()
                if result == QDialog.Accepted:
                    selectedType = dialog.selectedType
                    selectedTypeName = selectedType[0].upper() if selectedType[0] in self.baseTypesValues else selectedType[0]
                    configType = f'{selectedTypeName}[{selectedType[2]}]' if selectedType[1] else f'{selectedTypeName}'
                    senderWidget.setText(configType)

    @staticmethod
    def getTypeCategory(typeInfo):
        """Classify a TypeInfo as Enum / Structure / Array / Simple / Advanced."""
        if issubclass(typeInfo.type, Enum):
            return 'Enum'
        elif issubclass(typeInfo.type, StructType):
            return 'Structure'
        elif issubclass(typeInfo.type, ArrayType):
            return 'Array'
        elif typeInfo.description is None:
            return 'Simple'
        else:
            return 'Advanced'
class ElementDeletionMessageBox(QMessageBox):
    """Yes/No confirmation shown before deleting the selected structure fields."""

    def __init__(self, selectedRows):
        super().__init__()
        self.setModal(True)
        self.setWindowIcon(QIcon('sources/icons/PyStrato.png'))
        self.setIcon(QMessageBox.Question)
        self.setWindowTitle('Confirmation')
        self.setText(f'You are going to delete {len(selectedRows)} element(s).\n Do you want to proceed?')
        for standardButton in (QMessageBox.Yes, QMessageBox.No):
            self.addButton(standardButton)
        self.setDefaultButton(QMessageBox.No)
35935406253 | #!/usr/bin/env python2
from __init__ import load_config
import sys, os
sys.path.append(os.environ['MAGPHASE'])
import magphase as mp
import libutils as lu
import libaudio as la
from os import path
from argparse import ArgumentParser
if __name__ == '__main__':
    # Re-align HTS state labels for a list of sentences (Python 2 script).
    p = ArgumentParser()
    p.add_argument('-s', '--senlst', dest='senlst', required=True)   # file with one sentence id per line
    p.add_argument('-c', '--config', dest='config', required=True)   # experiment config for load_config
    a = p.parse_args()
    load_config(a.config)
    # load_config populates the package namespace (HTS1DIR, HTS2DIR, ACO1DIR,
    # cfg_data, print1, ...) which this star-import then pulls in.
    from __init__ import *
    CONST_RATE = cfg_data.get('const', True)
    with open(a.senlst) as f:
        sentences = [l.rstrip() for l in f]
    if CONST_RATE:
        # Constant frame rate: labels are reusable as-is, just symlink them.
        for s in sentences:
            os.symlink(path.join(HTS1DIR, s+'.lab'), path.join(HTS2DIR, s+'.lab'))
    else:
        # Variable frame rate: recompute per-state frame counts from the
        # MagPhase shift track and rewrite each label file.
        for s in sentences:
            htsfile = path.join(HTS1DIR, s+'.lab')
            outfile = path.join(HTS2DIR, s+'.lab')
            try:
                shift = lu.read_binfile(path.join(ACO1DIR, s+'.shift'), dim=1)
                frames = mp.get_num_of_frms_per_state(shift, htsfile, 48000, False)
                la.convert_label_state_align_to_var_frame_rate(htsfile, frames, outfile)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # Best-effort batch: skip sentences with missing/corrupt features.
                # NOTE(review): the bare except also hides real bugs - consider logging.
                pass
            else:
                print1(s)   # presumably a logging helper from the config module - confirm
| KurtAhn/SLCV | src/realign-states.py | realign-states.py | py | 1,319 | python | en | code | 2 | github-code | 13 |
26318244592 | import random
import copy
import math
import time
from tkinter.constants import W
import numpy as np
def Print(worduko):
    """Print the 9x9 grid: space-separated cells (with a trailing space) and a
    blank line after every row, matching the original cell-by-cell output."""
    for r in range(9):
        rowText = ' '.join(str(worduko[r][c]) for c in range(9))
        print(rowText + ' \n')
def count_conflict(worduko, irow, icol):
    """Count cells clashing with (irow, icol): same value in its row, column,
    or 3x3 box, excluding the cell itself.

    Note: a cell sharing both the row (or column) and the box with the target
    is counted twice - row/column scan plus box scan - matching the original
    scoring function exactly.
    """
    value = worduko[irow][icol]
    conflicts = sum(1 for r in range(len(worduko)) if r != irow and worduko[r][icol] == value)
    conflicts += sum(1 for c in range(len(worduko[0])) if c != icol and worduko[irow][c] == value)
    boxRow, boxCol = 3 * (irow // 3), 3 * (icol // 3)
    for r in range(boxRow, boxRow + 3):
        for c in range(boxCol, boxCol + 3):
            if (r, c) != (irow, icol) and worduko[r][c] == value:
                conflicts += 1
    return conflicts
def get_random_conflicts(worduko, original_wordkudo, random_conflict_value):
    """Scan row-major and return the position of the Nth originally-blank cell
    that currently has conflicts (N = random_conflict_value).

    If fewer conflicted cells exist than N, the last scanned position is
    returned, matching the original fallback behaviour.
    """
    row = col = 0
    for row in range(len(worduko)):
        for col in range(len(worduko[row])):
            wasBlank = original_wordkudo[row][col] == '*'
            if wasBlank and count_conflict(worduko, row, col) > 0:
                if random_conflict_value == 0:
                    return (row, col)
                random_conflict_value -= 1
    return (row, col)
def min_conflict(selected_domain, worduko, selected_row, selected_col):
    """Return the domain letter producing the fewest conflicts at the cell.

    The cell's current letter is never re-evaluated; ties keep the earliest
    candidate. As in the original, the board cell is temporarily overwritten
    while scoring candidates (the caller assigns the result right after).
    """
    currentLetter = worduko[selected_row][selected_col]
    bestLetter = selected_domain[0]
    worduko[selected_row][selected_col] = bestLetter
    bestScore = 10000
    for candidate in selected_domain:
        if candidate == currentLetter:
            continue
        worduko[selected_row][selected_col] = candidate
        score = count_conflict(worduko, selected_row, selected_col)
        if score < bestScore:
            bestLetter, bestScore = candidate, score
    return bestLetter
def intialize_worduko(worduko, variables):
    """Fill every blank ('*') cell in place with a random letter from the
    corresponding variable's domain."""
    for r, rowCells in enumerate(worduko):
        for c, cell in enumerate(rowCells):
            if cell == '*':
                worduko[r][c] = random.choice(variables[r][c].get_domain())
class Variable():
    """A CSP variable: a board position plus its remaining letter domain."""

    def __init__(self, position, domain):
        self.position = position  # (row, col) tuple
        self.domain = domain      # mutable list of candidate letters

    def remove_domain(self, num):
        """Remove *num* from the domain; silently ignore it if absent.

        Bug fix: the previous bare ``except`` could mask any error (even
        KeyboardInterrupt); only the expected ValueError from list.remove
        is suppressed now.
        """
        try:
            self.domain.remove(num)
        except ValueError:
            pass

    def Print_object(self):
        """Debug print of position and domain on one line (no newline)."""
        print(self.position, end=' ')
        print(self.domain, end=' ')

    def get_domain(self):
        return self.domain
def update_constraints(state, variables):
    """Prune each filled cell's value from the domains of every variable in
    the same row, column, and 3x3 box (None entries are skipped)."""
    for row in range(len(state)):
        for column in range(len(state[row])):
            value = state[row][column]
            if value == '*':
                continue
            peers = [(i, column) for i in range(len(state))]
            peers += [(row, j) for j in range(len(state[0]))]
            boxRow, boxCol = 3 * (row // 3), 3 * (column // 3)
            peers += [(i, j)
                      for i in range(boxRow, boxRow + 3)
                      for j in range(boxCol, boxCol + 3)]
            for i, j in peers:
                if variables[i][j] is not None:
                    variables[i][j].remove_domain(value)
def count_variables(variables):
    """Count positions that still hold a Variable object (unfilled cells)."""
    return sum(1 for rowVars in variables for v in rowVars if v is not None)
def Total_conflicts(wordoku):
    """Sum of per-cell conflict counts over the whole board (each conflicting
    pair contributes from both of its cells)."""
    return sum(count_conflict(wordoku, i, j)
               for i in range(len(wordoku))
               for j in range(len(wordoku[i])))
def select_variable(variables):
    """Minimum-remaining-values heuristic: return the position of the unfilled
    variable with the smallest domain, scanning row-major so the first minimum
    wins; None when no variables remain."""
    bestPosition, bestSize = None, None
    for i, rowVars in enumerate(variables):
        for j, var in enumerate(rowVars):
            if var is None:
                continue
            if bestSize is None or len(var.domain) < bestSize:
                bestSize = len(var.domain)
                bestPosition = (i, j)
    return bestPosition
def valid_variables(variables):
    """Return False when any remaining variable has an empty (wiped-out) domain."""
    for rowVars in variables:
        for var in rowVars:
            if var is not None and len(var.domain) == 0:
                return False
    return True
def is_valid_state(state, letter_domain):
    """True iff every filled cell holds a letter from letter_domain and no
    filled letter repeats within its row, column, or 3x3 box ('*' is blank)."""
    for row in range(len(state)):
        for column in range(len(state[row])):
            letter = state[row][column]
            if letter == '*':
                continue
            if letter not in letter_domain:
                return False
            if any(state[i][column] == letter for i in range(len(state)) if i != row):
                return False
            if any(state[row][j] == letter for j in range(len(state[0])) if j != column):
                return False
            boxRow, boxCol = 3 * (row // 3), 3 * (column // 3)
            for i in range(boxRow, boxRow + 3):
                for j in range(boxCol, boxCol + 3):
                    if (i, j) != (row, column) and state[i][j] == letter:
                        return False
    return True
def main():
    """Solve the wordoku in ``input5.txt`` by min-conflicts local search.

    Reads the puzzle, builds per-cell domains, random-initialises the blanks,
    then repeatedly reassigns a random conflicted cell to its least-conflicting
    letter. On success writes ``solution.txt`` and returns True; returns False
    when 1500 iterations do not converge.

    NOTE(review): the ``return False`` at the bottom sits inside the outer
    ``for loop`` body, so the 8e10-iteration restart loop never runs more than
    once - the restart was presumably meant to ``continue`` instead; confirm.
    """
    # starting the main function
    start_time = time.process_time()
    for loop in range(int(8e10)):
        # reading the file
        file = open("input5.txt", "r")
        lines = file.readlines()
        file.close()
        worduko = []
        letter_domain = set()
        # adding the wordoku in 2-d vector; every non-'*' character seen
        # becomes part of the shared letter domain
        for line in lines:
            row = []
            row = list(line)
            for letter in row:
                if letter != '*' and letter != '\n':
                    letter_domain.add(letter)
            if (row[-1] == '\n'):
                row = row[:-1]
            worduko.append(row)
        # Print(worduko)
        # copying the wordoku: keeps track of which cells were originally blank
        original_wordkudo = copy.deepcopy(worduko)
        # if not is_valid_state(worduko, letter_domain):
        #     print("NOT POSSIBLE")
        #     return
        # variables[i][j] holds a Variable (position + domain) for each blank cell
        variables = [[None for i in range(len(worduko))]
                     for j in range(len(worduko[0]))]
        for i in range(len(worduko)):
            for j in range(len(worduko[i])):
                if worduko[i][j] == '*':
                    # print(letter_domain)
                    variables[i][j] = (Variable((i, j), list(letter_domain)))
        # slicing down the domains of the variables using row/col/box constraints
        update_constraints(worduko, variables)
        # initializing the wordoku with some values of their domain
        intialize_worduko(worduko, variables)
        # Print(worduko)
        if (is_valid_state(worduko, letter_domain) == True):
            print("Answer Came")
            Print(worduko)
        # starting the master loop: min-conflicts iterations
        for ind in range(1500):
            total_c = Total_conflicts(worduko)
            # list of (row*9+col) encodings of originally-blank cells in conflict
            total_conflited_variable = []
            for row in range(len(worduko)):
                for col in range(len(worduko[row])):
                    if (original_wordkudo[row][col] == '*' and count_conflict(worduko, row, col) > 0):
                        total_conflited_variable.append(row * 9 + col)
            # no conflicts left -> solved: dump the board and stop
            if (len(total_conflited_variable) == 0):
                Print(worduko)
                with open("solution.txt", 'w') as file:
                    file.writelines(''.join(str(j)
                                            for j in i) + '\n' for i in worduko)
                print("Time Taken = ", time.process_time() - start_time)
                return True
            # pick a random conflicted cell and decode its (row, col)
            random_conflict_value = random.choice(total_conflited_variable)
            selected_row = random_conflict_value // 9
            selected_col = random_conflict_value % 9
            selected_domain = variables[selected_row][selected_col].get_domain()
            # selecting the letter with minimum conflicts
            letter_with_minimum_conflicts = min_conflict(selected_domain, worduko, selected_row, selected_col)
            # adding the letter with minimum conflicts to the wordoku
            worduko[selected_row][selected_col] = letter_with_minimum_conflicts
            # Print(worduko)
        return False
# if(solution_state == None):
# Print("Not Possible")
# return
# Print(solution_state)
# with open("solution.txt", 'w') as file:
# file.writelines(''.join(str(j)
#
# for j in i) + '\n' for i in solution_state)
# Entry point: run the solver once and report failure to converge.
answer = main()
if (answer == False):
    print("Could not find answer")
| pankajk22/Artificial-Intelligence-Assignments | Assigment-1/WordokuSolver/WordokuSolver_minconflict.py | WordokuSolver_minconflict.py | py | 10,903 | python | en | code | 0 | github-code | 13 |
636916815 | import os
import time
from api.batcher_api import post_pop_batch
host = os.environ["BATCHER_HOST"]
def job():
    """Pop one batch from the batcher service at ``host`` and print it.

    Terminates the whole process with status 1 on connection failure.
    NOTE(review): this catches the *builtin* ConnectionError; if
    post_pop_batch raises requests.exceptions.ConnectionError, that class
    does not subclass the builtin one - confirm which exception is raised.
    """
    try:
        batch = post_pop_batch(host)
        print(batch)
    except ConnectionError:
        print("Exited with network error")
        exit(1)
if __name__ == "__main__":
while True:
job()
time.sleep(2)
| oMalyugina/send_less_than_4_messages_per_day | src/collector.py | collector.py | py | 348 | python | en | code | 0 | github-code | 13 |
11540853229 | from Bio import Entrez
from datetime import datetime
Entrez.email = "270992395@qq.com"
def name_to_gcfs(term):
    """Resolve an organism name to NCBI RefSeq assembly accessions.

    Tries progressively weaker filters: reference genome, then
    representative genome, then any complete/chromosome-level assembly.
    Returns (gcfs, category, assembly_level) where category is one of
    "Reference", "Represent", "Unreference" or "-".
    """
    # Parentheses break Entrez query syntax, so replace them with spaces.
    term = term.replace('(', ' ').replace(')', ' ')
    # provide your own mail here
    # Restrict to latest, non-anomalous, annotated RefSeq assemblies.
    term += ' AND (latest[filter] AND all[filter] NOT anomalous[filter] "refseq has annotation"[Properties])'
    refer_term = term + ' AND ("complete genome"[filter] OR "chromosome level"[filter]) AND "reference genome"[filter]'
    # refer_term = term + ' AND ("complete genome"[filter] OR "chromosome level"[filter] OR "scaffold level"[filter]) AND "reference genome"[filter]'
    ids = search_assembly(refer_term)
    if ids:
        # print('Found {} reference genomes'.format(len(ids)))
        # return ids
        gcfs, assembly_level = get_gcf(ids)
        category = "Reference"
    else:
        # print('No reference genomes found')
        # Fallback 1: representative genomes.
        represent_term = term + \
            ' AND ("complete genome"[filter] OR "chromosome level"[filter] ) AND "representative genome"[filter]'
        # ' AND ("complete genome"[filter] OR "chromosome level"[filter] OR "scaffold level"[filter]) AND "representative genome"[filter]'
        ids = search_assembly(represent_term)
        # print(ids)
        if ids:
            # print(ids)
            # print('Found {} representative genomes'.format(len(ids)))
            gcfs, assembly_level = get_gcf(ids)
            category = "Represent"
        else:
            # Fallback 2: any complete/chromosome-level assembly.
            # complete_term = term + ' AND "complete genome"[All Fields]'
            term += ' AND ("complete genome"[filter] OR "chromosome level"[filter])'
            ids = search_assembly(term)
            gcfs, assembly_level = get_gcf(ids)
            category = "Unreference" if gcfs else "-"
            # if ids:
            #     chromosome_term = term + ' AND "chromosome level"[filter]'
            #     ids = search_assembly(chromosome_term)
            #     gcfs = get_gcf(ids)
            #     level = "complete" if gcfs else "-"
    return gcfs, category, assembly_level
def search_assembly(term):
    """Search NCBI assembly database for a given search term.

    Args:
        term: search term, usually organism name

    Returns:
        List of Entrez assembly ids (strings) matching the query.
    """
    # retmax caps the number of ids returned by the search.
    handle = Entrez.esearch(db="assembly", term=term, retmax='200')
    record = Entrez.read(handle)
    ids = record['IdList']
    return ids
def get_gcf_dict(ids):
    """Map assembly ids to accession metadata, ranked best-first.

    Returns ``{accession: {'update_time', 'assembly_level', 'assembly_order'}}``
    sorted by assembly level (Complete Genome > Chromosome > Scaffold) and then
    by RefSeq release date, newest first. Suppressed RefSeq records get a
    "(suppressed)" suffix on the accession.
    Example shape: {'GCF_000008865.2': {...}, 'GCF_000005845.2': {...}}.
    """
    gcf_dict = {}
    # print(ids)
    # NOTE(review): complete_flag is set but never used - dead variable.
    complete_flag = 0
    for id in ids:
        # print(summary)
        try:
            summary = get_assembly_summary(
                id)['DocumentSummarySet']['DocumentSummary'][0]
            assembly_level = summary['AssemblyStatus']
            accession = summary['AssemblyAccession']
            update_time = summary['AsmReleaseDate_RefSeq']
            if 'suppressed_refseq' in summary['PropertyList']:
                accession += '(suppressed)'
        except:
            # Skip records whose summary is missing expected fields.
            continue
        try:
            update_time = datetime.strptime(
                update_time, '%Y/%m/%d %H:%M')
        except:
            # Malformed dates (e.g. '1/01/01 00:00') sort last via this sentinel.
            update_time = datetime.strptime(
                "1990/01/01 01:00", '%Y/%m/%d %H:%M')
        assembly_dict = {'Complete Genome': 2, 'Chromosome': 1, 'Scaffold': 0}
        assembly_order = assembly_dict.get(assembly_level, 0)
        gcf_dict[accession] = {'update_time': update_time, 'assembly_level': assembly_level, 'assembly_order': assembly_order}
    gcf_dict = dict(sorted(gcf_dict.items(), key=lambda x: (x[1]['assembly_order'], x[1]['update_time']), reverse=True))
    return gcf_dict
def get_gcf(ids):
    """Return (accessions, assembly_level) for the given assembly ids, where
    accessions keep the ranked order from get_gcf_dict and assembly_level is
    that of the top-ranked entry ('-' and [] when nothing was found)."""
    ranked = get_gcf_dict(ids)
    if not ranked:
        return [], '-'
    topAccession = next(iter(ranked))
    return list(ranked), ranked[topAccession]['assembly_level']
def get_assembly_url(id):
    """Return the RefSeq FTP path for an Entrez assembly id."""
    record = get_assembly_summary(id)
    return record['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']
def get_assembly_summary(id):
    """Get esummary for an entrez id.

    Returns the parsed Entrez esummary record (full report) for the
    assembly; validation is disabled because NCBI's DTD lags the payload.
    """
    esummary_handle = Entrez.esummary(
        db="assembly", id=id, report="full", validate=False)
    esummary_record = Entrez.read(esummary_handle, validate=False)
    return esummary_record
if __name__ == "__main__":
host_name = "Shewanella"
gcf_info = name_to_gcfs(host_name)
print(host_name)
print(gcf_info)
# get_gcf_dict([42708])
"""Test
Escherichia coli :有两个reference基因组
Vibrio natriegens :有1个Representative基因组
Mycobacterium smegmatis mc2 155 : 没有Representative和reference基因组,有3个基因组
Streptomyces scabiei RL-34 直接靠host name是得不到assembly的,但是用Streptomyces scabiei能得到representative genomes
Bacillus alcalophilus CGMCC 1.3604 直接靠host name是得不到assembly,发现是没有complete genome ,但有representative genome,Assembly level是Scaffold
Providencia stuartii isolate MRSN 2154 报错ValueError: time data '1/01/01 00:00' does not match format '%Y/%m/%d %H:%M',
原来这个Providencia stuartii GCA_018128385.1这个的AsmReleaseDate_RefSeq字段有问题
Candidatus Hamiltonella defensa 5AT (Acyrthosiphon pisum) 意识到要有英文括号的应该换成空格
Acholeplasma laidlawii represent genome是scaffold的,结果返回的是unreference genome的complete genome,发现是脚本没有把scaffold level写成了scaffold,导致出现问题
Arthrobacter sp. ATCC 21022 返回的是anomalous genome发现要添加AND (NOT anomalous[filter])
Cronobacter turicensis z3032 返回的是GCF_000027065.2 (suppressed)
Planktothrix agardhii HAB637 返回['GCA_003609755.1', 'GCF_000710505.1'],GCA排在GCF前面不合理->改动后输出(['GCF_000710505.1'], 'Unreference', 'Chromosome')
"""
| Achuan-2/phage-host | 00_data/00_scripts/ncbi_assembly.py | ncbi_assembly.py | py | 6,195 | python | en | code | 0 | github-code | 13 |
42208305829 | import random
# Read the four students' names, then pick one at random.
a1 = input('nome do aluno um:')
a2 = input('o nome do segundo aluno:')
a3 = input('nome do terceiro aluno:')
a4 = input('nome do quarto aluno:')
lista = [a1, a2, a3, a4]
escolhido = random.choice(lista)
print('O aluno escolhido foi {} '.format(escolhido))
# In Python, a list of objects is written between square brackets.
# random.choice picks one item from that list.
| lightluigui/PyhtonExercises | ex019.py | ex019.py | py | 401 | python | pt | code | 1 | github-code | 13 |
13632417103 | def divisors(n):
    # Print every divisor of n and finally the count plus the full list.
    a = 1
    # NOTE(review): this local list shadows the enclosing function name
    # `divisors`; harmless here, but confusing.
    divisors = []
    count = 0
    while a <= n:
        if n%a==0:
            divisors.append(a)
            count = count + 1
            print(a)
        a = a + 1
    print (count, divisors)
# Read an integer from the user and list its divisors.
b = int(input("Fdfsdfs"))
divisors(b)
| kieczkowska/codewars | exercise.py | exercise.py | py | 279 | python | en | code | 0 | github-code | 13 |
39303089690 | import random
# Getting a random number
def guess_the_no():
    """Play up to 5 rounds of guess-the-number against a random target in [0, 100].

    Prints a hint after each wrong guess and a summary after the game ends.
    """
    magic_no = random.randint(0, 100)
    # Guard so the post-game summary is defined even before the first guess.
    last_guess_value = None
    for i in range(5):
        guess = int(input('Guess the no: '))
        last_guess_value = guess
        if magic_no == guess:
            print("You won! Congrats!")
            # Bug fix: the game used to keep asking for guesses after a win.
            break
        elif guess < magic_no:
            print("Hint! Go UP!!!")
        else:
            print("Hint! GO DOWN!!!")
    print("The magic no was ", magic_no)
    if last_guess_value == magic_no:
        print("You Won!")
    else:
        print("You were just", abs(magic_no - last_guess_value), " no away from the magi no.")
        print("Better luck next time!")
# Script entry point: play one game when the file is run.
guess_the_no()
40366398465 | # -*- coding: utf-8 -*-
from malha.models import Linha
from malha.models import Parada
from malha.models import Veiculo
from malha.models import ParadaVeiculo
from django.conf import settings
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery import task
import json
import urllib
@periodic_task(run_every=crontab(hour="2, 14", minute="0", day_of_week="1-5"))
def populate_linha():
    """Fetch the line list from the transit API and upsert each Linha record."""
    response = urllib.urlopen(settings.API_BASE_URL + 'lines')
    for entry in json.loads(response.read()):
        # Reuse the existing row when one exists, otherwise create it.
        try:
            linha = Linha.objects.get(id=entry['id'])
        except Linha.DoesNotExist:
            linha = Linha(id=entry['id'])
        # Only touch the database when the payload actually differs.
        if linha.has_changes(entry):
            linha.label = entry['label'].strip()
            linha.color = entry['color']
            linha.nome = entry['nombre'].strip()
            linha.save()
@periodic_task(run_every=crontab(hour="2, 14", minute="10", day_of_week="1-5"))
def populate_parada():
    """Fetch every line's stops and synchronise Parada rows and line membership."""
    for linha in Linha.objects.all():
        url = settings.API_BASE_URL + 'line/' + linha.label
        response = urllib.urlopen(url)
        # Read the body exactly once: a urlopen response is a stream, so the
        # old error path's second read() always returned '' and misclassified
        # every failure as "no data".
        body = response.read()
        try:
            data = json.loads(body)
        except ValueError:
            logger = populate_parada.get_logger()
            if body != '':
                logger.error("URL '{}' return is incomprehensible".format(url))
            else:
                logger.warning("No data returned for URL '{}'".format(url))
            # Bug fix: the warning branch used to fall through and reference
            # `data`, which is undefined (or stale from a previous iteration).
            continue
        paradas = []
        for datum in data['stops']:
            try:
                parada = Parada.objects.get(id=datum['id'])
            except Parada.DoesNotExist:
                parada = Parada(id=datum['id'])
            if parada.has_changes(datum):
                parada.label = datum['label']
                parada.nome = datum['name']
                parada.lat = datum['location']['lat']
                parada.lon = datum['location']['lon']
                parada.save()
            if linha not in parada.linhas.all():
                parada.linhas.add(linha)
            paradas.append(parada)
        # Detach stops that no longer belong to this line.
        for parada in linha.paradas.all():
            if parada not in paradas:
                parada.linhas.remove(linha)
@periodic_task(run_every=crontab(hour="2, 14", minute="10", day_of_week="1-5"))
def populate_veiculo():
    """Fetch every line's vehicles, upsert Veiculo rows and link them to stops."""
    for linha in Linha.objects.all():
        url = settings.API_BASE_URL + 'line/' + linha.label + '/vehicles'
        response = urllib.urlopen(url)
        # Read once — the stream cannot be re-read (old code called read() twice).
        body = response.read()
        try:
            data = json.loads(body)
        except ValueError:
            # Consistency fix: log through this task's logger, not populate_parada's.
            logger = populate_veiculo.get_logger()
            if body != '':
                logger.error("URL '{}' return is incomprehensible".format(url))
            else:
                logger.warning("No data returned for URL '{}'".format(url))
            # Bug fix: previously fell through to iterate an undefined `data`.
            continue
        for datum in data:
            try:
                veiculo = Veiculo.objects.get(id=datum['id'])
            except Veiculo.DoesNotExist:
                veiculo = Veiculo(id=datum['id'])
            veiculo.linha = linha
            veiculo.save()
            # Ensure a ParadaVeiculo association exists for each stop on the line.
            for parada in linha.paradas.all():
                if parada not in veiculo.paradas.all():
                    parada_veiculo = ParadaVeiculo(
                        parada=parada, veiculo=veiculo)
                    parada_veiculo.save()
@task()
def update_linha_veiculo_location(linha_label):
    """Refresh the current location of every vehicle on the given line."""
    url = settings.API_BASE_URL + 'line/' + linha_label + '/vehicles'
    response = urllib.urlopen(url)
    # Read once — the stream cannot be re-read (old code called read() twice).
    body = response.read()
    try:
        data = json.loads(body)
    except ValueError:
        # Consistency fix: use this task's logger, not populate_parada's.
        logger = update_linha_veiculo_location.get_logger()
        if body != '':
            logger.error("URL '{}' return is incomprehensible".format(url))
        else:
            logger.warning("No data returned for URL '{}'".format(url))
        # Bug fix: the warning branch used to fall through to an undefined `data`.
        return
    for datum in data:
        try:
            veiculo = Veiculo.objects.get(id=datum['id'])
        except Veiculo.DoesNotExist:
            logger = update_linha_veiculo_location.get_logger()
            logger.error("Vehicle with ID '{}' not found".format(datum['id']))
            continue
        veiculo.update_location(datum['location'])
@task()
def update_parada_estimativas(parada_label):
    """Refresh arrival estimations for every vehicle serving the given stop."""
    url = settings.API_BASE_URL + 'stop/' + parada_label + '/estimations'
    response = urllib.urlopen(url)
    # Read once — the stream cannot be re-read (old code called read() twice).
    body = response.read()
    try:
        data = json.loads(body)
    except ValueError:
        # Consistency fix: use this task's logger, not populate_parada's.
        logger = update_parada_estimativas.get_logger()
        if body != '':
            logger.error("URL '{}' return is incomprehensible".format(url))
        else:
            logger.warning("No data returned for URL '{}'".format(url))
        # Bug fix: the warning branch used to fall through to an undefined `data`.
        return
    for datum in data:
        try:
            veiculo = Veiculo.objects.get(id=datum['vehicle'])
        except Veiculo.DoesNotExist:
            logger = update_parada_estimativas.get_logger()
            logger.error(
                "Vehicle with ID '{}' not found".format(datum['vehicle']))
            continue
        veiculo.update_estimations(datum, parada_label)
| dgtechfactory/comfortbus-web | malha/tasks.py | tasks.py | py | 5,150 | python | en | code | 0 | github-code | 13 |
34739488549 | import numpy as np
import datetime
def datestr2num(s):
    # Convert a b"DD-MM-YYYY" byte string into a proleptic-Gregorian ordinal day.
    return datetime.datetime.strptime(s.decode("ascii"), "%d-%m-%Y").toordinal()
# Load the date (column 1) and closing-price (column 6) data.
dates,closes=np.loadtxt('AAPL.csv', delimiter=',', usecols=(1, 6),
converters={1:datestr2num}, unpack=True)
# Sort by closing price: np.lexsort treats the LAST key as primary,
# so `closes` is the main sort key and `dates` breaks ties.
indices = np.lexsort((dates, closes))
print("Indices", indices)
print(["%s %s" % (datetime.date.fromordinal(
int(dates[i])), closes[i]) for i in indices])
| lucelujiaming/numpyDataProcess | numpyDataProcess/test_lex.py | test_lex.py | py | 498 | python | en | code | 0 | github-code | 13 |
21293760269 | start = int(input())
end = int(input())
magic_num = int(input())
# Search every ordered pair (x, y) in [start, end] for one whose sum
# equals magic_num; `counter` tracks how many pairs were examined.
flag = False
counter = 0
for x in range(start, end+1):
    for y in range(start, end+1):
        counter += 1
        result = x + y
        if result == magic_num:
            print(f"Combination N:{counter} ({x} + {y} = {magic_num})")
            flag = True
            break
    # Propagate the inner break out of the outer loop as well.
    if flag:
        break
if not flag:
    print(f"{counter} combinations - neither equals {magic_num}")
| SJeliazkova/SoftUni | Programming-Basic-Python/Exercises-and-Labs/Nested_Loops_Lab/04. Sum of Two Numbers.py | 04. Sum of Two Numbers.py | py | 456 | python | en | code | 0 | github-code | 13 |
10453243380 | """
本部分代码实现:
1.Blast结果整理
2.计算三指标权重并进行一致性检验
后续单菌种风险和综合风险于excel中完成
"""
import numpy as np
import pandas as pd
import os
from pandas.errors import EmptyDataError
def blast_sort(sheet_name):
    """
    Read one sheet of the BLAST results workbook and count, per sample,
    how many distinct genes from that database were identified.

    :param sheet_name: CARD: intrinsic resistance; ResFinder: acquired resistance;
        VFDB: virulence factors; Vrprofile: mobile elements and resistance genes.
    """
    df = pd.read_excel('data_sample/Blast_result.xlsx', sheet_name=sheet_name)
    # NOTE(review): output columns are named CARD_* regardless of sheet_name —
    # confirm this is intentional.
    gene_count = df.groupby('SEQUENCE')['GENE'].nunique().reset_index()
    gene_count.columns = ['SEQUENCE', 'CARD_Count']
    # Collect the names of the genes detected in each sample.
    gene_names = df.groupby('SEQUENCE')['GENE'].apply(list).reset_index()
    gene_names.columns = ['SEQUENCE', 'CARD_Names']
    # Merge counts and names on the sample id.
    result_df = pd.merge(gene_count, gene_names, on='SEQUENCE')
    # Append the result to the workbook as a "sort" sheet.
    with pd.ExcelWriter('data_sample/Blast_result.xlsx', engine='openpyxl', mode='a') as writer:
        result_df.to_excel(writer, sheet_name='sort', index=False)
def vrprofile_sort(folder_path='VRprofile_Download/'):
    """
    Collate the MGE result tables downloaded from VRprofile into one table,
    keep only rows with an antibiotic-resistance-gene annotation, and append
    them to the workbook as a "Vrprofile" sheet.
    """
    file_names = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    # Read every per-isolate TSV, skipping empty files.
    dataframes = []
    for file_name in file_names:
        file_path = os.path.join(folder_path, file_name)
        try:
            df = pd.read_csv(file_path, sep='\t')
            if not df.empty:
                dataframes.append(df)
        except pd.errors.EmptyDataError:
            print(f"文件 {file_name} 为空,跳过处理")
    # Bug fix: the old code applied a boolean mask to the Python *list*
    # `dataframes` (a TypeError). Concatenate first, then filter the rows.
    data = pd.concat(dataframes, ignore_index=True)
    data = data[data["Antibiotic_Resistance_Genes"].notnull()]
    with pd.ExcelWriter('data_sample/Blast_result.xlsx', engine='openpyxl', mode='a') as writer:
        data.to_excel(writer, sheet_name='Vrprofile', index=False)
def calculate_ahp():
    """Compute AHP criterion weights from the pairwise matrix A and run the
    consistency check (prints CI, CR and the normalised weight vector)."""
    A = np.array([[1, 5, 3],
                  [1/5, 1, 1/3],
                  [1/3, 3, 1]])  # pairwise-importance matrix
    n = len(A[0])
    # Random consistency index: RI[i] is the value for an (i+1)x(i+1) matrix
    # (0 for orders 1-2, 0.58 for order 3, ...).
    RI = [0, 0, 0.58, 0.90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49, 1.51]
    V, D = np.linalg.eig(A)  # eigenvalues V, eigenvectors D (columns)
    # np.linalg.eig returns complex arrays for non-symmetric matrices; the
    # principal (Perron) eigenvalue/eigenvector of a positive matrix is real,
    # so select by largest real part and drop the zero imaginary parts.
    index = int(np.argmax(V.real))
    lam = V[index].real          # largest eigenvalue
    C = D[:, index].real         # corresponding eigenvector
    CI = (lam - n) / (n - 1)     # consistency index
    # Bug fix: RI[n] was off by one — for n == 3 the table value is RI[2] = 0.58.
    CR = CI / RI[n - 1]
    if CR < 0.10:
        print("CI=", CI)
        print("CR=", CR)
        print('重要性矩阵通过一致性检验,各向量权重向量Q为:')
        Q = C / np.sum(C)        # normalise the eigenvector to sum to 1
        print(Q)                 # weight vector
    else:
        print("重要性A未通过一致性检验,需对对比矩阵A重新构造")
| XiaoMaGoGoGo/PRCE-PS | Data_deal_AHP.py | Data_deal_AHP.py | py | 3,343 | python | zh | code | 0 | github-code | 13 |
25285160941 | import itertools
from numpy import linspace
from pylinal import VectorFunc
from math import cos, sin, pi
# Spherical-coordinate parameterisation of a sphere of radius r about `center`.
center = (0, 0, 0)
x0, y0, z0 = center
r = 1 # radius
# theta in [0, pi]; phi in [0, 2*pi]
x = lambda theta, phi: x0 + r * sin(theta) * cos(phi)
y = lambda theta, phi: y0 + r * sin(theta) * sin(phi)
z = lambda theta, phi: z0 + r * cos(theta)
sphere = VectorFunc([x, y, z])
# Sample the parameter grid (the lambdas take theta/phi as parameters, so
# rebinding the names below does not affect them).
theta = linspace(0, pi)
phi = linspace(0, 2*pi)
cartesian = itertools.product(theta, phi)
points = [sphere(theta, phi) for (theta, phi) in cartesian]
| PegasusHunter/pylinal | examples/sphere.py | sphere.py | py | 535 | python | en | code | 0 | github-code | 13 |
73744203538 | import time
# Time how long it takes to find names present in both files.
start_time = time.time()
f = open('./names/names_1.txt', 'r')
names_1 = f.read().split("\n")  # List containing 10000 names
f.close()
f = open('./names/names_2.txt', 'r')
names_2 = f.read().split("\n")  # List containing 10000 names
f.close()
duplicates = []
# O(n) approach: index the first list in a dict, then probe it for each name
# in the second list.  NOTE(review): `hash` shadows the builtin of that name.
hash = {}
for name1 in names_1:
    hash[name1] = True
for name2 in names_2:
    if name2 in hash:
        duplicates.append(name2)
# Original O(n^2) version kept for comparison:
# for name_1 in names_1:
#     for name_2 in names_2:
#         if name_1 == name_2:
#             duplicates.append(name_1)
end_time = time.time()
print(f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print(f"runtime: {end_time - start_time} seconds")
'''
Understand
----------
current code runs in quadratic time. should be able to shrink that with a chache.
Plan
----
- put all names from first list in a hash 0(n). loop through all names in second list and check if they're int the hash O(n) If they are, append to the duplicates list.
Execute
-------
.0056 seconds
Analyze
-------
seems like O(n) time complexity.
first loop - n * 1 (hash insert)
second loop - n * 1 (lookup) * 1 (append)
n * 1 + ( n * 1 * 1) = n + n = 2n
'''
| erin-koen/Sprint-Challenge--Data-Structures-Python | names/names.py | names.py | py | 1,187 | python | en | code | 0 | github-code | 13 |
73531013457 | '''
Numa eleição existem quatro candidatos.
Os códigos utilizados são:
1 , 2, 3, 4 - Votos para os respectivos candidatos
(você deve montar a tabela ex: 1 - Jose/ 2- João/etc)
5 - Voto Nulo
6 - Voto em Branco
Faça um programa que peça o número total de eleitores, receba o voto de cada eleitor
em seguida calcule e mostre:
O total de votos para cada candidato;
O total de votos nulos;
O total de votos em branco;
A porcentagem de votos nulos sobre o total de votos;
A porcentagem de votos em branco sobre o total de votos.
A porcentagem de votos de cada candidato.
'''
import os
# One counter per ballot option: 1-4 candidates, 5 null votes, 6 blank votes.
lista_votos = []
count_1 = 0
count_2 = 0
count_3 = 0
count_4 = 0
count_5 = 0
count_6 = 0
def menu_eleicao():
    # Print the ballot menu (candidates plus null/blank options).
    print("*** ELEIÇÕES 2020 ***")
    print("Em quem você vota? ")
    print("1- Lula")
    print("2- Dilma")
    print("3- Bolsonaro")
    print("4- Fernando Henrique")
    print("5- Nulo")
    print("6- Em branco")
    print("Selecione uma opção: ")
# Collect one vote per voter, clearing the console between voters.
print("Forneça o número total de eleitores: ")
num_eleitores = int(input())
for eleitor in range(1, num_eleitores+1):
    os.system("cls")
    menu_eleicao()
    opcao = int(input())
    lista_votos.append(opcao)
# Tally the collected votes into the per-option counters.
for voto in lista_votos:
    if voto == 1:
        count_1 = count_1 + 1
    elif voto == 2:
        count_2 = count_2 + 1
    elif voto == 3:
        count_3 = count_3 + 1
    elif voto == 4:
        count_4 = count_4 + 1
    elif voto == 5:
        count_5 = count_5 + 1
    elif voto == 6:
        count_6 = count_6 + 1
print("O total de votos para o candidato Lula foi de: ", count_1)
print("O total de votos para a candidata Dilma foi de: ", count_2)
print("O total de votos para o candidato Bolsonaro foi de: ", count_3)
print("O total de votos para o candidato Fernando Henrique foi de: ", count_4)
print("O total de votos nulos foi de: ", count_5)
print("O total de votos em branco foi de: ", count_6)
# Percentages over the total number of voters.
percentual_nulos = (count_5/num_eleitores)*100
percentual_brancos = (count_6/num_eleitores)*100
percentual_lula = (count_1/num_eleitores)*100
percentual_dilma = (count_2/num_eleitores)*100
percentual_bolsonaro = (count_3/num_eleitores)*100
percentual_fhc = (count_4/num_eleitores)*100
print("\nPressione enter para computar o percentual de votos...")
opcao = str(input())
os.system("cls")
print("O percentual de votos nulos sobre o total de votos foi de: ", percentual_nulos, "%")
print("O percentual de votos em branco sobre o total de votos foi de: ", percentual_brancos, "%")
print("O percentual de votos no Lula foi de: ", percentual_lula, "%")
print("O percentual de votos no Dilma foi de: ", percentual_dilma, "%")
print("O percentual de votos no Bolsonaro foi de: ", percentual_bolsonaro, "%")
print("O percentual de votos no Fernando Henrique foi de: ", percentual_fhc, "%")
| tspolli/exercises | eleicao.py | eleicao.py | py | 2,783 | python | pt | code | 0 | github-code | 13 |
14386249255 | #
# @lc app=leetcode.cn id=160 lang=python3
#
# [160] 相交链表
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        """Return the first node shared by two singly linked lists, or None.

        Walks both lists to their tails to measure the length difference,
        rejects lists whose tails differ (no intersection), then advances the
        longer list by the difference and steps both in lockstep.
        """
        if not headA or not headB:
            return None
        tail_a, tail_b = headA, headB
        diff = 0
        while tail_a.next:
            tail_a = tail_a.next
            diff += 1
        while tail_b.next:
            tail_b = tail_b.next
            diff -= 1
        # Intersecting lists must share their final node.
        if tail_a != tail_b:
            return None
        longer, shorter = (headA, headB) if diff > 0 else (headB, headA)
        for _ in range(abs(diff)):
            longer = longer.next
        while longer != shorter:
            longer = longer.next
            shorter = shorter.next
        return longer
# @lc code=end
# Local smoke test building two lists that intersect at the node holding 8.
# NOTE(review): ListNode is only defined in the commented-out template above,
# so running this module as-is raises NameError.
l1 = ListNode(4)
l1.next = ListNode(1)
l1.next.next = ListNode(8)
l1.next.next.next = ListNode(4)
l1.next.next.next .next = ListNode(5)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(1)
l2.next.next.next = l1.next.next
Solution().getIntersectionNode(l1, l2)
| largomst/leetcode-problem-solution | 160.相交链表.2.py | 160.相交链表.2.py | py | 1,322 | python | en | code | 0 | github-code | 13 |
29316023726 | import torch.nn as nn
import torch.nn.functional as F
import torch
import gym
import numpy as np
"""
损失函数交叉熵杀我,reduction='none'
"""
class NN(nn.Module):
    """Two-layer policy network: observation -> (logits, action probabilities)."""

    def __init__(self, input_dim=4, output_dim=2):
        super(NN, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = 64
        self.Linear1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.Linear2 = nn.Linear(self.hidden_dim, self.output_dim)

    def forward(self, x):
        """x: numpy array of shape (batch, input_dim).

        Returns (logits, probs) where probs is the row-wise softmax of logits.
        """
        # The callers pass numpy observations; convert to a float tensor here.
        x = torch.from_numpy(x).float()
        x = self.Linear1(x)
        # Fix: F.tanh has been deprecated since PyTorch 1.2 — use torch.tanh.
        x = torch.tanh(x)
        x = self.Linear2(x)
        y = F.softmax(x, dim=1)
        return x, y
class Agent(object):
    """REINFORCE-style agent: samples episodes from a gym env and updates a policy net."""
    def __init__(self, env, input_dim=4, output_dim=2):
        super(Agent, self).__init__()
        self.env = env
        self.input_dim = input_dim
        self.output_dim = output_dim
        # self.Net = net
    def choose_action(self, s, net, random=True):
        """Pick an action for observation `s` (shape (1, input_dim) numpy array)."""
        x, y = net(s)
        # Stochastic policy: sample an action from the predicted distribution.
        if random:
            a = np.random.choice(np.arange(self.output_dim), p=y[0].detach().numpy())
        # Deterministic policy: take the highest-probability action.
        else:
            a = np.argmax(y.detach().numpy())
        # print(a)
        return int(a)
    def act(self, a):
        # Step the environment; returns (next_state, reward, done, info).
        return self.env.step(a)
    def sample(self, net):
        """Roll out one full episode; returns (observations, actions, rewards)."""
        obs = []
        action = []
        reward = []
        s = self.env.reset()
        total_r = 0
        while True:
            # Add a batch dimension so the net sees shape (1, input_dim).
            s = np.expand_dims(s, axis=0)
            obs.append(s)
            a = self.choose_action(s, net)
            action.append(a)
            s_, r, done, info = self.act(a)
            reward.append(r)
            total_r += r
            # Episode finished.
            if done:
                break
            s = s_
        obs = np.concatenate(obs, axis=0)
        print(total_r)
        return obs, action, reward
    def modify(self, reward):
        """Return the (discounted) return-to-go for every step as a tensor.

        With dicount_factor == 1 this is just the tail sum of rewards.
        """
        modify_reward = []
        dicount_factor = 1
        for i in range(len(reward)):
            acc_r = 0
            for j in range(i, len(reward)):
                acc_r += dicount_factor ** (j - i) * reward[j]
            modify_reward.append(acc_r)
        print(modify_reward)
        # Normalisation (currently disabled).
        modify_reward = torch.tensor(modify_reward)
        # modify_reward -= modify_reward.mean()
        # modify_reward /= modify_reward.std()
        return modify_reward
    def learn(self, net, obs, action, reward):
        """One policy-gradient step over a sampled episode."""
        y_pred = net(obs)
        # Per-step cross-entropy between logits and the actions actually taken.
        optim = torch.optim.Adam(net.parameters(), lr=0.02)
        loss_func = nn.CrossEntropyLoss(reduction='none')
        loss = loss_func(y_pred[0], torch.tensor(action))
        modify_reward = self.modify(reward)
        # Weight each step's loss by its return (the REINFORCE objective).
        loss = loss * modify_reward
        loss = loss.mean()
        optim.zero_grad()
        loss.backward()
        optim.step()
    def test(self, net):
        """Run one rendered episode with the deterministic policy and print the return."""
        total_r = 0
        s = self.env.reset()
        while True:
            self.env.render()
            # Add a batch dimension.
            s = np.expand_dims(s, axis=0)
            a = self.choose_action(s, net, False)
            s_, r, done, info = self.act(a)
            total_r += r
            if done:
                break
            s = s_
        print("total_reward: {}".format(total_r))
print("total_reward: {}".format(total_r))
def main():
    """Train the policy network on CartPole-v0 for a fixed number of episodes."""
    env = gym.make("CartPole-v0")
    obs_num = env.observation_space.shape[0]
    # print(obs_num)
    act_num = env.action_space.n
    # print(act_num)
    # NOTE(review): NN() is built with its defaults (4, 2) rather than
    # obs_num/act_num — identical for CartPole, but worth confirming.
    Net = NN()
    agent = Agent(env, obs_num, act_num)
    epoch = 400
    for i in range(epoch):
        # env.render()
        obs, action, reward = agent.sample(Net)
        agent.learn(Net, obs, action, reward)
    agent.test(Net)
if __name__ == "__main__":
main() | Girapath/RL | PolicyGradient_try1.py | PolicyGradient_try1.py | py | 4,175 | python | en | code | 0 | github-code | 13 |
38774935199 | import pandas as pd
def format_accs(fn):
    """Load an ACCS ambulatory-care extract and normalise it.

    Forces code columns to str, parses visit/disposition date-times, renames
    columns to the project's canonical names, collapses the repeated
    DXCODE*/PROCCODE*/DOCSVC*/PROVTYPE* columns into list-valued columns, and
    indexes the frame by BI_NBR.
    """
    df = pd.read_csv(
        fn,
        low_memory=False,
        dtype={
            "MISPRIME": str,
            "VISDATE": str,
            "DISDATE": str,
            "DISTIME": str,
            'DOCSVC1': str,
            'DOCSVC2': str,
            'DOCSVC3': str,
            'DOCSVC4': str,
            'DOCSVC5': str,
            'PROVTYPE1': str,
            'PROVTYPE2': str,
            'PROVTYPE3': str,
            'PROVTYPE4': str,
            'PROVTYPE5': str,
            'DXCODE1': str,
            'DXCODE2': str,
            'DXCODE3': str,
            'DXCODE4': str,
            'DXCODE5': str,
            'DXCODE6': str,
            'DXCODE7': str,
            'DXCODE8': str,
            'DXCODE9': str,
            'DXCODE10': str,
            'PROCCODE1': str,
            'PROCCODE2': str,
            'PROCCODE3': str,
            'PROCCODE4': str,
            'PROCCODE5': str,
            'PROCCODE6': str,
            'PROCCODE7': str,
            'PROCCODE8': str,
            'PROCCODE9': str,
            'PROCCODE10': str,
        },
        na_values=[""],
    ).rename(columns={
        'VISDATE': 'VISIT_DATE',
        'DISDATE': 'DISP_DATE',
        'DISTIME': 'DISP_TIME',
        "SEX": "GENDER",
        "ISOLATE_NBR": "BI_NBR",
        "Post_code": "POSTCODE",
        "DISP": "DISPOSITION",
        "AHS_ZONE": 'INST_ZONE',
        "LOS_MINUTES": "VISIT_LOS_MINUTES",
        "MISPRIME": "MIS_CODE",
    }
    )
    # Right-pad MIS codes with zeros to a fixed width of 9.
    df['MIS_CODE'] = df['MIS_CODE'].str.pad(9, side='right', fillchar='0')
    df["VISIT_DATE"] = pd.to_datetime(df["VISIT_DATE"], format="%Y%m%d", errors="coerce")
    df["DISP_DATE"] = pd.to_datetime(df["DISP_DATE"], format="%Y%m%d", errors="coerce")
    df["DISP_TIME"] = pd.to_datetime(
        df["DISP_TIME"], format="%H%M", errors="coerce"
    ).dt.time
    drop_columns = df.filter(
        regex="DXCODE|PROCCODE|PROVIDER_SVC|PROVTYPE|DOCSVC"
    ).columns.to_list()
    # NOTE(review): missing values read by pandas are NaN, not None, so the
    # `e is not None` filters below may not actually drop missing entries.
    df["DXCODES"] = df.filter(regex="DXCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROCCODES"] = df.filter(regex="PROCCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROVIDER_SVCS"] = df.filter(regex="DOCSVC").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROVIDER_TYPES"] = df.filter(regex="PROVTYPE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["GENDER"] = df["GENDER"].replace(gender_map)
    df = df.drop(drop_columns, axis=1)
    return df.set_index('BI_NBR')
def format_nacrs(fn):
    """Load a NACRS emergency-department extract and normalise it.

    Forces code columns to str, parses visit/disposition/departure date-times,
    renames columns to canonical names, collapses the repeated
    DXCODE*/PROCCODE*/PROVIDER_* columns into list-valued columns, and indexes
    the frame by BI_NBR.
    """
    df = pd.read_csv(
        fn,
        low_memory=False,
        na_values=[""],
        dtype={
            "INST": str,
            "INSTFROM": str,
            "INSTTO": str,
            "INST_ZONE": str,
            "MIS_CODE": str,
            "PROVIDER_TYPE1": str,
            "PROVIDER_SVC1": str,
            "PROVIDER_TYPE2": str,
            "PROVIDER_SVC2": str,
            "PROVIDER_TYPE3": str,
            "PROVIDER_SVC3": str,
            "PROVIDER_TYPE4": str,
            "PROVIDER_SVC4": str,
            "PROVIDER_TYPE5": str,
            "PROVIDER_SVC5": str,
            "PROVIDER_TYPE6": str,
            "PROVIDER_SVC6": str,
            "PROVIDER_TYPE7": str,
            "PROVIDER_SVC7": str,
            "PROVIDER_TYPE8": str,
            "PROVIDER_SVC8": str,
            "DXCODE1": str,
            "DXCODE2": str,
            "DXCODE3": str,
            "DXCODE4": str,
            "DXCODE5": str,
            "DXCODE6": str,
            "DXCODE7": str,
            "DXCODE8": str,
            "DXCODE9": str,
            "DXCODE10": str,
            "PROCCODE1": str,
            "PROCCODE2": str,
            "PROCCODE3": str,
            "PROCCODE4": str,
            "PROCCODE5": str,
            "PROCCODE6": str,
            "PROCCODE7": str,
            "PROCCODE8": str,
            "PROCCODE9": str,
            # Bug fix: this entry was a duplicate "PROCCODE1" key, which silently
            # overwrote the earlier one and left PROCCODE10 untyped.
            "PROCCODE10": str,
        },
    )
    df["VISIT_DATE"] = pd.to_datetime(
        df["VISIT_DATE"], format="%Y%m%d", errors="coerce"
    )
    df["DISP_DATE"] = pd.to_datetime(df["DISP_DATE"], format="%Y%m%d", errors="coerce")
    df["ED_DEPT_DATE"] = pd.to_datetime(
        df["ED_DEPT_DATE"], format="%Y%m%d", errors="coerce"
    )
    df["DISP_TIME"] = pd.to_datetime(
        df["DISP_TIME"], format="%H%M", errors="coerce"
    ).dt.time
    df["ED_DEPT_TIME"] = pd.to_datetime(
        df["ED_DEPT_TIME"], format="%H%M", errors="coerce"
    ).dt.time
    for col in ["VISIT_LOS_MINUTES", "EIP_MINUTES", "ED_ER_MINUTES", "AGE_ADMIT"]:
        df[col] = df[col].astype(float)
    df = df.rename(columns={"SEX": "GENDER", "ISOLATE_NBR": "BI_NBR"})
    df["GENDER"] = df["GENDER"].replace(gender_map)
    df = df.rename(columns={"Post_code": "POSTCODE"})
    drop_columns = df.filter(
        regex="DXCODE|PROCCODE|PROVIDER_SVC|PROVIDER_TYPE"
    ).columns.to_list()
    # NOTE(review): pandas missing values are NaN, not None, so these filters
    # may not actually drop missing entries.
    df["DXCODES"] = df.filter(regex="DXCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROCCODES"] = df.filter(regex="PROCCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROVIDER_SVCS"] = df.filter(regex="PROVIDER_SVC").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROVIDER_TYPES"] = df.filter(regex="PROVIDER_TYPE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df = df.drop(drop_columns, axis=1)
    return df.set_index("BI_NBR")
def format_claims(fn):
    """Load a practitioner-claims extract: collapse the ICD9 diagnosis columns
    into one list-valued column, parse service dates (via the module-level
    convert_datetime helper), and sort/index by BI_NBR.
    """
    df = pd.read_csv(
        fn,
        low_memory=False,
        dtype={
            "DISDATE": str,
            "DISTIME": str,
            "HLTH_DX_ICD9X_CODE_1": str,
            "HLTH_DX_ICD9X_CODE_2": str,
            "HLTH_DX_ICD9X_CODE_3": str,
            "HLTH_SRVC_CCPX_CODE": str,
        },
        na_values=[""],
    )
    # NOTE(review): pandas missing values are NaN, not None — this filter may
    # not actually drop missing codes.
    df["HLTH_DX_ICD9X_CODES"] = df.filter(regex="ICD9X").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df = df.drop(
        ["HLTH_DX_ICD9X_CODE_1", "HLTH_DX_ICD9X_CODE_2", "HLTH_DX_ICD9X_CODE_3"], axis=1
    )
    df["SE_END_DATE"] = convert_datetime(df["SE_END_DATE"])
    df["SE_START_DATE"] = convert_datetime(df["SE_START_DATE"])
    df = df.rename(columns={"ISOLATE_NBR": "BI_NBR"})
    df = df.sort_values(["BI_NBR", "SE_START_DATE", "SE_END_DATE"])
    return df.set_index("BI_NBR")
def format_dad(fn):
    """Load a DAD inpatient extract: parse admit/discharge date-times, map
    gender codes, collapse DXCODE*/PROCCODE* into list-valued columns, and
    index the frame by BI_NBR.
    """
    df = pd.read_csv(
        fn,
        low_memory=False,
        dtype={"INST": str, "INSTFROM": str, "INSTTO": str},
        na_values=[""],
    )
    df["ADMITDATE"] = pd.to_datetime(df["ADMITDATE"], format="%Y%m%d", errors="coerce")
    df["DISDATE"] = pd.to_datetime(df["DISDATE"], format="%Y%m%d", errors="coerce")
    df["ADMITTIME"] = pd.to_datetime(
        df["ADMITTIME"], format="%H%M", errors="coerce"
    ).dt.time
    df["DISTIME"] = pd.to_datetime(
        df["DISTIME"], format="%H%M", errors="coerce"
    ).dt.time
    df = df.rename(columns={"SEX": "GENDER", "ISOLATE_NBR": "BI_NBR"})
    df["GENDER"] = df["GENDER"].replace(gender_map)
    drop_columns = df.filter(regex="DXCODE|DXTYPE|PROCCODE").columns.to_list()
    # NOTE(review): pandas missing values are NaN, not None — this filter may
    # not actually drop missing codes.
    df["DXCODES"] = df.filter(regex="DXCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df["PROCCODES"] = df.filter(regex="PROCCODE").apply(
        lambda x: [e for e in x if e is not None], axis=1
    )
    df = df.drop(drop_columns, axis=1)
    df = df.rename(columns={"Post_code": "POSTCODE"})
    return df.set_index("BI_NBR")
def format_lab(fn):
    """Load a lab extract, normalise the isolate-id column and parse the
    verification timestamp, returning a frame indexed by BI_NBR."""
    frame = pd.read_csv(fn, low_memory=False, dtype={"TEST_CD": str}, na_values=[""])
    frame = frame.rename(columns={"ISOLATE_NBR": "BI_NBR"})
    frame["TEST_VRFY_DTTM"] = convert_datetime(frame["TEST_VRFY_DTTM"])
    return frame.set_index("BI_NBR")
def format_pin(fn):
    """Load a PIN dispensing extract: parse the dispense date, cast quantity
    columns to float, and drop the gender column (it is renamed to GENDER
    first, then removed), returning a frame indexed by BI_NBR."""
    pin = pd.read_csv(fn, low_memory=False, dtype=str, na_values=[""])
    pin["DSPN_DATE"] = convert_datetime(pin["DSPN_DATE"])
    pin["DSPN_AMT_QTY"] = pin["DSPN_AMT_QTY"].astype(float)
    pin["DSPN_DAY_SUPPLY_QTY"] = pin["DSPN_DAY_SUPPLY_QTY"].astype(float)
    pin = pin.rename(columns={"RCPT_GENDER_CD": "GENDER", "ISOLATE_NBR": "BI_NBR"})
    pin = pin.drop('GENDER', axis=1)
    return pin.set_index("BI_NBR")
def format_reg(fn):
    """Load a registry extract: parse the coverage end date, cast indicator
    columns to int (migration flags to bool), drop redundant columns, and
    index the frame by BI_NBR.
    """
    df = pd.read_csv(
        fn, low_memory=False, dtype=str, na_values=[""]
    )
    df["PERS_REAP_END_DATE"] = convert_datetime(df["PERS_REAP_END_DATE"])
    for col in [
        "ACTIVE_COVERAGE",
        "AGE_GRP_CD",
        "DEATH_IND",
        "FYE",
        "IN_MIGRATION_IND",
        "OUT_MIGRATION_IND",
    ]:
        df[col] = df[col].astype(int)
    # AGE_GRP_CD is cast above only to validate it is numeric; it is dropped here.
    df = df.drop(["SEX", "AGE_GRP_CD"], axis=1)
    df = df.rename(columns={"ISOLATE_NBR": "BI_NBR", "Post_code": "POSTCODE"})
    df['IN_MIGRATION_IND'] = df['IN_MIGRATION_IND'].astype(bool)
    df['OUT_MIGRATION_IND'] = df['OUT_MIGRATION_IND'].astype(bool)
    return df.set_index("BI_NBR")
def format_vs(fn):
    """Load a vital-statistics extract: parse the death date, rename columns
    to canonical names, map gender codes and cast AGE to int, returning a
    frame indexed by BI_NBR."""
    vs = pd.read_csv(fn, low_memory=False, dtype=str, na_values=[""])
    # Parse before renaming: the raw column is still called DETHDATE here.
    vs["DETHDATE"] = convert_datetime(vs["DETHDATE"])
    vs = vs.rename(columns={
        "SEX": "GENDER",
        "DETHDATE": "DEATH_DATE",
        "ISOLATE_NBR": "BI_NBR",
        "Post_code": "POSTCODE",
    })
    vs["GENDER"] = vs["GENDER"].replace(gender_map)
    vs["AGE"] = vs["AGE"].astype(int)
    return vs.set_index("BI_NBR")
def format_atc(fn):
    """Load the ATC lookup table and normalise drug labels to Capitalised form."""
    atc = pd.read_parquet(fn)
    atc["DRUG_LABEL"] = atc["DRUG_LABEL"].str.capitalize()
    return atc
def format_postcodes_meta(fn):
    """Load the postal-code gazetteer: upper-case the column names, index by
    POSTCODE, and drop the two trailing unnamed columns."""
    meta = pd.read_csv(fn)
    meta = meta.rename(columns={
        "Postal Code": "POSTCODE",
        "Place Name": "REGION",
        "Province": "PROVINCE",
        "Latitude": "LATITUDE",
        "Longitude": "LONGITUDE",
    })
    meta = meta.set_index("POSTCODE")
    return meta.drop(["Unnamed: 5", "Unnamed: 6"], axis=1)
def format_population(fn):
    """Load the population table and pivot it to a dense YEAR x (GENDER, AGE)
    matrix of integer counts, excluding the "BOTH"/"ALL" aggregate rows.
    """
    df = pd.read_csv(fn).rename(
        columns={
            "Year": "YEAR",
            "Sex": "GENDER",
            "Age": "AGE",
            "Population": "POPULATION",
        }
    )
    # Keep only the disaggregated rows, then unstack GENDER/AGE into columns.
    dense = (
        df[(df.GENDER != "BOTH") & (df.AGE != "ALL")][
            ["YEAR", "GENDER", "AGE", "POPULATION"]
        ]
        .set_index(["YEAR", "GENDER", "AGE"])
        .unstack(
            [
                "GENDER",
                "AGE",
            ]
        )
        .astype(int)
        .sort_index(axis=1)
    )
    return dense
| LewisResearchGroup/LSARP-api | lsarp_api/ahs/formatters.py | formatters.py | py | 10,344 | python | en | code | 0 | github-code | 13 |
30610099330 | import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
import os, sys
cam=cv2.VideoCapture("line.mp4")
time.sleep(2)
fit_result, l_fit_result, r_fit_result, L_lane,R_lane = [], [], [], [], []
def Collect_points(lines):
    """Turn an (N, 4) array of line segments into an (M, 2) point cloud,
    densifying near-vertical segments by interpolating a point every 5 px.
    """
    # reshape [:4] to [:2]: each segment contributes its two endpoints.
    interp = lines.reshape(lines.shape[0]*2,2)
    # interpolation & collecting points for RANSAC
    for line in lines:
        # Only interpolate segments spanning more than 5 px vertically.
        if np.abs(line[3]-line[1]) > 5:
            tmp = np.abs(line[3]-line[1])
            # NOTE(review): c and d are assigned but never used below.
            a = line[0] ; b = line[1] ; c = line[2] ; d = line[3]
            # dx/dy: horizontal step per unit of vertical travel.
            slope = (line[2]-line[0])/(line[3]-line[1])
            for m in range(0,tmp,5):
                if slope>0:
                    new_point = np.array([[int(a+m*slope),int(b+m)]])
                    interp = np.concatenate((interp,new_point),axis = 0)
                elif slope<0:
                    new_point = np.array([[int(a-m*slope),int(b-m)]])
                    interp = np.concatenate((interp,new_point),axis = 0)
    return interp
def ransac_line_fitting(img, lines, min=100):
    """RANSAC-fit one lane line from a point cloud; falls back to the last
    global fit when no points are given.

    NOTE(review): the helpers get_random_samples, compute_model_parameter,
    model_verification, erase_outliers and get_fitline are not defined in this
    file — confirm they exist elsewhere.  Also `min` shadows the builtin.
    """
    global fit_result, l_fit_result, r_fit_result
    best_line = np.array([0,0,0])
    if(len(lines)!=0):
        # Up to 30 RANSAC iterations; keep the cheapest model, stop early
        # when the cost drops below 3.
        for i in range(30):
            sample = get_random_samples(lines)
            parameter = compute_model_parameter(sample)
            cost = model_verification(parameter, lines)
            if cost < min: # update best_line
                min = cost
                best_line = parameter
            if min < 3: break
        # erase outliers based on best line
        filtered_lines = erase_outliers(best_line, lines)
        fit_result = get_fitline(img, filtered_lines)
    else:
        # No input points: reuse the previous global fit, classified by slope
        # sign into left/right.
        if (fit_result[2]-fit_result[1])/(fit_result[1]-fit_result[0]) < 0:
            l_fit_result = fit_result
            return l_fit_result
        else:
            r_fit_result = fit_result
            return r_fit_result
    # Classify the fresh fit into left/right by slope sign and cache it.
    if (fit_result[2]-fit_result[1])/(fit_result[1]-fit_result[0]) < 0:
        l_fit_result = fit_result
        return l_fit_result
    else:
        r_fit_result = fit_result
        return r_fit_result
def smoothing(lines, pre_frame):
    """Average the most recent `pre_frame` lines (each [x1, y1, x2, y2])
    to damp frame-to-frame jitter."""
    history = np.squeeze(lines)
    total = np.array([0, 0, 0, 0])
    # Walk backwards from the newest entry, stopping after pre_frame samples.
    for count, recent in enumerate(reversed(history)):
        if count == pre_frame:
            break
        total += recent
    return total / pre_frame
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """
    Draw each segment in `lines` onto `img` in place.

    `lines` is the HoughLinesP-style nesting: an iterable of 1-element
    arrays, each holding (x1, y1, x2, y2).

    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line.  Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
# Main capture loop: detect, RANSAC-fit and smooth the two lane lines per frame.
while True:
    fet, img = cam.read()
    img = cv2.resize(img,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
    img_original = img.copy()
    # Edge map: grayscale -> edge-preserving blur -> Canny.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray,7,100,100)
    edges = cv2.Canny(blur,50,150,apertureSize=3)
    lines = cv2.HoughLines(edges,1,np.pi/180,70)
    line_arr = np.squeeze(lines)
    # Split detected lines into left/right by the sign of their angle.
    slope_degree = (np.arctan2(line_arr[:,1] - line_arr[:,3], line_arr[:,0] - line_arr[:,2]) * 180) / np.pi
    L_lines, R_lines = line_arr[(slope_degree>0),:], line_arr[(slope_degree<0),:]
    L_interp = Collect_points(L_lines)
    R_interp = Collect_points(R_lines)
    left_fit_line = ransac_line_fitting(img, L_interp)
    right_fit_line = ransac_line_fitting(img, R_interp)
    # Keep a history of fits and smooth over the last 10 frames.
    L_lane.append(left_fit_line), R_lane.append(right_fit_line)
    left_fit_line = smoothing(L_lane, 10)
    right_fit_line = smoothing(R_lane, 10)
    # NOTE(review): draw_fitline and resultline are not defined anywhere in
    # this file, so this call and the try block below cannot work as written;
    # the bare except silently masks the resulting NameError.
    final = draw_fitline(img, left_fit_line, right_fit_line)
    try:
        rho=resultline[0]
        theta=resultline[1]
        print("rho:", rho)
        print("theta:", theta)
        # Convert (rho, theta) into two far-apart endpoints for drawing.
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0+1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 -1000*(a))
        #cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
        #cv2.line(img, cv2.GetSize(img),(0,0,255), 2)
        #cv2.line(img, lines,(0,0,255), 2)
    except:
        print("can't find line")
    # Stack the original above the annotated frame for display.
    res = np.vstack((img_original,img))
    if cv2.waitKey(1)&0xFF == ord('q'):
        break
    cv2.imshow('img',res)
cv2.destroyAllWindows()
| yondini/Test | Project/ys_test/YS_line.py | YS_line.py | py | 5,204 | python | en | code | 0 | github-code | 13 |
19239598267 | '''
사용자가 킥보드를 선택하면, 선택에 따라 user_taste 값을 업데이트함
필요한 정보
- 선택한 킥보드에 대한 (price, kickboard_time, walk_time)
- user_taste
price, kickboard_time, walk_time 에 대한 각 최댓값
5000, 500, 100
learning_rate(alpha): 0.01
'''
import json
import sys
def main_(argv):
    """Nudge the stored user-taste vector toward one kickboard choice.

    argv: [prog, price, kickboard_time, walk_time, user_taste_json].
    Each attribute is scaled by its maximum (5000, 500, 100), normalised to
    a share of the total, and the taste vector moves toward those shares
    with learning rate 0.01.  Prints the updated vector as "[ a , b , c ]".
    """
    price = int(argv[1])
    kickboard_time = int(argv[2])
    walk_time = int(argv[3])
    user_taste = json.loads(argv[4])
    scaled = [price / 5000, kickboard_time / 500, walk_time / 100]
    total = sum(scaled)
    for idx in range(3):
        gradient = scaled[idx] / total - user_taste[idx]
        user_taste[idx] += gradient * 0.01
    print('[', end='')
    print(user_taste[0], ',', user_taste[1], ',', user_taste[2], end='')
    print(']')
if __name__ == '__main__':
    # CLI args: price, kickboard_time, walk_time, user_taste (JSON list)
    # Example:
    # argv = ['python_calc/user_taste_update.py', '600', '59', '179', '[0.2857142857142857,0.42857142857142855,0.2857142857142857]']
    # main_(sys.argv)
    main_(sys.argv)
| kimdo331/KNU-software-design | kickboard-back/python_calc/user_taste_update.py | user_taste_update.py | py | 1,244 | python | en | code | 0 | github-code | 13 |
73494016336 | '''
Напишите программу, которая умеет шифровать и расшифровывать шифр подстановки. Программа принимает на вход две строки одинаковой длины, на первой строке записаны символы исходного алфавита, на второй строке — символы конечного алфавита, после чего идёт строка, которую нужно зашифровать переданным ключом, и ещё одна строка, которую нужно расшифровать.
Пусть, например, на вход программе передано:
abcd
*d%#
abacabadaba
#*%*d*%
Это значит, что символ a исходного сообщения заменяется на символ * в шифре, b заменяется на d, c — на % и d — на #.
Нужно зашифровать строку abacabadaba и расшифровать строку #*%*d*% с помощью этого шифра. Получаем следующие строки, которые и передаём на вывод программы:
*d*%*d*#*d*
dacabac
Sample Input 1:
abcd
*d%#
abacabadaba
#*%*d*%
Sample Output 1:
*d*%*d*#*d*
dacabac
'''
# Substitution cipher: s1 = plain alphabet, s2 = cipher alphabet,
# s3 = text to encrypt, s4 = text to decrypt.
s1 = input()
s2 = input()
s3 = input()
s4 = input()
# NOTE(review): string_output and dict_decipher are never used below.
string_output = []
dict_encrypt = {}
dict_decipher = {}
# s1 = 'abcd'
# s2 = '*d%#'
# s3 = 'abacabadaba'
# s4 = '#*%*d*%'
# Build the plain -> cipher character map.
for i in range(len(s1)):
    dict_encrypt[s1[i]] = s2[i]
# Encrypt s3 character by character.
for i in range(len(s3)):
    print(dict_encrypt[s3[i]], end= "")
print()
def get_key(dict, chr):
    """Return the first key in *dict* whose value equals *chr*, or None.

    (Parameter names shadow builtins; kept for backward compatibility.)
    """
    return next((key for key, value in dict.items() if value == chr), None)
# Decrypt the ciphertext: invert the mapping by finding, for each cipher
# symbol, the source key that maps to it. No trailing newline is printed.
for i in range(0, len(s4)):
    print(get_key(dict_encrypt, s4[i]), end= '')
| luckychaky/py_stepik | 3_7_2.py | 3_7_2.py | py | 1,838 | python | ru | code | 1 | github-code | 13 |
2886854339 | import cv2
# Reading image file
# Load the image in BGR order (OpenCV default) and show the original.
img = cv2.imread('lena.png')
cv2.imshow('ori.jpg', img)
cv2.waitKey(0)
print(f'\nImg : {img}\n---------------')

# Applying NumPy scalar multiplication on image
# Convert BGR -> RGB, then divide every channel by 0.9 (cv2.divide
# saturates at 255, so this brightens the image).
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(f'\nImgRGB : {imgRGB}\n---------------')
fimg = cv2.divide(imgRGB, 0.9)
cv2.imshow('result.jpg', fimg)
print(f'\nfimg : {fimg}\n---------------')

# Saving the output image
# cv2.imwrite('darkerLib.jpg', fimg)
# img_result = cv2.imread('darkerLib.jpg')

# Block until a key is pressed, then close all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
import functools
import inspect
def check(fn):
    """Decorator that validates call arguments against *fn*'s annotations.

    Positional and keyword arguments whose corresponding parameter carries a
    type annotation are checked with ``isinstance``; a mismatch raises
    ``TypeError``. Unannotated parameters are not checked.

    Fixes vs. the original: removed a leftover debug ``print(params)``,
    replaced the private ``inspect._empty`` sentinel with the public
    ``inspect.Parameter.empty``, hoisted the (immutable) signature out of the
    wrapper so it is computed once at decoration time, and added
    ``functools.wraps`` so the wrapper keeps *fn*'s metadata.
    """
    sig = inspect.signature(fn)
    params = sig.parameters  # OrderedDict of name -> Parameter

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Positional arguments: pair each value with its parameter in order.
        for value, (name, param) in zip(args, params.items()):
            if param.annotation is not inspect.Parameter.empty and not isinstance(value, param.annotation):
                raise TypeError('Wrong param={} {}'.format(name, value))
        # Keyword arguments: look the parameter up by name.
        for name, value in kwargs.items():
            annotation = params[name].annotation
            if annotation is not inspect.Parameter.empty and not isinstance(value, annotation):
                raise TypeError('Wrong param={} {}'.format(name, value))
        return fn(*args, **kwargs)
    return wrapper
# Example: `x` is unannotated so any type passes; `y` must be an int, both
# positionally and as a keyword argument.
@check
def add(x, y: int = 7) -> int: # add = check(add)
    return x + y

# All three calls satisfy the annotation check; they print 9, 10 and 15.
print(add(4, 5))
print(add(4, y=6))
print(add(y=6, x=9))
| sqsxwj520/python | 高阶函数和装饰器/day2/参数注解检查.py | 参数注解检查.py | py | 1,124 | python | en | code | 1 | github-code | 13 |
2142636246 | #!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for Blender Driver demonstration application.
This code illustrates:
- HTTP server in Blender Game Engine as back end.
- JavaScript front end.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# https://docs.python.org/3.5/library/argparse.html
# The import isn't needed because this class uses the base class to get an
# object.
# import argparse
#
# Module for degrees to radian conversion.
# https://docs.python.org/3.5/library/math.html
from math import degrees, radians
#
# Third party modules, in alphabetic order.
#
# Blender library imports, in alphabetic order.
#
# Main Blender Python interface, which is used to get the size of a mesh.
# Import isn't needed because the base class keeps a reference to the interface
# object.
# import bpy
#
# Blender Game Engine KX_GameObject
# Import isn't needed because this class gets an object that has been created
# elsewhere.
# https://www.blender.org/api/blender_python_api_current/bge.types.KX_GameObject.html
#
# Blender Game Engine maths utilities, which can only be imported if running
# from within the Blender Game Engine.
# Import isn't needed because this class gets a Vector from the bpy layer.
# http://www.blender.org/api/blender_python_api_current/mathutils.html
# They're super-effective!
from mathutils import Vector, Matrix, Quaternion
#
# Local imports.
#
# Blender Driver application with background banner.
from . import cursorphysics
#
# Blender Driver application with HTTP server.
import blender_driver.application.http
# Diagnostic print to show when it's imported. Only printed if all its own
# imports run OK.
# Note the separator is a double quote, so this prints the module name quoted.
print('"'.join(('Application module ', __name__, '.')))
class Application(
        blender_driver.application.http.Application, cursorphysics.Application):
    """HTTP-demonstration application: cursor physics plus an HTTP back end.

    Combines the HTTP-server application base with the cursor-physics demo;
    ``self._url`` and ``self._restInterface`` come from those bases.
    """

    def game_initialise(self):
        """One-time game set-up: banner text, REST tree fix-ups, camera move."""
        super().game_initialise()
        # Show the UI URL and the quit instructions on the background banner.
        self._bannerObject.text = "\n".join((
            " ".join(("User interface:", self._url)),
            "Ctrl-Q here to terminate, or ESC to crash."))

        # Point the cursor's subjectPath at the floor object in the REST tree.
        self._restInterface.rest_put(
            ('root', 'floor'), tuple(self._cursorPath) + ('subjectPath',))

        # Empty the gameObjects list in place (the returned list is the live
        # one that was just detached from the tree).
        gameObjects = self._restInterface.rest_delete(('root', 'gameObjects'))
        del gameObjects[:]

        # Animate the camera's world X position out to 20 at the configured
        # linear speed.
        self._restInterface.rest_put({
            'speed': self._cameraLinear,
            'valuePath': ('root', 'camera', 'worldPosition', 0),
            'targetValue': 20.0
        }, ('animations', 'camera_setup', 0))
| sjjhsjjh/blender-driver | applications/httpdemonstration.py | httpdemonstration.py | py | 2,820 | python | en | code | 2 | github-code | 13 |
23607768402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/16 21:40
# @Author : xiezheng
# @Site :
# @File : test_model.py
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch import nn
from torch import optim
import torch.backends.cudnn as cudnn
from prefetch_generator import BackgroundGenerator
from insightface_v2.utils.model_analyse import ModelAnalyse
from insightface_v2.utils.logger import get_logger
from insightface_v2.insightface_data.data_pipe import get_train_loader, get_all_val_data, get_test_loader
from insightface_v2.utils.verifacation import evaluate
from insightface_v2.model.focal_loss import FocalLoss
from insightface_v2.model.models import resnet34, resnet_face18, ArcMarginModel
from insightface_v2.utils.utils import parse_args, AverageMeter, clip_gradient, \
accuracy, get_logger, get_learning_rate, separate_bn_paras, update_lr
from insightface_v2.utils.checkpoint import save_checkpoint, load_checkpoint
from insightface_v2.utils.ver_data import val_verification
from torchvision import transforms
from torch.utils.data import DataLoader
from insightface_v2.model.mobilefacenetv2.mobilefacenetv2_v3_width import Mobilefacenetv2_v3_width
def l2_norm(input, axis=1):
    """Scale *input* to unit L2 norm along *axis* (keeps dimensions)."""
    return torch.div(input, torch.norm(input, 2, axis, True))
def de_preprocess(tensor):
    """Invert the train-time normalisation: x -> x * 0.501960784 + 0.5."""
    scale = 0.501960784
    shift = 0.5
    return tensor * scale + shift
# Horizontal-flip pipeline for a single normalised image tensor:
# un-normalise -> PIL image -> horizontal flip -> tensor -> re-normalise
# with the same mean/std used at train time.
hflip = transforms.Compose([
    de_preprocess,
    transforms.ToPILImage(),
    transforms.functional.hflip,
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.501960784, 0.501960784, 0.501960784])
])
def hflip_batch(imgs_tensor):
    """Return a new batch in which every image is horizontally flipped.

    Each image is passed through the module-level ``hflip`` pipeline; the
    result has the same shape/dtype as the input batch.
    """
    flipped = torch.empty_like(imgs_tensor)
    for index, image in enumerate(imgs_tensor):
        flipped[index] = hflip(image)
    return flipped
def val_evaluate(model, carray, issame, nrof_folds=10, use_flip=True, emb_size=512, batch_size=200):
    """Embed a verification set and score it with k-fold evaluation.

    model:      embedding network, run on CUDA in eval mode.
    carray:     array of preprocessed images, shape (N, C, H, W).
    issame:     pair labels consumed by ``evaluate``.
    use_flip:   if True, add the embedding of the horizontally flipped image
                before L2-normalising (test-time augmentation).
    Returns (mean accuracy, mean best threshold) over the folds.
    """
    model.eval()
    idx = 0
    embeddings = np.zeros([len(carray), emb_size])
    with torch.no_grad():
        # Full batches first...
        while idx + batch_size <= len(carray):
            batch = torch.tensor(carray[idx:idx + batch_size])
            out = model(batch.cuda())
            if use_flip:
                fliped_batch = hflip_batch(batch)
                fliped_out = model(fliped_batch.cuda())
                out = out + fliped_out
            embeddings[idx:idx + batch_size] = l2_norm(out).cpu() # xz: add l2_norm
            idx += batch_size
        # ...then the remaining partial batch, if any.
        if idx < len(carray):
            batch = torch.tensor(carray[idx:])
            out = model(batch.cuda())
            if use_flip:
                fliped_batch = hflip_batch(batch)
                fliped_out = model(fliped_batch.cuda())
                out = out + fliped_out
            embeddings[idx:] = l2_norm(out).cpu() # xz: add l2_norm
    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
    # buf = gen_plot(fpr, tpr)
    # roc_curve = Image.open(buf)
    # roc_curve_tensor = transforms.ToTensor()(roc_curve)
    return accuracy.mean(), best_thresholds.mean()
def board_val(logger, writer, db_name, accuracy, best_threshold, step):
    """Log one validation result and push it to TensorBoard.

    *accuracy* is a fraction in [0, 1] and is reported as a percentage;
    *step* is used both in the log line and as the TensorBoard global step.
    """
    accuracy_pct = accuracy * 100
    message = ('||===>>Test Epoch: [[{:d}]\t\tVal:{}_accuracy={:.4f}%\t\t'
               'best_threshold={:.4f}').format(step, db_name, accuracy_pct, best_threshold)
    logger.info(message)
    writer.add_scalar('val_{}_accuracy'.format(db_name), accuracy_pct, step)
    writer.add_scalar('val_{}_best_threshold'.format(db_name), best_threshold, step)
def test_flip(model, epoch):
    """Evaluate *model* (with flip TTA) on AgeDB-30, LFW and CFP-FP.

    Relies on the module-level globals ``logger``, ``writer`` and the three
    dataset/label pairs loaded in the ``__main__`` block below.
    """
    agedb_accuracy, agedb_best_threshold = val_evaluate(model, agedb_30, agedb_30_issame)
    board_val(logger, writer, 'agedb_30', agedb_accuracy, agedb_best_threshold, epoch)
    lfw_accuracy, lfw_best_threshold = val_evaluate(model, lfw, lfw_issame)
    board_val(logger, writer, 'lfw', lfw_accuracy, lfw_best_threshold, epoch)
    cfp_accuracy, cfp_best_threshold = val_evaluate(model, cfp_fp, cfp_fp_issame)
    board_val(logger, writer, 'cfp_fp', cfp_accuracy, cfp_best_threshold, epoch)
if __name__ == "__main__":
# only widen: model layers_num = 49 ; model size=9.73 MB; model flops=995.61 M
model = Mobilefacenetv2_v3_width(embedding_size=512, width_mult=1.315)
outpath = '/home/liujing/NFS/ICCV_challenge/xz_log/iccv_emore_log_gdc/auxnet/mobilefacenet/' \
'mobilenfacenetv2_width_4e-5_stagetwo_loadfc_magin64_fliped_test_bs200'
# 246
emore_folder = '/mnt/ssd/faces/iccv_challenge/train/ms1m-retinaface-t1'
writer = SummaryWriter(outpath)
logger = get_logger(outpath, 'insightface')
agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame = get_all_val_data(emore_folder)
logger.info("train dataset and val dataset are ready!")
for epoch in range(0, 36):
model_path = '/home/liujing/NFS/ICCV_challenge/xz_log/iccv_emore_log_gdc/auxnet/mobilefacenet/' \
'mobilenfacenetv2_width_4e-5_stagetwo_loadfc_magin64/check_point/checkpoint_{}.pth'.format(epoch)
chechpoint = torch.load(model_path)
model.load_state_dict(chechpoint['model'])
logger.info("epoch={}, model load success!!!".format(epoch))
model = model.cuda()
test_flip(model, epoch)
| CN1Ember/feathernet_mine | quan_table/insightface_v2/test_on_lfw/test_model.py | test_model.py | py | 5,309 | python | en | code | 1 | github-code | 13 |
10567678486 | import os
import yaml
from dotenv import load_dotenv
import schemas as k8s
from .utils import encode_base64
def create_configmaps_and_secrets(
    applications_conf: k8s.ApplicationConfList,
    shared_conf: k8s.SharedConf,
    output_configmap_path: str,
    output_secret_path: str,
):
    """Render per-application ConfigMap and Secret manifests to two YAML files.

    For each application, ``.env.<name>`` from the current working directory
    is loaded into the process environment (override=True); the variables
    listed in ``app_conf.env`` go into a ConfigMap and those in
    ``app_conf.secret`` go (base64-encoded) into a Secret. Documents are
    joined with ``---`` separators.

    NOTE(review): load_dotenv mutates os.environ, so a variable set by an
    earlier application's .env file remains visible for later applications
    unless their .env overrides it -- confirm this bleed-through is intended.
    """
    yaml_configmap: str = ""
    yaml_secret: str = ""
    for app_conf in applications_conf.applications:
        load_dotenv(
            dotenv_path=os.path.join(os.getcwd(), f".env.{app_conf.name}"),
            override=True,
        )
        # Snapshot of the whole process environment, not just the .env file.
        env_dict = dict(os.environ)

        configmap = k8s.ConfigMap(
            metadata=k8s.ConfigMapMetaData(
                name=f"{app_conf.name}-configmap",
                namespace=shared_conf.namespace,
            ),
            data={k: v for k, v in env_dict.items() if k in app_conf.env},
        )
        secret = k8s.Secret(
            metadata=k8s.SecretMetaData(
                name=f"{app_conf.name}-secret",
                namespace=shared_conf.namespace,
            ),
            # Kubernetes Secret data must be base64-encoded.
            data={
                k: encode_base64(v) for k, v in env_dict.items() if k in app_conf.secret
            },
        )
        yaml_configmap += yaml.dump(configmap.model_dump(exclude_none=True)) + "\n---\n"
        yaml_secret += yaml.dump(secret.model_dump(exclude_none=True)) + "\n---\n"

    with open(output_configmap_path, "w") as outfile:
        outfile.write(yaml_configmap)
    with open(output_secret_path, "w") as outfile:
        outfile.write(yaml_secret)
| WujuMaster/K8S-Pydantic | functions/configmaps_secrets.py | configmaps_secrets.py | py | 1,518 | python | en | code | 0 | github-code | 13 |
14430833405 | '''
Complete exercises in section 8.7 (p.75)
CODE:
word = 'banana'
count = 0
for letter in word:
if letter == 'a':
count = count + 1
print(count)
1) - Encapsulate this code in a function named count,
and generalize it so that it accepts the string and the letter as arguments.
2) - Rewrite this function so that instead of traversing the string,
it uses the three-parameter version of find from the previous section.
'''
word = 'banana'


def count(word, l):
    """Count (possibly overlapping) occurrences of *l* in *word*.

    Uses the three-argument form of ``str.find`` as the exercise requires:
    a match is counted at every start position where the substring begins.
    """
    return sum(1 for pos in range(len(word)) if word.find(l, pos) == pos)


print(count(word, 'a'))
| lauramayol/laura_python_core | week_03/labs/06_strings/Exercise_06.py | Exercise_06.py | py | 614 | python | en | code | 0 | github-code | 13 |
8690768832 | import os
import librosa
# specify the directory where the music files are located
directory = '/path/to/music/files'

# loop through all files in the directory
for filename in os.listdir(directory):
    # check if the file is a music file (e.g. .mp3, .wav, etc.)
    if filename.endswith('.mp3') or filename.endswith('.wav'):
        # load the music file
        y, sr = librosa.load(os.path.join(directory, filename))

        # extract the BPM (beat.tempo returns an array; take the first value)
        bpm = librosa.beat.tempo(y=y, sr=sr)[0]

        # extract the key
        # NOTE(review): tonnetz yields tonal-centroid *features*, not a
        # musical key -- confirm this is the intended "key" value.
        key = librosa.feature.tonnetz(y=y, sr=sr)

        # write the metadata to the file
        # NOTE(review): ``librosa.output`` was removed in librosa 0.8, and
        # ``write_yaml`` would not embed metadata into the audio file --
        # verify the installed librosa version / intended output path.
        metadata = {'bpm': bpm, 'key': key}
        librosa.output.write_yaml(os.path.join(directory, filename), metadata)
| aeonborealis/Pangea-Sound-Lab | analyzesound.py | analyzesound.py | py | 736 | python | en | code | 1 | github-code | 13 |
21898361986 | '''
Created on Apr 1, 2012
@author: greg
'''
# Django settings for SBServer project.
import os
def configure(presets):
    """Return the production Django settings as a dict.

    Mirrors the original ``locals()``-returning implementation exactly: the
    result maps every setting name -- and the ``presets`` argument itself,
    which was part of ``locals()`` -- to its value. ``presets`` is otherwise
    unused.
    """
    log_dir = '/var/log'
    admins = (
        ('Greg Soltis', 'greg@tinyfunstudios.com'),
    )
    return {
        'presets': presets,
        'DEBUG': False,
        'TEMPLATE_DEBUG': False,            # mirrors DEBUG
        'ADMINS': admins,
        'MANAGERS': admins,                 # same tuple as ADMINS
        'DB_CONFIG_FILE': 'prod_db.cfg',
        'SHARD_CONFIG_FILE': 'prod_shard.cfg',
        # Make this unique, and don't share it with anybody.
        'SECRET_KEY': 'xc0u18^g0!ac3k%0+2vgglmnr1)x^!o(n6@$m3t^(7l!(#kv!-',
        'MEMCACHE_SERVERS': ['127.0.0.1:11211'],
        'LOG_DIR': log_dir,
        'LOG_FILENAME': os.path.join(log_dir, 'spongebob.log'),
    }
| yixu34/PlutoShareServer | src/plutoshare/config/prod.py | prod.py | py | 619 | python | en | code | 1 | github-code | 13 |
class StrMixin():
    """Mixin rendering an instance as ``attr=value`` pairs joined by ';'.

    Attributes appear in instance-dict insertion order.
    """

    def __str__(self):
        return ';'.join('{}={}'.format(name, value)
                        for name, value in self.__dict__.items())
def sleep(object):
    """Return ``'<name> is sleeping'`` for anything with a ``name`` attribute.

    Returns None when the attribute is missing or cannot be concatenated
    with a string. Fixes vs. the original: the bare ``except: pass``
    swallowed *every* exception and fell through to an implicit None; this
    version only catches the two expected failure modes and returns None
    explicitly.

    (The parameter name shadows the ``object`` builtin; kept for backward
    compatibility with keyword callers.)
    """
    # return object.name + ' is sleeping'
    try:
        return object.name + ' is sleeping'
    except (AttributeError, TypeError):
        return None
class Animal(StrMixin):
    """Simple data holder; string rendering comes from StrMixin."""

    def __init__(self, name, gender, age):
        self.name = name
        self.gender = gender
        self.age = age
class Vet(StrMixin):
    """A veterinary practice; string rendering comes from StrMixin."""

    def __init__(self, location, name):
        self.location = location
        # Fixed: was ``self.names = name`` -- the stray "s" was inconsistent
        # with Animal and broke the module-level ``sleep()`` helper, which
        # reads ``.name``.
        self.name = name
a = Animal('Yuki', 'male', 2)
v = Vet('ul. Kokiche 14', 'dr. Shterev')

print(str(a))
print(str(v))
# Fixed: ``sleep`` is a module-level function, not a method of these classes,
# so the original ``a.sleep()`` / ``v.sleep()`` calls raised AttributeError.
print(sleep(a))
print(sleep(v))
47724177834 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import pyxb
import sample
from pyxb.namespace.builtin import XMLSchema_instance as xsi
class TestTrac0202 (unittest.TestCase):
    """Regression test for PyXB trac issue 202: xsi:schemaLocation emission.

    Serialises a small ``sample`` binding to DOM, adds an
    ``xsi:schemaLocation`` attribute plus the xsi namespace declaration, and
    compares the pretty-printed UTF-8 bytes against a golden string.
    """

    def tearDown (self):
        # Restore the default namespace in case a test changed it.
        pyxb.utils.domutils.BindingDOMSupport.SetDefaultNamespace(sample.Namespace)

    # Golden output; the \t escapes are literal tabs from toprettyxml().
    Expectedt = """<?xml version="1.0" encoding="utf-8"?>
<samplerootelement xmlns="http://sample" xmlns:ns1="http://sample" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="c:\sample.xsd">
\t<sampleelement>
\t\t<ValueAmount ns1:currencyID="abc">100.0</ValueAmount>
\t</sampleelement>
</samplerootelement>
"""
    Expectedd = Expectedt.encode('utf-8')

    def testIssue (self):
        # Build a minimal document: one sampleelement with a currency amount.
        elm = sample.sampleelementType()
        elm.ValueAmount = '100'
        elm.ValueAmount.currencyID = 'abc'
        sam = sample.samplerootelement()
        sam.sampleelement.append(elm)
        bds = pyxb.utils.domutils.BindingDOMSupport()
        bds.setDefaultNamespace(sample.Namespace)
        bds.declareNamespace(xsi)
        samdom = sam.toDOM(bds)
        bds.addAttribute(samdom.documentElement, xsi.createExpandedName('schemaLocation'), "c:\sample.xsd")
        # xsi is probably not referenced elsewhere, so add the XMLNS declaration too
        bds.addXMLNSDeclaration(samdom.documentElement, xsi)
        xmld = samdom.toprettyxml(encoding = "utf-8")
        self.assertEqual(self.Expectedd, xmld)
# Standard unittest CLI entry point.
if __name__ == '__main__':
    unittest.main()
| pabigot/pyxb | tests/trac/trac-0202/check.py | check.py | py | 1,553 | python | en | code | 128 | github-code | 13 |
27184056063 | #%%
from web_scrap_tracker import Tracker
class Main():
    """Interactive front end for the tracker scraper.

    NOTE(review): ``__new__`` returns the result of ``initiate`` instead of
    an instance, so ``Main()`` evaluates to the tracker result (dict, run()
    output, or None) and ``__init__`` is never called -- Python skips
    ``__init__`` when ``__new__`` does not return an instance of the class.
    """

    def __init__(self,):
        pass

    def __new__(self,):
        # Here ``self`` is actually the class; ``self.initiate(self,)``
        # therefore calls the plain function with the class as its argument.
        return self.initiate(self,)

    def initiate(self,):
        """Prompt for user name / hashtag / mode and run the tracker(s)."""
        user_name= str(input(''))
        hashtag= str(input(''))
        options= str(input('Escolha uma das opções entre [ 1 / 2 / 3 ]:\n 1. Competitivo\n 2. Premier\n 3. Ambos'))

        if options == "1":
            return Tracker(user_name= user_name, hashtag= hashtag, competitive= True).run()
        elif options == "2":
            return Tracker(user_name= user_name, hashtag= hashtag, premier= True).run()
        elif options == "3":
            # Run both modes and bundle the results.
            competitive= Tracker(user_name= user_name, hashtag= hashtag, competitive= True).run()
            premier= Tracker(user_name= user_name, hashtag= hashtag, premier= True).run()
            return {
                "competitive": competitive,
                "premier": premier,
            }
        else:
            # print returns None, so invalid input yields None.
            return print('Nenhuma das opções foi escolhida.')
#%%
if __name__ == "__main__":
principal= Main()
print(principal)
#%% | nalyking/tracker_scraping | main.py | main.py | py | 1,108 | python | en | code | 0 | github-code | 13 |
18232768924 | """
Получить сборочные задания в поставке
https://openapi.wildberries.ru/#tag/Marketplace-Postavki/paths/~1api~1v3~1supplies~1{supplyId}~1orders/get
Возвращает сборочные задания, закреплённые за поставкой.
Path Parameters
supplyId (REQUIRED) -- string -- Example: WB-GI-1234567 -- ID поставки
Response Schema: application/json
orders -- Array of objects (SupplyOrder)
Responses samples
200
{"orders": [{
"id": 13833711,
"rid": "f884001e44e511edb8780242ac120002",
"createdAt": "2022-05-04T07:56:29Z",
"warehouseId": 658434,
"offices": [...],
"user": {...},
"skus": [...],
"price": 0,
"convertedPrice": 0,
"currencyCode": 0,
"convertedCurrencyCode": 0,
"orderUid": "string",
"nmId": 0,
"chrtId": 0,
"article": "one-ring-7548",
"isLargeCargo": true}]
}
400
{ "code": "IncorrectParameter",
"message": "Передан некорректный параметр"}
401
proxy: unauthorized -- Токен отсутствует
or
proxy: invalid token -- Токен недействителен
or
proxy: not found -- Токен удален
403
{ "code": "AccessDenied",
"message": "Доступ запрещён"}
404
{ "code": "NotFound",
"message": "Не найдено"}
409
{ "code": "FailedToAddSupplyOrder",
"message": "Не удалось закрепить сборочное задание за поставкой.
Убедитесь, что сборочное задание и поставка удовлетворяют всем необходимым требованиям."}
500
{ "code": "InternalServerError",
"message": "Внутренняя ошибка сервиса"}
"""
from __future__ import annotations
import aiohttp
import asyncio
import json
from wb_secrets import std_token as __token
URL = "https://suppliers-api.wildberries.ru/api/v3/supplies/{supplyId}/orders"
async def market_get_supplies_order_tasks_by_id(supplyId: str) -> dict | tuple:
    """Fetch the assembly tasks attached to a WB supply.

    Returns the parsed JSON body on HTTP 200, otherwise a
    ``(status_code, response_text)`` tuple for the caller to inspect.
    """
    request_url = URL.format(supplyId=supplyId)
    headers = {
        'Authorization': __token
    }
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url=request_url) as response:
            if response.status != 200:
                return response.status, await response.text()
            return await response.json()
if __name__ == '__main__':
    # Ad-hoc manual check: fetch one supply and pretty-print the response.
    import pprint
    data = {
        "supplyId": "WB-GI-57989471",
    }
    pprint.pp(asyncio.run(market_get_supplies_order_tasks_by_id(**data)))
| BeliaevAndrey/WB_scripts | scripts/marketplace/supplies/market_get_supplies_order_tasks_by_id.py | market_get_supplies_order_tasks_by_id.py | py | 2,611 | python | ru | code | 0 | github-code | 13 |
73936577939 | import jax
import jax.numpy as jnp
import flax.linen as nn
from flax import struct
from jax.nn.initializers import Initializer
from typing import Optional
from utils import get_2d_sincos_pos_embed, apply_masks, repeat_interleave_batch
@struct.dataclass
class iJEPAConfig:
    """Static hyper-parameters shared by the i-JEPA encoder/predictor.

    Fixed vs. the original: ``img_shape`` holds a (H, W, C) tuple but was
    annotated ``int``, and ``dtype`` holds a JAX/NumPy dtype object, not a
    string. Field names, order and default values are unchanged.
    """
    img_shape: tuple = (28, 28, 1)            # (H, W, C)
    patch_size: int = 4
    n_classes: int = 10
    n_patch: int = (28//4)**2                 # patches per image (H/patch)^2
    n_layer: int = 4
    n_head: int = 4
    n_embd: int = 256
    predictor_n_embd: int = 128
    n_pred: int = 4                           # number of target patches predicted
    dropout: float = 0.1
    use_bias: bool = False
    use_cls_token: bool = False
    dtype: Optional[jnp.dtype] = jnp.float32
class Attention(nn.Module):
    """Multi-head self-attention with fused QKV projection and dropout.

    NOTE: the reshape below requires ``embed_dim == C`` (the input channel
    dimension), since Q/K/V are split out of a ``3 * C`` projection but
    ``head_dim`` is derived from ``embed_dim``.
    """
    embed_dim: int
    n_head: int
    use_bias: bool
    dropout: float
    dtype: Optional[str]

    @nn.compact
    def __call__(self, x, train):
        B, T, C = x.shape
        assert C % self.n_head == 0, "Embedding dimensionality must be evenly divible by the number of heads"
        head_dim = self.embed_dim // self.n_head
        # Single dense layer produces Q, K and V in one pass.
        c_attn = nn.Dense(3 * C, use_bias=self.use_bias, dtype=self.dtype)(x) # (B, T, 3*C)
        q, k, v = jnp.split(c_attn, 3, axis=-1) # (B, T, C)
        q, k, v = map(lambda arr: arr.reshape(B, T, self.n_head, head_dim).swapaxes(1, 2), [q, k, v]) # (B, nh, T, hd)
        # Scaled dot-product attention.
        attn = q @ k.swapaxes(-2, -1) / jnp.sqrt(k.shape[-1]) # (B, nh, T, hd) @ (B, nh, hd, T) -> (B, nh, T, T)
        attn = jax.nn.softmax(attn, axis=-1)
        attn = nn.Dropout(self.dropout)(attn, deterministic=not train)
        y = attn @ v # (B, nh, T, T) @ (B, T, C) -> (B, nh, T, hd)
        y = y.swapaxes(1, 2).reshape(B, T, C) # (B, T, nh, hd) -> (B, T, C)
        # Output projection plus residual dropout.
        c_proj = nn.Dense(C, use_bias=self.use_bias, dtype=self.dtype)(y) # (B, T, C)
        x = nn.Dropout(self.dropout)(c_proj, deterministic=not train)
        return x
class MLP(nn.Module):
    """Transformer feed-forward block: Dense(4C) -> GELU -> Dense(C), dropout."""
    dropout: float
    use_bias: bool
    dtype: Optional[str]

    @nn.compact
    def __call__(self, x, train=True):
        _, _, C = x.shape
        # Standard 4x hidden expansion.
        x = nn.Dense(C * 4, use_bias=self.use_bias, dtype=self.dtype)(x)
        x = jax.nn.gelu(x)
        x = nn.Dropout(self.dropout)(x, deterministic=not train)
        x = nn.Dense(C, use_bias=self.use_bias, dtype=self.dtype)(x)
        x = nn.Dropout(self.dropout)(x, deterministic=not train)
        return x
class Block(nn.Module):
    """Pre-norm transformer block: x + Attn(LN(x)), then x + MLP(LN(x))."""
    n_embd: int
    n_head: int
    dropout: float
    use_bias: bool
    dtype: Optional[str]

    def setup(self):
        self.ln_1 = nn.LayerNorm(epsilon=1e-6, use_bias=self.use_bias, dtype=self.dtype)
        self.attn = Attention(self.n_embd, self.n_head, self.use_bias, self.dropout, dtype=self.dtype)
        self.ln_2 = nn.LayerNorm(epsilon=1e-6, use_bias=self.use_bias, dtype=self.dtype)
        self.mlp = MLP(self.dropout, self.use_bias, self.dtype)

    def __call__(self, x, train):
        x = x + self.attn(self.ln_1(x), train)
        x = x + self.mlp(self.ln_2(x), train)
        return x
class PatchEmbed(nn.Module):
    """Patchify an image with a strided convolution.

    (B, H, W, C) -> (B, (H/patch)*(W/patch), n_embd); each non-overlapping
    patch becomes one embedding vector.
    """
    patch_size: int
    n_embd: int

    @nn.compact
    def __call__(self, x):
        B, H, W, C = x.shape
        x = nn.Conv(
            self.n_embd,
            kernel_size=(self.patch_size, self.patch_size),
            strides=(self.patch_size, self.patch_size)
        )(x)
        return x.reshape(B, -1, self.n_embd)
def custom_2d_sincos_initializer(embed_dim, grid_size, cls_token) -> Initializer:
    """Return a flax initializer yielding fixed 2-D sin-cos position embeddings.

    The returned ``init`` ignores ``key`` and ``dtype`` (the embedding is
    deterministic) and asserts that the precomputed table matches the
    requested ``shape``.
    """
    def init(key, shape, dtype):
        pos_embd = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token)
        # Fixed: the assertion message was missing its f-prefix, so the
        # {shape} placeholders were never interpolated.
        assert pos_embd.shape == shape, f"Expected shape {shape} but got {pos_embd.shape}"
        return pos_embd
    return init
class ViTPredictor(nn.Module):
    """Narrow ViT that predicts target-patch representations from context.

    Projects encoder outputs down to ``predictor_n_embd``, appends one
    learned mask token per target position (with the target's positional
    embedding), runs transformer blocks, and projects the target slots back
    up to ``n_embd``.
    """
    config: iJEPAConfig

    def setup(self):
        c = self.config
        self.pred_embed = nn.Dense(c.predictor_n_embd)
        # Single learned mask token, repeated for every target position.
        self.mask_token = nn.Embed(1, c.predictor_n_embd)
        pos_embd_init = custom_2d_sincos_initializer(
            c.predictor_n_embd,
            c.img_shape[0]//c.patch_size,
            c.use_cls_token)
        # Fixed sin-cos table, looked up per patch index via pos_embd().
        self._pos_embd = nn.Embed(
            num_embeddings=c.n_patch,
            features=c.predictor_n_embd,
            dtype=c.dtype,
            embedding_init=pos_embd_init,
            name="pos_embed")
        self.drop = nn.Dropout(c.dropout)
        self.h = [
            Block(
                c.predictor_n_embd,
                c.n_head,
                c.dropout,
                c.use_bias,
                c.dtype
            ) for _ in range(c.n_layer)]
        self.ln_f = nn.LayerNorm(epsilon=1e-6, use_bias=c.use_bias, dtype=c.dtype)
        # Project predictor width back to the encoder width.
        self.pred_proj = nn.Dense(c.n_embd, use_bias=True, dtype=c.dtype)

    def __call__(self, x, context_mask, target_mask, train):
        # x: context-encoder output; masks: per-batch patch indices.
        x = self.pred_embed(x)
        x = x + self.pos_embd(context_mask)

        B, N_ctxt, D = x.shape
        pos_embd = self.pos_embd(target_mask)
        pred_tokens = self.mask_token(jnp.arange(1))
        pred_tokens = pred_tokens[None,].repeat(B, axis=0)
        pred_tokens = pred_tokens.repeat(self.config.n_pred, axis=1)
        # Each mask token is tagged with its target position.
        pred_tokens += pos_embd
        x = jnp.concatenate([x, pred_tokens], axis=1)
        # x = self.drop(x, deterministic=not train)
        for block in self.h:
            x = block(x, train)
        x = self.ln_f(x)
        # Keep only the target slots appended after the context tokens.
        x = x[:, N_ctxt:]
        x = self.pred_proj(x)
        return x

    def pos_embd(self, index):
        # Per-batch positional lookup: index has shape (B, K).
        return jax.vmap(self._pos_embd)(index)
class ViT(nn.Module):
    """Vision transformer encoder used for both context and target encoders.

    Optionally keeps only the patches selected by ``mask`` before the
    transformer blocks. ``linear_head`` is defined but not used in
    ``__call__`` (the encoder returns patch representations, not logits).
    """
    config: iJEPAConfig

    def setup(self):
        c = self.config
        self.patch_embd = PatchEmbed(c.patch_size, c.n_embd)
        pos_embd_init = custom_2d_sincos_initializer(
            c.n_embd,
            c.img_shape[0]//c.patch_size,
            c.use_cls_token)
        self.pos_embd = nn.Embed(
            num_embeddings=c.n_patch,
            features=c.n_embd,
            dtype=c.dtype,
            embedding_init=pos_embd_init,
            name="pos_embed")
        self.drop = nn.Dropout(c.dropout)
        self.h = [
            Block(
                c.n_embd,
                c.n_head,
                c.dropout,
                c.use_bias,
                c.dtype
            ) for _ in range(c.n_layer)]
        self.ln_f = nn.LayerNorm(epsilon=1e-6, use_bias=c.use_bias, dtype=c.dtype)
        self.linear_head = nn.Dense(c.n_classes, use_bias=c.use_bias, dtype=c.dtype)

    def __call__(self, x, mask=None, train=None):
        x = self.patch_embd(x)
        B, T, D = x.shape
        x = x + self.pos_embd(jnp.arange(T))
        # When given, keep only the selected (context) patches.
        if mask is not None:
            x = apply_masks(x, mask)
        x = self.drop(x, deterministic=not train)
        for block in self.h:
            x = block(x, train)
        x = self.ln_f(x)
        return x
class iJEPA(nn.Module):
    """
    The goal of Image-JEPA is "given a context block, predict the
    representations of various target blocks in the same image."
        - Assran et al. 2023 Section 3: Methods

    Here instead of using blocks (randomly selected subsets of
    images) than can overlap we partition naively over the patches.
    """
    config: iJEPAConfig

    def setup(self) -> None:
        self.context_encoder = ViT(self.config)
        self.predictor = ViTPredictor(self.config)
        self.target_encoder = ViT(self.config)

    def __call__(self, imgs, rng, train=None):
        # rng drives the random target/context partition inside target().
        latent_target, target_mask, context_mask = self.target(imgs, rng, train=train)
        latent_pred = self.context(imgs, context_mask, target_mask)
        return latent_target, latent_pred

    def context(self, imgs, context_mask, target_mask, train=None):
        """
        Context encoder produces latent representation conditioned
        only on the context patches. Predictor takes context encoder
        output, concatenates the missing target patches positional
        encoding and predicts latent representation of the target
        patches.
        """
        z = self.context_encoder(imgs, context_mask, train=train)
        z = self.predictor(z, context_mask, target_mask, train=train)
        return z

    def target(self, imgs, rng, train=None):
        """
        Target encoder is used to obtain the 'true' latent representation
        of unmasked images. The target patches representation are indexed
        and returned to be used in latent reconstruction loss.

        The targets are wrapped in stop_gradient: only the predictor /
        context path is trained through the loss.
        """
        h = self.target_encoder(imgs, train=train)
        B, T, _ = h.shape
        target_mask, context_mask = get_masks(B, T, self.config.n_pred, rng)
        targets = apply_masks(h, target_mask)
        return jax.lax.stop_gradient(targets), target_mask, context_mask
def get_masks(n_batch, n_patch, n_pred, rng):
    """Randomly partition patch indices per example.

    For every batch row, a permutation of ``jnp.arange(n_patch)`` is drawn
    with its own PRNG key; the first ``n_pred`` indices become the target
    mask and the remainder the context mask.

    Returns (target_mask (B, n_pred), context_mask (B, n_patch - n_pred)).
    """
    per_example_keys = jax.random.split(rng, n_batch)
    all_indices = jnp.tile(jnp.arange(n_patch)[None, :], (n_batch, 1))
    shuffled = jax.vmap(jax.random.permutation)(per_example_keys, all_indices)
    return shuffled[:, :n_pred], shuffled[:, n_pred:]
def apply_masks(arr, mask_indicies):
    """Gather per-example patches: out[b, k] = arr[b, mask_indicies[b, k]].

    arr: (B, T, D); mask_indicies: (B, K) integer indices -> (B, K, D).
    (Shadows the ``apply_masks`` imported from ``utils`` at the top of the
    module; callers in this file use this definition.)
    """
    return jnp.take_along_axis(arr, mask_indicies[:, :, None], axis=1)
41471740795 | '''
We can also pass arguments to the function using args keyword.
In this example, we create two process that calculates the cube and squares of numbers and prints all results to the console.
'''
import time
import multiprocessing
def calc_square(numbers):
for n in numbers:
print('square ' + str(n*n))
def calc_cube(numbers):
for n in numbers:
print('cube ' + str(n*n*n))
if __name__ == "__main__":
arr = [2,3,8]
p1 = multiprocessing.Process(target=calc_square, args=(arr,)) #creates process p1
p2 = multiprocessing.Process(target=calc_cube, args=(arr,)) #creates the process p2
p1.start() #it will start the process p1
p2.start() #it will start the process p2
p1.join() #it will wait until the execution of this process is over
p2.join()
print("Done!")
'''We can also create more than one process at atime.
In this example, at first we create one process which is process1, this process just calculates the square of a number and at the same
time second process process2 is calculates the cube of a number.
'''
'''
output
square 4
square 9
square 64
cube 8
cube 27
cube 512
Done!
'''
| SumanMore/Multiprocessing-in-python | P3 Multiprocessing demonstrating args.py | P3 Multiprocessing demonstrating args.py | py | 1,202 | python | en | code | 0 | github-code | 13 |
25038307711 | import speech_recognition as sr
r = sr.Recognizer()
file = sr.AudioFile('..\AudioFiles\harvard1_5sec.wav')
with file as source:
audio = r.record(source, duration=4.0)
try:
recog = r.recognize_wit(audio, key = "6Y4KLO4YTWDQSYQXGONPHAVB3IRSWFRN")
print("You said: " + recog)
except sr.UnknownValueError:
print("could not understand audio")
except sr.RequestError as e:
print("Could not request results ; {0}".format(e))
| msalem-twoway/TwoWayVoice | AudioFiles/transcribeFile.py | transcribeFile.py | py | 456 | python | en | code | 0 | github-code | 13 |
27212656142 | # _*_ coding: utf-8 _*_
"""
inst_proxies.py by xianhu
"""
from ..utilities import ResultProxies
class Proxieser(object):
"""
class of Proxieser, must include function working()
"""
def working(self) -> ResultProxies:
"""
working function, must "try-except" and return ResultProxies()
"""
try:
result_proxies = self.proxies_get()
except Exception as excep:
kwargs = dict(excep_class=self.__class__.__name__, excep_string=str(excep))
result_proxies = ResultProxies(state_code=-1, proxies_list=None, **kwargs)
return result_proxies
def proxies_get(self) -> ResultProxies:
"""
get proxies from web or database. Parameters and returns refer to self.working()
"""
raise NotImplementedError
| xianhu/PSpider | spider/instances/inst_proxies.py | inst_proxies.py | py | 827 | python | en | code | 1,804 | github-code | 13 |
22444991507 | from typing import Callable, List, Optional
from .base import BaseEngine, TResult
class SingleThreadEngine(BaseEngine):
def __init__(self, func: Callable[..., TResult], *args, **kwargs):
super().__init__(func)
def run(
self,
args_list: Optional[List[tuple]] = None,
kwargs_list: Optional[List[dict]] = None
) -> List[TResult]:
if args_list is None:
args_list = [()] * len(kwargs_list)
if kwargs_list is None:
kwargs_list = [{}] * len(args_list)
assert len(args_list) == len(kwargs_list), "args_list and kwargs_list must have the same length"
return [self.fn(*args, **kwargs) for args, kwargs in zip(args_list, kwargs_list)]
| minato-ellie/matrix-runner | matrunner/engine/single_thread.py | single_thread.py | py | 740 | python | en | code | 0 | github-code | 13 |
70160338577 | # coding=utf8
"""Continuous analog input task with optional logging (TDMS files).
Demo script for acquiring a continuous set of analog
values with a National Instruments DAQ device.
To test this script, the NI MAX (Measurement & Automation
Explorer) has been used to create simulated devices.
In this test, a simulated device NI PCIe-6321 with 16 analog input (AI)
ports was created and named "Dev1".
The channel configuration string for analog input tasks always
follows this pattern:
DeviceName/PortName,ConfigMode,MinVoltage,MaxVoltage
where ConfigMode is an integer from this list
(see also argument terminalConfig from command
DAQmxCreateAIVoltageChan):
DAQmx_Val_Cfg_Default = 0,
DAQmx_Val_Diff = 1,
DAQmx_Val_RSE = 2,
DAQmx_Val_NRSE = 3,
DAQmx_Val_PseudoDiff = 4
Hint: It depends on the NI DAQ devices, if they allow
integrating different devices into the same measurement
task or not. Many devices do not allow this.
Data from a continuous task can be obtained by regularily
calling getVal / copyVal or by enabling the TDMS file logging
technique.
Reading TDMS files via Python is possible by the package npTDMS
(https://pypi.org/project/npTDMS).
"""
import time
# initialize the plugin for continuous analog input tasks
# (dataIO is an itom builtin; "Dev1" must exist, e.g. as an NI MAX simulated device)
plugin = dataIO(
    "NI-DAQmx",
    "analogInput",
    taskName="demoAiContinuous",
    taskMode="continuous",
    samplingRate=10000)
plugin.showToolbox()
# The NI-DAQ device uses the 'samplesPerChannel' in case of continuous
# tasks to define the internal buffer size. However if the number of
# samples, obtained by 'samplesPerChannel' * noOfChannels is lower
# than the values in the following table, NI-DAQ uses the values from
# the table:
#
# no sampling rate: 10000 samples
# 0 - 100 samples / sec: 1 kS
# 101 - 10000 S/s: 10 kS
# 10001 - 1000000 S/s: 100 kS
# else: 1 MS
plugin.setParam("samplesPerChannel", 5000)
# if this value is -1, the NI-DAQ device will calculate the internal
# buffer size depending on the samplingRate and the parameter
# 'samplesPerChannel'. Else, the internal buffer size can be overwritten
# by this parameter.
plugin.setParam("bufferSize", -1)
# the readTimeout is important for continuous acquisitions.
# It is considered during getVal/copyVal.
# If it is set to -1.0, each getVal/copyVal command will wait
# until 'samplesPerChannel' samples have been received for each channel.
# This cannot be stopped.
# If it is set to 0.0, getVal/copyVal will always return immediately
# and return up to 'samplesPerChannel' values per channel. The dataObject
# argument can also have less number of columns after these calls.
# Values > 0.0 are considered as real timeout. If the requested
# number of samples per channel are not received within this timeout,
# an error is raised (Status Code: -200284).
# In this example, the immediate return is used, but getVal will be
# called after a certain delay to wait for a certain number of values
# before getting them.
plugin.setParam("readTimeout", 0.0)
# assign some channels (format: Device/Port,ConfigMode,MinVoltage,MaxVoltage)
plugin.setParam("channels", "Dev1/ai0,0,-10,10;Dev1/ai1,0,-8,8")
# Step 1: LoggingMode is a fast logging, in this mode, all acquired samples
# are automatically logged into the given tdms file. You must not use
# getVal or copyVal in this logging mode.
plugin.setParam("loggingMode", 1)
plugin.setParam("loggingFilePath", "D:/temp/demo_ai_continuous.tdms")
# when opening a tdms file in append mode and if the group name
# already exists, a new group with a '#number' suffix will be appended
# to the group name.
plugin.setParam("loggingGroupName", "group1")
# 'open': Always appends data to an existing TDMS file. If it does not exist
# yet, the task start operation will return with an error.
# 'openOrCreate': Creates a new TDMS file or appends data to the existing one.
# 'createOrReplace' (default): Creates a new TDMS file or replaces an existing
# one.
# 'create': Newly creates the TDMS file. If it already exists, a task start
# operation will return with an error.
plugin.setParam("loggingOperation", "createOrReplace")
# configure the task based on the configurations above.
plugin.startDevice()
# Demo 1: ten one-second runs; samples stream straight to the TDMS file.
for i in range(0, 10):
    t = time.time()
    print(f"Fast, direct logging run {i+1}/10...", end="")
    # start the continuous task again
    plugin.acquire()
    # wait for 1 seconds (data are acquired and stored into the file)
    time.sleep(1)
    # stop the task
    plugin.stop()
    print(" done in %.3f s" % (time.time() - t))
# Step 2: choose another logging type. Usually it is recommended to
# stop the device before changing the logging modes. However,
# if the device is still started when the logging parameters
# will be changed, it will automatically be stopped and restarted
# again.
# switch to loggingMode 2: Here only data that has been received
# via getVal / copyVal is additionally stored in the tdms file
plugin.setParam("loggingMode", 2)
plugin.setParam("loggingFilePath", "D:/temp/demo_ai_continuous2.tdms")
plugin.setParam("loggingOperation", "createOrReplace")
print(f"Simultaneous logging during getVal/copyVal (5sec)...", end="")
t = time.time()
# start the continuous task again
plugin.acquire()
for i in range(0, 10):
    # wait a little bit
    time.sleep(0.5)
    # receive data that is automatically stored in the file, too
    # getVal has to be called faster than the internal buffer of
    # the device will exceed.
    plugin.getVal(dataObject())
# stop the task
plugin.stop()
print(" done in %.2f s" % (time.time() - t))
# stop the device (if there are still running
# tasks, they will also be stopped here)
plugin.stopDevice()
| itom-project/plugins | niDAQmx/demo/demo_ai_tdms_logging.py | demo_ai_tdms_logging.py | py | 5,676 | python | en | code | 1 | github-code | 13 |
10333481807 | from fastapi import FastAPI, HTTPException, UploadFile, File
from fastapi.responses import StreamingResponse
from typing import List, Optional
from datetime import datetime
import os
# Single FastAPI application instance; the route decorators below attach to it.
app = FastAPI()
# In-memory metadata registry; blobs live on disk under `upload_folder`.
# NOTE(review): process-local and non-persistent — a restart loses all metadata.
files = []

upload_folder = "uploaded_files"
# exist_ok=True avoids the check-then-create race of an explicit exists() guard.
os.makedirs(upload_folder, exist_ok=True)
def get_file_by_id(file_id: str):
    """Return the metadata dict whose 'file_id' matches, or None when absent."""
    return next((entry for entry in files if entry['file_id'] == file_id), None)
@app.post("/files/upload")
async def upload_file(file: UploadFile = File(...), metadata: Optional[dict] = None):
    """Store an uploaded file on disk and register its metadata.

    Returns the new entry's ``file_id``. Extra ``metadata`` keys are merged
    into the stored record (they may shadow the built-in keys).
    """
    # Derive the next id from the maximum existing id rather than len(files):
    # len() recycles ids after a delete, which made two records share one id.
    file_id = str(max((int(f['file_id']) for f in files), default=0) + 1)

    # Persist the payload. NOTE(review): the name comes from the client, so two
    # uploads with the same filename overwrite each other on disk.
    file_path = os.path.join(upload_folder, file.filename)
    with open(file_path, "wb") as local_file:
        local_file.write(file.file.read())

    # Store metadata
    file_metadata = {
        "file_id": file_id,
        "file_name": file.filename,
        "created_at": datetime.now(),
        "size": os.path.getsize(file_path),
        "file_type": file.content_type,
        "local_path": file_path,
    }
    if metadata:
        file_metadata.update(metadata)

    files.append(file_metadata)
    return {"file_id": file_id}
@app.get("/files/{file_id}")
async def read_file(file_id: str):
    """Stream a stored file back; 404 when the id (or its on-disk blob) is unknown."""
    file = get_file_by_id(file_id)
    if file is None or not os.path.exists(file['local_path']):
        # Also 404 when metadata exists but the blob vanished from disk,
        # instead of surfacing a FileNotFoundError as an HTTP 500.
        raise HTTPException(status_code=404, detail="File not found")
    # NOTE(review): the handle is handed to StreamingResponse, which iterates and
    # eventually releases it — confirm the response closes the file on completion.
    return StreamingResponse(content=open(file['local_path'], "rb"), media_type=file['file_type'])
@app.put("/files/{file_id}")
async def update_file(file_id: str, file: UploadFile = File(...), metadata: Optional[dict] = None):
    """Replace an existing file's contents and refresh its metadata; 404 for unknown ids."""
    existing_file = get_file_by_id(file_id)
    if existing_file is None:
        raise HTTPException(status_code=404, detail="File not found")

    # Save updated file data to local folder
    new_path = os.path.join(upload_folder, file.filename)
    with open(new_path, "wb") as local_file:
        local_file.write(file.file.read())

    # If the client renamed the file, drop the old blob so it does not leak on disk.
    old_path = existing_file['local_path']
    if old_path != new_path:
        try:
            os.remove(old_path)
        except FileNotFoundError:
            pass  # already gone; nothing to clean up

    # Update metadata
    existing_file['file_name'] = file.filename
    existing_file['size'] = os.path.getsize(new_path)
    existing_file['file_type'] = file.content_type
    existing_file['created_at'] = datetime.now()
    existing_file['local_path'] = new_path
    if metadata:
        existing_file.update(metadata)
    return existing_file
@app.delete("/files/{file_id}")
async def delete_file(file_id: str):
    """Remove a file's blob and its metadata entry; 404 for unknown ids."""
    global files
    file = get_file_by_id(file_id)
    if file is None:
        raise HTTPException(status_code=404, detail="File not found")
    try:
        os.remove(file['local_path'])  # Delete the file from the local folder
    except FileNotFoundError:
        # Metadata without a blob: still drop the record instead of returning 500.
        pass
    files = [f for f in files if f['file_id'] != file_id]
    return {"message": "File deleted successfully"}
@app.get("/files")
async def list_files():
    """Return the metadata records of every stored file (not the file contents)."""
    return files
| insane4u00/dropbox-equivalent-service | main.py | main.py | py | 2,826 | python | en | code | 0 | github-code | 13 |
74556971538 | from dataclasses import fields
from tkinter import Widget
from django import forms
from .models import Employee, Trained
class EmployeeForm(forms.ModelForm):
    """Create/edit form for an Employee's identity fields."""

    class Meta:
        model = Employee
        fields = "name", "surname", "card"
        labels = {
            "name": "Jméno:",
            "surname": "Příjmení:",
            "card": "Číslo karty:"
        }
        # Keys fixed from the misspelled "max_lenght": Django only picks up
        # messages registered under the real validator key "max_length".
        error_messages = {
            "name": {
                "required": "Jméno musíš vyplnit!",
                "max_length": "Co to je za jmnéno? Zkus to znovu, lépa a zkráceně!"
            },
            "surname": {
                "required": "Příjmení musíš vyplnit!",
                "max_length": "Příjmnení musí být kratší jinak se dál nedostaneš!"
            },
            "card": {
                "required": "Číslo karty musíš vyplnit!",
                "max_length": "Číslo karty je moc dlouhé!"
            }
        }
class InfoEmployeeForm(forms.ModelForm):
    """Form for the free-text evaluation fields of an Employee."""

    class Meta:
        model = Employee
        fields = "merit", "info"
        labels = {"merit": "Klady: ", "info": "Poznámky k zamšstnanci: "}
        # Keys fixed from the misspelled "max_lenght" so the custom
        # "max_length" messages are actually used by Django.
        error_messages = {
            "merit": {
                "required": "Nastala nějaká chyba, kontaktuj programátora.",
                "max_length": "Bohužel jsi přesáhl maximální limit povolenách znaků!"
            },
            "info": {
                "required": "Nastala nějaká chyba, kontaktuj programátora",
                "max_length": "Bohužel jsi přesáhl maximální limit povolenách znaků!"
            },
        }
class TrainedForm(forms.ModelForm):
    """Create/edit form for a training (Trained) record."""

    class Meta:
        model = Trained
        fields = "name",
        labels = {
            "name": "Název školení "
        }
        # Key fixed from the misspelled "max_lenght" to the real validator
        # key "max_length".
        error_messages = {
            "name": {
                "max_length": "Bohužel jsi přesáhl maximální limit povolenách znaků!"
            }
        }
class AddTrainedForm(forms.ModelForm):
    """Form for assigning completed trainings to an employee."""

    class Meta:
        model = Employee
        fields = "trained",
        # Label key must match the form field name ("trained"); the previous
        # "name" key never applied because this form has no "name" field.
        labels = {
            "trained": "Název školení"
        }
5574317566 | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('tickets',views.tickets,name='tickets'),
path('view_ticket/<int:id>',views.view_ticket,name='view_ticket'),
path('change_ticket_status/<int:id>',views.change_ticket_status,name='change_ticket_status'),
path('change_ticket_category/<int:id>',views.change_ticket_category,name='change_ticket_category'),
path('change_ticket_priority/<int:id>',views.change_ticket_priority,name='change_ticket_priority'),
path('change_ticket_owner/<int:id>',views.change_ticket_owner,name='change_ticket_owner'),
path('add_ticket_reply/<int:id>',views.add_ticket_reply,name='add_ticket_reply'),
path('category',views.category,name='category'),
path('priority',views.priority,name='priority'),
path('status',views.status,name='status'),
path('new_category',views.new_category,name='new_category'),
path('new_priority',views.new_priority,name='new_priority'),
path('new_status',views.new_status,name='new_status'),
path('edit_category/<int:id>',views.edit_category,name='edit_category'),
path('edit_priority/<int:id>',views.edit_priority,name='edit_priority'),
path('edit_status/<int:id>',views.edit_status,name='edit_status'),
path('new_ticket',views.client_create_ticket,name='new_ticket'),
path('new_company',views.new_company,name='new_company'),
path('my_tickets',views.my_tickets,name='my_tickets'),
path('create_staffprofile',views.create_staff_profile,name='create_staffprofile'),
path('new_profile/',views.create_client_profile,name='new_profile2'),
path('new_profile/<int:id>/',views.create_client_profile,name='new_profile'),
path('client_companies',views.client_companies,name='client_companies'),
] | wkigenyi/helpdesk | support/urls.py | urls.py | py | 1,796 | python | en | code | 0 | github-code | 13 |
74562933458 | """
_InsertRun_
Oracle implementation of InsertRun
"""
from WMCore.Database.DBFormatter import DBFormatter
class InsertRun(DBFormatter):
    """Insert run rows (run id + HLT key), skipping runs that already exist."""

    def execute(self, binds, conn = None, transaction = False):
        """Run the guarded INSERT for every bind dict.

        :param binds: list of dicts with keys ``RUN`` and ``HLTKEY``
        :param conn: optional existing DB connection
        :param transaction: whether the statement runs inside a transaction

        The ``INSERT ... SELECT FROM DUAL WHERE NOT EXISTS`` form makes the
        insert idempotent per run id (Oracle has no INSERT IGNORE).
        """
        sql = """INSERT INTO run
                 (RUN_ID, HLTKEY)
                 SELECT :RUN,
                        :HLTKEY
                 FROM DUAL
                 WHERE NOT EXISTS (
                   SELECT * FROM run WHERE run_id = :RUN
                 )"""

        self.dbi.processData(sql, binds, conn = conn,
                             transaction = transaction)
        return
| dmwm/T0 | src/python/T0/WMBS/Oracle/RunConfig/InsertRun.py | InsertRun.py | py | 605 | python | en | code | 6 | github-code | 13 |
30302986 | from numpy import ndarray
from entities.common.text_position import TextPosition
from invoice_processing_utils.common_utils import get_ocr_response, create_position, save_image_with_bounding_boxes
class TextReader:
    """Extracts word-level text with positions from an invoice image via OCR."""

    # Filename used when saving the debug image with drawn bounding boxes.
    __EXTRACTED_TEXTS_OUTPUT_PATH_PREFIX = "7.Extracted texts.png"

    def __init__(self, invoice: ndarray):
        # Raw invoice image (as produced by the preprocessing pipeline).
        self.__invoice = invoice

    def read_words(self) -> list[TextPosition]:
        """Run OCR and return the detected words as sorted TextPosition objects."""
        response = get_ocr_response(self.__invoice)
        # Skip annotation [0]: presumably the full-page text block rather than
        # an individual word — TODO confirm against the OCR API contract.
        texts_with_positions = [
            TextPosition(text.description, create_position(text.bounding_poly))
            for text in response.text_annotations[1:]
        ]
        # Persist a debug image showing every detected bounding box.
        save_image_with_bounding_boxes(self.__invoice, self.__EXTRACTED_TEXTS_OUTPUT_PATH_PREFIX,
                                       [text_position.position for text_position in texts_with_positions])
        texts_with_positions.sort()
        return texts_with_positions
| AdrianC2000/InvoiceScannerApp | text_handler/text_reader.py | text_reader.py | py | 971 | python | en | code | 0 | github-code | 13 |
19145776544 | import os
import subprocess
from typing import List, Optional, Sequence
def rsync(
    src: str,
    dst: str,
    opt: List[str],
    host: Optional[str] = None,
    excludes: Optional[Sequence[str]] = None,
    filters: Optional[Sequence[str]] = None,
    mkdirs: bool = False,
):
    """Copy ``src`` to ``dst`` with rsync, locally or to ``host`` over ssh.

    Args:
        src: source path.
        dst: destination path (a remote path when ``host`` is given).
        opt: base rsync options; the caller's list is not mutated.
        host: ssh host; when falsy the copy is local.
        excludes: patterns passed as ``--exclude=PATTERN``.
        filters: files passed as rsync merge filters (``:- FILE``).
        mkdirs: when copying to a remote host, create dst's parent first.

    Raises:
        subprocess.CalledProcessError: if rsync (or the remote mkdir) fails.
    """
    if excludes is None:
        excludes = []
    if filters is None:
        filters = []

    opt = list(opt)  # work on a copy so the caller's list stays untouched
    for exclude in excludes:
        opt.append(f"--exclude={exclude}")
    for filter_file in filters:  # renamed from `filter`: avoid shadowing the builtin
        opt.append(f"--filter=:- {filter_file}")

    if not host:
        parent = os.path.dirname(dst)
        # dirname is "" for a bare filename; os.makedirs("") would raise.
        if parent:
            os.makedirs(parent, exist_ok=True)
        sync_cmd = ["rsync"] + opt + [src, dst]
        subprocess.check_call(sync_cmd)
    else:
        if mkdirs:
            subprocess.check_output(["ssh", host, "mkdir", "-p", os.path.dirname(dst)])
        dst = f"{host}:{dst}"
        sync_cmd = ["rsync"] + opt + [src, dst]
        subprocess.check_call(sync_cmd)
| ethanluoyc/lxm3 | lxm3/xm_cluster/execution/utils.py | utils.py | py | 937 | python | en | code | 6 | github-code | 13 |
37965559818 | ###############################################################
#
# Job options file
#
#==============================================================
#--------------------------------------------------------------
# ATLAS default Application Configuration options
#--------------------------------------------------------------
# No event selector needed for basic 'Hello World!' Algorithm
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
# Full job is a list of algorithms
# NOTE: this is an Athena job-options script; names like DEBUG, theApp and
# ToolSvc are injected into the global namespace by the athena runtime.
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()
# Add top algorithms to be run
from LArBadChannelTool.LArBadChannelToolConf import LArBadChannelToolTest
job += LArBadChannelToolTest( "BadChanTest" )   # 1 alg, named "BadChanTest"
#--------------------------------------------------------------
# Set output level threshold (DEBUG, INFO, WARNING, ERROR, FATAL)
#--------------------------------------------------------------
# Output level for LArBadChannelToolTest only (note name: instance, not type)
job.BadChanTest.OutputLevel = DEBUG
# You can set the global output level on the message svc (not
# recommended) or by using the -l athena CLI parameter
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed (default is until the end of
# input, or -1, however, since we have no input, a limit needs
# to be set explicitly, here, choose 10)
theApp.EvtMax = 10
#--------------------------------------------------------------
# Algorithms Private Options (all optional)
#--------------------------------------------------------------
# For convenience, get a reference to the LArBadChannelToolTest Algorithm
# named "BadChanTest" in the job
BadChanTest = job.BadChanTest
# Set an int property
#BadChanTest.MyInt = 42
#--------------------------------------------------------------
# Algorithms Tool Usage Private Options (advanced and optional)
#--------------------------------------------------------------
# Import configurable for using our HelloTool
#from AthExHelloWorld.AthExHelloWorldConf import HelloTool
from LArBadChannelTool.LArBadChannelToolConf import LArBadChanTool
# Setup a public tool so that it can be used (again, note name)
#ToolSvc += HelloTool( "PublicHello" )
#ToolSvc.PublicHello.MyMessage = "A Public Message!"
ToolSvc += LArBadChanTool("BadChanTool")
# Both barrel sides read the same bad-channel list file.
#ToolSvc.BadChanTool.EMBAfile = "noisePb_29142.txt"
ToolSvc.BadChanTool.EMBAfile = "badchannels.txt"
ToolSvc.BadChanTool.EMBCfile = "badchannels.txt"
ToolSvc.BadChanTool.OutputLevel = DEBUG
# Tell "HelloWorld" to use this tool ("MyPublicHelloTool" is a
# ToolHandle property of LArBadChannelToolTest)
#HelloWorld.MyPublicHelloTool = ToolSvc.PublicHello
BadChanTest.BadChannelTool = ToolSvc.BadChanTool
#==============================================================
#
# End of job options file
#
###############################################################
| rushioda/PIXELVALID_athena | athena/LArCalorimeter/LArBadChannelTool/share/BadChannelToolTestOptions.py | BadChannelToolTestOptions.py | py | 3,111 | python | en | code | 1 | github-code | 13 |
24281086079 | # importing modules
import os
import sys
import re
import pandas as pd
import csv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
from scipy.optimize import curve_fit
"""
This code takes the total energy obtained from the simulation model and fit an exponential
function (aexp(bx)+c) to it, where x is the displacement of the floating film from the center(x=0).
After finding the optimized parameters we can rescale the total energy for further data analyis. We
scale it by using the energy function at x=0 (a+b). Thus scaled total energy is: dU= U-(a+b).
"""
class data_cleaning:
    # NOTE(review): this class does all work in __init__ (reads a CSV, fits a
    # curve, plots and writes files) and keeps no state — it behaves like a
    # function rather than a class.
    def __init__(self, Vol, meshLength, sr, stretch):
        """
        Class for cleaning the raw energy data and rescale it for further analysis
        :param Vol:
            Volume of the liquid drop in our simulation model
        :type Vol: float
        :param meshLength:
            Mesh refinement that regulates the triangulations of the surface
        :type meshLength: float
        :param sr:
            Sheet radius of the thin fim floating on the liquid interface
        :type sr: float
        :param stretch:
            Stretching modulus of the thin film.
        : type stretch: int
        """
        print(type(Vol))  # debug output left in place intentionally
        data = pd.read_csv("Stretching_"+str(stretch)+"-Vol_"+str(Vol)+".csv") #reading the csv file obtained from the data extraction script
        x=data['Disp']
        U=data['Total(stretching)']
        # Output filename for the cleaned, rescaled data.
        csvfile='Curve_fitMesh-' + str(meshLength) + 'Radius'+ str(sr)+'Vol-' + str(Vol)+'stretching'+str(stretch)+'.csv' #saves the csv file with the cleaned rescaled data.
        #define the function to fit our data: a0*exp(a1*x) + a2
        def func(x, a0,a1,a2):
            """
            :param x:
                Radial position of the sheet on the film
            :type x: class 'pandas.core.series.Series'
            :param a0:
                coefficient of the exponential equation
            :type a0: class 'numpy.float64'
            :param a1:
                exponential coefficient of the curve fit
            :type a1: class 'numpy.float64'
            :param a2:
                coefficient of the curve fit
            : type a2: class 'numpy.float64'
            """
            return a0*np.exp(x*a1)+a2
        # p0 is an initial guess for (a0, a1, a2) to help convergence.
        popt, pcov =curve_fit(func, x, U, p0=[0.000123, 3.1, 55.3005]) #optimized curve fitting through scipy library with a guess
        print(popt)
        def plot(func, x, U, popt):
            """
            Plot raw data with the fitted curve, rescale the energy and save both
            the figure and the rescaled data (uses Vol/csvfile from the closure).
            :param func:
                The exponential function
            :type func: class 'function'
            :param x:
                Radial position of the sheet on the film
            :type x: class 'pandas.core.series.Series'
            :param U:
                Total energy at each position
            :type U: class 'pandas.core.series.Series'
            :param popt:
                Optimized a0, a1, a2 values
            :type popt: class 'numpy.ndarray'
            """
            print(type(popt))
            fig, (ax1, ax2) = plt.subplots(2)
            ax1.scatter(x,U, label='Drop Volume: '+str(Vol)) #plots the total energy vs displacement
            ax1.set_ylim(99.155, 99.16)
            x_model= np.linspace(0,3.2,1000)
            y_model= func(x_model, popt[0], popt[1], popt[2])
            ax1.plot(x_model, y_model, 'r--', label='fit: a=%5.3e, b=%5.3f, c=%5.3f' % tuple(popt)) #plotting the optimized curve fit over our data
            ax1.set_ylabel('Energy, U')
            ax1.set_xlabel('r')
            ax1.set_title('Curve Fitting total energy function')
            x=np.asarray(x)
            total=np.asarray(U)
            total=total-(popt[0]+popt[2]) #rescaling the energy by substracting value of the model at displacement 0 (a+c) (from aexp(bx)+c) from the total energy
            ax2.scatter(x, total)
            ax2.set_ylabel('Rescaled Energy, dU')
            ax2.set_xlabel('r')
            ax2.set_ylim(-0.0008, 0.004)
            # Sort both arrays by displacement before writing them out.
            arr1inds = x.argsort()
            x=x[arr1inds[::1]]
            total = total[arr1inds[::1]]
            np.savetxt(csvfile, np.column_stack((x, total)), fmt='%0.14f', delimiter=',', header='x,total', comments='') #saving the rescaled total energy along with the displacement
            ax1.legend( bbox_to_anchor=[0.5, 0.75], loc='center', ncol=1)
            fig.tight_layout()
            plt.savefig('exponentialfit.png')
            plt.show()
        plot(func, x, U, popt)
# Positional CLI arguments, each optional, consumed in order:
#   Vol meshLength sr stretch
i=1
Vol = 26.97 # volume or pressure of drop
if len(sys.argv) > i :
    Vol, i = float(sys.argv[i]), i+1
meshLength = 0.2 # mesh refinement on sheet
if len(sys.argv) > i :
    meshLength, i = float(sys.argv[i]), i+1
sr=0.0 #sheet radius
if len(sys.argv) > i :
    sr, i = float(sys.argv[i]), i+1
stretch=0.0 #stretching modulus (parsed as int, unlike the float defaults above)
if len(sys.argv) > i :
    stretch, i = int(sys.argv[i]), i+1
# Running the cleaning/fitting pipeline happens at import time via __init__.
data_cleaning(Vol, meshLength, sr, stretch)
| Rajscript/Curvature-Propelled-thin-film | Curvefit_model.py | Curvefit_model.py | py | 4,921 | python | en | code | 0 | github-code | 13 |
4043382267 | from unittest import TestCase
from src.main.part1.rps_evaluator import RPSEvaluator
class TestRockPaperScissorsEvaluator(TestCase):
    """Unit tests for the static move-evaluation helpers of RPSEvaluator."""

    def test_get_winning_move(self):
        """Paper is the move that beats rock."""
        move: str = "rock"
        expected: str = "paper"
        actual: str = RPSEvaluator.get_winning_move(move)
        self.assertEqual(expected, actual)

    def test_get_losing_move(self):
        """Scissors is the move that loses to rock."""
        move: str = "rock"
        expected: str = "scissors"
        actual: str = RPSEvaluator.get_losing_move(move)
        self.assertEqual(expected, actual)

    def test_get_winner(self):
        """Rock beats scissors, so rock is the winner."""
        move1: str = "rock"
        move2: str = "scissors"
        expected: str = "rock"
        actual: str = RPSEvaluator.get_winner(move1, move2)
        # Argument order aligned with the sibling tests: (expected, actual).
        self.assertEqual(expected, actual)
| ElBell/Assessment1 | src/tests/test_part1/test_rps_evaluator.py | test_rps_evaluator.py | py | 758 | python | en | code | 0 | github-code | 13 |
37775277395 | from hpp.environments import Buggy
# Build the robot and restrict its planar base to the scene's bounding box.
robot = Buggy("buggy")
robot.setJointBounds ("base_joint_xy", [-5, 16, -4.5, 4.5])
from hpp.corbaserver import ProblemSolver
ps = ProblemSolver (robot)
from hpp.gepetto import ViewerFactory
gui = ViewerFactory (ps)
gui.loadObstacleModel ('hpp_environments', "scene", "scene")
# Initial and goal configurations; [0:2] is the (x, y) base position.
q_init = robot.getCurrentConfig ()
q_goal = q_init [::]
q_init[0:2] = [-3.7, -4];
gui (q_init)
q_goal [0:2] = [15,2]
gui (q_goal)
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
# Car-like (Reeds-Shepp) steering with an RRT-style diffusing planner,
# followed by random-shortcut path optimization.
ps.selectSteeringMethod ("ReedsShepp")
ps.selectPathPlanner ("DiffusingPlanner")
ps.addPathOptimizer ("RandomShortcut")
t = ps.solve ()
print ("solving time", t)
| heidydallard/hpp-environments | examples/buggy.py | buggy.py | py | 666 | python | en | code | 0 | github-code | 13 |
28203926425 | from StringIO import StringIO
import gzip
def parse_url_params(url):
    """Parse a query string ("a=1&b=2") into a dict of {name: value}.

    Pairs without an '=' are skipped. The value keeps any further '='
    characters: the pair is split at the first '=' only (the old split('=')
    silently truncated values such as "x=a=b" to "a").
    """
    data = {}
    for params in url.split('&'):
        name, sep, value = params.partition('=')
        if sep:  # keep only well-formed name=value pairs
            data[name] = value
    return data
def convert_to_utf8_str(arg):
    """Return `arg` as a UTF-8 byte string (Python 2 semantics).

    unicode -> UTF-8-encoded str; non-str objects -> str(arg); str unchanged.
    NOTE(review): relies on the Python 2 `unicode` builtin (this module also
    imports StringIO), so it is not Python 3 compatible.
    """
    # written by Michael Norton (http://docondev.blogspot.com/)
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')
    elif not isinstance(arg, str):
        arg = str(arg)
    return arg
def ungzip(response):
    """Return the body of a urllib2-style response, gunzipping it when the
    Content-Encoding header declares gzip compression.
    """
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO(response.read())
        # NOTE(review): the GzipFile itself is never closed, only the buffer;
        # presumably fine for short-lived use.
        f = gzip.GzipFile(fileobj=buf)
        data = f.read()
        buf.close()
    else:
        data = response.read()
    return data
| scalaview/pyPixiv | utils.py | utils.py | py | 668 | python | en | code | 0 | github-code | 13 |
7497847791 | from django.urls import path
from django.shortcuts import redirect
from django.views.decorators.cache import cache_page
from .views import *
urlpatterns = [
    # News list is cached for 60 seconds via cache_page.
    path('', cache_page(60)(PostList.as_view()), name='news'),
    path('create/', PostCreate.as_view(), name='post_create'),
    path('search/', PostSearch.as_view(), name='news_search'),
    # Post detail / edit / delete keyed by primary key.
    path('<int:pk>/', PostDetail.as_view(), name='post_detail'),
    path('<int:pk>/edit/', PostUpdate.as_view(), name='post_edit'),
    path('<int:pk>/delete/', PostDelete.as_view(), name='post_delete'),
    # Account actions.
    path('logout/', logout_user, name='logout'),
    path('upgrade/', upgrade_me, name='upgrade'),
    path('profile/<int:pk>/', UserDetail.as_view(), name='profile_detail'),
    path('profile/<int:pk>/edit/', UserUpdate.as_view(), name='profile_edit'),
    path('profile/<int:pk>/delete/', UserDelete.as_view(), name='profile_delete'),
    path('subscribe', subscribe, name='subscribe'),
]
| PavelUmanskiy/NewsPaper | NewsPaper/news/urls.py | urls.py | py | 966 | python | en | code | 0 | github-code | 13 |
17919155465 | # Decode A Web Page
# This is the first 4-chili exercise of this blog! We’ll see what people think, and decide whether or not to continue with 4-chili exercises in the future.
# Exercise 17 (and Solution)
# Use the BeautifulSoup and requests Python packages to print out a list of all the article titles on the New York Times homepage.
import requests
import bs4
from bs4 import BeautifulSoup
target_link = "https://www.nytimes.com"
# A timeout keeps the script from hanging forever if the site does not answer.
response = requests.get(url=target_link, timeout=10)
response_html = response.text
doc = BeautifulSoup(response_html, 'html.parser')
# 'e1lsht870' is a generated CSS class on nytimes.com headline elements;
# NOTE(review): such class names change often — expect this selector to rot.
articles = doc.find_all(class_='e1lsht870')
for x in articles:
    # .string is None for tags with nested markup; print only real titles.
    if x.string:
        print(x.string)
| alexjulian1227/python-learning | python-exercises/exercise-17/main.py | main.py | py | 675 | python | en | code | 0 | github-code | 13 |
40885468935 | """(S)HeteroFL"""
import os, argparse, time
import numpy as np
import wandb
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.nn.modules.batchnorm import _NormBase
# federated
from federated.learning import train_slimmable, test, refresh_bn
# utils
from utils.utils import set_seed, AverageMeter, CosineAnnealingLR, \
MultiStepLR, LocalMaskCrossEntropyLoss, str2bool
from utils.config import CHECKPOINT_ROOT
# NOTE import desired federation
from federated.core import SHeteFederation as Federation
def render_run_name(args, exp_folder):
    """Build a unique run name and its checkpoint path from the CLI args.

    Side effects: resolves ``args.model`` when it is 'default' (mutates args),
    sets ``args.save_path``, and creates the checkpoint folder on disk.

    Returns:
        tuple: (run_name, SAVE_FILE) — the run identifier string and the
        full checkpoint file path.
    """
    if args.model == 'default':
        # Dataset-specific default architecture.
        args.model = {'Digits': 'digit', 'Cifar10': 'preresnet18', 'DomainNet': 'alex'}[args.data]
    run_name = f'{args.model}'
    run_name += Federation.render_run_name(args)
    # log non-default args (only deviations from defaults appear in the name)
    if args.seed != 1: run_name += f'__seed_{args.seed}'
    # opt
    if args.lr_sch != 'none': run_name += f'__lrs_{args.lr_sch}'
    if args.opt != 'sgd': run_name += f'__opt_{args.opt}'
    if args.batch != 32: run_name += f'__batch_{args.batch}'
    if args.wk_iters != 1: run_name += f'__wk_iters_{args.wk_iters}'
    # slimmable
    if args.no_track_stat: run_name += f"__nts"
    # split-mix
    if not args.rescale_init: run_name += '__nri'
    if not args.rescale_layer: run_name += '__nrl'
    if args.loss_temp != 'none': run_name += f'__lt{args.loss_temp}'

    args.save_path = os.path.join(CHECKPOINT_ROOT, exp_folder)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    SAVE_FILE = os.path.join(args.save_path, run_name)
    return run_name, SAVE_FILE
def get_model_fh(data, model):
    """Resolve the model class for a given dataset/model-name pair.

    Imports happen lazily inside each branch so only the selected network
    module is loaded. Raises ValueError for unknown combinations.
    """
    if data == 'Digits':
        if model != 'digit':
            raise ValueError(f"Invalid model: {model}")
        from nets.slimmable_models import SlimmableDigitModel
        # TODO remove. Function the same as ens_digit
        return SlimmableDigitModel
    if data in ['DomainNet']:
        if model != 'alex':
            raise ValueError(f"Invalid model: {model}")
        from nets.slimmable_models import SlimmableAlexNet
        return SlimmableAlexNet
    if data == 'Cifar10':
        if model != 'preresnet18':
            raise ValueError(f"Invalid model: {model}")
        from nets.HeteFL.slimmable_preresne import resnet18  # From heteroFL
        return resnet18
    raise ValueError(f"Unknown dataset: {data}")
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
# basic problem setting
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--data', type=str, default='Digits', help='data name')
parser.add_argument('--model', type=str.lower, default='default', help='model name')
parser.add_argument('--no_track_stat', action='store_true', help='disable BN tracking')
parser.add_argument('--test_refresh_bn', action='store_true', help='refresh BN before test')
# control
parser.add_argument('--no_log', action='store_true', help='disable wandb log')
parser.add_argument('--test', action='store_true', help='test the pretrained model')
parser.add_argument('--resume', action='store_true', help='resume training from checkpoint')
parser.add_argument('--verbose', type=int, default=0, help='verbose level: 0 or 1')
# federated
Federation.add_argument(parser)
# optimization
parser.add_argument('--lr', type=float, default=1e-2, help='learning rate')
parser.add_argument('--lr_sch', type=str, default='none', help='learning rate schedule')
parser.add_argument('--opt', type=str.lower, default='sgd', help='optimizer')
parser.add_argument('--iters', type=int, default=300, help='#iterations for communication')
parser.add_argument('--wk_iters', type=int, default=1, help='#epochs in local train')
# slimmable test
parser.add_argument('--test_slim_ratio', type=float, default=1.,
help='slim_ratio of model at testing.')
# split-mix
parser.add_argument('--rescale_init', type=str2bool, default=True, help='rescale init after'
' slim')
parser.add_argument('--rescale_layer', type=str2bool, default=True, help='rescale layer outputs'
' after slim')
parser.add_argument('--loss_temp', type=str, default='none', choices=['none', 'auto'],
help='temper cross-entropy loss. auto: set temp as the width scale.')
args = parser.parse_args()
set_seed(args.seed)
# set experiment files, wandb
exp_folder = f'HFL_{args.data}'
run_name, SAVE_FILE = render_run_name(args, exp_folder)
wandb.init(group=run_name[:120], project=exp_folder, mode='offline' if args.no_log else 'online',
config={**vars(args), 'save_file': SAVE_FILE})
# /////////////////////////////////
# ///// Fed Dataset and Model /////
# /////////////////////////////////
fed = Federation(args.data, args)
# Data
train_loaders, val_loaders, test_loaders = fed.get_data()
mean_batch_iters = int(np.mean([len(tl) for tl in train_loaders]))
print(f" mean_batch_iters: {mean_batch_iters}")
# Model
ModelClass = get_model_fh(args.data, args.model)
running_model = ModelClass(
track_running_stats=not args.no_track_stat or (args.test and args.test_refresh_bn),
num_classes=fed.num_classes, slimmabe_ratios=fed.train_slim_ratios,
).to(device)
# Loss
if args.pu_nclass > 0: # niid
loss_fun = LocalMaskCrossEntropyLoss(fed.num_classes)
else:
loss_fun = nn.CrossEntropyLoss()
# Use running model to init a fed aggregator
fed.make_aggregator(running_model)
# /////////////////
# //// Resume /////
# /////////////////
# log the best for each model on all datasets
best_epoch = 0
best_acc = [0. for j in range(fed.client_num)]
train_elapsed = [[] for _ in range(fed.client_num)]
start_epoch = 0
if args.resume or args.test:
if os.path.exists(SAVE_FILE):
print(f'Loading chkpt from {SAVE_FILE}')
checkpoint = torch.load(SAVE_FILE)
best_epoch, best_acc = checkpoint['best_epoch'], checkpoint['best_acc']
train_elapsed = checkpoint['train_elapsed']
start_epoch = int(checkpoint['a_iter']) + 1
fed.model_accum.load_state_dict(checkpoint['server_model'])
print('Resume training from epoch {} with best acc:'.format(start_epoch))
for client_idx, acc in enumerate(best_acc):
print(' Best user-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
fed.clients[client_idx], best_epoch, acc))
else:
if args.test:
raise FileNotFoundError(f"Not found checkpoint at {SAVE_FILE}")
else:
print(f"Not found checkpoint at {SAVE_FILE}\n **Continue without resume.**")
# ///////////////
# //// Test /////
# ///////////////
if args.test:
    wandb.summary[f'best_epoch'] = best_epoch
    # Set up model with specified width
    print(f" Test with a single slim_ratio {args.test_slim_ratio}")
    running_model.switch_slim_mode(args.test_slim_ratio)
    test_model = running_model
    # Test on clients
    test_acc_mt = AverageMeter()
    for test_idx, test_loader in enumerate(test_loaders):
        if args.test_refresh_bn:
            # Re-estimate BatchNorm statistics on the client's train data
            # before evaluating (checkpoint stats may not match this width).
            fed.download(running_model, test_idx, strict=False)
            def set_rescale_layer_and_bn(m):
                # if isinstance(m, ScalableModule):
                #     m.rescale_layer = False
                # momentum=None makes _NormBase accumulate a cumulative
                # (equal-weight) running average during the refresh passes.
                if isinstance(m, _NormBase):
                    m.reset_running_stats()
                    m.momentum = None
            running_model.apply(set_rescale_layer_and_bn)
            for ep in tqdm(range(20), desc='refresh bn', leave=False):
                refresh_bn(running_model, train_loaders[test_idx], device)
        else:
            fed.download(running_model, test_idx)
        _, test_acc = test(test_model, test_loader, loss_fun, device)
        print(' {:<11s}| Test Acc: {:.4f}'.format(fed.clients[test_idx], test_acc))
        wandb.summary[f'{fed.clients[test_idx]} test acc'] = test_acc
        test_acc_mt.append(test_acc)
    # Profile model FLOPs, sizes (#param)
    from nets.profile_func import profile_model
    flops, params = profile_model(test_model, device=device)
    wandb.summary['GFLOPs'] = flops / 1e9
    wandb.summary['model size (MB)'] = params / 1e6
    print('GFLOPS: %.4f, model size: %.4fMB' % (flops / 1e9, params / 1e6))
    print(f"\n Average Test Acc: {test_acc_mt.avg}")
    wandb.summary[f'avg test acc'] = test_acc_mt.avg
    wandb.finish()
    exit(0)
# ////////////////
# //// Train /////
# ////////////////
# LR scheduler (stepped once per federated round in the loop below).
if args.lr_sch == 'cos':
    lr_sch = CosineAnnealingLR(args.iters, eta_max=args.lr, last_epoch=start_epoch)
elif args.lr_sch == 'multi_step':
    lr_sch = MultiStepLR(args.lr, milestones=[150, 250], gamma=0.1, last_epoch=start_epoch)
else:
    # 'none' keeps a constant learning rate.
    assert args.lr_sch == 'none', f'Invalid lr_sch: {args.lr_sch}'
    lr_sch = None
# Main federated training loop: one iteration == one communication round.
for a_iter in range(start_epoch, args.iters):
    # set global lr
    global_lr = args.lr if lr_sch is None else lr_sch.step()
    wandb.log({'global lr': global_lr}, commit=False)
    # ----------- Train Client ---------------
    train_loss_mt, train_acc_mt = AverageMeter(), AverageMeter()
    print("============ Train epoch {} ============".format(a_iter))
    for client_idx in fed.client_sampler.iter():
        # (Alg 2) Sample base models defined by shift index.
        slim_ratios, slim_shifts = fed.sample_bases(client_idx)
        start_time = time.process_time()
        # Download global model to local
        fed.download(running_model, client_idx)
        # (Alg 3) Local Train
        if args.opt == 'sgd':
            optimizer = optim.SGD(params=running_model.parameters(), lr=global_lr,
                                  momentum=0.9, weight_decay=5e-4)
        elif args.opt == 'adam':
            optimizer = optim.Adam(params=running_model.parameters(), lr=global_lr)
        else:
            raise ValueError(f"Invalid optimizer: {args.opt}")
        # max_iter equalizes the amount of local work across clients unless
        # the partition is uniform (then each client runs its full loader).
        train_loss, train_acc = train_slimmable(
            running_model, train_loaders[client_idx], optimizer, loss_fun, device,
            max_iter=mean_batch_iters * args.wk_iters if args.partition_mode != 'uni'
                else len(train_loaders[client_idx]) * args.wk_iters,
            slim_ratios=slim_ratios, slim_shifts=slim_shifts, progress=args.verbose > 0,
            loss_temp=args.loss_temp
        )
        # Upload only the sub-model slices this client actually trained.
        fed.upload(running_model, client_idx,
                   max_slim_ratio=max(slim_ratios), slim_bias_idx=slim_shifts)
        # Log
        client_name = fed.clients[client_idx]
        elapsed = time.process_time() - start_time
        wandb.log({f'{client_name}_train_elapsed': elapsed}, commit=False)
        train_elapsed[client_idx].append(elapsed)
        train_loss_mt.append(train_loss), train_acc_mt.append(train_acc)
        print(f' User-{client_name:<10s} Train | Loss: {train_loss:.4f} |'
              f' Acc: {train_acc:.4f} | Elapsed: {elapsed:.2f} s')
        wandb.log({
            f"{client_name} train_loss": train_loss,
            f"{client_name} train_acc": train_acc,
        }, commit=False)
    # Use accumulated model to update server model
    fed.aggregate()
    # ----------- Validation ---------------
    val_acc_list = [None for j in range(fed.client_num)]
    val_loss_mt = AverageMeter()
    slim_val_sacc_mt = {slim_ratio: AverageMeter() for slim_ratio in fed.val_slim_ratios}
    for client_idx in range(fed.client_num):
        fed.download(running_model, client_idx)
        # Validate every configured slim ratio on this client's data.
        for i_slim_ratio, slim_ratio in enumerate(fed.val_slim_ratios):
            # Load and set slim ratio
            running_model.switch_slim_mode(slim_ratio)  # full net should load the full net
            # Test
            val_loss, val_acc = test(running_model, val_loaders[client_idx], loss_fun, device)
            # Log
            val_loss_mt.append(val_loss)
            val_acc_list[client_idx] = val_acc
            if args.verbose > 0:
                print(' {:<19s} slim {:.2f}| Val Loss: {:.4f} | Val Acc: {:.4f}'.format(
                    'User-'+fed.clients[client_idx] if i_slim_ratio == 0 else ' ', slim_ratio,
                    val_loss, val_acc))
            wandb.log({
                f"{fed.clients[client_idx]} sm{slim_ratio:.2f} val_s-acc": val_acc,
            }, commit=False)
            # Extra log entry at the client's own maximum width.
            if slim_ratio == fed.user_max_slim_ratios[client_idx]:
                wandb.log({
                    f"{fed.clients[client_idx]} val_s-acc": val_acc,
                }, commit=False)
                slim_val_sacc_mt[slim_ratio].append(val_acc)
    # Log averaged
    print(f' [Overall] Train Loss {train_loss_mt.avg:.4f} '
          f'| Val Acc {np.mean(val_acc_list)*100:.2f}%')
    wandb.log({
        f"train_loss": train_loss_mt.avg,
        f"train_acc": train_acc_mt.avg,
        f"val_loss": val_loss_mt.avg,
        f"val_acc": np.mean(val_acc_list),
    }, commit=False)
    wandb.log({
        f"slim{k:.2f} val_sacc": mt.avg if len(mt) > 0 else None
        for k, mt in slim_val_sacc_mt.items()
    }, commit=False)
    # ----------- Save checkpoint -----------
    # Checkpoint whenever the mean validation accuracy improves.
    if np.mean(val_acc_list) > np.mean(best_acc):
        best_epoch = a_iter
        for client_idx in range(fed.client_num):
            best_acc[client_idx] = val_acc_list[client_idx]
            if args.verbose > 0:
                print(' Best site-{:<10s}| Epoch:{} | Val Acc: {:.4f}'.format(
                    fed.clients[client_idx], best_epoch, best_acc[client_idx]))
        print(' [Best Val] Acc {:.4f}'.format(np.mean(val_acc_list)))
        # Save
        print(f' Saving the local and server checkpoint to {SAVE_FILE}')
        save_dict = {
            'server_model': fed.model_accum.state_dict(),
            'best_epoch': best_epoch,
            'best_acc': best_acc,
            'a_iter': a_iter,
            'all_domains': fed.all_domains,
            'train_elapsed': train_elapsed,
        }
        torch.save(save_dict, SAVE_FILE)
    # commit=True flushes all metrics buffered during this round.
    wandb.log({
        f"best_val_acc": np.mean(best_acc),
    }, commit=True)
| illidanlab/SplitMix | fed_hfl.py | fed_hfl.py | py | 15,257 | python | en | code | 29 | github-code | 13 |
4682703613 | __struct_classes = {}
from sydpy.types._type_base import TypeBase
def Enum(*args):
    """Return the memoized `enum` subclass whose members are *args*."""
    try:
        return __struct_classes[args]
    except KeyError:
        cls = type('enum', (enum,), dict(vals=args))
        __struct_classes[args] = cls
        return cls
class enum(TypeBase):
    """Base class for the types produced by the Enum() factory.

    Subclasses set `vals` to the tuple of member names; an instance stores
    `_val`, the index of its member in `vals` (or None when uninitialized).
    """
    # Tuple of member names; filled in by the Enum() factory.
    vals = None
    def __init__(self, val=None):
        # Accepted inputs: None (empty enum), a member-name string, another
        # enum instance, or anything convertible to int (a member index).
        if val is None:
            self._val = None
        elif isinstance(val, str):
            for i, v in enumerate(self.vals):
                if v == val:
                    self._val = i
                    return
            else:
                # for/else: the loop contains no break, so this branch runs
                # whenever the name was not found (a match returns above).
                raise Exception("Supplied value not among enum members!")
        else:
            try:
                # Another enum: copy its index.
                self._val = int(val._val)
            except AttributeError:
                try:
                    self._val = int(val)
                except TypeError:
                    raise Exception("Cannot convert to enum!")
    @classmethod
    def _rnd(cls, rnd_gen):
        # Random member; index-range semantics follow rnd_gen._rnd_int —
        # presumably [0, len(vals)) but defined by the generator, not here.
        val = rnd_gen._rnd_int(0, len(cls.vals))
        return cls(val)
    def __str__(self):
        # Render as the member name; empty string for an uninitialized enum.
        if self._val is not None:
            return self.vals[self._val]
        else:
            return ''
    __repr__ = __str__
    def __int__(self):
        return self._val
    def __eq__(self, other):
        # Equality rules: vs a string -> compare the member name; vs an
        # iterable -> True if equal to ANY element; otherwise compare the
        # raw index (TypeError from iteration selects the fallback).
        if isinstance(other, str):
            if self._val is None:
                return False
            else:
                return self.vals[self._val] == other
        else:
            try:
                for v in other:
                    if self == v:
                        return True
                return False
            except TypeError:
                return self._val == other
| bogdanvuk/sydpy | sydpy/types/enum.py | enum.py | py | 1,730 | python | en | code | 12 | github-code | 13 |
13670900243 | DOCUMENTATION = r"""
---
module: s3_logging
version_added: 1.0.0
short_description: Manage logging facility of an s3 bucket in AWS
description:
- Manage logging facility of an s3 bucket in AWS
author:
- Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket."
required: true
type: str
state:
description:
- "Enable or disable logging."
default: present
choices: [ 'present', 'absent' ]
type: str
target_bucket:
description:
- "The bucket to log to. Required when state=present."
type: str
target_prefix:
description:
- "The prefix that should be prepended to the generated log files written to the target_bucket."
default: ""
type: str
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
RETURN = r""" # """
EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
community.aws.s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
community.aws.s3_logging:
name: mywebsite.com
state: absent
"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
    """Return True when the bucket's current logging config differs from the
    desired (target_bucket, target_prefix) state, i.e. a change is needed."""
    current = bucket_logging.get("LoggingEnabled")
    if not current:
        # Logging is off: a change is required only if logging is desired.
        return bool(target_bucket)
    return (
        current["TargetBucket"] != target_bucket
        or current["TargetPrefix"] != target_prefix
    )
def verify_acls(connection, module, target_bucket):
    # Ensure the S3 LogDelivery group has FULL_CONTROL on the target bucket
    # (required for S3 to deliver access logs there). Returns True when the
    # ACL was modified (or would be, in check mode), False if already set.
    try:
        current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket)
        current_grants = current_acl["Grants"]
    except is_boto3_error_code("NoSuchBucket"):
        module.fail_json(msg=f"Target Bucket '{target_bucket}' not found")
    except (
        botocore.exceptions.BotoCoreError,
        botocore.exceptions.ClientError,
    ) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to fetch target bucket ACL")
    required_grant = {
        "Grantee": {"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", "Type": "Group"},
        "Permission": "FULL_CONTROL",
    }
    # Nothing to do when the grant already exists verbatim.
    for grant in current_grants:
        if grant == required_grant:
            return False
    if module.check_mode:
        return True
    # Re-submit the full ACL with the log-delivery grant appended;
    # ResponseMetadata is API bookkeeping and must not be sent back.
    updated_acl = dict(current_acl)
    updated_grants = list(current_grants)
    updated_grants.append(required_grant)
    updated_acl["Grants"] = updated_grants
    del updated_acl["ResponseMetadata"]
    try:
        connection.put_bucket_acl(aws_retry=True, Bucket=target_bucket, AccessControlPolicy=updated_acl)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to update target bucket ACL to allow log delivery")
    return True
def enable_bucket_logging(connection, module):
    # Enable (or confirm) server access logging for the named bucket,
    # pointing at target_bucket/target_prefix. Exits the module via
    # exit_json in every success path.
    bucket_name = module.params.get("name")
    target_bucket = module.params.get("target_bucket")
    target_prefix = module.params.get("target_prefix")
    changed = False
    try:
        bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name)
    except is_boto3_error_code("NoSuchBucket"):
        module.fail_json(msg=f"Bucket '{bucket_name}' not found")
    except (
        botocore.exceptions.BotoCoreError,
        botocore.exceptions.ClientError,
    ) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to fetch current logging status")
    try:
        # The target bucket must grant the LogDelivery group access first;
        # this may itself count as a change.
        changed |= verify_acls(connection, module, target_bucket)
        if not compare_bucket_logging(bucket_logging, target_bucket, target_prefix):
            # Desired config already in place: report current state.
            bucket_logging = camel_dict_to_snake_dict(bucket_logging)
            module.exit_json(changed=changed, **bucket_logging)
        if module.check_mode:
            module.exit_json(changed=True)
        result = connection.put_bucket_logging(
            aws_retry=True,
            Bucket=bucket_name,
            BucketLoggingStatus={
                "LoggingEnabled": {
                    "TargetBucket": target_bucket,
                    "TargetPrefix": target_prefix,
                }
            },
        )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to enable bucket logging")
    result = camel_dict_to_snake_dict(result)
    module.exit_json(changed=True, **result)
def disable_bucket_logging(connection, module):
    """Turn off server access logging for the bucket named in module params.

    Exits the module via ``exit_json`` (with the appropriate ``changed``
    flag); failures are reported through ``fail_json_aws``.
    """
    bucket_name = module.params.get("name")
    try:
        bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to fetch current logging status")
    # Nothing to do when logging is already disabled.
    if not compare_bucket_logging(bucket_logging, None, None):
        module.exit_json(changed=False)
    if module.check_mode:
        module.exit_json(changed=True)
    try:
        # An empty BucketLoggingStatus disables logging. Also retry on
        # InvalidTargetBucketForLogging (treated here as transient).
        AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])(
            connection.put_bucket_logging
        )(Bucket=bucket_name, BucketLoggingStatus={})
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to disable bucket logging")
    module.exit_json(changed=True)
def main():
    # Module entry point: parse the Ansible argument spec, build a retrying
    # S3 client, and dispatch on the requested state.
    argument_spec = dict(
        name=dict(required=True),
        target_bucket=dict(required=False, default=None),
        target_prefix=dict(required=False, default=""),
        state=dict(required=False, default="present", choices=["present", "absent"]),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    connection = module.client("s3", retry_decorator=AWSRetry.jittered_backoff())
    state = module.params.get("state")
    if state == "present":
        enable_bucket_logging(connection, module)
    elif state == "absent":
        disable_bucket_logging(connection, module)
disable_bucket_logging(connection, module)
if __name__ == "__main__":
main()
| ansible-collections/community.aws | plugins/modules/s3_logging.py | s3_logging.py | py | 6,782 | python | en | code | 174 | github-code | 13 |
3957146057 | # Desafio 44
# Challenge 44
# Compute the amount due for a product given its list price and the chosen
# payment method:
#   cash/check up front:     10% discount
#   card, single payment:     5% discount
#   card, 2 installments:    list price (no interest)
#   card, 3+ installments:   20% interest
preço = float(input('Total das compras: R$ '))
print('''Formas de Pagamento:
[ 1 ] á vista dinheiro/cheque
[ 2 ] á vista no cartão
[ 3 ] 2x no cartão
[ 4 ] 3x ou mais no cartão''')
opção = int(input('Qual é a opção? '))
if opção == 1:
    total = preço * 0.9
elif opção == 2:
    total = preço * 0.95
elif opção == 3:
    total = preço
    parcela = total / 2
    print(f'Sua compra será parcelada em 2x de R$ {parcela:.2f} SEM JUROS!')
elif opção == 4:
    total = preço * 1.2
    totparc = int(input('Quantas parcela? '))
    parcela = total / totparc
    print(f'Sua compra será parcela em {totparc}x de R$ {parcela:.2f} COM JUROS! ')
else:
    # Bug fix: an unrecognized option used to crash with NameError on
    # `total`; fall back to the normal price and warn the user.
    total = preço
    print('Opção inválida! Será cobrado o preço normal.')
print(f'Sua compra de R$ {preço:.2f} vai custar R$ {total:.2f} no final.')
| mozart-jr/Python | Desafio 41 ao 50/Desafio 44.py | Desafio 44.py | py | 1,050 | python | pt | code | 0 | github-code | 13 |
7631301572 | # G[2] 메모리 30840 KB 시간 1708 ms
import sys
MOVE = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # up, down, left, right
N, M, K = map(int, sys.stdin.readline().split())
# space[i][j] = [shark id owning the smell on this cell, remaining duration]
space = [[[0, 0] for _ in range(N)] for _ in range(N)]
# shark[id] = [row, col, direction index]; positions read from the grid below
shark = {idx: [-1, -1, -1] for idx in range(1, M+1)}
# shark_priority[id][current_dir] = preferred direction order (0-based);
# index 0 is a dummy so shark ids can be used directly.
shark_priority = [[]]
for i in range(N):
    for j, val in enumerate(map(int, sys.stdin.readline().split())):
        if val > 0:
            shark[val] = [i, j, -1]
for i, val in enumerate(map(int, sys.stdin.readline().split())):
    shark[i+1][2] = val-1  # input directions are 1-based
for idx in range(M):
    shark_priority.append([list(map(lambda x : int(x)-1, sys.stdin.readline().split())) for _ in range(4)])
def spray():
    """Stamp every surviving shark's current cell with its smell for K turns."""
    # dict preserves ascending-id insertion order, matching the original
    # 1..M iteration; deleted sharks are simply absent.
    for shark_id, (row, col, _heading) in shark.items():
        space[row][col] = [shark_id, K]
def moveShark(idx):
    # Advance shark `idx` one cell according to its priority table for the
    # current heading; no-op if the shark has already been eliminated.
    if idx not in shark:
        return
    i, j, d = shark[idx]
    priority = shark_priority[idx][d]
    # Case 1: prefer the first adjacent cell with no smell at all.
    for p in range(4):
        di, dj = MOVE[priority[p]]
        if 0 <= i+di < N and 0 <= j+dj < N and space[i+di][j+dj] == [0, 0]:
            shark[idx] = [i+di, j+dj, priority[p]]
            return
    # Case 2: otherwise fall back to a cell carrying its own smell.
    for p in range(4):
        di, dj = MOVE[priority[p]]
        if 0 <= i+di < N and 0 <= j+dj < N and space[i+di][j+dj][0] == idx:
            shark[idx] = [i+di, j+dj, priority[p]]
            return
def fadeOut():
    """Age every smell mark by one turn and clear marks whose timer expires."""
    for row in space:
        for cell in row:
            if cell[0] > 0:
                cell[1] -= 1
                if cell[1] == 0:
                    cell[0] = 0  # cell becomes [0, 0], i.e. no smell
def solv():
    # One simulation step: move every shark, age the smells, evict sharks
    # that share a cell with a smaller-numbered shark, then re-spray.
    for idx in range(1, M+1):
        moveShark(idx)
    fadeOut()
    # Resolve collisions from the largest id downward so the smallest id on
    # a contested cell survives.
    for idx in range(M, 0, -1):
        if idx not in shark:
            continue
        # Positions of all OTHER surviving sharks.
        shark_list = list(map(lambda x: x[1][:2], filter(lambda x: x[0] != idx, shark.items())))
        if shark[idx][:2] in shark_list:
            del shark[idx]
    spray()
spray()  # initial smell marks before the first step
time = 0
# Run until only one shark remains; give up after 1000 steps (answer -1).
while len(shark) > 1 and time <= 1000:
    solv()
    time += 1
print(-1 if time > 1000 else time) | nuuuri/algorithm | 구현/BOJ_19237.py | BOJ_19237.py | py | 2,133 | python | en | code | 0 | github-code | 13 |
35295670258 | a = int(input())
b = int(input())
c = int(input())
list_ = [a, b, c]
list_.sort(reverse=True)
ans = [0, 0, 0]
for i in range(len(list_)):
if list_[i]==a:
ans[0] = i+1
elif list_[i]==b:
ans[1] = i+1
else:
ans[2] = i+1
for i in ans:
print(i) | nozomuorita/atcoder-workspace-python | abc/abc018/a.py | a.py | py | 290 | python | en | code | 0 | github-code | 13 |
34785457958 | from rct229.rulesets.ashrae9012019.data.schema_enums import schema_enums
from rct229.utils.assertions import getattr_
from rct229.utils.jsonpath_utils import find_all, find_one
from rct229.utils.utility_functions import (
find_exactly_one_child_loop,
find_exactly_one_fluid_loop,
find_exactly_one_hvac_system,
)
FLUID_LOOP = schema_enums["FluidLoopOptions"]
def is_hvac_sys_fluid_loop_attached_to_chiller(rmi_b, hvac_b_id):
    """Returns TRUE if the fluid loop associated with the cooling system associated with the HVAC system is attached to a chiller. Returns FALSE if this is not the case.

    Parameters
    ----------
    rmi_b : json
        RMD at RuleSetModelInstance level
    hvac_b_id : str
        The HVAC system ID.

    Returns
    -------
    bool
        True: fluid loop associated with the cooling system associated with the HVAC system is attached to a chiller
        False: otherwise
    """
    # Loops served directly by a chiller (primary chilled-water loops).
    primary_loop_ids = [
        getattr_(chiller, "chiller", "cooling_loop")
        for chiller in find_all("$.chillers[*]", rmi_b)
    ]
    # Secondary (child) loops hanging off any primary loop.
    secondary_loop_ids = [
        child_loop["id"]
        for primary_id in primary_loop_ids
        for child_loop in find_all(
            "$.child_loops[*]", find_exactly_one_fluid_loop(rmi_b, primary_id)
        )
    ]
    hvac = find_exactly_one_hvac_system(rmi_b, hvac_b_id)
    chw_loop_id = find_one("cooling_system.chilled_water_loop", hvac)
    # Accept either a single-loop or a primary/secondary configuration.
    if chw_loop_id in primary_loop_ids:
        water_loop = find_exactly_one_fluid_loop(rmi_b, chw_loop_id)
    elif chw_loop_id in secondary_loop_ids:
        water_loop = find_exactly_one_child_loop(rmi_b, chw_loop_id)
    else:
        water_loop = None
    return water_loop is not None and find_one("type", water_loop) == FLUID_LOOP.COOLING
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/baseline_systems/baseline_hvac_sub_functions/is_hvac_sys_fluid_loop_attached_to_chiller.py | is_hvac_sys_fluid_loop_attached_to_chiller.py | py | 2,200 | python | en | code | 6 | github-code | 13 |
35320235448 | #!/usr/bin/python
#coding:utf-8
'''
name : testPlot.py
author : ykita
date : Sat Feb 13 12:27:58 JST 2016
memo : Plot Nikkei 225 OHLC price distributions from a SQLite dump (Python 2 / PyROOT).
'''
import os, os.path
import sys
import sqlite3
import ROOT
from ROOT import *
# One 100-bin histogram over the 0-50000 price range per OHLC column.
hOp = TH1D('hOp','',100,0,50000)
hHi = TH1D('hHi','',100,0,50000)
hLo = TH1D('hLo','',100,0,50000)
hCl = TH1D('hCl','',100,0,50000)
conn = sqlite3.connect('out/data.db',isolation_level=None)
c = conn.cursor()
# Fill the histograms from every row since 2000-01-01.
for i,row in enumerate(c.execute('''SELECT datetime(date),op,hi,lo,cl,vol,adjclo FROM nikkei225 where date>='2000-01-01' ''')):
    hOp.Fill(row[1])
    hHi.Fill(row[2])
    hLo.Fill(row[3])
    hCl.Fill(row[4])
c.close()
conn.close()
# Overlay all four distributions on the current canvas.
hOp.Draw()
hHi.Draw('SAME')
hLo.Draw('SAME')
hCl.Draw('SAME')
raw_input('>')  # Python 2: block so the ROOT canvas stays open
| ykita0000/20160212_nikkei225 | py/testPlot.py | testPlot.py | py | 725 | python | en | code | 0 | github-code | 13 |
25967576192 | import os
from pathlib import Path
import astropy.constants as const
import h5py
import numpy as np
from tqdm import tqdm
from pyvisgen.fits.data import fits_data
from pyvisgen.gridding.alt_gridder import ms2dirty_python_fast
from pyvisgen.utils.config import read_data_set_conf
from pyvisgen.utils.data import load_bundles, open_bundles
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def create_gridded_data_set(config):
    """Grid simulated visibilities and write test/train/valid HDF5 bundles.

    Reads the data-set configuration from ``config``, grids every simulated
    fits file, pairs each grid with the FFT of the matching true sky
    distribution, converts both to amplitude/phase or real/imaginary
    (per ``amp_phase``), and stores the pairs as bundled ``samp_*.h5`` files
    under ``out_path_gridded``.

    Parameters
    ----------
    config : str or Path
        Path to the data-set TOML configuration.
    """
    conf = read_data_set_conf(config)
    out_path_fits = Path(conf["out_path_fits"])
    out_path = Path(conf["out_path_gridded"])
    out_path.mkdir(parents=True, exist_ok=True)
    sky_dist = load_bundles(conf["in_path"])
    fits_files = fits_data(out_path_fits)
    size = len(fits_files)
    print(size)
    ###################
    # test
    # Bug fix: initialize here so the train loop below does not raise
    # NameError when num_test_images == 0.
    bundle_test = 0
    if conf["num_test_images"] > 0:
        bundle_test = int(conf["num_test_images"] // conf["bundle_size"])
        size -= conf["num_test_images"]
        for i in tqdm(range(bundle_test)):
            (
                uv_data_test,
                freq_data_test,
                gridded_data_test,
                sky_dist_test,
            ) = open_data(fits_files, sky_dist, conf, i)
            truth_fft_test = calc_truth_fft(sky_dist_test)
            if conf["amp_phase"]:
                gridded_data_test = convert_amp_phase(gridded_data_test, sky_sim=False)
                truth_amp_phase_test = convert_amp_phase(truth_fft_test, sky_sim=True)
            else:
                gridded_data_test = convert_real_imag(gridded_data_test, sky_sim=False)
                truth_amp_phase_test = convert_real_imag(truth_fft_test, sky_sim=True)
            assert gridded_data_test.shape[1] == 2
            out = out_path / Path("samp_test" + str(i) + ".h5")
            # rescaled to level Stokes I
            gridded_data_test /= 2
            save_fft_pair(out, gridded_data_test, truth_amp_phase_test)
    #
    ###################
    size_train = int(size // (1 + conf["train_valid_split"]))
    size_valid = size - size_train
    print(f"Training size: {size_train}, Validation size: {size_valid}")
    bundle_train = int(size_train // conf["bundle_size"])
    bundle_valid = int(size_valid // conf["bundle_size"])
    ###################
    # train
    # NOTE(review): if bundle_train == 0, `train_index_last` is never set and
    # the valid loop below raises NameError — confirm the intended minimum
    # data-set size before relying on that configuration.
    for i in tqdm(range(bundle_train)):
        i += bundle_test  # train bundles are numbered after the test bundles
        uv_data_train, freq_data_train, gridded_data_train, sky_dist_train = open_data(
            fits_files, sky_dist, conf, i
        )
        truth_fft_train = calc_truth_fft(sky_dist_train)
        if conf["amp_phase"]:
            gridded_data_train = convert_amp_phase(gridded_data_train, sky_sim=False)
            truth_amp_phase_train = convert_amp_phase(truth_fft_train, sky_sim=True)
        else:
            gridded_data_train = convert_real_imag(gridded_data_train, sky_sim=False)
            truth_amp_phase_train = convert_real_imag(truth_fft_train, sky_sim=True)
        out = out_path / Path("samp_train" + str(i) + ".h5")
        # rescaled to level Stokes I
        gridded_data_train /= 2
        save_fft_pair(out, gridded_data_train, truth_amp_phase_train)
        train_index_last = i
    #
    ###################
    ###################
    # valid
    for i in tqdm(range(bundle_valid)):
        i += train_index_last  # continue numbering after the train bundles
        uv_data_valid, freq_data_valid, gridded_data_valid, sky_dist_valid = open_data(
            fits_files, sky_dist, conf, i
        )
        truth_fft_valid = calc_truth_fft(sky_dist_valid)
        if conf["amp_phase"]:
            gridded_data_valid = convert_amp_phase(gridded_data_valid, sky_sim=False)
            truth_amp_phase_valid = convert_amp_phase(truth_fft_valid, sky_sim=True)
        else:
            gridded_data_valid = convert_real_imag(gridded_data_valid, sky_sim=False)
            truth_amp_phase_valid = convert_real_imag(truth_fft_valid, sky_sim=True)
        out = out_path / Path("samp_valid" + str(i - train_index_last) + ".h5")
        # rescaled to level Stokes I
        gridded_data_valid /= 2
        save_fft_pair(out, gridded_data_valid, truth_amp_phase_valid)
    #
    ###################
def open_data(fits_files, sky_dist, conf, i):
    # Load the i-th bundle: raw uv data and frequency setups from the fits
    # files, the visibilities gridded via grid_data, and the matching slice
    # of true sky distributions.
    sky_sim_bundle_size = len(open_bundles(sky_dist[0]))
    uv_data = [
        fits_files.get_uv_data(n).copy()
        for n in np.arange(
            i * sky_sim_bundle_size, (i * sky_sim_bundle_size) + sky_sim_bundle_size
        )
    ]
    freq_data = np.array(
        [
            fits_files.get_freq_data(n)
            for n in np.arange(
                i * sky_sim_bundle_size, (i * sky_sim_bundle_size) + sky_sim_bundle_size
            )
        ],
        dtype="object",
    )
    gridded_data = np.array(
        [grid_data(data, freq, conf).copy() for data, freq in zip(uv_data, freq_data)]
    )
    # NOTE(review): this floor division simplifies to `bundle = i`; kept as
    # written.
    bundle = np.floor_divide(i * sky_sim_bundle_size, sky_sim_bundle_size)
    gridded_truth = np.array(
        [
            open_bundles(sky_dist[bundle])[n]
            for n in np.arange(
                i * sky_sim_bundle_size - bundle * sky_sim_bundle_size,
                (i * sky_sim_bundle_size)
                + sky_sim_bundle_size
                - bundle * sky_sim_bundle_size,
            )
        ]
    )
    return uv_data, freq_data, gridded_data, gridded_truth
def calc_truth_fft(sky_dist):
    """2-d FFT of each image in the batch, with centered frequency ordering.

    `sky_dist` has shape (batch, H, W); the transform acts on axes (1, 2).
    """
    centered = np.fft.fftshift(sky_dist, axes=(1, 2))
    spectrum = np.fft.fft2(centered, axes=(1, 2))
    return np.fft.fftshift(spectrum, axes=(1, 2))
def ducc0_gridding(uv_data, freq_data):
    # Alternative gridding path via ms2dirty_python_fast; returns the
    # fftshifted, rotated grid.
    vis_ = uv_data["DATA"]
    vis = np.array([vis_[:, 0, 0, 0, 0, 0, 0] + 1j * vis_[:, 0, 0, 0, 0, 0, 1]]).T
    # NOTE(review): `vis_compl` is identical to `vis` (imaginary part is NOT
    # negated) even though the (u, v, w) coordinates below are mirrored —
    # verify whether a true complex conjugate was intended.
    vis_compl = np.array([vis_[:, 0, 0, 0, 0, 0, 0] + 1j * vis_[:, 0, 0, 0, 0, 0, 1]]).T
    uu = np.array(uv_data["UU--"], dtype=np.float64)
    uu_compl = np.array(-uv_data["UU--"], dtype=np.float64)
    vv = np.array(uv_data["VV--"], dtype=np.float64)
    vv_compl = np.array(-uv_data["VV--"], dtype=np.float64)
    ww = np.array(uv_data["WW--"], dtype=np.float64)
    ww_compl = np.array(uv_data["WW--"], dtype=np.float64)
    uvw = np.stack([uu, vv, ww]).T
    uvw_compl = np.stack([uu_compl, vv_compl, ww_compl]).T
    # Coordinates are stored in seconds; multiplying by c converts to meters.
    uvw *= const.c.value
    uvw_compl *= const.c.value
    # complex conjugated
    uvw = np.append(uvw, uvw_compl, axis=0)
    vis = np.append(vis, vis_compl)
    freq = freq_data[1]
    freq = (freq_data[0]["IF FREQ"] + freq).reshape(-1, 1)[0]
    wgt = np.ones((vis.shape[0], 1))
    mask = None
    wgt[vis == 0] = 0  # zero-weight missing visibilities
    if mask is None:
        mask = np.ones(wgt.shape, dtype=np.uint8)
    mask[wgt == 0] = False
    DEG2RAD = np.pi / 180
    # nthreads = 4
    epsilon = 1e-4
    # do_wgridding = False
    # verbosity = 1
    # do_sycl = False  # True
    # do_cng = False  # True
    # ntries = 1
    fov_deg = 0.02  # 1e-5 # 3.3477833333331884e-5
    npixdirty = 64  # get_npixdirty(uvw, freq, fov_deg, mask)
    pixsize = fov_deg / npixdirty * DEG2RAD
    # mintime = 1e300
    grid = ms2dirty_python_fast(
        uvw, freq, vis, npixdirty, npixdirty, pixsize, pixsize, epsilon, False
    )
    grid = np.rot90(np.fft.fftshift(grid))
    # assert grid.shape[0] == 256
    return grid
def grid_data(uv_data, freq_data, conf):
    """Grid raw (u, v) visibilities onto a regular 2-d frequency grid.

    Parameters
    ----------
    uv_data : mapping
        UV group data with columns ``DATA``, ``UU--`` and ``VV--``.
    freq_data : sequence
        ``(frequency setup table, reference frequency)``; the setup table
        provides the ``IF FREQ`` offsets of the IF bands.
    conf : dict
        Data-set configuration; uses ``grid_size`` (pixels per axis) and
        ``fov_size`` (field of view in arcseconds).

    Returns
    -------
    numpy.ndarray
        Shape ``(2, N, N)``: cell-averaged real (index 0) and imaginary
        (index 1) parts of the gridded visibilities.
    """
    cmplx = uv_data["DATA"]
    real = np.squeeze(cmplx[..., 0, 0, 0])
    imag = np.squeeze(cmplx[..., 0, 0, 1])
    freq = freq_data[1]
    IF_bands = (freq_data[0]["IF FREQ"] + freq).reshape(-1, 1)
    # One (u, v) copy per IF band, scaled by the band frequency.
    u = np.repeat([uv_data["UU--"]], real.shape[1], axis=0)
    v = np.repeat([uv_data["VV--"]], real.shape[1], axis=0)
    u = (u * IF_bands).T.ravel()
    v = (v * IF_bands).T.ravel()
    real = real.ravel()
    imag = imag.ravel()
    # Append the mirrored points (-u, -v, re, -im) so the grid is Hermitian
    # symmetric.
    samps = np.array(
        [
            np.append(u, -u),
            np.append(v, -v),
            np.append(real, real),
            np.append(imag, -imag),
        ]
    )
    # Build the uv-plane binning.
    N = conf["grid_size"]  # image size in pixels
    # FoV in radians; conversion hard coded — depends on the simulation setup.
    fov = (
        conf["fov_size"] * np.pi / (3600 * 180)
    )
    delta = 1 / fov  # uv-cell size
    bins = np.arange(start=-(N / 2) * delta, stop=(N / 2 + 1) * delta, step=delta)
    # Bug fix: the deprecated `normed=False` keyword was removed in
    # NumPy >= 1.24 and raised TypeError; the default behavior is identical.
    counts, *_ = np.histogram2d(samps[0], samps[1], bins=[bins, bins])
    counts[counts == 0] = 1  # avoid division by zero in empty cells
    sum_real, *_ = np.histogram2d(
        samps[0], samps[1], bins=[bins, bins], weights=samps[2]
    )
    sum_imag, *_ = np.histogram2d(
        samps[0], samps[1], bins=[bins, bins], weights=samps[3]
    )
    # Cell-average, then rotate to image orientation.
    gridded_vis = np.zeros((2, N, N))
    gridded_vis[0] = np.rot90(sum_real / counts, 1)
    gridded_vis[1] = np.rot90(sum_imag / counts, 1)
    return gridded_vis
def convert_amp_phase(data, sky_sim=False):
    """Convert to stacked (amplitude, phase) representation along axis 1.

    With ``sky_sim=True`` the input is already complex; otherwise channels
    0/1 of axis 1 hold the real/imaginary parts and are recombined first.
    """
    if not sky_sim:
        data = data[:, 0] + 1j * data[:, 1]
    return np.stack((np.abs(data), np.angle(data)), axis=1)
def convert_real_imag(data, sky_sim=False):
    """Convert to stacked (real, imaginary) representation along axis 1.

    With ``sky_sim=True`` the input is complex and is split into parts;
    otherwise channels 0/1 of axis 1 are restacked unchanged.
    """
    parts = (data.real, data.imag) if sky_sim else (data[:, 0], data[:, 1])
    return np.stack(parts, axis=1)
def save_fft_pair(path, x, y, name_x="x", name_y="y"):
    """
    write fft_pairs created in second analysis step to h5 file

    Only rows 0..64 of axis 2 are stored.
    NOTE(review): the hard-coded 65 suggests keeping the non-redundant half
    of a 128-pixel Hermitian-symmetric plane — confirm for other grid sizes.
    """
    x = x[:, :, :65, :]
    y = y[:, :, :65, :]
    # The `with` block closes the file; the explicit hf.close() that used to
    # sit inside it was redundant and has been removed.
    with h5py.File(path, "w") as hf:
        hf.create_dataset(name_x, data=x)
        hf.create_dataset(name_y, data=y)
if __name__ == "__main__":
create_gridded_data_set(
"/net/big-tank/POOL/projects/radio/test_rime/create_dataset.toml"
)
| radionets-project/pyvisgen | pyvisgen/gridding/gridder.py | gridder.py | py | 10,426 | python | en | code | 2 | github-code | 13 |
31228855737 | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator
from django.db import models
User = get_user_model()
class Ingredients(models.Model):
    """A cooking ingredient; unique per (name, measurement unit) pair, so the
    same name may appear with different units."""
    name = models.CharField(
        max_length=200,
        verbose_name='Название'
    )
    measurement_unit = models.CharField(
        max_length=100,
        verbose_name='Единица измерения'
    )
    class Meta:
        verbose_name_plural = 'Ингредиенты'
        constraints = [
            models.UniqueConstraint(
                fields=['name', 'measurement_unit'],
                name='unique_Ingredient'
            ),
        ]
    def __str__(self):
        # e.g. "sugar, g"
        return ', '.join((self.name, self.measurement_unit))
class Tags(models.Model):
    """Recipe tag with a unique name, display color and slug."""
    name = models.CharField(max_length=200, unique=True, verbose_name='Имя')
    # Color code for the UI (e.g. a hex string); uniqueness enforced.
    color = models.CharField(max_length=10, unique=True, verbose_name='Цвет')
    slug = models.CharField(max_length=100, unique=True)
    class Meta:
        verbose_name_plural = 'Теги'
    def __str__(self):
        return self.name
class Recipes(models.Model):
    """A published recipe: author, tags, ingredients (through
    IngredientRecipe) and cooking metadata."""
    tags = models.ManyToManyField(
        Tags, related_name='recipes', verbose_name='Тег'
    )
    author = models.ForeignKey(
        User, on_delete=models.CASCADE,
        related_name='recipe',
        verbose_name='Автор'
    )
    # Amounts live on the IngredientRecipe through-model.
    ingredients = models.ManyToManyField(
        Ingredients,
        related_name='recipes',
        through='IngredientRecipe',
        verbose_name='Ингредиенты'
    )
    name = models.CharField(max_length=200, verbose_name='Название')
    image = models.ImageField(
        upload_to='recipes_images/',
        verbose_name='Изображение'
    )
    text = models.TextField(verbose_name='Описание')
    cooking_time = models.IntegerField(
        verbose_name='Время приготовления, мин',
        validators=[MinValueValidator(
            1, 'Время приготовления должно быть положительным числом'
        )]
    )
    pub_date = models.DateTimeField(
        auto_now_add=True, verbose_name='Дата публикации'
    )
    class Meta:
        verbose_name_plural = 'Рецепты'
        # Newest recipes first by default.
        ordering = ['-pub_date']
    def __str__(self):
        return self.name
class IngredientRecipe(models.Model):
    """Through-model linking a recipe to one ingredient with its amount;
    each (ingredient, recipe) pair may appear only once."""
    ingredient = models.ForeignKey(
        Ingredients,
        on_delete=models.CASCADE,
        verbose_name='Ингредиент'
    )
    recipe = models.ForeignKey(
        Recipes,
        on_delete=models.CASCADE,
        verbose_name='Рецепт'
    )
    amount = models.FloatField(
        verbose_name='Количество',
        validators=[MinValueValidator(
            0.001, 'Количество ингредианта должно быть положительным числом'
        )]
    )
    class Meta:
        verbose_name_plural = 'Ингредиенты'
        constraints = [
            models.UniqueConstraint(
                fields=['ingredient', 'recipe'],
                name='unique_IngredientRecipe'
            ),
        ]
    def __str__(self):
        return f'{self.ingredient} для {self.recipe}'
class Subscription(models.Model):
    """One user (subscriber) following another (subscribed); duplicate
    subscriptions are blocked by the unique constraint."""
    subscriber = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='subscribed',
        verbose_name='Подписчик'
    )
    subscribed = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='subscribers',
        verbose_name='Автор'
    )
    class Meta:
        verbose_name_plural = 'Подписки'
        constraints = [
            models.UniqueConstraint(
                fields=['subscriber', 'subscribed'],
                name='unique_subscription'
            ),
        ]
class Favorite(models.Model):
    """A recipe marked as a favorite by a user; one entry per (user, recipe)."""
    lover = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='favorites',
        verbose_name='Подписчик'
    )
    recipe = models.ForeignKey(
        Recipes,
        on_delete=models.CASCADE,
        related_name='lovers',
        verbose_name='Рецепт'
    )
    class Meta:
        verbose_name_plural = 'Избранные рецепты'
        constraints = [
            models.UniqueConstraint(
                fields=['lover', 'recipe'],
                name='unique_Favorite'
            ),
        ]
class ShopingCart(models.Model):
    """A recipe placed in a user's shopping cart.

    NOTE: the class name keeps the historical misspelling ("Shoping");
    renaming it would require a migration and updates to every reference.
    """
    buyer = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='recipes_cart',
        verbose_name='Покупатель'
    )
    recipe = models.ForeignKey(
        Recipes,
        on_delete=models.CASCADE,
        related_name='buyer',
        verbose_name='Рецепт'
    )
    class Meta:
        verbose_name_plural = 'Рецепты в корзине'
| palmage/foodgram-project-react | backend/recipes/models.py | models.py | py | 4,917 | python | en | code | 0 | github-code | 13 |
27812813973 | # 0. 导入需要的包和模块
from PyQt5.Qt import *
class Window(QWidget):
    """Demo window for widget interaction state: the login button is enabled
    only while the line edit is non-empty, and the label shows the result."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("交互状态的学习")
        self.resize(500, 500)
        self.setup_ui()
    def setup_ui(self):
        # Add three child widgets: a (hidden) result label, a line edit and
        # a login button.
        label = QLabel(self)
        label.setText('标签')
        label.move(100, 50)
        label.hide()
        le = QLineEdit(self)
        le.setText('文本框')
        le.move(100, 100)
        btn = QPushButton(self)
        btn.setText('登录')
        btn.move(100, 150)
        btn.setEnabled(False)
        def text_cao(text):
            # Enable the button only when the line edit holds some text.
            print('文本内容发生了改变', text)
            btn.setEnabled(len(text) > 0)
        le.textChanged.connect(text_cao)
        def check():
            print('登录按钮被点击了')
            # 1. Get the text of the line edit
            context = le.text()
            # Check whether the content equals 'Sz'
            if context == 'Sz':
                label.setText('登录成功')
            else:
                label.setText('登录失败')
            label.show()
            label.adjustSize()
        btn.pressed.connect(check)
if __name__ == '__main__':
    import sys
    # A QApplication must exist before any widget is created.
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec())
| zhangzhaozhe/test-demo | 04-QWidget-交互状态的案例.py | 04-QWidget-交互状态的案例.py | py | 1,350 | python | zh | code | 0 | github-code | 13 |
7631363442 | # G[3] PyPy3 메모리 115364 KB 시간 192 ms / Python3 메모리 30840 KB 시간 600 ms
import sys
input = sys.stdin.readline
# Dice net stored as a cross; dice[1] is the middle row and the column-1
# entries of the other rows hold the remaining faces. dice[3][1] appears
# to track the face currently on the bottom (used for steering below).
dice = [[0, 2, 0], [4, 1, 3], [0, 5, 0], [0, 6, 0]]
# (row, col) deltas: east, south, west, north.
MOVE = [(0, 1), (1, 0), (0, -1), (-1, 0)]
# Current dice position, heading (index into MOVE), and running score.
x, y, d = 0, 0, 0
ans = 0
def rollTheDice(d):
    # Roll the dice net in place for a move in direction d
    # (0=east, 1=south, 2=west, 3=north in (row, col) terms, per MOVE).
    # NOTE(review): the face bookkeeping is inferred from the initial net
    # [[0,2,0],[4,1,3],[0,5,0],[0,6,0]] -- confirm against BOJ 23288.
    if d == 0:
        # East: rotate middle row right, cycling through dice[3][1].
        temp = dice[1][2]
        dice[1] = [dice[3][1], dice[1][0], dice[1][1]]
        dice[3][1] = temp
    elif d == 1:
        # South: cycle the column-1 faces downward.
        temp = dice[3][1]
        dice[3][1] = dice[2][1]
        dice[2][1] = dice[1][1]
        dice[1][1] = dice[0][1]
        dice[0][1] = temp
    elif d == 2:
        # West: rotate middle row left, cycling through dice[3][1].
        temp = dice[1][0]
        dice[1] = [dice[1][1], dice[1][2], dice[3][1]]
        dice[3][1] = temp
    elif d == 3:
        # North: cycle the column-1 faces upward.
        temp = dice[0][1]
        dice[0][1] = dice[1][1]
        dice[1][1] = dice[2][1]
        dice[2][1] = dice[3][1]
        dice[3][1] = temp
def bfs(i, j):
    """Flood-fill from (i, j) and return the size of the 4-connected
    component of cells whose value equals arr[i][j].

    Reads module globals N, M, arr, MOVE; does not modify them.
    """
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per
    # dequeue, making the whole BFS quadratic in the component size.
    from collections import deque
    queue = deque([(i, j)])
    visited = [[False for _ in range(M)] for _ in range(N)]
    visited[i][j] = True
    cnt = 0
    while queue:
        x, y = queue.popleft()
        cnt += 1
        for dx, dy in MOVE:
            if 0 <= x+dx < N and 0 <= y+dy < M and not visited[x+dx][y+dy] and arr[x+dx][y+dy] == arr[x][y]:
                queue.append((x+dx, y+dy))
                visited[x+dx][y+dy] = True
    return cnt
# Read the N x M board and simulate K dice moves (BOJ 23288).
N, M, K = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
for _ in range(K):
    dx, dy = MOVE[d]
    # Bounce off the board edge: reverse direction before moving.
    if 0 > x+dx or x+dx >= N or 0 > y+dy or y+dy >= M:
        d = (d+2) % 4
        dx, dy = MOVE[d]
    rollTheDice(d)
    x += dx
    y += dy
    # Score: landing cell value times the size of its same-valued component.
    ans += arr[x][y] * bfs(x, y)
    # Steer by comparing dice[3][1] with the landing cell:
    # larger -> turn clockwise, smaller -> counter-clockwise, equal -> straight.
    if dice[3][1] > arr[x][y]:
        d = (d+1) % 4
    elif dice[3][1] < arr[x][y]:
        d = (d-1) % 4
print(ans)
6337497562 | '''
User Story 06
Divorce before death
'''
from datetime import datetime
def indivDeaths(input, newFam):
    """Build divorce records for US06.

    For each family row with a divorce date (index 2 != "NA"), pair it
    with the corresponding newFam row and emit
    [line_number, family_id, divorce_date, spouse_id, spouse_id].
    """
    return [
        [extra[0], fam[0], fam[2], fam[3], fam[5]]
        for fam, extra in zip(input, newFam)
        if fam[2] != "NA"
    ]
def checkFams(input, indivs):
    """Report US06 violations: a divorce dated after a spouse's death.

    input  -- individual records; [0] = ID, [6] = death date or "NA"
    indivs -- divorce rows from indivDeaths(); [0] = line number,
              [2] = divorce date, [3]/[4] = spouse IDs
    Returns the list of error strings (each is also printed).
    """
    errors = []
    # Original behavior: if either date fails to parse, both collapse to
    # this fixed date so no error is emitted for that pair.
    fallback = datetime.strptime("2018-01-01", '%Y-%m-%d')
    for person in input:
        for divorce in indivs:
            if person[0] not in (divorce[3], divorce[4]):
                continue  # not a spouse in this divorce record
            if person[6] == "NA":
                continue  # still alive; nothing to check
            try:
                death = datetime.strptime(person[6], '%Y-%m-%d')
                div = datetime.strptime(divorce[2], '%Y-%m-%d')
            except ValueError:
                death = div = fallback
            if div > death:
                msg = ("ERROR: INDIVIDUAL: US06: " + person[0] +
                       ": Divorce date occurs after their date of death on line " +
                       str(divorce[0]))
                print(msg)
                errors.append(msg)
    return errors
def main(inputindi, inputfam, newFam):
    """Run user story US06: return the list of error strings for any
    divorce dated after a spouse's death."""
    #indivDeaths(tables[1])
    return checkFams(inputindi, indivDeaths(inputfam, newFam))
3086645915 | """ WebConfiguration -- Singleton for holding global configuration
This file defines the global configuration and makes it accessible as a
singleton variable. Any changes made to an instance of the configuration
are propagated to all other instances.
A default configuration is loaded, if not specified otherwise.
Author: Nils Dycke (dycke@ukp...)
"""
import redis
import os
import logging
# holds the configuration object
_singleton = None
# env variables
RABBITMQ_HOST = os.getenv("RABBITMQ_HOST")
RABBITMQ_PORT = os.getenv("RABBITMQ_PORT")
REDIS_HOST = os.getenv("REDIS_HOST")
REDIS_PORT = os.getenv("REDIS_PORT")
BROKER_PORT = os.getenv("BROKER_PORT")
# default config of all environmental parameters.
# the parameters in "app" are passed to the flask app specifically
# the parameters in "grobid" are passed to grobid specifically
DEFAULT = {
"name": "broker",
"log": True,
"debug": False,
"secret_key": "DEFAULT-SECRET-KEY",
"session_backend": f"redis://{REDIS_HOST}:{REDIS_PORT}",
"result_backend": f"redis://{REDIS_HOST}:{REDIS_PORT}",
"app": {
"host": "0.0.0.0",
"port": BROKER_PORT,
}
}
DEFAULT_DEV = {
"name": "broker",
"log": True,
"debug": False, # currently: discouraged; you need a proper debugger setup for that to work
"secret_key": "DEBUGGING-SECRET-KEY",
"session_backend": f"redis://{REDIS_HOST}:{REDIS_PORT}",
"result_backend": f"redis://{REDIS_HOST}:{REDIS_PORT}",
"app": {
"host": "127.0.0.1",
"port": BROKER_PORT
}
}
def instance(**kwargs):
    """
    Return the global WebConfiguration singleton, creating it on first use.

    :param kwargs: forwarded to WebConfiguration on first creation only
    :return: the web configuration object in use
    """
    global _singleton
    if _singleton is None:
        # First call: the kwargs take effect here.
        _singleton = WebConfiguration(**kwargs)
    elif len(kwargs) > 0:
        # Bug fix: this warning previously also fired on the very first call
        # (when the kwargs HAD been applied). Only warn when the singleton
        # already existed and the reconfiguration attempt was ignored.
        logging.debug("WARNING: Singleton WebConfiguration already exists; you are trying to reconfigure it, which failed.")
    return _singleton
class WebConfiguration:
    """
    Object for getting and updating the paramters for running the web server.
    If you need to add more components to this configuration, add an attribute
    to the class, add a parameter to the update function and update the
    conf attribute accordingly.
    """
    def __init__(self, **kwargs):
        # "dev" selects the DEFAULT_DEV template; pop it so it is not
        # forwarded to update() below.
        if "dev" in kwargs:
            self.dev = kwargs["dev"]
            del kwargs["dev"]
        else:
            self.dev = False
        if self.dev:
            self.conf = DEFAULT_DEV.copy()
        else:
            self.conf = DEFAULT.copy()
        # NOTE(review): "debug" is applied both here and again via
        # update(**kwargs) below -- redundant but harmless.
        if "debug" in kwargs and kwargs["debug"]:
            self.conf["debug"] = True
            self.debug = True
        else:
            self.debug = False
        logging.debug(self.conf)
        # Derived per-component config dicts; populated by setup().
        self.flask = None
        self.session = None
        self.celery = None
        self.socketio = None
        self.app = None
        self.update(**kwargs)
    def update(self, secret_key: str = None,
               session_backend: str = None,
               result_backend: str = None,
               broker: str = None,
               resources_dir: str = None,
               app: dict = None,
               debug: bool = None,
               log: bool = None):
        """Overwrite individual self.conf entries (None means 'leave
        unchanged'), then rebuild the derived dicts via setup()."""
        # update config
        if secret_key is not None:
            self.conf["secret_key"] = secret_key
        if session_backend is not None:
            self.conf["session_backend"] = session_backend
        if result_backend is not None:
            self.conf["result_backend"] = result_backend
        if broker is not None:
            self.conf["broker"] = broker
        if app is not None:
            self.conf["app"] = app
        if debug is not None:
            self.conf["debug"] = debug
        if log is not None:
            self.conf["log"] = log
        if resources_dir is not None:
            self.conf["resources_dir"] = resources_dir
        # init default values based on the provided information
        self.setup()
    def setup(self):
        """Build the flask/session/socketio/app dicts from self.conf.
        Note: connects to Redis (redis.from_url) for the session store."""
        self.flask = {
            "SECRET_KEY": self.conf["secret_key"]
        }
        self.session = {
            "SESSION_TYPE": "redis",
            "SESSION_PERMANENT": False,
            "SESSION_USE_SIGNER": True,
            "SESSION_REDIS": redis.from_url(self.conf["session_backend"])
        }
        self.socketio = {
            "cors_allowed_origins": '*',
        }
        self.app = {
            "debug": self.conf["debug"]
        }
        self.app.update(self.conf["app"])
72220141778 | import os
import numpy as np
from tempfile import TemporaryFile
# Location of the pre-trained GloVe 840B / 300-d vectors.
BASE_DIR = '.'
GLOVE_DIR = BASE_DIR + '/glove.840B/'
# word -> 300-d float32 vector, parsed from the GloVe text file.
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.840B.300d.txt'))
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# Initialize with small random noise so out-of-vocabulary words get a
# non-zero vector; 14099 is presumably the vocab.dat size -- TODO confirm.
embedding_matrix = 0.1 * np.random.randn(14099, 300)
vocab = open(os.path.join(BASE_DIR, 'vocab.dat'))
j = 0
for line in vocab:
    # Each vocab line is "<id> <word>"; split only on the first space.
    ls = line.split(' ', 1)
    i = int(ls[0])
    word = ls[1]
    embedding_vector = embeddings_index.get(word.rstrip())
    if embedding_vector is not None:
        # ids appear to be 1-based, hence i-1 -- verify against vocab.dat.
        embedding_matrix[i-1] = embedding_vector
        j = j + 1  # count of vocabulary words found in GloVe
np.savetxt('wordvector_300_840B.txt', embedding_matrix)
| jin1205/CMPT741-sentiment-analysis | wordembedding.py | wordembedding.py | py | 803 | python | en | code | 0 | github-code | 13 |
10508409714 | import datetime
import json
import os
from os import path
import random
import sys
import time
import itertools
import matplotlib as mpl
from matplotlib import pyplot
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from numpy import linalg as LA
import pandas as pd
from scipy import linalg
from scipy.io import arff
from sklearn.ensemble import RandomForestRegressor
import sklearn.preprocessing as preprocessing
import sklearn.cluster as cluster
from sklearn.model_selection import train_test_split
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import adjusted_rand_score
from sklearn.decomposition import PCA, FastICA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import learning_curve
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn import mixture
from sklearn import random_projection
from scipy import stats
import Utils
###############################################################
################ Dataset Initialization Methods ###############
###############################################################
def initializeDataset(datasetDict, randomState):
    """Populate datasetDict for the dataset named in datasetDict['name'].

    Unknown names are silently ignored (same as the original if/elif chain).
    """
    initializers = {
        'starcraft': initializeStarcraftDataset,
        'seismic bumps': initializeSeismicBumpsDataset,
    }
    init_fn = initializers.get(datasetDict['name'])
    if init_fn is not None:
        init_fn(datasetDict, randomState)
def initializeStarcraftDataset(datasetDict, randomState):
    """Load the two StarCraft arff files (PvT and PvZ mid-game builds),
    merge them with an 'opponent' indicator column, scale features to
    [-1, 1], label-encode the 7 build classes, and store X/Y plus the
    CV settings into datasetDict."""
    # load first arff file
    data_1, meta_1 = arff.loadarff('datasets/starcraft/scmPvT_Protoss_Mid.arff')
    df_1 = pd.DataFrame(data_1)
    df_1['opponent'] = pd.Series(np.full(df_1.shape[0], 0))
    # load second arff file
    data_2, meta_2 = arff.loadarff('datasets/starcraft/scmPvZ_Protoss_Mid.arff')
    df_2 = pd.DataFrame(data_2)
    df_2['opponent'] = pd.Series(np.full(df_2.shape[0], 1))
    # concatenate the two dataframes into one
    df = pd.concat([df_1, df_2])
    # pull out the features and target variable
    X = df.loc[:, df.columns != 'midBuild']
    datasetDict['featureNames'] = list(X.columns)
    #print(X.head())
    Y = df['midBuild'].to_frame()
    #print(Y.head())
    #print(Y['midBuild'].value_counts())
    datasetDict['cv'] = StratifiedKFold(n_splits=4, random_state=randomState, shuffle=True)
    datasetDict['shuffle'] = True
    datasetDict['trainPercentage'] = .80
    # 680 was used when we were just using one data file
    # dataset_dict[train_sizes_key] = [int(x) for x in np.arange(8, 680, 10)]
    datasetDict['trainSizes'] = [int(x) for x in np.arange(8, 1295, 25)]
    datasetDict['outputClassNames'] = ['Carrier', 'FastDT', 'FastExpand', 'FastLegs', 'FastObs', 'ReaverDrop',
                                       'Unknown']
    # do the one hot encoding using pandas instead of sklearn
    # X_enc = pd.get_dummies(X)
    # print(X_enc.head())
    X_enc = X
    # standardize because one of the features goes from 0-1 while all the others are on the same scale
    # standardizing doesn't hurt
    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1), copy=True)
    X_std = scaler.fit_transform(X_enc)
    # Some scikit-learn classifiers prefer numeric class labels, so encode
    # the 7 build-order class strings with LabelEncoder.
    le = preprocessing.LabelEncoder()
    Y_enc = le.fit_transform(Y.values.ravel())
    X = X_std
    Y = Y_enc
    datasetDict['X'] = X
    datasetDict['Y'] = Y
def initializeSeismicBumpsDataset(datasetDict, randomState):
    """Load the UCI seismic-bumps arff file, one-hot encode the categorical
    features, robust-scale everything, and store X/Y plus CV settings into
    datasetDict. Scoring is F1 because the classes are heavily imbalanced."""
    # code from http://nmouatta.blogspot.com/2016/09/imbalanced-class-classification-with.html
    # load arff file
    data, meta = arff.loadarff('datasets/seismic-bumps/seismic-bumps.arff')
    df = pd.DataFrame(data)
    column_labels = ['seismic',
                     'seismoacoustic',
                     'shift',
                     'genergy',
                     'gpuls',
                     'gdenergy',
                     'gdpuls',
                     'ghazard',
                     'nbumps',
                     'nbumps2',
                     'nbumps3',
                     'nbumps4',
                     'nbumps5',
                     'nbumps6',
                     'nbumps7',
                     'nbumps89',
                     'energy',
                     'maxenergy',
                     'outcome']
    df.columns = column_labels
    #print(df.head())
    # pull out the features and target variable
    X = df.loc[:, df.columns != 'outcome']
    #print(X.head())
    Y = df['outcome'].to_frame()
    #print(Y.head())
    #print(Y['outcome'].value_counts())
    datasetDict['cv'] = StratifiedKFold(n_splits=4, random_state=randomState, shuffle=True)
    datasetDict['shuffle'] = True
    datasetDict['trainPercentage'] = .80
    # with a cv of 4
    datasetDict['trainSizes'] = [int(x) for x in np.arange(20, int(1549), 25)]
    datasetDict['outputClassNames'] = ['Non-Hazardous', 'Hazardous']
    datasetDict['scoring'] = 'f1'
    # do the one hot encoding using pandas instead of sklearn
    X_enc = pd.get_dummies(X)
    datasetDict['featureNames'] = list(X_enc.columns)
    #print(X_enc.head())
    # RobustScaler replaced min-max scaling here, presumably for outlier
    # robustness; standardizing doesn't hurt
    #scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1), copy=True)
    scaler = preprocessing.RobustScaler()
    X_std = scaler.fit_transform(X_enc)
    # Some machine learning classifiers in Scikit-learn prefer that the class labels in the target variable are encoded
    # with numbers. Since we only have two classes, we can use LabelEncoder.
    le = preprocessing.LabelEncoder()
    Y_enc = le.fit_transform(Y.values.ravel())
    X = X_std
    Y = Y_enc
    datasetDict['X'] = X
    datasetDict['Y'] = Y
###############################################################
####### Neural Network and Support Methods for it #############
###############################################################
# code taken from my assignment #1 and modified
def runANN(config, trainX, trainY, testX, testY, datasetName, datasetDict, datasetResultsDict, randomState):
print("Running " + config['name'] + " on " + datasetName)
algResultsDict = {}
algName = config['name']
datasetResultsDict[algName] = algResultsDict
algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
algName)
if not os.path.isdir(algResultsDirectory):
os.makedirs(algResultsDirectory)
resultsDirectory = algResultsDirectory
# load dataset/algorithm specific config information (if any)
runLearningCurve = config['nnRunLearningCurve']
runModelComplexityCurve = config['nnRunModelComplexityCurve']
runFinalTest = config['nnRunFinalTest']
runGridSearchCV = config['nnRunGridSearchCV']
initialAlpha = config['nnInitialAlpha']
maxIterations = config['nnMaxIterations']
# load dataset specific configuration setting
if 'scoring' in datasetDict.keys():
scoring = datasetDict['scoring']
else:
scoring = None
shuffle = datasetDict['shuffle']
cv = datasetDict['cv']
gs_best_nn = None
###########################################################
# do the learning curve
###########################################################
if runLearningCurve:
learning_curve_results_dict = {}
algResultsDict['learningCurve'] = learning_curve_results_dict
learning_curve_train_sizes = datasetDict['trainSizes']
learning_curve_results_dict['learningCurveEstimatorSettings'] = {}
mlp = MLPClassifier(alpha=initialAlpha, random_state=randomState, max_iter=maxIterations)
if True or datasetName == "starcraft":
first_param_value = mlp.get_params()['alpha']
if first_param_value is not None:
first_param_value = str(float(first_param_value))
else:
first_param_value = 'None'
second_param_value = mlp.get_params()['learning_rate']
if second_param_value is not None:
second_param_value = second_param_value
else:
second_param_value = 'None'
doLearningCurves(mlp, algName, trainX, trainY, cv,
learning_curve_train_sizes, shuffle,
scoring,
resultsDirectory, datasetName, learning_curve_results_dict,
['alpha', 'learning_rate'], [first_param_value, second_param_value], "(Initial) ", True, randomState)
else:
print("unknown nn dataset")
sys.exit()
###########################################################
# now do the iteration 'training' curve
###########################################################
if True or datasetName == "starcraft":
training_size = 1000
num_iterations = range(1, 2000, 20)
else:
print("unknown nn dataset")
sys.exit()
doIterationLearningCurves(mlp, algName, trainX, trainY, cv,
training_size, shuffle,
scoring,
resultsDirectory, datasetName, learning_curve_results_dict,
['alpha', 'learning_rate'], [first_param_value, second_param_value], "(Initial) ", True, num_iterations, randomState)
###########################################################
# now do the validation curve
###########################################################
if runModelComplexityCurve:
validation_curve_results_dict = {}
algResultsDict['validationCurve'] = validation_curve_results_dict
model_complexity_results_dict = {}
algResultsDict['modelComplexity'] = model_complexity_results_dict
if runGridSearchCV:
gs_nn = MLPClassifier(alpha=initialAlpha, random_state=randomState, max_iter=maxIterations)
num_nodes_in_layer_values = np.arange(1, 200, 4)
num_hidden_layers_values = [1, 2]
param_grid = [
{'num_nodes_in_layer': num_nodes_in_layer_values, 'num_hidden_layers': num_hidden_layers_values}
]
grid_param_1 = num_nodes_in_layer_values
grid_param_2 = num_hidden_layers_values
grid_param_1_name = 'alpha'
grid_param_2_name = 'hidden_layer_sizes'
gs_best_nn = doGridSearchCurves(gs_nn, algName, trainX, trainY, cv, param_grid, scoring,
grid_param_1, grid_param_2, grid_param_1_name, grid_param_2_name, resultsDirectory, None, model_complexity_results_dict)
###########################################################
# now do the full training of the final model and test on the test set
###########################################################
if runFinalTest:
final_test_results_dict = {}
algResultsDict['finalTest'] = final_test_results_dict
if gs_best_nn is None:
finalAlpha = config['nnFinalAlpha']
finalMaxIterations = config['nnFinalMaxIterations']
finalNumHiddenLayers = config['nnFinalNumHiddenLayers']
finalNumNodesPerLayer = config['nnFinalNumNodesPerLayer']
#hidden_layer_sizes_values = []
if finalNumHiddenLayers == 1:
hidden_layer_sizes_values = (finalNumNodesPerLayer)
elif finalNumHiddenLayers == 2:
hidden_layer_sizes_values = (finalNumNodesPerLayer, finalNumNodesPerLayer)
else:
print("Too many hidden layers!!")
sys.exit()
gs_best_nn = MLPClassifier(alpha=finalAlpha, random_state=randomState, max_iter=finalMaxIterations, hidden_layer_sizes=hidden_layer_sizes_values)
gs_best_nn.fit(trainX, trainY)
start_time = time.time()
Y_test_pred = gs_best_nn.predict(testX)
end_time = time.time()
final_test_results_dict['finalPredictTime'] = end_time - start_time
final_test_results_dict['finalTestParams'] = gs_best_nn.get_params()
#########################################################
# do the final learning curve
#########################################################
if runLearningCurve:
first_param_value = gs_best_nn.get_params()['alpha']
if first_param_value is not None:
first_param_value = str(float(first_param_value))
else:
first_param_value = 'None'
second_param_value = gs_best_nn.get_params()['learning_rate']
if second_param_value is not None:
second_param_value = second_param_value
else:
second_param_value = 'None'
doLearningCurves(gs_best_nn, algName, trainX, trainY, cv,
learning_curve_train_sizes, shuffle,
scoring,
resultsDirectory, datasetName, learning_curve_results_dict,
['alpha', 'learning_rate'], [first_param_value, second_param_value], "(Final) ", False, randomState)
###########################################################
# now do the final iteration 'training' curve
###########################################################
if runLearningCurve:
training_size = 1000
num_iterations = range(1, 2000, 20)
doIterationLearningCurves(gs_best_nn, algName, trainX, trainY, cv,
training_size, shuffle,
scoring,
resultsDirectory, datasetName, learning_curve_results_dict,
['alpha', 'learning_rate'], [first_param_value, second_param_value], "(Final) ", True, num_iterations, randomState)
# reset this back to what it was before doing the iteration learning curves
gs_best_nn.set_params(max_iter=maxIterations)
##########################################################
# output the confusion matrix and other final errors
##########################################################
test_confusion_matrix = confusion_matrix(testY, Y_test_pred)
classes = datasetDict['outputClassNames']
# Plot non-normalized confusion matrix
plt.figure()
Utils.plot_confusion_matrix(test_confusion_matrix, classes=classes)
plt.tight_layout()
plt.savefig(path.join(resultsDirectory, algName + '_FinalTestConfusionMatrixNonNormalized.png'))
plt.show()
# Plot normalized confusion matrix
plt.figure()
Utils.plot_confusion_matrix(test_confusion_matrix, classes=classes, normalize=True)
plt.tight_layout()
plt.savefig(path.join(resultsDirectory, algName + '_FinalTestConfusionMatrixNormalized.png'))
plt.show()
if 'scoring' in datasetDict.keys() and datasetDict['scoring'] == 'f1':
test_error = 1.0 - f1_score(testY, Y_test_pred)
else:
test_error = 1.0 - gs_best_nn.score(testX, testY)
with open(path.join(resultsDirectory, algName + '_FinalError.txt'),
'w') as f:
f.write(str(test_error))
final_test_results_dict['finalTestError'] = test_error
return
def doLearningCurves(estimator, algorithm_name, X_train, Y_train, cv,
learning_curve_train_sizes, shuffle,
scoring,
dataset_results_directory, dataset_name, learning_curve_results_dict,
variable_names, variable_values_as_strings, prefix_string,
is_initial, randomState):
start_time = time.time()
train_sizes_abs, learning_train_scores, learning_validation_scores = learning_curve(estimator, X_train, Y_train,
cv=cv,
# train_sizes=np.arange(5, 1003, 10),
train_sizes=learning_curve_train_sizes,
random_state=randomState,
shuffle=shuffle,
scoring=scoring)
end_time = time.time()
if is_initial:
learning_curve_results_dict['initialLearningCurveTime'] = end_time - start_time
else:
learning_curve_results_dict['finalLearningCurveTime'] = end_time - start_time
fig = plt.figure()
ax = fig.add_subplot(111)
mean_learning_train_scores = np.mean([1.0 - x for x in learning_train_scores[:]], axis=1)
std_learning_train_scores = np.std([1.0 - x for x in learning_train_scores[:]], axis=1)
# min_learning_train_scores = np.min([1.0 - x for x in learning_train_scores[:]], axis=1)
# max_learning_train_scores = np.max([1.0 - x for x in learning_train_scores[:]], axis=1)
mean_learning_validation_scores = np.mean([1.0 - x for x in learning_validation_scores[:]], axis=1)
std_learning_validation_scores = np.std([1.0 - x for x in learning_validation_scores[:]], axis=1)
# min_learning_validation_scores = np.min([1.0 - x for x in learning_validation_scores[:]], axis=1)
# max_learning_validation_scores = np.max([1.0 - x for x in learning_validation_scores[:]], axis=1)
learning_curve_results_dict['trainSizes'] = train_sizes_abs.tolist()
#learning_curve_results_dict[mean_learning_train_scores_key] = mean_learning_train_scores.tolist()
#learning_curve_results_dict[std_learning_train_scores_key] = std_learning_train_scores.tolist()
#learning_curve_results_dict[mean_learning_validation_scores_key] = mean_learning_validation_scores.tolist()
#learning_curve_results_dict[std_learning_validation_scores_key] = std_learning_validation_scores.tolist()
ax.fill_between(train_sizes_abs, mean_learning_train_scores - std_learning_train_scores,
mean_learning_train_scores + std_learning_train_scores, alpha=0.1, color='r')
ax.fill_between(train_sizes_abs, mean_learning_validation_scores - std_learning_validation_scores,
mean_learning_validation_scores + std_learning_validation_scores, alpha=0.1, color='grey')
# ax.errorbar(train_sizes_abs, mean_learning_train_scores, yerr=[max_learning_train_scores-mean_learning_train_scores, mean_learning_train_scores-min_learning_train_scores], label='Their CV Train', color='orange', linestyle='dashed')
ax.plot(train_sizes_abs, mean_learning_train_scores, '-o', label='Train', color='orange', ms=3) #, linestyle='dashed')
# ax.errorbar(train_sizes_abs, mean_learning_validation_scores, yerr=[max_learning_validation_scores-mean_learning_validation_scores, mean_learning_validation_scores-min_learning_validation_scores], label='Their CV Validation', color='black')
ax.plot(train_sizes_abs, mean_learning_validation_scores, '-o', label='CV', color='black', ms=3)
ax.set_ylim(0.0, 1.0)
ax.set_xlabel('# Training Samples')
ax.set_ylabel('Error')
ax.minorticks_on()
ax.grid(b=True, which='major', color='b', linestyle='-', alpha=0.2)
ax.grid(b=True, which='minor', color='b', linestyle='--', alpha=0.1)
leg = ax.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.6)
title_string = ""
title_string += prefix_string + "Dataset: " + dataset_name + ", " + "Algorithm: " + algorithm_name + "\n"
for variable_index, _ in enumerate(variable_names):
title_string += variable_names[variable_index] + ": " + variable_values_as_strings[variable_index] + "\n"
with open(path.join(dataset_results_directory, algorithm_name + '_' + prefix_string +'_LearningCurve.txt'), 'w') as f:
f.write(json.dumps(title_string))
plt.title(prefix_string + ' Learning Curve')
plt.tight_layout()
plt.savefig(path.join(dataset_results_directory, algorithm_name + '_' + prefix_string +'_LearningCurve.png'))
plt.show()
# taken and modified from https://matplotlib.org/gallery/shapes_and_collections/scatter.html#sphx-glr-gallery-shapes-and-collections-scatter-py
def doIterationLearningCurves(estimator, algorithm_name, X_train, Y_train, cv,
learning_curve_train_size, shuffle,
scoring,
dataset_results_directory, dataset_name, learning_curve_results_dict,
variable_names, variable_values_as_strings, prefix_string,
is_initial, iterations, randomState):
mean_learning_train_scores = []
mean_learning_validation_scores = []
start_time = time.time()
for iteration in iterations:
estimator.set_params(max_iter=iteration)
train_sizes_abs, learning_train_scores, learning_validation_scores = learning_curve(estimator, X_train, Y_train,
cv=cv,
# train_sizes=np.arange(5, 1003, 10),
train_sizes=[learning_curve_train_size],
random_state=randomState,
shuffle=shuffle,
scoring=scoring)
mean_learning_train_scores.extend(np.mean([1.0 - x for x in learning_train_scores[:]], axis=1))
std_learning_train_scores = np.std([1.0 - x for x in learning_train_scores[:]], axis=1)
mean_learning_validation_scores.extend(np.mean([1.0 - x for x in learning_validation_scores[:]], axis=1))
std_learning_validation_scores = np.std([1.0 - x for x in learning_validation_scores[:]], axis=1)
#print("Iteration: ", iteration, " mean error: ", mean_learning_validation_scores)
end_time = time.time()
if is_initial:
learning_curve_results_dict['initialTrainingCurveTime'] = end_time - start_time
else:
learning_curve_results_dict['finalTrainingCurveTime'] = end_time - start_time
fig = plt.figure()
ax = fig.add_subplot(111)
ax.fill_between(iterations, mean_learning_train_scores - std_learning_train_scores,
mean_learning_train_scores + std_learning_train_scores, alpha=0.1, color='r')
ax.fill_between(iterations, mean_learning_validation_scores - std_learning_validation_scores,
mean_learning_validation_scores + std_learning_validation_scores, alpha=0.1, color='grey')
ax.plot(iterations, mean_learning_train_scores, '-o', label='Train', color='orange', ms=3) #, linestyle='dashed')
ax.plot(iterations, mean_learning_validation_scores, '-o', label='CV', color='black', ms=3)
ax.set_ylim(0.0, 1.0)
ax.set_xlabel('# Iterations')
ax.set_ylabel('Error')
ax.minorticks_on()
ax.grid(b=True, which='major', color='b', linestyle='-', alpha=0.2)
ax.grid(b=True, which='minor', color='b', linestyle='--', alpha=0.1)
leg = ax.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.6)
title_string = ""
title_string += prefix_string + "Dataset: " + dataset_name + ", " + "Algorithm: " + algorithm_name + "\n"
for variable_index, _ in enumerate(variable_names):
title_string += variable_names[variable_index] + ": " + variable_values_as_strings[variable_index] + "\n"
with open(path.join(dataset_results_directory, algorithm_name + '_' + prefix_string +'_TrainingCurve.txt'), 'w') as f:
f.write(json.dumps(title_string))
plt.title(prefix_string + ' Training Curve')
plt.tight_layout()
plt.savefig(path.join(dataset_results_directory, algorithm_name + '_' + prefix_string +'_TrainingCurve.png'))
plt.show()
def doGridSearchCurves(estimator, algorithm_name, X_train, Y_train,
cv, param_grid, scoring,
grid_param_1, grid_param_2, grid_param_1_name, grid_param_2_name,
dataset_results_directory, x_axis_log_base, model_complexity_results_dict):
# some of this is from http://scikit-learn.org/stable/modules/grid_search.html
# we have to do something special with nn because my param grid has # hidden layers and # nodes
# doesn't fit neatly into GridSearchCV's version of param_grid
num_nodes_in_layer_values = np.arange(1, 200, 4)
num_hidden_layers_values = [1, 2]
hidden_layer_sizes_values = []
for num_hidden_layers_value in num_hidden_layers_values:
for num_nodes_in_layer_value in num_nodes_in_layer_values:
if num_hidden_layers_value == 1:
hidden_layer_sizes_values.append((num_nodes_in_layer_value,))
else:
hidden_layer_sizes_values.append((num_nodes_in_layer_value, num_nodes_in_layer_value))
nn_param_grid = [
{'hidden_layer_sizes': hidden_layer_sizes_values, 'alpha': [0.0001]}
]
start_time = time.time()
estimator = GridSearchCV(estimator, nn_param_grid, cv=cv, scoring=scoring, return_train_score=True)
end_time = time.time()
model_complexity_results_dict['modelComplexityTime'] = end_time - start_time
# now fit the optimum model to the full training data
start_time = time.time()
estimator.fit(X_train, Y_train)
end_time = time.time()
model_complexity_results_dict['finalFitTime'] = end_time - start_time
#print(estimator.cv_results_.keys())
# Calling Method
plotGridSearch(estimator.cv_results_, grid_param_1, grid_param_2, grid_param_1_name, grid_param_2_name,
algorithm_name, dataset_results_directory, x_axis_log_base)
return estimator.best_estimator_ # this can be used to run the best estimator for final testing
# taken and modified from https://stackoverflow.com/questions/37161563/how-to-graph-grid-scores-from-gridsearchcv
def plotGridSearch(cv_results, grid_param_1, grid_param_2, grid_param_1_name, grid_param_2_name,
algorithm_name, dataset_results_directory, x_axis_log_base=None):
# Param1 is the X-axis, Param 2 is represented as a different curve (color line)
# same values as below
# this seemed to be correct
num_nodes_in_layer_values = np.arange(1, 200, 4)
num_hidden_layers_values = [1, 2]
# Get Test Scores Mean and std for each grid search
validation_errors_mean = [1.0 - x for x in cv_results['mean_test_score']]
validation_errors_mean = np.array(validation_errors_mean).reshape(len(num_hidden_layers_values), len(num_nodes_in_layer_values))
training_errors_mean = [1.0 - x for x in cv_results['mean_train_score']]
training_errors_mean = np.array(training_errors_mean).reshape(len(num_hidden_layers_values), len(num_nodes_in_layer_values))
validation_scores_sd = cv_results['std_test_score']
validation_scores_sd = np.array(validation_scores_sd).reshape(len(num_hidden_layers_values), len(num_nodes_in_layer_values))
# Plot Grid search scores
_, ax = plt.subplots(1, 1)
for idx, val in enumerate(num_hidden_layers_values):
ax.plot(num_nodes_in_layer_values, validation_errors_mean[idx,:], '-o', ms=3, label= 'CV '+ '# hidden layers' + '=' + str(val))
ax.plot(num_nodes_in_layer_values, training_errors_mean[idx, :], '-o', ms=3, label='Train '+ '# hidden layers' + '=' + str(val))
ax.set_title("Grid Search Validation Errors") #, fontsize=20, fontweight='bold')
ax.set_ylim(0.0, 1.0)
ax.set_xlabel("# Nodes Per Layer") # , fontsize=16)
ax.set_ylabel('Train Error/CV Average Error') #, fontsize=16)
ax.minorticks_on()
ax.grid(b=True, which='major', color='b', linestyle='-', alpha=0.2)
ax.grid(b=True, which='minor', color='b', linestyle='--', alpha=0.1)
leg = ax.legend(loc="best", fancybox=True) #, fontsize=15)
leg.get_frame().set_alpha(0.6)
if (x_axis_log_base is not None):
ax.set_xscale("log", basex=x_axis_log_base)
ax.grid('on')
plt.tight_layout()
plt.savefig(path.join(dataset_results_directory, algorithm_name + '_GridSearchValidation.png'))
plt.show()
###############################################################
########### Dimensionality Reduction Methods ##################
###############################################################
# code taken and modified from https://machinelearningmastery.com/feature-selection-time-series-forecasting-python/
# and https://blog.datadive.net/selecting-good-features-part-iii-random-forests/
def runRandomForestRegressor(config, X, Y, datasetName, datasetResultsDict, datasetDict, randomState):
    """Use a 500-tree random forest's feature importances for feature selection.

    Fits a RandomForestRegressor on (X, Y), records the (rounded) importance
    score of every feature in the per-algorithm results dict, and shows/saves
    a bar chart of the importances.  When config['rfMinFeatureValueToInclude']
    is present, returns X with every column whose importance falls below that
    threshold removed; otherwise returns X unchanged.
    """
    algName = config['name']
    print("Running " + algName + " on " + datasetName)
    algResultsDict = {}
    datasetResultsDict[algName] = algResultsDict
    # Each algorithm writes its artifacts into its own sub-directory.
    resultsDirectory = os.path.join(datasetDict['resultsDirectory'], algName)
    if not os.path.isdir(resultsDirectory):
        os.makedirs(resultsDirectory)
    forest = RandomForestRegressor(n_estimators=500, random_state=randomState)
    forest.fit(X, Y)
    importances = forest.feature_importances_
    featureNames = datasetDict['featureNames']
    barPositions = [i for i in range(len(featureNames))]
    # Bar chart of the raw importances, one bar per input feature.
    plt.bar(barPositions, importances)
    plt.xticks(barPositions, featureNames, rotation='vertical')
    # Leave room at the bottom so the vertical tick labels are not clipped.
    plt.subplots_adjust(bottom=0.40)
    if config['generateGraphs']:
        plt.savefig(path.join(resultsDirectory, config['name'] + 'FeatureImportance.png'))
    plt.show()
    print("Features sorted by their score:")
    rankedFeatures = sorted(zip([round(v, 4) for v in importances], featureNames),
                            reverse=True)
    algResultsDict['featuresSortedByScoreDescending'] = rankedFeatures
    print(rankedFeatures)
    if 'rfMinFeatureValueToInclude' not in config:
        return X
    threshold = config['rfMinFeatureValueToInclude']
    dropColumns = [i for i, importance in enumerate(importances) if importance < threshold]
    return np.delete(X, dropColumns, axis=1)
# code taken and modified from http://scikit-learn.org/stable/auto_examples/classification/plot_lda.html#sphx-glr-auto-examples-classification-plot-lda-py
# code also taken from https://www.safaribooksonline.com/library/view/python-machine-learning/9781787125933/ch05s02.html
def runLDA(config, trainX, trainY, testX, testY, datasetName, datasetResultsDict, datasetDict, randomState):
    """Run Linear Discriminant Analysis as a supervised dimensionality reducer.

    Sweeps n_components over [ldaNumFeaturesMin, ldaNumFeaturesMax], scoring
    two LDA classifiers on the test split (each averaged over ldaNumAverages
    repetitions), then fits a final LDA with n_components equal to the full
    training feature count and returns the transformed training data.

    NOTE(review): the datasetName and datasetResultsDict parameters are
    immediately overwritten from datasetDict below, so the caller-supplied
    values are ignored.
    NOTE(review): clf1 and clf2 are configured identically (the shrinkage
    variants are commented out), so the two accuracy curves in the final plot
    coincide even though one is labelled "with shrinkage" — confirm intent.
    """
    datasetName = datasetDict['name']
    print("Running " + config['name'] + " on " + datasetName)
    algName = config['name']
    datasetResultsDict = datasetDict['results']
    algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
                                       algName)
    if not os.path.isdir(algResultsDirectory):
        os.makedirs(algResultsDirectory)
    resultsDirectory = algResultsDirectory
    # load dataset/algorithm specific config information (if any)
    numFeaturesMin = config['ldaNumFeaturesMin']
    numFeaturesMax = config['ldaNumFeaturesMax']
    numFeaturesRange = range(numFeaturesMin, numFeaturesMax + 1)
    numAverages = config['ldaNumAverages'] # how often to repeat classification
    acc_clf1, acc_clf2 = [], []
    for numFeatures in numFeaturesRange:
        score_clf1, score_clf2 = 0, 0
        for _ in range(numAverages):
            #clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto', n_components=numFeatures).fit(trainX, trainY)
            #clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None, n_components=numFeatures).fit(trainX, trainY)
            clf1 = LinearDiscriminantAnalysis(solver='svd', n_components=numFeatures).fit(trainX, trainY)
            clf2 = LinearDiscriminantAnalysis(solver='svd', n_components=numFeatures).fit(trainX, trainY)
            score_clf1 += clf1.score(testX, testY)
            score_clf2 += clf2.score(testX, testY)
        # Average the test-set accuracy over the repeated fits.
        acc_clf1.append(score_clf1 / numAverages)
        acc_clf2.append(score_clf2 / numAverages)
    #features_samples_ratio = np.array(numFeaturesRange) / (trainX.shape)[0]
    # NOTE(review): the division by 1 is a leftover from the commented-out
    # features/samples ratio above; the x-axis is effectively raw n_features.
    features_samples_ratio = np.array(numFeaturesRange) / 1
    # Final transform uses all training features as the component count.
    lda = LinearDiscriminantAnalysis(solver='svd', n_components=trainX.shape[1]).fit(trainX, trainY)
    ldaTransformedX = lda.transform(trainX)
    if config['generateGraphs']:
        # explained variance ratio
        explainedVarianceRatioArray = lda.explained_variance_ratio_
        cumulativeExplainedVarianceRatioArray = np.cumsum(explainedVarianceRatioArray)
        plt.plot(cumulativeExplainedVarianceRatioArray)
        plt.title("Cumulative Explained Variance for " + datasetName)
        plt.xlabel('number of components')
        plt.ylabel('cumulative explained variance')
        plt.grid()
        # NOTE(review): this inner guard is redundant — it repeats the
        # enclosing generateGraphs check.
        if config['generateGraphs']:
            plt.savefig(path.join(resultsDirectory, config['name'] + 'CumExplainedVariance.png'))
            plt.show()
        plt.close('all')
        # show scores (doesn't seem to work right)
        plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
                 label="Linear Discriminant Analysis with shrinkage", color='navy')
        plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
                 label="Linear Discriminant Analysis", color='gold')
        plt.xlabel('n_features')
        plt.ylabel('Classification accuracy')
        plt.legend(loc=1, prop={'size': 12})
        plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_ClassificationAccuracy.png'))
        plt.show()
    return ldaTransformedX
def runRandomizedProjections(config, X, datasetDict, randomState):
    """Sweep Gaussian random projections over a range of component counts.

    For every component count in [rpNumComponentsMin, rpNumComponentsMax] the
    projection is repeated rpNumTimesToRun times with seeds randomState,
    randomState+1, ... and the reconstruction error ||X - X_proj||_F^2 is
    recorded per seed.  When config['generateGraphs'] is set, one error curve
    per seed is plotted and saved.

    Returns the projected data produced with the seed matching
    config['rpBestRandomState'], or None when that key is absent or never
    matched — callers must be prepared for a None result.
    """
    datasetName = datasetDict['name']
    algName = config['name']
    print("Running " + algName + " on " + datasetName)
    # Each algorithm writes its artifacts into its own sub-directory.
    resultsDirectory = os.path.join(datasetDict['resultsDirectory'], algName)
    if not os.path.isdir(resultsDirectory):
        os.makedirs(resultsDirectory)
    # load dataset/algorithm specific config information (if any)
    numComponentsMin = config['rpNumComponentsMin']
    numComponentsMax = config['rpNumComponentsMax']
    numComponentsRange = range(numComponentsMin, numComponentsMax + 1)
    numTimesToRun = config['rpNumTimesToRun']
    # Seed whose projection should be returned; None -> None is returned.
    bestRandomState = config.get('rpBestRandomState')
    bestXTransformed = None
    # One reconstruction-error series per repeated run (i.e. per seed offset).
    reconstructionErrorArrays = [[] for _ in range(numTimesToRun)]
    for numComponents in numComponentsRange:
        for r in range(numTimesToRun):
            randomProjection = random_projection.GaussianRandomProjection(random_state=randomState + r, n_components=numComponents)
            XTransformed = randomProjection.fit_transform(X)
            if bestRandomState is not None and randomState + r == bestRandomState:
                bestXTransformed = XTransformed
            # Reconstruction error: project back through the random matrix and
            # take the squared Frobenius norm of the residual.
            # (code borrowed from a Piazza post)
            randMat = randomProjection.components_
            XDiff = X - XTransformed.dot(randMat)
            reconstructionErrorArrays[r].append(np.sum(XDiff * XDiff))
    if config['generateGraphs']:
        colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'black']
        for r in range(numTimesToRun):
            # Cycle through the palette: previously colors[r] raised
            # IndexError whenever rpNumTimesToRun > 6.
            plt.plot(numComponentsRange, reconstructionErrorArrays[r],
                     color=colors[r % len(colors)],
                     label="RandomState=" + str(randomState + r))
        plt.title("Reconstruction Error Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of Components')
        plt.ylabel('Reconstruction Error')
        plt.legend(loc=1, prop={'size': 12})
        plt.savefig(path.join(resultsDirectory, config['name'] + '_ReconstructionError.png'))
        plt.show()
    return bestXTransformed
def oldrunRandomizedProjections(config, X, datasetDict, randomState):
    """Superseded random-projection sweep; kept for reference only.

    Projects X with a GaussianRandomProjection for each component count in
    [rpNumComponentsMin, rpNumComponentsMax] and returns the projection from
    the LAST component count only — earlier results are discarded.

    NOTE(review): X_transformed is bound inside the loop, so this raises
    NameError at the return when the component range is empty.
    NOTE(review): numTimesToRun is read from config but never used here.
    Prefer runRandomizedProjections above.
    """
    datasetName = datasetDict['name']
    print("Running " + config['name'] + " on " + datasetName)
    algName = config['name']
    datasetResultsDict = datasetDict['results']
    algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
                                       algName)
    if not os.path.isdir(algResultsDirectory):
        os.makedirs(algResultsDirectory)
    resultsDirectory = algResultsDirectory
    # load dataset/algorithm specific config information (if any)
    numComponentsMin = config['rpNumComponentsMin']
    numComponentsMax = config['rpNumComponentsMax']
    numComponentsRange = range(numComponentsMin, numComponentsMax + 1)
    numTimesToRun = config['rpNumTimesToRun']
    for numComponents in numComponentsRange:
        randomProjection = random_projection.GaussianRandomProjection(random_state=randomState, n_components=numComponents)
        X_transformed = randomProjection.fit_transform(X)
        #reconstructionError = np.matmul(randomProjection.components_, X_transformed)
        #reconstructionError = np.matmul(randomProjection.components_, X_transformed)
        #print(str(numComponents) + " Components Reconstruction error: ", str(reconstructionError))
    # this was used for eps based execution, but we won't use that anymore
    #epsScale = 100.0
    #scaledEPSMin = config['rpCcaledEPSMin']
    #scaledEPSMax = config['rpCcaledEPSMax']
    #scaledEPSStep = config['rpScaledEPSStep']
    #scaledEPSRange = range(scaledEPSMin, scaledEPSMax + scaledEPSStep, scaledEPSStep)
    #
    #for scaledEPS in scaledEPSRange:
    #randomProjection = random_projection.GaussianRandomProjection(random_state=randomState)
    #randomProjection = random_projection.SparseRandomProjection(random_state=randomState)
    #eps = None
    #eps = float(scaledEPS)/epsScale
    #randomProjection = random_projection.GaussianRandomProjection(random_state=randomState, eps=eps)
    # randomProjection = random_projection.SparseRandomProjection(random_state=randomState, eps=eps)
    # try:
    #     X_transformed = randomProjection.fit_transform(X)
    #     print(X_transformed.shape)
    #     reconstructionError = np.matmul(randomProjection.components_, X)
    #     print("Reconstruction error: ", str(reconstructionError))
    # except Exception as e:
    #     print("RandomizedProject Exception " + str(e) + " for " + datasetName + " for eps: " + str(eps))
    return X_transformed
def oldrunICA(config, X, datasetName, datasetResultsDict, datasetDict, randomState):
    """Superseded ICA analysis; kept for reference only (see runICA below).

    Fits one FastICA (optionally with config['icaNumComponents'] components),
    plots per-component absolute kurtosis (sorted descending), then sweeps a
    reconstruction-error curve over component counts.  Returns the transform
    from the initial full fit and stores it under algResultsDict['transformedX'].

    NOTE(review): the reconstruction loop refits FastICA with default
    max_iter/tol/fun/whiten — not the configured values used for the first
    fit, so its convergence behavior may differ.
    NOTE(review): sortedIndexArray is computed but never used.
    """
    print("Running " + config['name'] + " on " + datasetName)
    algResultsDict = {}
    algName = config['name']
    datasetResultsDict[algName] = algResultsDict
    algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
                                       algName)
    if not os.path.isdir(algResultsDirectory):
        os.makedirs(algResultsDirectory)
    resultsDirectory = algResultsDirectory
    # load dataset/algorithm specific config information (if any)
    maxIterations = config['icaMaxIterations']
    tolerance = config['icaTolerance']
    gFunction = config['icaGFunction']
    icaNumComponents = None
    if 'icaNumComponents' in config:
        icaNumComponents = config['icaNumComponents']
        ica = FastICA(n_components=icaNumComponents, random_state=randomState, max_iter=maxIterations, tol=tolerance, fun=gFunction, whiten=True)
    else:
        ica = FastICA(random_state=randomState, max_iter=maxIterations, tol=tolerance, fun=gFunction, whiten=True)
    icaTransformedX = ica.fit_transform(X)
    # kurtosis code copied from https://programtalk.com/python-examples/scipy.stats.kurtosis/
    kurtosisArray = stats.kurtosis(icaTransformedX)
    absKurtosisArray = np.absolute(kurtosisArray)
    # sorts by abs kurtosis value, descending
    sortedIndexArray = np.argsort(absKurtosisArray)[::-1]
    sortedAbsKurtosisArray = np.sort(absKurtosisArray)[::-1]
    if config['generateGraphs']:
        # plot per-component absolute kurtosis, highest first
        plt.plot(range(icaTransformedX.shape[1]), sortedAbsKurtosisArray, marker='o')
        plt.title("Abs Kurtosis Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Sorted Component Order')
        plt.ylabel('Absolute Kurtosis')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_Kurtosis.png'))
        plt.show()
    # ICA reconstruction code copied from https://www.kaggle.com/ericlikedata/reconstruct-error-of-pca
    error_record = []
    numComponentsRange = range(1, len(ica.components_))
    for i in numComponentsRange:
        ica = FastICA(n_components=i, random_state=randomState)
        ica2_results = ica.fit_transform(X)
        ica2_proj_back = ica.inverse_transform(ica2_results)
        total_loss = LA.norm((X - ica2_proj_back), None)
        error_record.append(total_loss)
    # plot reconstruction error
    if config['generateGraphs']:
        plt.plot(numComponentsRange, error_record, marker='o')
        plt.title("Reconstruction Error Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of components')
        plt.ylabel('Error')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_ReconstructionError.png'))
        plt.show()
    algResultsDict['transformedX'] = icaTransformedX
    print(icaTransformedX.shape)
    return icaTransformedX
def runICA(config, X, datasetName, datasetResultsDict, datasetDict, randomState):
    """Sweep FastICA over a range of component counts.

    For every count in [icaNumComponentsMin, icaNumComponentsMax], fits a
    whitened FastICA (using the configured max_iter/tol/fun) and records the
    mean absolute kurtosis of the recovered components plus the reconstruction
    error ||X - inverse_transform(transform(X))||.  Optionally plots both
    curves.  Returns (and stores under algResultsDict['transformedX']) the
    transform from the LAST (largest) component count.
    """
    algName = config['name']
    print("Running " + algName + " on " + datasetName)
    algResultsDict = {}
    datasetResultsDict[algName] = algResultsDict
    # Each algorithm writes its artifacts into its own sub-directory.
    resultsDirectory = os.path.join(datasetDict['resultsDirectory'], algName)
    if not os.path.isdir(resultsDirectory):
        os.makedirs(resultsDirectory)
    # Dataset/algorithm specific configuration.
    componentCounts = range(config['icaNumComponentsMin'],
                            config['icaNumComponentsMax'] + 1)
    maxIterations = config['icaMaxIterations']
    tolerance = config['icaTolerance']
    gFunction = config['icaGFunction']
    avgAbsKurtosisPerCount = []
    reconErrorPerCount = []
    for count in componentCounts:
        decomposer = FastICA(n_components=count, random_state=randomState, max_iter=maxIterations, tol=tolerance, fun=gFunction, whiten=True)
        icaTransformedX = decomposer.fit_transform(X)
        # Mean absolute kurtosis of the recovered independent components
        # (kurtosis usage adapted from
        # https://programtalk.com/python-examples/scipy.stats.kurtosis/).
        componentKurtosis = np.absolute(stats.kurtosis(icaTransformedX))
        avgAbsKurtosisPerCount.append(np.mean(componentKurtosis))
        # Reconstruction-error idea adapted from
        # https://www.kaggle.com/ericlikedata/reconstruct-error-of-pca
        reconstructed = decomposer.inverse_transform(icaTransformedX)
        reconErrorPerCount.append(LA.norm((X - reconstructed), None))
    if config['generateGraphs']:
        # Mean absolute kurtosis vs. component count.
        plt.plot(componentCounts, avgAbsKurtosisPerCount, marker='o')
        plt.title("Mean Abs Kurtosis Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of Components')
        plt.ylabel('Mean Absolute Kurtosis')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_MeanAbsKurtosis.png'))
        plt.show()
        # Reconstruction error vs. component count.
        plt.plot(componentCounts, reconErrorPerCount, marker='o')
        plt.title("Reconstruction Error Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of components')
        plt.ylabel('Error')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_ReconstructionError.png'))
        plt.show()
    algResultsDict['transformedX'] = icaTransformedX
    print(icaTransformedX.shape)
    return icaTransformedX
def runPCA(config, X, datasetName, datasetResultsDict, datasetDict, randomState):
    """Project X with whitened PCA and report explained variance.

    Stores the transformed data and the (cumulative) explained-variance
    ratios in the per-algorithm results dict, plots the cumulative-variance
    curve, then sweeps component counts measuring the reconstruction error
    ||X - inverse_transform(fit_transform(X))||.  Figures are saved only when
    config['generateGraphs'] is set.  Returns the PCA-transformed data.
    """
    algName = config['name']
    print("Running " + algName + " on " + datasetName)
    algResultsDict = {}
    datasetResultsDict[algName] = algResultsDict
    # Each algorithm writes its artifacts into its own sub-directory.
    resultsDirectory = os.path.join(datasetDict['resultsDirectory'], algName)
    if not os.path.isdir(resultsDirectory):
        os.makedirs(resultsDirectory)
    # Component count is optional; PCA keeps all components when it is None.
    numComponents = config.get('pcaNumComponents')
    pca = PCA(n_components=numComponents, random_state=randomState, whiten=True).fit(X)
    pcaTransformedX = pca.transform(X)
    varianceRatios = pca.explained_variance_ratio_
    cumulativeRatios = np.cumsum(varianceRatios)
    algResultsDict['transformedX'] = pcaTransformedX
    algResultsDict['explainedVarianceRatioArray'] = varianceRatios
    algResultsDict['cumulativeExplainedVarianceRatioArray'] = cumulativeRatios
    plt.plot(cumulativeRatios)
    plt.title("Cumulative Explained Variance for " + datasetName)
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance')
    plt.grid()
    if config['generateGraphs']:
        plt.savefig(path.join(resultsDirectory, config['name'] + 'CumExplainedVariance.png'))
        plt.show()
    plt.close('all')
    # Reconstruction-error sweep, adapted from
    # https://www.kaggle.com/ericlikedata/reconstruct-error-of-pca — refit PCA
    # at each component count and measure the residual norm.
    # NOTE(review): range(1, len(pca.components_)) stops one short of the full
    # component count, mirroring the original sweep — confirm intent.
    componentCounts = range(1, len(pca.components_))
    reconstructionErrors = []
    for count in componentCounts:
        sweepPCA = PCA(n_components=count, random_state=randomState)
        reduced = sweepPCA.fit_transform(X)
        restored = sweepPCA.inverse_transform(reduced)
        reconstructionErrors.append(LA.norm((X - restored), None))
    if config['generateGraphs']:
        plt.plot(componentCounts, reconstructionErrors, marker='o')
        plt.title("Reconstruction Error Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of components')
        plt.ylabel('Error')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_ReconstructionError.png'))
        plt.show()
    plt.close('all')
    return pcaTransformedX
###############################################################
##################### Clusterting Methods #####################
###############################################################
def runEM(config, X, Y, datasetName, datasetResultsDict, datasetDict, randomState):
    """Run Expectation-Maximization (GaussianMixture) clustering sweeps.

    For each covariance type in config['emCovarianceTypes'] and each component
    count in [emNumComponentsMin, emNumComponentsMax]: fits a GaussianMixture,
    records its BIC, the adjusted Rand index against Y, and silhouette
    statistics, and draws a per-configuration silhouette figure.  After the
    sweep, a BIC bar chart over all configurations is drawn (mixture/plotting
    structure adapted from the scikit-learn GMM-selection and silhouette
    examples cited above).

    Returns X with the cluster labels of the LAST fitted mixture (final
    covariance type / component count) appended as an extra column.

    NOTE(review): plt.show() calls run even when generateGraphs is False;
    only savefig is guarded.
    """
    print("Running " + config['name'] + " on " + datasetName)
    algResultsDict = {}
    algName = config['name']
    datasetResultsDict[algName] = algResultsDict
    algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
                                       algName)
    if not os.path.isdir(algResultsDirectory):
        os.makedirs(algResultsDirectory)
    resultsDirectory = algResultsDirectory
    # load dataset/algorithm specific config information (if any)
    numComponentsMin = config['emNumComponentsMin']
    numComponentsMax = config['emNumComponentsMax']
    numComponentsRange = range(numComponentsMin, numComponentsMax + 1)
    covarianceTypes = config['emCovarianceTypes']
    # the following code is borrowed (and modified) from
    # http://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_selection.html#sphx-glr-auto-examples-mixture-plot-gmm-selection-py
    # Track the best (lowest-BIC) mixture seen across the whole sweep.
    lowestBIC = np.infty
    bic = []
    for covarianceType in covarianceTypes:
        adjustedRandIndexScoreArray = []
        silhouetteAvgArray = []
        for numComponents in numComponentsRange:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=numComponents,
                                          covariance_type=covarianceType,
                                          random_state=randomState)
            gmm.fit(X)
            emLabels = gmm.predict(X)
            bic.append(gmm.bic(X))
            if bic[-1] < lowestBIC:
                lowestBIC = bic[-1]
                best_gmm = gmm
            # get adjusted rand index score
            adjustedRandIndexScore = adjusted_rand_score(emLabels, Y)
            adjustedRandIndexScoreArray.append(adjustedRandIndexScore)
            print("adjRandIndexScore for " + str(numComponents) + ": " + str(adjustedRandIndexScore))
            # The silhouette_score gives the average value for all the samples.
            # This gives a perspective into the density and separation of the formed
            # clusters
            silhouetteAvg = silhouette_score(X, emLabels)
            silhouetteAvgArray.append(silhouetteAvg)
            # print("For n_clusters =", numClusters,
            #       "The average silhouette_score is :", silhouetteAvg)
            # Compute the silhouette scores for each sample
            sampleSilhouetteValues = silhouette_samples(X, emLabels)
            # Create a subplot with 1 row and 2 columns
            fig, (ax1, ax2) = plt.subplots(1, 2)
            fig.set_size_inches(18, 7)
            # The 1st subplot is the silhouette plot
            # The silhouette coefficient can range from -1, 1 but in this example all
            # lie within [-0.1, 1]
            ax1.set_xlim([-0.1, 1])
            # The (n_clusters+1)*10 is for inserting blank space between silhouette
            # plots of individual clusters, to demarcate them clearly.
            ax1.set_ylim([0, len(X) + (numComponents + 1) * 10])
            y_lower = 10
            for i in range(numComponents):
                # Aggregate the silhouette scores for samples belonging to
                # cluster i, and sort them
                ith_cluster_silhouette_values = \
                    sampleSilhouetteValues[emLabels == i]
                ith_cluster_silhouette_values.sort()
                size_cluster_i = ith_cluster_silhouette_values.shape[0]
                y_upper = y_lower + size_cluster_i
                color = cm.nipy_spectral(float(i) / numComponents)
                ax1.fill_betweenx(np.arange(y_lower, y_upper),
                                  0, ith_cluster_silhouette_values,
                                  facecolor=color, edgecolor=color, alpha=0.7)
                # Label the silhouette plots with their cluster numbers at the middle
                ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
                # Compute the new y_lower for next plot
                y_lower = y_upper + 10  # 10 for the 0 samples
            ax1.set_title("The silhouette plot for the various clusters.")
            ax1.set_xlabel("The silhouette coefficient values")
            ax1.set_ylabel("Cluster label")
            # The vertical line for average silhouette score of all the values
            ax1.axvline(x=silhouetteAvg, color="red", linestyle="--")
            ax1.set_yticks([])  # Clear the yaxis labels / ticks
            ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
            # 2nd Plot showing the actual clusters formed
            colors = cm.nipy_spectral(emLabels.astype(float) / numComponents)
            ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                        c=colors, edgecolor='k')
            # Labeling the clusters
            #centers = kMeansClusterer.cluster_centers_
            # Draw white circles at cluster centers
            #ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
            #            c="white", alpha=1, s=200, edgecolor='k')
            #
            #for i, c in enumerate(centers):
            #    ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
            #                s=50, edgecolor='k')
            ax2.set_title("The visualization of the clustered data.")
            ax2.set_xlabel("Feature space for the 1st feature")
            ax2.set_ylabel("Feature space for the 2nd feature")
            plt.suptitle(("Silhouette analysis for EM clustering on " + datasetName +
                          " with n_clusters = %d" % numComponents),
                         fontsize=14, fontweight='bold')
            if config['generateGraphs']:
                plt.savefig(path.join(resultsDirectory, config['name'] + '_' + covarianceType + '_' + str(numComponents) + '_Silhouette.png'))
            plt.show()
            plt.close('all')
        if config['generateGraphs']:
            # plot adj rand index score
            plt.plot(numComponentsRange, adjustedRandIndexScoreArray, marker='o')
            plt.title("Adjusted Rand Index Score Analysis of " + algName + " for " + datasetName)
            plt.xlabel('Number of clusters')
            plt.ylabel('Adjusted Rand Index Score')
            plt.savefig(path.join(resultsDirectory, config['name'] + '_' + covarianceType + '_AdjRandIndexScore.png'))
            plt.show()
            # plot the avg silhouette scores
            plt.plot(numComponentsRange, silhouetteAvgArray, marker='o')
            plt.title("Silhouette Avg Analysis of " + algName + " for " + datasetName)
            plt.xlabel('Number of clusters')
            plt.ylabel('Silhouette Avg')
            plt.savefig(path.join(resultsDirectory, config['name'] + '_' + covarianceType + '_SilhouetteAvg.png'))
            plt.show()
    # BIC bar chart: one group of bars per covariance type across the
    # component-count range; '*' marks the overall minimum.
    bic = np.array(bic)
    color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
                                  'darkorange'])
    clf = best_gmm
    bars = []
    # Plot the BIC scores
    plt.figure(figsize=(8, 6))
    spl = plt.subplot(2, 1, 1)
    for i, (cv_type, color) in enumerate(zip(covarianceTypes, color_iter)):
        xpos = np.array(numComponentsRange) + .2 * (i - 2)
        bars.append(plt.bar(xpos, bic[i * len(numComponentsRange):
                                      (i + 1) * len(numComponentsRange)],
                            width=.2, color=color))
    plt.xticks(numComponentsRange)
    plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
    plt.title('BIC score per model for ' + datasetName)
    xpos = np.mod(bic.argmin(), len(numComponentsRange)) + .65 + \
        .2 * np.floor(bic.argmin() / len(numComponentsRange))
    plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
    spl.set_xlabel('Number of components')
    spl.legend([b[0] for b in bars], covarianceTypes)
    # Plot the winner
    #splot = plt.subplot(2, 1, 2)
    #Y_ = clf.predict(X)
    #for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_, color_iter)):
    #    v, w = linalg.eigh(cov)
    #    if not np.any(Y_ == i):
    #        continue
    #    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
    #
    # Plot an ellipse to show the Gaussian component
    #    angle = np.arctan2(w[0][1], w[0][0])
    #    angle = 180. * angle / np.pi  # convert to degrees
    #    v = 2. * np.sqrt(2.) * np.sqrt(v)
    #    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
    #    ell.set_clip_box(splot.bbox)
    #    ell.set_alpha(.5)
    #    splot.add_artist(ell)
    #plt.xticks(())
    #plt.yticks(())
    #plt.title('Selected GMM: full model, 2 components')
    plt.subplots_adjust(hspace=.35, bottom=.02)
    if config['generateGraphs']:
        plt.savefig(path.join(resultsDirectory, config['name'] + 'BIC.png'))
    plt.show()
    plt.close('all')
    # Append the labels of the LAST fitted mixture as a new column of X.
    return np.append(X, emLabels.T.reshape(X.shape[0],1), axis=1)
def runKMeans(config, X, Y, datasetName, datasetResultsDict, datasetDict, randomState):
    """Run k-means clustering sweeps with silhouette/score diagnostics.

    For each cluster count in [kMeansNumClustersMin, kMeansNumClustersMax]:
    fits KMeans (k-means++, n_init=10), records inertia ("distortion"),
    V-measure and adjusted Rand index against Y, and draws a silhouette
    figure (structure adapted from the scikit-learn silhouette example cited
    below).  When the cluster count equals the number of output classes, a
    confusion matrix against Y is also produced.

    Returns X with the cluster labels of the LAST fitted model (largest
    cluster count) appended as an extra column.

    NOTE(review): at the end, datasetResultsDict[algName] is overwritten with
    kMeansResultDict, discarding the algResultsDict installed at the top —
    confirm this is intentional.
    """
    print("Running " + config['name'] + " on " + datasetName)
    algResultsDict = {}
    algName = config['name']
    datasetResultsDict[algName] = algResultsDict
    algResultsDirectory = os.path.join(datasetDict['resultsDirectory'],
                                       algName)
    if not os.path.isdir(algResultsDirectory):
        os.makedirs(algResultsDirectory)
    resultsDirectory = algResultsDirectory
    # load dataset/algorithm specific config information (if any)
    numClustersMin = config['kMeansNumClustersMin']
    numClustersMax = config['kMeansNumClustersMax']
    numClustersRange = range(numClustersMin, numClustersMax + 1)
    clusterSizeArray = []
    silhouetteAvgArray = []
    distortionsArray = []
    clusterCentersArray = []
    vMeasureScoreArray = []
    adjustedRandIndexScoreArray = []
    # the following code is borrowed (and modified) from
    # http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html#sphx-glr-auto-examples-cluster-plot-kmeans-silhouette-analysis-py
    for numClusters in numClustersRange:
        clusterSizeArray.append(numClusters)
        # Create a subplot with 1 row and 2 columns
        fig, (ax1, ax2) = plt.subplots(1, 2)
        fig.set_size_inches(18, 7)
        # The 1st subplot is the silhouette plot
        # The silhouette coefficient can range from -1, 1 but in this example all
        # lie within [-0.1, 1]
        ax1.set_xlim([-0.1, 1])
        # The (n_clusters+1)*10 is for inserting blank space between silhouette
        # plots of individual clusters, to demarcate them clearly.
        ax1.set_ylim([0, len(X) + (numClusters + 1) * 10])
        kMeansClusterer = cluster.KMeans(n_clusters=numClusters, random_state=randomState, init='k-means++', n_init=10, max_iter=300)
        km = kMeansClusterer.fit(X)
        kMeansLabels = km.labels_
        distortionsArray.append(km.inertia_)
        clusterCenters = km.cluster_centers_
        clusterCentersArray.append(clusterCenters)
        vMeasureScore = v_measure_score(Y, kMeansLabels)
        vMeasureScoreArray.append(vMeasureScore)
        print("vMeasureScore for " + str(numClusters) + ": " + str(vMeasureScore))
        adjustedRandIndexScore = adjusted_rand_score(kMeansLabels, Y)
        adjustedRandIndexScoreArray.append(adjustedRandIndexScore)
        print("adjRandIndexScore for " + str(numClusters) + ": " + str(adjustedRandIndexScore))
        # F1 only makes sense for the binary-class / two-cluster combination.
        if len(datasetDict['outputClassNames']) == 2 and numClusters == 2:
            f1Score = f1_score(Y, kMeansLabels)
            print("f1Score for " + str(numClusters) + ": " + str(f1Score))
        # The silhouette_score gives the average value for all the samples.
        # This gives a perspective into the density and separation of the formed
        # clusters
        silhouetteAvg = silhouette_score(X, kMeansLabels)
        silhouetteAvgArray.append(silhouetteAvg)
        #print("For n_clusters =", numClusters,
        #      "The average silhouette_score is :", silhouetteAvg)
        # Compute the silhouette scores for each sample
        sampleSilhouetteValues = silhouette_samples(X, kMeansLabels)
        y_lower = 10
        for i in range(numClusters):
            # Aggregate the silhouette scores for samples belonging to
            # cluster i, and sort them
            ith_cluster_silhouette_values = \
                sampleSilhouetteValues[kMeansLabels == i]
            ith_cluster_silhouette_values.sort()
            size_cluster_i = ith_cluster_silhouette_values.shape[0]
            y_upper = y_lower + size_cluster_i
            color = cm.nipy_spectral(float(i) / numClusters)
            ax1.fill_betweenx(np.arange(y_lower, y_upper),
                              0, ith_cluster_silhouette_values,
                              facecolor=color, edgecolor=color, alpha=0.7)
            # Label the silhouette plots with their cluster numbers at the middle
            ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
            # Compute the new y_lower for next plot
            y_lower = y_upper + 10  # 10 for the 0 samples
        ax1.set_title("The silhouette plot for the various clusters.")
        ax1.set_xlabel("The silhouette coefficient values")
        ax1.set_ylabel("Cluster label")
        # The vertical line for average silhouette score of all the values
        ax1.axvline(x=silhouetteAvg, color="red", linestyle="--")
        ax1.set_yticks([])  # Clear the yaxis labels / ticks
        ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
        # 2nd Plot showing the actual clusters formed
        colors = cm.nipy_spectral(kMeansLabels.astype(float) / numClusters)
        ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                    c=colors, edgecolor='k')
        # Labeling the clusters
        centers = kMeansClusterer.cluster_centers_
        # Draw white circles at cluster centers
        ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
                    c="white", alpha=1, s=200, edgecolor='k')
        for i, c in enumerate(centers):
            ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
                        s=50, edgecolor='k')
        ax2.set_title("The visualization of the clustered data.")
        ax2.set_xlabel("Feature space for the 1st feature")
        ax2.set_ylabel("Feature space for the 2nd feature")
        plt.suptitle(("Silhouette analysis for KMeans clustering on " + datasetName +
                      " with n_clusters = %d" % numClusters),
                     fontsize=14, fontweight='bold')
        if config['generateGraphs']:
            # save the silhouette graph for this # of clusters
            plt.savefig(path.join(resultsDirectory, config['name'] + '_' + str(numClusters) + '_Silhouette.png'))
            plt.show()
            # save the cluster centers for this # of clusters
            f = open(path.join(resultsDirectory, config['name'] + '_' + str(numClusters) + '_ClusterCenters.txt'), "w+")
            f.write(str(clusterCenters))
            f.close()
            if numClusters == len(datasetDict['outputClassNames']):
                confusionMatrix = confusion_matrix(Y, kMeansLabels)
                plt.figure()
                Utils.plot_confusion_matrix(confusionMatrix, datasetDict['outputClassNames'], normalize=True)
                plt.tight_layout()
                # another way of doing it
                #plt.gcf().subplots_adjust(bottom=0.15)
                plt.savefig(path.join(resultsDirectory, config['name'] + '_' + str(numClusters) + '_NormalizedConfusion.png'))
                plt.show()
                plt.figure()
                Utils.plot_confusion_matrix(confusionMatrix, datasetDict['outputClassNames'], normalize=False)
                plt.tight_layout()
                # another way of doing it
                #plt.gcf().subplots_adjust(bottom=0.15)
                plt.savefig(path.join(resultsDirectory, config['name'] + '_' + str(numClusters) + '_NonNormalizedConfusion.png'))
                plt.show()
        plt.close('all')
    # plot distortion
    if config['generateGraphs']:
        # plot distortions
        plt.plot(numClustersRange, distortionsArray, marker='o')
        plt.title("Distortion Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of clusters')
        plt.ylabel('Distortion')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_Distortion.png'))
        plt.show()
        #plt.close('all')
        # plot silhouetteAvg
        plt.plot(numClustersRange, silhouetteAvgArray, marker='o')
        plt.title("Silhouette Avg Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of clusters')
        plt.ylabel('Silhouette Avg')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_SilhouetteAvg.png'))
        plt.show()
        #plt.close('all')
        # plot vmeasurescore
        plt.plot(numClustersRange, vMeasureScoreArray, marker='o')
        plt.title("V-Measure Score Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of clusters')
        plt.ylabel('V-Measure Score')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_VMeasure.png'))
        plt.show()
        # plot adj rand index score
        plt.plot(numClustersRange, adjustedRandIndexScoreArray, marker='o')
        plt.title("Adjusted Rand Index Score Analysis of " + algName + " for " + datasetName)
        plt.xlabel('Number of clusters')
        plt.ylabel('Adjusted Rand Index Score')
        plt.savefig(path.join(resultsDirectory, config['name'] + '_AdjRandIndexScore.png'))
        plt.show()
        plt.close('all')
    kMeansResultDict = {}
    datasetResultsDict[algName] = kMeansResultDict
    kMeansResultDict['clusterSizeArray'] = clusterSizeArray
    kMeansResultDict['silhouetteAvgArray'] = silhouetteAvgArray
    # Append the labels of the LAST fitted model as a new column of X.
    return np.append(X, kMeansLabels.T.reshape(X.shape[0],1), axis=1)
###############################################################
##################### Main Method #############################
###############################################################
def _find_algorithm_config(datasetDict, algorithmName, exitOnMissing=True):
    """Return the settings dict for ``algorithmName`` from this dataset's config.

    Searches ``datasetDict['config']['algorithmSettings']`` by name.  When the
    entry is missing, an error is printed and, if ``exitOnMissing`` is True,
    the program exits; otherwise None is returned so the caller can skip that
    stage.  (BUG FIX: several stages previously had no missing-config check at
    all and crashed with a TypeError on ``None['enabled']``.)
    """
    for algorithmSetting in datasetDict['config']['algorithmSettings']:
        if algorithmSetting['name'] == algorithmName:
            return algorithmSetting
    print("ERROR: Missing '" + algorithmName + "' config for " + datasetDict['name'])
    if exitOnMissing:
        sys.exit()
    return None


def _process_dataset(datasetName, datasetDict, experimentResultsDirectory, version, randomState):
    """Run every enabled algorithm stage for one dataset.

    Stages (in order): clustering on the raw features, dimensionality
    reduction, clustering on the reduced features, and neural networks on
    raw / reduced / cluster-augmented features.  Returns the dataset's result
    dictionary (also stored in ``datasetDict['results']``).
    """
    print("******* Processing " + datasetName + "...")
    datasetResultsDirectory = os.path.join(experimentResultsDirectory, datasetName)
    os.makedirs(datasetResultsDirectory)
    datasetDict['resultsDirectory'] = datasetResultsDirectory
    datasetResultDict = {'version': version, 'randomState': randomState}
    datasetDict['results'] = datasetResultDict
    X = datasetDict['X']
    Y = datasetDict['Y']
    # Hold out one fixed train/test split so every supervised stage (the ANN
    # runs) is evaluated on the same partition.
    XTrainUnmodified, XTestUnmodified, YTrainUnmodified, YTestUnmodified \
        = train_test_split(X, Y,
                           train_size=datasetDict['trainPercentage'],
                           test_size=1.0 - datasetDict['trainPercentage'],
                           random_state=randomState,
                           shuffle=datasetDict['shuffle'])
    datasetDict['XTrainUnmodified'] = XTrainUnmodified
    datasetDict['XTestUnmodified'] = XTestUnmodified
    datasetDict['YTrainUnmodified'] = YTrainUnmodified
    datasetDict['YTestUnmodified'] = YTestUnmodified
    ################################### clustering on the raw features ###################################
    cfg = _find_algorithm_config(datasetDict, 'K-Means Clustering')
    if cfg is not None and cfg['enabled']:
        runKMeans(cfg, X, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'Expectation Maximization')
    if cfg is not None and cfg['enabled']:
        runEM(cfg, X, Y, datasetName, datasetResultDict, datasetDict, randomState)
    ################################### dimensionality reduction ###################################
    cfg = _find_algorithm_config(datasetDict, 'PCA')
    if cfg is not None and cfg['enabled']:
        runPCA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ICA')
    if cfg is not None and cfg['enabled']:
        runICA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
    # NOTE: runRandomizedProjections takes datasetDict directly instead of the
    # (datasetName, datasetResultDict, ...) argument list the other run*
    # helpers use.
    cfg = _find_algorithm_config(datasetDict, 'Randomized Projection')
    if cfg is not None and cfg['enabled']:
        runRandomizedProjections(cfg, X, datasetDict, randomState)
    # LDA is supervised, so it runs on the held-out split rather than all of X
    cfg = _find_algorithm_config(datasetDict, 'LDA')
    if cfg is not None and cfg['enabled']:
        runLDA(cfg, XTrainUnmodified, YTrainUnmodified,
               XTestUnmodified, YTestUnmodified,
               datasetName, datasetResultDict,
               datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'Random Forest Regression')
    if cfg is not None and cfg['enabled']:
        runRandomForestRegressor(cfg, X, Y, datasetName, datasetResultDict, datasetDict, randomState)
    ################################### clustering after dimensionality reduction ###################################
    cfg = _find_algorithm_config(datasetDict, 'K-Means Clustering wPCA')
    if cfg is not None and cfg['enabled']:
        transformedX = runPCA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
        runKMeans(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'K-Means Clustering wICA')
    if cfg is not None and cfg['enabled']:
        transformedX = runICA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
        runKMeans(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'K-Means Clustering wRP')
    if cfg is not None and cfg['enabled']:
        transformedX = runRandomizedProjections(cfg, X, datasetDict, randomState)
        runKMeans(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'K-Means Clustering wRF')
    if cfg is not None and cfg['enabled']:
        transformedX = runRandomForestRegressor(cfg, X, Y, datasetName, datasetResultDict, datasetDict, randomState)
        runKMeans(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'EM Clustering wPCA')
    if cfg is not None and cfg['enabled']:
        transformedX = runPCA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
        runEM(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'EM Clustering wICA')
    if cfg is not None and cfg['enabled']:
        transformedX = runICA(cfg, X, datasetName, datasetResultDict, datasetDict, randomState)
        runEM(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'EM Clustering wRP')
    if cfg is not None and cfg['enabled']:
        transformedX = runRandomizedProjections(cfg, X, datasetDict, randomState)
        runEM(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'EM Clustering wRF')
    if cfg is not None and cfg['enabled']:
        transformedX = runRandomForestRegressor(cfg, X, Y, datasetName, datasetResultDict, datasetDict, randomState)
        runEM(cfg, transformedX, Y, datasetName, datasetResultDict, datasetDict, randomState)
    ################################### ANN, plain and after transforms ###################################
    # The transforms are applied separately to the train and test partitions so
    # hyperparameters are never chosen by looking at test data.
    # NOTE: runANN takes (..., datasetDict, datasetResultDict, ...) -- the
    # reverse of the argument order used by the clustering/DR run* helpers.
    # A missing ANN-family config (except 'ANN wPCA', which was always fatal)
    # is reported but only skips that stage, matching the original behavior.
    cfg = _find_algorithm_config(datasetDict, 'ANN', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        runANN(cfg, XTrainUnmodified, YTrainUnmodified,
               XTestUnmodified, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ANN wPCA')
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runPCA(cfg, XTrainUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        transformedXTest = runPCA(cfg, XTestUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ANN wICA', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runICA(cfg, XTrainUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        transformedXTest = runICA(cfg, XTestUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ANN wRP', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runRandomizedProjections(cfg, XTrainUnmodified, datasetDict, randomState)
        transformedXTest = runRandomizedProjections(cfg, XTestUnmodified, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    # NOTE(review): as in the original code, the LDA transform is NOT applied
    # here before runANN -- presumably it is handled inside runANN via the
    # config; confirm before changing.
    cfg = _find_algorithm_config(datasetDict, 'ANN wLDA', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        runANN(cfg, XTrainUnmodified, YTrainUnmodified,
               XTestUnmodified, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ANN wRF', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runRandomForestRegressor(cfg, XTrainUnmodified, YTrainUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        transformedXTest = runRandomForestRegressor(cfg, XTestUnmodified, YTestUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    ################################### ANN after clustering to create new features ###################################
    cfg = _find_algorithm_config(datasetDict, 'ANN wKMeans', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runKMeans(cfg, XTrainUnmodified, YTrainUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        transformedXTest = runKMeans(cfg, XTestUnmodified, YTestUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    cfg = _find_algorithm_config(datasetDict, 'ANN wEM', exitOnMissing=False)
    if cfg is not None and cfg['enabled']:
        transformedXTrain = runEM(cfg, XTrainUnmodified, YTrainUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        transformedXTest = runEM(cfg, XTestUnmodified, YTestUnmodified, datasetName, datasetResultDict, datasetDict, randomState)
        runANN(cfg, transformedXTrain, YTrainUnmodified,
               transformedXTest, YTestUnmodified,
               datasetName, datasetDict, datasetResultDict, randomState)
    return datasetResultDict


def main():
    """Entry point: run the whole experiment suite described by Configuration.json.

    Creates a timestamped results directory, snapshots the configuration into
    it, runs every enabled dataset/algorithm combination, and persists the
    aggregated results as JSON.
    """
    resultsDict = {}
    # timestamped output directory for this run
    experimentResultsDirectory = os.path.join(os.getcwd(),
                                              datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    os.makedirs(experimentResultsDirectory)
    # load the configuration json file
    with open('./Configuration.json') as f:
        configJSON = json.load(f)
    # copy config file data to results directory for reproducibility
    with open(path.join(experimentResultsDirectory, 'config.json'), 'w') as f:
        f.write(json.dumps(configJSON))
    version = configJSON['version']
    randomState = configJSON['randomState']
    np.random.seed(randomState)
    # initialize the enabled datasets
    datasetsDict = {}
    for datasetConfig in configJSON['datasets']:
        if not datasetConfig['enabled']:
            continue
        datasetDict = {'config': datasetConfig, 'name': datasetConfig['name']}
        datasetsDict[datasetConfig['name']] = datasetDict
        initializeDataset(datasetDict, randomState)
    # now process each dataset
    for datasetName, datasetDict in datasetsDict.items():
        # BUG FIX: per-dataset results were never added to resultsDict before,
        # so results.json was always an empty object.
        resultsDict[datasetName] = _process_dataset(datasetName, datasetDict,
                                                    experimentResultsDirectory,
                                                    version, randomState)
    # json.dumps crashes on some numpy numeric datatypes; recursively stringify
    # everything first so the final file is always written.
    Utils.convert_all_dict_vals_to_string(resultsDict)
    with open(path.join(experimentResultsDirectory, 'results.json'), 'w') as f:
        f.write(json.dumps(resultsDict))
if __name__ == "__main__":
    # run the full experiment suite only when executed as a script
    main()
| tflaherty/tflaherty3-CS-7641-Assignment3 | UnsupervisedLearningAndDimensionalityReduction.py | UnsupervisedLearningAndDimensionalityReduction.py | py | 92,303 | python | en | code | 0 | github-code | 13 |
17080206334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SchoolSimpleInfo import SchoolSimpleInfo
from alipay.aop.api.domain.SchoolBaseInfo import SchoolBaseInfo
class AlipayCommerceEducateCampusInstitutionsQueryResponse(AlipayResponse):
    """Response model for alipay.commerce.educate.campus.institutions.query.

    Holds one school summary record plus a list of school base-info records,
    both hydrated from the raw JSON-RPC payload.
    """

    def __init__(self):
        super(AlipayCommerceEducateCampusInstitutionsQueryResponse, self).__init__()
        self._school_info = None
        self._school_info_list = None

    @property
    def school_info(self):
        # SchoolSimpleInfo instance (None until a response has been parsed)
        return self._school_info

    @school_info.setter
    def school_info(self, value):
        # accept either a ready model object or its raw dict representation
        self._school_info = value if isinstance(value, SchoolSimpleInfo) else SchoolSimpleInfo.from_alipay_dict(value)

    @property
    def school_info_list(self):
        # list of SchoolBaseInfo instances (None until a response has been parsed)
        return self._school_info_list

    @school_info_list.setter
    def school_info_list(self, value):
        # only list payloads are accepted; each element may be a model or a dict
        if isinstance(value, list):
            self._school_info_list = [
                item if isinstance(item, SchoolBaseInfo) else SchoolBaseInfo.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        """Populate this model's fields from the decoded response payload."""
        response = super(AlipayCommerceEducateCampusInstitutionsQueryResponse, self).parse_response_content(response_content)
        if 'school_info' in response:
            self.school_info = response['school_info']
        if 'school_info_list' in response:
            self.school_info_list = response['school_info_list']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceEducateCampusInstitutionsQueryResponse.py | AlipayCommerceEducateCampusInstitutionsQueryResponse.py | py | 1,679 | python | en | code | 241 | github-code | 13 |
1748174730 | import decimal
import json
import logging
import urllib.parse as urlparse
from requests import auth, Session, codes
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, Timeout, RequestException
# User-Agent header sent with every RPC request
USER_AGENT = "AuthServiceProxy/0.1"
# default per-request timeout in seconds (overridable per proxy instance)
HTTP_TIMEOUT = 30
# transient-failure retries mounted on the shared HTTP adapter
MAX_RETRIES = 3
log = logging.getLogger("MoneroRPC")
class JSONRPCException(Exception):
    """Raised when the RPC server reports an error or the transport fails.

    Carries the raw JSON-RPC error object plus its ``code`` and ``message``
    fields (both None when absent from the error object).
    """

    def __init__(self, rpc_error):
        # forward the message (when present) to Exception so e.args is useful
        if "message" in rpc_error:
            super().__init__(rpc_error["message"])
        else:
            super().__init__()
        self.error = rpc_error  # full error object as received
        self.code = rpc_error.get("code")
        self.message = rpc_error.get("message")

    def __str__(self):
        return f"{self.code}: {self.message}"

    def __repr__(self):
        return f"<{type(self).__name__} '{self}'>"
def EncodeDecimal(o):
    """``json.dumps`` default hook: render Decimal amounts as 12-digit floats."""
    if not isinstance(o, decimal.Decimal):
        raise TypeError(repr(o) + " is not JSON serializable.")
    return float(round(o, 12))
class AuthServiceProxy(object):
    """Extension of python-jsonrpc
    to communicate with Monero (monerod, monero-wallet-rpc)

    Attribute access returns a callable proxy bound to that RPC method, so
    ``proxy.get_balance()`` issues the ``get_balance`` JSON-RPC call.
    """

    # one shared adapter so transient connection failures are retried
    retry_adapter = HTTPAdapter(max_retries=MAX_RETRIES)
    # class-wide JSON-RPC request id counter (shared by all proxy instances)
    __id_count = 0

    def __init__(
        self,
        service_url,
        username=None,
        password=None,
        service_name=None,
        timeout=HTTP_TIMEOUT,
        connection=None,
    ):
        """
        :param service_url: Monero RPC URL, like http://user:passwd@host:port/json_rpc.
        :param username: overrides any user name embedded in service_url.
        :param password: overrides any password embedded in service_url.
        :param service_name: Method name of Monero RPC.
        :param timeout: per-request timeout in seconds.
        :param connection: existing requests.Session to re-use (method proxies
            created via attribute access share the parent's session this way).
        """
        self.__service_url = service_url
        self.__service_name = service_name
        self.__timeout = timeout
        self.__url = urlparse.urlparse(service_url)
        # BUG FIX: the port previously defaulted to 80 for every scheme, so an
        # https URL without an explicit port was rewritten to port 80; https
        # now defaults to 443.
        if self.__url.port is not None:
            port = self.__url.port
        else:
            port = 443 if self.__url.scheme == "https" else 80
        self.__rpc_url = (
            self.__url.scheme
            + "://"
            + self.__url.hostname
            + ":"
            + str(port)
            + self.__url.path
        )
        if connection:
            # Callables re-use the connection of the original proxy
            self.__conn = connection
        else:
            headers = {
                "Content-Type": "application/json",
                "User-Agent": USER_AGENT,
                "Host": self.__url.hostname,
            }
            # explicit arguments win over credentials embedded in the URL
            user = username if username else self.__url.username
            passwd = password if password else self.__url.password
            # Digest Authentication
            authentication = None
            if user is not None and passwd is not None:
                authentication = auth.HTTPDigestAuth(user, passwd)
            self.__conn = Session()
            self.__conn.mount(
                f"{self.__url.scheme}://{self.__url.hostname}", self.retry_adapter
            )
            self.__conn.auth = authentication
            self.__conn.headers = headers

    def __getattr__(self, name):
        """Return the properly configured proxy according to the given RPC method.

        This maps requested object attributes to Monero RPC methods
        passed to the request.
        This is called before '__call__'.

        :param name: Method name of Monero RPC.
        """
        if name.startswith("__") and name.endswith("__"):
            # Python internal stuff
            raise AttributeError
        if self.__service_name is not None:
            # support dotted access: proxy.a.b resolves to method name "a.b"
            name = f"{self.__service_name}.{name}"
        return AuthServiceProxy(
            service_url=self.__service_url, service_name=name, connection=self.__conn,
        )

    def __call__(self, *args):
        """Issue the JSON-RPC request for this proxy's bound method name.

        Monero RPC always takes a single dictionary of parameters, so only
        the first positional argument (if any) is forwarded.
        """
        AuthServiceProxy.__id_count += 1
        log.debug(
            f"-{AuthServiceProxy.__id_count}-> {self.__service_name} {json.dumps(args, default=EncodeDecimal)}"
        )
        # args is tuple
        # monero RPC always gets one dictionary as parameter
        if args:
            args = args[0]
        postdata = json.dumps(
            {
                "jsonrpc": "2.0",
                "method": self.__service_name,
                "params": args,
                "id": AuthServiceProxy.__id_count,
            },
            default=EncodeDecimal,
        )
        return self._request(postdata)

    def batch_(self, rpc_calls):
        """Batch RPC call.
        Pass array of arrays: [ [ "method", params... ], ... ]
        Returns array of results.

        No real implementation of JSON RPC batch.
        Only requesting every method one after another.

        NOTE: the inner call lists are consumed (popped) in the process.
        """
        results = list()
        for rpc_call in rpc_calls:
            method = rpc_call.pop(0)
            params = rpc_call.pop(0) if rpc_call else dict()
            results.append(self.__getattr__(method)(params))
        return results

    def _request(self, postdata):
        """POST the JSON payload and return the JSON-RPC ``result`` field.

        :raises JSONRPCException: on transport errors (code -341), when the
            server reports an RPC error, or when the response carries no
            result (code -343).
        """
        log.debug(f"--> {postdata}")
        request_err_msg = None
        try:
            r = self.__conn.post(
                url=self.__rpc_url, data=postdata, timeout=self.__timeout
            )
        # ConnectTimeout subclasses both ConnectionError and Timeout; keep
        # ConnectionError first so it is classified as a connection failure.
        except ConnectionError as e:
            request_err_msg = (
                f"Could not establish a connection, original error: '{str(e)}'."
            )
        except Timeout as e:
            request_err_msg = f"Connection timeout, original error: '{str(e)}'."
        except RequestException as e:
            # catch-all for any other requests-level failure
            request_err_msg = f"Request error: '{str(e)}'."
        if request_err_msg:
            raise JSONRPCException({"code": -341, "message": request_err_msg})
        response = self._get_response(r)
        if response.get("error", None) is not None:
            raise JSONRPCException(response["error"])
        elif "result" not in response:
            raise JSONRPCException(
                {"code": -343, "message": "Missing JSON-RPC result."}
            )
        else:
            return response["result"]

    def _get_response(self, r):
        """Validate the HTTP response and decode its JSON body.

        :raises JSONRPCException: on a non-200 status (-344) or an empty
            body (-342).
        :raises ValueError: when the body is not valid JSON.
        """
        if r.status_code != codes.ok:
            raise JSONRPCException(
                {
                    "code": -344,
                    "message": f"Received HTTP status code '{r.status_code}'.",
                }
            )
        http_response = r.text
        if http_response is None:
            raise JSONRPCException(
                {"code": -342, "message": "Missing HTTP response from server."}
            )
        try:
            # parse amounts as Decimal to avoid float rounding of balances
            response = json.loads(http_response, parse_float=decimal.Decimal)
        except json.JSONDecodeError as e:
            raise ValueError(f"Error: '{str(e)}'. Response: '{http_response}'.")
        # BUG FIX: the branches below were inverted -- a successful response
        # (an "error" key present but null) was logged via log.error, while
        # real server errors only got the generic debug line.
        if response.get("error", None) is not None:
            log.error(f"Error: '{response}'")
            log.debug(f"<-- {response}")
        elif "error" in response:
            log.debug(
                f"<-{response['id']}- {json.dumps(response['result'], default=EncodeDecimal)}"
            )
        else:
            log.debug(f"<-- {response}")
        return response
| normoes/python-monerorpc | monerorpc/authproxy.py | authproxy.py | py | 7,172 | python | en | code | 7 | github-code | 13 |
21604474206 | """
Do the raffle!
TODO add docs for how this mess actually works and maybe even refactor
"""
import random
import re
from collections import defaultdict
import pandas as pd
# Name of the column in raffle.csv whose values we will match with tickets.csv
# It doesn't need to be emails-- can be any string pretty much (ensure values match with values of below col)
RANKING_IDENTIFIER_COL = 'display_name'
# Values in this col (zero indexed) in tickets.csv should match up with the values in the column above in raffle.csv
TICKET_IDENTIFIER_COL_IDX = 0
# The column index in tickets.csv that gives the number of tickets for that row
TICKET_COUNT_COL_IDX = 1
# When we make the preference grid, google sheets starts each option with the question name
PRIZE_COL_STARTS_WITH = 'Raffle Prizes Ranking'
# Input file locations, relative to the repo root
TICKET_CSV = 'data/processed/tickets.csv'
INVENTORY_CSV = 'data/raw/prizes.csv'
PREFERENCES_CSV = 'data/processed/preferences.csv'
# RNG seed committed ahead of the drawing so the raffle is reproducible
SEED_TXT = 'data/raw/seed.txt'
def main():
    """Run the raffle: repeatedly draw weighted tickets and award each winner
    their highest-ranked prize that is still in stock.
    """
    # Deterministic draw: seed the RNG from a committed seed file so the
    # result is reproducible/auditable.
    with open(SEED_TXT, 'r', encoding='utf8') as seed_file:
        seed = seed_file.read()
    random.seed(seed)
    # load participants, ticket counts, prize stock and preference rankings
    identifier_to_team = load_identifier_to_team()
    team_to_tickets = load_team_to_tickets()
    dist, total = get_ticket_dist(identifier_to_team, team_to_tickets)
    inventory = get_inventory()
    prefs = get_preferences()
    # keep drawing while contestants, tickets and prizes all remain
    while len(dist) > 0 and total > 0 and any(inventory.values()):
        person, total = draw_ticket(dist, total)
        if person not in prefs:
            # drawn person never filled out the preference form; skip them
            continue
        person_prefs = prefs[person]
        # walk the person's ranking from most to least preferred
        # BUG FIX: range(1, len(inventory)) skipped the final rank, so a
        # contestant's last-choice prize could never be awarded.
        for rank in range(1, len(inventory) + 1):
            if rank not in person_prefs:
                # incomplete ranking: assume they want nothing further
                break
            selected_prize = person_prefs[rank]
            if remove_inventory(inventory, selected_prize):
                print(f'{person} drew {selected_prize}. {inventory[selected_prize]} remain...')
                break
# returns the key (identifier) of the person drawn
def draw_ticket(dist, total):
    """Draw one winner weighted by ticket count, remove them from the pool,
    and return (winner, remaining ticket total)."""
    winner = random.choices(list(dist), weights=list(dist.values()), k=1)[0]
    remaining = 0
    if winner is not None:
        # pop both reads the winner's ticket count and removes the person
        remaining = total - dist.pop(winner)
    return winner, remaining
def get_inventory():
    """Read prizes.csv and return {prize_name: quantity}, sorted by name."""
    df = pd.read_csv(INVENTORY_CSV).sort_values(by=['name'])
    # column 0 is the prize name, column 1 its remaining quantity
    return {row[0]: row[1] for _, row in df.iterrows()}
# returns false if failed
def remove_inventory(inventory, prize_name):
    """Take one unit of ``prize_name`` from ``inventory`` in place.

    Returns False when the prize is unknown or out of stock, True otherwise.
    """
    if inventory.get(prize_name, 0) == 0:
        return False
    inventory[prize_name] -= 1
    return True
# Zero-indexed column in preferences.csv that holds the registrant's team name
REGISTRATION_TEAM_NAME_COL_IDX = 1
# Returns a mapping of identifiers -> prize preferences (which are rank -> prize name)
def get_preferences():
    """Parse preferences.csv into {identifier: {rank: prize_name}}.

    The identifier is "<display_name> (<team_name>)".  Prize columns are
    detected by the PRIZE_COL_STARTS_WITH header prefix; the prize name is
    the text between '[' and ']' in the column header, and each cell holds
    the rank the person gave that prize.
    """
    df = pd.read_csv(PREFERENCES_CSV)
    df = df.sort_values(by=['display_name', 'team_name'])
    # prize_id to prize name
    prizes = dict()
    col = ''
    i = prize_id = 0
    # column-index bookkeeping: [start_col, end_col) spans the contiguous
    # block of prize columns; name_col is the identifier column
    start_col = end_col = name_col = None
    for col in df.columns:
        if col == RANKING_IDENTIFIER_COL:
            name_col = i
        if col.startswith(PRIZE_COL_STARTS_WITH):
            if start_col is None:
                start_col = i
            prize = col[col.index('[') + 1:col.index(']')] # get prize name
            prizes[prize_id] = prize
            prize_id += 1
            end_col = i + 1
        i += 1
    people = dict()
    for index, row in df.iterrows():
        prize_id = 0
        # NOTE(review): `ranks` is populated below but never read afterwards --
        # looks like leftover scaffolding; confirm before removing.
        ranks = dict()
        i = 0
        curr_name = ''
        curr_dict = dict()  # rank -> prize name for this person
        for value in row:
            # value is a ranking of that column's prize
            if i == name_col:
                ranks[value] = dict()
                curr_name = value
            if start_col <= i < end_col:
                # one of the prize columns
                curr_dict[value] = prizes[prize_id]
                prize_id += 1
            i += 1
        # assign people to their ranking preferences
        identifier = curr_name + ' (' + row[REGISTRATION_TEAM_NAME_COL_IDX] + ')'
        people[identifier] = curr_dict
    return people
# Zero-indexed column in preferences.csv that holds the member's display name
TEAM_MEMBER_COL_IDX = 0
def load_identifier_to_team():
    """Read preferences.csv and map "name (team)" identifiers to team names."""
    df = pd.read_csv('data/processed/preferences.csv')
    df = df.sort_values(by=['display_name', 'team_name'])
    mapping = dict()  # identifier -> team
    for _, row in df.iterrows():
        team = row[REGISTRATION_TEAM_NAME_COL_IDX]
        mapping[row[TEAM_MEMBER_COL_IDX] + ' (' + team + ')'] = team
    return mapping
# Zero-indexed columns in tickets.csv: ticket count and team name respectively
SCORE_COL_IDX = 1
TEAM_NAME_COL_IDX = 0
def load_team_to_tickets() -> dict:
    """Read tickets.csv and return a mapping of team name -> ticket count.

    Rows are sorted by team name so iteration order is deterministic.
    """
    # BUG FIX: the return annotation was `dict()` -- an empty-dict *instance*
    # evaluated at definition time -- instead of the `dict` type.  Also use
    # the TICKET_CSV constant instead of repeating the path literal.
    df = pd.read_csv(TICKET_CSV)
    df = df.sort_values(by=['team_name'])
    d = dict()  # team -> tickets
    for index, row in df.iterrows():
        d[row[TEAM_NAME_COL_IDX]] = int(row[SCORE_COL_IDX])
    return d
def get_ticket_dist(identifier_to_team, team_to_tickets):
    """Expand per-team ticket counts onto individual members.

    Returns ({identifier: tickets}, total ticket count); people whose team
    earned no tickets are left out of the draw entirely.
    """
    dist = dict()
    total = 0
    for identifier, team in identifier_to_team.items():
        if team not in team_to_tickets:
            continue
        tickets = team_to_tickets[team]
        dist[identifier] = tickets
        total += tickets
    return dist, total
if __name__ == '__main__':
    # script entry point: run the raffle only when executed directly
    print('Raffling...')
    main()
    print('Done!')
| calico-team/raffle-sp23-public | raffle.py | raffle.py | py | 5,549 | python | en | code | 3 | github-code | 13 |
21933654976 | from librosa import cqt, convert
import numpy as np
from common import FeatureModule
class MCQT(FeatureModule):
    """Melodic Constant-Q Transform (CQT) feature extractor.

    Computes the magnitude CQT of a mono signal and optionally:
      * folds the energy of the octaves above each bin back onto it
        ("melodic" averaging),
      * crops the result to a frequency range in Hz,
      * applies log(1 + S) compression,
      * zeroes every value below a fraction of the spectrogram maximum.
    """

    def __init__(self, sample_rate=22050, hop_size=441, bins_per_octave=96, n_bins=558, fmin=196,
                 average=True, keep_range=None, log=True, quartile=None):
        # BUG FIX: keep_range/quartile previously used mutable list defaults
        # ([]); None is the sentinel now, with identical (falsy) semantics.
        # (A duplicated self.sample_rate assignment was also removed.)
        self.sample_rate = sample_rate
        self.hop_length = hop_size
        self.bins_per_octave = bins_per_octave
        self.n_bins = n_bins
        # [low_hz, high_hz] crop range; empty means keep the full range
        self.keep_range = [] if keep_range is None else keep_range
        self.fmin = fmin
        self.average = average  # fold upper-octave energy onto each bin
        self.log = log  # apply log(1 + S) compression
        # zero all values below (max * quartile / 10); empty disables it
        self.quartile = [] if quartile is None else quartile

    def process_audio(self, audio):
        """Return the processed CQT magnitude spectrogram (bins x frames) of ``audio``."""
        S = np.abs(cqt(audio, sr=self.sample_rate, hop_length=self.hop_length, fmin=self.fmin, n_bins=self.n_bins,
                       bins_per_octave=self.bins_per_octave, tuning=0.0,
                       filter_scale=1, norm=1, sparsity=0.01, window='hann', scale=True, pad_mode='reflect',
                       res_type=None, dtype=None))
        if self.average:
            # Add to each bin the energy of the bins exactly 1..k octaves above.
            # GENERALIZED: the octave offsets were hard-coded for
            # bins_per_octave=96 / n_bins=558; they now follow the parameters
            # (identical result for the default settings).
            for index in range(len(S)):
                upper_octave_rows = np.arange(index + self.bins_per_octave, self.n_bins, self.bins_per_octave)
                S[index] = S[index] + np.sum(S[upper_octave_rows], axis=0)
        if self.keep_range:
            # translate the Hz range into CQT bin indices and crop
            table = convert.cqt_frequencies(self.n_bins, fmin=self.fmin, bins_per_octave=self.bins_per_octave,
                                            tuning=0.0)
            lower = min((np.argwhere(table >= self.keep_range[0])))[0]
            higher = max((np.argwhere(table <= self.keep_range[1])))[0]
            S = S[lower:higher + 1]
        if self.log:
            S = np.log(S + 1)
        if self.quartile:
            # threshold relative to the (post-log) spectrogram maximum
            S[S < (np.max(S) * self.quartile / 10)] = 0
        return S
# bbb=librosa.convert.cqt_frequencies(559, fmin=196, bins_per_octave=96, tuning=0.0)# number of bins to cover until 22025 (nyquist frequency)
# import numpy as np
# import matplotlib.pyplot as plt
# import librosa.display
#
# y1, sr1 = librosa.load(librosa.ex('trumpet'), sr=22050)
# mcqt = MCQT(keep_range=[392, 3520], quartile=3)
# S = mcqt.process_audio(y1)
# fig, ax = plt.subplots()
# img = librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max), y_axis='hz', x_axis='time', ax=ax)
# ax.set_title('Power spectrogram')
# fig.colorbar(img, ax=ax, format="%+2.0f dB")
# plt.show()
# pass
| mjhydri/Singing-Vocal-Beat-Tracking | scripts/melodic_cqt.py | melodic_cqt.py | py | 2,577 | python | en | code | 21 | github-code | 13 |
72856916498 | # -*- coding: utf-8 -*-
from app import create_app, db
def before_feature(context, feature):
    """Behave hook: build a fresh test app, client, and database per feature."""
    context.app = create_app('test')
    context.client = context.app.test_client()
    context.ctx = context.app.test_request_context()
    # Push a request context so code under test sees Flask's request globals.
    context.ctx.push()
    db.create_all()
def after_feature(context, feature):
    """Behave hook: tear down the database and pop the request context."""
    db.session.remove()
    db.drop_all()
    context.ctx.pop()
| fgimian/flaskage | flaskage/templates/bdd/features/environment.py | environment.py | py | 378 | python | en | code | 37 | github-code | 13 |
38053332023 | import cv2
import numpy as np
from win32api import GetSystemMetrics
import math as m
def getV(vec):
    """Return the 15 leftmost bounding boxes, ordered top-to-bottom.

    Boxes are (x, y, w, h) tuples; the 15 with the smallest x are kept and
    then sorted by y. *vec* itself is not modified.
    """
    leftmost = sorted(vec, key=lambda box: box[0])[:15]
    leftmost.sort(key=lambda box: box[1])
    return leftmost
def getH(vec):
    """Split the 12 topmost boxes into left and right halves.

    Takes the 12 boxes with the smallest y, orders them left-to-right by x,
    and returns (first six, last six). *vec* itself is not modified.
    """
    topmost = sorted(vec, key=lambda box: box[1])[:12]
    topmost.sort(key=lambda box: box[0])
    return topmost[:6], topmost[6:]
def Image_reader():
    """Load the scanned answer sheet, detect candidate answer boxes, and
    pass them to CalculateAnswer.

    Pipeline: Canny edges -> morphological close -> external contours ->
    size-filtered bounding boxes. Wide boxes are split into two side-by-side
    answer boxes.

    NOTE(review): the input path is hard-coded to a local Windows file, and
    the 3-tuple cv2.findContours unpacking assumes OpenCV 3.x — confirm.
    """
    vec = []
    # read the scanned sheet
    image = cv2.imread("C:/Users/flabe/Desktop/2018-1/ICV/TP1/dados/pattern_0001_scan.png")
    image_reserva = image.copy()
    edged = cv2.Canny(image,100,200)
    #cv2.imshow("Edges", edged)
    #cv2.waitKey(0)
    # build a closed black-and-white edge image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
    (_, contours, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Attempted erosion to better read sheets with smudged marks (disabled)
    #kernel = np.ones((5,5), np.uint8)
    #img_erosion = cv2.erode(closed, kernel, iterations=1)
    # Draw the contours on the image and collect bounding boxes
    for c in contours:
        rect = cv2.boundingRect(c)
        if(rect[2] > 70 and rect[3] < 40 and rect[2] < 300):
            # wide, short box: split it into two side-by-side answer boxes
            x,y,w,h = rect
            w = m.ceil(w/2)
            x2 = x+h*2
            cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),2)
            cv2.rectangle(image,(x2,y),(x2+w,y+h),(255,0,0),2)
            tup1 = (x,y,w,h)
            tup2 = (x2,y,w,h)
            vec.append(tup1)
            vec.append(tup2)
            continue
        if(rect[3] < 20 or rect[3] > 200):# discard contours that are too short or too tall
            continue
        if(rect[2] > 200 or rect[2] < 40):# discard contours that are too wide or too narrow
            continue
        #print(cv2.contourArea(c))
        x,y,w,h = rect
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        vec.append(rect)
    showImage(image)
    #print("VEC = ", vec)  # vec now holds every bounding box found
    cv2.destroyAllWindows()
    CalculateAnswer(vec, image_reserva, closed)
def CalculateAnswer(vec, image, closed):
    """For each exam question, collect its five answer boxes by aligning the
    vertical marker boxes (one per question row) with the horizontal marker
    boxes (one per answer column), then print each question's answer.

    The sheet is processed in two halves (left columns, then right columns).
    """
    h = image.shape[0]  # NOTE(review): h/w are immediately shadowed by the box unpacking below
    w = image.shape[1]
    ex_number = 1
    vecV = getV(vec)# vertical black marker contours (question rows)
    vecH_1, vecH_2 = getH(vec)# horizontal black marker contours, split into left and right halves
    for i in vecV:
        x,y,w,h = i
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
    showImage(image)
    for i in vecH_1:
        x,y,w,h = i
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),2)
    showImage(image)
    for i in vecH_2:
        x,y,w,h = i
        cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
    showImage(image)
    # iterate over the first (left) half of the answer sheet
    for v in vecV:
        boxes = []
        vx, vy, vw, vh = v
        for h1 in vecH_1:
            h1x, h1y, h1w, h1h = h1
            for bb in vec:
                bbx, bby, bbw, bbh = bb
                # box aligned with this column (x) and this row (y) belongs to the question
                if(abs(bbx - h1x) < 25 and abs(bby-vy) < 10):
                    cv2.rectangle(image,(bbx,bby),(bbx+bbw,bby+bbh),(0,255,0),2)
                    box_tuple = (bbx, bby, bbw, bbh)
                    boxes.append(box_tuple)
        # inline display (duplicates showImage's scaling logic)
        H, W = image.shape[:2]
        ratio = GetSystemMetrics(1)/float(H)
        resized_image = cv2.resize(image, (int(W*ratio), int(H*ratio)) )
        cv2.imshow("Show",resized_image)
        cv2.waitKey(0)
        soma = getResp(closed, boxes, ex_number, image)
        print(ex_number, ":", soma)
        ex_number = ex_number + 1
    # iterate over the second (right) half of the answer sheet
    for v in vecV:
        boxes = []
        vx, vy, vw, vh = v
        for h2 in vecH_2:
            h2x, h2y, h2w, h2h = h2
            for bb in vec:
                bbx, bby, bbw, bbh = bb
                if(abs(bbx - h2x) < 25 and abs(bby-vy) < 10):
                    cv2.rectangle(image,(bbx,bby),(bbx+bbw,bby+bbh),(255,0,0),2)
                    box_tuple = (bbx, bby, bbw, bbh)
                    boxes.append(box_tuple)
        H, W = image.shape[:2]
        ratio = GetSystemMetrics(1)/float(H)
        resized_image = cv2.resize(image, (int(W*ratio), int(H*ratio)) )
        cv2.imshow("Show",resized_image)
        cv2.waitKey(0)
        soma = getResp(closed, boxes, ex_number, image)
        print(ex_number, ":", soma)
        ex_number = ex_number + 1
def getResp(image, boxes, ex_number, image2):
    """Decide the marked answer for one question.

    Counts the non-zero pixels inside each of the five answer boxes of the
    binary edge image. A box with more than 1000 white pixels counts as
    marked. Returns 'A'..'E' for exactly one mark, 'NULO' for several,
    'BRANCO' for none. Also greys out the processed row on *image2*.

    NOTE(review): assumes *boxes* contains exactly five (x, y, w, h) tuples
    ordered A..E — confirm against CalculateAnswer.
    """
    Resp_vec = []
    # crop each of the five answer cells from the binary image
    a = image[boxes[0][1]:(boxes[0][1]+boxes[0][3]),boxes[0][0]:(boxes[0][0]+boxes[0][2])]
    b = image[boxes[1][1]:(boxes[1][1]+boxes[1][3]),boxes[1][0]:(boxes[1][0]+boxes[1][2])]
    c = image[boxes[2][1]:(boxes[2][1]+boxes[2][3]),boxes[2][0]:(boxes[2][0]+boxes[2][2])]
    d = image[boxes[3][1]:(boxes[3][1]+boxes[3][3]),boxes[3][0]:(boxes[3][0]+boxes[3][2])]
    e = image[boxes[4][1]:(boxes[4][1]+boxes[4][3]),boxes[4][0]:(boxes[4][0]+boxes[4][2])]
    count_a = np.count_nonzero(a)
    Resp_vec.append(count_a)
    count_b = np.count_nonzero(b)
    Resp_vec.append(count_b)
    count_c = np.count_nonzero(c)
    Resp_vec.append(count_c)
    count_d = np.count_nonzero(d)
    Resp_vec.append(count_d)
    count_e = np.count_nonzero(e)
    Resp_vec.append(count_e)
    # Grey out the current row on the display image (value 200)
    image2[boxes[0][1]:(boxes[0][1]+boxes[0][3]),boxes[0][0]:(boxes[0][0]+boxes[0][2])] = 200
    image2[boxes[1][1]:(boxes[1][1]+boxes[1][3]),boxes[1][0]:(boxes[1][0]+boxes[1][2])] = 200
    image2[boxes[2][1]:(boxes[2][1]+boxes[2][3]),boxes[2][0]:(boxes[2][0]+boxes[2][2])] = 200
    image2[boxes[3][1]:(boxes[3][1]+boxes[3][3]),boxes[3][0]:(boxes[3][0]+boxes[3][2])] = 200
    image2[boxes[4][1]:(boxes[4][1]+boxes[4][3]),boxes[4][0]:(boxes[4][0]+boxes[4][2])] = 200
    # A box counts as marked when it contains more than 1000 white (255) pixels
    soma = sum(i > 1000 for i in Resp_vec)
    showImage(image2)
    # translate the mark count into an answer
    if(soma == 1):
        for i in range(0, len(Resp_vec)):
            if(Resp_vec[i] > 1000):
                val = ord('A')
                val = val + i
                return chr(val)
    elif(soma > 1):
        return "NULO"
    else:
        return "BRANCO"
def showImage(image):
    """Display *image* scaled to the screen height and wait for a key press."""
    height, width = image.shape[:2]
    scale = GetSystemMetrics(1) / float(height)
    scaled = cv2.resize(image, (int(width * scale), int(height * scale)))
    cv2.imshow("Show", scaled)
    cv2.waitKey(0)
"""def rotateImage(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result"""
# Script entry point: runs immediately on import (no __main__ guard).
Image_reader()
15603143472 | import nibabel as nib
import numpy as np
import tensorflow as tf
from termcolor import colored
def f1(y_true, y_pred):
    # Stub metric: the saved model references an "f1" custom object, but since
    # the model is loaded with compile=False the value is never used, so a
    # constant placeholder is sufficient.
    return 1
def save_liver_segmentation(liver_image_path, model_path):
    """Segment a NIfTI CT volume with a Keras model and save the mask volume.

    Loads the scan at *liver_image_path*, clips/normalizes intensities, runs
    the model at *model_path* on each 512x512 axial slice, and writes a copy
    of the normalized volume with predicted voxels set to 1 to
    /content/result_tumor_new.nii.gz.

    Improvement: the per-voxel O(x*y*z) Python triple loop is replaced by a
    single vectorized boolean-mask assignment (identical result, orders of
    magnitude faster); an unused ``data = []`` re-initialization was removed.
    """
    print("Application started! Loading data...")
    # Clip Hounsfield-style intensities, then standardize and min-max scale.
    min_img_bound = -1000
    max_img_bound = 3000
    img = nib.load(liver_image_path)
    img_fdata = img.get_fdata()
    img_fdata = np.clip(img_fdata, min_img_bound, max_img_bound)
    img_fdata = (img_fdata - np.mean(img_fdata)) / np.std(img_fdata)
    img_fdata = (
        (img_fdata - np.min(img_fdata)) / (np.max(img_fdata) - np.min(img_fdata)))

    (x, y, z) = img_fdata.shape
    data = []
    for k in range(z):
        # Convert each axial slice to a rotated uint8 image for the model.
        slice = img_fdata[:, :, k]
        slice = slice.astype(np.float64) / slice.max()
        slice = 255 * slice
        slice = slice.astype(np.uint8)
        slice = np.rot90(slice)
        data.append(slice)
    print(colored("Data loaded!\n", "green"))

    print("Loading model...")
    model = tf.keras.models.load_model(model_path, compile=False, custom_objects={'f1': f1})
    print(colored("Model loaded!\n", "green"))

    print("Predicting liver mask...")
    # Predict each slice, undo the rotation, and re-stack into (x, y, z) order.
    result_data = [np.rot90(model.predict(x.reshape(1, 512, 512, 1)).reshape(512, 512), 3) for x in data]
    result = np.array(result_data).swapaxes(0, 2).swapaxes(0, 1)

    # Vectorized thresholding: every voxel the model scores above 0.5 becomes 1.
    result_image = img_fdata.copy()
    result_image[result > 0.5] = 1

    new_image = nib.Nifti1Image(result_image, affine=img.affine)
    nib.save(new_image, '/content/result_tumor_new.nii.gz')
    print(colored("Mask prepared and exported to ", "green"), colored("result_tumor_new.nii.gz", "green"))
| MaksOpp/tomography-image-segmentation | src/liver_segmentation_script_final.py | liver_segmentation_script_final.py | py | 1,858 | python | en | code | 1 | github-code | 13 |
38927684056 | from setting.models import *
from django.db import models
from company.models import Branch
from user.models import Employee
from django.core import serializers
import json
class Customer(models.Model):
    """Per-branch customer record with Colombian tax-identification metadata.

    Note: ``identification_number`` uniqueness is only enforced per branch in
    ``create_customer`` (application level), not by a database constraint.
    """
    identification_number = models.IntegerField()
    # dv: verification digit derived from identification_number (see dv_client)
    dv = models.IntegerField(default = 0)
    name = models.CharField(max_length = 100)
    phone = models.CharField(max_length = 12,null=True, blank=True)
    address = models.CharField(max_length = 150,null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    email_optional = models.EmailField(null=True, blank=True)
    type_document_i = models.ForeignKey(Type_Document_I, on_delete = models.CASCADE)
    type_organization = models.ForeignKey(Type_Organization, on_delete = models.CASCADE)
    municipality = models.ForeignKey(Municipalities, on_delete = models.CASCADE)
    type_regime = models.ForeignKey(Type_Regimen, on_delete = models.CASCADE)
    branch = models.ForeignKey(Branch, on_delete = models.CASCADE)

    def __str__(self):
        """Human-readable label: customer name plus branch name."""
        return f"{self.name} - {self.branch.name}"

    @staticmethod
    def dv_client(rut):
        """Compute the verification digit for a tax id.

        The prime-factor sequence matches the DIAN (Colombia) NIT
        check-digit algorithm — verify against official documentation.
        """
        factores = [3, 7, 13, 17, 19, 23, 29, 37, 41, 43, 47, 53, 59, 67, 71]
        # left-pad to 15 digits so each position lines up with its factor
        rut_ajustado=str(rut).rjust( 15, '0')
        s = sum(int(rut_ajustado[14-i]) * factores[i] for i in range(14)) % 11
        if s > 1:
            return 11 - s
        else:
            return s

    @classmethod
    def delete_client(cls, data):
        """Delete the customer with pk ``data['pk_customer']``.

        Returns {'result': bool, 'message': str}; result is False when the
        customer does not exist.
        """
        result = False
        message = None
        try:
            cls.objects.get(pk = data['pk_customer']).delete()
            result = True
            message = "Success"
        except cls.DoesNotExist as e:
            message = str(e)
        return {'result':result, 'message':message}

    @classmethod
    def create_customer(cls, data):
        """Create a customer in the employee's branch unless the
        identification number already exists there.

        Returns {'result': bool, 'message': str}.
        NOTE(review): the broad ``except Exception`` is used as the
        "does not exist" branch, so unrelated errors (e.g. a bad FK pk)
        would also attempt creation — consider cls.DoesNotExist.
        ``email_optional`` is not set here (only in update_customer).
        """
        result = False
        message = None
        branch = Employee.objects.get(pk = data['pk_employee']).branch
        try:
            customer = cls.objects.get(identification_number = data['identification_number'], branch = branch)
            message = "The client already exists"
        except Exception as e:
            customer = cls(
                identification_number = data['identification_number'],
                dv = cls.dv_client(data['identification_number']),
                name = data['name'],
                phone = data['phone'],
                address = data['address'],
                email = data['email'],
                type_document_i = Type_Document_I.objects.get(pk = data['type_document_identification_id']),
                type_organization = Type_Organization.objects.get(pk = data['type_organization_id']),
                municipality = Municipalities.objects.get(pk = data['municipality_id']),
                type_regime = Type_Regimen.objects.get(pk = data['type_regime_id']),
                branch = branch
            )
            customer.save()
            result = True
            message = "Success"
        return {'result':result, 'message':message}

    @classmethod
    def update_customer(cls, data):
        """Overwrite every editable field of the customer with pk
        ``data['pk_customer']``; recomputes the verification digit.

        Returns {'result': bool, 'message': str}.
        """
        result = False
        message = None
        try:
            customer = cls.objects.get(pk = data['pk_customer'])
            customer.identification_number = data['identification_number']
            customer.dv = cls.dv_client(data['identification_number'])
            customer.name = data['name']
            customer.phone = data['phone']
            customer.address = data['address']
            customer.email = data['email']
            customer.email_optional = data['email_optional']
            customer.type_document_i = Type_Document_I.objects.get(pk = data['type_document_identification_id'])
            customer.type_organization = Type_Organization.objects.get(pk = data['type_organization_id'])
            customer.municipality = Municipalities.objects.get(pk = data['municipality_id'])
            customer.type_regime = Type_Regimen.objects.get(pk = data['type_regime_id'])
            customer.save()
            result = True
            message = "Success"
        except cls.DoesNotExist as e:
            customer = None
            message = str(e)
        return {'result':result, 'message':message}

    @classmethod
    def get_list_customer(cls, data):
        """Return all customers of the employee's branch as a list of dicts
        (serialized field values plus a 'pk_customer' key)."""
        branch = Employee.objects.get(pk = data['pk_employee']).branch
        list_customer = []
        for i in cls.objects.filter(branch = branch):
            serialized_customer = serializers.serialize('json', [i])
            serialized_customer = json.loads(serialized_customer)[0]
            data = serialized_customer['fields']
            data['pk_customer'] = serialized_customer['pk']
            list_customer.append(data)
        return list_customer

    @classmethod
    def get_customer(cls, data):
        """Return one customer as a dict of serialized fields, enriched with
        the display names of its FK targets and its 'pk_customer'."""
        serialized_customer = json.loads(serializers.serialize('json', [cls.objects.get(pk = data['pk_customer'])]))[0]
        data = serialized_customer['fields']
        data['name_type_document_i'] = Type_Document_I.objects.get(pk = data['type_document_i']).name
        data['name_type_organization'] = Type_Organization.objects.get(pk = data['type_organization']).name
        data['name_municipality'] = Municipalities.objects.get(pk = data['municipality']).name
        data['name_type_regime'] = Type_Regimen.objects.get(pk = data['type_regime']).name
        data['pk_customer'] = serialized_customer['pk']
        return data
| cdavid58/api_new_invoice | customer/models.py | models.py | py | 4,728 | python | en | code | 0 | github-code | 13 |
74387209618 | import os
import sys
import numpy as np
import scipy.sparse as sp
import trimesh
import cv2
def sparse_to_tuple(sparse_mx):
    """Convert a sparse matrix (or a list of them) to (coords, values, shape).

    Lists are converted element-by-element in place and the same list object
    is returned; a single matrix yields a single tuple.
    """
    def _convert(matrix):
        coo = matrix if sp.isspmatrix_coo(matrix) else matrix.tocoo()
        indices = np.vstack((coo.row, coo.col)).transpose()
        return indices, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        for pos in range(len(sparse_mx)):
            sparse_mx[pos] = _convert(sparse_mx[pos])
        return sparse_mx
    return _convert(sparse_mx)
def normalize_adj(features):
    """Row-normalize a (symmetric, binary) adjacency matrix so each nonzero
    row sums to 1, and return it in tuple form via sparse_to_tuple."""
    row_sums = np.array(features.sum(1))
    inv_row_sums = np.power(row_sums, -1).flatten()
    # All-zero rows produce inf; zero them out instead.
    inv_row_sums[np.isinf(inv_row_sums)] = 0.
    normalized = sp.diags(inv_row_sums).dot(features)
    return sparse_to_tuple(normalized)
def construct_feed_dict(img_inp, pkl, labels, placeholders):
    """Build the TensorFlow feed dictionary for one training sample.

    *pkl* supplies the mesh template: vertex coordinates, edge index pairs,
    and faces. The graph-convolution supports are the identity matrix and
    the row-normalized adjacency built from the edges.
    """
    coord, edges, faces = pkl[0], pkl[1], pkl[2]
    num_vertices = len(coord)
    adjacency = sp.coo_matrix(
        (np.ones(len(edges), dtype='float32'), (edges[:, 0], edges[:, 1])),
        shape=(num_vertices, num_vertices))
    support = [sparse_to_tuple(sp.eye(num_vertices)), normalize_adj(adjacency)]

    feed_dict = {
        placeholders['labels']: labels,
        placeholders['features']: coord,
        placeholders['img_inp']: img_inp,
        placeholders['edges']: edges,
        placeholders['faces']: faces,
        placeholders['num_features_nonzero']: coord[1].shape,
        placeholders['dropout']: 0.,
    }
    feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
    return feed_dict
def export_img_mesh(img_inp, label, pkl, mod_seq, out1, out2, out3, savedir):
    """Save the input image and the final predicted mesh for one sample.

    Writes savedir/img/<mod_seq>.png (input image rescaled to 0-255) and
    savedir/predict/<mod_seq>.obj (vertices from *out3*, faces from pkl[2]).
    NOTE(review): label/out1/out2 are unused here; os.mkdir is not
    race-safe (os.makedirs(..., exist_ok=True) would be).
    """
    if not os.path.exists(savedir+'/img'):
        os.mkdir(savedir+'/img')
    if not os.path.exists(savedir+'/predict'):
        os.mkdir(savedir+'/predict')
    img_file = savedir+'/img/%s.png'%mod_seq
    cv2.imwrite(img_file, img_inp*255)
    mesh_file = savedir+'/predict/%s.obj'%mod_seq
    vertices = out3
    faces = pkl[2]
    # process=False keeps the vertex/face arrays exactly as predicted
    mesh = trimesh.Trimesh(vertices, faces, vertex_normals=None, process=False)
    mesh.export(mesh_file)
    print('vertices:', out3.shape, 'faces:', faces.shape, mesh_file)
15736352273 | import seaborn as sns
import matplotlib.pyplot as plt
import pandas
from scipy.stats import ttest_1samp, wilcoxon
def plot_correlation(variable1,variable2, name1,name2,rValue,year):
    """Scatter-plot two variables with a seaborn regression fit, annotate the
    R^2 value and year, show the figure, and save it under Plots/.

    The output filename is '<year>_<name1>_<name2>.png'.
    """
    df = pandas.DataFrame({'x':variable1,'y':variable2})
    # plt.figure()
    sns.lmplot(x="x",y="y",data=df,fit_reg=True,height = 6)
    plt.title("Year: %s" %year)
    # R^2 annotation placed in data coordinates (0.1, 0.9)
    plt.text(0.1,0.9,r'R^2 = %s'%str(rValue),fontsize = 13)
    plt.xlabel(name1)
    plt.ylabel(name2)
    # grab the figure before show() clears it, so savefig works afterwards
    fig1 = plt.gcf()
    plt.show()
    fig1.savefig('Plots/%s.png'%(year+'_'+name1+'_'+name2))
def get_pValue(values1, values2):
    """Wilcoxon signed-rank test on paired samples; returns the p-value.

    H0: the pairwise differences are symmetric around zero. A p-value below
    the chosen significance level (commonly 0.05) rejects H0. This is the
    non-parametric alternative to the paired t-test, suitable for ordinal or
    non-normal data.
    """
    _, p_value = wilcoxon(values1 - values2)
    print("paired wilcoxon-test", p_value)
    return p_value
| jinchen1036/VisualizationProject | Functions.py | Functions.py | py | 1,612 | python | en | code | 0 | github-code | 13 |
31466382632 | # Write a program to find the node at which the intersection of two singly linked lists begins.
# For example, the following two linked lists:
# A: a1 → a2
# ↘
# c1 → c2 → c3
# ↗
# B: b1 → b2 → b3
# begin to intersect at node c1.
# Notes:
# If the two linked lists have no intersection at all, return null.
# The linked lists must retain their original structure after the function returns.
# You may assume there are no cycles anywhere in the entire linked structure.
# Your code should preferably run in O(n) time and use only O(1) memory.
class LinkedList(object):
    """Minimal singly linked list node."""

    def __init__(self, val):
        """Store the payload and start with no successor."""
        self.val = val
        self.next = None
def intersection_finder(l1, l2):
"""Find intersecting node between two linked lists."""
l1_count, l2_count = 0, 0
temp_l1, temp_l2 = l1, l2
while temp_l1: # get len of l1
temp_l1 = temp_l1.next
l1_count += 1
while temp_l2: # get len of l2
temp_l2 = temp_l2.next
l2_count += 1
long_ll = l1 if l1_count > l2_count else l2 # determine the longer ll
short_ll = l2 if l2_count > l1_count else l1 # determine the shorter ll
short_count = l1_count if l1_count < l2_count else l2_count
diff = abs(l1_count - l2_count) # diff between the lens
for _ in range(diff): # cut off long ll head to make it as long as short ll
long_ll = long_ll.next
for _ in range(short_count): # loop through both ll together
if long_ll.next is short_ll.next: # if they share same node will reflect here
return long_ll.next.val
else: # keep iterating
long_ll = long_ll.next
short_ll = short_ll.next
return -1
if __name__ == '__main__':
    # Demo: l1 = 1 -> 2 -> 3, l2 = 5 -> 4 -> (shares l1's node 3)
    l1 = LinkedList(1)
    l1.next = LinkedList(2)
    l1.next.next = LinkedList(3)
    l2 = LinkedList(5)
    l2.next = LinkedList(4)
    l2.next.next = l1.next.next
    print('the intersecting node between l1 and l2 is ' + str(intersection_finder(l1, l2)))
| han8909227/leetcode | linked_list/intersection_two_ll_lc160.py | intersection_two_ll_lc160.py | py | 2,111 | python | en | code | 3 | github-code | 13 |
29314449881 | import numpy as np
from numpy.random import default_rng
class GMM:
    """Gaussian mixture model with random initialization, sampling, density
    evaluation and posterior responsibilities.

    Fix: ``pdf`` previously added the determinant regularizer to
    ``self.covariances`` *in place* on every call, so repeated evaluations
    accumulated regularization and changed results. The regularized
    covariance is now computed locally; the stored parameters stay intact.
    """

    def __init__(self, num_components, dimensionality, **kwargs):
        """Randomly initialize mixing weights, means, and identity covariances.

        kwargs: ``reg_covariance_det`` — diagonal regularizer added to each
        covariance when evaluating densities (default 1e-6).
        """
        self.num_components = num_components
        self.dimensionality = dimensionality
        mixing_coeffs = np.random.uniform(0, 1, size=(num_components, ))
        self.mixing_coeffs = mixing_coeffs / np.sum(mixing_coeffs)
        self.means = np.random.uniform(0, 1, size=(num_components, dimensionality))
        self.covariances = np.array([np.eye(dimensionality) for _ in range(num_components)])
        self.reg_covariance_det = kwargs.get("reg_covariance_det", 1e-6)
        self.rng = default_rng()

    def sample(self, n_samples=1):
        """Draw samples: pick a component per sample, then transform standard
        normals by that component's Cholesky factor and mean.

        NOTE(review): ``squeeze()`` collapses the batch axis when
        n_samples == 1, returning shape (dimensionality,) — confirm callers
        expect this.
        """
        sampled_component = (self.rng.multinomial(1, self.mixing_coeffs, size=(n_samples, ))
                             @ np.linspace(0, self.num_components - 1, self.num_components)).astype('int')
        raw_samples = self.rng.standard_normal(size=(n_samples, self.dimensionality))
        sampled_covariances = self.covariances[sampled_component]
        sampled_means = self.means[sampled_component]
        cholesky_sampled_covariances = np.linalg.cholesky(sampled_covariances)
        return (cholesky_sampled_covariances @ raw_samples[:, :, None]).squeeze() + sampled_means

    def pdf(self, x):
        """Evaluate the mixture density at *x* (reshaped to (-1, dim)).

        Returns (total density per point, per-component weighted densities).
        Covariances are regularized with reg_covariance_det * I for numerical
        stability, without mutating ``self.covariances``.
        """
        dimensionality = len(self.means[0])
        x = x.reshape((-1, dimensionality))
        part_prob = np.zeros((x.shape[0], self.num_components))
        for i in range(self.num_components):
            # Local regularized copy — do NOT mutate the stored covariance.
            cov = self.covariances[i] + self.reg_covariance_det * np.eye(self.dimensionality)
            centered = x - self.means[i]
            exponent = np.multiply(-0.5 * centered, centered @ np.linalg.inv(cov)).sum(axis=1)
            part_prob[:, i] = self.mixing_coeffs[i] * np.sqrt((2 * np.pi) ** (-1 * dimensionality)) * np.exp(exponent)
            part_prob[:, i] = part_prob[:, i] / np.sqrt(np.linalg.det(cov))
        return np.sum(part_prob, axis=1), part_prob

    def responsibilities(self, x):
        """Posterior probability of each component for each point in *x*."""
        total, per_component = self.pdf(x)
        return per_component / total[:, None]
| rVSaxena/gmm | gmm.py | gmm.py | py | 2,392 | python | en | code | 0 | github-code | 13 |
32734155424 | import random
import json
import csv
def save_game_data(data):
    """Persist the current game state to data.json in the working directory."""
    with open('data.json', 'w') as handle:
        json.dump(data, handle)
def load_game_data():
    """Read the saved game state from data.json; empty dict if missing."""
    try:
        with open('data.json', 'r') as handle:
            return json.load(handle)
    except FileNotFoundError:
        return {}
def update_csv(data):
    """Mirror the player->role mapping into game_data.csv with a header row."""
    with open('game_data.csv', 'w', newline='') as handle:
        writer = csv.writer(handle)
        writer.writerow(["Игрок", "роль"])
        writer.writerows(data.items())
def start_game(n):
    """Pick one mafia member at random from the player list *n*, persist the
    role assignments (JSON + CSV), and announce them on stdout."""
    mafia = random.choice(n)
    roles = {player: "Мафия" if player == mafia else "Граждане" for player in n}
    data = load_game_data()
    data.update(roles)
    save_game_data(data)
    update_csv(data)
    print("Начало игры!")
    print("Мафия:", mafia)
    print("Граждане:", [player for player in n if player != mafia])
# NOTE(review): this whole block runs at module level — it executes the
# interactive "check a player" mini-game immediately on import, *before*
# main_menu() ever runs. Presumably it should live inside a function.
players = []
b = ['Мафия', 'Мирный житель', 'Мирный житель','Мирный житель','Мирный житель','Мирный житель']
random.shuffle(b)
# Create the players and randomly distribute the shuffled roles
for i in range(len(b)):
    player = {'Роль': b[i], 'Жив': True}
    players.append(player)
# Game loop
while True:
    for i in range(len(players)):
        print(f"Игрок {i+1}")
        # Ask the current player who to inspect
        vote = int(input("Кого хотите проверить? (Введите номер игрока): "))
        # Validate the entered player number
        if vote < 1 or vote > len(players):
            print("Неверный номер игрока, попробуйте еще раз.")
            continue
        # Check the selected player's role
        if players[vote-1]['Роль'] == 'Мафия':
            print("Игрок номер", vote, "принадлежит к мафии!")
            break
        else:
            print("Игрок номер", vote, "не принадлежит к мафии!")
    # Check whether either side has won
    mafia_count = 0
    citizens_count = 0
    for player in players:
        if player['Жив']:
            if player['Роль'] == 'Мафия':
                mafia_count += 1
            else:
                citizens_count += 1
    if mafia_count == 0:
        print("Мирные жители победили!")
        break
    elif mafia_count >= citizens_count:
        print("Мафия победила!")
        break
def delete_save_data():
    """Truncate both save files, wiping any stored game state."""
    for path in ('data.json', 'game_data.csv'):
        with open(path, 'w', newline='') as handle:
            handle.write("")
def main_menu():
    """Interactive console menu: start a game, delete saves, or exit.

    Loops reading a choice from stdin until the user picks "3".
    """
    while True:
        print("1. Начало игры")
        print("2. Удалить сохранение")
        print("3. Выйти")
        choice = input("Введите свой выбор: ")
        if choice == "1":
            # comma-separated player names become the roster for start_game
            players = input("Введите имена игроков (через запятую): ").split(",")
            start_game(players)
        elif choice == "2":
            delete_save_data()
            print("Save data deleted.")
        elif choice == "3":
            break
        else:
            print("Неверный выбор. Пожалуйста, попробуйте снова.")
# Entry point when executed as a script (the module-level game loop above
# still runs first, even on import).
if __name__ == "__main__":
    main_menu()
| Kobrar0112/python14112023 | Igra.py | Igra.py | py | 3,787 | python | ru | code | 0 | github-code | 13 |
# Draws 100 concentric squares with the Turtle module.
# 17 October 2019
# CTI-110 P4T1a - Shapes
# John Fueyo
#
# Algorithm:
#   The outer loop runs NUM_SQUARES times; each iteration the inner loop
#   draws one square (4 sides, 90-degree left turns). After each square the
#   pen is lifted, moved OFFSET pixels left of the current position, and the
#   side length grows by OFFSET so every square is larger than the last.

# Named constants
NUM_SQUARES = 100   # how many squares to draw
OFFSET = 3          # leftward shift and side-length growth per square

# Variables
squareSide = 5      # side length of the first (innermost) square
x = 0
y = 0

import turtle

turtle.showturtle()
turtle.pensize(1)
turtle.speed(0)     # fastest drawing speed

for count in range(NUM_SQUARES):
    # draw one square
    for square in range(4):
        turtle.forward(squareSide)
        turtle.left(90)

    # reposition for the next, larger square
    x = turtle.xcor() - OFFSET
    y = turtle.ycor()
    squareSide = squareSide + OFFSET

    turtle.penup()
    turtle.goto(x, y)
    turtle.pendown()

turtle.hideturtle()
turtle.done()
| Jfueyo/cti110 | p4t1a_fueyo.py | p4t1a_fueyo.py | py | 1,049 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.