seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74377298215 | import key_transformator
import random
import string
# header: 137, 80, 78, 71, 13, 10, 26, 10
# encrypted: 200, 10, 1, 3, 79, 81, 74, 79
# key: 65, 90, 79, 68, 66, 91, 80, 69
key_length = 4
def generate_initial_key(length=4):
    """Return a random key of uppercase ASCII letters.

    `length` defaults to 4, matching the module-level `key_length`; the
    original hard-coded the literal 4 here, duplicating that constant.
    The two values must stay in sync for xor()/add_padding() to line up.
    """
    return ''.join(random.choice(string.ascii_uppercase) for _ in range(length))
def xor(s1, s2):
    """XOR two strings character-by-character, returning a string.

    The original sized its result from the module-level ``key_length``
    global; sizing from the inputs is equivalent here (every chunk is
    exactly ``key_length`` characters after add_padding) and removes the
    hidden global coupling, so the helper works for any block size.
    """
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))
def add_padding(img, block_size=4):
    """Pad `img` to a multiple of `block_size`, PKCS#7-style.

    The pad character's code point equals the pad length, so an exact
    multiple gains one full extra block (the pad length is never 0).
    `block_size` defaults to 4 to match the module-level `key_length`.

    NOTE(review): this assumes `img` is a str; under Python 3,
    f.read() on a 'rb' file yields bytes and the chr() concatenation
    below would raise -- this module appears to be Python 2 code.
    """
    pad = block_size - len(img) % block_size
    return img + chr(pad) * pad
with open('flag.png', 'rb') as f:
img = f.read()
img = add_padding(img)
key = generate_initial_key()
enc_data = ''
for i in range(0, len(img), key_length):
enc = xor(img[i:i+key_length], key)
key = key_transformator.transform(key)
enc_data += enc
with open('encrypted.png', 'wb') as f:
f.write(enc_data) | zvikam/Checkpoint-CSA | 2018/png++/encrypt.py | encrypt.py | py | 994 | python | en | code | 1 | github-code | 90 |
632553252 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Arne F. Meyer <arne.f.meyer@gmail.com>
# License: GPLv3
"""
Run motion registration on video file(s)
"""
from __future__ import print_function
import click
import sys
import os
import os.path as op
import glob
import numpy as np
import matplotlib.pyplot as plt
try:
import mousecam.motionregistration as mmr
from mousecam.util import selectROI, get_first_frame
from mousecam.util import plotting as mcp
except ImportError:
sys.path.append(op.join(op.split(__file__)[0], '..'))
import mousecam.motionregistration as mmr
from mousecam.util import selectROI, get_first_frame
from mousecam.util import plotting as mcp
def find_video_files(path, pattern=None, recursive=False, overwrite=False):
    """Collect video files at or below *path*, returned sorted.

    *path* may be a single file (returned as-is), or a directory that is
    globbed with *pattern* (default '*.h264').  Unless *overwrite* is
    set, files that already have a companion '<name>_motion_registration.npz'
    result are skipped.  With *recursive*, every sub-directory is walked.

    NOTE(review): the recursive call does not forward *overwrite*, so
    nested directories are always filtered against existing result files;
    that original behaviour is kept as-is.
    """
    found = []
    if recursive:
        # Gather the non-recursive matches of every directory in the tree.
        for root, _, _ in os.walk(path, topdown=False):
            matches = find_video_files(root, pattern=pattern,
                                       recursive=False)
            if matches is not None and len(matches) > 0:
                if isinstance(matches, list):
                    found.extend(matches)
                else:
                    found.append(matches)
    elif op.isfile(path):
        # A single file was given directly.
        found.append(path)
    else:
        glob_pattern = '*.h264' if pattern is None else pattern
        candidates = glob.glob(op.join(path, glob_pattern))
        if not overwrite:
            # Drop files whose motion-registration result already exists.
            kept = []
            for cand in candidates:
                result_file = op.splitext(cand)[0] + '_motion_registration.npz'
                if op.exists(result_file):
                    print("Skipping file:", cand)
                else:
                    print("Adding file:", cand)
                    kept.append(cand)
            candidates = kept
        found.extend(candidates)
    return sorted(found)
def run_moco(video_path, bbox, offset=0, headless=True, use_average=True,
             max_frames=-1, stddev=3., plot_results=True):
    """Run motion registration ("moco") on a single video file.

    Reads up to `max_frames` frames from `video_path` (starting at
    `offset`), builds a template from brightness-valid frames, runs the
    mmr.Moco wrapper, and saves results next to the video as
    '<name>_motion_registration.npz'.

    :param video_path: path to the input video
    :param bbox: region of interest; if None/empty the user is asked to
        draw one on the first frame via selectROI
    :param stddev: frames whose mean brightness deviates more than this
        many standard deviations from the mean are excluded from the
        template
    :param plot_results: if True, create (but not show) a summary figure
    """
    first_frame = get_first_frame(video_path)
    if bbox is None or len(bbox) == 0:
        bbox = selectROI(first_frame)
    brightness_mask = None  # use rectangular bbox also for brightness
    frames, bbox, br_bbox, br = mmr.get_frames(video_path, bbox,
                                               num_frames=max_frames,
                                               brightness_mask=brightness_mask,
                                               offset=offset)
    if frames is not None:
        # ignore all frames with average brightness exceeding `stddev` std dev
        yy = np.mean(br_bbox)
        yerr = np.std(br_bbox)
        valid = np.logical_and(br_bbox >= yy-stddev*yerr,
                               br_bbox <= yy+stddev*yerr)
        # template = per-pixel mean over the brightness-valid frames
        template = np.mean(frames[valid, :, :], axis=0)
        # run moco using wrapper class
        moco = mmr.Moco()
        xy_shift = moco.run(frames,
                            headless=headless,
                            template=template,
                            use_average=use_average)
        result_file = op.splitext(video_path)[0] + '_motion_registration.npz'
        np.savez(result_file,
                 video_path=video_path,
                 xy_shift=xy_shift,
                 bbox=bbox,
                 brightness_bbox=br_bbox,
                 brightness_frames=br,
                 brightness_mask=brightness_mask,
                 template=template)
        # average absolute frame-to-frame shift, reported per axis
        mean_mvmt = np.mean(np.abs(np.diff(xy_shift, axis=0)), axis=0)
        print("average movement x/y:", mean_mvmt[0], mean_mvmt[1])
        if plot_results:
            fig, axarr = plt.subplots(nrows=2, ncols=1, sharex=True)
            xx = 1 + np.arange(xy_shift.shape[0])
            red = mcp.NICE_COLORS['new mpl red']
            blue = mcp.NICE_COLORS['new mpl blue']
            ax = axarr[0]
            ax.plot(xx, xy_shift[:, 0], '-', color=red, lw=2, label='x')
            ax.plot(xx, xy_shift[:, 1], '-', color=blue, lw=2, label='y')
            ax.set_xlabel('Frame index')
            ax.set_ylabel('Shift (pixels)')
            ax.set_ylim(np.min(xy_shift)-1, np.max(xy_shift)+1)
            ax.legend(loc='best', fontsize=8)
            ax = axarr[1]
            ax.plot(xx, br_bbox, '-', color=red, lw=2)
            ax.axhline(yy, color=blue)
            # NOTE(review): dashed lines are drawn at +/- 2*std regardless
            # of the `stddev` argument used for filtering above -- confirm
            # whether they should use `stddev` instead.
            ax.axhline(yy - 2*yerr, color=blue, ls='--')
            ax.axhline(yy + 2*yerr, color=blue, ls='--')
            ax.set_xlabel('Frame index')
            ax.set_ylabel('Mean brightness')
            for ax in axarr.flat:
                mcp.set_font_axes(ax, add_size=2)
                mcp.simple_xy_axes(ax)
                ax.xaxis.set_major_locator(plt.MaxNLocator(4))
                ax.yaxis.set_major_locator(plt.MaxNLocator(4))
            fig.set_size_inches(7, 3)
            fig.tight_layout()
# Command-line entry point: find video files under PATH, select a ROI
# (one shared ROI, per-file ROIs with --batch, or an explicit --bbox),
# then run motion registration on each file.
# No docstring is added on purpose: click would surface it as --help text,
# which would change the program's user-visible output.
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--bbox', '-b', nargs=4, type=int, default=None)
@click.option('--frames', '-f', default=-1, type=int)
@click.option('--offset', '-o', default=0, type=int)
@click.option('--overwrite', '-w', is_flag=True)
@click.option('--headless', '-H', is_flag=True)
@click.option('--average', '-a', is_flag=True)
@click.option('--recursive', '-r', is_flag=True)
@click.option('--pattern', '-p', default='*.h264', type=str)
@click.option('--batch', '-B', is_flag=True)
@click.option('--show', '-s', is_flag=True)
def cli(path=None, bbox=None, frames=-1, overwrite=False,
        headless=False, average=False, offset=0,
        recursive=False, pattern=None, batch=False,
        show=False):
    video_files = find_video_files(path, pattern, recursive,
                                   overwrite=overwrite)
    if len(video_files) > 0:
        for f in video_files:
            print("Found file:", f)
    bboxes = []
    if bbox is None or len(bbox) == 0:
        if batch:
            # select ROIs (bounding boxes) for all video files
            for video_file in video_files:
                first_frame = get_first_frame(video_file)
                bbox = selectROI(first_frame, title=video_file)
                bboxes.append(bbox)
        else:
            # one ROI, drawn on the first file, reused for every file
            # NOTE(review): this indexes video_files[0] and will raise
            # IndexError when no files were found -- confirm intended.
            first_frame = get_first_frame(video_files[0])
            bbox = selectROI(first_frame)
            bboxes = len(video_files) * [bbox]
    else:
        # explicit --bbox given on the command line
        bboxes = len(video_files) * [bbox]
    for bbox, video_file in zip(bboxes, video_files):
        if bbox is not None:
            run_moco(video_file, bbox,
                     offset=offset,
                     use_average=average,
                     headless=headless,
                     max_frames=frames,
                     plot_results=show)
        else:
            print("Skipping video file:", video_file)
    if show:
        plt.show()
if __name__ == '__main__':
cli()
| arnefmeyer/mousecam | mousecam/scripts/motion_registration.py | motion_registration.py | py | 6,890 | python | en | code | 21 | github-code | 90 |
18103826259 | import math
def prime_array(n=1000):
    """Return the list of primes <= n, by trial division with known primes.

    Starts from [2] and tests only odd candidates; a candidate is prime
    once every known prime up to its square root fails to divide it.
    """
    primes = [2]
    candidate = 3
    while candidate <= n:
        is_prime = True
        for p in primes:
            if math.sqrt(candidate) < p:
                # no prime divisor found below sqrt(candidate) -> prime
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
        candidate += 2
    return primes
def main():
    """Count how many of the input numbers are prime.

    Reads an integer n from stdin, then n integers, and prints how many
    of them are prime.  Numbers <= 10**4 are checked against the
    precomputed prime list; larger numbers (valid up to 10**8) are
    trial-divided by those primes.

    Fix: the original relied on Python 2's ``input()`` evaluating lines
    to ints; ``int(input())`` keeps the same behaviour on Python 2 and
    also works on Python 3.  Membership for small numbers now uses a set
    (O(1)) instead of a linear scan -- same result.
    """
    cnt = 0
    prime_list = prime_array(10 ** 4)
    prime_set = set(prime_list)  # O(1) membership for small inputs
    n = int(input())
    for _ in range(n):
        a = int(input())
        if a <= 10 ** 4:
            if a in prime_set:
                cnt += 1
        else:
            # prime iff no precomputed prime divides it (short-circuits)
            if all(a % p for p in prime_list):
                cnt += 1
    print('%d' % cnt)
if __name__=='__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02257/s053793174.py | s053793174.py | py | 568 | python | en | code | 0 | github-code | 90 |
70098420777 | # -*- coding: utf-8 -*-
import requests
import re
import scrapy
from ..items import MJItem
from bs4 import BeautifulSoup
from scrapy.loader import ItemLoader
class onem3point(scrapy.Spider):
    """Scrapy spider for interview-experience threads on 1point3acres forum 145."""
    # Required field: the spider's name; `scrapy list` prints these names.
    name = 'mj_spider'
    # Required field: domains allowed to be crawled; URLs outside these
    # domains are not fetched.  Python list; may hold several entries.
    # NOTE(review): Scrapy expects this attribute to be spelled
    # 'allowed_domains' -- with this misspelling the domain filter is
    # effectively inactive.  Confirm and fix separately.
    allowed_domians = ['www.1point3acres.com']
    # Required field: URL(s) opened first when the spider starts.
    # Python list; may hold several links.
    start_urls = ['http://www.1point3acres.com/bbs/forum-145-1.html']
    def parse_info(self,response):
        # Extract one thread page into an MJItem (url, title, tags, content).
        item_loader = ItemLoader(item=MJItem(), response=response)
        #postlist = response.css("div[id='postlist']")
        # ::: extraction of tags
        #tags = postlist.xpath("//div[@class='pcb']//u//b//text()").extract()[3]  note the difference between / and //; some values are not directly in the first child level
        #postlist.xpath("//div[@class='pcb']//u//b[4]//text()") --> css object (xpath arrays are 1-indexed internally)
        #tags = postlist.css("div[class='pcb'] u b font::text").extract()  note: must select all the way down to <font>
        item_loader.add_value("url", response.url)
        item_loader.add_css("title", "span#thread_subject::text")
        item_loader.add_xpath("tags", "//div[@id='postlist']//div[@class='pcb']//u//b[4]//text()")
        item_loader.add_xpath("content", "//div[@id='postlist']//td[@class='t_f']//text()")
        mj_item = item_loader.load_item()
        yield mj_item
    def parse_link(self,response):
        # Parse one forum listing page and follow each normal thread link.
        soup = BeautifulSoup(response.text, 'lxml')
        threads = soup.find('table', {'summary':'forum_145'}).find_all('tbody')
        print(len(threads))
        for thread in threads:
            # NOTE(review): mj_info is created but never used (the meta=
            # pass-through below is commented out).
            mj_info = MJItem()
            if thread.get('id').find('normalthread')==-1:
                continue
            link = thread.find('th',{'class':'common'}).find('a', {"class":'s','class':'xst'}).get('href')
            #yield scrapy.Request(link, meta={'mj_info':mj_info}, callback=self.parse_info)
            yield scrapy.Request(link, callback=self.parse_info)
    def parse(self,response):
        # Entry point: find the total page count of the listing, then
        # queue listing pages for parse_link.
        soup = BeautifulSoup(response.text, 'lxml')
        url = 'http://www.1point3acres.com/bbs/forum.php?mod=forumdisplay&fid=145&sortid=311&%1=&sortid=311&page={}'
        pages = soup.find('span', {'id':'fd_page_bottom'}).find_all('a')
        total = 0
        for page in pages:
            try:
                # keep only the digits of the pager link text
                page = int(re.sub('\D','',page.get_text()))
            except:
                page = 0
            if page > total:
                total = page
        print(total)
        # NOTE(review): only page 1 is queued despite computing `total`;
        # presumably range(1, total + 1) was intended -- confirm.
        for i in range(1, 2):
            yield scrapy.Request(url.format(i), callback=self.parse_link)
| emmazh507/1m3Spider | onem3_spider/spiders/1m3spider.py | 1m3spider.py | py | 2,940 | python | en | code | 0 | github-code | 90 |
15410568513 | #!/usr/bin/env python
# coding: utf-8
# In[195]:
import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
import gspread
from datetime import date
# In[196]:
def update(value_df):
    """Append the rows of `value_df` below the existing data of the
    'tripadvisor' worksheet in the hard-coded Google Sheet.

    Assumes `value_df` has exactly four columns lining up with sheet
    columns A-D (name, rate, date, source) -- TODO confirm against the
    sheet layout.  Requires a service-account credential file at
    'cred/travel_expense_credential.json'.
    """
    sheet_id = '1x4A_IVSNKxa08qvViYp4KuG9Of7UuEbqcWllPk0i7fk'
    sheet_name = 'tripadvisor'
    gc = gspread.service_account('cred/travel_expense_credential.json')
    spreadsheet = gc.open_by_key(sheet_id)
    worksheet = spreadsheet.worksheet(sheet_name)
    # Existing records determine the first empty row to write into
    # (+2: one for the header row, one to move past the last data row).
    now_df = pd.DataFrame(worksheet.get_all_records())
    from_,to_ = now_df.shape
    from_cell = 'A'+str(from_+2)+':'+'D'+str(from_+1+value_df.shape[0])
    worksheet.update(from_cell, value_df.values.tolist())
    # print() returns None, so this function returns None.
    return print("successfully update")
# In[197]:
def get_rakuten():
    """Scrape TripAdvisor cashback categories and rates from Rakuten.

    Returns a DataFrame with columns [name, rate, date, source] where
    `rate` is a fraction (e.g. '4%' -> 0.04), `date` is today's date in
    ISO format, and `source` is the literal 'Rakuten'.
    """
    url = 'https://www.rakuten.com/tripadvisor.com?query=trip&position=2&type=suggest&store=12003'
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    raku_name = []
    raku_rate = []
    # Each cashback category anchor carries both the title and the amount.
    for i in soup.find_all('a',{'data-amp-evt-sig':"module_name,category_name,module_type"}):
        raku_name.append(i.find("span" ,{"class":"cb-cats-list-title"}).get_text())
        raku_rate.append(i.find("span",{"class":"cb-cats-list-amt cb"}).get_text())
    raku_df = pd.DataFrame(list(zip(raku_name,raku_rate)),columns=['name','rate'])
    raku_df["date"] = date.today().strftime('%Y-%m-%d')
    # '4.0%' -> 0.04
    raku_df["rate"] = raku_df["rate"].apply(lambda x: float(x.strip('%'))*0.01)
    raku_df["source"] = 'Rakuten'
    return raku_df
# In[198]:
# In[199]:
def get_topcashback():
    """Scrape TripAdvisor cashback categories and rates from TopCashback.

    Returns a DataFrame with columns [name, rate, date, source] where
    `rate` is a fraction, `date` is today's date in ISO format, and
    `source` is the literal 'Topcashback'.  The subscription row is
    dropped because its rate text is not a plain percentage.
    """
    url = 'https://www.topcashback.com/tripadvisor-hotels/'
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    name = []
    for i in soup.find_all('div',{"class":"gecko-small-text-wrap"}):
        name.append(i.get_text().strip())
    cash = []
    for i in soup.find_all('span',{"class":"cashback-desc"}):
        cash.append(i.get_text().strip())
    # Names and rates come from two parallel element lists; zip assumes
    # they stay aligned -- TODO confirm against the live page markup.
    df = pd.DataFrame(list(zip(name,cash)),columns=['name','rate'])
    df["date"] = date.today().strftime('%Y-%m-%d')
    df["source"] = 'Topcashback'
    df = df[df.name!= 'TripAdvisor Plus Subscription']
    # '4.0%' -> 0.04
    df["rate"] = df["rate"].apply(lambda x: float(x.strip('%'))*0.01)
    df = df.reset_index(drop=True)
    return df
# In[ ]:
if __name__ == '__main__':
raku_df = get_rakuten()
update(raku_df)
topcash_df = get_topcashback()
update(topcash_df)
| Popoben240/cashback | shopback.py | shopback.py | py | 2,439 | python | en | code | 0 | github-code | 90 |
16830417920 | import unittest
import zipfile
from StringIO import StringIO
import tempfile
import shutil
import boto3
import dill
import moto
import mock
import pip
from easy_lambda.deployment import Lambda, DeploymentPackage
@moto.mock_lambda
class Test(unittest.TestCase):
    """Tests Lambda deployment against a moto-mocked AWS Lambda backend."""
    def setUp(self):
        super(Test, self).setUp()
        # The moto class decorator intercepts this client's API calls.
        self.client = boto3.client('lambda', region_name='us-west-2')
    @mock.patch('easy_lambda.deployment.DeploymentPackage.copy_env')
    def test_create(self, mock):
        """A decorated function round-trips through the zip package and
        can be created as a Lambda function (copy_env is patched out so
        no virtualenv is bundled)."""
        value = 1
        function_name = 'test_function'
        @Lambda(name=function_name, bucket='test', key='test', client=self.client)
        def foo():
            return value
        package = DeploymentPackage(foo)
        # Unpickle the function back out of the in-memory zip and run it.
        zfp = zipfile.ZipFile(StringIO(package.zip_bytes(foo.dumped_code)), "r")
        func = dill.load(zfp.open('.lambda.dump'))
        self.assertEqual(func(), value)
        resp_create = foo.create()
        self.assertEqual(resp_create['FunctionName'], function_name)
        # moto doesn't support ZipFile-only lambda deployments, while
        # aws doesn't allow other arguments when specifying the ZipFile argument
        #resp_get = foo.get()
        #self.assertEqual(resp_get['Configuration']['FunctionName'], function_name)
@unittest.skip('slow')
class PackageTestCase(unittest.TestCase):
    """Exercises DeploymentPackage.copy_env against a real pip install.

    Skipped by default because it downloads 'requests' into a temp dir.
    """
    def setUp(self):
        self.venv = tempfile.mkdtemp()
        # Install into a target dir to fake a virtualenv layout
        # <http://stackoverflow.com/a/19404371/2183102>
        pip.main(['install', 'requests', '-t', self.venv])
        shutil.copytree(self.venv, self.venv + '/lib/python2.7/site-packages')
    def test_copy_env(self):
        """copy_env should place the fake env's packages into the zip."""
        package = DeploymentPackage(None, None, None)
        with zipfile.ZipFile(StringIO(), 'w', zipfile.ZIP_DEFLATED) as dest:
            package.copy_env(dest, venv_path=self.venv)
        self.assertTrue(dest.namelist(), 'For now just test that it is not empty')
    def tearDown(self):
        shutil.rmtree(self.venv)
| ZhukovAlexander/lambdify | tests/test_lambda.py | test_lambda.py | py | 1,951 | python | en | code | 50 | github-code | 90 |
9914016198 | # -*- coding: UTF-8 -*-
# Interstitial Error Detector
# Version 0.2, 2013-08-28
# Copyright (c) 2013 AudioVisual Preservation Solutions
# All rights reserved.
# Released under the Apache license, v. 2.0
# Created on May 14, 2014
# @author: Furqan Wasi <furqan@avpreserve.com>
from PySide.QtCore import *
from PySide.QtGui import *
from time import strftime, time
from math import floor
from Core import DirsHandlerCore, SharedApp
from GUI import DAWDirsGUI, ReferenceDirsGUI
"""
Interstitial Directory GUI Manager
"""
class DirsHandlerGUI(QWidget):
    """
    Application Directories Handler GUI Class

    Builds and manages the two directory group boxes (DAW directories
    and Reference directories), each holding up to 7 entries, and hands
    the selected paths to DirsHandlerCore when the scan is launched.
    Python 2 / PySide code (uses xrange and PySide imports).
    """
    def __init__(self):
        """
        Constructor: creates the core handler, one initial DAW and one
        initial Reference directory widget, and their form layouts.
        """
        super(DirsHandlerGUI, self).__init__()
        self.Interstitial = SharedApp.SharedApp.App
        self.dirs_handler_core = DirsHandlerCore.DirsHandlerCore()
        self.daw_dirs_gui = {}
        self.reference_dirs_gui = {}
        # Both sections start with a single directory row each.
        self.number_of_daw_dirs = 1
        self.number_of_ref_dirs = 1
        self.daw_group_box = QGroupBox(self.Interstitial.label['DAWDir'])
        self.ref_group_box = QGroupBox(self.Interstitial.label['refDir'])
        for index_daw in xrange(0, self.number_of_daw_dirs):
            self.daw_dirs_gui[index_daw] = DAWDirsGUI.DAWDirsGUI()
        for index_ref in xrange(0, self.number_of_ref_dirs):
            self.reference_dirs_gui[index_ref] = ReferenceDirsGUI.ReferenceDirsGUI()
        self.daw_qh_box = QFormLayout()
        self.ref_qh_box = QFormLayout()
        # Linux spacing differs from the other platforms' default.
        if self.Interstitial.Configuration.getOsType() == 'linux':
            self.daw_qh_box.setSpacing(0)
            self.ref_qh_box.setSpacing(0)
    def createDAWDirectories(self):
        """
        Create DAW Directories
        @return: the populated DAW QGroupBox (delegates to setupDAWGUI)
        """
        return self.setupDAWGUI()
    def setupDAWGUI(self):
        """
        Setup DAW Box Layout: directory rows, hidden "removing" label
        and the "add new" button.
        @return: daw_group_box
        """
        self.loading_daw_label = QLabel('Please Wait.....Removing DAW Directory!')
        self.loading_daw_label.setHidden(True)
        self.daw_qh_box.addWidget(self.loading_daw_label)
        # Create & Load Widgets and Triggers for DAW rows
        for index_daw in xrange(0, self.number_of_daw_dirs):
            self.daw_dirs_gui[index_daw].createDirectoriesInfo(self.number_of_daw_dirs)
            self.daw_dirs_gui[index_daw].setTriggers()
            self.daw_qh_box = self.daw_dirs_gui[index_daw].AddWidgets(self.daw_qh_box)
        # Load New Add Buttons Widget , Trigger
        self.add_new_daw = QPushButton(self.Interstitial.label['addNewDAW'], self)
        self.add_new_daw.clicked.connect(self.addNewDawDirectory)
        # Windows needs a narrower button than the other platforms.
        if self.Interstitial.Configuration.getOsType() == 'windows':
            self.add_new_daw.setMaximumSize(140, 30)
            self.add_new_daw.setMinimumSize(140, 30)
        else:
            self.add_new_daw.setMaximumSize(200, 30)
            self.add_new_daw.setMinimumSize(200, 30)
        self.daw_qh_box.addWidget(self.add_new_daw)
        self.daw_group_box.setLayout(self.daw_qh_box)
        return self.daw_group_box
    def setupReferenceGUI(self):
        """
        Setup Reference GUI Box Layout: directory rows, hidden
        "removing" label and the "add new" button.
        @return: ref_group_box
        """
        self.loading_ref_label = QLabel('Please Wait.....Removing Reference Directory!')
        self.loading_ref_label.setHidden(True)
        self.ref_qh_box.addWidget(self.loading_ref_label)
        # Create & Load Widgets and Triggers for Reference rows
        for index_ref in xrange(0, self.number_of_ref_dirs):
            self.reference_dirs_gui[index_ref].createDirectoriesInfo(self.number_of_ref_dirs)
            self.reference_dirs_gui[index_ref].setTriggers()
            self.ref_qh_box = self.reference_dirs_gui[index_ref].AddWidgets(self.ref_qh_box)
        # Load New Add Buttons Widget , Trigger
        self.add_new_ref = QPushButton(self.Interstitial.label['addNewRef'], self)
        self.add_new_ref.clicked.connect(self.addNewReferenceDirectory)
        self.ref_qh_box.addWidget(self.add_new_ref)
        self.add_new_ref.setMaximumSize(220, 30)
        self.add_new_ref.setMinimumSize(220, 30)
        self.ref_group_box.setLayout(self.ref_qh_box)
        return self.ref_group_box
    def createRefDirectories(self):
        """
        Create Reference Directories
        @return: the populated Reference QGroupBox (delegates to
        setupReferenceGUI)
        """
        return self.setupReferenceGUI()
    def addNewDawDirectory(self):
        """
        Add New DAW Directory row (capped at 7; the button is disabled
        once the cap is reached).
        @return: None
        """
        self.daw_dirs_gui[self.number_of_daw_dirs] = DAWDirsGUI.DAWDirsGUI()
        self.daw_dirs_gui[self.number_of_daw_dirs].createDirectoriesInfo()
        self.daw_dirs_gui[self.number_of_daw_dirs].setTriggers()
        self.daw_qh_box = self.daw_dirs_gui[self.number_of_daw_dirs].AddWidgets(self.daw_qh_box)
        # Adding Space for new Directories in Group Box
        self.daw_group_box.setLayout(self.daw_qh_box)
        self.number_of_daw_dirs += 1
        # Keep the core's counter in sync with the GUI.
        self.dirs_handler_core.number_of_daw_core = self.number_of_daw_dirs
        if self.number_of_daw_dirs == 7:
            self.add_new_daw.setDisabled(True)
        QCoreApplication.processEvents()
    def addNewReferenceDirectory(self):
        """
        Add New Reference Directory row (capped at 7; the button is
        disabled once the cap is reached).
        @return: None
        """
        self.reference_dirs_gui[self.number_of_ref_dirs] = ReferenceDirsGUI.ReferenceDirsGUI()
        self.reference_dirs_gui[self.number_of_ref_dirs].createDirectoriesInfo()
        self.reference_dirs_gui[self.number_of_ref_dirs].setTriggers()
        self.ref_qh_box = self.reference_dirs_gui[self.number_of_ref_dirs].AddWidgets(self.ref_qh_box)
        # Adding Space for new Directories in Group Box
        self.ref_group_box.setLayout(self.ref_qh_box)
        self.number_of_ref_dirs += 1
        # Keep the core's counter in sync with the GUI.
        self.dirs_handler_core.number_of_ref_core = self.number_of_ref_dirs
        if self.number_of_ref_dirs == 7:
            self.add_new_ref.setDisabled(True)
        QCoreApplication.processEvents()
    def RunExecutor(self, manifest_path):
        """
        Run Executor To Test Audio File: copies every DAW/Reference path
        from the GUI widgets into the core handler, then launches the
        scanner.
        @return: None
        """
        self.dirs_handler_core.setNumberOfDawCore(self.number_of_daw_dirs)
        self.dirs_handler_core.setNumberOfRefCore(self.number_of_ref_dirs)
        for index_daw in xrange(0, self.number_of_daw_dirs):
            for index_ref in xrange(0, self.number_of_ref_dirs):
                # Set Directories Core Information to be used for executor
                self.dirs_handler_core.setDawDirsCore(self.daw_dirs_gui[index_daw].getGuiDawText(), index_daw)
                self.dirs_handler_core.setRefDirsCore(self.reference_dirs_gui[index_ref].getGuiRefText(), index_ref)
        # Launch The Scanner to Test Audio Files
        self.dirs_handler_core.run_executor(manifest_path, QCoreApplication.instance())
| WeAreAVP/interstitial | GUI/DirsHandlerGUI.py | DirsHandlerGUI.py | py | 7,085 | python | en | code | 9 | github-code | 90 |
74785440937 | from collections import defaultdict
class Graph():
    """Undirected graph whose node labels double as weights.

    findWeight(node) reports the heaviest sum of labels reachable
    through any single neighbour of `node` (each neighbour's whole
    unvisited component, computed by findWeight2).
    """
    def __init__(self):
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        # Undirected: record the edge in both adjacency lists.
        self.graph[u].append(v)
        self.graph[v].append(u)

    def findWeight(self, node, visited):
        """Return the maximum component weight over node's neighbours,
        or -9999999 if node has no neighbours at all.

        A neighbour already visited contributes weight 0 to the max
        (mirrors the original accumulator reset per neighbour)."""
        visited[node] = True
        best = -9999999
        for neighbour in self.graph[node]:
            weight = 0
            if neighbour not in visited:
                weight += self.findWeight2(neighbour, visited)
            best = max(weight, best)
        return best

    def findWeight2(self, node, visited):
        """Sum of labels over the unvisited component containing node."""
        visited[node] = True
        total = node
        for neighbour in self.graph[node]:
            if neighbour not in visited:
                total += self.findWeight2(neighbour, visited)
        return total
var=Graph()
var.addEdge(1,2)
var.addEdge(2,3)
var.addEdge(3,6)
var.addEdge(2,8)
print(var.findWeight(2,{})) | codejigglers/leetcodes | preparation/Graphs/city_problem_find_max_of_all.py | city_problem_find_max_of_all.py | py | 910 | python | en | code | 0 | github-code | 90 |
4949564708 | import sys
from collections import deque
input = sys.stdin.readline
d = [(0,1),(0,-1),(1,0), (-1,0)]
if __name__ == '__main__':
t = int(input())
for _ in range(t):
v,e = map(int, input().split())
print(2-v+e) | sumi-0011/algo | 백준/Bronze/10569. 다면체/다면체.py | 다면체.py | py | 250 | python | en | code | 0 | github-code | 90 |
36678324523 | import responses
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from unicoremc.managers.infrastructure import (
GeneralInfrastructureManager, InfrastructureError)
from unicoremc.models import Project, AppType, publish_to_websocket
from unicoremc.tests.utils import setup_responses_for_logdriver
class GeneralInfrastructureManagerTest(TestCase):
    """Tests GeneralInfrastructureManager and a Project's infra_manager
    against HTTP fixtures registered by setup_responses_for_logdriver
    (Marathon/Mesos endpoints are mocked via the `responses` library)."""
    def setUp(self):
        # Saving a Project would otherwise publish to a websocket;
        # detach the signal for the duration of the test.
        post_save.disconnect(publish_to_websocket, sender=Project)
        User = get_user_model()
        user = User.objects.create_user(
            'tester', 'test@example.org', 'tester')
        app_type = AppType._for(
            'gem', 'Girl Effect', 'unicore-cms',
            'universalcore/unicore-cms-gem')
        self.project = Project(application_type=app_type,
                               country='ZA', owner=user)
        self.project.save()
        # Registers mocked Marathon/Mesos responses for this project.
        setup_responses_for_logdriver(self.project)
        self.general_im = GeneralInfrastructureManager()
        self.project_im = self.project.infra_manager
    def tearDown(self):
        # Restore the signal handler detached in setUp().
        post_save.connect(publish_to_websocket, sender=Project)
    @responses.activate
    def test_get_marathon_app(self):
        """Marathon app lookup returns the app keyed by this project's id."""
        app = self.general_im.get_marathon_app(self.project.app_id)
        self.assertEqual(app['id'], '/%s' % (self.project.app_id,))
    @responses.activate
    def test_get_marathon_app_tasks(self):
        """Task listing exposes appId, task id, ports and host."""
        [task] = self.general_im.get_marathon_app_tasks(self.project.app_id)
        self.assertEqual(task['appId'], '/%s' % (self.project.app_id,))
        self.assertEqual(
            task['id'], '%s.the-task-id' % (self.project.app_id,))
        self.assertEqual(task['ports'], [8898])
        self.assertEqual(task['host'], 'worker-machine-1')
    @responses.activate
    def test_get_marathon_info(self):
        """Marathon info endpoint yields framework name and id."""
        info = self.general_im.get_marathon_info()
        self.assertEqual(info['name'], 'marathon')
        self.assertEqual(info['frameworkId'], 'the-framework-id')
    @responses.activate
    def test_get_worker_info(self):
        """Worker lookup by hostname returns the mocked worker record."""
        worker = self.general_im.get_worker_info('worker-machine-1')
        self.assertEqual(worker['id'], 'worker-machine-id')
    @responses.activate
    def test_get_app_log_info(self):
        """App log info combines task host/id with the Mesos sandbox dir."""
        [info] = self.general_im.get_app_log_info(self.project.app_id)
        self.assertEqual(
            info,
            {
                'task_host': 'worker-machine-1',
                'task_id': '%s.the-task-id' % (self.project.app_id,),
                'task_dir': (
                    'worker-machine-id/frameworks/the-framework-id'
                    '/executors/%s.the-task-id/runs/latest') % (
                        self.project.app_id,),
            }
        )
    @responses.activate
    def test_get_task_log_info(self):
        """Per-task log info resolves the same sandbox path for a known
        task id and host."""
        info = self.general_im.get_task_log_info(
            self.project.app_id,
            '%s.the-task-id' % (self.project.app_id,),
            'worker-machine-1')
        self.assertEqual(
            info,
            {
                'task_host': 'worker-machine-1',
                'task_id': '%s.the-task-id' % (self.project.app_id,),
                'task_dir': (
                    'worker-machine-id/frameworks/the-framework-id'
                    '/executors/%s.the-task-id/runs/latest') % (
                        self.project.app_id,),
            }
        )
    @responses.activate
    def test_project_infra_manager_get_marathon_app(self):
        """Project-scoped manager finds its own Marathon app."""
        app = self.project_im.get_project_marathon_app()
        self.assertEqual(app['id'], '/%s' % (self.project.app_id,))
    @responses.activate
    def test_project_infra_manager_get_project_log_info(self):
        """Project-scoped log info matches the general manager's result."""
        [info] = self.project_im.get_project_log_info()
        self.assertEqual(
            info,
            {
                'task_host': 'worker-machine-1',
                'task_id': '%s.the-task-id' % (self.project.app_id,),
                'task_dir': (
                    'worker-machine-id/frameworks/the-framework-id'
                    '/executors/%s.the-task-id/runs/latest') % (
                        self.project.app_id,),
            }
        )
    @responses.activate
    def test_project_infra_manager_get_project_task_log_info(self):
        """Project-scoped per-task log info resolves the sandbox path."""
        info = self.project_im.get_project_task_log_info(
            '%s.the-task-id' % (self.project.app_id,))
        self.assertEqual(
            info,
            {
                'task_host': 'worker-machine-1',
                'task_id': '%s.the-task-id' % (self.project.app_id,),
                'task_dir': (
                    'worker-machine-id/frameworks/the-framework-id'
                    '/executors/%s.the-task-id/runs/latest') % (
                        self.project.app_id,),
            }
        )
    @responses.activate
    def test_project_infra_manager_get_project_non_existent(self):
        """Unknown task ids raise InfrastructureError."""
        self.assertRaises(
            InfrastructureError,
            self.project_im.get_project_task_log_info, 'non-existing-task-id')
| universalcore/unicore-mc | unicoremc/tests/test_infrastructure_manager.py | test_infrastructure_manager.py | py | 5,006 | python | en | code | 0 | github-code | 90 |
17544018347 | import numpy as np
def sk_50_rand_params():
    """Hyperparameters for a random SK graph with size N=50."""
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 2500,
        "cac_time_step": 0.04,
        "cac_r": 0.3,
        "cac_alpha": 0.7,
        "cac_beta": 0.25,
        "cac_gamma": 0.010,
        "cac_delta": 12,
        "cac_mu": 0.8,
        "cac_rho": 1.2,
        "cac_tau": 150,
    }
def sk_100_fc_params():
    """Hyperparameters for a fully connected SK graph with size N=100."""
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 3000,
        "cac_time_step": 0.05,
        "cac_r": -0.2,
        "cac_alpha": 0.7,
        "cac_beta": 0.3,
        "cac_gamma": 0.010,
        "cac_delta": 12,
        "cac_mu": 0.8,
        "cac_rho": 1.2,
        "cac_tau": 150,
    }
def maxcut_100_params():
    """Hyperparameters for a 50% edge density MAX-CUT instance, size N=100."""
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 2500,
        "cac_time_step": 0.04,
        "cac_r": -0.3,
        "cac_alpha": 0.7,
        "cac_beta": 0.25,
        "cac_gamma": 0.010,
        "cac_delta": 12,
        "cac_mu": 0.8,
        "cac_rho": 1.2,
        "cac_tau": 150,
    }
def maxcut_200_params():
    """hyperparameters for 50% edge density MAX-CUT instance size N=200"""
    # The N=200 instances are tuned identically to the N=100 ones, so
    # simply reuse that parameter set.
    return maxcut_100_params()
def maxcut_500_params():
    """Hyperparameters for a 50% edge density MAX-CUT instance, size N=500.

    Fix: the original built the dict but was missing the ``return``
    statement, so callers received None.
    """
    maxcut_500_dic = {
        "num_runs": 1,
        "num_timesteps_per_run": 25000,
        "cac_time_step": 0.02,
        "cac_r": 0.9,
        "cac_alpha": 1.1,
        "cac_beta": 0.35,
        "cac_gamma": 0.0005,
        "cac_delta": 15,
        "cac_mu": 0.7,
        "cac_rho": 1,
        "cac_tau": 200
    }
    return maxcut_500_dic
def G1_params(J):
    """Hyperparameters for Stanford GSet problem 1 (G1).

    beta, gamma and tau scale with the problem size N (rows of J) and
    the total absolute coupling strength of the interaction matrix J.
    """
    n_spins = J.shape[0]
    coupling_total = np.sum(np.abs(J))
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 200000,
        "cac_r": 0.2,
        "cac_alpha": 1.0,
        "cac_beta": 3 * n_spins / coupling_total,
        "cac_gamma": 0.075 / n_spins,
        "cac_delta": 9,
        "cac_rho": 1,
        "cac_tau": 9 * n_spins,
    }
def G2_params(J):
    """Hyperparameters for Stanford GSet problem 2 (G2).

    beta, gamma and tau scale with the problem size N (rows of J) and
    the total absolute coupling strength of the interaction matrix J.
    """
    n_spins = J.shape[0]
    coupling_total = np.sum(np.abs(J))
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 200000,
        "cac_r": 0.2,
        "cac_alpha": 1.0,
        "cac_beta": 3 * n_spins / coupling_total,
        "cac_gamma": 0.075 / n_spins,
        "cac_delta": 7,
        "cac_mu": 1,
        "cac_rho": 1,
        "cac_tau": 7 * n_spins,
    }
def G42_params(J):
    """Hyperparameters for Stanford GSet problem 42 (G42).

    beta, gamma and tau scale with the problem size N (rows of J) and
    the total absolute coupling strength of the interaction matrix J.
    """
    n_spins = J.shape[0]
    coupling_total = np.sum(np.abs(J))
    return {
        "num_runs": 1,
        "num_timesteps_per_run": 200000,
        "cac_r": 0.1,
        "cac_alpha": 1.0,
        "cac_beta": 3 * n_spins / coupling_total,
        "cac_gamma": 0.065 / n_spins,
        "cac_delta": 7,
        "cac_mu": 1,
        "cac_rho": 1,
        "cac_tau": 7 * n_spins,
    }
| mcmahon-lab/cim-optimizer | cim_optimizer/optimal_params.py | optimal_params.py | py | 3,248 | python | en | code | 21 | github-code | 90 |
73616381098 | # coding=utf-8
# ---------------------------------------------------------------
# Desenvolvedor: Arannã Sousa Santos
# Mês: 12
# Ano: 2015
# Projeto: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
import logging
import sys
import unittest
from decimal import Decimal
from ...core.base_classes import TagDataHoraUTC
class ClasseAssinaturaRequisicaoTest(unittest.TestCase):
    """Checks that ClasseAssinaturaRequisicao parses a subscription
    (preApprovalRequest) XML and exposes every field with the expected
    value.  Log messages are intentionally in Portuguese (project
    convention); the XML fixture is defined in setUp."""
    def test_parse_xml(self):
        """Parse the fixture XML and verify each mapped attribute."""
        from pagseguro_xml.assinatura.v2.classes import ClasseAssinaturaRequisicao
        log = self.logger.getLogger(u'%s.%s' % (__package__, self.__class__.__name__))
        log.debug(u'Criando instancia de "ClasseAssinaturaRequisicao"')
        result = ClasseAssinaturaRequisicao()
        log.debug(u'Gerando PARSE do xml')
        result.xml = self.xml
        log.debug(u'Quantidade de alertas no "parse": %s' % len(result.alertas))
        for a in result.alertas:
            log.debug(u'Alerta: %s' % a)
        data = TagDataHoraUTC()
        # data.valor = u'2011-02-16T20:14:35.000-02:00'
        log.debug(u'Testando valores da "preApprovalRequest"')
        self.assertEqual(result.redirectURL.valor, u'http://www.seusite.com.br/retorno.php')
        self.assertEqual(result.reviewURL.valor, u'http://www.seusite.com.br/revisao.php')
        self.assertEqual(result.reference.valor, u'REF1234')
        self.assertEqual(result.receiver.email.valor, u'nao@sei.com')
        self.assertEqual(result.sender.email.valor, u'cliente@uol.com.br')
        self.assertEqual(result.sender.name.valor, u'Nome do Cliente')
        self.assertEqual(result.sender.phone.areaCode.valor, 11)
        self.assertEqual(result.sender.phone.number.valor, 56273440)
        self.assertEqual(result.sender.address.street.valor, u'Avenida Brigadeiro Faria Lima')
        self.assertEqual(result.sender.address.number.valor, u'1384')
        self.assertEqual(result.sender.address.complement.valor, u'Andar')
        self.assertEqual(result.sender.address.district.valor, u'Jardim Paulistano')
        self.assertEqual(result.sender.address.postalCode.valor, u'01452002')
        self.assertEqual(result.sender.address.city.valor, u'São Paulo')
        self.assertEqual(result.sender.address.state.valor, u'SP')
        self.assertEqual(result.sender.address.country.valor, u'BRA')
        self.assertEqual(result.preApproval.charge.valor, u'auto')
        self.assertEqual(result.preApproval.name.valor, u'Seguro contra roubo do Notebook')
        self.assertEqual(result.preApproval.details.valor, u'Todo dia 28 será cobrado o valor de R$100,00 referente ao seguro contra roubo de Notebook')
        self.assertEqual(result.preApproval.amountPerPayment.valor, Decimal(u'100.00'))
        self.assertEqual(result.preApproval.period.valor, u'MONTHLY')
        # finalDate is compared through TagDataHoraUTC's normalisation.
        data.valor = u'2014-01-21T00:00:000-03:00'
        self.assertEqual(result.preApproval.finalDate.valor, data.valor)
        self.assertEqual(result.preApproval.maxTotalAmount.valor, Decimal(u'2400.00'))
        log.debug(u'Valores da "preApprovalRequest" OK')
    def setUp(self):
        # Route test logging to stderr at DEBUG level.
        logging.basicConfig(stream=sys.stderr)
        logging.getLogger(u'%s.%s' % (__package__, self.__class__.__name__)).setLevel(logging.DEBUG)
        self.logger = logging
        self.xml = u'''<?xml version="1.0" encoding="ISO-8859-1" standalone="yes"?>
        <preApprovalRequest>
            <redirectURL>http://www.seusite.com.br/retorno.php</redirectURL>
            <reviewURL>http://www.seusite.com.br/revisao.php</reviewURL>
            <reference>REF1234</reference>
            <receiver>
                <email>nao@sei.com</email>
            </receiver>
            <sender>
                <name>Nome do Cliente</name>
                <email>cliente@uol.com.br</email>
                <phone>
                    <areaCode>11</areaCode>
                    <number>56273440</number>
                </phone>
                <address>
                    <street>Avenida Brigadeiro Faria Lima</street>
                    <number>1384</number>
                    <complement>Andar</complement>
                    <district>Jardim Paulistano</district>
                    <postalCode>01452002</postalCode>
                    <city>São Paulo</city>
                    <state>SP</state>
                    <country>BRA</country>
                </address>
            </sender>
            <preApproval>
                <charge>auto</charge>
                <name>Seguro contra roubo do Notebook</name>
                <details>Todo dia 28 será cobrado o valor de R$100,00 referente ao seguro contra roubo de Notebook</details>
                <amountPerPayment>100.00</amountPerPayment>
                <period>MONTHLY</period>
                <finalDate>2014-01-21T00:00:000-03:00</finalDate>
                <maxTotalAmount>2400.00</maxTotalAmount>
            </preApproval>
        </preApprovalRequest>'''
class ClasseAssinaturaRespostaTest(unittest.TestCase):
    def test_parse_xml(self):
        """Parse the response-fixture XML (self.xml, built in setUp) and
        verify the transaction code and date fields of
        ClasseAssinaturaResposta."""
        from pagseguro_xml.assinatura.v2.classes import ClasseAssinaturaResposta
        log = self.logger.getLogger(u'%s.%s' % (__package__, self.__class__.__name__))
        log.debug(u'Criando instancia de "ClasseAssinaturaResposta"')
        result = ClasseAssinaturaResposta()
        log.debug(u'Gerando PARSE do xml')
        result.xml = self.xml
        log.debug(u'Quantidade de alertas no "parse": %s' % len(result.alertas))
        for a in result.alertas:
            log.debug(u'Alerta: %s' % a)
        # Date comparison goes through TagDataHoraUTC's normalisation.
        data = TagDataHoraUTC()
        data.valor = u'2014-01-21T00:00:00.000-03:00'
        log.debug(u'Testando valores da "preApprovalRequest"')
        self.assertEqual(result.code.valor, u'DC2DAC98FBFBDD1554493F94E85FAE05')
        self.assertEqual(result.date.valor, data.valor)
        log.debug(u'Valores da "preApprovalRequest" OK')
def setUp(self):
logging.basicConfig(stream=sys.stderr)
logging.getLogger(u'%s.%s' % (__package__, self.__class__.__name__)).setLevel(logging.DEBUG)
self.logger = logging
self.xml = u'''<?xml version="1.0" encoding="ISO-8859-1" standalone="yes"?>
<preApprovalRequest>
<code>DC2DAC98FBFBDD1554493F94E85FAE05</code>
<date>2014-01-21T00:00:00.000-03:00</date>
</preApprovalRequest>'''
| arannasousa/pagseguro_xml | pagseguro_xml/tests/test_classes_assinatura/test_requisicao_v2.py | test_requisicao_v2.py | py | 6,104 | python | en | code | 0 | github-code | 90 |
5250675231 | import argparse
from utils.atari_util import *
from utils.utils import *
from models.self_attn_cnn_gru import *
import torch
import yaml
import gym.wrappers
import os
def parse_args() -> argparse.Namespace:
    """Parse command-line options for this evaluation script.

    Returns:
        argparse.Namespace with a single attribute, ``config_path``
        (path to the YAML config; ``None`` when not supplied).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--config_path", type=str, default=None, help="Path to the config.")
    return cli.parse_args()
if __name__ == "__main__":
    args = parse_args()

    # safe_load: the bare yaml.load(f) form is deprecated and constructs
    # arbitrary Python objects from untrusted files.
    with open(args.config_path, "r") as f:
        config = yaml.safe_load(f)["params"]

    # Build the (possibly resized) Atari environment.
    env = make_env(config["common"]["env_name"],
                   config["common"]["height"],
                   config["common"]["width"]
                   )

    # Fall back to the current directory when no model path is configured.
    if config["paths"]["trained_model_path_to_test"] is None:
        print("trained model's path is empty - check it!")
        config["paths"]["trained_model_path_to_test"] = "./"

    model_path: str = os.path.join(config["paths"]["trained_model_path_to_test"], config["experiment"] + ".pt")
    videos_path: str = config["paths"]["test_stupid_videos_path"]

    if os.path.isfile(model_path):
        # A fully trained model exists: load it and record "trained" videos.
        agent = torch.load(model_path)
        videos_path = config["paths"]["test_trained_videos_path"]
    else:
        # No saved model: build a fresh agent from the config.
        agent = SelfAttnRecurrentAgent(obs_shape=env.observation_space.shape,
                                       n_actions=env.action_space.n,
                                       linear_dim=config["model"]["linear_dim"],
                                       conv_filters_num=config["model"]["conv_filters_num"],
                                       hidden_dim=config["model"]["hidden_dim"]
                                       )
        # BUG FIX: the original checked the misspelled key "trained_weigths_path"
        # but loaded from "trained_weights_path", so one of the two lookups
        # always raised KeyError. Use one spelling, tolerating a missing key.
        weights_path = config["paths"].get("trained_weights_path")
        if weights_path:
            agent.load_state_dict(torch.load(weights_path))

    # Record evaluation episodes to videos_path.
    with gym.wrappers.Monitor(env=env,
                              directory=videos_path,
                              force=True) as env_monitor:
        final_rewards = evaluate(agent, env_monitor, n_games=config["test"]["game_count"])
10865477422 | """
documents.py
Created by Zongsi Zhang, 09/12/2017
"""
import copy
import math
import operator
class Document(object):
    """
    One Document corresponds to a web page; it stores a dictionary of word counts.

    Attributes:
        term_dict: a dictionary mapping each term to its count in the page
        links: a list of URLs the current page points to
    """
    # BUG FIX: the original defined __init__ twice; the second definition
    # silently shadowed the first, so the list-based constructor was dead
    # code. The merged constructor below accepts either form. The mutable
    # default ``links=[]`` is also replaced with the None idiom.

    def __init__(self, t_dict, links=None):
        """Constructor.

        Args:
            t_dict: either a dict of term -> count, or a list of terms
                (counted automatically)
            links: a list of str URLs (optional)

        Raises:
            TypeError: when a parameter type does not match
        """
        if isinstance(t_dict, dict):
            self.term_dict = copy.deepcopy(t_dict)
        elif isinstance(t_dict, list):
            self.term_dict = {x: t_dict.count(x) for x in t_dict}
        else:
            raise TypeError('t_dict must be of type dict or list')
        if links is None:
            links = []
        if not isinstance(links, list):
            raise TypeError('links must be of type list')
        self.links = copy.deepcopy(links)

    def set_links(self, links):
        """Set the outgoing links of this document.

        Args:
            links: a list of str URLs

        Raises:
            TypeError: when ``links`` is not a list
        """
        if not isinstance(links, list):
            raise TypeError('links must be of type list')
        self.links = copy.deepcopy(links)
class DocumentSet(object):
    """Holds the page under analysis plus the environment pages it links to.

    Attributes:
        main_doc: Document to be analyzed
        env_docs: list of Documents for the pages linked from the main page
    """

    def __init__(self, main_doc):
        """Construct a DocumentSet around its main document.

        Args:
            main_doc: Document object of the main page

        Raises:
            TypeError: when ``main_doc`` is not a Document
        """
        if not isinstance(main_doc, Document):
            raise TypeError('term must be of type Document')
        self.main_doc = main_doc
        self.env_docs = []

    def add_env_page(self, env_page):
        """Append one environment page.

        Args:
            env_page: a Document for one linked page

        Raises:
            TypeError: when ``env_page`` is not a Document
        """
        if not isinstance(env_page, Document):
            raise TypeError('env_page must be of type Document')
        self.env_docs.append(env_page)

    def __count_term_in_env(self, term):
        """Inverse document frequency of ``term`` over the environment docs.

        Uses add-one smoothing on both the document total and the hit count;
        returns 1.0 when there are no environment documents at all.

        Args:
            term: a str

        Returns:
            float idf value

        Raises:
            TypeError: when ``term`` is not a str
        """
        if not isinstance(term, str):
            raise TypeError('term must be of type str')
        total_cnt = float(len(self.env_docs)) + 1.0
        if total_cnt == 1.0:
            return 1.0
        # 1.0 base + one per environment doc containing the term.
        containing = 1.0 + sum(1.0 for doc in self.env_docs if term in doc.term_dict)
        return math.log(total_cnt / containing)

    def statistic_tf(self):
        """Rank the main document's terms by raw frequency.

        Returns:
            list of (term, count) pairs, highest count first
        """
        return sorted(self.main_doc.term_dict.items(), key=lambda kv: kv[1], reverse=True)

    def statistic_tfidf(self):
        """Rank the main document's terms by tf-idf.

        Returns:
            list of (term, score) pairs, highest score first
        """
        scores = {term: freq * self.__count_term_in_env(term)
                  for term, freq in self.main_doc.term_dict.items()}
        return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
| zongsizhang/TopicExtracter | documents.py | documents.py | py | 3,964 | python | en | code | 0 | github-code | 90 |
24726867189 | # which is a neural network model for the sentiment evaluation, which based on a deeply cleaned token list
# deprecated
# begin crafting the neural models
# in order to represent a meaningful model, a lower-level framework should be used
# File names of the pickled artifacts this module reads/writes.
TRAIN_PICK='train.pickle';  # raw training set
TEST_PICK='test.pickle';  # raw test set
NORM_TRAIN_PICK='norm_train.pickle';  # normalized (index-encoded) training set
NORM_TEST_PICK='norm_test.pickle';  # normalized (index-encoded) test set
VOCAB_PICK='vocab.pickle'; # for dumping training vocabulary
REP_RULE_PICK='rule.pickle'; # for dumping training replace rule
FREQ_DIST_PICK='freq.pickle'; # for dumping training freq list
NORMAL_MAP_PICK='mapping.pickle'; # for dumping indexing of vocabulary
INVERSE_MAP_PICK='reversed_mapping.pickle'; # for dumping the inverted dictionary of the vocabulary
# Keras ModelCheckpoint path template (epoch number and validation loss).
MODEL_PATH='record/weights.{epoch:02d}-{val_loss:.2f}.hdf5';
import os
import keras
# which should still use a relatively lower-level api to do the manipulation
from keras.models import Model
from keras.layers import Dense, Input, Flatten, MaxPooling1D, TimeDistributed
from keras.layers import Embedding, Conv1D, Conv2D, AveragePooling1D, LSTM, Dropout, ZeroPadding1D, RepeatVector
from keras.preprocessing import sequence as SequenceHelper
from .utils import util_dump, util_load
from .context import DIR
# which may be done in a batch level
# a sequence of tokens
# return a value from [-1, 1]
def nn_analysis(tokens):
    """Return a sentiment score in [-1, 1] for a token sequence.

    Stub implementation: always reports a neutral score of 0.0.
    """
    neutral_score = 0.0
    return neutral_score
# this model not finished right now
# which return a keras neural model(not compiled)
def construct_model(vocab_size, embed_dim, low, high):
# always low=3, high=7
embedding_layer= Embedding(vocab_size+1, embed_dim, input_length=high);
seq_input = Input(shape=(high, ), dtype='int32');
x = LSTM(output_dim=64, return_sequences=True)(embedding_layer(seq_input));
x = Dropout(0.5)(x);
x = LSTM(output_dim=32)(x);
x= Dropout(0.25)(x);
pred = Dense(1, activation='tanh')(Dense(8, activation='tanh')(x));
return Model(seq_input, pred);
# trainning the model according to the
# vocab_size: the size of the vocabulary, predetermined hyper-parameter
# embed_dim: the dimension of the word vector, predetermined hyper-parameter
# low: the smallest number of sentence components
# so as high
def proc(train_set, test_set, vocab_size, embed_dim=128, low=3, high=7, _batch_size=128):
    """Pad the data, build and train the model, checkpointing every epoch.

    Args:
        train_set / test_set: dicts with 'x' (token-index sequences) and 'y'
        vocab_size: vocabulary size; index ``vocab_size`` is the pad value
        embed_dim: word-vector dimension
        low, high: min/max sentence lengths (sequences padded to ``high``)
        _batch_size: minibatch size for fitting

    Returns:
        the trained keras Model
    """
    print('Begin Padding')
    x_train = SequenceHelper.pad_sequences(train_set['x'], maxlen=high, value=vocab_size, padding='post')
    x_test = SequenceHelper.pad_sequences(test_set['x'], maxlen=high, value=vocab_size, padding='post')
    print('End Padding')
    # Save weights after every epoch, keyed by epoch number and val_loss.
    checkpoint_cb = keras.callbacks.ModelCheckpoint(MODEL_PATH, verbose=1, monitor='val_loss',
                                                    save_best_only=False, save_weights_only=False,
                                                    mode='auto', period=1)
    model = construct_model(vocab_size, embed_dim, low, high)
    model.compile(optimizer='adagrad', loss='mse', metrics=['accuracy'])
    model.summary()
    model.fit(x_train, train_set['y'], nb_epoch=10, batch_size=_batch_size,
              callbacks=[checkpoint_cb], validation_data=(x_test, test_set['y']))
    return model
if(__name__=='__main__'):
    # Load the pre-normalized (index-encoded) train/test pickles and train.
    train_set=util_load(os.path.join(DIR, NORM_TRAIN_PICK));
    test_set=util_load(os.path.join(DIR, NORM_TEST_PICK));
    # Hard-coded vocabulary size; must match the normalization step that
    # produced the pickles above — TODO confirm against the preprocessing.
    vocab_size=10000;
    proc(train_set, test_set, vocab_size);
| ravenSanstete/duality | duality/nn_model.py | nn_model.py | py | 3,278 | python | en | code | 0 | github-code | 90 |
72587044776 | N, x = map(int, input().split())
# Recursive-burger counting (AtCoder-style problem, presumably ABC115 D —
# verify against the problem statement):
#   n[i] = total layers of a level-i burger (n[i] = 2*n[i-1] + 3)
#   p[i] = patty layers of a level-i burger (p[i] = 2*p[i-1] + 1)
# Answer: patties among the bottom x layers of a level-N burger.
n = [1]
p = [1]
for i in range(1, N+1):
    n.append(n[-1] * 2 + 3)
    p.append(p[-1] * 2 + 1)
lev = N
# NOTE(review): n1 is never used below — looks like leftover scratch data.
n1 = [0, 1, 2, 3, 3]
ans = 0
# Walk down the recursive structure one level at a time, consuming x layers.
while lev >= 0:
    if lev ==0:
        # Level-0 burger is a single patty.
        ans += 1
        break
    if n[lev]//2 + 2 <= x:
        # x reaches past the middle patty: take the whole lower half
        # (bottom bun + lower sub-burger + middle patty), recurse upward.
        ans += p[lev-1] + 1
        x -= n[lev-1] + 2
    elif n[lev]//2 + 1 == x:
        # x ends exactly at the middle patty.
        ans += p[lev-1] + 1
        break
    elif x==1:break
    else: x -= 1
    lev -= 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03209/s473964939.py | s473964939.py | py | 452 | python | en | code | 0 | github-code | 90
72587044776 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from .other.layers import unetConv2
class UNet3plus(nn.Module):
    '''UNet 3+ segmentation network (full-scale skip connections).

    Every decoder stage fuses five same-resolution feature maps: pooled
    copies of the shallower encoder stages, the same-scale encoder stage,
    and upsampled copies of the deeper decoder/bottleneck stages. Each
    fused branch is reduced to ``CatChannels`` channels before concatenation.
    '''
    def __init__(self, in_channels=3, n_classes=1, feature_scale=4, is_deconv=True, is_batchnorm=True):
        # NOTE(review): is_deconv and feature_scale are stored but never used
        # elsewhere in this class.
        super(UNet3plus, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale

        # Channel widths of the five encoder stages.
        filters = [64, 128, 256, 512, 1024]

        ## -------------Encoder--------------
        self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)

        self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)

        self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)
        self.maxpool3 = nn.MaxPool2d(kernel_size=2)

        self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)
        self.maxpool4 = nn.MaxPool2d(kernel_size=2)

        self.conv5 = unetConv2(filters[3], filters[4], self.is_batchnorm)

        ## -------------Decoder--------------
        # Each of the 5 fused branches is reduced to CatChannels channels;
        # a decoder stage therefore concatenates to UpChannels channels.
        self.CatChannels = filters[0]
        self.CatBlocks = 5
        self.UpChannels = self.CatChannels * self.CatBlocks

        '''stage 4d'''
        # h1->320*320, hd4->40*40, Pooling 8 times
        self.h1_PT_hd4 = nn.MaxPool2d(8, 8, ceil_mode=True)
        self.h1_PT_hd4_conv = nn.Conv2d(filters[0], self.CatChannels, 3, padding=1)
        self.h1_PT_hd4_bn = nn.BatchNorm2d(self.CatChannels)
        # NOTE(review): this is the only ReLU built with inplace=True; all the
        # others use inplace=False — possibly unintentional inconsistency.
        self.h1_PT_hd4_relu = nn.ReLU(inplace=True)

        # h2->160*160, hd4->40*40, Pooling 4 times
        self.h2_PT_hd4 = nn.MaxPool2d(4, 4, ceil_mode=True)
        self.h2_PT_hd4_conv = nn.Conv2d(filters[1], self.CatChannels, 3, padding=1)
        self.h2_PT_hd4_bn = nn.BatchNorm2d(self.CatChannels)
        self.h2_PT_hd4_relu = nn.ReLU(inplace=False)

        # h3->80*80, hd4->40*40, Pooling 2 times
        self.h3_PT_hd4 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.h3_PT_hd4_conv = nn.Conv2d(filters[2], self.CatChannels, 3, padding=1)
        self.h3_PT_hd4_bn = nn.BatchNorm2d(self.CatChannels)
        self.h3_PT_hd4_relu = nn.ReLU(inplace=False)

        # h4->40*40, hd4->40*40, Concatenation
        self.h4_Cat_hd4_conv = nn.Conv2d(filters[3], self.CatChannels, 3, padding=1)
        self.h4_Cat_hd4_bn = nn.BatchNorm2d(self.CatChannels)
        self.h4_Cat_hd4_relu = nn.ReLU(inplace=False)

        # hd5->20*20, hd4->40*40, Upsample 2 times
        self.hd5_UT_hd4 = nn.Upsample(scale_factor=2, mode='bilinear')  # 14*14
        self.hd5_UT_hd4_conv = nn.Conv2d(filters[4], self.CatChannels, 3, padding=1)
        self.hd5_UT_hd4_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd5_UT_hd4_relu = nn.ReLU(inplace=False)

        # fusion(h1_PT_hd4, h2_PT_hd4, h3_PT_hd4, h4_Cat_hd4, hd5_UT_hd4)
        self.conv4d_1 = nn.Conv2d(self.UpChannels, self.UpChannels, 3, padding=1)  # 16
        self.bn4d_1 = nn.BatchNorm2d(self.UpChannels)
        self.relu4d_1 = nn.ReLU(inplace=False)

        '''stage 3d'''
        # h1->320*320, hd3->80*80, Pooling 4 times
        self.h1_PT_hd3 = nn.MaxPool2d(4, 4, ceil_mode=True)
        self.h1_PT_hd3_conv = nn.Conv2d(filters[0], self.CatChannels, 3, padding=1)
        self.h1_PT_hd3_bn = nn.BatchNorm2d(self.CatChannels)
        self.h1_PT_hd3_relu = nn.ReLU(inplace=False)

        # h2->160*160, hd3->80*80, Pooling 2 times
        self.h2_PT_hd3 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.h2_PT_hd3_conv = nn.Conv2d(filters[1], self.CatChannels, 3, padding=1)
        self.h2_PT_hd3_bn = nn.BatchNorm2d(self.CatChannels)
        self.h2_PT_hd3_relu = nn.ReLU(inplace=False)

        # h3->80*80, hd3->80*80, Concatenation
        self.h3_Cat_hd3_conv = nn.Conv2d(filters[2], self.CatChannels, 3, padding=1)
        self.h3_Cat_hd3_bn = nn.BatchNorm2d(self.CatChannels)
        self.h3_Cat_hd3_relu = nn.ReLU(inplace=False)

        # hd4->40*40, hd4->80*80, Upsample 2 times
        self.hd4_UT_hd3 = nn.Upsample(scale_factor=2, mode='bilinear')  # 14*14
        self.hd4_UT_hd3_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd4_UT_hd3_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd4_UT_hd3_relu = nn.ReLU(inplace=False)

        # hd5->20*20, hd4->80*80, Upsample 4 times
        self.hd5_UT_hd3 = nn.Upsample(scale_factor=4, mode='bilinear')  # 14*14
        self.hd5_UT_hd3_conv = nn.Conv2d(filters[4], self.CatChannels, 3, padding=1)
        self.hd5_UT_hd3_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd5_UT_hd3_relu = nn.ReLU(inplace=False)

        # fusion(h1_PT_hd3, h2_PT_hd3, h3_Cat_hd3, hd4_UT_hd3, hd5_UT_hd3)
        self.conv3d_1 = nn.Conv2d(self.UpChannels, self.UpChannels, 3, padding=1)  # 16
        self.bn3d_1 = nn.BatchNorm2d(self.UpChannels)
        self.relu3d_1 = nn.ReLU(inplace=False)

        '''stage 2d '''
        # h1->320*320, hd2->160*160, Pooling 2 times
        self.h1_PT_hd2 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.h1_PT_hd2_conv = nn.Conv2d(filters[0], self.CatChannels, 3, padding=1)
        self.h1_PT_hd2_bn = nn.BatchNorm2d(self.CatChannels)
        self.h1_PT_hd2_relu = nn.ReLU(inplace=False)

        # h2->160*160, hd2->160*160, Concatenation
        self.h2_Cat_hd2_conv = nn.Conv2d(filters[1], self.CatChannels, 3, padding=1)
        self.h2_Cat_hd2_bn = nn.BatchNorm2d(self.CatChannels)
        self.h2_Cat_hd2_relu = nn.ReLU(inplace=False)

        # hd3->80*80, hd2->160*160, Upsample 2 times
        self.hd3_UT_hd2 = nn.Upsample(scale_factor=2, mode='bilinear')  # 14*14
        self.hd3_UT_hd2_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd3_UT_hd2_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd3_UT_hd2_relu = nn.ReLU(inplace=False)

        # hd4->40*40, hd2->160*160, Upsample 4 times
        self.hd4_UT_hd2 = nn.Upsample(scale_factor=4, mode='bilinear')  # 14*14
        self.hd4_UT_hd2_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd4_UT_hd2_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd4_UT_hd2_relu = nn.ReLU(inplace=False)

        # hd5->20*20, hd2->160*160, Upsample 8 times
        self.hd5_UT_hd2 = nn.Upsample(scale_factor=8, mode='bilinear')  # 14*14
        self.hd5_UT_hd2_conv = nn.Conv2d(filters[4], self.CatChannels, 3, padding=1)
        self.hd5_UT_hd2_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd5_UT_hd2_relu = nn.ReLU(inplace=False)

        # fusion(h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2)
        self.conv2d_1 = nn.Conv2d(self.UpChannels, self.UpChannels, 3, padding=1)  # 16
        self.bn2d_1 = nn.BatchNorm2d(self.UpChannels)
        self.relu2d_1 = nn.ReLU(inplace=False)

        '''stage 1d'''
        # h1->320*320, hd1->320*320, Concatenation
        self.h1_Cat_hd1_conv = nn.Conv2d(filters[0], self.CatChannels, 3, padding=1)
        self.h1_Cat_hd1_bn = nn.BatchNorm2d(self.CatChannels)
        self.h1_Cat_hd1_relu = nn.ReLU(inplace=False)

        # hd2->160*160, hd1->320*320, Upsample 2 times
        self.hd2_UT_hd1 = nn.Upsample(scale_factor=2, mode='bilinear')  # 14*14
        self.hd2_UT_hd1_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd2_UT_hd1_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd2_UT_hd1_relu = nn.ReLU(inplace=False)

        # hd3->80*80, hd1->320*320, Upsample 4 times
        self.hd3_UT_hd1 = nn.Upsample(scale_factor=4, mode='bilinear')  # 14*14
        self.hd3_UT_hd1_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd3_UT_hd1_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd3_UT_hd1_relu = nn.ReLU(inplace=False)

        # hd4->40*40, hd1->320*320, Upsample 8 times
        self.hd4_UT_hd1 = nn.Upsample(scale_factor=8, mode='bilinear')  # 14*14
        self.hd4_UT_hd1_conv = nn.Conv2d(self.UpChannels, self.CatChannels, 3, padding=1)
        self.hd4_UT_hd1_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd4_UT_hd1_relu = nn.ReLU(inplace=False)

        # hd5->20*20, hd1->320*320, Upsample 16 times
        self.hd5_UT_hd1 = nn.Upsample(scale_factor=16, mode='bilinear')  # 14*14
        self.hd5_UT_hd1_conv = nn.Conv2d(filters[4], self.CatChannels, 3, padding=1)
        self.hd5_UT_hd1_bn = nn.BatchNorm2d(self.CatChannels)
        self.hd5_UT_hd1_relu = nn.ReLU(inplace=False)

        # fusion(h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1)
        self.conv1d_1 = nn.Conv2d(self.UpChannels, self.UpChannels, 3, padding=1)  # 16
        self.bn1d_1 = nn.BatchNorm2d(self.UpChannels)
        self.relu1d_1 = nn.ReLU(inplace=False)

        # output
        self.outconv1 = nn.Conv2d(self.UpChannels, n_classes, 3, padding=1)

        # initialise weights
        # NOTE(review): no weight-initialisation code actually follows this
        # comment; layers keep PyTorch's default initialisation.

    def forward(self, inputs):
        """Run the full-scale-skip encoder/decoder; returns per-pixel logits
        of shape (B, n_classes, H, W)."""
        ## -------------Encoder-------------
        h1 = self.conv1(inputs)  # h1->320*320*64

        h2 = self.maxpool1(h1)
        h2 = self.conv2(h2)  # h2->160*160*128

        h3 = self.maxpool2(h2)
        h3 = self.conv3(h3)  # h3->80*80*256

        h4 = self.maxpool3(h3)
        h4 = self.conv4(h4)  # h4->40*40*512

        h5 = self.maxpool4(h4)
        hd5 = self.conv5(h5)  # h5->20*20*1024

        ## -------------Decoder-------------
        h1_PT_hd4 = self.h1_PT_hd4_relu(self.h1_PT_hd4_bn(self.h1_PT_hd4_conv(self.h1_PT_hd4(h1))))
        h2_PT_hd4 = self.h2_PT_hd4_relu(self.h2_PT_hd4_bn(self.h2_PT_hd4_conv(self.h2_PT_hd4(h2))))
        h3_PT_hd4 = self.h3_PT_hd4_relu(self.h3_PT_hd4_bn(self.h3_PT_hd4_conv(self.h3_PT_hd4(h3))))
        h4_Cat_hd4 = self.h4_Cat_hd4_relu(self.h4_Cat_hd4_bn(self.h4_Cat_hd4_conv(h4)))
        hd5_UT_hd4 = self.hd5_UT_hd4_relu(self.hd5_UT_hd4_bn(self.hd5_UT_hd4_conv(self.hd5_UT_hd4(hd5))))
        hd4 = self.relu4d_1(self.bn4d_1(self.conv4d_1(
            torch.cat((h1_PT_hd4, h2_PT_hd4, h3_PT_hd4, h4_Cat_hd4, hd5_UT_hd4), 1))))  # hd4->40*40*UpChannels

        h1_PT_hd3 = self.h1_PT_hd3_relu(self.h1_PT_hd3_bn(self.h1_PT_hd3_conv(self.h1_PT_hd3(h1))))
        h2_PT_hd3 = self.h2_PT_hd3_relu(self.h2_PT_hd3_bn(self.h2_PT_hd3_conv(self.h2_PT_hd3(h2))))
        h3_Cat_hd3 = self.h3_Cat_hd3_relu(self.h3_Cat_hd3_bn(self.h3_Cat_hd3_conv(h3)))
        hd4_UT_hd3 = self.hd4_UT_hd3_relu(self.hd4_UT_hd3_bn(self.hd4_UT_hd3_conv(self.hd4_UT_hd3(hd4))))
        hd5_UT_hd3 = self.hd5_UT_hd3_relu(self.hd5_UT_hd3_bn(self.hd5_UT_hd3_conv(self.hd5_UT_hd3(hd5))))
        hd3 = self.relu3d_1(self.bn3d_1(self.conv3d_1(
            torch.cat((h1_PT_hd3, h2_PT_hd3, h3_Cat_hd3, hd4_UT_hd3, hd5_UT_hd3), 1))))  # hd3->80*80*UpChannels

        h1_PT_hd2 = self.h1_PT_hd2_relu(self.h1_PT_hd2_bn(self.h1_PT_hd2_conv(self.h1_PT_hd2(h1))))
        h2_Cat_hd2 = self.h2_Cat_hd2_relu(self.h2_Cat_hd2_bn(self.h2_Cat_hd2_conv(h2)))
        hd3_UT_hd2 = self.hd3_UT_hd2_relu(self.hd3_UT_hd2_bn(self.hd3_UT_hd2_conv(self.hd3_UT_hd2(hd3))))
        hd4_UT_hd2 = self.hd4_UT_hd2_relu(self.hd4_UT_hd2_bn(self.hd4_UT_hd2_conv(self.hd4_UT_hd2(hd4))))
        hd5_UT_hd2 = self.hd5_UT_hd2_relu(self.hd5_UT_hd2_bn(self.hd5_UT_hd2_conv(self.hd5_UT_hd2(hd5))))
        hd2 = self.relu2d_1(self.bn2d_1(self.conv2d_1(
            torch.cat((h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2), 1))))  # hd2->160*160*UpChannels

        h1_Cat_hd1 = self.h1_Cat_hd1_relu(self.h1_Cat_hd1_bn(self.h1_Cat_hd1_conv(h1)))
        hd2_UT_hd1 = self.hd2_UT_hd1_relu(self.hd2_UT_hd1_bn(self.hd2_UT_hd1_conv(self.hd2_UT_hd1(hd2))))
        hd3_UT_hd1 = self.hd3_UT_hd1_relu(self.hd3_UT_hd1_bn(self.hd3_UT_hd1_conv(self.hd3_UT_hd1(hd3))))
        hd4_UT_hd1 = self.hd4_UT_hd1_relu(self.hd4_UT_hd1_bn(self.hd4_UT_hd1_conv(self.hd4_UT_hd1(hd4))))
        hd5_UT_hd1 = self.hd5_UT_hd1_relu(self.hd5_UT_hd1_bn(self.hd5_UT_hd1_conv(self.hd5_UT_hd1(hd5))))
        hd1 = self.relu1d_1(self.bn1d_1(self.conv1d_1(
            torch.cat((h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1), 1))))  # hd1->320*320*UpChannels

        d1 = self.outconv1(hd1)  # d1->320*320*n_classes
        return d1
| FengheTan9/Medical-Image-Segmentation-Benchmarks | src/network/conv_based/UNet3plus.py | UNet3plus.py | py | 12,072 | python | en | code | 36 | github-code | 90 |
38421845489 | def main():
    # Read n strings; answer "Yes" when all are distinct AND each string's
    # last character equals the next string's first character (word chain).
    n = int(input())
    data = [input() for _ in range(n)]
    ok = True
    # Any duplicate word invalidates the chain.
    if len(set(data)) != n: ok = False
    for i in range(n-1):
        if data[i][-1] != data[i+1][0]:
            ok = False
    print("Yes" if ok else "No")
main() | Aasthaengg/IBMdataset | Python_codes/p03261/s847915531.py | s847915531.py | py | 255 | python | en | code | 0 | github-code | 90
38421845489 | import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import warnings
warnings.filterwarnings('ignore')
import argparse
import logging
import torch.nn as nn
from torch.nn.modules.loss import CrossEntropyLoss
import torch.optim as optim
from torchvision import transforms
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from utils import ramps, losses, metrics, test_patch
from dataloaders.dataset import *
from networks.net_factory import net_factory
from utils.mixmatch_util import mix_module
# Command-line options for semi-supervised segmentation training.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type=str, default='Pancreas_CT', help='Pancreas_CT,LA')
parser.add_argument('--root_path', type=str, default='../', help='Name of Dataset')
parser.add_argument('--exp', type=str, default='debug', help='exp_name')
parser.add_argument('--model', type=str, default='VNet_4out', help='model_name')
parser.add_argument('--max_iteration', type=int, default=15000, help='maximum iteration to train')
parser.add_argument('--max_samples', type=int, default=80, help='maximum samples to train')
parser.add_argument('--labeled_bs', type=int, default=2, help='batch_size of labeled data per gpu') # 2
parser.add_argument('--batch_size', type=int, default=4, help='batch_size of labeled data per gpu') # 4
parser.add_argument('--base_lr', type=float, default=0.01, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--labelnum', type=int, default=6, help='trained samples')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
parser.add_argument('--consistency', type=float, default=1, help='consistency_weight')
parser.add_argument('--consistency_rampup', type=float, default=40.0, help='consistency_rampup')
parser.add_argument('--temperature', type=float, default=0.1, help='temperature of sharpening')
parser.add_argument('--lamda', type=float, default=0.5, help='weight to balance all losses')
args = parser.parse_args()

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Checkpoint/log directory, keyed by dataset, experiment, label count, model.
snapshot_path = args.root_path + "model/{}_{}_{}_labeled/{}".format(args.dataset_name, args.exp, args.labelnum,
                                                                   args.model)

num_classes = 2
# Per-dataset crop size, data root, and training budget.
if args.dataset_name == "LA":
    # patch_size = (32, 32, 32) # for debug use, quickly the training process
    patch_size = (112, 112, 80)
    args.root_path = args.root_path + 'data/LA'
    args.max_samples = 80
    args.max_iteration = 15000
elif args.dataset_name == "Pancreas_CT":
    patch_size = (96, 96, 96)
    # patch_size = (32, 32, 32)
    args.root_path = args.root_path + 'data/Pancreas'
    args.max_samples = 62
    args.max_iteration = 15000
train_data_path = args.root_path

# NOTE(review): CUDA_VISIBLE_DEVICES is set a second time here — redundant
# duplicate of the assignment above.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Module-level aliases used throughout the training loop below.
labeled_bs = args.labeled_bs
max_iterations = args.max_iteration
base_lr = args.base_lr
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return ramp * args.consistency
def sharpening(P):
    """Temperature-sharpen a probability map P (elementwise, stays in [0, 1])."""
    power = 1 / args.temperature
    numerator = P ** power
    return numerator / (numerator + (1 - P) ** power)
class WeightEMA(object):
    """Mean-teacher style exponential moving average over state-dict tensors.

    Holds references to both models' state-dict values; ``step()`` blends the
    student weights into the teacher in place and applies a small custom
    weight decay to the student.
    """

    def __init__(self, model, ema_model, alpha=0.999):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha
        self.params = list(model.state_dict().values())
        self.ema_params = list(ema_model.state_dict().values())
        # Custom weight-decay factor tied to the base learning rate.
        self.wd = 0.02 * args.base_lr

        # Start both models from identical weights (copy teacher -> student).
        for w, ema_w in zip(self.params, self.ema_params):
            w.data.copy_(ema_w.data)

    def step(self):
        """One EMA update: ema <- alpha * ema + (1 - alpha) * student."""
        blend = 1.0 - self.alpha
        for w, ema_w in zip(self.params, self.ema_params):
            # Only float32 entries are averaged; non-float32 entries
            # (presumably integer buffers) are left untouched.
            if ema_w.dtype == torch.float32:
                ema_w.mul_(self.alpha)
                ema_w.add_(w * blend)
                # customized weight decay on the student weights
                w.mul_(1 - self.wd)
def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update of ema_model's parameters toward model's.

    Args:
        model: student network (source of fresh weights)
        ema_model: teacher network, updated in place
        alpha: EMA decay; effective decay is min(1 - 1/(step+1), alpha) so
            early steps use the true running average
        global_step: current training iteration (0-based)
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema <- alpha * ema + (1 - alpha) * param.
        # FIX: use the keyword `alpha=` form; the positional
        # Tensor.add_(scalar, tensor) overload is deprecated in PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
# Optional reproducibility: disable cuDNN autotuning and seed every RNG
# (torch CPU/GPU, python random, numpy) with the same value.
if args.deterministic:
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
if __name__ == "__main__":
## make logger file
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
if os.path.exists(snapshot_path + '/code'):
shutil.rmtree(snapshot_path + '/code')
# shutil.copytree('../code/', snapshot_path + '/code', shutil.ignore_patterns(['.git', '__pycache__']))
logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(args))
def create_model(ema=False):
# Network definition
net = net_factory(net_type=args.model, in_chns=1, class_num=num_classes, mode="train")
model = net.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
# init the model, include the teacher network and student network
model = create_model(ema=False) #student network
ema_model = create_model(ema=True) #teacher network
# init the dataset
if args.dataset_name == "LA":
db_train = LAHeart_no_read(base_dir=train_data_path,
split='train',
transform=transforms.Compose([
RandomRotFlip(),
RandomCrop(patch_size),
ToTensor(),
]))
elif args.dataset_name == "Pancreas_CT":
db_train = Pancreas_no_read(base_dir=train_data_path,
split='train',
transform=transforms.Compose([
RandomCrop(patch_size),
ToTensor(),
]))
#set the labeled num selection for the training dataset
labelnum = args.labelnum
labeled_idxs = list(range(labelnum))
unlabeled_idxs = list(range(labelnum, args.max_samples))
batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, args.batch_size, args.batch_size - labeled_bs)
def worker_init_fn(worker_id):
random.seed(args.seed + worker_id)
trainloader = DataLoader(db_train, batch_sampler=batch_sampler, num_workers=4, pin_memory=True,
worker_init_fn=worker_init_fn)
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
ema_optimizer = WeightEMA(model, ema_model, alpha=0.99)
writer = SummaryWriter(snapshot_path + '/log')
logging.info("{} itertations per epoch".format(len(trainloader)))
MSE_cri = losses.mse_loss
iter_num,best_dice = 0,0
ce_loss = CrossEntropyLoss()
dice_loss = losses.DiceLoss(num_classes)
kl_distance = nn.KLDivLoss(reduction='none')
max_epoch = max_iterations // len(trainloader) + 1
lr_ = base_lr
iterator = tqdm(range(max_epoch), ncols=70)
for epoch_num in iterator:
for i_batch, sampled_batch in enumerate(trainloader):
volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
l_image,l_label = volume_batch[:args.labeled_bs],label_batch[:args.labeled_bs]
unlabeled_volume_batch = volume_batch[args.labeled_bs:]
X = list(zip(l_image, l_label))
U = unlabeled_volume_batch
X_prime, U_prime, pseudo_label = mix_module(X, U, eval_net=ema_model, K=2, T=0.5, alpha=0.75,
mixup_mode='_x', aug_factor=torch.tensor(1).cuda())
model.train()
X_data = torch.cat([torch.unsqueeze(X_prime[0][0],0), torch.unsqueeze(X_prime[1][0],0)],0) # 需要unsqueeze 一下
X_label = torch.cat([torch.unsqueeze(X_prime[0][1], 0), torch.unsqueeze(X_prime[1][1], 0)],0)
U_data = torch.cat([torch.unsqueeze(U_prime[0][0], 0), torch.unsqueeze(U_prime[1][0], 0),
torch.unsqueeze(U_prime[2][0], 0),torch.unsqueeze(U_prime[3][0], 0)],0)
X_data ,X_label = X_data.cuda(),X_label.cuda().float()
U_data ,U_data_pseudo = U_data.cuda(),pseudo_label.cuda().float()
X = torch.cat((X_data, U_data,volume_batch[args.labeled_bs:]), 0)
out_1_all, out_2_all,out_3_all,out_4_all = model(X)
out_1,out_2,out_3,out_4 = \
out_1_all[:-args.labeled_bs],out_2_all[:-args.labeled_bs],out_3_all[:-args.labeled_bs],out_4_all[:-args.labeled_bs]
out_1_u,out_2_u,out_3_u,out_4_u = \
out_1_all[-args.labeled_bs:],out_2_all[-args.labeled_bs:],out_3_all[-args.labeled_bs:],out_4_all[-args.labeled_bs:]
out_1_s,out_2_s,out_3_s,out_4_s = \
torch.softmax(out_1, dim=1), torch.softmax(out_2, dim=1),torch.softmax(out_3, dim=1),torch.softmax(out_4, dim=1)
out_1_u_s,out_2_u_s,out_3_u_s,out_4_u_s = \
torch.softmax(out_1_u, dim=1), torch.softmax(out_2_u, dim=1), torch.softmax(out_3_u, dim=1),torch.softmax(out_4_u, dim=1)
o_1_u_s = torch.softmax(out_1_all[args.labeled_bs:], dim=1)
o_2_u_s = torch.softmax(out_2_all[args.labeled_bs:], dim=1)
o_3_u_s = torch.softmax(out_3_all[args.labeled_bs:], dim=1)
o_4_u_s = torch.softmax(out_4_all[args.labeled_bs:], dim=1)
loss_seg_ce_lab, loss_seg_ce_unlab = 0, 0
loss_seg_dice_lab, loss_seg_dice_unlab = 0, 0
loss_seg_ce_lab += ce_loss(out_1[:args.labeled_bs], X_label[:args.labeled_bs].long()) + \
ce_loss(out_2[:args.labeled_bs], X_label[:args.labeled_bs].long()) + \
ce_loss(out_3[:args.labeled_bs], X_label[:args.labeled_bs].long())+\
ce_loss(out_4[:args.labeled_bs], X_label[:args.labeled_bs].long())
loss_seg_dice_lab += dice_loss(out_1_s[:args.labeled_bs],X_label[:args.labeled_bs].unsqueeze(1)) + \
dice_loss(out_2_s[:args.labeled_bs], X_label[:args.labeled_bs].unsqueeze(1)) + \
dice_loss(out_3_s[:args.labeled_bs], X_label[:args.labeled_bs].unsqueeze(1)) + \
dice_loss(out_4_s[:args.labeled_bs], X_label[:args.labeled_bs].unsqueeze(1))
loss_seg_ce_unlab += ce_loss(out_1[args.labeled_bs:], U_data_pseudo[:].long()) + \
ce_loss(out_2[args.labeled_bs:], U_data_pseudo[:].long()) +\
ce_loss(out_3[args.labeled_bs:], U_data_pseudo[:].long()) + \
ce_loss(out_4[args.labeled_bs:], U_data_pseudo[:].long())
loss_seg_dice_unlab += dice_loss(out_1_s[args.labeled_bs:], U_data_pseudo[:].unsqueeze(1)) + \
dice_loss(out_2_s[args.labeled_bs:], U_data_pseudo[:].unsqueeze(1)) + \
dice_loss(out_3_s[args.labeled_bs:], U_data_pseudo[:].unsqueeze(1)) + \
dice_loss(out_4_s[args.labeled_bs:], U_data_pseudo[:].unsqueeze(1))
supervised_loss = 0.5 * (loss_seg_ce_lab + loss_seg_dice_lab)
pseudo_loss = 0.5 * (loss_seg_dice_unlab + loss_seg_ce_unlab)
preds = (o_1_u_s + o_2_u_s + o_3_u_s + o_4_u_s) / 4
variance_1 = torch.sum(kl_distance(torch.log(o_1_u_s), preds), dim=1, keepdim=True)# 只是用来计算kl,固定操作,多加一个log
exp_variance_1 = torch.exp(-variance_1)
variance_2 = torch.sum(kl_distance(torch.log(o_2_u_s), preds), dim=1, keepdim=True)
exp_variance_2 = torch.exp(-variance_2)
variance_3 = torch.sum(kl_distance(torch.log(o_3_u_s), preds), dim=1, keepdim=True)
exp_variance_3 = torch.exp(-variance_3)
variance_4 = torch.sum(kl_distance(torch.log(o_4_u_s), preds), dim=1, keepdim=True)
exp_variance_4 = torch.exp(-variance_4)
consis_dist_1 = (preds - o_1_u_s) ** 3
consis_loss_1 = torch.mean(consis_dist_1 * exp_variance_1) / (torch.mean(exp_variance_1) + 1e-8) + torch.mean(variance_1)
consis_dist_2 = (preds - o_2_u_s) ** 3
consis_loss_2 = torch.mean(consis_dist_2 * exp_variance_2) / (torch.mean(exp_variance_2) + 1e-8) + torch.mean( variance_2)
consis_dist_3 = ( preds - o_3_u_s) ** 3
consis_loss_3 = torch.mean(consis_dist_3 * exp_variance_3) / (torch.mean(exp_variance_3) + 1e-8) + torch.mean(variance_3)
consis_dist_4 = (preds - o_4_u_s) ** 3
consis_loss_4 = torch.mean(consis_dist_4 * exp_variance_4) / (
torch.mean(exp_variance_4) + 1e-8) + torch.mean(variance_4)
sharp1 = sharpening(out_1_u_s)
sharp2 = sharpening(out_2_u_s)
sharp3 = sharpening(out_3_u_s)
sharp4 = sharpening(out_4_u_s)
loss_consist = (consis_loss_1 + consis_loss_2 + consis_loss_3 + consis_loss_4)/4 \
+(MSE_cri(sharp1,out_1_u_s) + MSE_cri(sharp2,out_2_u_s) +
MSE_cri(sharp3,out_3_u_s)+MSE_cri(sharp4,out_4_u_s))/4
consistency_weight = get_current_consistency_weight(iter_num // 150)
loss = supervised_loss + pseudo_loss + consistency_weight * loss_consist
iter_num = iter_num + 1
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema_optimizer.step()
update_ema_variables(model, ema_model, 0.99, iter_num)
consistency_loss1 = 0
if iter_num % 100 == 0:
logging.info('iteration %d : loss : %03f, loss_d: %03f, loss_cosist: %03f' % (
iter_num, loss, supervised_loss, loss_consist))
writer.add_scalar('Labeled_loss/loss_seg_dice', loss_seg_dice_lab, iter_num)
writer.add_scalar('Labeled_loss/pseudo_loss', pseudo_loss, iter_num)
writer.add_scalar('Labeled_loss/loss_seg_ce', loss_seg_ce_lab, iter_num)
writer.add_scalar('Co_loss/consistency_loss', loss_consist, iter_num)
writer.add_scalar('Co_loss/consist_weight', consistency_weight, iter_num)
if iter_num % 500 == 0:
image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1)
grid_image = make_grid(image, 5, normalize=True)
writer.add_image('train/Image', grid_image, iter_num)
image = torch.argmax(out_1_s, dim=1, keepdim=True)[0, 0:1, :, :, 20:61:10].permute(
3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Predicted_label',
grid_image, iter_num)
image = label_batch[0, :, :, 20:61:10].unsqueeze(
0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
grid_image = make_grid(image, 5, normalize=False)
writer.add_image('train/Groundtruth_label',
grid_image, iter_num)
if iter_num % 200 == 0:
model.eval()
if args.dataset_name == "LA":
dice_sample = test_patch.var_all_case(model, num_classes=num_classes, patch_size=patch_size,
stride_xy=18, stride_z=4, dataset_name='LA')
elif args.dataset_name == "Pancreas_CT":
dice_sample = test_patch.var_all_case(model, num_classes=num_classes, patch_size=patch_size,
stride_xy=32, stride_z=32, dataset_name='Pancreas_CT')
if dice_sample > best_dice:
best_dice = dice_sample
save_mode_path = os.path.join(snapshot_path, 'iter_{}_dice_{}.pth'.format(iter_num, best_dice))
save_best_path = os.path.join(snapshot_path, '{}_best_model.pth'.format(args.model))
torch.save(model.state_dict(), save_mode_path)
torch.save(model.state_dict(), save_best_path)
logging.info("save best model to {}".format(save_mode_path))
writer.add_scalar('Var_dice/Dice', dice_sample, iter_num)
writer.add_scalar('Var_dice/Best_dice', best_dice, iter_num)
model.train()
if iter_num >= max_iterations:
save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
torch.save(model.state_dict(), save_mode_path)
logging.info("save model to {}".format(save_mode_path))
print('best_dice',best_dice)
break
if iter_num >= max_iterations:
iterator.close()
break
writer.close()
| ortonwang/PLGDF | code/train.py | train.py | py | 18,053 | python | en | code | 1 | github-code | 90 |
70035545577 | """Test both structuring and unstructuring."""
from dataclasses import MISSING, dataclass, fields, make_dataclass
from typing import Optional, Union
import pytest
from hypothesis import assume, given
from hypothesis.strategies import sampled_from
from convclasses import Converter, UnstructureStrategy, mod
from . import nested_typed_classes, simple_typed_attrs, simple_typed_classes
unstructure_strats = sampled_from(list(UnstructureStrategy))
@given(simple_typed_classes(), unstructure_strats)
def test_simple_roundtrip(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(simple_typed_attrs(defaults=True), unstructure_strats)
def test_simple_roundtrip_defaults(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
a, _ = cls_and_vals
cl = make_dataclass("HypClass", [("a", a.type, a)])
converter = Converter(unstruct_strat=strat)
inst = cl()
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(simple_typed_classes())
def test_simple_name_modifiers(cls_and_vals):
"""
Simple classes with metadata can be unstructured and restructured.
"""
a, vals = cls_and_vals
converter = Converter()
if len(fields(a)) > 0:
fld = mod.name("t-t", fields(a)[0])
cl = make_dataclass("HypClass", [("t_t", fld.type, fld)])
inst = cl(vals[0])
assert converter.unstructure(inst).get("t-t", MISSING) is not MISSING
else:
cl = make_dataclass("HypClass", [])
inst = cl()
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(nested_typed_classes, unstructure_strats)
def test_nested_roundtrip(cls_and_vals, strat):
"""
Nested classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
# Vals are a tuple, convert into a dictionary.
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(
simple_typed_classes(defaults=False),
simple_typed_classes(defaults=False),
unstructure_strats,
)
def test_union_field_roundtrip(cl_and_vals_a, cl_and_vals_b, strat):
"""
Classes with union fields can be unstructured and structured.
"""
converter = Converter(unstruct_strat=strat)
cl_a, vals_a = cl_and_vals_a
cl_b, vals_b = cl_and_vals_b
a_field_names = {a.name for a in fields(cl_a)}
b_field_names = {a.name for a in fields(cl_b)}
assume(a_field_names)
assume(b_field_names)
common_names = a_field_names & b_field_names
assume(len(a_field_names) > len(common_names))
@dataclass
class C(object):
a: Union[cl_a, cl_b]
inst = C(a=cl_a(*vals_a))
if strat is UnstructureStrategy.AS_DICT:
assert inst == converter.structure(converter.unstructure(inst), C)
else:
# Our disambiguation functions only support dictionaries for now.
with pytest.raises(ValueError):
converter.structure(converter.unstructure(inst), C)
def handler(obj, _):
return converter.structure(obj, cl_a)
converter._union_registry[Union[cl_a, cl_b]] = handler
assert inst == converter.structure(converter.unstructure(inst), C)
del converter._union_registry[Union[cl_a, cl_b]]
@given(simple_typed_classes(defaults=False))
def test_optional_field_roundtrip(cl_and_vals):
"""
Classes with optional fields can be unstructured and structured.
"""
converter = Converter()
cl, vals = cl_and_vals
@dataclass
class C(object):
a: Optional[cl]
inst = C(a=cl(*vals))
assert inst == converter.structure(converter.unstructure(inst), C)
inst = C(a=None)
unstructured = converter.unstructure(inst)
assert inst == converter.structure(unstructured, C)
| zeburek/convclasses | tests/metadata/test_roundtrips.py | test_roundtrips.py | py | 4,087 | python | en | code | 3 | github-code | 90 |
19981809252 | import xlrd
def read_excel():
f = xlrd.open_workbook('D:\\IPsave.xls')
mysheet = f.sheets()
mysheet1 = mysheet[0]
mycol = mysheet1.col_values(0)
mycol.pop(0)
print(mycol)
if __name__ == '__main__':
read_excel()
| yaunsine/Python3 | read_IP.py | read_IP.py | py | 253 | python | en | code | 1 | github-code | 90 |
25749040204 | #!encoding:utf-8
import scrapy
import re
import os
import json
import pymysql
from datetime import datetime
from sina_crawler.items import SinaCrawlerItem
from scrapy.selector import Selector
class SinaCrawlerSpider(scrapy.Spider):
'''Spider: crawling financial news starting from sina
The SinaCrawlerSpider class starts from finance.sina.com.cn and proceeds crawling
following a depth-first algorithm, tests the validity of returned content of news
and stores the content into the database of server.
Attributes:
name: The name of crawler.
allowed_domains: List of allowed domains for crawler.
start_urls: List of url which the spider starts at.
crawled_urls: Set of urls which have been crawled.
visited_urls: Set of urls visited for the depth-first algorithm.
custom_settings:
DEPTH_PRIORITY: set 1 as default.
DEPTH_LIMIT: set 4 as default.
CLOSESPIRDER_TIMEOUT: set 420 as default, 7 mins timed out.
cookies: Dict of cookies. Refer to scrapy for details.
headers: Dict of user-agent string. Refer to scrapy for details.
meta: Dict of some attributes. Refer to scrapy for details.
'''
name = "sina_crawler"
allowed_domains = ["finance.sina.com.cn"]
start_urls = ["https://finance.sina.com.cn/"]
crawled_urls = set()
visited_urls = set()
custom_settings = {
'DEPTH_PRIORITY' : 1,
'DEPTH_LIMIT' : 4,
'CLOSESPIDER_TIMEOUT' : 420,
}
cookies = {}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
meta = {'dont_redirect': True,
'handle_httpstatus_list': [301, 302]}
def __init__(self):
'''Initiate to get crawled_urls for later use.'''
f = open("/root/originaltech/crawler/sina_crawler/sina_crawler/spiders/crawled_urls", "r")
line = f.readline()
while line != "":
line = line.strip("\n")
self.crawled_urls.add(line)
line = f.readline()
f.close()
print("crawled " + str(len(self.crawled_urls)) + " websites")
def start_requests(self):
'''The main function calls Request from scrapy.'''
yield scrapy.Request(self.start_urls[0], callback = self.parse, headers = self.headers, cookies = self.cookies, meta = self.meta)
def parse(self, response):
'''The function processing callback informatino in scrapy.Request'''
if response.url in self.visited_urls:
return
print("parsing " + str(response.url))
self.visited_urls.add(response.url)
lower_url = response.url.lower()
selector = Selector(response)
if lower_url.endswith("htm") or lower_url.endswith("html"):
item = SinaCrawlerItem()
item['title'] = selector.xpath('//h1[@class="main-title"]/text()').extract_first()
item['time'] = selector.xpath('//span[@class="date"]/text()').extract_first()
content_list = selector.xpath('//*[@id="artibody"]/p/text()').extract()
item['content'] = " ".join(content_list).replace('\u3000', '')
item['source'] = selector.xpath('//div[@class="date-source"]/a/text()').extract_first()
item['url'] = response.url
keyword_list = selector.xpath('//div[@class="keywords"]/a/text()').extract()
item['keywords'] = ",".join(keyword_list)
item['topic'] = selector.xpath('//div[@data-sudaclick="content_relativetopics_p"]/a/text()').extract_first()
if item['title'] != None:
# Got right url:
self.crawled_urls.add(response.url)
# Process errors.
if item['time'] == None:
item['time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if item['source'] == None:
item['source'] = ''
if item['keywords'] == None:
item['keywords'] = ''
if item['topic'] == None:
item['topic'] = ''
yield item
for sel in selector.xpath('//a'):
# Crawl further in the links available.
link = sel.xpath("@href").extract_first()
if link != None and not link.endswith("pdf"):
url = link
if not link.startswith("http"):
url = os.path.split(response.url)[0] + '/' + link
url = self.processUrl(url)
if url != None and url.find("finance.sina.com") != -1:
yield scrapy.Request(url, callback = self.parse, headers = self.headers, cookies = self.cookies, meta = self.meta)
def processUrl(self, url):
'''Process urls in the right form for scrapy.Request.'''
items = url.split("/")
ss = list()
for s in items:
if s == ".":
continue
elif s == "..":
ss.pop()
else:
ss.append(s)
u = "/".join(ss)
if u.endswith("pdf") or u.endswith("zip"):
return None
return u
| wzyxwqx/OriginalTech | Crawler/sina_crawler/sina_crawler/spiders/sina_crawler_spider.py | sina_crawler_spider.py | py | 5,234 | python | en | code | 0 | github-code | 90 |
18362593499 | n,k=map(int,input().split())
a=list(map(int,input().split()))
s=sum(a)
def make_divisors(n):
divisors = []
for i in range(1, int(n**0.5)+1):
if n % i == 0:
divisors.append(i)
if i != n // i:
divisors.append(n//i)
divisors.sort()
return divisors
l=make_divisors(s)
ans=1
for y in l[1:]:
x=[i%y for i in a]
x.sort()
ss=sum(x)
l1=[0]
for i in range(n):
l1.append(l1[-1]+x[i])
l2=[0]
for i in range(n):
l2.append(l2[-1]+y-x[-i-1])
l2.reverse()
c=-1
for i in range(n+1):
if l1[i]==l2[i]:
c=l1[i]
break
if c>-1 and c<=k:
ans=max(ans,y)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02955/s770273402.py | s770273402.py | py | 709 | python | en | code | 0 | github-code | 90 |
18211724849 | from collections import deque
n,m = map(int,input().split())
load = [list(map(int,input().split())) for _ in range(m)]
goal = [[] for _ in range(n)]
for i in load:
goal[i[0]-1].append(i[1]-1)
goal[i[1]-1].append(i[0]-1)
q = deque([0])
ans = [-1 for _ in range(n)]
ans[0] = 0
while q:
check = q.popleft()
for j in goal[check]:
if ans[j] == -1:
ans[j] = check+1
q.append(j)
if -1 in ans:
print("No")
else:
print("Yes")
for p in range(1,n):
print(ans[p])
| Aasthaengg/IBMdataset | Python_codes/p02678/s761170327.py | s761170327.py | py | 521 | python | en | code | 0 | github-code | 90 |
19349296177 | import numpy as np
import plotly.graph_objects as go
import plotly.express as px
from . import plot_utils
#######################################################################################################################
def scatter_geo(ds, prop="pressure", stat="count", title=None, cmap="ylorrd"):
"""
Plot a scatter plot on top of a world map baesd on df.
Parameters
----------
ds : xarray Dataset
A dataset of aggregated data with a specified format
prop : str, default 'pressure'
Atmospheric property of interest
stat : str, default 'count'
A statistic of interest to show on the plot. Options: count, mean, median, std, min, max
Returns
-------
fig : plotly.graph_objs._figure.Figure
Plotly figure
"""
df = plot_utils.scatter_geo(ds, prop, stat)
if stat=="total count":
stat = "count"
animation_frame = None
else:
animation_frame = "date"
scaled_col = f"scaled_{stat}" # column name of scaled variable in df
size_col = scaled_col if scaled_col in df.columns else stat # determine which column shapes size and color
fig = px.scatter_geo(df, lat="lat", lon="lng", size=size_col, color=size_col,
hover_data=[stat], title=title, animation_frame=animation_frame,
projection="natural earth", color_continuous_scale=cmap)
return fig
#######################################################################################################################
def lines(ds, prop="pressure", stat="mean", title=None):
"""
Plot trend lines across date dimension
Parameters
----------
ds : xarray Dataset
A dataset of aggregated data with a specified format
prop : str, default 'pressure'
Atmospheric property of interest
stat : str, default 'count'
A statistic of interest to show on the plot. Options: count, mean, median, std, min, max
Returns
-------
fig : plotly.graph_objs._figure.Figure
Plotly figure
"""
df = plot_utils.lines(ds, prop, stat)
num_of_colors = len(df["latlng"].drop_duplicates())
print(f"Number of lines: {num_of_colors}")
print("""\nIn the legend:
- Double click on a specific line to hide all the rest of the lines
- Single click on a line to hide it""")
fig = px.line(df, x="date", y=stat, color="latlng", title=title)
return fig
#######################################################################################################################
def add_dropdown(figure, labels=None):
"""
In case of a figure with multiple traces, adds dropdown menu to the figure.
Parametes
---------
labels: array-like
List of str, labels for the drop down, if not provided, labels will be
index numbers
"""
num_traces = len(figure.data)
if num_traces > 1:
if labels:
if len(labels) != num_traces:
raise ValueError("labels must have a length according to number of traces")
else:
labels = range(1, num_traces+1)
buttons = [dict(label=labels[i],
method="update",
args=[dict(visible=np.insert(np.zeros(num_traces-1, dtype=bool), i, 1)),
dict(title=figure.data[i].name)])
for i in range(num_traces)]
figure.update_layout(updatemenus=[go.layout.Updatemenu(active=0, buttons=buttons)],
title_text=figure.data[0].name)
# make only the first trace visible, at first
figure.data[0].visible = True
for i in range(1,num_traces):
figure.data[i].visible = False
#######################################################################################################################
def scatter_geo_layout(deg=2.5, colorscale="jet"):
"""
Defines a template for displaying scatter geo plots
Parameters
----------
deg: float, default 2.5
Degrees for grid spacing
colorscale: str, default "jet"
One of plotly options for colorscale.
Returns
-------
figure: plotly.graph_objs.Layout
A layout object, ready for the update_layout method
"""
grid_dict = dict(showgrid = True,
gridcolor = "black",
gridwidth=0.1,
dtick=deg)
geo_dict = dict(projection_type='natural earth',
showcountries=True,
countrycolor="white",
showcoastlines=False,
showland=True,
landcolor="#c2c2c2",
showocean=True,
oceancolor="#e6fcfc",
showlakes=True,
lakecolor="#3399FF",
showrivers=True,
showframe=False,
# bgcolor="#f2eded",
lonaxis=grid_dict,
lataxis=grid_dict)
colorbar_dict = dict(thicknessmode="fraction",
thickness=0.01,
xpad=0)
coloraxis={"colorbar": colorbar_dict, "colorscale": colorscale}
margin={"l":0, "b":0, "t":50}
return go.Layout(coloraxis=coloraxis, margin=margin, geo=geo_dict)
####################################################################################################################### | udiy/udidata | udidata/plot/plot.py | plot.py | py | 5,542 | python | en | code | 0 | github-code | 90 |
28171479357 | """
Django settings for electron_pdf project.
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%f=rv%f*qgq%k@14-l9f5si6e98pp0+p9jvsf*nfc-9q3x=7oq'
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'electron_pdf'
]
ROOT_URLCONF = 'electron_pdf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'electron_pdf', 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'electron_pdf', 'media')
MEDIA_URL = '/media/'
ELECTRON_PDF_DEBUG = False
| namespace-ee/django-electron-pdf | electron_pdf/settings.py | settings.py | py | 1,299 | python | en | code | 7 | github-code | 90 |
19324354423 | from flask import Flask, render_template, request, redirect, url_for
from store_management_system import StoreManagementSystem
app = Flask(__name__)
inventory_file = "inventory.json"
store = StoreManagementSystem(inventory_file)
@app.route("/", methods=["GET"])
def index():
inventory = store.display_inventory()
return render_template("index.html", inventory=inventory)
@app.route("/add_item", methods=["POST"])
def add_item():
item = request.form.get("item", "").strip()
if not item or not item.isalpha():
return "Invalid item name. Please enter a valid item name (only letters are allowed).", 400
quantity = request.form.get("quantity", type=int)
if quantity is None or quantity <= 0:
return "Invalid quantity. Please enter a valid positive integer value.", 400
message = store.add_item(item, quantity)
return redirect(url_for("index"))
@app.route("/remove_item", methods=["POST"])
def remove_item():
item = request.form.get("item")
quantity = request.form.get("quantity", type=int)
if item and quantity is not None:
message = store.remove_item(item, quantity)
return redirect(url_for("index"))
@app.route("/sell_item", methods=["POST"])
def sell_item():
item = request.form.get("item")
quantity = request.form.get("quantity", type=int)
price_per_unit = request.form.get("price_per_unit", type=float)
if item and quantity is not None and price_per_unit is not None:
result = store.sell_item(item, quantity, price_per_unit)
if isinstance(result, dict):
return render_template("bill.html", result=result)
return redirect(url_for("index"))
if __name__ == "__main__":
# Driver code to populate initial inventory
store.add_item("Apple", 10)
store.add_item("Banana", 5)
store.add_item("Orange", 8)
store.add_item("Mango", 12)
app.run(debug=True)
| sarthaknimbalkar/Store-Management-system | app.py | app.py | py | 1,958 | python | en | code | 0 | github-code | 90 |
4880499024 | from fileinput import filename
import json
from fpdf import FPDF
pdf = FPDF('P', 'mm', 'Letter')
pdf.add_page()
with open('resume.json') as resume:
data = json.load(resume)
pdf.ln(5)
pdf.set_font("times", 'B', 16)
pdf.cell(200, 6, data['Name'], ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 5, data['ContactNo'], ln=1)
pdf.cell(0, 5, data['Email'], ln=1)
pdf.cell(0, 5, data['Address'], ln=1)
pdf.set_font("times", 'B', 14)
pdf.cell(100, 15, "Educational Background", ln=1)
pdf.set_font("times", 'B', 12)
pdf.cell(0, 6.5, "Tertiary:", ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 6, data['College'], ln=1)
pdf.cell(0, 6, data['Address1'], ln=1)
pdf.cell(0, 6, data['Year1'], ln=1)
pdf.set_font("times", 'B', 12)
pdf.cell(0, 6.5, "Senior High School:", ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 6, data['SeniorHigh'], ln=1)
pdf.cell(0, 6, data['Address2'], ln=1)
pdf.cell(0, 6, data['Year2'], ln=1)
pdf.set_font("times", 'B', 12)
pdf.cell(0, 6.5, "Secondary:", ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 6, data['Secondary'], ln=1)
pdf.cell(0, 6, data['Address3'], ln=1)
pdf.cell(0, 6, data['Year3'], ln=1)
pdf.set_font("times", 'B', 14)
pdf.cell(100, 15, "Skills", ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 5, data['Skill1'], ln=1)
pdf.cell(0, 5, data['Skill2'], ln=1)
pdf.cell(0, 5, data['Skill3'], ln=1)
pdf.cell(0, 5, data['Skill4'], ln=1)
pdf.cell(0, 5, data['Skill5'], ln=1)
pdf.cell(0, 5, data['Skill6'], ln=1)
pdf.set_font("times", 'B', 14)
pdf.cell(100, 15, "Character Reference", ln=1)
pdf.set_font("times", '', 11)
pdf.cell(0, 5, data['CharacterReference1'], ln=1)
pdf.cell(0, 5, data['CharacterReference2'], ln=1)
pdf.cell(0, 5, data['CharacterReference3'], ln=1)
pdf.cell(0, 5, data['CharacterReference4'], ln=1)
pdf.cell(0, 5, data['CharacterReference5'], ln=1)
pdf.set_line_width(0.5)
pdf.line(x1=12, y1=39, x2=205, y2=39)
pdf.line(x1=12, y1=127, x2=205, y2=127)
pdf.line(x1=12, y1=173, x2=205, y2=173)
pdf.output("LOPEZ_Y-ANLAHSOPHIAA.pdf") | yanlahlopez/Assignment-9 | resume1.py | resume1.py | py | 2,008 | python | en | code | 1 | github-code | 90 |
13872908025 | # -*- coding: utf-8 -*-
"""
PHYS 512
Assignment 2 Problem 1
@author: James Nathan White (260772425)
"""
#I always import this stuff
import matplotlib.pyplot as plt
import random as r
import glob
import numpy as np
from scipy.stats import chi2
import scipy.optimize as opt
from scipy.stats import norm
import pandas as pd
from scipy import interpolate
################################################################################
##### Variable-Step-Size-Integrator from Class (With Repetative Function Calls)#
def lorentz(x):
return 1/(1+x**2)
def lazy_integrate_step(fun,x1,x2,tol):
#print('integrating from ',x1,' to ',x2)
x=np.linspace(x1,x2,5)
y=fun(x)
area1=(x2-x1)*(y[0]+4*y[2]+y[4])/6
area2=(x2-x1)*( y[0]+4*y[1]+2*y[2]+4*y[3]+y[4])/12
myerr=np.abs(area1-area2)
neval=len(x) #let's keep track of function evaluations
if myerr<tol:
return area2, myerr, neval
else:
xm=0.5*(x1+x2)
a1, leftErr, leftEval= lazy_integrate_step(fun,x1,xm,tol/2)
a2, rightErr, rightEval = lazy_integrate_step(fun,xm,x2,tol/2)
sumError = leftErr + rightErr
totEval = neval + leftEval + rightEval
return a1+a2, sumError, totEval
################################################################################
##### Variable-Step-Size-Integrator (Without Repetative Function Calls)#########
def integrate_step(fun,x1,x2,tol, XLIST = np.array([]), YLIST = np.array([])):
# print('integrating from ',x1,' to ',x2)
x=np.linspace(x1,x2,5)
y=np.zeros(len(x))
for i in range(len(x)):
if x[i] in XLIST: #if the point x[i] for this iteration is already present
index = np.where(XLIST == x[i])[0] #in the ongoing list of domain points for the entire
y[i] = YLIST[index] #integral (XLIST), then y(x[i]) has already been calculated
#and is not calculated again. Instead it is given the
#precalculated value from the ongoing list of y-values for
#the entire integral: YLIST
else: #if the point x[i] is not in XLIST, then y(x[i]) is not in
y[i] = fun(x[i]) #YLIST, so y(x[i]) is calculated here, and then XLIST and
XLIST = list(np.append(XLIST, x[i])) #YLIST are updated to inclued x[i] and y(x[i]) in the
YLIST = list(np.append(YLIST, y[i])) #correct order of x[i] ascending
XLIST, YLIST = [list(tuple) for tuple in zip(*sorted(zip(XLIST, YLIST)))]
XLIST = np.array(XLIST)
YLIST = np.array(YLIST)
area1=(x2-x1)*(y[0]+4*y[2]+y[4])/6
area2=(x2-x1)*( y[0]+4*y[1]+2*y[2]+4*y[3]+y[4])/12
myerr=np.abs(area1-area2)
neval=len(YLIST) #By my above book-keeping, the number of times y(x) is
#evaluated is simply len(YLIST)
# print("y(x) evaluations so far:", len(YLIST))
if myerr<tol: #If error is tolerable, returns area for this portion
return area2, myerr, neval, XLIST, YLIST #of the integral
else: #If error is not tolerable, computes integral of each half
xm=0.5*(x1+x2) #of the domain separately, doubling the precision. This is
#done via recurssion
a1, leftErr, leftEval, XLIST, YLIST= integrate_step(fun,x1,xm,tol/2, XLIST, YLIST)
a2, rightErr, rightEval, XLIST, YLIST = integrate_step(fun,xm,x2,tol/2, XLIST, YLIST)
sumError = leftErr + rightErr
totEval = len(YLIST)
return a1+a2, sumError, totEval, XLIST, YLIST
###############################################################################
# Demo: compare the improved (memoising) adaptive integrator against the
# "lazy" in-class version on three test integrands, then plot sample points.
#y=e^x integration with improved integrator
EXPstart = -1
EXPstop = 1
EXPf,EXPerr,EXPneval,EXPxlist,EXPylist=integrate_step(np.exp,EXPstart,EXPstop,1e-3)
# NOTE(review): EXPtrue (the analytic answer) is computed but never used below.
EXPtrue=np.exp(EXPstop)-np.exp(EXPstart)
#y=e^x integration with integrator from class
lazyEXPf,lazyEXPerr,lazyEXPneval=lazy_integrate_step(np.exp,EXPstart,EXPstop,1e-3)
print("Numerical Integral of e^x from -1 to 1:", EXPf)
print("Function Evaluations for Integral of e^x (Improved way):", EXPneval)
print("Function Evaluations for Integral of e^x ('Lazy' way):", lazyEXPneval, '\n')
#Lorentzian integration with improved integrator
LORENTZstart = -1
LORENTZstop = 1
LORENTZf,LORENTZerr,LORENTZneval,LORENTZxlist,LORENTZylist=integrate_step(lorentz,LORENTZstart,LORENTZstop,1e-3)
#Lorentzian integration with integrator from class
lazyLORENTZf,lazyLORENTZerr,lazyLORENTZneval=lazy_integrate_step(lorentz,LORENTZstart,LORENTZstop,1e-3)
print("Numerical Integral of the Lorentzian from -1 to 1:", LORENTZf)
print("Function Evaluations for Integral of the Lorentzian (Improved way):", LORENTZneval)
print("Function Evaluations for Integral of the Lorentzian ('Lazy' way):", lazyLORENTZneval, '\n')
#sin(x) integration with improved integrator
SINstart = 0
SINstop = np.pi
SINf,SINerr,SINneval,SINxlist,SINylist=integrate_step(np.sin,SINstart,SINstop,1e-3)
#sin(x) integration with integrator from class
lazySINf,lazySINerr,lazySINneval=lazy_integrate_step(np.sin,SINstart,SINstop,1e-3)
print("Numerical Integral of sin(x) from 0 to pi:", SINf)
print("Function Evaluations for Integral of sin(x) (Improved way):", SINneval)
print("Function Evaluations for Integral of sin(x) ('Lazy' way):", lazySINneval, '\n')
############### Plot of Sampled Points for e^x ################################
fig, ax = plt.subplots(1, figsize=(10,10))
# The two commented-out plot lines below look like leftovers from an earlier
# two-panel layout.
#ax = f1.add_subplot(2,1,1)
#ax.plot(np.linspace(EXPstart, EXPstop, 100), np.exp(np.linspace(EXPstart, EXPstop, 100)), 'r.')
ax.plot(EXPxlist, EXPylist, "m*", markersize = 15, label = 'sample points')
ax.set_xlabel("x")
ax.tick_params(axis='x', labelsize=15)
ax.set_ylabel("y=e^x")
ax.tick_params(axis='y', labelsize=15)
ax.set_title("Points Sampled to Compute Integral of e^x")
plt.show()
############################# SCRAP ###########################################
"""
def inlist(array, item):
if item in array:
itemindex = np.where(array==item)
mylist = itemindex[0].tolist()
return mylist #Returns index(es) of 'item' in 'array'
else:
return None#print(item, " not in list")
"""
| jwhitebored/PHYS-512 | Assignment 2/PHYS 512 Assignment 2 P1 Draft Final.py | PHYS 512 Assignment 2 P1 Draft Final.py | py | 6,803 | python | en | code | 0 | github-code | 90 |
23535041085 | B1 = (2,2,3)
B2 = (1,0,4)
Bs = [B1,B2]
# NOTE(review): this block cannot run as written:
#  - `i` and `j` are read on the next two lines but only assigned afterwards
#    (from `cont`, which is itself undefined) -> NameError at runtime.
#  - B1[i] is an int, so B1[i][1] would raise TypeError even if `i` existed.
#  - `start`, `rooms`, `end_i`, `end_j` and `marker` are never initialised.
#  - the while-loop never updates `i` or `j`, so it could not terminate.
# It appears to be scrap/work-in-progress for an interval-merging algorithm.
start_i = B1[i][1]
start_j = B2[j][1]
j = cont
i = cont + 1
rooms_i = Bs[i][0]
rooms_j = Bs[j][0]
while i + j < 2*len(Bs):
    # Initialization
    # use start from some, walk in the same
    if start_j < start_i:
        if start > start_j:
            # keep same start
            pass
        else:
            start = start_j
            rooms += rooms_j
        if end_i < end_j:
            start = end_i
        else:
            start = end_j
        end = start_i
        rooms += rooms_j
    else:
        start = start_i
        end = start_j
        rooms += rooms_i
    # NOTE(review): `new_element` is built but never appended anywhere.
    new_element = (rooms, marker, end)
    start = end
| vitorpbarbosa7/mit_6.006 | psets/ps2-template/book_test.py | book_test.py | py | 569 | python | en | code | 0 | github-code | 90 |
13245430630 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy
from torch.autograd import Variable
#from utils import *
import torch
import torch.nn as nn
import numpy as np
import torch
# Pin all RNGs and disable cuDNN autotuning so runs are reproducible.
np.random.seed(1337)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class ResidualNorm(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(LayerNorm(x)))."""

    def __init__(self, size, dropout):
        super(ResidualNorm, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        # Normalise first, run the wrapped sublayer, regularise, add skip path.
        normed = self.norm(x)
        branch = self.dropout(sublayer(normed))
        return x + branch
class MLP(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, model_depth, ff_depth, dropout):
        super(MLP, self).__init__()
        self.w1 = nn.Linear(model_depth, ff_depth)
        self.w2 = nn.Linear(ff_depth, model_depth)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w1(x))
        hidden = self.dropout(hidden)
        return self.w2(hidden)
class LayerNorm(nn.Module):
    """Layer normalisation over the last dim with learnable gain/bias."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias
        self.eps = eps

    def forward(self, x):
        # Standardise along the feature axis, then rescale and shift.
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normed = (x - mu) / (sigma + self.eps)
        return self.a_2 * normed + self.b_2
################################################################
# attention
class MultiHeadAttention (nn.Module):
    """Multi-head attention: project to Q/K/V, attend per head, re-project.

    ``x`` supplies the queries and ``kv`` supplies both keys and values, so
    the same module serves self-attention (x is kv) and encoder-decoder
    attention (kv is the encoder output).
    """
    def __init__ (self, n_heads, model_depth, bias=True):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        # Per-head width; assumes model_depth is divisible by n_heads.
        self.dk = model_depth//n_heads
        self.WQ = nn.Linear(model_depth, model_depth, bias=bias)
        self.WK = nn.Linear(model_depth, model_depth, bias=bias)
        self.WV = nn.Linear(model_depth, model_depth, bias=bias)
        self.WO = nn.Linear(model_depth, model_depth, bias=bias)
    def forward (self, x, kv, mask):
        batch_size = x.size(0)
        # Split the projections into heads: (batch, heads, seq, dk).
        Q = self.WQ(x ).view(batch_size, -1, self.n_heads, self.dk).transpose(1,2)
        K = self.WK(kv).view(batch_size, -1, self.n_heads, self.dk).transpose(1,2)
        V = self.WV(kv).view(batch_size, -1, self.n_heads, self.dk).transpose(1,2)
        x = attention(Q, K, V, mask=mask)
        # Merge heads back to (batch, seq, model_depth) before the output proj.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads*self.dk)
        return self.WO(x)
def attention(Q, K, V, mask=None):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(dk)) V."""
    scale = math.sqrt(Q.size(-1))
    scores = Q @ K.transpose(-2, -1)
    scores = scores / scale
    if mask is not None:
        # Broadcast the mask over the head dimension; masked slots get ~-inf
        # so softmax assigns them (almost) zero weight.
        scores = scores.masked_fill_(mask.unsqueeze(1) == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    return weights @ V
################################################################
# encoder
class Encoder(nn.Module):
    """A stack of ``n_layers`` EncoderLayer blocks with a final LayerNorm."""

    def __init__(self, n_layers, n_heads, model_depth, ff_depth, dropout):
        super(Encoder, self).__init__()
        stack = [EncoderLayer(n_heads, model_depth, ff_depth, dropout)
                 for _ in range(n_layers)]
        self.layers = nn.ModuleList(stack)
        self.lnorm = LayerNorm(model_depth)

    def forward(self, x, mask):
        # Thread the representation through each layer, then normalise once.
        for block in self.layers:
            x = block(x, mask)
        return self.lnorm(x)
class EncoderLayer(nn.Module):
    """Self-attention then feed-forward, each inside a residual-norm wrapper."""

    def __init__(self, n_heads, model_depth, ff_depth, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(n_heads, model_depth)
        self.resnorm1 = ResidualNorm(model_depth, dropout)
        self.ff = MLP(model_depth, ff_depth, dropout)
        self.resnorm2 = ResidualNorm(model_depth, dropout)

    def forward(self, x, mask):
        def run_self_attention(inp):
            # Queries, keys and values all come from the same sequence.
            return self.self_attn(inp, inp, mask)

        x = self.resnorm1(x, run_self_attention)
        return self.resnorm2(x, self.ff)
################################################################
# decoder
class Decoder(nn.Module):
    """A stack of ``n_layers`` DecoderLayer blocks with a final LayerNorm."""

    def __init__(self, n_layers, n_heads, model_depth, ff_depth, dropout):
        super(Decoder, self).__init__()
        stack = [DecoderLayer(n_heads, model_depth, ff_depth, dropout)
                 for _ in range(n_layers)]
        self.layers = nn.ModuleList(stack)
        self.lnorm = LayerNorm(model_depth)

    def forward(self, x, src_out, src_mask, tgt_mask):
        for block in self.layers:
            x = block(x, src_out, src_mask, tgt_mask)
        return self.lnorm(x)
class DecoderLayer (nn.Module):
    """Decoder block: masked self-attention, encoder attention, feed-forward.

    Each stage is wrapped in a ResidualNorm (pre-norm + dropout + skip).
    """
    def __init__ (self, n_heads, model_depth, ff_depth, dropout):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(n_heads, model_depth)
        self.resnorm1 = ResidualNorm(model_depth, dropout)
        self.enc_attn = MultiHeadAttention(n_heads, model_depth)
        self.resnorm2 = ResidualNorm(model_depth, dropout)
        self.ff = MLP(model_depth, ff_depth, dropout)
        self.resnorm3 = ResidualNorm(model_depth, dropout)
    def forward (self, x, src_out, src_mask, tgt_mask):
        # Self-attention over the target sequence (tgt_mask hides the future).
        x = self.resnorm1(x, lambda arg: self.self_attn(arg,arg, tgt_mask))
        # Cross-attention: queries from the decoder, keys/values from the
        # encoder output, masked by the source padding mask.
        x = self.resnorm2(x, lambda arg: self.enc_attn(arg,src_out, src_mask))
        x = self.resnorm3(x, self.ff)
        return x
################################################################
# embedder
class Embedding(nn.Module):
    """Token embedding scaled by sqrt(model_depth), plus positional encoding."""
    def __init__(self, vocab_size, model_depth):
        super(Embedding, self).__init__()
        self.lut = nn.Embedding(vocab_size, model_depth)  # lookup table
        self.model_depth = model_depth
        self.positional = PositionalEncoding(model_depth)
    def forward(self, x):
        # The sqrt scaling keeps embedding magnitudes comparable to the
        # positional-encoding values that get added next.
        emb = self.lut(x) * math.sqrt(self.model_depth)
        return self.positional(emb)
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position encodings to a (batch, seq, depth) input.

    Even channels carry sin(pos * w_i) and odd channels cos(pos * w_i) with
    geometrically spaced frequencies. The table is precomputed for ``max_len``
    positions and registered as a buffer so it follows the module across
    devices but is never trained.
    """
    def __init__(self, model_depth, max_len=5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, model_depth)
        position = torch.arange(0.0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0.0, model_depth, 2) *
                             -(math.log(10000.0) / model_depth))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, model_depth) for broadcasting
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Buffers never require grad, so the deprecated
        # torch.autograd.Variable(..., requires_grad=False) wrapper used by
        # older torch code is unnecessary; plain tensor addition suffices.
        return x + self.pe[:, :x.size(1)]
################################################################
# transformer
class Generator(nn.Module):
    """Final projection from model space to vocabulary log-probabilities."""

    def __init__(self, model_depth, vocab_size):
        super(Generator, self).__init__()
        self.ff = nn.Linear(model_depth, vocab_size)

    def forward(self, x):
        logits = self.ff(x)
        return F.log_softmax(logits, dim=-1)
class Transformer (nn.Module):
    """Encoder-decoder transformer.

    ``vocab_size`` may be an int (shared source/target vocabulary) or a pair
    (src_vocab_size, tgt_vocab_size); pass None to defer embedder creation to
    a later ``set_vocab_size`` call.
    """
    def __init__ (self, vocab_size, n_layers, n_heads, model_depth, ff_depth, dropout):
        super(Transformer, self).__init__()
        self.model_depth = model_depth
        self.encoder = Encoder(n_layers, n_heads, model_depth, ff_depth, dropout)
        self.decoder = Decoder(n_layers, n_heads, model_depth, ff_depth, dropout)
        if vocab_size is not None:
            if isinstance(vocab_size, int):
                self.set_vocab_size(vocab_size)
            else:
                self.set_vocab_size(vocab_size[0], vocab_size[1])
    def set_vocab_size (self, src_vocab_size, tgt_vocab_size=None):
        """Build embedders/generator and (re-)initialise all weight matrices."""
        if tgt_vocab_size is None:
            # Single vocabulary: share one embedding table for both sides.
            self.src_embedder = Embedding(src_vocab_size, self.model_depth)
            self.tgt_embedder = self.src_embedder
            self.generator = Generator(self.model_depth, src_vocab_size)
        else:
            self.src_embedder = Embedding(src_vocab_size, self.model_depth)
            self.tgt_embedder = Embedding(tgt_vocab_size, self.model_depth)
            self.generator = Generator(self.model_depth, tgt_vocab_size)
        for p in self.parameters():
            if p.dim() > 1:
                # In-place Xavier init; the non-underscore alias
                # nn.init.xavier_uniform is deprecated in modern torch.
                nn.init.xavier_uniform_(p)
    def forward(self, src, tgt, src_mask, tgt_mask):
        enc_out = self.encoder(self.src_embedder(src), src_mask)
        dec_out = self.decoder(self.tgt_embedder(tgt), enc_out, src_mask, tgt_mask)
        return dec_out
| sergsb/IUPAC2Struct | transformer.py | transformer.py | py | 8,020 | python | en | code | 25 | github-code | 90 |
2409144919 | import json
# Sample request payload for creating a customer-service work order.
# Field names suggest: cus_* = customer, sh_eq/cab_eq = shelf/cabinet
# equipment, cser/mter = equipment entry lists — TODO confirm with the API.
a = {
    "data_center_id": 1,
    "id": 12,
    "cus_brand_id": 63,
    "cus_brand_name": "0427 - 品牌 - 文琦",
    "cus_id": 111,
    "cus_name": "IP_带宽_机柜_01",
    "contact_id": 141,
    "contact_name": "IP_带宽_机柜_01",
    "contact_phone": "13454564522",
    "service_content": "设备 SN : 6F010311 设.位宜: R720 JJHMSZI 6F010311 32 一 33 操作要求:帮忙接上显示器看下什么情况,然后,关机把内存都重新拔插下。",
    "estimated_time": "2022-12-05 10:15:15",
    "priority": 1,
    "wot_id": 5,
    "operate_type": 4,
    "sh_eq": {
        "cser": [{
            "id": 4,
            "gsf_id": 1,
            "me_id": 1,
        }],
        "mter": []
    }
    ,
    "cab_eq": {
        "cser": [{
            # "id": 4,
            "r_id": 1,
            "me_id": 1,
            "u_bit_id_list": [1, 2]
        }],
        "mter": []
    }
}
# Customer-service work order association (original comment: 客服工单关联).
# ensure_ascii=False keeps the CJK text readable in the printed JSON.
print(json.dumps(a, ensure_ascii=False))
| ZainLiu/YXtest | 客服工单/创建工单数据.py | 创建工单数据.py | py | 983 | python | en | code | 0 | github-code | 90 |
23470322307 | # reverse_string.py
# Demonstrate three equivalent ways to reverse a string.
s1 = "Forever Young"
print(s1)

# 1) Iterate the characters back-to-front and append each one.
s2 = ""
for ch in reversed(s1):
    s2 = s2 + ch
print(s2)

# 2) Scan forward, prepending each character.
s3 = ""
for ch in s1:
    s3 = ch + s3
print(s3)

# 3) Extended slice with a negative step.
print(s1[::-1])
| dbiersach/scicomp101 | Session 08 - Histograms and Code Breaking/instructor/reverse_string.py | reverse_string.py | py | 193 | python | en | code | 0 | github-code | 90 |
18458661469 | n=int(input())
# Competitive-programming solution; reads two length-n integer arrays from
# stdin (n itself is read on the line just above this block).
a=list(map(int,input().split()))
b=list(map(int,input().split()))
# Total surplus of a over b; a deficit makes the task impossible.
dif=sum(a)-sum(b)
if dif<0:
    print(-1)
elif dif==0:
    # Exact balance: every position that differs must be changed once.
    cnt=0
    for i in range(n):
        if a[i]!=b[i]:
            cnt+=1
    print(cnt)
else:
    # Positive surplus: collect the per-position decreases (a[i] > b[i]) and
    # count positions already equal.
    difference=[]
    count_0=0
    for i in range(n):
        if a[i]-b[i]>0:
            difference.append(a[i]-b[i])
        elif a[i]==b[i]:
            count_0+=1
    # Greedily keep the smallest decreases while their sum still fits within
    # the surplus; kept positions presumably need no edit — TODO confirm
    # against the original problem statement.
    difference=sorted(difference)
    cnt=0
    keep=0
    for i in range(len(difference)):
        if cnt+difference[i]<=dif:
            cnt+=difference[i]
            keep+=1
        else:
            break
print(n-count_0-keep) | Aasthaengg/IBMdataset | Python_codes/p03151/s766024025.py | s766024025.py | py | 651 | python | en | code | 0 | github-code | 90 |
18208617899 | n = int(input())
# Competitive-programming solution; `n` is read on the line just above this
# block and a[i] appears to be the required leaf count at depth i of a binary
# tree — TODO confirm against the original problem statement.
a = list(map(int, input().split()))
if a[0] != 0:
    # Depth 0 can hold at most one node (the root), and only when n == 0.
    if n == 0 and a[0] == 1:
        print(1)
    else:
        print(-1)
    exit()
# ruiseki ("累積" = cumulative): suffix sums of a, i.e. ruiseki[i] is the
# total number of leaves at depth i or deeper.
ruiseki = a[::-1]
for i in range(1, n+1):
    ruiseki[i] += ruiseki[i-1]
ruiseki = ruiseki[::-1]
# node[i]: nodes placed at depth i; komoti ("子持ち"): nodes at depth i that
# still have capacity to spawn children at depth i+1.
node = [1]
komoti = [1]
for i in range(1, n+1):
    ai = a[i]
    if komoti[-1]*2 >= ai:
        # Each parent yields at most 2 children; cap by remaining leaf demand.
        node.append(min(ruiseki[i], komoti[-1]*2))
        komoti.append(node[-1]-ai)
    else:
        # Not enough capacity for the required leaves at this depth.
        print(-1)
        exit()
print(sum(node)) | Aasthaengg/IBMdataset | Python_codes/p02665/s930806016.py | s930806016.py | py | 487 | python | en | code | 0 | github-code | 90 |
class KeyStream():
    """RC4-style keystream generator with an extra counter whitening step.

    ``permute`` runs the RC4 key-scheduling algorithm (KSA); ``generate``
    performs one PRGA step and XORs the output byte with a slowly
    incrementing counter whose period depends on the key length.
    (Fix: the class header line had been corrupted by dataset metadata.)
    """
    def __init__(self):
        self.N = 256                            # state size in bytes
        self.key = [i for i in range(self.N)]   # identity permutation
        self.i = 0
        self.j = 0
        self.counter = 0                        # counts generate() calls
        self.factor = 256                       # counter divisor; overwritten by permute()
    def print(self):
        print(self.key)
    def reset(self):
        # NOTE(review): self.factor is not restored to 256 here, so key-length
        # state leaks across resets — confirm whether that is intended.
        self.key = [i for i in range(self.N)]
        self.i = 0
        self.j = 0
        self.counter = 0
    def reset_index(self):
        self.i = 0
        self.j = 0
    def permute(self, K: str):
        """RC4 key scheduling: shuffle the state permutation using key K."""
        j = 0
        for i in range(self.N):
            j = (j + self.key[i] + ord(K[i % len(K)])) % len(self.key)
            self.key[i], self.key[j] = self.key[j], self.key[i]
        # Counter divisor: a power of two derived from the key length.
        self.factor = 1 << (len(K) % 8)
    def generate(self):
        """Return the next keystream byte (0..255)."""
        self.i = (self.i + 1) % self.N
        self.j = (self.j + self.key[self.i]) % self.N
        self.key[self.i], self.key[self.j] = self.key[self.j], self.key[self.i]
        t = (self.key[self.i] + self.key[self.j]) % self.N
        self.counter += 1
        ctr = (self.counter // self.factor) % self.N
        return self.key[t] ^ ctr
def xor(a, b):
    """Bitwise exclusive-or of two integers."""
    # Equivalent to the expanded form (a & ~b) | (~a & b).
    return a ^ b
def bytes2str(bytearr: bytes) -> str:
    """Map each byte value to the character with that code point."""
    return "".join(map(chr, bytearr))
def str2bytes(string:str) -> list:
    """Return the code points of ``string`` as a list of ints.

    Note: despite the original annotation this returns a list, not ``bytes``;
    callers here only iterate it, so either works.
    """
    return [ord(i) for i in string]
def encrypt(plain: bytes, key: str) -> bytes:
    """Encrypt ``plain`` by XOR-ing each byte with the keystream for ``key``."""
    stream = KeyStream()
    stream.permute(key)
    out = bytearray()
    for byte in plain:
        out.append(byte ^ stream.generate())
    return bytes(out)
def encrypt_text(plaintext:str, key:str) -> str:
    """Encrypt ``plaintext`` with ``key`` and return the ciphertext as a str.

    Side effect: prints the plaintext and the encrypted byte values.
    """
    result = bytes2str(encrypt(str2bytes(plaintext), key))
    print("plaintext:", plaintext)
    print("encrypted:", str2bytes(result))
    return result
def decrypt(cipher: bytes, key: str) -> bytes:
    """Decrypt ``cipher``; XOR stream ciphers are symmetric, so this mirrors encrypt()."""
    stream = KeyStream()
    stream.permute(key)
    out = bytearray()
    for byte in cipher:
        out.append(byte ^ stream.generate())
    return bytes(out)
def decrypt_text(ciphertext:str, key:str) -> str:
    # XOR stream ciphers are symmetric, so decryption reuses encrypt().
    return bytes2str(encrypt(str2bytes(ciphertext), key))
def printfile(filename:str, buffer: bytes):
    """Write ``buffer`` to ``filename`` in binary mode.

    Despite the name, nothing is printed; the file is opened "wb" so the
    buffer must be bytes-like (the original ``str`` annotation was wrong).
    """
    with open(filename, "wb") as file:
        file.write(buffer)
if __name__ == '__main__':
    # Demo: encrypt an image file with a fixed key, then decrypt it again;
    # the round-trip result is written out below for manual comparison.
    openmode = "rb"
    filename = "dump/blue.png"
    plain = open(filename, openmode).read()
    key = "thisissecretkey"
    cipher = encrypt(plain, key)
    result = decrypt(cipher, key)
printfile("dump/output", result) | farishasim/StegoRC4 | cipher/rc4.py | rc4.py | py | 2,430 | python | en | code | 0 | github-code | 90 |
1563919940 | from SciStreams.config import validate_md
def test_validate_md():
    '''Smoke test: validate_md accepts a representative metadata dict.

    The validation schema is normally built from an external YAML file; here
    an equivalent name->type dict is supplied explicitly via validate_dict.
    '''
    # Expected type for each metadata key ("number", "int" or "str").
    vdict = dict(
        # name, type
        detector_SAXS_x0_pix="number",
        detector_SAXS_y0_pix="number",
        motor_SAXSx="number",
        motor_SAXSy="number",
        scan_id="int",
        measurement_type="str",
        sample_savename="str",
        sample_name="str",
        motor_bsx="number",
        motor_bsy="number",
        motor_bsphi="number",
        detector_SAXS_distance_m="number",
        calibration_energy_keV="number",
        calibration_wavelength_A="number",
        experiment_alias_directory="str",
        experiment_cycle="str",
        experiment_group="str",
        filename="str",
        sample_exposure_time="number",
        stitchback="int",
    )
    # Representative metadata record covering every key in vdict.
    md = dict(sample_name="test",
              motor_bsx=-15.17,
              motor_bsy=-16.9,
              motor_bsphi=-12,
              motor_SAXSx=-65,
              motor_SAXSy=-72.,
              detector_SAXS_x0_pix=0.,
              detector_SAXS_y0_pix=0.,
              scan_id=0,
              detector_SAXS_distance_m=5.,
              calibration_energy_keV=13.5,
              calibration_wavelength_A=0.9184,
              measurement_type="foo",
              experiment_alias_directory="/foo",
              experiment_cycle="2017_3",
              experiment_group="SciStream-test",
              filename="foo.tiff",
              # updated every time
              sample_savename="out",
              sample_exposure_time=10.,
              stitchback=True)
    # Should not raise for a fully-populated, correctly-typed record.
    validate_md(md, validate_dict=vdict)
    # NOTE(review): md2 is built but never validated or asserted on —
    # this looks like the start of a missing-key test that was never finished.
    md2 = md.copy()
    md2.pop('sample_name')
| CFN-softbio/SciStreams | SciStreams/tests/test_validate_data.py | test_validate_data.py | py | 1,765 | python | en | code | 0 | github-code | 90 |
42717971776 | import socket
import os
from tqdm import tqdm
class Server:
    """Minimal single-client TCP server that can push files with a progress bar.

    Protocol (as implemented here): every ``sendData`` call blocks until the
    client answers, so each packet is acknowledged; ``sendFile`` first sends
    ``"<path>@<size>"`` as a header, then the file in fixed-size chunks, then
    the literal marker ``"Envio_Completo"`` ("transfer complete").
    """

    def __init__(self):
        self.connectionOpen = False
        self.connection = ''
        self.clientAddr = ''
        self.address = ''

    def close(self):
        """Close the client connection (only valid after listening())."""
        self.connection.close()

    def listening(self, port, ip = ''):
        """Create an IPv4 TCP socket, bind it and block until a client connects.

        ip: address to bind to (default '' = all interfaces)
        port: port to listen on
        On return self.connection holds the accepted socket and
        self.clientAddr the client's address.
        """
        self.address = (ip, port)
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind(self.address)
        print(f"\nServer is listening on {ip}:{port}")
        server.listen()
        conn, addr = server.accept()
        print(f"\nClient connected: {addr}")
        self.connectionOpen = True
        self.connection = conn
        self.clientAddr = addr

    def sendData(self, data, size = 1024, format='utf-8',binary = False):
        """Send ``data`` to the client and return its reply (blocking).

        data: bytes to send
        size: maximum reply packet size (default 1024)
        format/binary: unused — kept for backward compatibility.
        Returns None (after printing a warning) when no client is connected.
        """
        if not self.connectionOpen:
            print("\nNo client connected")
            return
        else:
            self.connection.send(data)
            resp = self.connection.recv(size)
            return resp

    def sendFile(self, filePath, format = "utf-8", packetSize = 1024):
        """Stream the file at ``filePath`` to the client in packetSize chunks.

        format: unused — kept for backward compatibility.
        """
        fileSize = os.path.getsize(filePath)
        self.sendData(f"{filePath}@{fileSize}".encode('utf-8'))
        bar = tqdm(range(fileSize), f"Sending {filePath}", unit="B", unit_scale=True, unit_divisor=packetSize)
        # Fix: open the file in a context manager so the handle is always
        # closed (the original leaked it on every call).
        with open(filePath, 'rb') as file:
            while True:
                data = file.read(packetSize)
                if not data:
                    break
                self.sendData(data)
                bar.update(packetSize)
        self.sendData("Envio_Completo".encode('utf-8'))
| Luksmito/mini_ftp_server | Classes/Servidor.py | Servidor.py | py | 2,187 | python | en | code | 1 | github-code | 90 |
27968220352 | import uuid
import json
import random
from django.core.management.base import BaseCommand
from users import models as user_models
from locations import models as location_models
from notifications import models as notification_models
from django_seed import Seed
from django.db.models.expressions import RawSQL
from locations import reversePlace, locationThumbnail
from googleplaces import GooglePlaces
from django.conf import settings
from math import radians, degrees, sin, cos, asin, acos, sqrt
def createCity(cityId):
    """Create a City row (plus its Country/Continent if missing) for a Google place id.

    Resolves the place via ``reversePlace.reverse_place``, backfills the
    country and continent from the bundled JSON data files when they do not
    exist yet, fetches photo/thumbnail URLs, and links the 20 nearest
    existing cities.
    """
    def get_locations_nearby_coords(latitude, longitude, max_distance=3000):
        """Return existing City rows within ``max_distance`` km, nearest first.

        Distance is a great-circle formula evaluated in SQL via RawSQL.
        """
        gcd_formula = "6371 * acos(cos(radians(%s)) * \
            cos(radians(latitude)) \
            * cos(radians(longitude) - radians(%s)) + \
            sin(radians(%s)) * sin(radians(latitude)))"
        distance_raw_sql = RawSQL(
            gcd_formula,
            (latitude, longitude, latitude)
        )
        qs = location_models.City.objects.all().annotate(distance=distance_raw_sql).order_by('distance')
        if max_distance is not None:
            qs = qs.filter(distance__lt=max_distance)
        for i in qs:
            # NOTE(review): this loop appears to exist only to force queryset
            # evaluation — confirm it is still needed.
            pass
        return qs
    cityLatitude, cityLongitude, cityName, countryCode = reversePlace.reverse_place(cityId)
    nearCities = get_locations_nearby_coords(cityLatitude, cityLongitude, 3000)[:20]
    # Fix: explicit None checks — a latitude/longitude of exactly 0 is valid
    # and must not be treated as "missing" (the old truthiness test did).
    if (cityLatitude is not None and cityLongitude is not None
            and cityName and countryCode):
        try:
            country = location_models.Country.objects.get(country_code=countryCode)
        except location_models.Country.DoesNotExist:
            # Country not seeded yet: build it from the bundled JSON data.
            with open('pinner/locations/countryData.json', mode='rt', encoding='utf-8') as file:
                countryData = json.load(file)
                currentCountry = countryData[countryCode]
                countryName = currentCountry['name']
                countryNameNative = currentCountry['native']
                countryCapital = currentCountry['capital']
                countryCurrency = currentCountry['currency']
                countryPhone = currentCountry['phone']
                countryEmoji = currentCountry['emoji']
                continentCode = currentCountry['continent']
                latitude = currentCountry['latitude']
                longitude = currentCountry['longitude']
                try:
                    continent = location_models.Continent.objects.get(continent_code=continentCode)
                except location_models.Continent.DoesNotExist:
                    # Fix: was a bare ``except:``; catch only the expected miss.
                    with open('pinner/locations/continentData.json', mode='rt', encoding='utf-8') as file:
                        continentData = json.load(file)
                        continentName = continentData[continentCode]
                    try:
                        gp = locationThumbnail.get_photos(term=continentName).get_urls()
                        continentPhotoURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=450&w=450&fit=crop"
                        continentThumbnailURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=80&w=80&fit=crop"
                    except Exception:
                        # Photo lookup is best-effort; fall back to no image.
                        continentPhotoURL = None
                        continentThumbnailURL = None
                    continent = location_models.Continent.objects.create(
                        continent_name=continentName,
                        continent_photo=continentPhotoURL,
                        continent_thumbnail=continentThumbnailURL,
                        continent_code=continentCode
                    )
                try:
                    gp = locationThumbnail.get_photos(term=countryName).get_urls()
                    countryPhotoURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=450&w=450&fit=crop"
                    countryThumbnailURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=80&w=80&fit=crop"
                except Exception:
                    countryPhotoURL = None
                    countryThumbnailURL = None
                country = location_models.Country.objects.create(
                    country_code=countryCode,
                    country_name=countryName,
                    country_name_native=countryNameNative,
                    country_capital=countryCapital,
                    country_currency=countryCurrency,
                    country_phone=countryPhone,
                    country_emoji=countryEmoji,
                    country_photo=countryPhotoURL,
                    country_thumbnail=countryThumbnailURL,
                    continent=continent,
                    latitude=latitude,
                    longitude=longitude
                )
        try:
            gp = locationThumbnail.get_photos(term=cityName).get_urls()
            cityPhotoURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=450&w=450&fit=crop"
            cityThumbnailURL = gp+"?ixlib=rb-0.3.5&q=100&fm=jpg&crop=entropy&cs=faces&h=80&w=80&fit=crop"
        except Exception:
            cityPhotoURL = None
            cityThumbnailURL = None
        city = location_models.City.objects.create(
            city_id=cityId,
            city_name=cityName,
            country=country,
            city_photo=cityPhotoURL,
            city_thumbnail=cityThumbnailURL,
            latitude=cityLatitude,
            longitude=cityLongitude
        )
        print(city.city_name)
        for i in nearCities:
            city.near_city.add(i)
        city.save()
cityNames = [
"TOKYO, Japan",
"JAKARTA, Indonesia",
"New York",
"SEOUL, South Korea",
"MANILA, Philippines",
"Mumbai, India",
"Sao Paulo, Brazil",
"MEXICO CITY, Mexico",
"Delhi, India",
"Osaka, Japan",
"CAIRO, Egypt",
"Kolkata, India",
"Los Angeles",
"Shanghai, China",
"MOSCOW, Russia",
"BEIJING, China",
"BUENOS AIRES, Argentina",
"Istanbul, Turkey",
"Rio de Janeiro, Brazil",
"PARIS, France",
"Karachi, Pakistan",
"Nagoya, Japan",
"Chicago",
"Lagos, Nigeria",
"LONDON, United Kingdom",
"BANGKOK, Thailand",
"KINSHASA, Dem Rep of Congo",
"TEHRAN, Iran",
"LIMA, Peru",
"Dongguan, China",
"BOGOTA, Colombia",
"Chennai, India",
"DHAKA, Bangladesh",
"Essen, Germany",
"Tianjin, China",
"Lahore, Pakistan",
"Bangalore, India",
"Hyderabad, India",
"Johannesburg, South Africa",
"BAGHDAD, Iraq",
"Toronto, Canada",
"SANTIAGO, Chile",
"KUALA LUMPUR, Malaysia",
"San Francisco",
"Philadelphia",
"Wuhan, China",
"Miami",
"Dallas",
"MADRID, Spain",
"Ahmedabad, India",
"Boston",
"Belo Horizonte, Brazil",
"KHARTOUM, Sudan",
"Saint Petersburg, Russia",
"Shenyang, China",
"Houston",
"Pune, India",
"RIYADH, Saudi Arabia",
"SINGAPORE, Singapore",
"WASHINGTON",
"Yangon, Myanmar",
"Milan, Italy",
"Atlanta",
"Chongqing, China",
"Alexandria, Egypt",
"Nanjing, China",
"Guadalajara, Mexico",
"Barcelona, Spain",
"Chengdu, China",
"Detroit",
"ANKARA, Turkey",
"ATHENS, Greece",
"BERLIN, Germany",
"Sydney, Australia",
"Monterrey, Mexico",
"Phoenix",
"Busan, South Korea",
"Recife, Brazil",
"Bandung, Indonesia",
"Porto Alegre, Brazil",
"Melbourne, Australia",
"LUANDA, Angola",
"ALGIERS, Algeria",
"Hà Noi, Viet Nam",
"Montréal, Canada",
"PYONGYANG, North Korea",
"Surat, India",
"Fortaleza, Brazil",
"Medellín, Colombia",
"Durban, South Africa",
"Kanpur, India",
"ADDIS ABABA, Ethiopia",
"NAIROBI, Kenya",
"Jeddah, Saudi Arabia",
"Naples, Italy",
"KABUL, Afghanistan",
"Salvador, Brazil",
"Harbin, China",
"Kano, Nigeria",
"CAPE TOWN, South Africa",
"Curitiba, Brazil",
"Surabaya, Indonesia",
"San Diego",
"Seattle",
"ROME, Italy",
"Dar es Salaam, Tanzania",
"Taichung, China",
"Jaipur, India",
"CARACAS, Venezuela",
"DAKAR, Senegal",
"Kaohsiung, China",
"Minneapolis",
"Lucknow, India",
"AMMAN, Jordan",
"Tel Aviv-Yafo, Israel",
"Guayaquil, Ecuador",
"KYIV, Ukraine",
"Faisalabad, Pakistan",
"Mashhad, Iran",
"Izmir, Turkey",
"Rawalpindi, Pakistan",
"TASHKENT, Uzbekistan",
"Katowice, Poland",
"Campinas, Brazil",
"Daegu, South Korea",
"Changsha, China",
"Nagpur, India",
"San Juan, Philippines",
"Aleppo, Syria",
"LISBON, Portugal",
"Frankfurt am Main, Germany",
"Nanchang, China",
"Birmingham, United Kingdom",
"Tampa",
"Medan, Indonesia",
"TUNIS, Tunisia",
"Manchester, United Kingdom",
"PORT-AU-PRINCE, Haiti",
"DAMASCUS, Syria",
"Fukuoka, Japan",
"SANTO DOMINGO, Dominican Republic",
"HAVANA, Cuba",
"Cali, Colombia",
"Denver",
"St. Louis, United",
"Colombo, Brazil",
"Dubai, United Arab Emirates",
"Baltimore",
"Sapporo, Japan",
"Rotterdam, Netherlands",
"Vancouver, Canada",
"Preston, United Kingdom",
"Patna, India",
"WARSAW, Poland",
"Bonn, Germany",
"ACCRA, Ghana",
"BUCHAREST, Romania",
"Yokohama, Japan",
"Incheon, South Korea",
"BRASILIA, Brazil",
"West Midlands, United Kingdom",
"Giza, Egypt",
"Quezon City, Philippines",
"Chittagong, Bangladesh",
"STOCKHOLM, Sweden",
"Puebla de Zaragoza, Mexico",
"BAKU, Azerbaijan",
"Ibadan, Nigeria",
"Brisbane, Australia",
"MINSK, Belarus",
"Sikasso, Mali",
"Maracaibo, Venezuela",
"Hamburg, Germany",
"BUDAPEST, Hungary",
"Manaus, Brazil",
"Ségou, Mali",
"VIENNA, Austria",
"Indore, India",
"ASUNCION, Paraguay",
"Tianmen, China",
"BELGRADE, Serbia",
"Nakuru, Kenya",
"Koulikoro, Mali",
"Kobe, Japan",
"Hama, Syria",
"Esfahan, Iran",
"TRIPOLI, Libya",
"West Yorkshire, United Kingdom",
"Vadodara, India",
"QUITO, Ecuador",
"Jinjiang, China",
"Mopti, Mali",
"Perth, Australia",
"Daejeon, South Korea",
"Kyoto, Japan",
"Xiantao, China",
"Tangerang, Indonesia",
"Kharkiv, Ukraine",
"Gwangju, South Korea",
"Semarang, Indonesia",
"Novosibirsk, Russia",
"Neijiang, China",
"MAPUTO, Mozambique",
"Douala, Cameroon",
"Kayes, Mali",
"Tabriz, Iran",
"Homs, Syria",
"MONTEVIDEO, Uruguay",
"Ekaterinoburg, Russia",
"Juárez, Mexico",
"Kawasaki, Japan",
"Tijuana, Mexico",
"Bursa, Turkey",
"Al-Hasakeh, Syria",
"Makkah, Saudi Arabia",
"YAOUNDE, Cameroon",
"Palembang, Indonesia",
"Nizhny Novgorod, Russia",
"León, Mexico",
"Guarulhos, Brazil",
"Heze, China",
"Auckland, New Zealand",
"Omdurman, Sudan",
"Valencia, Venezuela",
"San Antonio",
"Almaty, Kazakhstan",
"PHNOM PENH, Cambodia",
"Yiyang, China",
"Goiânia, Braz",
"Cixi, China",
"Karaj, Iran",
"MOGADISHU, Somalia",
"Varanasi, India",
"Córdoba, Argentina",
"KAMPALA, Uganda",
"Shiraz, Iran",
"Multan, Pakistan",
"Madurai, India",
"München, Germany",
"Kalyan, India",
"Quanzhou, China",
"Adana, Turkey",
"Bazhong, China",
"Fès, Morocco",
"OUAGADOUGOU, Burkina Faso",
"Caloocan, Philippines",
"Kalookan, Philippines",
"Saitama, Japan",
"PRAGUE, Czech Republic",
"Kumasi, Ghana",
"Meerut, India",
"Hyderabad, Pakistan",
"OTTAWA, Canada",
"Yushu, China",
"Barranquilla, Colombia",
"Hiroshima, Japan",
"Chifeng, China",
"Nashik, India",
"Makasar, Indonesia",
"SOFIA, Bulgaria",
"Rizhao, China",
"Davao, Philippines",
"Samara, Russia",
"Omsk, Russia",
"Gujranwala, Pakistan",
"Adelaide, Australia",
"La Matanza, Argentina",
"Rosario, Argentina",
"Jabalpur, India",
"Kazan, Russia",
"Jimo, China",
"Dingzhou, China",
"Calgary, Canada",
"YEREVAN, Armenia",
"Jamshedpur, India",
"Zürich, Switzerland",
"Pikine-Guediawaye, Senegal",
"Anqiu, China",
"Chelyabinsk, Russia",
"CONAKRY, Guinea",
"Asansol, India",
"Ulsan, South Korea",
"Toluca, Mexico",
"Marrakech, Morocco",
"Dhanbad, India",
"TBILISI, Georgia",
"Hanchuan, China",
"LUSAKA, Zambia",
"Qidong, China",
"Faridabad, India",
"Rostov-na-Donu, Russia",
"Edmonton, Canada",
"Allahabad, India",
"Beiliu, China",
"Dnipropetrovsk, Ukraine",
"Gongzhuling, China",
"Qinzhou, China",
"Ufa, Russia",
"Sendai, Japan",
"Volgograd, Russia",
"GUATEMALA CITY, Guatemala",
"AMSTERDAM, Netherlands",
"BRUSSELS, Belgium",
"BAMAKO, Mali",
"Ziyang, China",
"ANTANANARIVO, Madagascar",
"Amritsar, India",
"Vijayawada, India",
"Haora, India",
"Donetsk, Ukraine",
"Fuzhou, China",
"Pimpri Chinchwad, India",
"DUBLIN, Ireland",
"Rajkot, India",
"Sao Luís, Brazil",
"Béni-Mellal, Morocco",
"Kaduna, Nigeria",
"Kitakyushu, Japan",
"Perm, Russia",
"Odessa, Ukraine",
"Qom, Iran",
"Yongchuan, China",
"Peshawar, Pakistan",
"ULAANBAATAR, Mongolia",
"Sao Gonçalo, Brazil",
"Ghaziabad, India",
"Köln, Germany",
"Ahwaz, Iran",
"Suwon, South Korea",
"San Luis Potosí, Mexico",
"Gaziantep, Turkey",
"Krasnoyarsk, Russia",
"Chiba, Japan",
"Voronezh, Russia",
"Durg-Bhilai Nagar, India",
"Maceió, Brazil",
"Al-Madinah, Saudi Arabia",
"Seongnam, South Korea",
"San Jose",
"MANAGUA, Nicaragua",
"Safi, Morocco",
"Soweto, South Africa",
"Cartagena, Colombia",
"Torino, Italy",
"Lattakia, Syria",
"Mérida, Mexico",
"Göteborg, Sweden",
"Torreón, Mexico",
"Salé, Morocco",
"Tyneside, United Kingdom",
"Shubra-El-Khema, Egypt",
"Mombasa, Kenya",
"TEGUCIGALPA, Honduras",
"Tiruchchirappalli, India",
"Saratov, Russia",
"Santiago de los Caballeros, Dominican",
"LA PAZ, Bolivia",
"Sakai, Japan",
"El Alto, Bolivia",
"Bogor, Indonesia",
"Kermanshah, Iran",
"Liverpool, United Kingdom",
"Yanshi, China",
"Guwahati, India",
"Konya, Turkey",
"Barquisimeto, Venezuela",
"Valencia, Spain",
"Guilin, China",
"Hamamatsu, Japan",
"Deir El-Zor, Syria",
"BISHKEK, Kyrgyzstan",
"BENGHAZI, Libya",
"Zaporizhya, Ukraine",
"Gaoyou, China",
"Marseille, France",
"Bandar Lampung, Indonesia",
"Niigata, Japan",
"Indianapolis",
"Haiphong, Viet Nam",
"Arequipa, Peru",
"Jacksonville",
"Tanger, Morocco",
"Dandong, China",
"KISHINEV, Moldova",
"Krasnodar, Russia",
"ZAGREB, Croatia",
"Port Elizabeth, South Africa",
"Mendoza, Argentina",
"Khulna, Bangladesh",
"Malang, Indonesia",
"Padang, Indonesia",
"Chihuahua, Mexico",
"Campo Grande, Brazil",
"Lódz, Poland",
"Goyang, South Korea",
"Benin City, Nigeria",
"Bucheon, South Korea",
"Kraków, Poland",
"Lviv, Ukraine",
"Salem, India",
"Ad-Dammam, Saudi Arabia",
"Samut Prakan, Thailand",
"Nampho, North Korea",
"Columbus",
"Bareilly, India",
"JERUSALEM, Israel",
"Cuernavaca, Mexico",
"RIGA, Latvia",
"Québec, Canada",
"Cebu, Philippines",
"Aguascalientes, Mexico",
"Tolyatti, Russia",
"Hamilton, Canada",
"Osasco, Brazil",
"Nonthaburi, Thailand",
"Blantyre City, Malawi",
"Hamhung, North Korea",
"Jalandhar, India",
"Al-Rakka, Syria",
"NIAMEY, Niger",
"Xiangtan, China",
"Winnipeg, Canada",
"Oran, Algeria",
"Kota, India",
"Sevilla, Spain",
"Navi Mumbai, India",
"Port Harcourt, Nigeria",
"Saltillo, Mexico",
"Khartoum North, Sudan",
"Shizuoka, Japan",
"Yuanjiang, China",
"Raipur, India",
"Kryviy Rig, Ukraine",
"Querétaro, Mexico",
"PRETORIA, South Africa",
"Meknès, Morocco",
"Okayama, Japan",
"Santo André, Brazil",
"RABAT, Morocco",
"Pakanbaru, Indonesia",
"Memphis",
"Joao Pessoa, Brazil",
"KATHMANDU, Nepal",
"Antalya, Turkey",
"Kumamoto, Japan",
"Palermo, Italy",
"Nottingham, United Kingdom",
"Mosul, Iraq",
"Hermosillo, Mexico",
"Morelia, Mexico",
"Tétouan, Morocco",
"Barnaul, Russia",
"Jaboatao dos Guarapes, Brazil",
"Cotonou, Benin",
"Zaragoza, Spain",
"Tampico, Mexico",
"Morón, Argentina",
"La Plata, Argentina",
"Ciudad Guayana, Venezuela",
"Moradabad, India",
"Acapulco, Mexico",
"Veracruz, Mexico",
"Ulyanovsk, Russia",
"Wroclaw, Poland",
"Puente Alto, Chile",
"Gorakhpur, India",
"Fort Worth",
"San Miguel de Tucumán, Argentina",
"The Hague, Netherlands",
"Culiacán Rosales, Mexico",
"Maiduguri, Nigeria",
"Genova, Italy",
"Izhevsk, Russia",
"Jeonju, South Korea",
"COLOMBO, Sri Lanka",
"Zaria, Nigeria",
"Anlu, China",
"Sao José dos Campos, Brazil",
"Charlotte",
"Malmö, Sweden",
"Kagoshima, Japan",
"Yaroslave, Russia",
"Contagem, Brazil",
"Zamboanga, Philippines",
"Orumiyeh, Iran",
"Kisumu, Kenya",
"Uberlândia, Brazil",
"El Paso",
"Yunzhou, China",
"Kénitra, Morocco",
"Diyarbakir, Turkey",
"Jurong, China",
"Cúcuta, Colombia",
"Dortmund, Germany",
"Cochabamba, Bolivia",
"Cheongju, South Korea",
"Chongjin, North Korea",
"Stuttgart, Germany",
"KINGSTON, Jamaica",
"Milwaukee",
"Sorocaba, Brazil",
"Glasgow, United Kingdom",
"Khabarovsk, Russia",
"Irkutsk, Russia",
"Tyumen, Russia",
"Lomas de Zamora, Argentina",
"Funabashi, Japan",
"Düsseldorf, Germany",
"Içel, Turkey",
"Maanshan, China",
"Bandjarmasin, Indonesia",
"Callao, Peru",
"Poznan, Poland",
"Kayseri, Turkey",
"Quetta, Pakistan",
"HELSINKI, Finland",
"Novokuznetsk, Russia",
"Málaga, Spain",
"Hachioji, Japan",
"Ribeirao Prêto,",
"NOUAKCHOTT, Mauritania",
"Dezhou, China",
"Makhachkala, Russia",
"Bristol, United Kingdom",
"ASTANA, Kazakhstan",
"Yizhou, China",
"Nashville-Davidson",
"Orenburg, Russia",
"Cancun, Mexico",
"OSLO, Norway",
"Cuiabá, Brazil",
"VILNIUS, Lithuania",
"Bremen, Germany",
"Feira de Santana, Brazil",
"Portland",
"Reynosa, Mexico",
"Ilorin, Nigeria",
"Oklahoma City",
"Nakhon Ratchasima, Thailand",
"Kerman, Iran",
"ISLAMABAD, Pakistan",
"DUSHANBE, Tajikistan",
"VIENTIANE, Laos",
"ABU DHABI, United Arab Emirates",
"Shimkent, Kazakhstan",
"Imbaba, Egypt",
"SKOPLJE, Macedonia",
"Kadhimain, Iraq",
"Kemerovo, Russia",
"Duisburg, Germany",
"Rasht, Iran"
]
class Command(BaseCommand):
    """Seed the database with demo cities, users and travel notifications."""
    help = "It seeds the DB with tons of stuff"
    def handle(self, *args, **options):
        # CREATE CITY
        # Resolve each hard-coded city name to a Google place_id and create a
        # City row.  NOTE(review): one Places API request per city name, so a
        # full run consumes significant API quota — confirm before running.
        google_places = GooglePlaces(settings.GOOGLE_MAPS_KEY)
        for i in cityNames:
            query_result = google_places.text_search(query=i, language="en",
                                                     types="(cities,)",
                                                     )
            createCity(query_result.places[0].place_id)
        # CREATE USER
        # Build 300 fake users with adjective+noun usernames and random
        # residence/nationality/current_city drawn from the rows just seeded.
        user_seeder = Seed.seeder()
        randomCountry = location_models.Country.objects.all()
        randomCity = location_models.City.objects.all()
        with open('pinner/users/adjectives.json', mode='rt', encoding='utf-8') as adjectives:
            with open('pinner/users/nouns.json', mode='rt', encoding='utf-8') as nouns:
                # The file handles are shadowed by the parsed JSON lists below.
                adjectives = json.load(adjectives)
                nouns = json.load(nouns)
                user_seeder.add_entity(
                    user_models.User,
                    300,
                    {
                        "uuid": lambda x: uuid.uuid4(),
                        "username": lambda x: random.choice(adjectives) + random.choice(nouns).capitalize(),
                        "residence": lambda x: random.choice(randomCountry),
                        "nationality": lambda x: random.choice(randomCountry),
                        "is_staff": False,
                        "is_superuser": False,
                        "current_city": lambda x: random.choice(randomCity),
                        "current_country": None,
                        "current_continent": None,
                        "is_dark_mode": True,
                        "is_hide_photos": False,
                        "is_hide_trips": False,
                        "is_hide_cities": False,
                        "is_hide_countries": False,
                        "is_hide_continents": False,
                        "is_auto_location_report": True,
                        "fbId": None,
                        "appleId": None,
                        "is_verified_phone_number": False,
                        "is_verified_email_address": False,
                        "avatar_url": None,
                        "app_avatar_url": None,
                        "push_token": None,
                        "distance": 0,
                        "website": None,
                    },
                )
                user_seeder.execute()
        # CREATE MOVENOTIFICATION
        # 2000 random "moved to city" events; country/continent are filled in
        # from the city in the UPDATE MOVENOTIFICATION pass below.
        move_otification_seeder = Seed.seeder()
        allUsers = user_models.User.objects.all()
        randomCity = location_models.City.objects.all()
        move_otification_seeder.add_entity(
            notification_models.MoveNotification,
            2000,
            {
                "actor": lambda x: random.choice(allUsers),
                "city": lambda x: random.choice(randomCity),
                "country": None,
                "continent": None,
            },
        )
        move_otification_seeder.execute()
        # UPDATE USER
        # Derive country/continent from each user's current city and total up
        # the great-circle distance of the user's trip history.
        allUser = user_models.User.objects.all()
        for user in allUser:
            distance = 0
            user.current_country = user.current_city.country
            user.current_continent = user.current_city.country.continent
            trips = notification_models.MoveNotification.objects.filter(actor=user).order_by('-created_at')
            try:
                # `trip` itself is unused; consecutive pairs are read via
                # trips[i] / trips[i+1], and the IndexError at the final pair
                # terminates the pairwise accumulation.
                for i, trip in enumerate(trips):
                    try:
                        lon1, lat1, lon2, lat2 = map(
                            radians, [trips[i].city.longitude, trips[i].city.latitude, trips[i+1].city.longitude, trips[i+1].city.latitude])
                        # Spherical law of cosines; 6371 is Earth's mean radius (km).
                        dist = 6371 * (
                            acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon1 - lon2))
                        )
                        distance += dist
                    except (ZeroDivisionError, IndexError) as e:
                        print(e)
                user.distance = round(distance)
                user.save()
            except notification_models.MoveNotification.DoesNotExist:
                # User without any trips: leave distance at 0.
                pass
        # UPDATE MOVENOTIFICATION
        # Back-fill country/continent on every notification from its city.
        allMoveNotification = notification_models.MoveNotification.objects.all()
        for i in allMoveNotification:
            i.country = i.city.country
            i.continent = i.city.country.continent
            i.save()
        self.stdout.write(self.style.SUCCESS(f"Everything seeded"))
| plusbeauxjours/pinner-backend | pinner/users/management/commands/mega_seed.py | mega_seed.py | py | 23,337 | python | en | code | 0 | github-code | 90 |
35698852635 | # This file computes the square root of a number x, with a given precision.
x = 19023192.0  # Debug value
precision = 0.000001  # Requested absolute precision on sqrt_guess**2 - x
# Guard clauses: square root is undefined for x < 0 and trivially 0 for x == 0.
if x < 0:
    print("Error: x<0")
    exit()
if x == 0:
    print("square root: 0")
    exit()
# Newton's method (1st-order Taylor truncation of f(t) = sqrt(t)):
# given a guess g with g**2 close to x, the next iterate is
#     g' = g + (x - g**2) / (2*g)
# which converges quadratically to sqrt(x).
# Bug fixed: the original step multiplied the correction by an extra 0.5
# (`.5 * (1/(2*sqrt_guess)) * (x - g**2)`), turning Newton's method into a
# damped fixed-point iteration with only linear convergence.  The dead
# pre-loop assignment `error=1000.0` (overwritten before first use) was
# also removed.
iteration = 0
sqrt_guess = 5.0
error = abs(sqrt_guess*sqrt_guess - x)
print("Error: " + str(error))
while error > precision:
    print(iteration)
    iteration += 1
    sqrt_guess = sqrt_guess + (x - sqrt_guess*sqrt_guess) / (2*sqrt_guess)
    error = abs(sqrt_guess*sqrt_guess - x)
    print("Guess: " + str(sqrt_guess))
    print("Error: " + str(error))
print("Approximation: " + str(sqrt_guess))
print("Error: " + str(error))
| Pozidriv/Square-Root | sq_root.py | sq_root.py | py | 1,145 | python | en | code | 0 | github-code | 90 |
3833273959 | import pandas as pd
from omnivector.abstraction import AbstractDB
class LanceDB(AbstractDB):
    """
    LanceDB is a vector database that uses Lance to store and search vectors.

    Every operation lazily imports ``lancedb`` and reconnects using the
    ``config["lancedb"]["DB_PATH"]`` setting inherited from AbstractDB.
    All vectors live in a single table named ``my_table``.
    """
    # Name of the single table this backend stores all vectors in.
    TABLE_NAME = "my_table"

    def __init__(self):
        super().__init__()

    def create_index(self):
        """No-op: index creation is not implemented for Lance yet."""
        # not sure how to do this in Lance
        pass

    def _connect(self):
        """Open and return a connection to the configured Lance database."""
        import lancedb
        return lancedb.connect(self.config["lancedb"]["DB_PATH"])

    def delete(self, ids):
        """Delete the rows whose ``id`` column is in *ids*."""
        db = self._connect()
        tbl = db.open_table(self.TABLE_NAME)
        # ids are formatted into a SQL-like predicate; they are assumed to be
        # numeric row ids, not attacker-controlled strings.
        ids = ", ".join(str(v) for v in ids)
        tbl.delete(f"id IN ({ids})")

    def add(self, ids, vectors, metadata=None):
        """Insert *vectors* keyed by *ids*, with optional per-row *metadata*.

        :param ids: sequence of row ids.
        :param vectors: array-like with a ``.tolist()`` method (e.g. numpy).
        :param metadata: optional sequence of records merged column-wise.
        """
        data = pd.DataFrame({"id": ids})
        db = self._connect()
        if metadata is not None:
            meta_df = pd.DataFrame.from_records(metadata)
            data = pd.concat([data, meta_df], axis=1)
        data["vector"] = vectors.tolist()
        try:
            tbl = db.open_table(self.TABLE_NAME)
            tbl.add(data)
        except Exception:
            # Bug fix: was a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit).  Opening failed — most likely the
            # table does not exist yet — so create it with this first batch.
            db.create_table(self.TABLE_NAME, data)

    def vector_search(self, vector, k=3):
        """Return the *k* nearest rows to *vector* as a pandas DataFrame."""
        db = self._connect()
        tbl = db.open_table(self.TABLE_NAME)
        return tbl.search(vector).limit(k).to_df()
| vinid/omnivector | omnivector/lancedb.py | lancedb.py | py | 1,303 | python | en | code | 0 | github-code | 90 |
40328493195 | # -*- coding: utf-8 -*-
# Install the MySQL server: zy@ubuntu:~$ sudo apt-get install mysql-server mysql-client
# (you will be asked to set the root user's password once during installation)
# Install the Python package: zy@ubuntu:~$ sudo pip3 install PyMySQL
# http://www.runoob.com/python3/python3-mysql.html
# Creating the database
# Connect to MySQL with the "Navicat for MySQL" client;
# create a new database: File -> New Database
# set the database name:  Database Name: yhb
# set the character set:  Character set: utf8mb4 -- UTF-8 Unicode  # utf8mb4 is a superset of utf8
# set the collation:      Collation: utf8mb4_unicode_520_ci
#   e.g. utf8_danish_ci
#   "ci" is short for 'case insensitive', i.e. comparisons ignore letter case
#   Collations of the same character set differ in sort order, string-comparison accuracy (the same two characters may sort differently in different languages) and performance.
#   e.g. utf8_general_ci sorts slightly less accurately than utf8_unicode_ci (usually no difference for English users) but performs slightly better for sorting and comparison.
import pymysql.cursors
# Connect to the database
connection = pymysql.connect(host='localhost',
                             user='user',
                             password='passwd',
                             db='db',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
try:
    with connection.cursor() as cursor:
        # Insert one record
        sql = "INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)"
        cursor.execute(sql, ('webmaster@python.org', 'very-secret'))
        # The connection does not autocommit, so an explicit commit is required
        connection.commit()
    with connection.cursor() as cursor:
        # Read one record back
        sql = "SELECT `id`, `password` FROM `users` WHERE `email`=%s"
        cursor.execute(sql, ('webmaster@python.org',))
        result = cursor.fetchone()
        print(result)
finally:
    connection.close()
# Example two
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Create a cursor object with the cursor() method
cursor = db.cursor()
# Run a SQL query with the execute() method
cursor.execute("SELECT VERSION()")
# Fetch a single row with the fetchone() method.
data = cursor.fetchone()
print ("Database version : %s " % data)
# Close the database connection
db.close()
# Creating a table
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Create a cursor object with the cursor() method
cursor = db.cursor()
# Drop the table first if it already exists
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# Create the table
sql = """CREATE TABLE EMPLOYEE (
         FIRST_NAME  CHAR(20) NOT NULL,
         LAST_NAME  CHAR(20),
         AGE INT,
         SEX CHAR(1),
         INCOME FLOAT )"""
cursor.execute(sql)
# Close the database connection
db.close()
# Insert operation
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Get an operation cursor with the cursor() method
cursor = db.cursor()
# SQL INSERT statements (sql2 shows %-interpolation; parameterized queries are safer)
sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
         LAST_NAME, AGE, SEX, INCOME)
         VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
sql2 = "INSERT INTO EMPLOYEE(FIRST_NAME, \
       LAST_NAME, AGE, SEX, INCOME) \
       VALUES ('%s', '%s', '%d', '%c', '%d' )" % \
       ('Mac', 'Mohan', 20, 'M', 2000)
try:
    # Execute the SQL statements
    cursor.execute(sql)
    cursor.execute(sql2)
    # Commit to the database
    db.commit()
except:
    # Roll back if an error occurred
    db.rollback()
# Close the database connection
db.close()
# Query all EMPLOYEE rows whose salary (INCOME) field is greater than 1000
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Get an operation cursor with the cursor() method
cursor = db.cursor()
# SQL SELECT statement
sql = "SELECT * FROM EMPLOYEE \
       WHERE INCOME > '%d'" % (1000)
try:
    # Execute the SQL statement
    cursor.execute(sql)
    # Fetch the full list of records
    results = cursor.fetchall()
    for row in results:
        fname = row[0]
        lname = row[1]
        age = row[2]
        sex = row[3]
        income = row[4]
        # Print the result
        print ("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \
               (fname, lname, age, sex, income ))
except:
    print ("Error: unable to fecth data")
# Close the database connection
db.close()
# The update operation modifies table data; this example sets AGE = AGE + 1 for every row whose SEX field is 'M'
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Get an operation cursor with the cursor() method
cursor = db.cursor()
# SQL UPDATE statement
sql = "UPDATE EMPLOYEE SET AGE = AGE + 1 WHERE SEX = '%c'" % ('M')
try:
    # Execute the SQL statement
    cursor.execute(sql)
    # Commit to the database
    db.commit()
except:
    # Roll back on error
    db.rollback()
# Close the database connection
db.close()
# Example: delete every row of EMPLOYEE with AGE greater than 20
# Open a database connection
db = pymysql.connect("localhost","testuser","test123","TESTDB" )
# Get an operation cursor with the cursor() method
cursor = db.cursor()
# SQL DELETE statement
sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20)
try:
    # Execute the SQL statement
    cursor.execute(sql)
    # Commit the change
    db.commit()
except:
    # Roll back on error
    db.rollback()
# Close the connection
db.close()
# Example: the transaction mechanism guarantees data consistency.
# A transaction should have 4 properties: atomicity, consistency, isolation and durability — the ACID properties.
# For databases that support transactions, in Python database programming an implicit transaction starts automatically when the cursor is created.
# commit() applies all of the cursor's pending updates; rollback() rolls back all of the cursor's pending operations.  Each of these methods starts a new transaction.
# SQL DELETE statement
sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20)
try:
    # Execute the SQL statement
    cursor.execute(sql)
    # Commit to the database
    db.commit()
except:
    # Roll back on error
    db.rollback()
# pymysql.Connect() parameter reference
# host(str): MySQL server address
# port(int): MySQL server port number
# user(str): user name
# passwd(str): password
# db(str): database name
# charset(str): connection encoding
#
# Methods supported by the connection object
# cursor() create and return a cursor using this connection
# commit() commit the current transaction
# rollback() roll back the current transaction
# close() close the connection
#
# Methods supported by the cursor object
# execute(op) execute a database query or command
# fetchone() fetch the next row of the result set
# fetchmany(size) fetch the next `size` rows of the result set
# fetchall() fetch all rows of the result set
# rowcount() return the number of returned/affected rows
# close() close the cursor object
def create_modify_time():
    """Demonstrate per-row creation and modification timestamps.

    Creates a ``class_member`` table whose ``modify_time`` TIMESTAMP column
    MySQL updates automatically, inserts three rows two seconds apart, then
    updates one row so its ``modify_time`` changes while ``create_time``
    stays fixed.

    NOTE(review): relies on module-level MYSQL_HOST / MYSQL_USER /
    MYSQL_PASSWORD / MYSQL_DB constants that are not defined in this file —
    confirm where they are expected to come from before running.
    """
    sql3 = '''CREATE TABLE class_member(
    id TINYINT(2) AUTO_INCREMENT PRIMARY KEY,
    name VARCHAR(20) NOT NULL UNIQUE,
    age TINYINT(2) NOT NULL,
    create_time DATETIME NOT NULL,
    modify_time TIMESTAMP
    );'''
    import time
    db = pymysql.connect(host=MYSQL_HOST,
                         user=MYSQL_USER,
                         password=MYSQL_PASSWORD,
                         db=MYSQL_DB,
                         charset='utf8mb4',
                         cursorclass=pymysql.cursors.DictCursor)
    cursor = db.cursor()
    cursor.execute(sql3)
    # Three inserts spaced 2 s apart so the create_time values differ visibly.
    cursor.execute('INSERT INTO class_member(name,age,create_time) VALUES ("jack",24,NOW())')
    db.commit()
    time.sleep(2)
    cursor.execute('INSERT INTO class_member(name,age,create_time) VALUES ( "lily",25,NOW())')
    db.commit()
    time.sleep(2)
    cursor.execute('INSERT INTO class_member(name,age,create_time) VALUES ("lucy",25,NOW())')
    db.commit()
    time.sleep(2)
    # Updating jack bumps his modify_time (TIMESTAMP auto-update) only.
    cursor.execute('UPDATE class_member SET age=25 WHERE name="jack"')
    db.commit()
    time.sleep(2)
# cursor.execute('create table dj1 (a char(1), b TIMESTAMP(6) )')
# cursor.execute('insert into dj1 values (1,null)') # b字段自动补全插入时间
# In[201]: cursor.execute('update dj1 set a=9 where a=1; ') # 修改后,b字段的时间也会更新
# In[172]: cursor.execute('CREATE TABLE python_tstamps ( a char(1), ts TIMESTAMP(6) )')
# In[230]: cursor.execute('create table dj2 (a char(1),b TIMESTAMP(6) NOT NULL DEFAULT 20170329)')
# Out[230]: 0
# In[232]: cursor.execute('create table dj2 (a char(1),b TIMESTAMP(6) NOT NULL DEFAULT 20170329, c timestamp(6) NOT NULL DEFAULT 20170330 )')
# Out[232]: 0
# In[239]: cursor.execute('create table dj3 (a char(1),b TIMESTAMP(6) , c timestamp(6) NOT NULL DEFAULT 20170330 )')
# Out[239]: 0
# 字段类型为TIMESTAMP时,若设置了默认值,则数据有更新时,其值并不变动;当不设置默认值是,数据有变动,则也会变动;并且不设置默认值,需在设置默认值之前设置
# 简言之:有个属性ON UPDATE CURRENT_TIMESTAMP,导致更新数据时,即便未涉及到该列,该列数据也被自动更新。
# '1.MySQL默认表的第一个timestamp字段为NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP属性。
# 2.MySQL只允许一个timestamp字段拥有[DEFAULT CURRENT_TIMESTAMP |ON UPDATE CURRENT_TIMESTAMP]属性'
# cursor.execute('create table dj1 (a char(1), b TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP , c TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP )')
| gswyhq/hello-world | mysql/使用python3操作MySQL.py | 使用python3操作MySQL.py | py | 9,569 | python | zh | code | 9 | github-code | 90 |
31291403310 | import os
from tqdm import tqdm
import csv
import io
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import zipfile
from pathlib import Path
import numpy as np
import cv2
def load_data(file_path):
    """Read a tab-separated pairs file and return its rows, skipping the header line."""
    with open(file_path, 'r') as handle:
        reader = csv.reader(handle, delimiter='\t')
        all_rows = [parsed for parsed in reader]
    return all_rows[1:]
def load_images(dataset_zip_path, rows):
    '''Load every image pair referenced by *rows*.

    :param dataset_zip_path: folder containing the LFW person
        sub-directories; forwarded unchanged to loadImagePairFromRow.
    :param rows: parsed pairs-file rows (3 columns = same person,
        4 columns = different people).
    :return: five numpy arrays: first images (float32), second images
        (float32), y_true labels (1.0 same / 0.0 different),
        first titles, second titles.
    '''
    path = Path(dataset_zip_path)
    parent_dir = path.parent.absolute()  # NOTE(review): computed but never used
    first_image_list = []
    second_image_list = []
    first_image_title_list = []
    second_image_title_list = []
    y_true_array = []
    num_of_images = len(rows)
    # tqdm only adds a progress bar; iteration order is unchanged.
    for i, row in tqdm(iterable=enumerate(rows), total=num_of_images):
        first_image, second_image, y_true, first_image_title, second_image_title = loadImagePairFromRow(
            dataset_zip_path,
            row)
        first_image_list.append(first_image)
        second_image_list.append(second_image)
        y_true_array.append(y_true)
        first_image_title_list.append(first_image_title)
        second_image_title_list.append(second_image_title)
    # Stack the per-pair results into numpy arrays for the model.
    first_image_list = np.array(first_image_list).astype('float32')
    second_image_list = np.array(second_image_list).astype('float32')
    y_true_array = np.array(y_true_array)
    first_image_title_list = np.array(first_image_title_list)
    second_image_title_list = np.array(second_image_title_list)
    return first_image_list, second_image_list, y_true_array, first_image_title_list, second_image_title_list
def loadImagePairFromRow(dataset_zip_path, row):
    """Load one image pair described by a pairs-file row.

    A 3-column row means two images of the same person (label 1.0); a
    4-column row names two different people (label 0.0).

    :return: (first_image, second_image, label, first_title, second_title)
        where titles are ``<person>_<image_number>``.

    Improvements over the original: the dead ``image_title = []`` local was
    removed and the two nearly identical branches were collapsed so the
    loading/title logic exists only once.
    """
    if len(row) == 3:
        # same person: columns are (name, img#1, img#2)
        first_person, first_number, second_number = row[0], row[1], row[2]
        second_person = first_person
        label = 1.0
    else:
        # different people: columns are (name1, img#1, name2, img#2)
        first_person, first_number, second_person, second_number = row[0], row[1], row[2], row[3]
        label = 0.0
    first_image = loadImage(dataset_zip_path, first_person, first_number)
    second_image = loadImage(dataset_zip_path, second_person, second_number)
    first_image_title = first_person + "_" + first_number
    second_image_title = second_person + "_" + second_number
    return first_image, second_image, label, first_image_title, second_image_title
def loadImage(images_folder, person_name, image_number):
    """Load one LFW image as a grayscale (105, 105, 1) uint8 array."""
    # Canonical LFW layout: <folder>/<person>/<person>_NNNN.jpg (zero-padded to 4 digits).
    filename = r"{0}/{1}/{1}_{2:04d}.jpg".format(images_folder, person_name, int(image_number))
    im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    #im = Image.open(filename).convert('L')
    im = resize(im)
    # Append a trailing channel axis for the network input.
    im = np.expand_dims(np.array(im), -1)
    return im # (105, 105, 1) after resize (the raw LFW frame is 250x250)
def resize(im):
    """Downscale an image to the fixed 105x105 network input size."""
    target_size = (105, 105)
    shrunk = cv2.resize(im, target_size, interpolation=cv2.INTER_AREA)
    return shrunk
def split_train_datasets(train_dataset, ratio=0.1):
    """Split *train_dataset* into (train, validation) parts.

    The last ``ratio`` fraction of the samples becomes the validation part.
    """
    cut = int(len(train_dataset) * (1.0 - ratio))
    return train_dataset[:cut], train_dataset[cut:]
def print_images(row, row_title):
    """Display an image pair side by side, titled with their identifiers (blocks until the window closes)."""
    fig, axes = plt.subplots(nrows=1, ncols=2)
    flat_axes = axes.ravel()
    for position in (0, 1):
        flat_axes[position].imshow(row[position])
        flat_axes[position].set_title(row_title[position])
    plt.show()
def save_dataset_to_npy(data_dir, train_dataset, y_array_train, first_image_title_train_list, second_image_title_train_list, test_dataset, y_array_test, first_image_title_test_list, second_image_title_test_list):
    """Persist the eight dataset arrays as .npy files under *data_dir*.

    Returns True on success; on failure the exception is printed and
    re-raised.
    """
    print('saving dataset to npy files')
    named_arrays = [
        ('pairsDevTrain.npy', train_dataset),
        ('y_array_train.npy', y_array_train),
        ('first_image_title_train_list.npy', first_image_title_train_list),
        ('second_image_title_train_list.npy', second_image_title_train_list),
        ('pairsDevTest.npy', test_dataset),
        ('y_array_test.npy', y_array_test),
        ('first_image_title_test_list.npy', first_image_title_test_list),
        ('second_image_title_test_list.npy', second_image_title_test_list),
    ]
    try:
        for file_name, array in named_arrays:
            np.save(os.path.join(data_dir, file_name), array)
        is_npy_saved = True
    except Exception as e:
        is_npy_saved = False
        print(e)
        raise
    print('saved dataset to npy files in: ', data_dir)
    return is_npy_saved
def load_dataset_from_npy(data_dir):
    """Load the eight cached dataset arrays from *data_dir*, if all exist.

    Returns a 9-tuple: (is_npy_loaded, train_dataset, y_array_train,
    first_image_title_train_list, second_image_title_train_list,
    test_dataset, y_array_test, first_image_title_test_list,
    second_image_title_test_list).  When any file is missing,
    ``is_npy_loaded`` is False and all arrays are None.

    Bug fixed: the original re-assigned ``is_npy_loaded`` once per file, so
    only the LAST file's existence decided the flag — a cache with earlier
    files missing was reported as loaded with None entries.  Now the flag is
    True only when every file exists.
    """
    print('loading dataset from npy files in: ' , data_dir)
    # Order matters: it must match the positions in the returned tuple.
    file_names = ['pairsDevTrain.npy',
                  'y_array_train.npy',
                  'first_image_title_train_list.npy',
                  'second_image_title_train_list.npy',
                  'pairsDevTest.npy',
                  'y_array_test.npy',
                  'first_image_title_test_list.npy',
                  'second_image_title_test_list.npy']
    arrays = [None] * len(file_names)
    try:
        paths = [os.path.join(data_dir, name) for name in file_names]
        # The cache counts as present only when ALL files exist.
        is_npy_loaded = all(Path(p).is_file() for p in paths)
        if is_npy_loaded:
            arrays = [np.load(p) for p in paths]
    except Exception as e:
        is_npy_loaded = False
        print(e)
        raise
    if is_npy_loaded:
        print('loaded dataset from npy files in: ' , data_dir)
    else:
        print('no npy files found')
    return (is_npy_loaded, arrays[0], arrays[1], arrays[2], arrays[3],
            arrays[4], arrays[5], arrays[6], arrays[7])
def load_dataset(dataset_zip_path, train_file, test_file):
    '''Load the full train/test pair dataset, using an .npy cache when present.

    :param dataset_zip_path: path to the LFW zip archive; its parent folder
        is used both for the extracted images and for the .npy cache.
    :param train_file: tab-separated pairsDevTrain file path.
    :param test_file: tab-separated pairsDevTest file path.
    :return: (train_dataset, y_array_train, train first titles, train second
        titles, test_dataset, y_array_test, test first titles, test second
        titles) where each *_dataset is [first_images, second_images].
    '''
    path = Path(dataset_zip_path)
    data_dir = path.parent.absolute()
    # Fast path: reuse the cached .npy arrays when they all exist.
    is_npy_loaded ,train_dataset, y_array_train, first_image_title_train_list, second_image_title_train_list, \
    test_dataset, y_array_test, first_image_title_test_list, second_image_title_test_list = load_dataset_from_npy(data_dir)
    if not is_npy_loaded:
        # check if the zip was extracted already
        images_folder = os.path.join(data_dir, 'lfw2/lfw2')
        if not os.path.isdir(images_folder):
            with zipfile.ZipFile(dataset_zip_path, 'r') as zip_ref:
                zip_ref.extractall(data_dir)
        train_rows = load_data(train_file)
        first_image_train_list, second_image_train_list, y_array_train, first_image_title_train_list, second_image_title_train_list = load_images(
            images_folder, train_rows)
        # normalize data
        first_image_train_list = pre_process(first_image_train_list)
        second_image_train_list = pre_process(second_image_train_list)
        train_dataset = [first_image_train_list, second_image_train_list]
        test_rows = load_data(test_file)
        first_image_test_list, second_image_test_list, y_array_test, first_image_title_test_list, second_image_title_test_list = load_images(
            images_folder,
            test_rows)
        # normalize data
        first_image_test_list = pre_process(first_image_test_list)
        second_image_test_list = pre_process(second_image_test_list)
        test_dataset = [first_image_test_list, second_image_test_list]
        # Cache everything so the next call takes the fast path.
        save_dataset_to_npy(data_dir, train_dataset, y_array_train, first_image_title_train_list, second_image_title_train_list,test_dataset, y_array_test, first_image_title_test_list, second_image_title_test_list)
    return train_dataset, y_array_train, first_image_title_train_list, second_image_title_train_list, \
           test_dataset, y_array_test, first_image_title_test_list, second_image_title_test_list
def pre_process(image_list):
    """Scale raw 8-bit pixel intensities into the [0, 1] range."""
    normalized = image_list / 255
    return normalized
def split_train_val(train_dataset, y_array_train, ratio=0.1):
    """Carve a validation split off the tail of a paired training set.

    *train_dataset* is [first_images, second_images]; the last ``ratio``
    fraction of pairs (and matching labels) becomes the validation split.
    Returns (train_dataset, y_train, val_dataset, y_val).
    """
    cut = int(len(train_dataset[0]) * (1.0 - ratio))
    first_pairs, second_pairs = train_dataset[0], train_dataset[1]
    train_part = [first_pairs[:cut], second_pairs[:cut]]
    val_part = [first_pairs[cut:], second_pairs[cut:]]
    return train_part, y_array_train[:cut], val_part, y_array_train[cut:]
# if __name__ == '__main__':
# images_folder = r'C:\Users\USER\Desktop\lfwa\lfw2\lfw2'
# train_file = r'C:\Users\USER\Desktop\lfwa\lfw2\lfw2\pairsDevTrain.txt'
# test_file = r'C:\Users\USER\Desktop\lfwa\lfw2\lfw2\pairsDevTest.txt'
# train_dataset, y_array_train, train_titles, test_dataset, y_array_test, test_titles = load_dataset(images_folder,
# train_file,
# test_file)
# print_images(train_dataset[0], train_titles[0])
| southjohn64/ex2_dl | data_loader.py | data_loader.py | py | 11,048 | python | en | code | 0 | github-code | 90 |
46067153669 | import csv
import pandas as pd
# Match tournaments from the results file against the prize-money file by
# substring on both tournament name and type, writing the mapping (and a
# per-match debug printout) to tournamentIndex.csv.
tournaments = pd.read_csv('C:/Users/garye/Downloads/finaldf.csv')
prize_money = pd.read_csv('C:/Users/garye/Downloads/prize_money.csv', encoding = 'latin1')
#print(tournaments)
#print(prize_money.title)
count = 0
# Fix: the output file is now opened in a `with` block so the handle is
# closed (and buffers flushed) even if the matching loop raises.
with open('tournamentIndex.csv', 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(['Tournament', 'title', 'Type'])
    # O(n*m) pairwise scan; acceptable for these small CSVs.
    for i, j in tournaments.iterrows():
        for x, y in prize_money.iterrows():
            if j.Tournament in y.Tournament and j.Type in y.Type:
                count += 1
                print(j.Tournament)
                print(j.Type)
                print(y.Tournament)
                print(y.Type)
                print()
                csv_writer.writerow([j.Tournament, y.Tournament, j.Type])
print(count)
| GaryKellyIT/Visualising-Data | Data_Vis_Assignment1/Python/indexMatcher.py | indexMatcher.py | py | 805 | python | en | code | 0 | github-code | 90 |
19949440636 | import psycopg2 , csv
from config import host,database,user,password
conn = psycopg2.connect(
host= host
,user= user
,password=password
,database=database
)
conn.autocommit = True
cur = conn.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS PhoneBook(
id SERIAL PRIMARY KEY
,name VARCHAR(50)
,phone VARCHAR(20)
)""")
def add_data(name, phone):
    """Insert a single (name, phone) row into PhoneBook.

    Bug fixed: the original opened a context-managed cursor but then issued
    the INSERT on the module-global ``cur``; the statement now runs on the
    local cursor, which is properly closed when the block exits.
    """
    with conn.cursor() as cursor:
        cursor.execute("""INSERT INTO PhoneBook(name , phone)
        VALUES (%s , %s)""", (name, phone))
def delete(pattern):
    """Delete every row whose name or phone contains *pattern* (case-insensitive).

    Bug fixed: the query now runs on the local context-managed cursor
    instead of the module-global ``cur`` that the original leaked onto.
    """
    with conn.cursor() as cursor:
        cursor.execute("""DELETE FROM PhoneBooK WHERE name ILIKE %s OR phone ILIKE %s
        """, ('%' + pattern + '%', '%' + pattern + '%'))
def update(name, phone):
    """Upsert: insert (name, phone) when no matching row exists, else set the phone.

    A row matches when either the name or the phone equals the arguments.
    Bug fixed: statements now run on the local context-managed cursor rather
    than the module-global ``cur``; ``== None`` replaced with ``is None``.
    """
    with conn.cursor() as cursor:
        # PostgreSQL permits an empty select list; we only care whether a row exists.
        cursor.execute("""SELECT FROM PhoneBook WHERE name = %s OR phone = %s
        """, (name, phone))
        user = cursor.fetchone()
        if user is None:
            cursor.execute("""INSERT INTO PhoneBook (name , phone)
            VALUES (%s , %s )
            """, (name, phone))
        else:
            cursor.execute("""UPDATE PhoneBook
            SET phone = %s WHERE name = %s""", (phone, name))
def querying_data():
    """Interactive report: print all rows, a LIMIT/OFFSET page, or pattern matches.

    Bug fixes: the "all" branch printed the whole ``rows`` list once per row
    (``print(rows)`` inside ``for row in rows``) — it now prints each row;
    and all statements run on the local context-managed cursor instead of
    the module-global ``cur``.
    """
    with conn.cursor() as cursor:
        print('If you want to return all records, enter "all" ')
        print('If you want to return records with pagination, enter "pagination"')
        print('If you want to return all records based on a pattern, enter "ptn"')
        choice = input()
        if choice == 'all':
            cursor.execute("SELECT * FROM PhoneBook")
            rows = cursor.fetchall()
            for row in rows:
                print(row)
        elif choice == 'pagination':
            limit = input('Number of limit: ')
            offset = input('Number of offset: ')
            cursor.execute("""SELECT * FROM PhoneBook
            LIMIT %s
            OFFSET %s""", (limit, offset))
            rows = cursor.fetchall()
            for row in rows:
                print(row)
        elif choice == 'ptn':
            pattern = input('Enter a pattern: ')
            cursor.execute("""SELECT * FROM PhoneBooK WHERE name ILIKE %s OR phone ILIKE %s
            """, ('%' + pattern + '%', '%' + pattern + '%'))
            rows = cursor.fetchall()
            for row in rows:
                print(row)
def upload_csv(filename):
    """Bulk-load (name, phone) rows from a CSV file; the first line is a header.

    Bug fixed: rows are now inserted through the local context-managed
    cursor instead of the module-global ``cur``.
    """
    with conn.cursor() as cursor:
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)  # skip the header row
            for row in reader:
                name = row[0]
                phone = row[1]
                cursor.execute("""INSERT INTO PhoneBook(name , phone)
                VALUES (%s , %s )
                """, (name, phone))
def add_many_users():
    """Prompt for (name, phone) entries until 'e'; insert them all, then
    return the entries whose phone number is invalid.

    A phone is invalid when it is not exactly 10 digits.  Note: invalid
    entries are still inserted (matching the original behaviour); the
    caller only reports them afterwards.

    Fixes: the original held an unused ``conn.cursor()`` context open for
    the entire interactive session (a server resource pinned while waiting
    on input()) — it has been removed since all inserts go through
    add_data(); the separate insert and validation loops were merged.
    """
    users = []
    while True:
        name = input("Enter name (or 'e' to exit): ")
        if name.lower() == 'e':
            break
        phone = input('Enter phone: ')
        users.append((name, phone))
    incorrect = []
    for name, phone in users:
        add_data(name, phone)
        if len(phone) != 10 or not phone.isdigit():
            incorrect.append((name, phone))
    return incorrect
# Interactive menu loop: dispatch on the user's choice until '6' (Quit).
while True:
    print('PHONEBOOK PROGRAM')
    print('1. Add Entry')
    print('2. Querying data')
    print('3. Upload csv File')
    print('4. Delete Entry')
    print('5. Add many new users')
    print('6. Quit')
    choice = input('Enter your choice (1-6): ')
    if choice == '1':
        name = input("Enter name: ")
        phone = input('Enter phone: ')
        # "Add Entry" actually upserts: an existing name gets its phone replaced.
        update(name , phone)
    elif choice == '2':
        querying_data()
    elif choice == '3':
        filename = input('Enter csv filename: ')
        upload_csv(filename)
    elif choice == '6':
        # Quit is checked out of numeric order; behaviour is unaffected.
        break
    elif choice == '4':
        pattern = input('Enter a pattern to delete: ')
        delete(pattern)
    elif choice == '5':
        incorrect = add_many_users()
        if incorrect:
            print("The following entries have incorrect phone numbers:")
            for name, phone in incorrect:
                print(name, phone)
    else:
        print('Invalid choice.')
conn.close() | dosymzhvnn/pp2-22B030169 | tsis11/phonebook2.py | phonebook2.py | py | 4,351 | python | en | code | 0 | github-code | 90 |
15456581036 | from collections import namedtuple
# Use with https://github.com/nidefawl/cpp-indexer
cpp_index_file = "cpp-index.class.csv"
def getDepthInTree(cpp_class_list, cpp_class):
    """Return the inheritance depth of *cpp_class* (0 for a class with no bases).

    Base-class names that do not resolve to any entry in *cpp_class_list*
    contribute depth 0, so a class whose bases are all unknown has depth 1.
    """
    if not cpp_class.baseclasses:
        return 0
    parent_depths = [
        getDepthInTree(cpp_class_list, candidate)
        for base_name in cpp_class.baseclasses
        for candidate in cpp_class_list
        if candidate.name == base_name
    ]
    return max(parent_depths, default=0) + 1
def main():
    """Find C++ classes that nothing derives from and mark them `final` in-place.

    Reads the indexer's CSV, computes the leaf (non-derived) classes, then
    rewrites the matching declaration lines in the source tree.  WARNING:
    this edits files under /data/dev/daw/src directly.
    """
    first_line = None
    rest_lines = []
    # First line is the CSV header; the rest are one class per line.
    with open(cpp_index_file, "r") as f:
        first_line = f.readline().strip()
        rest_lines = [line.strip() for line in f.readlines()]
    if first_line is None:
        print("Error: first line is None")
        return
    # Build a namedtuple type from the header so rows can be accessed by column name.
    column_names = first_line.split(",")
    empty_tuple = namedtuple("cppclass", field_names=column_names)
    cpp_class_list = []
    for lineIdx in rest_lines:
        lineIdx = lineIdx.strip()
        if lineIdx == "":
            continue
        data = lineIdx.split(",")
        data = [d.strip("\"").strip() for d in data]
        # Column 2 holds the semicolon-separated base-class list.
        baseclass_list = data[2].split(";")
        data[2] = [] if baseclass_list == [""] else baseclass_list
        class_tuple = empty_tuple(*data)
        # print(class_tuple)
        cpp_class_list.append(class_tuple)
    print("Total number of classes: {}".format(len(cpp_class_list)))
    # A class is "final" material when it has bases but no class derives from it.
    final_classes = []
    for cpp_class in cpp_class_list:
        if len(cpp_class.baseclasses) == 0:
            continue
        # check if any other class derives from cpp_class
        derived = False
        for other_class in cpp_class_list:
            if cpp_class.name in other_class.baseclasses:
                derived = True
                break
        if not derived:
            depth = getDepthInTree(cpp_class_list, cpp_class)
            final_classes.append((cpp_class, depth))
    # sort final_classes by depth
    final_classes = sorted(final_classes, key=lambda x: x[1], reverse=False)
    print("Total number of final classes: {}".format(len(final_classes)))
    # Collect the per-file edits first, then apply them file by file below.
    editLocsPerFile = {}
    for cpp_class, depth in final_classes:
        # print(cpp_class, "Depth: {}".format(depth))
        # print(cpp_class.baseclasses, cpp_class.name, "Depth: {}".format(depth))
        file = f'/data/dev/daw/src/{cpp_class.file}'
        # location is "line:col:offset", all 1-based per the indexer output.
        lineIdx, colIdx, fileOffset = cpp_class.location.split(":")
        lineIdx, colIdx, fileOffset = int(lineIdx), int(colIdx), int(fileOffset)
        # read in that line
        with open(file, "r") as f:
            listLines = f.readlines()
        if lineIdx > len(listLines):
            print("Error: line number is too high")
            continue
        strLineCurrent = listLines[int(lineIdx) - 1].strip()
        # print("File: {}, Line: {}, Col: {}, Offset: {}".format(file, line, col, offset))
        # Only touch `class X : bases` style declarations not already marked final.
        if " : " in strLineCurrent and not 'final' in strLineCurrent:
            typeDeclSplit = strLineCurrent.split(" : ", 1)
            strLineNew = typeDeclSplit[0] + " final : " + typeDeclSplit[1]
            if not file in editLocsPerFile:
                editLocsPerFile[file] = []
            editLocsPerFile[file].append((lineIdx, colIdx, fileOffset, strLineNew))
    print("Total number of files to edit: {}".format(len(editLocsPerFile)))
    for file, editLocs in editLocsPerFile.items():
        print(file)
        with open(file, "r") as f:
            listLines = f.readlines()
        for lineIdx, colIdx, fileOffset, strLineNew in editLocs:
            if lineIdx > len(listLines):
                print("Error: line number is too high")
                continue
            strLineCurrent = listLines[int(lineIdx) - 1].strip()
            # Show a diff-style preview of the replacement, then apply it.
            print("-", strLineCurrent)
            print("+", strLineNew)
            listLines[lineIdx - 1] = strLineNew + "\n"
        with open(file, "w") as f:
            f.writelines(listLines)
if __name__ == "__main__":
    main()
| nidefawl/bass-studio | scripts/cpp-index-find-final-classes.py | cpp-index-find-final-classes.py | py | 4,048 | python | en | code | 74 | github-code | 90 |
22875896861 | #! python3
# printTable.py - Takes a list of list of strings and displays it in
# a well organised table (right-justified)
tableData = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
def printTable(tableData):
    """Print *tableData* (a list of columns) as a right-justified text table.

    Each inner list is one column of the output: output row r is built from
    tableData[0][r], tableData[1][r], ...  Every column is right-justified
    to the width of its longest entry; columns are concatenated with no
    separator (matching the original output exactly).

    Bug fixed: the original's second loop read the loop variable ``i``
    leaked from the first ``for`` loop (raising NameError on empty input)
    and tracked indices with manual ``while`` counters.  This version
    iterates explicitly and prints an empty table for empty input.
    """
    # Width of each column = length of its longest string.
    colWidths = [max(len(item) for item in column) for column in tableData]
    table = ''
    # Number of output rows = length of the inner lists (assumed equal).
    numRows = len(tableData[0]) if tableData else 0
    for rowIndex in range(numRows):
        for colIndex, column in enumerate(tableData):
            table += column[rowIndex].rjust(colWidths[colIndex])
        table += '\n'
    print(table)
printTable(tableData)
| ssamjang/automatePython | printTable.py | printTable.py | py | 952 | python | en | code | 0 | github-code | 90 |
18823863859 | '''
Update the specified item.
Removes the "rating" attribute.
[Notes] No error is raised even when the "rating" attribute does not exist (it is silently skipped).
'''
from decimal import Decimal
from pprint import pprint
import json
import boto3
def update_book(isbn, dynamodb=None):
    """Remove the ``rating`` attribute from the Books item with the given ISBN.

    :param isbn: book key; coerced to int before use.
    :param dynamodb: optional boto3 DynamoDB resource; when omitted, a local
        endpoint (http://localhost:8000) with dummy credentials is used.
    :return: the raw ``update_item`` response (UPDATED_NEW values).
    """
    if not dynamodb:
        # Default to the local DynamoDB endpoint with throwaway credentials.
        dynamodb = boto3.resource(
            'dynamodb',
            endpoint_url="http://localhost:8000",
            region_name='dummy',
            aws_access_key_id='dummy',
            aws_secret_access_key='dummy')
    books_table = dynamodb.Table('Books')
    update_kwargs = {
        'Key': {'isbn': int(isbn)},
        'UpdateExpression': "remove rating",
        'ReturnValues': "UPDATED_NEW",
    }
    return books_table.update_item(**update_kwargs)
if __name__ == '__main__':
update_response = update_book(9784003202524)
print("Update book succeeded:")
pprint(update_response)
| makes-trail/application-sample | dynamodb/crud/BooksItemUpdate04.py | BooksItemUpdate04.py | py | 926 | python | ja | code | 0 | github-code | 90 |
25908853037 | from pyModbusTCP.server import ModbusServer, DataBank
from time import sleep
from random import uniform
# Create a ModbusServer instance bound to localhost (non-blocking mode:
# the server services requests on a background thread).
server = ModbusServer("127.0.0.1", 502, no_block=True)

try:
    print("Server Starting....")
    server.start()
    print("Server is online")
    # Idle until interrupted; sleep instead of the original `continue`
    # busy-loop, which pinned one CPU core at 100%.
    while True:
        sleep(1)
except KeyboardInterrupt:
    # Catch only Ctrl+C.  The original bare `except:` silently swallowed
    # real errors too (e.g. a failed bind on port 502), and stop() was
    # called twice.
    print("Server is stoping....")
    server.stop()
    print("Server is offline")
9856339905 | from tkinter import *
import tkinter.ttk as ttk
import os
import time
from tkinter import messagebox
import tkinter.messagebox
import sqlite3
import pyttsx3
conn = sqlite3.connect('database1.db')
c = conn.cursor()
root = Tk()
root.title("HOSPITAL MANAGEMENT SYSTEM")
root.configure(width=1500,height=600,bg='BLACK')
root.geometry("1350x900")
engine = pyttsx3.init()
volume = engine.getProperty('volume')
engine.setProperty('volume', volume-0)
rate = engine.getProperty('rate')
engine.setProperty('rate', rate-5000)
voices = engine.getProperty("voices")
engine.say("Patients Registration ")
engine.runAndWait()
TableMargin = Frame(root)
TableMargin.place(x=400,y=100)
ids = []
PId = StringVar()
PName = StringVar()
PAge = StringVar()
PGender = StringVar()
PPhone = StringVar()
PBGroup = StringVar()
PLoc = StringVar()
PEmail = StringVar()
localtime = StringVar()
gender = IntVar()
engine1 = pyttsx3.init()
volume = engine1.getProperty('volume')
engine1.setProperty('volume', volume-0)
rate = engine1.getProperty('rate')
voices = engine1.getProperty("voices")
for voice in voices:
engine1.setProperty("voice", voice.id)
engine1.runAndWait()
def OnSelected(event):
    """Copy the row double-clicked in the tree into the entry fields."""
    focused = tree.focus()
    values = tree.item(focused)['values']
    # Column order in the tree: id, name, age, gender, phone,
    # blood group, location, email — matches this StringVar order.
    fields = (PId, PName, PAge, PGender, PPhone, PBGroup, PLoc, PEmail)
    for var, value in zip(fields, values):
        var.set(value)
    engine1.say(f"patient's ID:{values[0]}")
    engine1.runAndWait()
def new():
    """Blank every entry field, announcing the reset when any was filled."""
    # NOTE(review): the original condition never inspects PPhone —
    # behaviour kept as-is.
    checked = (PId, PName, PAge, PGender, PBGroup, PLoc, PEmail)
    if any(var.get() != "" for var in checked):
        engine1.say("You click New Button So all Entry Box BLANKS ")
        engine1.runAndWait()
        for var in (PId, PName, PAge, PGender, PPhone, PBGroup, PLoc, PEmail):
            var.set("")
def back():
    # Close this window and hand control back to the main menu script.
    root.destroy()
    os.system('python menu.py')
def delete():
    """Delete the patient whose ID is in the ID entry field.

    Validates that an ID was entered and exists, then removes the row and
    clears the form.  Voice prompts and messageboxes match the originals,
    but success is now reported AFTER the delete actually commits (the
    original announced success before executing the DELETE).
    """
    if PId.get() == "":
        engine1.say("Please Enter Patients Id ")
        engine1.runAndWait()
        tkinter.messagebox.showinfo("Warning", "Please Enter Patients Id ")
        return
    # Parameterised query — safe against SQL injection.
    exists = c.execute("SELECT * FROM Patients_reg WHERE ID LIKE ?",
                       (PId.get(),)).fetchone()
    if exists is None:
        engine1.say("Your Enter Patients Id Is Not Present in database ")
        engine1.runAndWait()
        tkinter.messagebox.showinfo(
            "Warning", "Your Enter Patients Id Is Not Present in Database")
        return
    c.execute("DELETE FROM Patients_reg WHERE ID LIKE ?", (PId.get(),))
    conn.commit()
    tkinter.messagebox.showinfo("DELETE", "SUCCESSFULLY DELETE DATA")
    engine1.say("SUCCESSFULLY DELETE DATA")
    engine1.runAndWait()
    # Clear the form once the row is gone.
    for var in (PId, PName, PAge, PGender, PPhone, PBGroup, PLoc, PEmail):
        var.set("")
def add():
    """Insert a new patient row built from the entry fields.

    All eight fields are required; if any is empty the same voice prompt
    as before is played and nothing is written.  The original repeated
    the validation eight times and bound the query result to a local
    named ``add``, shadowing this function — both fixed here.
    """
    values = (PId.get(), PName.get(), PAge.get(), PGender.get(),
              PPhone.get(), PBGroup.get(), PLoc.get(), PEmail.get())
    if any(v == "" for v in values):
        print("please enter all values")
        engine1.say("Please Fill All ENTRY Box")
        engine1.runAndWait()
        return
    engine1.say("Now You Fill All Entry Box So Try add DATA Store IN DATABASE SYSTEM")
    engine1.runAndWait()
    # Create the table on demand, then insert (parameterised query).
    # NOTE(review): the CREATE statement has no ``id`` column while the
    # INSERT references one — this only works because the table already
    # exists in the shipped database; kept byte-identical on purpose.
    conn.execute("create table if not exists Patients_reg"
                 "(name text,age text,gender text,phone text,"
                 "Bgroup text,ploc text,Email text) ")
    conn.execute(
        "insert into Patients_reg(id,name,age,gender,phone,Bgroup,ploc,Email)"
        "values(?,?,?,?,?,?,?,?)", values)
    conn.commit()
    tkinter.messagebox.showinfo("Data Store", "SUCCESSFULLY Data Store")
    engine1.say("Patients DATA Store IN DATABASE SYSTEM")
    engine1.runAndWait()
def update():
    """Rewrite the stored record for the patient ID currently in the form."""
    patient_id = PId.get()
    if patient_id == "":
        engine1.say("Please Enter Patients Id ")
        engine1.runAndWait()
        tkinter.messagebox.showinfo("Warning", "Please Enter Patients Id ")
        return
    matches = list(c.execute("SELECT * FROM Patients_reg WHERE ID LIKE ?",
                             (patient_id,)))
    if not matches:
        engine1.say("Your Enter Patients Id Is Not Present in database So Not Update Any Data ")
        engine1.runAndWait()
        tkinter.messagebox.showinfo("Warning", "Your Enter Patients Id Is Not Present in Database So Not Update Any Data")
        return
    c.execute(
        "UPDATE Patients_reg SET name=?,age=?,gender=?,phone=?,bgroup=?,ploc=?,Email=? WHERE ID LIKE?",
        (PName.get(), PAge.get(), PGender.get(), PPhone.get(),
         PBGroup.get(), PLoc.get(), PEmail.get(), patient_id))
    conn.commit()
    engine1.say("Successfully DATA UPDATED")
    engine1.runAndWait()
    tkinter.messagebox.showinfo("UPDATE","SUCCESSFULLY UPDATE DATA")
    print("Patient's Name: " + PName.get() + " Data Update")
    # Reset the form after a successful update.
    for var in (PId, PName, PAge, PGender, PPhone, PBGroup, PLoc, PEmail):
        var.set("")
def search():
    """Look up the ID typed in the ID entry box and fill the form from it."""
    query = Id_en.get()
    print("YOUR INPUT IS :" + query)
    rows = list(c.execute("SELECT * FROM Patients_reg WHERE ID LIKE ?", (query,)))
    fields = (PId, PName, PAge, PGender, PPhone, PBGroup, PLoc, PEmail)
    if not rows:
        for var in fields:
            var.set("")
        engine1.say("Please Enter Correct Id ")
        engine1.runAndWait()
        return
    # Mirror the original behaviour: with several matches, the last
    # returned row wins.
    record = rows[-1]
    for var, value in zip(fields, record):
        var.set(value)
    engine1.say(f"Your Patient's ID is {record[0]}")
    engine1.runAndWait()
label0 = Label(root,text=" Hospital MANAGEMENT SYSTEM ", bg="black",
fg="white", font=("Times", 30))
label0.grid(columnspan=6, padx=10, pady=10)
label = Label(root, text=" Date : ", bg="black", fg="white", font=("Times", 17))
label.place(x=530, y=60)
a=time.asctime(time.localtime(time.time()))
localtime.set(a)
print(a)
e=Entry(root, textvariable=localtime, font=("arial", 20, "bold"), width=22, bg="orange")
e.place(x=615, y=60)
logo = Label(root, text="Total Patient's Register:", font=("arial", 12, "bold"), fg="orange", bg="black",)
logo.place(x=70, y=80)
box = Text(root, font=("arial", 10, "bold"), width=35, height=1)
box.place(x=60, y=110)
Id = Label(root, text="Patient's ID :", font=("arial", 14, "bold"), fg="white", activebackground="RED",
background="black")
Id.place(x=0, y=150)
Id_en = Entry(root, font=("arial",12,"bold"), textvariable=PId)
Id_en.place(x=160, y=150)
name = Label(root, text="Patient's Name:", font=("arial", 14, "bold"), fg="white", activebackground="RED", background="black")
name.place(x=0, y=190)
name_en = Entry(root, font=("arial",12,"bold"),textvariable=PName)
name_en.place(x=160, y=190)
age = Label(root,text="Patient's Age :" ,font=("arial", 14, "bold"), fg="white", activebackground="RED", bg="black")
age.place(x=0,y=230)
age_en = Entry(root ,font=("arial", 12, "bold"),textvariable=PAge)
age_en.place(x=160,y=230)
e_gender = Label(root,text="Patient Gender :",font=("arial",14,"bold"),fg="white",activebackground="RED",background="black")
e_gender.place(x=0,y=270)
gender_en = Entry(root ,font=("arial", 12, "bold"),textvariable=PGender)
gender_en.place(x=160,y=270)
Phone = Label(root,text="Contact :",font=("arial",14,"bold"),fg="white",activebackground="RED",background="black")
Phone.place(x=0,y=310)
Phone_en = Entry(root, font=("arial", 12, "bold"),textvariable=PPhone)
Phone_en.place(x=160,y=310)
Bgroup = Label(root,text=" Blood Group :",font=("arial",14,"bold"),fg="white",activebackground="RED",background="black")
Bgroup.place(x=0,y=350)
Bgroup_en = Entry(root, font=("arial", 12, "bold"),textvariable=PBGroup)
Bgroup_en.place(x=160,y=350)
ploc = Label(root,text="location :",font=("arial",14,"bold"),fg="white",activebackground="RED",background="black")
ploc.place(x=0,y=390)
ploc_en = Entry(root, font=("arial", 12, "bold"),textvariable=PLoc)
ploc_en.place(x=160,y=390)
email = Label(root,text="Email:",font=("arial",14,"bold"),fg="white",activebackground="RED",background="black")
email.place(x=0,y=430)
email_en = Entry(root, font=("arial", 12, "bold"),textvariable=PEmail)
email_en.place(x=160, y=430)
b1 = Button(root,text="ADD",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=add)
b1.place(x=0,y=470)
b1 = Button(root,text="UPDATE",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=update)
b1.place(x=60,y=470)
b1 = Button(root,text="SEARCH",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=search)
b1.place(x=150,y=470)
b1 = Button(root,text="DELETE",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=delete)
b1.place(x=250,y=470)
b1 = Button(root,text="NEW",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=new)
b1.place(x=340,y=470)
b1 = Button(root,text="BACK",font=("arial",12,"bold"),fg="red",activebackground="RED",background="black",command=back)
b1.place(x=150,y=520)
L1 = Label(TableMargin,text="Patients Data",font=("arial",20,"bold"),fg="red",)
L1.pack()
scrollbarx = Scrollbar(TableMargin, orient=HORIZONTAL)
scrollbary = Scrollbar(TableMargin, orient=VERTICAL)
tree = ttk.Treeview(TableMargin, columns=( "Patients_ID", "Patient_Name", "Patient_Age", "Patient_Gender", "Contact", "Blood_Group","Address","Email"), height=50, selectmode="extended", yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)
scrollbary.config(command=tree.yview)
scrollbary.pack(side=RIGHT, fill=Y)
scrollbarx.config(command=tree.xview)
scrollbarx.pack(side=BOTTOM, fill=X)
tree.heading('Patients_ID', text="Patients_ID", anchor=W)
tree.heading('Patient_Name', text="Patient_Name", anchor=W)
tree.heading('Patient_Age', text="Patient_Age", anchor=W)
tree.heading('Patient_Gender', text="Patient_Gender", anchor=W)
tree.heading('Contact', text="Contact", anchor=W)
tree.heading('Blood_Group', text="Blood_Group", anchor=W)
tree.heading('Address', text="Address", anchor=W)
tree.heading('Email', text="Email", anchor=W)
tree.column('#0', stretch=NO, minwidth=0, width=0)
tree.column('#1', stretch=NO, minwidth=0, width=80,)
tree.column('#2', stretch=NO, minwidth=0, width=110)
tree.column('#3', stretch=NO, minwidth=0, width=110)
tree.column('#4', stretch=NO, minwidth=0, width=100)
tree.column('#5', stretch=NO, minwidth=0, width=110)
tree.column('#6', stretch=NO, minwidth=0, width=100)
tree.column('#7', stretch=NO, minwidth=0, width=100)
tree.column('#8', stretch=NO, minwidth=0, width=100)
tree.bind('<Double-Button-1>', OnSelected)
tree.pack()
sql2 = "SELECT ID FROM Patients_reg "
result = c.execute(sql2)
for row in result:
ID = row[0]
ids.append(ID)
new = sorted(ids)
final_id = new[len(ids)-1]
box.insert(END,"Total Registration of Patient's :" + str(final_id))
c.execute("SELECT * FROM Patients_reg")
fetch = c.fetchall()
for data in fetch:
tree.insert('', 'end', values=(data))
print(data)
root.mainloop()
| MuhammadZubair786/Hospital-Management-Project-In-Python | hospital/new.py | new.py | py | 13,480 | python | en | code | 1 | github-code | 90 |
35580438835 | import requests
import sys
import csv
from interfaces import BalanceResponse
# Normally I'd create a .env file and secrets from there
# but this is a test token
API_KEY = 'D6HFJ69KZZ23JN8EP86KJPBVE3NKHP5BSR'
BASE_URL = 'https://api.etherscan.io/api'
def get_crypto_balance_by_address(address: str, tag: str="latest") -> BalanceResponse:
    """Fetch the Ether balance for *address* from the Etherscan API.

    Uses the ``balancemulti`` endpoint, so *address* may also be a
    comma-separated list of addresses.  Raises ``requests.HTTPError`` on
    a non-2xx response.
    https://docs.etherscan.io/api-endpoints/accounts
    """
    params = (f'module=account&action=balancemulti'
              f'&address={address}&apikey={API_KEY}&tag={tag}')
    reply = requests.get(f'{BASE_URL}?{params}')
    reply.raise_for_status()
    return reply.json()
def write_to_csv(data: BalanceResponse, csv_filename: str, address: str):
    """Dump the account/balance pairs in *data* to *csv_filename*.

    NOTE(review): the *address* parameter is currently unused (every row
    already carries its own account); kept for interface compatibility.
    """
    with open(csv_filename, 'w', newline='') as out:
        columns = ['address', 'balance']  # extend here for more fields
        writer = csv.DictWriter(out, fieldnames=columns)
        writer.writeheader()
        writer.writerows(
            {'address': entry['account'], 'balance': entry['balance']}
            for entry in data["result"])
def main():
    """CLI entry point: fetch balances for one address and dump them to CSV."""
    if len(sys.argv) != 2:
        print("Usage: python3 main.py <crypto_address>")
        sys.exit(1)
    target = sys.argv[1]
    balances = get_crypto_balance_by_address(target)
    if not balances:
        print("Failed to retrieve data.")
        return
    out_file = 'ethereum.csv'
    write_to_csv(balances, out_file, target)
    print(f"Data written to {out_file}")
| carkod/elliptic-challenge | main.py | main.py | py | 1,812 | python | en | code | 0 | github-code | 90 |
3139988258 | import math
import copy
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
genres = ["Action","Adventure","Animation","Children's","Comedy","Crime","Documentary","Drama","Fantasy",
"Film-Noir","Horror","Musical","Mystery","Romance","Sci-Fi","Thriller","War","Western"]
genres_tmdb = ["Action", "Adventure" , "Animation", "Comedy", "Crime", "Documentary", "Drama", "Family",
"Fantasy", "History", "Horror", "Music", "Mystery", "Romance", "Science Fiction", "TV Movie",
"Thriller", "War", "Western"]
genres_tmdb_dict = {28 : "Action",12 : "Adventure", 16 : "Animation", 35 : "Comedy", 80: "Crime",
99: "Documentary",18 : "Drama",10751 : "Family",14 : "Fantasy", 36 : "History",
27 : "Horror", 10402 : "Music",9648 : "Mystery", 10749 : "Romance", 878 : "Science Fiction",
10770 : "TV Movie", 53 : "Thriller", 10752 : "War", 37 : "Western"}
genres_tmdb_dict_inv = {v: k for k, v in genres_tmdb_dict.items()}
def normalisation(df):
    """Min-max scale every column of *df* into the [0, 1] range."""
    lo = df.min()
    return (df - lo) / (df.max() - lo)
def toTarget(list, method, etalon=""):
    """Binarise *list* into +1 / -1 labels.

    method == "median": +1 where value >= median of the list
    method == "mean":   +1 where value >= mean of the list
    method == "vs":     +1 where value == *etalon*
    anything else:      +1 where value >= 0
    """
    if method == "median":
        temoin = np.median([list])
    elif method == "mean":
        temoin = np.mean([list])
    elif method == "vs":
        temoin = etalon
    else:
        temoin = 0
    print(method, temoin)
    if method == "vs":
        return [1 if value == temoin else -1 for value in list]
    return [1 if value >= temoin else -1 for value in list]
class Engineering():
    """Base class for the feature-engineering helpers in this module.

    Subclasses fill ``self.df`` (column name -> list of values) and
    ``self.index`` (row labels); ``toDataFrame`` then turns that into a
    min-max-normalised pandas DataFrame with an optional binary target.
    """

    def __init__(self, name):
        # e.g. name="Genres" -> self.name == "GenresEngineering"
        self.name = name+"Engineering"
        self.df = {}      # column name -> list of raw values
        self.index = []   # row labels, aligned with the value lists
        print(self.name, "init in process")

    def toDataFrame(self, method="median", axis='', etalon="", withTarget=True, toStack=[]):
        """Build a normalised DataFrame from ``self.df``.

        method     -- how toTarget binarises the target column
                      ("median", "mean", "vs", else threshold at 0)
        axis       -- name of the target column; '' means the last column
        etalon     -- reference value when method == "vs"
        withTarget -- append a +1/-1 "target" column when True
        toStack    -- columns to keep un-normalised besides non-numeric ones
                      NOTE(review): mutable default argument — harmless
                      here only because it is never mutated.
        """
        cp = copy.deepcopy(self.df)
        if withTarget:
            # Pick the target column and remove it from the data to normalise.
            if axis == '':
                temoin = list(cp)[-1]
            else:
                temoin = axis
            target = cp[temoin]
            del cp[temoin]
        stack = {}
        # Set aside non-numeric columns (probed via the first value) and any
        # explicitly requested ones: re-attached after normalisation, untouched.
        for k,v in self.df.items():
            if (not(isinstance(v[0], int) or isinstance(v[0], float)) and k in cp.keys()) or k in toStack:
                print("stack", k)
                stack[k] = v
                del cp[k]
        result = normalisation(pd.DataFrame(cp, index=self.index))
        for k, v in stack.items():
            result[k] = v
        if withTarget:
            result["target"] = toTarget(target, method, etalon)
        return result
class UtilsEngineering(Engineering):
def __init__(self, base, complement):
super().__init__("Utils")
self.actors = {}
self.actorsReversed = {}
self.actorsPlayedMovies = {}
self.actorsMeanMovies = {}
self.plays = {}
self.languages = {}
self.genres = genres_tmdb_dict
self.prop_women_actors = []
self.prop_women_crew = []
#introduire complements
acteurs, equipes = complement
#operation de base 1
for lista in acteurs:
for a in lista:
# affecte une valeur à une clé si la clé n'est pas utilisée
res = self.actors.setdefault(a['name'], len(self.actors))
if res == len(self.actors)-1:
self.actorsReversed[len(self.actors)-1] = a['name']
#on affecte à chaque acteur le nombre de films par categorie dans lequel il a joué et ce pour chaque catégorie
for i in self.actors.keys():
self.actorsPlayedMovies[i] = {}
for k in genres_tmdb_dict.keys() :
self.actorsPlayedMovies[i][genres_tmdb_dict[k]] = 0
self.actorsPlayedMovies[i]["Total"] = 0
for i_film in range(len(base)) :
desc_film = base[i_film]
for act in range(len(acteurs[i_film])) :
actorName = acteurs[i_film][act]["name"]
self.actorsPlayedMovies[actorName]["Total"] += 1
for id in desc_film["genre_ids"] :
genre = genres_tmdb_dict[id]
self.actorsPlayedMovies[actorName][genre] += 1
#on affecte a chaque le nom des acteurs
for i_film in range(len(base)):
name = base[i_film]["original_title"]
self.plays[name] = []
for a in acteurs[i_film] :
self.plays[name].append(a["name"])
#on affecte a chaque acteur la moyenne par catégorie dans lequel l'acteur a joué et ce pour chaque catégorie.
ke_act = self.actors.keys()
ke_gen = genres_tmdb_dict.keys()
for i in ke_act :
self.actorsMeanMovies[i] = dict()
for k in ke_gen :
self.actorsMeanMovies[i][genres_tmdb_dict[k]] = 0
self.actorsMeanMovies[i]["Total"] = 0
for i_film in range(len(base)) :
desc_film = base[i_film]
vote = desc_film["vote_average"]
for act in range(len(acteurs[i_film])) :
actor_name = acteurs[i_film][act]["name"]
self.actorsMeanMovies[actor_name]["Total"] += vote
for id in desc_film["genre_ids"] :
genre = genres_tmdb_dict[id]
self.actorsMeanMovies[actor_name][genre] += vote
ke_gen = list(genres_tmdb_dict_inv.keys())
ke_gen.append("Total")
for act in ke_act :
for k in ke_gen :
if(self.actorsMeanMovies[act][k] > 0):
self.actorsMeanMovies[act][k] = (self.actorsMeanMovies[act][k] / self.actorsPlayedMovies[act][k])
#on trouve toutes les langues originales des films
language = []
cpt = 0
for fi in base :
la = fi["original_language"]
if la not in language :
language.append(la)
self.languages[la] = cpt
cpt +=1
#on calcule la proportion d'actrices par films
for j in range(len(acteurs)) :
f = 0
total = 0
for i in range(len(acteurs[j])):
if acteurs[j][i]['gender'] == 1 :
f += 1
total+= 1
if total != 0 :
self.prop_women_actors.append(f/total)
else :
self.prop_women_actors.append(0)
#on calcule la proportion de femmes dans l'equipe (crew) par film
for j in range(len(equipes)) :
f = 0
total = 0
for i in range(len(equipes[j])):
if equipes[j][i]['gender'] == 1 :
f += 1
total+= 1
if total != 0 :
self.prop_women_crew.append(f/total)
else :
self.prop_women_crew.append(0)
print(self.name, "init successful")
def toDataFrame(self, method):
pass
class GenresEngineering(Engineering):
def __init__(self, base, complement):
super().__init__("Genres")
genres = {}
nbFilms = {}
averageRating = {}
ratingCount = {}
indice = base.index.values.tolist()
for i in range(len(base)):
genre = base.iloc[i]['genres'].split('|')
film = self.linkFilm(i, base.iloc[i]['movieId'], complement)
for g in genre:
#on initialise les genres possibles
if g in genres.keys():
genres[g].append(base.iloc[i]['movieId'])
else:
genres[g] = [base.iloc[i]['movieId']]
#on compte les tailles
if g in nbFilms.keys():
nbFilms[g] += 1
else:
nbFilms[g] = 1
#on ajoute les nombres de votes
if g in ratingCount.keys():
ratingCount[g] += film['vote_count']
else:
ratingCount[g] = film['vote_count']
#on ajoute les notes moyennes
if g in averageRating.keys():
averageRating[g] += film['vote_average']*film['vote_count']
else:
averageRating[g] = film['vote_average']*film['vote_count']
#on effectue la moyenne des note moyennes(cela n'a pas de sens mais on fait avec ce que l'on a)
for k in genres.keys():
averageRating[k] /= ratingCount[k]
#on rempli le dictionnaire
self.df["quantite"] = []
self.df["engagement"] = []
self.df["note"] = []
for k in genres.keys():
self.index.append(k)
self.df["quantite"].append(nbFilms[k])
self.df["engagement"].append(ratingCount[k])
self.df["note"].append(averageRating[k])
print(self.name, "init successful")
def linkFilm(self, i, movieId, complement):
links, films = complement
inter = int(links.loc[links["movieId"] == movieId]["tmdbId"])
if(films[i]['id'] == inter):
return films[i]
else:
#print("miss")
for film in films:
if film['id'] == inter:
return film
print("FAIL")
exit(0)
print("FAIL", movieId, len(indice))
exit(0)
class GenresClusterEngineering(Engineering):
    """One-hot genre-membership matrix: one row per movie, one column per genre."""

    def __init__(self, base):
        super().__init__("GenreCluster")
        # One empty column per TMDB genre name.
        for genre_name in genres_tmdb_dict.values():
            self.df[genre_name] = []
        for movie in base:
            movie_genres = movie["genre_ids"]
            for genre_name in self.df:
                flag = 1 if genres_tmdb_dict_inv[genre_name] in movie_genres else 0
                self.df[genre_name].append(flag)
            self.index.append(movie["original_title"])
        print(self.name, "init successful")
class MoviesEngineering(Engineering):
def __init__(self, base, complement):
super().__init__("Movies")
self.df["vote_count"] = []
self.df["mean_main_actors"] = []
self.df["original_language"] = []
self.df["popularity"] = []
self.df["note"] = []
self.df['month_release'] = []
self.df['nb_producers'] = []
self.df['nb_words_overview'] = []
self.df['prop_women_actors'] = []
self.df['prop_women_crew'] = []
#la base correspond a la base films
#on introduit les complements
plays, actorsMeanMovies, languages, equipes, prop_women_actor, prop_women_crew = complement
#on effectue les operations de Base
for i in range(len(base)):
title = base[i]["original_title"]
#nombre de vote total
self.df["vote_count"].append(base[i]["vote_count"])
acteurs = plays[title]
acteurs = acteurs[0:5]
genres_id = base[i]["genre_ids"]
n = 0
total = 0
genres = []
for g in genres_id:
genres.append(genres_tmdb_dict[g])
for g in genres:
for act in acteurs:
n += actorsMeanMovies[act][g]
total += 1
if total == 0:
self.df["mean_main_actors"].append(0)
else:
self.df["mean_main_actors"].append(n/total)
la = base[i]["original_language"]
nbr = languages[la] / len(languages)
#on donne a la langue une valeur numérique
self.df["original_language"].append(la)
#on attribue la popularité
if "popularity" not in base[i].keys():
self.df["popularity"].append(0)
else:
self.df["popularity"].append(base[i]["popularity"])
self.df["note"].append(base[i]["vote_average"])
#on ajoute le nombre de mots de la description
self.df['nb_words_overview'].append(len(base[i]['overview'].split(' ')))
#on ajoute le mois de sortie
ke = list(base[i].keys())
if 'release_date' not in ke or len(base[i]['release_date']) == 0 :
self.df['month_release'].append(-1)
else :
self.df['month_release'].append(int(base[i]['release_date'].split('-')[1]))
#on ajoute le nombre de producteurs et de producteurs exécutifs
p = 0
for c in equipes[i] :
if c['job'] == 'Producer' or c['job'] == 'Executive Producer':
p+=1
self.df['nb_producers'].append(p)
#on ajoute la proportion de femmes parmi les acteurs
self.df['prop_women_actors'].append(prop_women_actor[i])
#on ajoute la proportion de femmes parmi l'equipe
self.df['prop_women_crew'].append(prop_women_crew[i])
self.index.append(base[i]["title"])
print(self.name, "init successful")
class MoviesGenresEngineering(Engineering):
def __init__(self, base, complement):
super().__init__("Movies")
self.df["vote_count"] = []
self.df["mean_main_actors"] = []
self.df["original_language"] = []
self.df["popularity"] = []
self.df["note"] = []
self.df["genre_id"] = []
#on introduit les complements
plays, actorsMeanMovies, languages = complement
#on effectue les operations de Base
for i in range(len(base)):
title = base[i]["original_title"]
#on calcule la note moyenne des notes moyennes des 5 premiers acteurs par categories
acteurs = plays[title]
acteurs = acteurs[0:5]
genres_id = base[i]["genre_ids"]
n = 0
total = 0
genres = []
for g in genres_id:
genres.append(genres_tmdb_dict[g])
for g in genres:
for act in acteurs:
n += actorsMeanMovies[act][g]
total += 1
la = base[i]["original_language"]
nbr = languages[la] / len(languages)
#pour chaque genre on va rajouter une ligne dans le dataframe avec comme seul attribut qui diffère le genre
for g in genres_id :
#ajout de langue original
self.df["original_language"].append(nbr)
#ajout vote total
self.df["vote_count"].append(base[i]["vote_count"])
#ajout de la note moyenne des note moyenne des acteurs
if total == 0:
self.df["mean_main_actors"].append(0)
else:
self.df["mean_main_actors"].append(n/total)
#ajout de la popularit&
if "popularity" not in base[i].keys():
self.df["popularity"].append(0)
else:
self.df["popularity"].append(base[i]["popularity"])
#ajout de la note
self.df["note"].append(base[i]["vote_average"])
#ajout de l'id du genre
self.df["genre_id"].append(g)
self.index.append(base[i]["title"])
print(self.name, "init successful")
| ohouens/3i026 | iads/engineering.py | engineering.py | py | 15,218 | python | en | code | 0 | github-code | 90 |
45836426910 | from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.core.mail import send_mail
from .models import Email,News
from django.contrib import messages
from django.template.loader import render_to_string
import smtplib
from zeal.settings import EMAIL_HOST_USER
from django.core.mail import send_mail
from main.views import home
# Create your views here.
def send_newsletter(request):
    """Newsletter signup (regular users) and newsletter dispatch (staff).

    POST from a non-staff user: store their e-mail address once and
    redirect home.  For staff, a POST sends the rendered newsletter to
    every registered user and subscriber, then the newsletter page is
    rendered again.
    """
    if request.method == "POST" and not request.user.is_staff:
        email = request.POST['email']
        if Email.objects.filter(email=email).exists():
            messages.info(request, 'You are already in our Family')
        else:
            Email(email=email).save()
        return redirect(home)
    if request.user.is_staff:
        if request.method == 'POST':
            context = News.objects.all()
            subject = "Zeal Newsletter"
            plain_message = "New Content uploaded do pay a visit to our site"
            # render_to_string returns a plain str; the original then did
            # `message.content_subtype = "html"`, which raises
            # AttributeError on str.  The HTML body is already delivered
            # through send_mail(html_message=...), so that line is removed.
            message = render_to_string('mail-news.html', {'context': context})
            # Every address that looks like an e-mail, deduplicated
            # (a user who also subscribed previously got the mail twice).
            recievers = list({
                person.email
                for queryset in (User.objects.all(), Email.objects.all())
                for person in queryset
                if '@' in person.email
            })
            send_mail(subject, plain_message, EMAIL_HOST_USER, recievers,
                      html_message=message)
        return render(request, "newsletter.html")
    return redirect(home)
| sanujsood/Zeal | newsletter/views.py | views.py | py | 1,831 | python | en | code | 0 | github-code | 90 |
12593584447 | import argparse
import math
from datetime import datetime
import numpy as np
import tensorflow as tf
import socket
import importlib
import zipfile
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
import dataset
import cluster
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model_rpm', help='Model name [default: pointnet2_cls_ssg]')
parser.add_argument('--train_list', default='datalist/RPM_train.txt', help='Datalist for training')
parser.add_argument('--test_list', default='datalist/RPM_test.txt', help='Datalist for testing')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 1024]')
parser.add_argument('--num_frame', type=int, default=5, help='Frames number need to be generated [default: 9]')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 16]')
parser.add_argument('--model_path', default='../output/YOUR_MODEL_PATH/ckpts/model.ckpt-90', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--eval_dir', default='../output/YOUR_MODEL_PATH/eval/', help='eval folder path')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu)
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
NUM_FRAME = FLAGS.num_frame
GPU_INDEX = FLAGS.gpu
TRAIN_LIST = FLAGS.train_list
TEST_LIST = FLAGS.test_list
DATA_PATH = os.path.join(ROOT_DIR, '../data/')
EVAL_DIR = FLAGS.eval_dir
if not os.path.exists(EVAL_DIR): os.mkdir(EVAL_DIR)
if not os.path.exists(EVAL_DIR+'/pointcloud'): os.mkdir(EVAL_DIR+'/pointcloud')
if not os.path.exists(EVAL_DIR+'/seg'): os.mkdir(EVAL_DIR+'/seg')
LOG_FOUT = open(os.path.join(EVAL_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_PATH = FLAGS.model_path
# Shapenet official train/test split
TEST_DATASET = dataset.MotionDataset(data_path=DATA_PATH, train_list=TRAIN_LIST, test_list=TEST_LIST, num_point=NUM_POINT, num_frame=NUM_FRAME, split='test', batch_size=BATCH_SIZE)
def log_string(out_str):
    # Append one line to the evaluation log (flushed immediately) and echo it.
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)
def evaluate():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, pc_target_pl, disp_target_pl, part_seg_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, NUM_FRAME)
gt_mov_seg = tf.cast(tf.greater( part_seg_pl, 0), tf.int32)
is_training_pl = tf.placeholder(tf.bool, shape=())
print("--- Get model ---")
pred_pc, pred_disp, pred_seg, mov_mask, simmat_logits = MODEL.get_model(pointclouds_pl, NUM_FRAME, is_training_pl)
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'pc_target_pl': pc_target_pl,
'disp_target_pl': disp_target_pl,
'part_seg_pl': part_seg_pl,
'is_training_pl': is_training_pl,
'pred_pc': pred_pc,
'pred_seg': pred_seg,
'simmat_logits': simmat_logits}
eval_one_epoch(sess, ops)
def eval_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
is_training = False
log_string(str(datetime.now()))
test_idxs = np.arange(0, len(TEST_DATASET))
num_batches = len(TEST_DATASET)
total_correct_seg = 0
total_seen_seg = 0
sum_ap = 0
batch_idx = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_pc, batch_pc_target, batch_disp_target, batch_mov_seg, batch_part_seg = TEST_DATASET.get_batch(test_idxs, start_idx, end_idx)
feed_dict = {ops['pointclouds_pl']: batch_pc,
ops['pc_target_pl']: batch_pc_target,
ops['disp_target_pl']: batch_disp_target,
ops['part_seg_pl']: batch_part_seg,
ops['is_training_pl']: is_training}
pred_pc_val, pred_seg_val, simmat_logits_val = sess.run([ops['pred_pc'], ops['pred_seg'], ops['simmat_logits']], feed_dict=feed_dict)
pred_seg_label = np.argmax(pred_seg_val[0], 1)
correct_seg = np.sum(pred_seg_label == batch_mov_seg[0])
total_correct_seg += correct_seg
total_seen_seg += NUM_POINT
simmat = simmat_logits_val[0]
out_name = TEST_DATASET.get_name(batch_idx)
ptspos = batch_pc[0,:,:3]
mov_seg = pred_seg_label
gt_part_seg = batch_part_seg[0]
if np.sum(mov_seg) <= 64:
part_seg = np.zeros((NUM_POINT))
log_string("WARING: mov points less than 64")
else:
part_seg, proposals = cluster.GroupMergingSimDist(ptspos, simmat, mov_seg)
ap = cluster.ComputeAP( part_seg, gt_part_seg )
sum_ap += ap
log_string('%d: %s'%(batch_idx, out_name))
log_string('EVAL: AP: %f, movmask_acc: %f\n' % (ap, correct_seg / NUM_POINT))
for frame in range(NUM_FRAME):
np.savetxt(EVAL_DIR+'/pointcloud/'+out_name+'_'+str(frame+1)+'.pts', pred_pc_val[0,frame], fmt='%.8f')
with open(EVAL_DIR+'/seg/'+out_name+'.seg', 'w') as f:
for i in range(NUM_POINT):
f.writelines(str(part_seg[i])+'\n')
log_string('----------------STATISTICS----------------')
log_string('Mean Mov mask Accuracy: %f'% (total_correct_seg / float(total_seen_seg)))
log_string('Mean Average Precision: %f'%(sum_ap / num_batches))
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
evaluate()
LOG_FOUT.close()
| Salingo/RPM-Net | code/test.py | test.py | py | 5,642 | python | en | code | 25 | github-code | 90 |
class Sandwich:
    """A sandwich-style food item with a bread, a price, and fillings."""

    def __init__(self, type='sandwich', bread='white', price=0.00, *args):
        """Initialize this sandwich with the provided type, bread, price,
        and any number of fillings (positional varargs)."""
        self.type = type      # kind of item: sandwich, hot-dog, taco, ...
        self.bread = bread    # bread/base: wheat, rye, corn tortilla, ...
        self.price = price    # price of the sandwich
        self.fillings = list(args)

    def set_price(self, price):
        """Overwrite the price; non-positive values are rejected with a
        console message (previous price kept)."""
        if price > 0:
            self.price = price
        else:
            print("Price must be greater than 0.")

    def fillings_to_string(self):
        """Render the fillings as 'a, b, ' — the trailing separator is
        relied upon by print_sandwich_details."""
        return ''.join(f"{filling}, " for filling in self.fillings)

    def print_sandwich_details(self):
        """Describe this sandwich on the console."""
        print(f"This is a {self.type}, with {self.fillings_to_string()}on {self.bread}.")
        print(f"Price is {self.price}.")
3820991495 | from random import shuffle
#تابع ایجاد دست
def deal(numhands, n=5):
    """Shuffle a fresh 52-card deck and deal `numhands` hands of `n` cards.

    Cards are two-character strings: rank '2'..'9','T','J','Q','K','A'
    followed by suit 'S','H','D','C'.
    """
    deck = [rank + suit for rank in "23456789TJQKA" for suit in "SHDC"]
    shuffle(deck)
    hands = []
    for i in range(numhands):
        hands.append(deck[i * n:(i + 1) * n])
    return hands
# تابع RANK
def card_ranks(hand):
    """Map each card to its numeric rank (2..14, ace high), sorted descending."""
    order = '--23456789TJQKA'
    values = [order.index(card[0]) for card in hand]
    values.sort(reverse=True)
    return values
#تابع دست STRAIGHT
def straight(ranks):
    """True when the ranks are all distinct and span exactly four steps."""
    all_distinct = len(set(ranks)) == len(ranks)
    spans_four = (max(ranks) - min(ranks)) == 4
    return spans_four and all_distinct
#تابع KIND
def kind(n, ranks):
    """Return the first rank occurring exactly `n` times in `ranks`, else None."""
    return next((r for r in ranks if ranks.count(r) == n), None)
#jتابع flush
def flush(hand):
    """True when every card in the hand shares a single suit."""
    distinct_suits = {card[1] for card in hand}
    return len(distinct_suits) == 1
h = ['TH','2H','7H','QH','4H']
flush(h)
#تابعtwo_pair
def two_pair(ranks):
    """Return (high_pair, low_pair) when two distinct pairs exist, else None.

    Scans `ranks` forwards for the high pair and backwards for the low one;
    a single pair (or none) makes the two scans agree, which yields None.
    """
    high = kind(2, ranks)
    low = kind(2, ranks[::-1])
    if high == low:
        return None
    return high, low
def hand_rank(hand):
    """Rank a 5-card hand as a comparable tuple: (category, tiebreaker).

    Categories descend from 8 (straight flush) to 0 (high card); the second
    element breaks ties within a category.  Relies on the sibling helpers
    card_ranks/straight/flush/kind/two_pair defined in this module.
    """
    ranks = card_ranks(hand)
    if straight(ranks) and flush(hand):
        return 8 , max(ranks)               # straight flush
    elif kind(4,ranks):
        return 7 , kind(4, ranks)           # four of a kind
    elif kind(3, ranks) and kind(2, ranks):
        return 6 , kind(3, ranks)           # full house
    elif flush(hand):
        return 5 , max(ranks)               # flush
    elif straight(ranks):
        return 4 , max(ranks)               # straight (no A-2-3-4-5 wheel support)
    elif kind (3, ranks):
        return 3 , kind(3, ranks)           # three of a kind
    elif two_pair(ranks):
        return 2 , two_pair(ranks)          # two pair: (high, low) tuple tiebreak
    elif kind(2, ranks):
        return 1, kind(2, ranks)            # one pair
    else:
        return 0, ranks                     # high card: full descending rank list
def poker(hands):
    """Return the best hand: the one whose hand_rank tuple compares highest.

    On ties, `max` keeps the first maximal hand in input order.
    """
    return max(hands,key = hand_rank)
hands = deal(10)
for hand in hands:
print(hand, hand_rank(hand))
winner = poker(hands)
print(winner, hand_rank(winner))
| Fahime-omd/poker.py | poker.py | poker.py | py | 1,754 | python | en | code | 0 | github-code | 90 |
28734020310 | # This is the train script for CNN - Deep_Learning_Basics
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Define the root directory where your data is located
data_dir = r"C:\Users\jaiva\Desktop\kode\image\data" # Replace with your data directory
# Define the batch size and image size
batch_size = 64
img_height = 180
img_width = 180
# Create an ImageDataGenerator for data augmentation and normalization
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2 # Split the data into training and validation sets
)
# Load and prepare the training dataset
train_generator = train_datagen.flow_from_directory(
data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # For integer-encoded labels
subset='training' # Specify that this is the training dataset
)
# Load and prepare the validation dataset
validation_generator = train_datagen.flow_from_directory(
data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse',
subset='validation' # Specify that this is the validation dataset
)
# Create the CNN model
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(len(train_generator.class_indices)) # Output layer with the number of classes
])
# Compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# Train the model
history = model.fit(
train_generator,
validation_data=validation_generator,
epochs=10 # You can adjust the number of epochs as needed
)
# Save the model
model.save('cnn_model.h5')
# Plot the training and validation accuracy
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
# Evaluate the model on the test dataset
test_loss, test_acc = model.evaluate(validation_generator, verbose=2)
print(f"Accuracy on Validation Dataset: {test_acc}")
plt.subplot(1, 2, 2)
plt.bar(['Validation'], [test_acc], color='skyblue', label='Validation Accuracy')
plt.ylim([0, 1])
plt.title('Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
| jaivanth07/ORIGINAL | temp.py | temp.py | py | 2,897 | python | en | code | 0 | github-code | 90 |
def danger(board, row, col):
    """True if a queen at (row, col) is attacked by any queen on `board`.

    `board` is an iterable of (row, col) positions; attacks are along the
    same row, same column, or either diagonal.
    """
    return any(
        r == row or c == col or abs(row - r) == abs(col - c)
        for (r, c) in board
    )
def place(board, left):
    """Recursively place queens on the rows listed in `left` via backtracking.

    `board` is the list of already-placed (row, col) queens and is mutated
    in place; returns (True, board) on success, (False, board) otherwise.
    """
    if not left:
        # Every remaining row has been placed: solution found.
        return (True, board)
    row = left[0]
    for col in range(1, 9):
        if not danger(board, row, col):
            board.append((row, col))
            (ok, board) = place(board, left[1:])
            if ok:
                return (ok, board)
            else:
                # Dead end: undo this placement and try the next column.
                board.remove((row, col))
    return (False, board)
def place_queens(placed):
    """Complete an 8-queens placement starting from the given fixed queens.

    `placed` holds positions like 'd5' — the letter is mapped via ord-96 to
    1..8 and the digit parsed as int (assumed letter=row, digit=column —
    TODO confirm orientation against callers).  Returns the full solution as
    a set of such strings, or an empty set when `placed` is inconsistent or
    no completion exists.
    """
    row = []
    board = []
    left = []
    for p in placed:
        y = ord(p[0])-96
        x = int(p[1])
        row.append(y)
        # A pre-placed queen already under attack makes the puzzle unsolvable.
        if danger(board, y, x): return set()
        board.append((y, x))
    # Rows not yet occupied still need a queen.
    for i in range(1, 9):
        if i not in row:
            left.append(i)
    ok, board = place(board, left)
    if ok:
        # Convert (number, number) pairs back to 'a1'-style strings.
        return set([chr(i+96)+str(j) for i,j in board])
    return set()
| rawgni/empireofcode | place_queens.py | place_queens.py | py | 976 | python | en | code | 0 | github-code | 90 |
18249739929 |
N = int(input())
A = list(map(int,input().split()))
# print('A:',A)
# First, tally in one pass how many balls carry each number (1..N).
num = [0]*N
for i in range(N):
    num[A[i]-1]+=1
# print('num:',num)
# How many same-number pairs does each count contribute?  C(k, 2) per value.
C = [0]*N
cnt=0  # total number of pairs when no ball is removed
for i in range(N):
    if num[i]!=0 and num[i]!=1:
        dammy = (num[i] * (num[i]-1))//2
        cnt+=dammy
        C[i] = dammy
# print('cnt,C:',cnt,C)
# For each ball i, report the pair count with that ball removed:
# drop all pairs of A[i]'s value, then re-add C(count-1, 2) for the rest.
for i in range(N):
    # print('A[i]',A[i])
    print(cnt - C[A[i]-1] + ((num[A[i]-1]-1)*(num[A[i]-1]-2)) //2)
    # print('ue')
| Aasthaengg/IBMdataset | Python_codes/p02732/s871086688.py | s871086688.py | py | 613 | python | ja | code | 0 | github-code | 90 |
25201141089 | import pandas as pd
# read csv file
df = pd.read_csv('filename.csv')
column1=df['GeneIDs']
print("temp:",column1)
# select the column you want to split
column2 = df['Seq']
# split the column by the newline character
split_column = column2.str.split('\n', expand=True)
# access the first part of the split column
first_part = split_column[0]
print("1stpart:",first_part)
# access the second part of the split column
second_part = split_column[1]
# remove newline characters from the second part
second_part = second_part.str.replace('\n', '')
print("2ndpart:",second_part)
#print(len(column1))
emptylist=[]
mainlist=[]
x=0
for x in range(len(column1)):
#print(column1[x])
emptylist.append(column1[x])
emptylist.append(first_part[x])
emptylist.append(second_part[x])
print(emptylist)
mainlist.append(emptylist)
emptylist=[]
# Create the pandas DataFrame
df = pd.DataFrame(mainlist, columns=['GeneIDs','Header','Seq'])
# Save dataframe as csv file in the current folder
df.to_csv('filename2nd.csv', index = False, encoding='utf-8')
# print dataframe.
print(df) | flyfir248/UPGMA-Gene-analysis-test | splitgeneheader.py | splitgeneheader.py | py | 1,087 | python | en | code | 0 | github-code | 90 |
36295247157 | """Modele mediante una funcion matematica y diseñe un programa
recursivo sin cadenas, tuplas o listas que retorne el primer digito de
un numero natural n (leído de izquierda a derecha). Por ejemplo,
primero(86420) = 8."""
def qdigit(m, counter=0):
    """Return the first (leftmost) digit of the natural number m, recursively.

    Matches the module's stated spec — e.g. qdigit(86420) -> 8.  The previous
    implementation returned the number of digits instead of the first digit.
    `counter` is retained (now optional) for call compatibility with existing
    callers, but no longer participates in the computation.
    """
    if m // 10 == 0:
        return m  # only one digit left: this is the leading digit
    return qdigit(m // 10, counter)
| RosanaR2017/PYTHON | 6.first_digit.py | 6.first_digit.py | py | 375 | python | es | code | 0 | github-code | 90 |
1120403662 | import gym
import numpy as np
from ray.rllib.env import MultiAgentEnv
from src.SimulatorParameters import sim_params
from src.Environment import Environment
class PredatorEnv(gym.Env, MultiAgentEnv):
    """Multi-agent gym/RLlib environment exposing the predator side of the sim."""

    def __init__(self):
        # Five discrete actions per predator.
        self.action_space = gym.spaces.Discrete(5)
        # Per-predator observation bounds: [age, <unbounded quantity>, x, y].
        # Exact component meaning is defined by Environment.predator_obs —
        # TODO confirm against src.Environment.
        obs_low = np.array([0, 0, 0, 0])
        obs_high = np.array([sim_params["hunter_max_age"], np.inf,
                             sim_params["environment_width"],
                             sim_params["environment_height"]])
        self.observation_space = gym.spaces.Box(low=obs_low, high=obs_high)
        # The shared predator/prey world, configured from sim_params.
        self.environment = Environment(
            sim_params["environment_width"],
            sim_params["environment_height"],
            sim_params["max_amount_of_prey"],
            sim_params["prey_max_age"],
            sim_params["prey_birth_rate"],
            sim_params["max_amount_of_hunters"],
            sim_params["hunter_max_age"],
            sim_params["hunter_energy_to_reproduce"],
            sim_params["hunter_energy_per_prey_eaten"],
            sim_params["hunter_init_energy"],
        )

    def reset(self):
        """Reset the world and return the initial per-predator observations."""
        print("Number of predators: " + str(len(self.environment.predator_list)))
        self.environment.reset()
        return self.environment.predator_obs()

    def step(self, action):
        """Advance the predator side one tick; return (obs, rewards, dones, infos)."""
        self.environment.step(env="predator", actions=action)
        return self.environment.predator_obs(), self.environment.predator_rewards(), self.environment.predator_dones(), {}
29703351052 | """
new:创建并返回,静态方法
init:初始化
"""
class UserInfo:
    # Singleton storage: the one shared instance (None until first creation).
    case = None
    # Guards __init__ so the default name is assigned only once.
    isinit = False

    def __new__(cls, *args, **kwargs):
        """Always hand back the single shared instance, creating it on first use."""
        if cls.case is None:
            cls.case = super().__new__(cls)
        return cls.case

    def __init__(self):
        """Initialise the shared instance exactly once (subsequent calls are no-ops)."""
        if not UserInfo.isinit:
            self.name = '李大爷'
            UserInfo.isinit = True
user0 = UserInfo() # 创建对象0
user1 = UserInfo() # 创建对象1
user2 = UserInfo() # 创建对象2
print(user0)
print(user1)
print(user2)
user0.name = '白崇禧'
print(user0.name)
print(user1.name)
print(user2.name)
| Bngzifei/PythonNotes | 学习路线/1.python基础/练习/单例模式.py | 单例模式.py | py | 648 | python | en | code | 1 | github-code | 90 |
20671414960 | from text.models import TextFile
from django.utils.timezone import now
SOFT_MAX_LENGTH: int = 300


def split_text(text: str):
    """Greedily merge newline-separated paragraphs into larger chunks.

    Paragraphs are concatenated (newline separators are dropped) until the
    running chunk exceeds SOFT_MAX_LENGTH characters, at which point it is
    flushed; any non-empty remainder is flushed at the end.
    """
    chunks: list[str] = []
    buffer = ""
    for paragraph in text.split("\n"):
        buffer += paragraph
        if len(buffer) > SOFT_MAX_LENGTH:
            chunks.append(buffer)
            buffer = ""
    if buffer:
        chunks.append(buffer)
    return chunks
def fetch_text(username: str):
    """Return (file contents, model id) of the text assigned to `username`.

    If the user already has a claimed file (finish_flag=1), reuse it and
    refresh its operate_time.  Otherwise claim the first unassigned file
    (finish_flag=0) for this user.
    NOTE(review): if no unassigned file exists, `first()` returns None and
    the attribute assignments below raise AttributeError — confirm callers
    guarantee availability.
    """
    text_models = TextFile.objects.filter(finish_flag=1, username=username)
    if len(text_models) > 0:
        # User already owns a file: just touch its timestamp and re-read it.
        text_model = text_models.first()
        text_model.operate_time = now()
        text_model.save()
        with open(text_model.file.path) as handle:
            content = handle.read()
        model_id = text_model.id
    else:
        # Claim the first unassigned file for this user.
        text_models = TextFile.objects.filter(finish_flag=0)
        text_model = text_models.first()
        text_model.finish_flag = 1
        text_model.username = username
        text_model.operate_time = now()
        text_model.save()
        with open(text_model.file.path) as handle:
            content = handle.read()
        model_id = text_model.id
    return content, model_id
| pxxgogo/misscut_overwatch | misscut_overwatch/text/ops.py | ops.py | py | 1,290 | python | en | code | 0 | github-code | 90 |
35985423635 | import os
import cv2
import torch
import torch.nn as nn
import numpy as np
from argparse import ArgumentParser
from model import Model
from para import Parameter
from data.utils import normalize, normalize_reverse
from os.path import join, exists, isdir, dirname, basename
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--src', type=str, required=True, help="the path of input video or video dir")
parser.add_argument('--ckpt', type=str, required=True, help="the path of checkpoint of pretrained model")
parser.add_argument('--dst', type=str, help="where to store the results")
args = parser.parse_args()
para = Parameter().args
model = Model(para).cuda()
checkpoint_path = args.ckpt
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage.cuda())
model = nn.DataParallel(model)
model.load_state_dict(checkpoint['state_dict'])
if not isdir(args.src):
vid_cap = cv2.VideoCapture(args.src)
num_frames = int(vid_cap.get(cv2.CAP_PROP_FRAME_COUNT))
args.src = join(dirname(args.src), basename(args.src).replace('.', '_'))
os.makedirs(args.src, exist_ok=True)
for i in range(num_frames):
try:
ret, img = vid_cap.read()
cv2.imwrite(join(args.src, '{:08d}.png'.format(i)), img)
except:
break
img_paths = sorted(os.listdir(args.src), key=lambda x: int(x.split('.')[0]))
save_dir = args.dst
if not exists(save_dir):
os.makedirs(save_dir)
seq_length = len(img_paths)
if para.test_frames > seq_length:
para.test_frames = seq_length
start = 0
end = para.test_frames
val_range = 2.0 ** 8 - 1
suffix = 'png'
while True:
input_seq = []
for frame_idx in range(start, end):
blur_img_path = join(args.src, img_paths[frame_idx])
blur_img = cv2.imread(blur_img_path).transpose(2, 0, 1)[np.newaxis, ...]
input_seq.append(blur_img)
input_seq = np.concatenate(input_seq)[np.newaxis, :]
model.eval()
with torch.no_grad():
input_seq = normalize(torch.from_numpy(input_seq).float().cuda(), centralize=para.centralize,
normalize=para.normalize, val_range=val_range)
output_seq = model([input_seq, ])
if isinstance(output_seq, (list, tuple)):
output_seq = output_seq[0]
output_seq = output_seq.squeeze(dim=0)
for frame_idx in range(para.past_frames, end - start - para.future_frames):
blur_img = input_seq.squeeze(dim=0)[frame_idx]
blur_img = normalize_reverse(blur_img, centralize=para.centralize, normalize=para.normalize,
val_range=val_range)
blur_img = blur_img.detach().cpu().numpy().transpose((1, 2, 0)).squeeze()
blur_img = blur_img.astype(np.uint8)
blur_img_path = join(save_dir, '{:08d}_input.{}'.format(frame_idx + start, suffix))
deblur_img = output_seq[frame_idx - para.past_frames]
deblur_img = normalize_reverse(deblur_img, centralize=para.centralize, normalize=para.normalize,
val_range=val_range)
deblur_img = deblur_img.detach().cpu().numpy().transpose((1, 2, 0)).squeeze()
deblur_img = np.clip(deblur_img, 0, val_range)
deblur_img = deblur_img.astype(np.uint8)
deblur_img_path = join(save_dir, '{:08d}_{}.{}'.format(frame_idx + start, para.model.lower(), suffix))
cv2.imwrite(blur_img_path, blur_img)
cv2.imwrite(deblur_img_path, deblur_img)
if end == seq_length:
break
else:
start = end - para.future_frames - para.past_frames
end = start + para.test_frames
if end > seq_length:
end = seq_length
start = end - para.test_frames
| zzh-tech/ESTRNN | inference.py | inference.py | py | 4,098 | python | en | code | 273 | github-code | 90 |
18417321549 |
import bisect
N = int(input())
S = '0'+input()+'0'
lst1 = []
lst2 = [0]*(N+2)
for i in range(1,N+1):
if S[i] == '#':
lst1.append(i)
if S[N+1-i] == '.':
lst2[N+1-i] = lst2[N+2-i] + 1
else:
lst2[N+1-i] = lst2[N+2-i]
if len(lst1) == N or lst2[1] == N:
print(0)
exit()
rlt = N+1
for i in range(len(lst1)):
rlt = min(rlt, i+lst2[lst1[i]])
rlt = min(len(lst1), rlt)
print(rlt) | Aasthaengg/IBMdataset | Python_codes/p03069/s484414640.py | s484414640.py | py | 415 | python | en | code | 0 | github-code | 90 |
24859704684 | x_wins = False
o_wins = False
game_on = True
turn = 1
players_turn = 1
grid_dict = {0: " ", 1: " ", 2: " ", 3: " ", 4: " ", 5: " ", 6: " ", 7: " ", 8: " "}
grid_disp = f" {grid_dict[0]} | {grid_dict[1]} | {grid_dict[2]} 1 | 2 | 3 \n" \
f"----------- -----------\n" \
f" {grid_dict[3]} | {grid_dict[4]} | {grid_dict[5]} 4 | 5 | 6 \n" \
f"----------- -----------\n" \
f" {grid_dict[6]} | {grid_dict[7]} | {grid_dict[8]} 7 | 8 | 9 \n"
# All index triples (into the 0..8 board cells) that form a winning line:
# three rows, three columns, then the two diagonals.
winning_moves = [(0,1,2), (3,4,5), (6,7,8), (0,3,6), (1,4,7), (2,5,8), (2,4,6), (0,4,8)]


def check_for_winner(player, moves):
    """Return True if `moves` covers a complete winning line, else False.

    `player` must be 1 or 2; any other value yields None (as before).  The
    original per-player branches were identical, so they are merged here.
    """
    if player in (1, 2):
        return any(all(cell in moves for cell in line) for line in winning_moves)
    return None
player_1_moves = []
player_2_moves = []
while game_on:
if turn == 1:
print("Let's play Tic Tac Toe!\nX goes first!")
if turn == 9:
game_on = False
print(grid_disp)
if players_turn == 1:
move = int(input("Player 1's Turn (X)! Please enter a number to place your X (Must enter a valid number or you lose):"))-1
if grid_dict[move] == " ":
player_1_moves.append(move)
grid_dict[move] = "X"
grid_disp = f" {grid_dict[0]} | {grid_dict[1]} | {grid_dict[2]} 1 | 2 | 3 \n" \
f"----------- -----------\n" \
f" {grid_dict[3]} | {grid_dict[4]} | {grid_dict[5]} 4 | 5 | 6 \n" \
f"----------- -----------\n" \
f" {grid_dict[6]} | {grid_dict[7]} | {grid_dict[8]} 7 | 8 | 9 \n"
if check_for_winner(player=1, moves=player_1_moves):
x_wins = True
game_on = False
players_turn = 2
turn += 1
else:
print("You can't go there, please try again!")
else:
move = int(input("Player 2's Turn (O)! Please enter a number to place your O (Must enter a valid number or you lose):"))-1
if grid_dict[move] == " ":
player_2_moves.append(move)
grid_dict[move] = "O"
grid_disp = f" {grid_dict[0]} | {grid_dict[1]} | {grid_dict[2]} 1 | 2 | 3 \n" \
f"----------- -----------\n" \
f" {grid_dict[3]} | {grid_dict[4]} | {grid_dict[5]} 4 | 5 | 6 \n" \
f"----------- -----------\n" \
f" {grid_dict[6]} | {grid_dict[7]} | {grid_dict[8]} 7 | 8 | 9 \n"
if check_for_winner(player=2, moves=player_2_moves):
o_wins = True
game_on = False
players_turn = 1
turn += 1
else:
print("You can't go there, please try again!")
if x_wins:
print("Player 1 (X) Wins!")
elif o_wins:
print("Player 2 (O) Wins!")
else:
print("Tie!")
| Gstclair1/tic-tac-toe | main.py | main.py | py | 3,165 | python | en | code | 0 | github-code | 90 |
23553292821 | from django.urls import path
from . import views
urlpatterns = [
path("messages/", views.messages, name='messages'),
path("delete_message/<int:pk>/", views.delete_message, name='delete_message'),
path("chat/<int:pk>/", views.chat, name='chat'),
path("edit_chat/<int:pk>/", views.edit_chat, name='edit_chat'),
path("delete_chat/<int:pk>/", views.delete_chat, name='delete_chat'),
path("add_users_to_chat/<int:pk>/", views.add_users_to_chat, name='add_users_to_chat'),
path("chat_users/<int:pk>/", views.chat_users, name='chat_users'),
path("remove_user_from_chat/<int:pk>/<int:user_pk>/", views.remove_user_from_chat, name='remove_user_from_chat'),
path("leave_chat/<int:pk>/", views.leave_chat, name='leave_chat'),
path('create_private_chat/<int:pk>/', views.create_private_chat, name='create_private_chat')
] | SLDem/followerr | chats/urls.py | urls.py | py | 850 | python | en | code | 1 | github-code | 90 |
1814907776 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 10:21:59 2020
@author: Rajesh
"""
"""
Name:
Intersection
Filename:
Intersection.py
Problem Statement:
With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155]
Write a program to make a list whose elements are intersection of the above given lists.
"""
list1 = [1,3,6,78,35,55]
list2 = [12,24,35,24,88,120,155]
# Convert both lists to sets so common elements can be found directly.
s1=set(list1)
s2=set(list2)
s3 = s1.intersection(s2)
# Back to a list for printing; here the only common element is 35.
list3 = list(s3)
print(list3)
| Rajesh-sharma92/FTSP_2020 | Python_CD6/Intersection.py | Intersection.py | py | 537 | python | en | code | 3 | github-code | 90 |
class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        """Return the median of the two sorted input arrays.

        Concatenates and re-sorts the inputs (O((m+n) log(m+n))), then picks
        the middle element, or the mean of the two middle elements when the
        combined length is even.
        """
        combined = sorted(nums1 + nums2)
        total = len(combined)
        mid = total // 2
        if total % 2:
            return combined[mid]
        return (combined[mid - 1] + combined[mid]) / 2
| ady1210/ArraySolutions | median_of_two_sorted_arrays.py | median_of_two_sorted_arrays.py | py | 670 | python | en | code | 4 | github-code | 90 |
38465835060 | # -*- coding: utf-8 -*-
"""
Created on Sun May 3 22:14:59 2020
@author: shaurya
"""
import turtle
tur = turtle.Turtle()
# 50 equal segments, turning 144 degrees after each, trace a five-pointed
# star path (144 is the classic star turning angle).
for i in range(50):
    tur.forward(50)
    tur.right(144)
turtle.done()
34899675156 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
""" Super Class """
class Optimizer(object):
    """Base class for optimizers: holds the model and dispatches updates.

    Subclasses implement `update(layer)`; `step()` walks either the model's
    named sub-modules (RNN/LSTM-style nets) or its flat `layers` list (MLPs).
    """

    def __init__(self, net, lr=1e-4):
        self.net = net  # the model being optimized
        self.lr = lr    # learning rate

    def step(self):
        """Apply `update` to every parameterized part of the model."""
        net = self.net
        # RNN / LSTM style models expose preprocess / rnn / postprocess parts;
        # update each part that exists and is not None.
        for part_name in ("preprocess", "rnn", "postprocess"):
            if hasattr(net, part_name) and getattr(net, part_name) is not None:
                self.update(getattr(net, part_name))
        # Plain MLPs have none of those attributes, only a flat layer list.
        if not hasattr(net, "preprocess") and \
           not hasattr(net, "rnn") and \
           not hasattr(net, "postprocess"):
            for layer in net.layers:
                self.update(layer)
""" Classes """
class SGD(Optimizer):
    """Plain stochastic gradient descent with optional weight decay.

    Per-parameter update:  theta <- theta - lr * grad - weight_decay * theta
    (the decay term is subtracted directly, not scaled by the learning rate).
    """

    def __init__(self, net, lr=1e-4, weight_decay=0.0):
        self.net = net                    # model whose layers will be updated
        self.lr = lr                      # learning rate
        self.weight_decay = weight_decay  # decay coefficient (0 disables it)

    def update(self, layer):
        """Apply one SGD step to every parameter of `layer`, in place."""
        for name, grad in layer.grads.items():
            theta = layer.params[name]
            layer.params[name] = theta - self.lr * grad - self.weight_decay * theta
class Adam(Optimizer):
    """Adam optimizer with bias-corrected first/second moments and an
    optional weight-decay term subtracted directly from the parameters."""
    def __init__(self, net, lr=1e-3, beta1=0.9, beta2=0.999, t=0, eps=1e-8, weight_decay=0.0):
        self.net = net
        self.lr = lr
        self.beta1, self.beta2 = beta1, beta2
        self.eps = eps
        self.mt = {}  # first-moment (mean) estimate per parameter name
        self.vt = {}  # second-moment (uncentered variance) estimate per parameter name
        self.t = t    # step counter used for bias correction
        self.weight_decay=weight_decay

    def update(self, layer):
        """Apply one Adam step to every parameter of `layer`, in place.

        NOTE(review): `self.t` is incremented once per *parameter* inside the
        loop (and the same counter is shared across layers), so the bias
        correction uses a different t for each parameter rather than one step
        count per optimization step — confirm against the Adam update rule.
        """
        pass
        # print(self.mt, self.vt)
        for n, dx in layer.grads.items():
            #x is thetaT
            x = layer.params[n]
            # Lazily initialize moment buffers for parameters seen first time.
            if n not in self.mt or n not in self.vt:
                self.mt[n] = np.zeros_like(x)
                self.vt[n] = np.zeros_like(x)
            m, v, t= self.mt[n], self.vt[n],self.t
            beta1, beta2 = self.beta1, self.beta2
            #Updating t=> t = t +1 (t goes from t-1 to t)
            t = t+1
            m = beta1 * m + (1.0-beta1) * dx
            v = beta2 * v + (1.0-beta2) * (dx**2)
            # Bias-corrected moment estimates.
            mb = m / (1.0 - beta1**float(t))
            vb = v / (1.0 - beta2**float(t))
            x_next = x - (self.lr * mb / (np.sqrt(vb) + self.eps)) - (self.weight_decay * x)
            #Updating variables
            self.mt[n], self.vt[n], self.t = m, v, t
            layer.params[n] = x_next
| snehabandi/Deep-Learning-and-its-Applications | assignment1/lib/optim.py | optim.py | py | 4,006 | python | en | code | 1 | github-code | 90 |
75166967976 | from PIL import Image
import os, glob, numpy as np
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization,Activation,ZeroPadding2D,Add
from keras.layers import GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import matplotlib.pyplot as plt
from keras import backend as K
import tensorflow as tf
from tensorflow.python.framework import ops as tf_ops
from keras.optimizers import Adam
from tensorflow.keras.applications import InceptionV3
# caltech_dir = '../data/image/project2/'
# categories = ["0", "1", "2", "3","4","5","6","7",
# "8","9"] # y, 분류대상(파일 이름)
# nb_classes = len(categories) # y의 갯수 (10개)
# image_w = 255 # 이미지의 너비 설정
# image_h = 255 # 이미지의 높이 설정
# pixels = image_h * image_w * 3 # shape = 255,255,3
# X = []
# y = []
# for idx, cat in enumerate(categories):
# #one-hot 돌리기.
# label = [0 for i in range(nb_classes)]
# label[idx] = 1
# print(label)
# image_dir = caltech_dir + "/" + cat # caltech_dir = '../data/image/project2/'
# files = glob.glob(image_dir+"/*.jpg")
# print(cat, " 파일 길이 : ", len(files))
# # 이미지 불러오기
# for i, f in enumerate(files):
# img = Image.open(f)
# img = img.convert("RGB")
# img = img.resize((image_w, image_h))
# data = np.asarray(img)
# X.append(data)
# y.append(label)
# if i % 700 == 0:
# print(cat, " : ", f)
# X = np.array(X)
# y = np.array(y)
# #1 0 0 0 이면 Beagle
# #0 1 0 0 이면
X_train, X_test, y_train, y_test = train_test_split(X, y)
xy = (X_train, X_test, y_train, y_test)
np.save("../data/npy/P_project2.npy", xy)
# print("ok", len(y))
# print(X_train.shape) # (2442, 255, 255, 3)
# print(X_train.shape[0]) # 2442
X_train, X_test, y_train, y_test = np.load("../data/npy/P_project2.npy",allow_pickle=True)
print(X_train.shape)
print(X_train.shape[0])
# categories = ["Beaggle", "Bichon Frise", "Border Collie","Bulldog", "Corgi","Poodle","Retriever","Samoyed",
# "Schnauzer","Shih Tzu",]
# nb_classes = len(categories)
# #일반화
# X_train = X_train.astype(float) / 255
# X_test = X_test.astype(float) / 255
# idg = ImageDataGenerator(
# width_shift_range=(0.1),
# height_shift_range=(0.1)
# )
# train_generator = idg.flow(X_train,y_train,batch_size=32,seed=2020)
# valid_generator = (X_test,y_test)
input_tensor = Input(shape=(255, 255, 3), dtype='float32', name='input')
def conv1_layer(x):
    """ResNet-50 stem: 7x7/2 conv with BN + ReLU, padded for the 3x3 pool."""
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(64, (7, 7), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    return x


def _bottleneck_stage(x, filters, out_filters, blocks, first_stride):
    """Build one ResNet bottleneck stage of `blocks` residual units.

    The first unit projects (and optionally downsamples, via `first_stride`)
    the shortcut with a 1x1 conv + BN; the remaining units use an identity
    shortcut.  Layer construction order matches the original hand-expanded
    conv2_layer..conv5_layer code exactly.
    """
    shortcut = x
    for i in range(blocks):
        if i == 0:
            x = Conv2D(filters, (1, 1), strides=first_stride, padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(filters, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(out_filters, (1, 1), strides=(1, 1), padding='valid')(x)
            # Projection shortcut so channel counts (and resolution) match.
            shortcut = Conv2D(out_filters, (1, 1), strides=first_stride, padding='valid')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
        else:
            x = Conv2D(filters, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(filters, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(out_filters, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
    return x


def conv2_layer(x):
    """Stage 2: 3x3/2 max-pool, then 3 bottleneck units (64 -> 256 channels)."""
    x = MaxPooling2D((3, 3), 2)(x)
    return _bottleneck_stage(x, 64, 256, 3, (1, 1))


def conv3_layer(x):
    """Stage 3: 4 bottleneck units (128 -> 512), downsampling on entry."""
    return _bottleneck_stage(x, 128, 512, 4, (2, 2))


def conv4_layer(x):
    """Stage 4: 6 bottleneck units (256 -> 1024), downsampling on entry."""
    return _bottleneck_stage(x, 256, 1024, 6, (2, 2))


def conv5_layer(x):
    """Stage 5: 3 bottleneck units (512 -> 2048), downsampling on entry."""
    return _bottleneck_stage(x, 512, 2048, 3, (2, 2))
x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)
x = GlobalAveragePooling2D()(x)
x = Flatten() (x)
output_tensor = Dense(10, activation='softmax')(x)
# model = Model(input_tensor, output_tensor)
# model.summary()
# model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-5,epsilon=None), metrics=['acc'])
# model_path = '../data/modelcheckpoint/Pproject0.hdf5'
# checkpoint = ModelCheckpoint(filepath=model_path , monitor='val_loss', verbose=1, save_best_only=True)
# early_stopping = EarlyStopping(monitor='val_loss', patience=150)
# # lr = ReduceLROnPlateau(patience=30, factor=0.5,verbose=1)
# learning_history = model.fit_generator(train_generator,epochs=1000, steps_per_epoch=66,
# validation_data=valid_generator, callbacks=[early_stopping,checkpoint]) | lynhyul/AIA | project/CNN2_ResNet50.py | CNN2_ResNet50.py | py | 9,576 | python | en | code | 3 | github-code | 90 |
5126838547 |
#import scipy, scipy.ndimage
#import sparseconvnet as scn
from torch.nn.modules.module import Module
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import Function
class PcNormalizeFunction(Function):
    """Normalize each point cloud in a batch to zero mean and unit radius.

    NOTE(review): `forward` mutates `points` in place and returns a dict,
    which is unconventional for torch.autograd.Function (they normally
    return tensors); `backward` also lacks the @staticmethod decorator that
    `forward` has — confirm against the PyTorch extending-autograd docs.
    """
    @staticmethod
    def forward(ctx, points):
        # points: assumed (batch, npoint, >=3), xyz in the first 3 channels
        # — TODO confirm against callers.
        batch_size, npoint, _ = points.size()
        # rem packs, per cloud, the removed centroid (3) and scale (1) so the
        # normalization can be undone later.
        rem = torch.zeros(batch_size, 4, dtype=torch.float32)
        for i in range(batch_size):
            pc = points[i, :, :3]
            centroid = pc.mean(axis=0)
            pc = pc - centroid
            # Largest distance from the centroid = radius of the cloud.
            m = torch.max(torch.sqrt(torch.sum(pc ** 2, axis=1)))
            pc = pc / m
            points[i,:,:3] = pc
            rem[i, :3] = centroid
            rem[i, 3] = m
        return {'x': points, 'norm': rem}

    def backward(ctx, gradOutput):
        # No gradient is propagated through the normalization.
        return None
class PcNormalize(Module):
    """Module wrapper around :class:`PcNormalizeFunction`."""

    def __init__(self):
        super(PcNormalize, self).__init__()

    def forward(self, points):
        """Center and unit-scale each point cloud in ``points`` (in place)."""
        return PcNormalizeFunction.apply(points)
class TransformFunction(Function):
    """Voxelize a batch of point clouds for sparse convolution.

    Scales the xyz coordinates, randomly shifts each cloud into the
    [0, full_scale) voxel grid, drops points outside the grid or masked
    out (updating ``masks`` in place), and returns sparse-conv style
    ``[x, y, z, batch_idx]`` locations plus jittered features.
    Not differentiable (backward returns all Nones).
    """
    @staticmethod
    def forward(ctx, points, feas, masks, scale=20, full_scale=4096):
        # points: (batch, npoints, >=3); feas: per-point features;
        # masks: flat (batch * npoints,) validity mask, modified in place.
        batch_size, npoints, channel = points.size()
        points = scale * points[:,:,:3]
        locs, feats = [], []
        offsets = torch.zeros(batch_size, 3)
        for i in range(batch_size):
            a = points[i, :, :]
            b = feas[i, :, :]
            # NOTE(review): numpy-style operations are applied to what
            # .size() above implies are torch tensors (a.min(0) on a torch
            # tensor yields a (values, indices) pair, and torch.from_numpy
            # below expects numpy arrays) -- this body looks written for
            # numpy inputs; confirm the intended input type.
            m = a.min(0)#[min_x, min_y, min_z]
            M = a.max(0)#[max_x, max_y, max_z]
            q = M - m  # NOTE(review): unused
            #if range M-m > full_scale; offset = -m-random_crop; if M-m < full_scale, offset = -m + random_crop
            #voxel range [0, 4095]. Centering the points.
            offset = -m + np.clip(full_scale - M + m - 0.001, 0, None) * np.random.rand(3) + np.clip(full_scale - M + m + 0.001, None, 0)*np.random.rand(3)
            a += offset
            idxs = (a.min(1) >= 0) * (a.max(1) < full_scale) * (masks[i * npoints: (i + 1) * npoints] > 0) # remove outliers if any of the [x, y, z] out of [0, full_scale]
            a = a[idxs]
            b = b[idxs]
            masks[i * npoints: (i + 1) * npoints] *= idxs
            # temp[idxs]
            # masks
            a = torch.from_numpy(a).long()
            locs.append(torch.cat([a, torch.LongTensor(a.shape[0], 1).fill_(i)], 1)) #[x, y, z, idx of frames]
            feats.append(torch.from_numpy(b) + torch.randn(3) * 0.1)
            # NOTE(review): offsets is (batch, 3) but is indexed with three
            # indices here -- offsets[i, :] is presumably intended.
            offsets[i, :, :] = offset
            # labels.append(torch.from_numpy(c))
        locs = torch.cat(locs, 0)#list to tensor [x, y, z, idx]
        feats = torch.cat(feats, 0)
        # labels = torch.cat(labels, 0)
        return {'x': [locs, feats], 'offset': offsets}
    @staticmethod
    def backward(ctx, gradOutput):
        # Non-differentiable.  NOTE(review): four Nones are returned but
        # forward takes five inputs besides ctx (points, feas, masks,
        # scale, full_scale).
        return None, None, None, None
#from pointnet [x y z] to sparse conv [x, y, z]
# xyz = points[i, :, :3] * norm[i, 3] + norm[i, :3]
# xyz = xyz * scale + offsets[i,:]
class Transform(Module):
    """Module wrapper around :class:`TransformFunction`.

    NOTE(review): TransformFunction.forward expects
    ``(points, feas, masks, scale, full_scale)`` but apply() below passes
    only four values, so ``masks`` is received as ``feas``, ``self.scale``
    as ``masks`` and ``self.full_scale`` as ``scale`` -- the ``feas``
    argument appears to be missing from this call.  Confirm against the
    call sites before fixing, since the fix changes this forward's
    signature.
    """
    def __init__(self, scale=20, full_scale=4096):
        super(Transform, self).__init__()
        self.scale = scale  # coordinate scale factor applied before voxelization
        self.full_scale = full_scale  # voxel grid extent per axis
    def forward(self, points, masks):
        x = TransformFunction.apply(points, masks, self.scale, self.full_scale)
        return x
| feihuzhang/LiDARSeg | code/data_utils/transform.py | transform.py | py | 3,306 | python | en | code | 62 | github-code | 90 |
26487760020 | import os
# NOTE(review): os.path.dirname('input.txt') is '' so this evaluates to '';
# `path` is never used below (the __main__ block hardcodes absolute paths).
path = os.path.join(os.path.dirname('input.txt'))
def get_monkey_decisions(path_to_file):
    """Parse an Advent of Code 2022 day 11 input file into monkey specs.

    Returns one entry per monkey:
    [starting items, worry operation (callable), test divisor,
     target monkey if divisible, target monkey otherwise].

    NOTE(review): parsing relies on str.strip(chars), which strips a
    *character set* from both ends rather than a literal prefix -- it
    happens to work for the expected lines (e.g. every letter of
    "divisible" is in the set "Test: divisable by") but is fragile.
    The operation is built by eval'ing "lambda old: o" plus the stripped
    remainder ("ld * 19" etc.) -- eval on file contents is unsafe for
    untrusted input.
    """
    with open(path_to_file) as file:
        monkeys = [monkey.split("\n")
                   for monkey in file.read().strip().split("\n\n")]
    decisions = []
    for monkey in monkeys:
        decisions.append([])
        # Line 1: "  Starting items: 79, 98"
        decisions[-1].append([int(x)
                             for x in monkey[1].strip("Starting items:").split(", ")])
        # Line 2: "  Operation: new = old * 19" -> lambda old: old * 19
        decisions[-1].append(eval("lambda old: o" +
                             monkey[2].strip("Operation: w =")))
        # Line 3: "  Test: divisible by 23"
        decisions[-1].append(int(monkey[3].strip("Test: divisable by")))
        # Lines 4-5: throw targets for the divisibility test.
        decisions[-1].append(int(monkey[4].strip("If true: throw to monkey")))
        decisions[-1].append(int(monkey[5].strip("If false: throw to monkey")))
    return decisions
def one_round(monkeys, part):
    """Play one full round of monkey keep-away, mutating ``monkeys`` in place.

    Each monkey is ``[items, operation, divisor, target_if_divisible,
    target_otherwise]``.  ``part`` selects the worry-reduction rule:
    part 1 divides by 3, part 2 only reduces modulo the product of all
    divisors.  Returns the number of items each monkey inspected.
    """
    modulo = 1
    for spec in monkeys:
        modulo *= spec[2]
    inspection_counts = []
    for items, operation, divisor, on_true, on_false in monkeys:
        inspection_counts.append(len(items))
        while items:
            worry = operation(items.pop())
            # (5 - 2 * part) is 3 for part 1 and 1 for part 2.
            worry = (worry % modulo) // (5 - 2 * part)
            target = on_true if worry % divisor == 0 else on_false
            monkeys[target][0].append(worry)
    return inspection_counts
def monkey_business(monkeys, n, part):
    """Run ``n`` rounds and return the product of the two highest activity counts.

    Bug fix: the runner-up was previously computed as
    ``max(x for x in activity if x != max(activity))``, which skips *all*
    values tied with the maximum -- when the two busiest monkeys have
    equal counts it wrongly used the third-highest value (and raised
    ValueError when every count was equal).  Sorting and taking the top
    two handles ties correctly.
    """
    overall_activity = [0] * len(monkeys)
    for _ in range(n):
        round_activity = one_round(monkeys, part)
        for i, count in enumerate(round_activity):
            overall_activity[i] += count
    top_two = sorted(overall_activity, reverse=True)[:2]
    return top_two[0] * top_two[1]
if __name__ == "__main__":
    # NOTE(review): input path is hard-coded to one machine; consider
    # deriving it from __file__ instead.  The file is re-parsed for Part
    # Two because one_round/monkey_business mutate the monkey lists.
    monkeys = get_monkey_decisions(
        '/home/hagay/Development/advent-of-code/advent-of-code-2022/day-11/input.txt')
    print(monkey_business(monkeys, 20, 1))  # Part One
    monkeys = get_monkey_decisions(
        '/home/hagay/Development/advent-of-code/advent-of-code-2022/day-11/input.txt')
    print(monkey_business(monkeys, 10000, 2))  # Part Two
| HagayHaut/advent-of-code | 2022/day-11/script.py | script.py | py | 2,202 | python | en | code | 1 | github-code | 90 |
24603896449 | import sys
import json
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
class IBMXforce:
    """Queries the IBM X-Force Exchange URL-reputation API for a domain.

    NOTE(review): the class defines no __init__, yet the __main__ block at
    the bottom of this file constructs it as IBMXforce(domain) and then
    calls checkIBMxForce() without a domain -- both calls raise TypeError
    as written.
    """
    def checkIBMxForce(self, domain):
        """Print and return the X-Force categories for *domain*.

        Returns a " | "-joined category string, or "-" when the domain is
        unknown (HTTP 404) or any error occurs.
        """
        print('[*] IBM xForce Check: {}'.format(domain))
        s = requests.Session()
        # Hack to prevent cert warnings
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        useragent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0'
        try:
            url = 'https://exchange.xforce.ibmcloud.com/api/url/{}'.format(domain)
            headers = {
                'User-Agent': useragent,
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'en-GB,en;q=0.5',
                'x-ui': 'XFE',
                'Referer': "https://exchange.xforce.ibmcloud.com/url/{}".format(domain),
                'Connection': 'close'
            }
            # verify=False disables TLS certificate validation (matching the
            # warning suppression above) -- exposes the request to MITM.
            response = s.get(url, headers=headers, verify=False)
            if response.status_code == 404:
                print('[-] IBM x-Force does not have entries for the domain!')
                return "-"
            responseJson = json.loads(response.text)
            print("\033[1;32m[!] Site categorized as: {}\033[0;0m"\
                .format(" | ".join(responseJson["result"].get('cats', {}).keys())))
            return " | ".join(responseJson["result"].get('cats', {}).keys())
        except Exception as e:
            # Broad catch: any network/JSON/key error is reported generically.
            print('[-] Error retrieving IBM x-Force reputation!')
            return "-"
if __name__ == "__main__":
    # Bug fix: IBMXforce defines no __init__ taking a domain, so
    # IBMXforce(domain) raised TypeError, and checkIBMxForce() was called
    # without its required `domain` argument.  Construct with no args and
    # pass the domain to the method instead.
    domain = sys.argv[1]
    xf = IBMXforce()
    xf.checkIBMxForce(domain)
| l0gan/domainCat | modules/ibmxforce.py | ibmxforce.py | py | 1,652 | python | en | code | 62 | github-code | 90 |
5995200135 | import numpy as np
import matplotlib.pyplot as plt
def stacked_bar(data, series_labels=None, category_labels=None,
                show_values=False, value_format="{}", y_label=None,
                grid=True, reverse=False, y_limit=None, size_plot=None, use_dataframe=False, throw_zeros=False,dict_colors={}):
    """Plots a stacked bar chart with the data and labels provided.
    Keyword arguments:
    data -- 2-dimensional numpy array or nested list containing data for each series in rows
    series_labels -- list of series labels (these appear in the legend)
    category_labels -- list of category labels (these appear on the x-axis)
    show_values -- If True then numeric value labels will be shown on each bar
    value_format -- Format string for numeric value labels (default is "{}")
    y_label -- Label for y-axis (str)
    grid -- If True display grid
    reverse -- If True reverse the order that the series are displayed (left-to-right or right-to-left)
    y_limit -- contains an int/float that will be the highest y value shown in the graph and y axis
    size_plot -- contains an array of [width, height] we want the plot square area size to be
    use_dataframe -- Bool, if true, data is treated as pandas df with series labels and category labels as rows and columns respectively
    throw_zeros -- Only applicable if use_dataframe is True, throws rows with all zeros in them
    dict_colors -- mapping from series label to bar color; must contain every series label

    NOTE(review): dict_colors={} is a mutable default argument (shared
    across calls), and plt.rcParams is mutated globally when size_plot is
    given.  Data is converted in place to per-column percentages before
    plotting.
    """
    if throw_zeros and not use_dataframe:
        # TODO make throw zeros work without df too
        raise ValueError("throw_zeros only works if use_dataframe is chosen")
    # if throw zeros, remove rows with all zeros
    if throw_zeros:
        data = data[(data.T != 0).any()]
    # if data frame extract info from dataframe
    if use_dataframe:
        # remove no_change filter if needed:
        if 'no_change' in data.index:
            data = data.drop(['no_change'])
        series_labels = data.index
        category_labels = data.columns
        data = data.values
    ny = len(data[0])
    ind2 = range(ny)
    axes = []
    cum_size = np.zeros(ny)
    data = np.array(data)
    if reverse:
        data = np.flip(data, axis=1)
        category_labels = reversed(category_labels)
    if size_plot:
        fig = plt.figure(figsize=size_plot)
        plt.rcParams['font.size'] = '20'
    # Map series index -> color via the caller-supplied label->color dict.
    suit_colors_dict = {}
    for index, column in enumerate(series_labels):
        suit_colors_dict[index] = dict_colors[column]
    #print(data)
    sum_column = np.sum(data, axis=0)
    #print("old_data",data)
    #print("sum_column", sum_column)
    data = data.astype(float)
    # Convert every non-zero cell to a percentage of its column total
    # (format() yields a '.2f' string that numpy casts back to float).
    for row_index in range(len(data)):
        for column_index in range(len(data[row_index])):
            if data[row_index][column_index] != 0.0:
                #print("before", "data[row_index][column_index]",data[row_index][column_index],"sum_column[column_index]*100", sum_column[column_index]*100)
                data[row_index][column_index] = format(data[row_index][column_index]/sum_column[column_index]*100, '.2f')
                #print("after:","\n","data[row_index][column_index]",data[row_index][column_index])
    #print("new data", data)
    #print("category_labels",category_labels )
    #print("series_labels",series_labels)
    # set the text in the same color as the bar
    for i, row_data in enumerate(data):
        axes.append(plt.bar(ind2, row_data, bottom=cum_size,
                            label=series_labels[i]))
        for row in range(len(row_data)):
            axes[i][row].set_color(suit_colors_dict[i])
        cum_size += row_data
    if not category_labels is None:
        plt.xticks(ind2, category_labels, rotation=20, fontsize=30)
    if y_label != None:
        plt.ylabel(y_label, fontsize=30)
    plt.legend()
    if grid:
        plt.grid()
    if y_limit != None:
        plt.ylim(0, y_limit)
    if show_values:
        # Segments too small to label in place get an arrow annotation
        # stacked above the bar; jumps[] tracks the next free height per bar.
        max_tmp = []
        for axis in axes:
            max_tmp.append(max([bar.get_height() for bar in axis]))
        max_height_data = max(max_tmp)
        proportion_to_high = 0.08*max_height_data
        need_arrow = 0.08*max_height_data
        start_extra_heights = [axes[-1][i].get_y() + axes[-1][i].get_height() for i in range(len(axes[-1]))]
        jumps = [proportion_to_high for i in range(len(axes[0]))]
        for index,axis in enumerate(axes):
            for counter, bar in enumerate(axis):
                max_height = start_extra_heights[counter]
                w, h = bar.get_width(), bar.get_height()
                if 0.0 < h < need_arrow:
                    plt.annotate(value_format.format(h)+'%', xy=(bar.get_x(), bar.get_y()),
                                 xytext=(bar.get_x() + 0.2, max_height + jumps[counter]), color=suit_colors_dict[index],
                                 arrowprops=dict(arrowstyle="->"))
                    jumps[counter] += proportion_to_high * 1.2
                elif h > 0.0:
                    plt.text(bar.get_x() + w / 2, bar.get_y() + h / 2, value_format.format(h)+'%', ha="center",
                             va="center")
        # adding the number of total lines of the original pileups
        for index, bar in enumerate(axes[-1]):
            max_height = start_extra_heights[index]
            if max_height == 0.0:
                max_height = 1.3
            plt.annotate(value_format.format(sum_column[index]), xy=(bar.get_x(), bar.get_y()+bar.get_height()),
                         xytext=(bar.get_x(), max_height + jumps[index]),
                         arrowprops=dict(arrowstyle='fancy'))
    return plt, axes
| Lammlab/Resic | Experiments/forontiers_jupyter/bar_utils.py | bar_utils.py | py | 5,634 | python | en | code | 3 | github-code | 90 |
39248509237 | """
Created on October 20, 2023
Helper functions for the L2O project
"""
# Modules
# =============================================================================
from __future__ import annotations
# Standard
from abc import abstractmethod
from typing import Iterable, List, Optional, Tuple, Type
# Third-party
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from f3dasm import ExperimentData
# Authorship & Credits
# =============================================================================
__author__ = 'Martin van der Schelling (martin_van_der_schelling@brown.edu)'
__credits__ = ['Martin van der Schelling']
__status__ = 'Stable'
# =============================================================================
# Types & constants
# =============================================================================
# Labels of the output data
RAW_DATASET_LABEL = 'path_raw'
PERFORMANCE_DATASET_LABEL = 'path_post'
# Type aliases
PerformanceDataset = Type[xr.Dataset]
RawDataset = Type[xr.Dataset]
StrategyDataArray = Type[xr.DataArray]
StrategyDataset = Type[xr.Dataset]
# Loading data
# =============================================================================
def open_one_dataset_post(experimentdata: ExperimentData,
                          index: int) -> PerformanceDataset:
    """Open the post-processed dataset of a single benchmark problem.

    Parameters
    ----------
    experimentdata
        ExperimentData object
    index
        Index of the job to open

    Returns
    -------
    Post-processed data of one benchmark problem in xarray format
    """
    relative_path = experimentdata.output_data.to_dataframe()[
        PERFORMANCE_DATASET_LABEL].loc[index]
    return xr.open_dataset(experimentdata.path / relative_path)
def open_one_dataset_raw(experimentdata: ExperimentData,
                         index: int) -> RawDataset:
    """Open the raw dataset of a single benchmark problem.

    Parameters
    ----------
    experimentdata
        ExperimentData object
    index
        Index of the job to open

    Returns
    -------
    Raw data of one benchmark problem in xarray format
    """
    relative_path = experimentdata.output_data.to_dataframe()[
        RAW_DATASET_LABEL].loc[index]
    return xr.open_dataset(experimentdata.path / relative_path)
def open_all_datasets_post(experimentdata:
                           ExperimentData) -> PerformanceDataset:
    """Open the post-processed data of every benchmark problem at once.

    Parameters
    ----------
    experimentdata
        ExperimentData object

    Returns
    -------
    Post-processed data of all benchmark problems in xarray format

    Note
    ----
    Requires the ``dask`` package, since all datasets are opened lazily
    through ``xr.open_mfdataset``.
    """
    relative_paths = experimentdata.output_data.to_dataframe()[
        PERFORMANCE_DATASET_LABEL]
    full_paths = [experimentdata.path / p for p in relative_paths]
    return xr.open_mfdataset(full_paths)
# Protocol class
# =============================================================================
class Strategy:
    """Base interface: maps a performance dataset to a per-problem optimizer choice.

    NOTE(review): @abstractmethod has no effect without an ABCMeta-based
    class (this one has no ABC base), so instantiation is not actually
    prevented.
    """
    # Name under which the strategy's result is stored in the strategies Dataset.
    name: str = "strategy"
    @abstractmethod
    def __call__(self, xarr: PerformanceDataset) -> StrategyDataArray:
        ...
class CustomStrategy(Strategy):
    """Create a strategy of the optimizer to use given features
    of the benchmark problem
    """
    name: str = "custom_strategy"
    @abstractmethod
    def predict(self, features: xr.Dataset) -> Iterable[str]:
        """
        Method to predict the optimizer to use given features of the
        benchmark problem. This method needs to be implemented by the user.

        Parameters
        ----------
        features : xr.Dataset
            Features of the benchmark problem

        Returns
        -------
        Iterable[str]
            Optimizer to be used for each of the test problems

        Raises
        ------
        NotImplementedError
            If the method is not implemented by the user
        """
        ...
    def __call__(self, xarr: PerformanceDataset) -> StrategyDataArray:
        # Hide the target variables so user predictors only see features.
        # NOTE(review): Dataset.drop is deprecated in recent xarray in
        # favor of drop_vars -- confirm against the pinned xarray version.
        allowed_features = xarr.drop(['perf_profile', 'ranking'])
        predictions = self.predict(allowed_features)
        # One optimizer name per test problem, indexed like the input.
        return xr.DataArray(predictions, dims=['itemID'],
                            coords={'itemID': xarr['itemID']})
# Standard strategy classes
# =============================================================================
class WorstPerfProfile(Strategy):
    """Per problem, pick the optimizer with the *worst* mean performance profile."""
    name: str = 'worst_perf_profile'

    def __call__(self, xarr: PerformanceDataset) -> StrategyDataArray:
        mean_profile = xarr['perf_profile'].mean('realization')
        worst = mean_profile.idxmax('optimizer', fill_value='RandomSearch')
        return worst.sel(output_dim='y').drop('output_dim')
class BestPerfProfile(Strategy):
    """Per problem, pick the optimizer with the *best* mean performance profile."""
    name: str = 'best_perf_profile'

    def __call__(self, xarr: PerformanceDataset) -> StrategyDataArray:
        mean_profile = xarr['perf_profile'].mean('realization')
        best = mean_profile.idxmin('optimizer', fill_value='RandomSearch')
        return best.sel(output_dim='y').drop('output_dim')
# =============================================================================
def create_strategy_xarr(combined_data: PerformanceDataset) -> StrategyDataset:
    """Build a Dataset with one constant strategy per available optimizer.

    Each data variable is named after an optimizer and selects that
    optimizer for every problem (itemID).
    """
    item_ids = combined_data['itemID']
    constant_strategies = {}
    for opt in combined_data['optimizer'].values:
        constant_strategies[opt] = xr.DataArray(
            opt, dims=['itemID'], coords={'itemID': item_ids}, name=opt)
    return xr.Dataset(constant_strategies)
# =============================================================================
class StrategyManager:
    """
    Class to manage the strategies of the optimizer to use given features
    """
    def __init__(self, data: ExperimentData | xr.Dataset,
                 strategy_list: Optional[List[CustomStrategy]] = None):
        """
        Parameters
        ----------
        data : ExperimentData | xr.Dataset
            ExperimentData object or post-processed data xarray
        strategy_list : List[CustomStrategy], optional
            List of strategies to use, in addition to the default
            best/worst performance-profile strategies

        Raises
        ------
        TypeError
            If data is neither an ExperimentData nor an xr.Dataset.
        """
        # Configure the strategies
        if isinstance(data, ExperimentData):
            self.combined_data = open_all_datasets_post(data)
        elif isinstance(data, xr.Dataset):
            self.combined_data = data
        else:
            # Bug fix: an unsupported type previously fell through and
            # caused an AttributeError on the next line; fail fast instead.
            raise TypeError(
                f"data must be an ExperimentData or xr.Dataset, got {type(data)}")
        self.strategies = create_strategy_xarr(self.combined_data)
        # Bug fix: copy the caller's list instead of extending it in place;
        # the old `strategy_list.extend(...)` mutated the caller's list, so
        # constructing two managers from the same list accumulated
        # duplicate default strategies.
        strategies_to_add = [] if strategy_list is None else list(strategy_list)
        strategies_to_add.extend(
            [BestPerfProfile(), WorstPerfProfile()])
        self._add_strategies(strategies_to_add)

    def _add_strategies(self, strategy_list: List[Strategy]):
        # Evaluate each strategy on the combined data and store its
        # per-problem optimizer choice under the strategy's name.
        for strategy in strategy_list:
            self.strategies[strategy.name] = strategy(self.combined_data)

    def plot(self, title: Optional[str] = None) -> \
            Tuple[plt.Figure, plt.Axes]:
        """
        Plot the performance profiles of the strategies

        Parameters
        ----------
        title : Optional[str]
            Title of the plot

        Returns
        -------
        Tuple[plt.Figure, plt.Axes]
            Figure and axes of the plot
        """
        xr_f = self.compute_performance_profiles()
        # Plotting
        fig, ax = plt.subplots(figsize=(10, 5))
        for strategy in xr_f.strategy:
            # Value at f == 1: fraction of problems where the strategy's
            # chosen optimizer is the best one.
            f_one = xr_f.sel(f=1.0, strategy=strategy).values[0]
            ax.plot(xr_f.f, xr_f.sel(strategy=strategy),
                    label=f"{strategy.values} = {f_one}", zorder=-1)
            ax.scatter(1.0, xr_f.sel(f=1.0, strategy=strategy),
                       marker="x", zorder=1)
        if title is not None:
            ax.set_title(title)
        ax.set_xlabel("Performance ratio (f)")
        ax.set_ylabel("Fraction of problems solved")
        ax.legend()
        return fig, ax

    def compute_performance_profiles(self) -> xr.DataArray:
        """
        Compute the performance profiles of the strategies

        Returns
        -------
        xr.DataArray
            Performance profiles of the strategies
        """
        # Select, for every strategy, the performance of its chosen
        # optimizer on each problem, stacked along a new 'strategy' dim.
        perf_profile = xr.concat([self.combined_data.sel(
            itemID=self.strategies['itemID'],
            optimizer=self.strategies[s])
            for s in self.strategies.data_vars],
            dim=xr.DataArray(self.strategies.data_vars, dims='strategy'))
        pp = perf_profile.stack({'problem': ['itemID', 'realization']})[
            'perf_profile']
        # fraction of runs that are within a factor f of the best run
        f_values = np.linspace(1, 5, 300)
        xr_f = xr.concat([(pp <= f).mean(dim='problem')
                          for f in f_values],
                         dim=xr.DataArray(f_values, dims='f'))
        return xr_f
# Plotting tools
# =============================================================================
def plot_perf_profile(data: ExperimentData | xr.Dataset,
                      title: Optional[str] = None) -> \
        Tuple[plt.Figure, plt.Axes]:
    """
    Plot the performance profiles of the strategies

    Parameters
    ----------
    data : ExperimentData | xr.Dataset
        ExperimentData object or xr.Dataset containing the performance profiles
        of the strategies
    title : Optional[str]
        Title of the plot

    Returns
    -------
    Tuple[plt.Figure, plt.Axes]
        Figure and axes of the plots
    """
    if isinstance(data, ExperimentData):
        data = open_all_datasets_post(data)
    # Flatten (itemID, realization) into a single 'problem' dimension.
    pp = data.stack({'problem': ['itemID', 'realization']})['perf_profile']
    # For each ratio f, the fraction of problems solved within factor f of
    # the best run (a performance profile per optimizer).
    f_values = np.linspace(1, 5, 300)
    xr_f = xr.concat([(pp <= f).mean(dim='problem')
                      for f in f_values], dim=xr.DataArray(f_values, dims='f'))
    # Plotting
    fig, ax = plt.subplots(figsize=(10, 5))
    for optimizer in xr_f.optimizer:
        # Value at f == 1: fraction of problems where this optimizer is best.
        f_one = xr_f.sel(f=1.0, optimizer=optimizer).values[0]
        ax.plot(xr_f.f, xr_f.sel(optimizer=optimizer),
                label=f"{optimizer.values} = {f_one}", zorder=-1)
        ax.scatter(1.0, xr_f.sel(f=1.0, optimizer=optimizer),
                   marker="x", zorder=1)
    if title is not None:
        ax.set_title(title)
    ax.set_xlabel("Performance ratio (f)")
    ax.set_ylabel("Fraction of problems solved")
    ax.legend()
    return fig, ax
| bessagroup/3dasm_course | Projects/L2O/l2o.py | l2o.py | py | 10,704 | python | en | code | 10 | github-code | 90 |
1873046825 | # Discord Bot using python3 to send command to the Pokecatcher channel.
import random
import discord
from discord.ext import tasks
# Init the discord client
# NOTE(review): discord.py 2.x requires an `intents` argument
# (discord.Client(intents=...)) -- confirm the installed library version.
discord_client = discord.Client()
channel_id = 000000000000000000 # Replace with your channel ID
# NOTE(review): an all-zero literal is valid Python 3, but a nonzero ID
# with leading zeros is a SyntaxError -- replace the whole number.
# Set the channel object using our channel ID number
# (this runs before login, so get_channel returns None here; the task
# below re-fetches the channel once the client is ready)
channel = discord_client.get_channel(channel_id)
# List we will use these in the example to send random messages to the server
messages = [ "INPUT THE TEXT OR COMMAND YOU WANT TO SEND HERE" ]
# Single message to get sent to the server string
single_message = "This will send over and over if multi_message = False."
# We will use this boolean to determine if we are just sending message string or a random one from messages list
multi_message = False
# Create a loop task for every 60 minutes [1 hour]
@tasks.loop(minutes = 60) # You can change this to seconds, hours, days, etc.
async def send_message():
    # Call channel so we can modify its value
    global channel
    # Make sure channel isn't null (module-level lookup ran before login)
    if channel == None:
        channel = discord_client.get_channel(channel_id)
    # Wait for the discord bot to load before we start posting
    await discord_client.wait_until_ready()
    # Check to see if we are going to be sending different messages every hour or not
    if multi_message:
        # Print to output
        print("Posted random message.")
        # Send message to Discord channel
        await channel.send(f"{random.choice(messages)}")
    else:
        print("Posted single message")
        await channel.send(f"{single_message}")
# On bot ready
@discord_client.event
async def on_ready():
    # Check to see if we are going to be sending different messages every hour or not
    if multi_message:
        print("* Sending random messages to the channel...")
    else:
        print("* Sending single message to the channel...")
    # Start the message sending loop
    send_message.start()
    # Finished setting up
    print("The Bot ready.")
# Launch the Discord bot
print("+ Loading Discord message posting bot...")
discord_client.run("INPUT YOUR BOT TOKEN HERE") | olivier987654/A_Year_Of_Python | 2022-10-26/2022_10_26.py | 2022_10_26.py | py | 2,129 | python | en | code | 0 | github-code | 90 |
18543799969 | import sys
# Fast stdin helpers and common constants for competitive programming.
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60  # effectively "infinity" for comparisons
MOD = 1000000007  # common contest modulus (not referenced in visible code)
def calc_max(A):
    """Prefix maxima of (cumulative value sum - position), floored at 0.

    ``A`` is a list of (position, value) pairs; entry i of the result is
    the best net gain achievable by eating candies A[0..j] and walking
    back to position x_j, for any j <= i (never below 0).
    """
    best_so_far = 0
    running_value = 0
    prefix_best = []
    for position, value in A:
        running_value += value
        best_so_far = max(best_so_far, running_value - position)
        prefix_best.append(best_so_far)
    return prefix_best
def solve(A, Amax, B, Bmax, ans):
    """Best total when collecting on side A first, then turning around.

    For each split point i, combines the best net gain over A[0..i]
    (paying the walk back, -A[i][0]) with the best gain over the mirrored
    side's first N-i-2 candies.  ``B`` is unused but kept for interface
    compatibility.  Returns the max of ``ans`` and every split total.
    """
    n = len(A)
    for i in range(n - 1):
        candidate = Amax[i] - A[i][0] + Bmax[n - i - 2]
        if candidate > ans:
            ans = candidate
    return ans
def main():
    """Solve a circular candy-collecting problem from stdin.

    Input: N C followed by N (position, value) pairs on a circle of
    circumference C.  A/Amax describe the clockwise walk, B/Bmax the
    mirrored counter-clockwise walk; the answer combines one direction
    with an optional turn-around into the other.
    """
    N, C, *XV = map(int, read().split())
    A = [0] * N
    # Pair up the flat XV list into (position, value) tuples.
    for i, (x, v) in enumerate(zip(*[iter(XV)] * 2)):
        A[i] = (x, v)
    Amax = calc_max(A)
    # Mirror the circle so the counter-clockwise walk becomes clockwise.
    B = [(C - x, v) for x, v in reversed(A)]
    Bmax = calc_max(B)
    ans = max(Amax[-1], Bmax[-1])  # single-direction walks, no turn-around
    ans = max(ans, solve(A, Amax, B, Bmax, ans))  # clockwise first
    ans = max(ans, solve(B, Bmax, A, Amax, ans))  # counter-clockwise first
    print(ans)
    return
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03372/s242279835.py | s242279835.py | py | 1,043 | python | en | code | 0 | github-code | 90 |
20135881766 | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import os
################## for text classification ##################
# sample
# train_path = "../data/nn/sample_training.csv"
# train_seg_path = "../data/nn/sample_train_seg.txt"
# test_seg_path = "../data/nn/sample_test_seg.txt"
# sentence_path = "../data/nn/sample_sentence.txt"
# path of training data
train_path = "../data/nn/train.csv"
# path of train segment data, train_seg_path will be built by segment
train_seg_path = "../data/nn/train_seg.txt"
# test_seg_path is part of train_seg_path
test_seg_path = "../data/nn/test_seg.txt"
# path of train sentence, if this file does not exist,
# it will be built from train_seg_path data by w2v_model.py train
sentence_path = "../data/nn/sentence.txt"
# vocab
word_vocab_path = "../data/nn/word_vocab.pkl"
word_vocab_start = 2
pos_vocab_path = "../data/nn/pos_vocab.pkl"
pos_vocab_start = 1
label_vocab_path = "../data/nn/label_vocab.pkl"
# embedding
w2v_dim = 256
w2v_bin_path = "../data/nn/w2v.bin"
w2v_path = "../data/nn/w2v.pkl"
w2v_train_path = "../data/nn/w2v_train.pkl"
p2v_path = "../data/nn/p2v.pkl" # pos vector path
pos_dim = 64
# train param
max_len = 300 # max len words of sentence
min_count = 3
num_workers = 4 # threads
batch_size = 64
nb_labels = 11 # num batches labels
nb_epoch = 2
keep_prob = 0.5
word_keep_prob = 0.9
pos_keep_prob = 0.9
kfold = 2 # 2 or more, default 10
# directory to save the trained model
# create a new directory if the dir does not exist
# NOTE(review): exists()+mkdir() is racy and mkdir is not recursive;
# os.makedirs(path, exist_ok=True) would be safer at import time.
model_save_dir = "../data/nn/output"
if not os.path.exists(model_save_dir):
    os.mkdir(model_save_dir)
model_save_temp_dir = "../data/nn/temp_output"
if not os.path.exists(model_save_temp_dir):
    os.mkdir(model_save_temp_dir)
# best
best_result_path = "../data/nn/output/best_result.csv" | shirleywan/nlp-project | 文本分析/classifier-in-action-master/neural_network/config.py | config.py | py | 1,809 | python | en | code | 2 | github-code | 90 |
18204061039 | import sys
def factorization(n):
    """Return the prime factorization of n as [[prime, exponent], ...].

    For n == 1 the historical fallback returns [[1, 1]] (the caller below
    guards n == 1 separately, so this is never hit there).

    Fix: trial division now runs up to math.isqrt(n), replacing the
    float-based bound ``int(-(-n**0.5//1))+1`` which loses precision for
    very large n (floats have 53 bits of mantissa).  Any prime factor
    p <= sqrt(n) satisfies p <= isqrt(n), and a lone larger factor is
    caught by the remaining-cofactor check, so results are unchanged.
    """
    import math
    factors = []
    remaining = n
    for candidate in range(2, math.isqrt(n) + 1):
        if remaining % candidate == 0:
            exponent = 0
            while remaining % candidate == 0:
                exponent += 1
                remaining //= candidate
            factors.append([candidate, exponent])
    if remaining != 1:
        # Leftover cofactor > sqrt(n) is prime with exponent 1.
        factors.append([remaining, 1])
    if not factors:
        factors.append([n, 1])
    return factors
# Read n and print the maximum number of operations where each operation
# divides n by a *distinct* power of some prime: for each p^e in the
# factorization, the greedy answer is the largest z with 1+2+...+z <= e.
n=int(input())
if n==1:
    print(0)
    sys.exit()
l=factorization(n)
ans =0
for x in l:
    a=x[0] # prime factor (NOTE(review): unused below)
    b=x[1] # exponent of this prime
    k=0
    t=0
    # Greedily spend exponents 1, 2, 3, ... while their sum fits in b.
    while(True):
        t += 1
        k += t
        if k==b:
            ans += t
            break
        elif k > b:
            ans += t-1
            break
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02660/s286068246.py | s286068246.py | py | 691 | python | zh | code | 0 | github-code | 90 |
def Anagram(s1,s2):
    """Return True if s1 and s2 are anagrams of each other.

    Accepts strings or lists of characters.  Two sequences are anagrams
    exactly when their sorted forms are equal, so the manual length check
    and element-by-element loop of the original are replaced by a single
    comparison (same results, still O(n log n)).
    """
    return sorted(s1) == sorted(s2)
if __name__=='__main__':
    #s1 = ['l', 'h', 'u', 'v', 'o']
    #s2 = ['h', 's', 'o', 'v', 'u']
    #or
    # Demo: these two strings are not anagrams, so this prints False.
    s1='shuvo'
    s2='ovshp'
    print(Anagram(s1, s2))
| mahdis4092/Python-Data-structure-and-Algorithms | Array Problem solve/Anagram problem solution.py | Anagram problem solution.py | py | 406 | python | kn | code | 0 | github-code | 90 |
41266359003 | import csv
import os
import shutil
from PIL import Image
def attribute(i):
    """Map an iris feature index (0-3, int or numeric string) to its name.

    Returns None for out-of-range indices, matching the original implicit
    fall-through.  A tuple lookup replaces the four repeated int(i)
    comparisons.
    """
    names = ("sepal length in cm", "sepal width in cm",
             "petal length in cm", "petal width in cm")
    idx = int(i)
    return names[idx] if 0 <= idx < len(names) else None
def draw_ori_tree():
    """Render the decision tree described by dec_tree.csv with Graphviz.

    Expects dec_tree.csv rows of (id, left child, right child,
    attribute-index/class, threshold, kind, extra-label) with a header row.
    Writes ori_tree.gv, an incremental PNG per node into pic/, and a final
    resized ori_tree.png.

    NOTE(review): shutil.rmtree('pic') raises if pic/ does not exist, and
    'labellangle' below is presumably a typo for Graphviz's 'labelangle'
    edge attribute -- confirm against the rendered output.
    """
    csvfile=open("dec_tree.csv","r")
    reader=csv.reader(csvfile)
    gv_file=open('ori_tree.gv','w')
    gv_file.write("digraph Tree{node[shape=box];")
    shutil.rmtree('pic')
    os.mkdir('pic')
    dectree=[]
    orderDectree=[]
    parent=dict()
    root=-1
    # Read all rows, skipping the header line.
    for row in reader:
        if reader.line_num==1:
            continue
        dectree.append(row)
    csvfile.close()
    # Build a child-id -> parent-id map from the left/right child columns.
    for i in range (len(dectree)):
        j=dectree[i][0]
        if dectree[i][1]!='':
            parent[(dectree[i][1])]=j
        if dectree[i][2]!='':
            parent[(dectree[i][2])]=j
    # The root is the only node id that never appears as a child.
    for j in range (len(dectree)):
        if str(j) not in parent:
            root=j
    #sort nodes
    for i in range (len(dectree)):
        for j in range (len(dectree)):
            if int(dectree[j][0])==i:
                orderDectree.append(dectree[j])
    # Emit one node (and its incoming edge) per iteration; after each node,
    # close the graph, render an incremental PNG, then reopen the file and
    # truncate the trailing '}' so the next node can be appended.
    for i in range (len(orderDectree)):
        gv_file.write(str(orderDectree[i][0]))
        if orderDectree[i][5]=='leaf':
            gv_file.write('[label='+'"'+orderDectree[i][3])
        if orderDectree[i][5]=='decision':
            gv_file.write('[label='+'"'+attribute(orderDectree[i][3])+'\n<'+orderDectree[i][4]+' >='+orderDectree[i][4])
        gv_file.write('\n'+orderDectree[i][6]+'"'+'];')
        if i!=root:
            gv_file.write(str(parent['%i'%i])+'->'+str(i)+'[labeldistance=2.5,')
            if int(orderDectree[int(parent['%i'%i])][1])==i:
                gv_file.write('labellangle='+str(45)+'];')
            if int(orderDectree[int(parent['%i'%i])][2])==i:
                gv_file.write('labellangle='+str(-45)+'];')
        gv_file.write('}')
        gv_file.close()
        os.system('dot ori_tree.gv -Tpng -o pic/ori_tree_'+str(i)+'.png')
        gv_file=open('ori_tree.gv','a+')
        gv_file.seek(-1,os.SEEK_END)
        gv_file.truncate()
    gv_file.write('}')
    gv_file.close()
    os.system('dot ori_tree.gv -Tpng -o ori_tree.png')
    im=Image.open('ori_tree.png')
    nim=im.resize((800,550))
    nim.save('ori_tree.png',quality=300)
| AKUMA58/Decision-Tree-Visualization-with-Iris-Dataset | ori_tree.py | ori_tree.py | py | 2,034 | python | en | code | 0 | github-code | 90 |
42799887584 | import json
import numpy as np
import torch
import math
class TokenClassifier:
    """Dictionary-based Thai token segmenter and labeler (dental dictation).

    Splits an utterance into known vocabulary words via maximal matching
    and assigns each word a category label (Number, Side, PD, ...).
    """
    def __init__(self):
        # Closed vocabulary of recognizable spoken tokens (Thai).
        self.vocab = [
            'ศูนย์', 'หนึ่ง', 'สอง', 'สาม', 'สี่', 'ห้า', 'หก', 'เจ็ด', 'แปด', 'เก้า', 'สิบ',
            'ลบ', 'ลิงกั้ว', 'ลิงกัว', 'บัคคัล', 'บัคคอล', 'มีเสี้ยว', 'ดิสทัล', 'ทั้งหมด',
            'มิซซิ่ง', 'คลาวน์', 'คลาวด์', 'อิมแพลนต์', 'อิมแพลนท์', 'อิมพลานต์', 'อิมพลานท์', 'บริดจ์', 'พีดีอาร์อี', 'โพลบบิ้งเดพท์', 'รีเส็ตชั่น',
            'เอ็มจีเจ', 'บรีดดิ้ง', 'บีโอพี', 'ซับปูเรชั่น', 'ซุปปูเรชั่น', 'โมบีลีตี้', 'เอ็มโอ', 'เฟอร์เคชั่น', 'ฟอร์เคชั่น','ฟอรเคชัน'
        ]
    def inference(self, sentence):
        """Segment *sentence* and return a list of [token, label] pairs."""
        dp = self.maximal_matching(sentence)
        tokenized = self.backtrack(dp, sentence)
        labeled_token = self.labeled_token(tokenized)
        return labeled_token
    def maximal_matching(self, test_sentences):
        """Build the maximal-matching DP table over the input characters.

        dp[i][j] is the minimum word count to segment the prefix ending at
        j with a final vocabulary word spanning [i, j], or math.inf when
        that span is not in the vocabulary.  min_col[j] caches the best
        count over all start positions for column j.
        """
        dp =[[None]*len(test_sentences) for _ in range(len(test_sentences))]
        min_col = [len(test_sentences) for _ in range(len(test_sentences))]
        for i in range(len(test_sentences)):
            for j in range(len(test_sentences)):
                if i > j:continue
                elif i == 0 and test_sentences[i:j+1] in self.vocab:
                    dp[i][j] = 1
                    min_col[j] = min(min_col[j], dp[i][j])
                elif test_sentences[i:j+1] in self.vocab:
                    dp[i][j] = 1 + min_col[i-1]
                    min_col[j] = min(min_col[j], dp[i][j])
                else:
                    dp[i][j] = math.inf
        return dp
    def backtrack(self,dp, sentence):
        """Recover a minimal segmentation from the DP table.

        Characters covered by no vocabulary word are grouped into
        out-of-vocabulary spans and skipped (not emitted as tokens).
        NOTE(review): an OOV span at the very start of the sentence is
        never appended -- the loop exits with have_oov still True -- so
        leading unknown text is silently dropped.
        """
        tokenized = []
        eow = len(dp)-1
        word_pos = []
        have_oov = False
        while eow >= 0:
            # Find the best (minimum-count) start for a word ending at eow.
            minn = math.inf
            for i in range(eow+1):
                if minn > dp[i][eow]:
                    minn = dp[i][eow]
                    sow = i
            if minn == math.inf:
                # No vocabulary word ends here: extend the current OOV span.
                if not have_oov:
                    oov_end = eow
                    have_oov = True
                eow = eow - 1
            else:
                if have_oov:
                    oov_start = eow + 1
                    word_pos.append((oov_start, oov_end))
                    have_oov = False
                word_pos.append((sow,eow))
                eow = sow - 1
        word_pos.reverse()
        for sow, eow in word_pos:
            tokenized.append(sentence[sow:eow+1])
        return tokenized
    def labeled_token(self, tokens):
        """Map each token to its category label.

        NOTE(review): tokens that match no branch are silently dropped.
        The vocab entry 'อิมแพลนท์' differs by one character from this
        Implant list's 'อินแพลนท์', so that vocab token currently gets no
        label -- confirm which spelling is intended.
        """
        token_label_list = []
        for token in tokens:
            if token in ['ศูนย์', 'หนึ่ง', 'สอง', 'สาม', 'สี่', 'ห้า', 'หก', 'เจ็ด', 'แปด', 'เก้า', 'สิบ']:
                token_label_list.append([token, "Number"])
            elif token in ['ลบ']:
                token_label_list.append([token, "Symbol"])
            elif token in ['ลิงกั้ว', 'ลิงกัว', 'บัคคัล', 'บัคคอล', 'มีเสี้ยว', 'ดิสทัล', 'ทั้งหมด']:
                token_label_list.append([token, "Side"])
            elif token in ['มิซซิ่ง']:
                token_label_list.append([token, "Missing"])
            elif token in ['คลาวน์', 'คลาวด์']:
                token_label_list.append([token, "Crown"])
            elif token in ['อิมแพลนต์', 'อินแพลนท์', 'อิมพลานต์', 'อิมพลานท์']:
                token_label_list.append([token, "Implant"])
            elif token in ['บริดจ์']:
                token_label_list.append([token, "Bridge"])
            elif token in ['พีดีอาร์อี']:
                token_label_list.append([token, "PDRE"])
            elif token in ['โพลบบิ้งเดพท์']:
                token_label_list.append([token, "PD"])
            elif token in ['รีเส็ตชั่น']:
                token_label_list.append([token, "RE"])
            elif token in ['เอ็มจีเจ']:
                token_label_list.append([token, "MGJ"])
            elif token in ['บรีดดิ้ง', 'บีโอพี']:
                token_label_list.append([token, "BOP"])
            elif token in ['ซับปูเรชั่น', 'ซุปปูเรชั่น']:
                token_label_list.append([token, "SUP"])
            elif token in ['โมบีลีตี้', 'เอ็มโอ']:
                token_label_list.append([token, "MO"])
            elif token in ['เฟอร์เคชั่น', 'ฟอร์เคชั่น','ฟอรเคชัน']:
                token_label_list.append([token, "FUR"])
        return token_label_list
| kracker71/dentist-voice-assistant-capstone-v2 | backend_ner/utils/model.py | model.py | py | 5,372 | python | th | code | 0 | github-code | 90 |
38218120951 | import numpy as np
import scipy.integrate
import warnings
if scipy.__version__.startswith('1.4'):
from scipy.integrate.quadrature import AccuracyWarning
else:
from scipy.integrate._quadrature import AccuracyWarning
from SWESimulators import CDKLM16, Common
class DoubleJetPerturbationType:
    """
    An enum-type class for defining different types of initializations and
    perturbations for the double jet simulation case.
    """
    # Steady state solution
    SteadyState = 1
    # Bump located at the standard position x = x_0 for both jets.
    StandardPerturbedState = 2
    # Bumps located randomly according to N(x_0, sigma), independent locations for the two jets.
    NormalPerturbedState = 3
    # Bumps located at random uniformly distributed along the x-axis, independent for the two jets.
    UniformPerturbedState = 4
    # A strong (20x) model error is added to the steady-state
    ModelErrorPerturbation = 5
    # The initial state consists of the steady-state after a given spin-up time.
    # Used in a DoubleJetEnsemble, each ensemble member is also given an individual spin up from the
    # already spun-up initial conditions.
    SpinUp = 6
    # Same as NormalPerturbedState, but in a DoubleJetEnsemble each ensemble member is spun up individually.
    NormalPerturbedSpinUp = 7
    # Similar to SpinUp, but the model error is only applied every 10'th timestep.
    LowFrequencySpinUp = 8
    # Standard deterministic perturbation of the steady-state, with common and individual spin up, and
    # model errors only every 10th timestep.
    LowFrequencyStandardSpinUp = 9
    # IEWPF paper case!
    # Using the dataAssimilationStep with 1 min model time steps with model error and dynamic dt.
    # Initialize with 3 days spin up
    IEWPFPaperCase = 10

    @staticmethod
    def _assert_valid(pert_type):
        """Assert that pert_type is one of the defined perturbation types.

        Replaces the original ten-clause boolean chain with a single
        membership test over the declared constants (same error message).
        """
        valid_types = (
            DoubleJetPerturbationType.SteadyState,
            DoubleJetPerturbationType.StandardPerturbedState,
            DoubleJetPerturbationType.NormalPerturbedState,
            DoubleJetPerturbationType.UniformPerturbedState,
            DoubleJetPerturbationType.ModelErrorPerturbation,
            DoubleJetPerturbationType.SpinUp,
            DoubleJetPerturbationType.NormalPerturbedSpinUp,
            DoubleJetPerturbationType.LowFrequencySpinUp,
            DoubleJetPerturbationType.LowFrequencyStandardSpinUp,
            DoubleJetPerturbationType.IEWPFPaperCase,
        )
        assert pert_type in valid_types, \
            'Provided double jet perturbation type ' + str(pert_type) + ' is invalid'
class DoubleJetCase:
    """
    Class that generates initial conditions for a double jet case (both perturbed and unperturbed)
    """

    def __init__(self, gpu_ctx,
                 perturbation_type=DoubleJetPerturbationType.SteadyState,
                 model_error = True, commonSpinUpTime = 200000):
        """
        Class that generates initial conditions for a double jet case (both perturbed and unperturbed).
        The use of initial perturbations/spin up periods are given by the perturbation_type argument,
        which should be a DoubleJetPerturbationType instance.

        gpu_ctx          -- GPU context handed on to the CDKLM16 simulator (project type).
        model_error      -- enable the small-scale model error perturbation in the simulator.
        commonSpinUpTime -- spin-up period [s] shared by all ensemble members (for spin-up types).
        """
        # The following parameters are the standard choices we have made for our double jet case.
        # If any of them are to be altered, they should be made optional input parameters to the
        # constructor, with the values below given as default parameters.

        # Check that the provided perturbation type is valid
        DoubleJetPerturbationType._assert_valid(perturbation_type)
        self.perturbation_type = perturbation_type

        # Domain-related parameters: latitudes (radians) bounding the channel and the
        # midpoints of the eastward (pos) and westward (neg) jets.
        self.phi_0 = 72*np.pi/180.0
        self.phi_05 = 75*np.pi/180.0
        self.phi_1 = 78*np.pi/180.0
        self.midpoint_phi_pos = 73.5*np.pi/180
        self.midpoint_phi_neg = 76.5*np.pi/180
        self.phi_delta = 5.5*np.pi/180
        self.phi_pos_min = self.midpoint_phi_pos - self.phi_delta
        self.phi_pos_max = self.midpoint_phi_pos + self.phi_delta
        self.phi_neg_min = self.midpoint_phi_neg - self.phi_delta
        self.phi_neg_max = self.midpoint_phi_neg + self.phi_delta
        # Normalization constant for the jet profile (cf. Galewsky-type initialization)
        self.e_n = np.exp( -4/(self.phi_delta*2)**2)

        # Convert the latitude band into a physical y-extent
        distance_between_latitudes = 111e3 # m
        degrees_0 = self.phi_0*180/np.pi
        degrees_1 = self.phi_1*180/np.pi
        y_south = degrees_0*distance_between_latitudes
        y_north = degrees_1*distance_between_latitudes
        degrees_mid = self.phi_05*180/np.pi

        # Grid resolution: square cells, dy derived from the latitude band
        self.ny = 300
        self.dy = (y_north - y_south)/self.ny
        self.dx = self.dy
        self.nx = 500

        self.ghosts = np.array([2,2,2,2]) # north, east, south, west
        self.dataShape = (self.ny+self.ghosts[0]+self.ghosts[2], self.nx+self.ghosts[1]+self.ghosts[3])

        # Physical parameters
        self.g = 9.80616 # m/s^2 - gravitational acceleration
        omega = 7.2722e-5 # 1/s - Angular rotation speed of the earth
        self.earth_radius = 6.37122e6 # m - radius of the Earth

        self.u_max = 3 # m/s - Gulf stream has "maximum speed typically about 2.5 m/s"
        self.h_0 = 230 # m - It was found to be 230.03, but with a dobious calculation.
        # - Better then just to set the depth to a constant :)

        self.commonSpinUpTime = commonSpinUpTime # s - Because it just seems like a good measure.
        self.individualSpinUpTime = 100000 # s - Because it just seems like a good measure.

        # f-plane approximation centered at phi_05
        self.f = 2*omega*np.sin(self.phi_05)
        self.tan = np.tan(self.phi_05)

        # Initial data: 1D steady-state cross sections of depth and momentum
        sim_h_init, redef_hu_init = self._initSteadyState()
        sim_h_init_mean = sim_h_init.mean()

        # Amplitude of the steady-state surface variation (used to scale bumps later)
        self.delta_eta = np.max(sim_h_init) - np.min(sim_h_init)

        # CFL-style timestep bound from the fastest signal speed, with a 0.8 safety factor
        max_dt = 0.25*self.dx/(np.max(redef_hu_init/sim_h_init + np.sqrt(self.g*sim_h_init)))
        dt = 0.8*max_dt

        # 2D fields: flat equilibrium depth Hi, and eta defined relative to the mean depth
        self.base_cpu_Hi = np.ones((self.dataShape[0]+1, self.dataShape[1]+1), dtype=np.float32) * sim_h_init_mean
        self.base_cpu_eta = -np.ones(self.dataShape, dtype=np.float32) * sim_h_init_mean
        self.base_cpu_hu = np.zeros(self.dataShape, dtype=np.float32)
        self.base_cpu_hv = np.zeros(self.dataShape, dtype=np.float32)

        # Copy the 1D steady-state cross section into every column of the 2D fields
        for i in range(self.dataShape[1]):
            self.base_cpu_eta[:,i] += sim_h_init
            self.base_cpu_hu[:,i] = redef_hu_init

        # Constructor arguments for a CDKLM16 simulator (project class; contract not verified here)
        self.sim_args = {
            "gpu_ctx": gpu_ctx,
            "nx": self.nx, "ny": self.ny,
            # NOTE(review): "dx" is set from self.dy; harmless since dx == dy here, but verify.
            "dx": self.dy, "dy": self.dy,
            "dt": dt,
            "g": self.g,
            "f": self.f,
            "coriolis_beta": 0.0,
            "r": 0.0,
            "H": self.base_cpu_Hi,
            "t": 0.0,
            "rk_order": 2,
            "boundary_conditions": Common.BoundaryConditions(2,2,2,2),
            "small_scale_perturbation": model_error,
            "small_scale_perturbation_amplitude": 0.0003,
            "small_scale_perturbation_interpolation_factor": 5,
        }

        # Unperturbed initial state handed to the simulator
        self.base_init = {
            "eta0": self.base_cpu_eta,
            "hu0": self.base_cpu_hu,
            "hv0": self.base_cpu_hv
        }

        # Spin-up variants: run a temporary simulation and store its end state
        # as the new initial conditions.
        if self.perturbation_type == DoubleJetPerturbationType.SpinUp or \
           self.perturbation_type == DoubleJetPerturbationType.LowFrequencySpinUp or \
           self.perturbation_type == DoubleJetPerturbationType.LowFrequencyStandardSpinUp:

            if self.perturbation_type == DoubleJetPerturbationType.LowFrequencySpinUp:
                # commonSpinUpTime is intentionally left unchanged here; only the
                # individual spin-up period is extended.
                self.commonSpinUpTime = self.commonSpinUpTime
                self.individualSpinUpTime = self.individualSpinUpTime*1.5

            elif self.perturbation_type == DoubleJetPerturbationType.LowFrequencyStandardSpinUp:
                # Start from the deterministically perturbed state and double the common spin-up
                self.sim_args, self.base_init = self.getStandardPerturbedInitConditions()
                self.commonSpinUpTime = self.commonSpinUpTime*2

            tmp_sim = CDKLM16.CDKLM16(**self.sim_args, **self.base_init)
            tmp_t = tmp_sim.step(self.commonSpinUpTime)

            tmp_eta, tmp_hu, tmp_hv = tmp_sim.download(interior_domain_only=False)
            self.base_init['eta0'] = tmp_eta
            self.base_init['hu0'] = tmp_hu
            self.base_init['hv0'] = tmp_hv
            self.sim_args['t'] = tmp_sim.t

            tmp_sim.cleanUp()

        # The IEWPFPaperCase - isolated to give a better overview
        if self.perturbation_type == DoubleJetPerturbationType.IEWPFPaperCase:
            self.sim_args["small_scale_perturbation_amplitude"] = 0.00025
            self.sim_args["model_time_step"] = 60 # sec

            tmp_sim = CDKLM16.CDKLM16(**self.sim_args, **self.base_init)
            tmp_sim.updateDt()
            # Three days of spin-up via the data-assimilation stepping loop
            three_days = 3*24*60*60
            tmp_t = tmp_sim.dataAssimilationStep(three_days)

            tmp_eta, tmp_hu, tmp_hv = tmp_sim.download(interior_domain_only=False)
            self.base_init['eta0'] = tmp_eta
            self.base_init['hu0'] = tmp_hu
            self.base_init['hv0'] = tmp_hv
            self.sim_args['t'] = tmp_sim.t

            tmp_sim.cleanUp()

    def __del__(self):
        # Best-effort cleanup when the object is garbage collected
        self.cleanUp()

    def cleanUp(self):
        # All allocated data needs to be freed from here
        # NOTE(review): gpu_ctx is not set as an attribute in __init__ (it lives in
        # sim_args); this assignment just creates/clears the attribute.
        self.gpu_ctx = None
        self.sim_args = None
        self.base_init = None

    def getInitConditions(self):
        """
        Provides dicts with initial conditions and constructor arguments suitable for a CDKLM simulator.
        """
        if self.perturbation_type == DoubleJetPerturbationType.StandardPerturbedState:
            return self.getStandardPerturbedInitConditions()
        elif self.perturbation_type == DoubleJetPerturbationType.NormalPerturbedState or \
             self.perturbation_type == DoubleJetPerturbationType.NormalPerturbedSpinUp:
            return self.getNormalPerturbedInitConditions()
        elif self.perturbation_type == DoubleJetPerturbationType.UniformPerturbedState:
            return self.getUniformPerturbedInitConditions()
        else:
            # perturbation type is SteadyState, ModelErrorPerturbation, SpinUp, LowFrequencySpinUp
            return self.getBaseInitConditions()

    def getBaseInitConditions(self):
        """
        Provides the unperturbed steady-state double jet initial conditions
        """
        return self.sim_args, self.base_init

    def getStandardPerturbedInitConditions(self):
        """
        Provides the standard perturbed double jet initial conditions, using two eta-bumps at x = nx/4
        """
        # NOTE(review): the bump is actually placed at nx/5 (cf. code below), not nx/4.
        mid_cell_x_pos = int(self.nx/5) + self.ghosts[3]
        mid_cell_x_neg = int(self.nx/5) + self.ghosts[3]
        return self._create_perturbed_init(mid_cell_x_pos, mid_cell_x_neg)

    def getNormalPerturbedInitConditions(self):
        """
        Provides the standard perturbed double jet initial conditions,
        using two eta-bumps at slightly perturbed
        """
        # Bump centers drawn independently from N(nx/5, 10) for the two jets
        mid_cell_x_pos = np.random.normal(self.nx/5, 10)
        mid_cell_x_neg = np.random.normal(self.nx/5, 10)
        return self._create_perturbed_init(mid_cell_x_pos, mid_cell_x_neg)

    def getUniformPerturbedInitConditions(self):
        """
        Provides the standard perturbed double jet initial conditions, using two eta-bumps at random x-positions
        """
        mid_cell_x_pos = int(np.random.rand()*self.nx + self.ghosts[3])
        mid_cell_x_neg = int(np.random.rand()*self.nx + self.ghosts[3])
        return self._create_perturbed_init(mid_cell_x_pos, mid_cell_x_neg)

    def _create_perturbed_init(self, mid_cell_x_pos, mid_cell_x_neg):
        """
        Creates initial conditions with perturbations in eta according to the indices given as input.
        """
        eta_pert = np.zeros(self.dataShape)

        distance_between_longitudes_75 = 28.7e3 # m
        distance_between_latitudes = 111e3 # m
        # NOTE(review): radius_y_cells is computed but never used below.
        radius_y_cells = distance_between_longitudes_75*180/self.dx

        # Bump y-centers: one in each jet (1/4 and 3/4 of the channel)
        mid_cell_y_pos = int(1*self.ny/4)
        mid_cell_y_neg = int(3*self.ny/4)

        # Bump extents in x (alpha) and y (beta); h_hat scales the bump height
        pert_alpha = self.phi_delta #1/6 # 1/3
        pert_beta = self.phi_delta/5 # 1/30 # 1/15
        h_hat = 0.12*self.delta_eta

        for j in range(self.ny+self.ghosts[2]+self.ghosts[0]):
            for i in range(self.nx+self.ghosts[3]+self.ghosts[1]):
                # Periodic distance in x: take the shortest of the direct and wrapped distances
                cell_diff_x_pos = i-mid_cell_x_pos
                cell_diff_x_pos = min(abs(cell_diff_x_pos), abs(cell_diff_x_pos+self.nx), abs(cell_diff_x_pos-self.nx))
                cell_diff_x_neg = i-mid_cell_x_neg
                cell_diff_x_neg = min(abs(cell_diff_x_neg), abs(cell_diff_x_neg+self.nx), abs(cell_diff_x_neg-self.nx))

                # Squared normalized distances feeding the Gaussian bump profile
                squared_dist_y_pos = ((1/pert_beta)*(np.pi/180)*(j-mid_cell_y_pos)*self.dy/distance_between_latitudes)**2
                squared_dist_y_neg = ((1/pert_beta)*(np.pi/180)*(j-mid_cell_y_neg)*self.dy/distance_between_latitudes)**2
                squared_dist_x_pos = ((1/pert_alpha)*(np.pi/180)*(cell_diff_x_pos)*self.dx/(distance_between_longitudes_75))**2
                squared_dist_x_neg = ((1/pert_alpha)*(np.pi/180)*(cell_diff_x_neg)*self.dx/(distance_between_longitudes_75))**2

                lat = np.cos(75*np.pi/180) # approximation into the beta-plane
                eta_pert[j,i] += h_hat*lat*np.exp(-squared_dist_y_pos - squared_dist_x_pos) +\
                                 h_hat*lat*np.exp(-squared_dist_y_neg - squared_dist_x_neg)

        return self.sim_args, {"eta0": self.base_cpu_eta + eta_pert, "hu0": self.base_cpu_hu, "hv0": self.base_cpu_hv}

    ###-----------------------------------------------------------------
    ### Utility functions for creating the stable initial case
    ###-----------------------------------------------------------------
    def _initSteadyState(self):
        """
        Main function for creating the unperturbed steady-state initial conditions

        Returns (sim_h_init, redef_hu_init): 1D cross sections (length ny+4,
        including ghost cells) of water depth and geostrophically balanced hu.
        """
        with warnings.catch_warnings():
            # Silence scipy quadrature accuracy warnings during the integration below
            warnings.filterwarnings('ignore', category=AccuracyWarning)
            #warnings.simplefilter('ignore', scipy.integrate.quadrature.AccuracyWarning)
            #warnings.simplefilter('ignore', AccuracyWarning)
            #warnings.simplefilter('ignore',DeprecationWarning)

            # The initial conditions are created through four steps, here as a cross section along y
            # 1. Calculate $u_{temp}$ based on the expression for initial $u$ from the paper
            # 2. Calculate initial $h_{init}$ using the expression for $u_{temp}$.
            # 3. Re-calculate initial $u_{init}$ by using the expression for geostrophic balance on the initial $h_{init}$.
            # 4. Obtain $hu_{init} = h_{init} u_{init}$.
            dy_phi = (self.phi_1 - self.phi_0)/self.ny
            sim_phi = np.linspace(self.phi_0 - 2*dy_phi, self.phi_1 + 2*dy_phi, self.ny+4)

            # 1)
            # NOTE(review): sim_u_init is computed but not used further; hu is
            # re-derived from geostrophic balance below.
            sim_u_init = self._init_u(sim_phi)

            # 2)
            sim_h_init = self._generate_h0(sim_phi, self.phi_0)
            sim_h_init_mean = np.mean(sim_h_init)

            # Calculate hu which is in geotrophic balance wrt sim_h_init (it's slope is equal to the slope of eta)
            redef_hu_init = np.zeros_like(sim_h_init)
            for j in range(1, len(redef_hu_init)-1):
                # Central difference of h gives the surface slope
                redef_hu_init[j] = - (self.g*sim_h_init_mean/self.f)*(sim_h_init[j+1]-sim_h_init[j-1])/(2*self.dy)

            return sim_h_init, redef_hu_init

    def _init_u_scalar(self, lat):
        """
        The initialization function used by Galewsky

        Eastward jet south of phi_05, westward jet north of it.
        """
        if lat < self.phi_05:
            return (self.u_max/self.e_n) *np.exp(1/((lat-self.phi_pos_min)*(lat-self.phi_pos_max)))
        else:
            return -(self.u_max/self.e_n) *np.exp(1/((lat-self.phi_neg_min)*(lat-self.phi_neg_max)))

    def _init_u(self, lat):
        """
        Initializing u according to Galewsky

        Accepts a scalar or an array of latitudes; outside (phi_0, phi_1] the
        velocity is zero, and infinities from the profile are clamped to zero.
        """
        steps = 1
        if np.isscalar(lat):
            return steps*self._init_u_scalar(lat)
        else:
            out = np.zeros_like(lat)
            for i in range(len(lat)):
                if lat[i] > self.phi_0 and lat[i] <= self.phi_1:
                    out[i] = self._init_u_scalar(lat[i])
                    if out[i] == np.inf:
                        out[i] = 0.0
            return steps*out

    # Integrand for initialization of h
    def _init_h_integrand(self, lat):
        """
        Integrand in Galewsky's expression for initial h
        """
        return self.earth_radius*self._init_u(lat)*(self.f + (self.tan/self.earth_radius)*self._init_u(lat))

    def _generate_h0(self, lat, lat_0):
        """
        Initializing gh according to galewsky

        NOTE(review): lat_0 is unused; the integration always starts from self.phi_0.
        """
        gh0 = np.zeros_like(lat)
        for i in range(lat.size):
            gh0[i] = self.g*self.h_0 - scipy.integrate.quadrature(self._init_h_integrand, self.phi_0, lat[i])[0]
        return gh0/self.g
| metno/gpu-ocean | gpu_ocean/SWESimulators/DoubleJetCase.py | DoubleJetCase.py | py | 17,378 | python | en | code | 10 | github-code | 90 |
25290952120 | import tempfile
import unittest
from pimlicotest import example_path
class PipelineConfigTest(unittest.TestCase):
    """Shared fixture: a bundled local config plus a throwaway storage dir."""

    def setUp(self):
        # Point Pimlico at a bundled local config so it never consults the system one
        self.local_conf_path = example_path("examples_local_config")
        # Fresh temporary directory acting as the pipeline's storage location
        self.storage_dir = tempfile.mkdtemp()
        # Redirect storage-related local config values into that directory
        self.override_local_config = {"store": self.storage_dir}

    def tearDown(self):
        import shutil
        # Drop the temporary storage directory created in setUp()
        shutil.rmtree(self.storage_dir)
class TestEmptyPipelineLoading(PipelineConfigTest):
    """
    Load a pipeline config file that doesn't contain any modules
    """

    def test_load(self):
        from pimlico.core.config import PipelineConfig

        # Parse the module-free example config file
        empty_conf = example_path("empty.conf")
        PipelineConfig.load(
            empty_conf,
            local_config=self.local_conf_path,
            override_local_config=self.override_local_config,
            only_override_config=True,
        )
class TestEmptyPipelineCreation(PipelineConfigTest):
    """
    Create an empty pipeline programmatically.
    """

    def test_load(self):
        from pimlico.core.config import PipelineConfig

        # Build a pipeline with no config file at all
        PipelineConfig.empty(
            override_local_config=self.override_local_config,
            only_override_config=True,
        )
if __name__ == "__main__":
    # Allow the test module to be run directly (discovers the TestCase classes above)
    unittest.main()
| markgw/pimlico | src/test/python/pimlicotest/core/config.py | config.py | py | 1,560 | python | en | code | 6 | github-code | 90 |
30020744147 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDialog,QInputDialog,QLineEdit,QMessageBox
import pyodbc
from decimal import Decimal
import Vasa
import os
import sys
from Odbc_Connection import Add_Odbc_Connection
class Ui_New_product_dialog(object):
    """
    Widget layout for the 'New Function Product' dialog.

    NOTE(review): this class follows the structure of pyuic5-generated code
    (setupUi/retranslateUi). If it was generated from a Qt Designer .ui file,
    edits here may be lost on regeneration - confirm before hand-editing.
    """

    def setupUi(self, New_product_dialog):
        """Create, position and style all child widgets of the dialog."""
        New_product_dialog.setObjectName("New_product_dialog")
        New_product_dialog.resize(691, 614)
        New_product_dialog.setStyleSheet("#New_product_dialog{background-image: url(:/Images/pexels-photo-949587.jpeg);}")
        # Rounded frame containing all input widgets
        self.Add_prod_frame = QtWidgets.QFrame(New_product_dialog)
        self.Add_prod_frame.setGeometry(QtCore.QRect(54, 90, 581, 451))
        self.Add_prod_frame.setStyleSheet("#Add_prod_frame{background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(126, 183, 204, 255), stop:1 rgba(255, 255, 255, 255));\n"
" \n"
"border-radius:25px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:green;}")
        self.Add_prod_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.Add_prod_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.Add_prod_frame.setObjectName("Add_prod_frame")
        # "Category :" label
        self.category_label = QtWidgets.QLabel(self.Add_prod_frame)
        self.category_label.setGeometry(QtCore.QRect(30, 80, 191, 41))
        font = QtGui.QFont()
        font.setFamily("Cambria")
        font.setPointSize(14)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.category_label.setFont(font)
        self.category_label.setStyleSheet("font: 75 14pt \"Cambria\";\n"
"font:bold;\n"
"color: rgb(11, 19, 175);\n"
"")
        self.category_label.setWordWrap(False)
        self.category_label.setIndent(-1)
        self.category_label.setObjectName("category_label")
        # "Package Name :" label
        self.package_name_label = QtWidgets.QLabel(self.Add_prod_frame)
        self.package_name_label.setGeometry(QtCore.QRect(30, 150, 191, 41))
        font = QtGui.QFont()
        font.setFamily("Cambria")
        font.setPointSize(14)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.package_name_label.setFont(font)
        self.package_name_label.setStyleSheet("font: 75 14pt \"Cambria\";\n"
"font:bold;\n"
"color: rgb(11, 19, 175);\n"
"")
        self.package_name_label.setWordWrap(False)
        self.package_name_label.setIndent(-1)
        self.package_name_label.setObjectName("package_name_label")
        # "Rate (Rs.) :" label
        self.rate_label = QtWidgets.QLabel(self.Add_prod_frame)
        self.rate_label.setGeometry(QtCore.QRect(30, 220, 191, 41))
        font = QtGui.QFont()
        font.setFamily("Cambria")
        font.setPointSize(14)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.rate_label.setFont(font)
        self.rate_label.setStyleSheet("font: 75 14pt \"Cambria\";\n"
"font:bold;\n"
"color: rgb(11, 19, 175);\n"
"")
        self.rate_label.setWordWrap(False)
        self.rate_label.setIndent(-1)
        self.rate_label.setObjectName("rate_label")
        # Category selector (populated at runtime from the database)
        self.category_combo_box = QtWidgets.QComboBox(self.Add_prod_frame)
        self.category_combo_box.setGeometry(QtCore.QRect(239, 90, 221, 41))
        self.category_combo_box.setStyleSheet("border-radius:5px;\n"
"border-style:outset;\n"
"border-width:2px;\n"
"border-color:black;\n"
"font: 11pt \"Cambria\";\n"
"font:bold;")
        self.category_combo_box.setEditable(False)
        self.category_combo_box.setIconSize(QtCore.QSize(20, 20))
        self.category_combo_box.setObjectName("category_combo_box")
        # Rate input field
        self.rate_le = QtWidgets.QLineEdit(self.Add_prod_frame)
        self.rate_le.setGeometry(QtCore.QRect(240, 220, 221, 41))
        self.rate_le.setStyleSheet("border-radius:10px;\n"
"border-style:outset;\n"
"border-width:2px;\n"
"border-color:black;\n"
"font: 11pt \"Cambria\";\n"
"font:bold")
        self.rate_le.setEchoMode(QtWidgets.QLineEdit.Normal)
        self.rate_le.setObjectName("rate_le")
        # Save button
        self.save_btn = QtWidgets.QPushButton(self.Add_prod_frame)
        self.save_btn.setGeometry(QtCore.QRect(90, 360, 131, 61))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.save_btn.setFont(font)
        self.save_btn.setStyleSheet("#save_btn{background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(26, 204, 57, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-radius:20px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:black;\n"
"font:bold;} \n"
"#save_btn:pressed{border-style:solid;border-width:6px}" )
        self.save_btn.setObjectName("save_btn")
        # Clear button
        self.clear_btn = QtWidgets.QPushButton(self.Add_prod_frame)
        self.clear_btn.setGeometry(QtCore.QRect(330, 360, 131, 61))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.clear_btn.setFont(font)
        self.clear_btn.setStyleSheet("#clear_btn{background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(204, 188, 26, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-radius:20px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:black;\n"
"font:bold;} \n"
"#clear_btn:pressed{border-style:solid;border-width:6px}" )
        self.clear_btn.setObjectName("clear_btn")
        # "New Product" button
        self.add_btn = QtWidgets.QPushButton(self.Add_prod_frame)
        self.add_btn.setGeometry(QtCore.QRect(30, 290, 171, 41))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.add_btn.setFont(font)
        self.add_btn.setStyleSheet("#add_btn{background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(85, 120, 204, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-radius:20px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:black;\n"
"font:bold;} \n"
"#add_btn:pressed{border-style:solid;border-width:6px}" )
        self.add_btn.setObjectName("add_btn")
        # Disabled widget kept for reference (dead code retained verbatim)
        '''self.toolButton = QtWidgets.QToolButton(self.Add_prod_frame)
self.toolButton.setGeometry(QtCore.QRect(435, 110, 141, 141))
self.toolButton.setStyleSheet("\n"
"border-radius:50px;\n"
"border-style:outset;\n"
"border-width:0px;\n"
"border-color:white;\n"
"")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/images.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon)
self.toolButton.setIconSize(QtCore.QSize(80, 140))
self.toolButton.setObjectName("toolButton")'''
        # Package selector - disabled until the MARRIAGE category is chosen
        self.package_combo_box = QtWidgets.QComboBox(self.Add_prod_frame)
        self.package_combo_box.setEnabled(False)
        self.package_combo_box.setGeometry(QtCore.QRect(240, 152, 221, 41))
        self.package_combo_box.setStyleSheet("border-radius:5px;\n"
"border-style:outset;\n"
"border-width:2px;\n"
"border-color:black;\n"
"font: 11pt \"Cambria\";\n"
"font:bold;")
        self.package_combo_box.setEditable(False)
        self.package_combo_box.setIconSize(QtCore.QSize(20, 20))
        self.package_combo_box.setObjectName("package_combo_box")
        # "New Package" button
        self.add_btn_2 = QtWidgets.QPushButton(self.Add_prod_frame)
        self.add_btn_2.setGeometry(QtCore.QRect(290, 290, 171, 41))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.add_btn_2.setFont(font)
        self.add_btn_2.setStyleSheet("#add_btn_2{background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(100, 9, 85, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-radius:20px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:black;\n"
"font:bold;} \n"
"#add_btn_2:pressed{border-style:solid;border-width:6px}" )
        self.add_btn_2.setObjectName("add_btn_2")
        # Dialog title banner
        self.toolButton_2 = QtWidgets.QToolButton(New_product_dialog)
        self.toolButton_2.setGeometry(QtCore.QRect(160, 50, 371, 81))
        self.toolButton_2.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(125, 37, 37, 255), stop:1 rgba(255, 255, 255, 255));\n"
"font: 75 16pt \"Cambria\";\n"
"font: Bold;\n"
"color: rgb(34, 48, 170);\n"
"border-radius:40px;\n"
"border-style:outset;\n"
"border-width:3px;\n"
"border-color:green;")
        self.toolButton_2.setObjectName("toolButton_2")

        self.retranslateUi(New_product_dialog)
        QtCore.QMetaObject.connectSlotsByName(New_product_dialog)

    def retranslateUi(self, New_product_dialog):
        """Set all user-visible (translatable) widget texts."""
        _translate = QtCore.QCoreApplication.translate
        New_product_dialog.setWindowTitle(_translate("New_product_dialog", "Dialog"))
        self.category_label.setText(_translate("New_product_dialog", "Category :"))
        self.package_name_label.setText(_translate("New_product_dialog", "Package Name :"))
        self.rate_label.setText(_translate("New_product_dialog", "Rate (Rs.) :"))
        self.save_btn.setText(_translate("New_product_dialog", "Save"))
        self.clear_btn.setText(_translate("New_product_dialog", "Clear"))
        self.add_btn.setText(_translate("New_product_dialog", "New Product"))
        #self.toolButton.setText(_translate("New_product_dialog", "..."))
        self.add_btn_2.setText(_translate("New_product_dialog", "New Package"))
        self.toolButton_2.setText(_translate("New_product_dialog", "New Function Product"))
class Add_Function_Product_window(QDialog, Ui_New_product_dialog):
    """
    Dialog for maintaining FUNCTION-type products and marriage packages.

    Fixes vs the previous version:
    * packageid(): removed an unreachable print statement placed after return.
    * savebtn(): removed self.frame_size_le.clear() - this dialog has no such
      widget, so the successful-update path raised AttributeError (copy-paste
      from another dialog).
    * Combo boxes are now populated from a sorted list instead of a raw set,
      giving a deterministic item order.
    * Removed dead commented-out connection code and a debug print.
    """

    def __init__(self, parent=None):
        super(Add_Function_Product_window, self).__init__(parent)
        self.setupUi(self)
        self.setWindowTitle("Add Function Products")
        # Restrict the rate field to integer input
        self.onlydouble = QtGui.QIntValidator()
        self.rate_le.setValidator(self.onlydouble)
        self.combovalue()
        # Wire up signals
        self.category_combo_box.currentIndexChanged['QString'].connect(self.comboindexchanged)
        self.add_btn.clicked.connect(self.newprod)
        self.add_btn_2.clicked.connect(self.newpackage)
        self.save_btn.clicked.connect(self.savebtn)
        self.clear_btn.clicked.connect(self.clearbtn)
        # Resolve resource paths both for a plain script and a frozen exe
        config_name = 'function_product.cfg'
        if getattr(sys, 'frozen', False):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        # config_path is currently unused but kept for parity with sibling dialogs
        config_path = os.path.join(application_path, config_name)
        icon_image = os.path.join(application_path, "images", "VASA_ICON.png")
        self.setWindowIcon(QtGui.QIcon(icon_image))
        # Button icons
        clear_image = os.path.join(application_path, "images", "clear.png")
        save_image = os.path.join(application_path, "images", "save.png")
        add_image = os.path.join(application_path, "images", "add.png")
        self.clear_btn.setIcon(QtGui.QIcon(clear_image))
        self.clear_btn.setIconSize(QtCore.QSize(35, 35))
        self.save_btn.setIcon(QtGui.QIcon(save_image))
        self.save_btn.setIconSize(QtCore.QSize(30, 30))
        self.add_btn.setIcon(QtGui.QIcon(add_image))
        self.add_btn.setIconSize(QtCore.QSize(20, 20))
        self.add_btn_2.setIcon(QtGui.QIcon(add_image))
        self.add_btn_2.setIconSize(QtCore.QSize(20, 20))

    def connectdb(self):
        """
        Open the shared ODBC connection.

        Stores the cursor and connection in the module-level globals
        `cur` and `connect` (pattern shared with the sibling dialogs).
        """
        global cur
        global connect
        cur, con = Add_Odbc_Connection.connectdb(self)
        connect = con
        return cur

    def newprod(self):
        """Prompt for a new FUNCTION product name and insert it."""
        text, okpressed = QInputDialog.getText(self, 'Add Product', '<html style="font-size:12pt;font-weight:bold;">Product Name :</html>', QLineEdit.Normal, "")
        if okpressed and text != '':
            value = str.upper(text)
            self.connectdb()
            query_value = (value, 'FUNCTION')
            ins_query = 'INSERT INTO dbo.PROD_DETAILS (PROD_ID,PROD_NAME,PROD_TYPE) VALUES (NEXT VALUE FOR dbo.seq_prod,?,?)'
            cur.execute(ins_query, query_value)
            connect.commit()
            connect.close()
            QMessageBox.information(self, 'Information', 'Product ' + value + ' added successfully')
            # Refresh the category list so the new product shows up immediately
            self.category_combo_box.clear()
            self.combovalue()

    def newpackage(self):
        """Prompt for a new package name and insert it."""
        text, okpressed = QInputDialog.getText(self, 'Add Package', '<html style="font-size:12pt;font-weight:bold;">Package Name :</html>', QLineEdit.Normal, "")
        if okpressed and text != '':
            value = str.upper(text)
            self.connectdb()
            ins_query = 'INSERT INTO dbo.PACKAGE_DETAILS (PACKAGE_ID,PACKAGE_NAME) VALUES (NEXT VALUE FOR dbo.seq_package,?)'
            cur.execute(ins_query, value)
            connect.commit()
            connect.close()
            QMessageBox.information(self, 'Information', 'Package ' + value + ' added successfully')
            # The package combo is repopulated lazily when MARRIAGE is selected
            self.package_combo_box.clear()

    def combovalue(self):
        """Populate the category combo box with all FUNCTION product names."""
        self.combolist = set()
        sel_query = "SELECT PROD_NAME from dbo.PROD_DETAILS WHERE PROD_TYPE ='FUNCTION'"
        self.connectdb()
        cur.execute(sel_query)
        result = cur.fetchall()
        for row in result:
            self.combolist.add(row[0])
        # Sorted list gives a deterministic ordering (was an unordered set)
        self.category_combo_box.addItems(sorted(self.combolist))

    def combopackagevalue(self):
        """Populate the package combo box with all package names."""
        self.combolist = set()
        sel_query = "SELECT PACKAGE_NAME from dbo.PACKAGE_DETAILS"
        self.connectdb()
        cur.execute(sel_query)
        result = cur.fetchall()
        for row in result:
            self.combolist.add(row[0])
        self.package_combo_box.addItems(sorted(self.combolist))

    def comboindexchanged(self):
        """Enable the package selector only for the MARRIAGE category."""
        current_value = self.category_combo_box.currentText()
        if current_value == 'MARRIAGE':
            self.package_combo_box.setEnabled(True)
            self.package_combo_box.setStyleSheet("border-radius:10px;\n"
"border-style:outset;\n"
"border-width:2px;\n"
"border-color:black;\n"
"font: 11pt \"Cambria\";\n"
"font:bold")
            self.combopackagevalue()
        else:
            self.package_combo_box.setEnabled(False)
            self.package_combo_box.setStyleSheet('QPushButton: disabled{background - color:grey}')

    def packageid(self, name):
        """Return the PACKAGE_ID for the given package name (assumes it exists)."""
        sel_query = 'SELECT PACKAGE_ID FROM dbo.PACKAGE_DETAILS WHERE PACKAGE_NAME=?'
        cur.execute(sel_query, name)
        result = cur.fetchone()
        package_id = result[0]
        return package_id

    def savebtn(self):
        """
        Persist the rate for the selected category.

        Non-MARRIAGE categories update the product row directly; MARRIAGE
        additionally requires a package and inserts/updates the
        (product, package) combination.
        """
        cat_value = str(self.category_combo_box.currentText())
        package_name = str(self.package_combo_box.currentText())
        if self.rate_le.text().isnumeric():
            price = int(self.rate_le.text())
        else:
            QMessageBox.information(self, 'Alert Window', 'Please enter the Rate details')
            price = 0
        self.connectdb()
        if price != 0 and cat_value != 'MARRIAGE':
            upd_query = "UPDATE dbo.PROD_DETAILS SET PRICE=? WHERE PROD_NAME=? AND PROD_TYPE='FUNCTION'"
            data = (price, cat_value)
            cur.execute(upd_query, data)
            connect.commit()
            connect.close()
            QMessageBox.information(self, 'Information', 'Prod information added successfully')
            self.rate_le.clear()
        elif cat_value == 'MARRIAGE' and package_name != '' and price != 0:
            package_id_value = self.packageid(package_name)
            sel_query = "SELECT * FROM dbo.PROD_DETAILS WHERE PROD_NAME=? AND PACKAGE_ID =? AND PROD_TYPE='FUNCTION'"
            sel_data = (cat_value, package_id_value)
            cur.execute(sel_query, sel_data)
            sel_value = cur.fetchall()
            if len(sel_value) > 0:
                # Combination exists: just update the price
                update_query = 'UPDATE dbo.PROD_DETAILS SET PRICE =? WHERE PROD_NAME=? AND PACKAGE_ID=?'
                upd_data = (price, cat_value, package_id_value)
                cur.execute(update_query, upd_data)
                connect.commit()
                connect.close()
                QMessageBox.information(self, 'Information', 'PROD details updated successfully')
                # FIX: previously also called self.frame_size_le.clear(), but this
                # dialog has no frame_size_le widget -> AttributeError.
                self.rate_le.clear()
            else:
                # New (product, package) combination: insert a fresh row
                ins_query = 'INSERT INTO dbo.PROD_DETAILS(PROD_ID,PROD_NAME,PACKAGE_ID,PRICE,PROD_TYPE) VALUES (NEXT VALUE FOR dbo.seq_prod, ?,?,?,?)'
                prod_type = 'FUNCTION'
                data = (cat_value, package_id_value, price, prod_type)
                cur.execute(ins_query, data)
                connect.commit()
                connect.close()
                QMessageBox.information(self, 'Information', 'Prod information added successfully')
                self.rate_le.clear()
                self.package_combo_box.clear()
        elif package_name == '' and cat_value == 'MARRIAGE':
            QMessageBox.information(self, 'Alert Window', 'PACKAGE NAME is mandatory')

    def clearbtn(self):
        """Reset the rate input field."""
        self.rate_le.clear()
if __name__ == "__main__":
    # Standalone launch of the dialog for manual testing
    import sys
    app = QtWidgets.QApplication(sys.argv)
    New_product_dialog = QtWidgets.QDialog()
    #ui = Ui_New_product_dialog()
    #ui.setupUi(New_product_dialog)
    #New_product_dialog.show()
    ui = Add_Function_Product_window()
    ui.show()
    # Enter the Qt event loop; exit with its return code
    sys.exit(app.exec_())
| rvssankar/vasa_studio | Add_Function_Product.py | Add_Function_Product.py | py | 18,294 | python | en | code | 0 | github-code | 90 |
18550317839 | #!/usr/bin/env python3
import sys
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'


def solve(s):
    """
    Return the lexicographically smallest "diverse word" strictly greater
    than s, or '-1' if none exists.

    A diverse word uses each lowercase letter at most once (length <= 26).
    Logic extracted from main() so it can be unit tested without stdin.
    """
    if len(s) < 26:
        # Appending the smallest unused letter yields the immediate successor.
        unused = set(ALPHABET) - set(s)
        return s + min(unused)
    # Here s is a permutation of the full alphabet.
    if s == ALPHABET[::-1]:
        # 'zyx...cba' is the largest diverse word: no successor exists.
        return '-1'
    # count = length of the longest non-increasing suffix of s.
    count = 1
    for i in range(25):
        if s[25 - i] > s[24 - i]:
            break
        count += 1
    # Replace the pivot s[25-count] with the smallest larger letter found in
    # the suffix, and drop everything after it (shorter word sorts first).
    for c in sorted(s[26 - count:]):
        if c > s[25 - count]:
            return s[:25 - count] + c


def main():
    # Read the word from stdin and print its successor (or -1).
    print(solve(input()))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03393/s376274470.py | s376274470.py | py | 644 | python | en | code | 0 | github-code | 90 |
27362383849 | import numpy as np
import numpy.linalg as linalg
import math
import random
# utils
def is_row_vector(vector):
return vector.shape[0] == 1
def is_col_vector(vector):
return vector.shape[1] == 1
def matrix_dimensions_match(first, second):
return first.shape[1] == second.shape[0]
# functions
def portfolio_returns(portfolio_weights, expected_returns):
'''
input does not need to be transposed
'''
if not is_row_vector(portfolio_weights):
portfolio_weights = np.transpose(portfolio_weights)
if not is_col_vector(expected_returns):
expected_returns = np.transpose(expected_returns)
if not matrix_dimensions_match(portfolio_weights, expected_returns):
print("ERROR: Your dimensions do not match!")
return None
return np.matmul(portfolio_weights, expected_returns)
def portfolio_variance(portfolio_weights, var_covar_matrix):
'''
input does not need to be transposed
'''
if not is_row_vector(portfolio_weights):
portfolio_weights = np.transpose(portfolio_weights)
if not matrix_dimensions_match(portfolio_weights, var_covar_matrix):
print("ERROR: Your dimensions do not match!")
return None
return np.matmul(np.matmul(portfolio_weights, var_covar_matrix), np.transpose(portfolio_weights))
def min_var_portfolio(var_covar_matrix):
'''
takes a variance-covariance-matrix and
returns the minimum variance portfolio
'''
ones_vector = np.ones((var_covar_matrix.shape[0],1))
mat_sum_axis1 = np.matmul(linalg.inv(var_covar_matrix), ones_vector)
return mat_sum_axis1 / np.matmul(np.transpose(ones_vector), mat_sum_axis1)
def find_min_var_portfolio_for_fixed(fixed_asset, other_assets, var_covar_matrix, repetitions=100000):
variance = math.inf
for _ in range(repetitions):
fa = [fixed_asset]
num = 1000
sec = random.randrange(-600,600)
while True:
tri = random.randrange(-600,600)
if (400 + sec + tri) <= 1000:
break
fo = 600 - tri - sec
fa = [0.4, sec / num, tri / num, fo / num]
w = np.array([fa])
new_variance = portfolio_variance(w, var_covar_matrix)
if new_variance < variance:
variance = new_variance
portfolio_weights = w
return portfolio_weights, variance
def find_min_var_portfolio_for_return(goal_return, expected_returns, var_covar_matrix, repetitions=100000, no_short_sales=False):
    """Monte-Carlo search for the minimum-variance weights whose expected
    return lies within 0.01 of *goal_return*.

    Bug fixes versus the original:
    - the target ``goal_return`` was overwritten with each accepted candidate,
      so the search target drifted away from the caller's request; the target
      is now held fixed and the achieved return tracked separately,
    - the outer loop variable ``i`` was shadowed by the inner loop,
    - the asset count (4) is now derived from the covariance matrix,
    - returns (None, goal_return, inf) instead of raising UnboundLocalError
      when no candidate meets the tolerance.

    Returns (weights column vector, achieved return, variance).
    """
    n_assets = var_covar_matrix.shape[0]
    best_variance = math.inf
    best_return = goal_return
    best_weights = None
    for _ in range(repetitions):
        w = np.zeros((n_assets, 1))
        for j in range(n_assets):
            # Long-only weights in [0, 1) or unconstrained integer draws.
            w[j] = random.random() if no_short_sales else random.randrange(-1000, 1000)
        total = np.sum(w)
        if total == 0:
            continue
        w = w / total  # normalize so the weights sum to 1
        new_variance = portfolio_variance(w, var_covar_matrix)
        new_return = portfolio_returns(w, expected_returns)
        if new_variance < best_variance and abs(goal_return - np.average(new_return)) < 0.01:
            best_variance = new_variance
            best_return = new_return
            best_weights = w
    return best_weights, best_return, best_variance
def find_min_var_portfolio_for_var(goal_var, expected_returns, var_covar_matrix, repetitions=100000, no_short_sales=False):
    """Monte-Carlo search for the maximum-return weights whose variance lies
    within 0.1 of *goal_var*.

    Bug fixes versus the original:
    - the outer loop variable ``i`` was shadowed by the inner loop,
    - the asset count (4) is now derived from the covariance matrix,
    - returns (None, -inf, inf) instead of raising UnboundLocalError when no
      candidate meets the variance tolerance.

    Returns (weights column vector, expected return, variance).
    """
    n_assets = var_covar_matrix.shape[0]
    best_return = -math.inf
    best_variance = math.inf
    best_weights = None
    for _ in range(repetitions):
        w = np.zeros((n_assets, 1))
        for j in range(n_assets):
            # Long-only weights in [0, 1) or unconstrained integer draws.
            w[j] = random.random() if no_short_sales else random.randrange(-1000, 1000)
        total = np.sum(w)
        if total == 0:
            continue
        w = w / total  # normalize so the weights sum to 1
        new_variance = portfolio_variance(w, var_covar_matrix)
        new_return = portfolio_returns(w, expected_returns)
        if new_return > best_return and abs(goal_var - new_variance) < 0.1:
            best_variance = new_variance
            best_return = new_return
            best_weights = w
    return best_weights, best_return, best_variance
| Hubertus444/investments | basics.py | basics.py | py | 4,204 | python | en | code | 0 | github-code | 90 |
18455635219 | from operator import itemgetter
from itertools import accumulate
n,k = map(int, input().split())
sushi = [list(map(int, input().split())) for i in range(n)]
sushi.sort(key=itemgetter(1))
sushi.reverse()
# 各ネタ最大美味しさの寿司
a = []
# それ以外の寿司
b = []
# aにすでに入っているネタ = 1
in_a = [0]*(n+1)
for i in range(n):
t,d = sushi[i]
if in_a[t]==0:
in_a[t]=1
a.append(d)
else:
b.append(d)
ans = 0
a = list(accumulate([0]+a))
b = list(accumulate([0]+b))
for i in range(len(a)):
if i<=k:
if 0<=k-i<len(b):
ans = max(a[i]+b[k-i]+i**2,ans)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03148/s012977204.py | s012977204.py | py | 656 | python | en | code | 0 | github-code | 90 |
28348946727 | import pytest
import json
from humps import decamelize
from eth_account import Account, messages
from siwe.siwe import SiweMessage, ValidationError
# Shared SIWE test vectors (camelCase JSON converted to snake_case keys).
BASE_TESTS = "tests/siwe/test/"
with open(BASE_TESTS + "parsing_positive.json", "r") as f:
    parsing_positive = decamelize(json.load(fp=f))
with open(BASE_TESTS + "parsing_negative.json", "r") as f:
    parsing_negative = decamelize(json.load(fp=f))
with open(BASE_TESTS + "validation_negative.json", "r") as f:
    validation_negative = decamelize(json.load(fp=f))
with open(BASE_TESTS + "validation_positive.json", "r") as f:
    validation_positive = decamelize(json.load(fp=f))
class TestMessageParsing:
    """Parsing of raw EIP-4361 message strings into SiweMessage objects."""
    @pytest.mark.parametrize("abnf", [True, False])
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in parsing_positive.items()],
    )
    def test_valid_message(self, abnf, test_name, test):
        """Well-formed messages must parse with both the ABNF and non-ABNF parsers."""
        siwe_message = SiweMessage(message=test["message"], abnf=abnf)
        # NOTE(review): missing `assert` -- this comparison is a no-op, so the
        # test only verifies that construction does not raise.
        test["fields"] == siwe_message
    @pytest.mark.parametrize("abnf", [True, False])
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in parsing_negative.items()],
    )
    def test_invalid_message(self, abnf, test_name, test):
        """Malformed messages must be rejected at construction time."""
        with pytest.raises(ValueError):
            SiweMessage(message=test, abnf=abnf)
class TestMessageGeneration:
    """Regeneration of the message text from parsed fields."""
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in parsing_positive.items()],
    )
    def test_valid_message(self, test_name, test):
        """prepare_message() must reproduce the original message exactly."""
        siwe_message = SiweMessage(message=test["fields"])
        assert siwe_message.prepare_message() == test["message"]
class TestMessageValidation:
    """Signature validation of SIWE messages."""
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in validation_positive.items()],
    )
    def test_valid_message(self, test_name, test):
        """Validation must succeed for the fixture's own signature."""
        siwe_message = SiweMessage(message=test)
        siwe_message.validate(test["signature"])
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in validation_negative.items()],
    )
    def test_invalid_message(self, test_name, test):
        """Bad fixtures must fail either at construction or at validation."""
        with pytest.raises((ValidationError, ValueError)):
            siwe_message = SiweMessage(message=test)
            siwe_message.validate(test["signature"])
class TestMessageRoundTrip:
    """Sign a freshly generated message and validate our own signature."""
    # One throwaway key pair shared by all parametrized cases (created at import).
    account = Account.create()
    @pytest.mark.parametrize(
        "test_name,test",
        [(test_name, test) for test_name, test in parsing_positive.items()],
    )
    def test_message_round_trip(self, test_name, test):
        """Build -> sign (EIP-191 personal_sign) -> validate must succeed."""
        message = SiweMessage(test["fields"])
        # Re-point the message at our own address so our signature matches.
        message.address = self.account.address
        signature = self.account.sign_message(
            messages.encode_defunct(text=message.prepare_message())
        ).signature
        message.validate(signature)
| 0xdaem0n/siwe-py | tests/test_siwe.py | test_siwe.py | py | 2,910 | python | en | code | null | github-code | 90 |
19263579254 | #!/usr/bin/python3
from flask import Flask, jsonify, request, make_response, abort
import psycopg2
import psycopg2.extras
from . import *
app = Flask(__name__)
# Global merger instance shared by all request handlers below.
config = load_configuration('/var/www/robodoge/config.yml')
try:
    merger = Robodoge(config)
except ConfigurationError as err:
    print(err.msg)
    # NOTE(review): `sys` is not imported in this module directly; presumably
    # re-exported by `from . import *` -- confirm.
    sys.exit(1)
@app.route('/automerge/api/v1.0/pr/', methods=['GET'])
def get_prs():
    """List all non-closed dogecoin/dogecoin pull requests as JSON."""
    conn = merger.get_connection()
    try:
        # RealDictCursor makes fetchall() return JSON-serializable dicts.
        cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        try:
            cursor.execute("""SELECT id, number, url,state,title,user_login,html_url,assignee_login,milestone_title,base_ref, build_node, s3_arn, test_node
                              FROM pull_request
                              WHERE project='dogecoin/dogecoin' and state!='closed'
                              ORDER BY id ASC""")
            return jsonify({'prs': cursor.fetchall()})
        finally:
            cursor.close()
    finally:
        conn.close()
@app.route('/automerge/api/v1.0/pr/build_ready', methods=['GET'])
def get_buildable_prs():
    """List PRs ready to build: open, unassigned, milestone 1.9 against
    1.9-dev, and not yet claimed by any build node."""
    conn = merger.get_connection()
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        try:
            cursor.execute("""SELECT id, number, url,state,title,user_login,html_url,assignee_login,milestone_title,base_ref, build_node, s3_arn, test_node
                              FROM pull_request
                              WHERE project='dogecoin/dogecoin' and state='open' and assignee_login is null and milestone_title='1.9' and base_ref='1.9-dev' and build_node IS NULL
                              ORDER BY id ASC""")
            return jsonify({'prs': cursor.fetchall()})
        finally:
            cursor.close()
    finally:
        conn.close()
@app.route('/automerge/api/v1.0/pr/<int:pr_id>', methods=['GET'])
def get_pr(pr_id):
    """Fetch a single pull request by internal id (empty list if unknown)."""
    conn = merger.get_connection()
    try:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        try:
            cursor.execute("""SELECT id, number, url,state,title,user_login,html_url,assignee_login,milestone_title,base_ref, build_node, s3_arn, test_node
                              FROM pull_request
                              WHERE id=%(id)s""", {'id': pr_id})
            return jsonify({'prs': cursor.fetchall()})
        finally:
            cursor.close()
    finally:
        conn.close()
@app.route('/automerge/api/v1.0/pr/<int:pr_id>', methods=['POST'])
def update_pr(pr_id):
    """Dispatch a build/test lifecycle operation for a PR.

    The JSON body's "operation" selects one of: claim_build, build_success,
    build_failed, test_pr, test_success, test_failed. 404 if the PR id is
    unknown.
    """
    pr_url = None
    conn = merger.get_connection()
    try:
        cursor = conn.cursor()
        try:
            cursor.execute("""SELECT url
                              FROM pull_request
                              WHERE id=%(id)s""", {'id': pr_id})
            row = cursor.fetchone()
            if not row:
                abort(404)
            else:
                # GitHub assignee updates go through the issues API, not pulls.
                pr_url = row[0].replace('pulls', 'issues')
        finally:
            cursor.close()
        if not request.json or not 'operation' in request.json:
            return jsonify({'result': 'No operation specified'})
        if request.json['operation'] == 'claim_build':
            # NOTE(review): assignee username is hard-coded to 'rnicoll'.
            return claim_pr(conn, pr_id, pr_url, 'rnicoll', request.remote_addr)
        elif request.json['operation'] == 'build_success':
            if not 's3_arn' in request.json:
                return jsonify({'result': 'No S3 ARN specified'})
            return mark_build_success(conn, pr_id, request.json['s3_arn'])
        elif request.json['operation'] == 'build_failed':
            return mark_build_failed(conn, pr_id)
        elif request.json['operation'] == 'test_pr':
            return test_pr(conn, pr_id, pr_url, request.remote_addr)
        elif request.json['operation'] == 'test_success':
            return mark_test_success(conn, pr_id)
        elif request.json['operation'] == 'test_failed':
            return mark_test_failed(conn, pr_id)
        else:
            return jsonify({'result': 'Invalid operation specified'})
    finally:
        conn.close()
def claim_pr(conn, pr_id, pr_url, username, remote_addr):
    """Assign the PR to *username* on GitHub and record *remote_addr* as its
    build node; reports 'Build already taken' if another node claimed it."""
    # Tell Github we're claiming the PR
    # NOTE(review): local `request` shadows flask.request (harmless here).
    request = {
        'assignee': username
    }
    if not merger.call_github(pr_url, request, 'PATCH'):
        return jsonify({'result': 'failed to call Github'})
    # Update the local database; the "build_node IS NULL" guard makes the
    # claim atomic -- rowcount is 0 when another node got there first.
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET assignee_login=%(username)s, build_node=%(remote_addr)s, build_started=NOW()
            WHERE id=%(id)s AND build_node IS NULL""",
                       {'id': pr_id, 'username': username, 'remote_addr': remote_addr})
        rowcount = cursor.rowcount
        conn.commit()
    finally:
        cursor.close()
    if rowcount > 0:
        # Return a value to let the node know that's okay
        return jsonify({'result': 'ok'})
    else:
        return jsonify({'result': 'Build already taken'})
def mark_build_failed(conn, pr_id):
    """Record a failed build for the given pull request.

    Bug fix: the original referenced `cursor` without ever creating it, so
    this endpoint raised NameError on every call.
    """
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET build_failed=NOW()
            WHERE id=%(id)s""", {'id': pr_id})
        conn.commit()
    finally:
        cursor.close()
    # Return a value to let the node know that's okay
    return jsonify({'result': 'ok'})
def mark_build_success(conn, pr_id, s3_arn):
    """Record a successful build and the S3 location of its artifacts.

    Bug fix: the original referenced `cursor` without ever creating it, so
    this endpoint raised NameError on every call.
    """
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET build_succeeded=NOW(), s3_arn=%(s3_arn)s
            WHERE id=%(id)s""", {'id': pr_id, 's3_arn': s3_arn})
        conn.commit()
    finally:
        cursor.close()
    # Return a value to let the node know that's okay
    return jsonify({'result': 'ok'})
def test_pr(conn, pr_id, pr_url, remote_addr):
    """Record *remote_addr* as the node that started testing this PR."""
    # Update the local database
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET test_node=%(remote_addr)s, test_started=NOW()
            WHERE id=%(id)s""", {'id': pr_id, 'remote_addr': remote_addr})
        conn.commit()
    finally:
        cursor.close()
    # Return a value to let the node know that's okay
    return jsonify({'result': 'ok'})
def mark_test_failed(conn, pr_id):
    """Record a failed test run for the given pull request.

    Bug fix: the original referenced `cursor` without ever creating it, so
    this endpoint raised NameError on every call.
    """
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET test_failed=NOW()
            WHERE id=%(id)s""", {'id': pr_id})
        conn.commit()
    finally:
        cursor.close()
    # Return a value to let the node know that's okay
    return jsonify({'result': 'ok'})
def mark_test_success(conn, pr_id):
    """Record a successful test run for the given pull request.

    Bug fixes: the original referenced `cursor` without ever creating it
    (NameError on every call), and the SQL contained a typo -- "NOW()s" --
    which would have been a syntax error had it ever executed.
    """
    cursor = conn.cursor()
    try:
        cursor.execute("""UPDATE pull_request
            SET test_succeeded=NOW()
            WHERE id=%(id)s""", {'id': pr_id})
        conn.commit()
    finally:
        cursor.close()
    # Return a value to let the node know that's okay
    return jsonify({'result': 'ok'})
| rnicoll/robodoge | robodoge/coordinator.py | coordinator.py | py | 6,901 | python | en | code | 0 | github-code | 90 |
70762201898 | """
Proyecto Python MySQL:
- Abrir aistente
- Login o registro
- Si elegimos registro, creará un usuario en la base de datos
- Si elegimos login, identificará al usuario y nos preguntará
- Crear nota, mostrar nota, borrarlas
"""
# carpeta archivo
from usuarios import acciones
print("""
Acciones disponible:
- registro
- login
""")
hazEl = acciones.Acciones()
accion = input("¿Qué quieres hacer?: ")
if accion== "registro":
hazEl.registro()
elif accion =="login":
hazEl.login()
else:
print("\nError... Opcion no valida!!!") | AlexSR2590/curso-python | 20-proyecto-python/main.py | main.py | py | 550 | python | es | code | 0 | github-code | 90 |
13088364217 | import os
class Pajaro:
    """A bird described by its type (``t``) and colour (``c``)."""

    # Class-wide trait shared by every bird.
    alas = True

    def __init__(self, tipo, color):
        """Store the bird's type and colour."""
        self.t = tipo
        self.c = color

    @classmethod
    def volar(cls):
        """Report that birds have wings, reading the class attribute."""
        print(f"Las aves tienen alas: {cls.alas}")

    def cantidad_huevos(self):
        """Egg count as a string; only canaries have a known count."""
        return "3" if self.t == "Canario" else "Desconocido"
def clear_pantalla():
    """Clear the console when running on Windows; no-op elsewhere."""
    es_windows = os.name == "nt"
    if es_windows:
        os.system("cls")
# Demo: clear the screen, create a canary and exercise both methods.
clear_pantalla()
piolin = Pajaro(tipo = "Canario", color= "Amarillo")
piolin.volar()
print(f"El {piolin.t} pone {piolin.cantidad_huevos()} huevos")
| jimymora1965/Practica-POO-dia-8-Udemy | pajaro_huevos_metodoDeClase.py | pajaro_huevos_metodoDeClase.py | py | 570 | python | es | code | 0 | github-code | 90 |
683154915 | #!/usr/bin/env python
import requests
from solar.core.resource import virtual_resource as vr
from solar.events.api import add_event
from solar.events.controls import React
discovery_service = 'http://0.0.0.0:8881'
bareon_partitioning = 'http://0.0.0.0:9322/v1/nodes/{0}/partitioning'
bareon_repos = 'http://0.0.0.0:9322/v1/nodes/{0}/repos'
bareon_sync = 'http://0.0.0.0:9322/v1/actions/sync_all'
class NodeAdapter(dict):
    """Dict wrapper exposing node fields as attributes, plus lazy lookups
    of partitioning/repo data from the bareon API."""

    def __getattr__(self, name):
        # Fall back to dict lookup so node['x'] is also readable as node.x.
        if name in self:
            return self[name]
        raise AttributeError(name)

    @property
    def node_id(self):
        """The node's unique identifier."""
        return self['id']

    @property
    def partitioning(self):
        """Partitioning scheme fetched from the bareon API for this node."""
        url = bareon_partitioning.format(self['id'])
        return requests.get(url).json()

    @property
    def repos(self):
        """Repository list fetched from the bareon API for this node."""
        url = bareon_repos.format(self['id'])
        return requests.get(url).json()
# Sync hw info about nodes from discovery service into bareon-api
requests.post(bareon_sync)
# Get list of nodes from discovery service
nodes_list = requests.get(discovery_service).json()
# Create slave node resources
node_resources = vr.create('nodes', 'templates/not_provisioned_nodes.yaml',
                           {'nodes': nodes_list})
# Get master node
# NOTE(review): filter(...)[0] only works on Python 2 (py3 filter objects
# are not subscriptable) -- this script appears to target Python 2.
master_node = filter(lambda n: n.name == 'node_master', node_resources)[0]
with open('/vagrant/tmp/keys/ssh_public') as fp:
    master_key = fp.read().strip()
# Per-node: attach partitioning/repo data, create a dnsmasq resource, and
# wire run -> provision -> exclude_mac_pxe -> reboot events.
for node in nodes_list:
    node = NodeAdapter(node)
    node_resource = next(n for n in node_resources
                         if n.name.endswith('node_{0}'.format(node.node_id)))
    node_resource.update(
        {
            'partitioning': node.partitioning,
            'master_key': master_key,
            'repos': node.repos,
        }
    )
    dnsmasq = vr.create('dnsmasq_{0}'.format(node.node_id),
                        'resources/dnsmasq', {})[0]
    master_node.connect(dnsmasq)
    node_resource.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'})
    event = React(node_resource.name, 'run', 'success', node_resource.name,
                  'provision')
    add_event(event)
    event = React(node_resource.name, 'provision', 'success', dnsmasq.name,
                  'exclude_mac_pxe')
    add_event(event)
    event = React(dnsmasq.name, 'exclude_mac_pxe', 'success',
                  node_resource.name, 'reboot')
    add_event(event)
25201143779 | import requests
import xml.etree.ElementTree as ET
import pandas as pd
def fetch_fasta_sequence(transcript_id):
    """Fetch the FASTA record for *transcript_id* from NCBI efetch.

    Returns the FASTA text, or None when the request fails (errors are
    printed rather than raised).
    """
    try:
        # Send GET request to the NCBI e-utilities server
        r = requests.get(
            f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nucleotide&id={transcript_id}&rettype=fasta")
        # Raise an error if the request is not successful
        r.raise_for_status()
        # Return the response in text format
        return r.text
    except requests.exceptions.HTTPError as err:
        print(f"An HTTP error occurred: {err}")
    except requests.exceptions.RequestException as err:
        print(f"An error occurred: {err}")
def fetch_refseq_id(gene_symbol):
    """Look up a RefSeq id for *gene_symbol* via NCBI esearch/elink.

    Returns None (implicitly) when a request fails; errors are printed.
    """
    try:
        # Search for the gene's NCBI gene ID using the gene symbol
        r = requests.get(f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&term={gene_symbol}")
        r.raise_for_status()
        root = ET.fromstring(r.text)
        gene_id = root.find("IdList/Id").text
        # Use the gene ID to retrieve the RefSeq ID associated with that gene
        r = requests.get(
            f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=gene&db=nucleotide&id={gene_id}&cmd=neighbor&linkname=gene_nuccore_refseqrna")
        r.raise_for_status()
        root = ET.fromstring(r.text)
        # HACK(review): the elink response parsed above is discarded and the
        # accession is hard-coded -- every call returns "NM_004272" regardless
        # of *gene_symbol*. Presumably a debugging stub; confirm and fix.
        refseq_id = "NM_004272"
        return refseq_id
    except requests.exceptions.HTTPError as err:
        print(f"An HTTP error occurred: {err}")
    except requests.exceptions.RequestException as err:
        print(f"An error occurred: {err}")
seq_list=[]
search_term = gene_symbol = "HOMER2"
# NOTE(review): this top-level lookup is informational only; the loop below
# re-fetches per family member (and fetch_refseq_id is hard-coded anyway).
refseq_id = fetch_refseq_id(gene_symbol)
print("RefSeq ID for " + gene_symbol + " is " + refseq_id)
# Read the csv file
data = pd.read_csv('HOX_gene_families.csv')
# Filter the rows where the 'Approved symbol' column contains the search term
results = data.loc[data['Approved symbol'].str.contains(search_term, na=False)]
# Collect the matching gene symbols.
List_of_families=list(results['Approved symbol'])
print(List_of_families)
# Fetch the FASTA sequence for each family member.
for x in List_of_families:
    refseq_id = fetch_refseq_id(x)
    transcript_id = refseq_id
    fasta_sequence = fetch_fasta_sequence(transcript_id)
    print(fasta_sequence)
    seq_list.append(fasta_sequence)
    print("\n")
print(List_of_families)
print(seq_list)
# Pair each gene symbol with its sequence as [symbol, sequence] rows.
emptylist=[]
mainlist=[]
x=0
for x in range(len(List_of_families)):
    emptylist.append(List_of_families[x])
    emptylist.append(seq_list[x])
    mainlist.append(emptylist)
    emptylist=[]
# initialize list of lists
data = mainlist
# Create the pandas DataFrame
df = pd.DataFrame(data, columns=['GeneIDs', 'Seq'])
# Save dataframe as csv file in the current folder
df.to_csv('filename.csv', index = False, encoding='utf-8')
# print dataframe.
print(df)
29362482320 | import matplotlib.pyplot as plt
import numpy as np
import enems
if __name__ == "__main__":
# ## LOAD DATA ################################################################################################### #
test_data_obs = enems.load_data_obs().values
test_data_df = enems.load_data_75()
test_data = test_data_df.to_dict("list")
# ## PLOT FUNCTIONS ############################################################################################## #
def plot_ensemble_members(ensemble_series: dict, observation_series: np.array, selected_series: set,
plot_title: str, output_file_path: str) -> None:
_, axs = plt.subplots(1, 1, figsize=(7, 2.5))
axs.set_xlabel("Time")
axs.set_ylabel("Value")
axs.set_title(plot_title)
axs.set_xlim(0, 143)
axs.set_ylim(0, 5)
[axs.plot(ensemble_series[series_id], color="#999999", zorder=3, alpha=0.33) for series_id in selected_series]
axs.plot(observation_series, color="#000000", zorder=4)
plt.tight_layout()
plt.savefig(output_file_path)
plt.close()
return None
def plot_log(n_total_members: int, log: dict, output_file_path: str) -> None:
_, axss = plt.subplots(1, 3, figsize=(10.0, 2.5))
x_values=[n_total_members-i-1 for i in range(len(log["history"]["total_correlation"]))]
axss[0].set_xlabel("")
axss[0].set_ylabel("Total correlation")
axss[0].plot(x_values, log["history"]["total_correlation"], color="#7777FF", zorder=3)
axss[0].set_ylim(70, 140)
axss[0].set_xlim(x_values[0], x_values[-1])
axss[1].set_xlabel("# selected members")
axss[1].set_ylabel("Joint entropy")
axss[1].axhline(log["original_ensemble_joint_entropy"], color="#FF7777", zorder=3, label="Full set")
axss[1].plot(x_values, log["history"]["joint_entropy"], color="#7777FF", zorder=3, label="Selected set")
axss[1].set_ylim(6.3, 6.9)
axss[1].set_xlim(x_values[0], x_values[-1])
axss[1].legend()
axss[2].set_xlabel("")
axss[2].set_ylabel("Transinformation")
axss[2].plot(x_values, log["history"]["transinformation"], color="#7777FF", zorder=3, label="Selected set")
axss[2].axhline(log["original_ensemble_transinformation"], color="#FF7777", zorder=3, label="Full set")
axss[2].set_xlim(x_values[0], x_values[-1])
plt.tight_layout()
plt.savefig(output_file_path)
plt.close()
return None
# ## FUNCTIONS CALL ############################################################################################## #
cur_selection_log = enems.select_ensemble_members(test_data, test_data_obs, n_bins=10, bin_by="equal_intervals",
beta_threshold=0.95, n_processes=1, verbose=False)
plot_log(len(test_data.keys()), cur_selection_log, "test/log_obs.svg")
plot_ensemble_members(test_data, test_data_obs, set(test_data.keys()),
"All members (%d)" % len(test_data.keys()),
"test/ensemble_all_obs.svg")
plot_ensemble_members(test_data, test_data_obs, cur_selection_log["selected_members"],
"Selected members (%d)" % len(cur_selection_log["selected_members"]),
"test/ensemble_selected_obs.svg")
del test_data_obs, cur_selection_log
| adlzanchetta/en-ems | test/test_plot_with_obs.py | test_plot_with_obs.py | py | 3,535 | python | en | code | 0 | github-code | 90 |
18422306119 | from itertools import permutations
from math import ceil
# Five dish cooking times; orders land on multiples of 10 minutes, so every
# dish except one effectively costs ceil(t/10)*10 -- try each as the exception.
times = list(int(input()) for _ in range(5))
orders = list(permutations(times, 5))
# Baseline: every dish rounded up to the next multiple of 10.
minimum = sum(ceil(time/10)*10 for time in times)
for order in orders:
    # order[0] is charged exactly; the remaining four are rounded up to tens.
    cnt = order[0]
    for i in range(1, 5):
        cnt += ceil(order[i]/10)*10
    minimum = min(minimum, cnt)
print(minimum)
| Aasthaengg/IBMdataset | Python_codes/p03076/s673792187.py | s673792187.py | py | 341 | python | en | code | 0 | github-code | 90 |
40540311795 | from __future__ import print_function
# Add local python path to the global path and import standard library modules...
import os
import sys; sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), "..", "lib", "Python"))
import time
import re
import multiprocessing as mp
# RDKit imports...
try:
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem.SaltRemover import SaltRemover
from rdkit.Chem.SaltRemover import InputFormat
from rdkit.Chem import AllChem
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import RDKit module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your RDKit environment and try again.\n\n")
sys.exit(1)
# MayaChemTools imports...
try:
from docopt import docopt
import MiscUtil
import RDKitUtil
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import MayaChemTools module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your MayaChemTools environment and try again.\n\n")
sys.exit(1)
ScriptName = os.path.basename(sys.argv[0])
Options = {}
OptionsInfo = {}
def main():
    """Drive the script: parse and validate options, run salt removal, report timing."""
    MiscUtil.PrintInfo("\n%s (RDK v%s; %s): Starting...\n" % (ScriptName, rdBase.rdkitVersion, time.asctime()))
    (WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()
    # Retrieve command line arguments and options...
    RetrieveOptions()
    # Process and validate command line arguments and options...
    ProcessOptions()
    # Perform actions required by the script...
    RemoveSalts()
    MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName)
    MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))
def RemoveSalts():
    """Identify and remove salts from molecules and report summary counts.

    Bug fix: user-facing typo "coontaining" corrected to "containing" in the
    summary message.
    """
    # Setup a molecule reader...
    MiscUtil.PrintInfo("\nProcessing file %s..." % OptionsInfo["Infile"])
    Mols = RDKitUtil.ReadMolecules(OptionsInfo["Infile"], **OptionsInfo["InfileParams"])
    # Set up a molecule writer (None in count-only mode)...
    Writer = SetupMoleculeWriter()
    MolCount, ValidMolCount, SaltsMolCount = ProcessMolecules(Mols, Writer)
    if Writer is not None:
        Writer.close()
    MiscUtil.PrintInfo("\nTotal number of molecules: %d" % MolCount)
    MiscUtil.PrintInfo("Number of valid molecules: %d" % ValidMolCount)
    MiscUtil.PrintInfo("Number of ignored molecules: %d" % (MolCount - ValidMolCount))
    MiscUtil.PrintInfo("\nNumber of molecules containing salts: %d" % (SaltsMolCount))
def ProcessMolecules(Mols, Writer):
    """Dispatch salt removal to the single- or multi-process implementation."""
    handler = (ProcessMoleculesUsingMultipleProcesses
               if OptionsInfo["MPMode"]
               else ProcessMoleculesUsingSingleProcess)
    return handler(Mols, Writer)
def ProcessMoleculesUsingSingleProcess(Mols, Writer):
    """Process and remove salts from molecules using a single process.

    Returns a (total, valid, containing-salts) molecule-count tuple.
    """
    MiscUtil.PrintInfo("\nRemoving salts...")
    Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]
    SetSMILESMolProps = OptionsInfo["OutfileParams"]["SetSMILESMolProps"]
    # Set up a salt remover...
    Remover = SetupSaltRemover()
    (MolCount, ValidMolCount, SaltsMolCount) = [0] * 3
    FirstMol = True
    for Mol in Mols:
        MolCount += 1
        if Mol is None:
            continue
        if RDKitUtil.IsMolEmpty(Mol):
            MolName = RDKitUtil.GetMolName(Mol, MolCount)
            MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
            continue
        ValidMolCount += 1
        if FirstMol:
            FirstMol = False
            # Copy molecule properties into the SMILES writer header once.
            if SetSMILESMolProps:
                RDKitUtil.SetWriterMolProps(Writer, Mol)
        UnsaltedMol, SaltyStatus = RemoveMolSalts(Mol, Remover, MolCount)
        if SaltyStatus:
            SaltsMolCount += 1
        WriteMolecule(Writer, UnsaltedMol, Compute2DCoords)
    return (MolCount, ValidMolCount, SaltsMolCount)
def ProcessMoleculesUsingMultipleProcesses(Mols, Writer):
    """Process and remove salts from molecules using multiprocessing.

    Molecules are shipped to worker processes as base64-encoded pickles and
    the stripped results written out here. Returns a (total, valid,
    containing-salts) molecule-count tuple.
    """
    MiscUtil.PrintInfo("\nRemoving salts using multiprocessing...")
    MPParams = OptionsInfo["MPParams"]
    Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]
    # Setup data for initializing a worker process...
    InitializeWorkerProcessArgs = (MiscUtil.ObjectToBase64EncodedString(Options), MiscUtil.ObjectToBase64EncodedString(OptionsInfo))
    # Setup a encoded mols data iterable for a worker process by pickling only public
    # and private molecule properties...
    WorkerProcessDataIterable = RDKitUtil.GenerateBase64EncodedMolStrings(Mols)
    # Setup process pool along with data initialization for each process...
    MiscUtil.PrintInfo("\nConfiguring multiprocessing using %s method..." % ("mp.Pool.imap()" if re.match("^Lazy$", MPParams["InputDataMode"], re.I) else "mp.Pool.map()"))
    MiscUtil.PrintInfo("NumProcesses: %s; InputDataMode: %s; ChunkSize: %s\n" % (MPParams["NumProcesses"], MPParams["InputDataMode"], ("automatic" if MPParams["ChunkSize"] is None else MPParams["ChunkSize"])))
    ProcessPool = mp.Pool(MPParams["NumProcesses"], InitializeWorkerProcess, InitializeWorkerProcessArgs)
    # Start processing: imap streams lazily; map materializes all input.
    if re.match("^Lazy$", MPParams["InputDataMode"], re.I):
        Results = ProcessPool.imap(WorkerProcess, WorkerProcessDataIterable, MPParams["ChunkSize"])
    elif re.match("^InMemory$", MPParams["InputDataMode"], re.I):
        Results = ProcessPool.map(WorkerProcess, WorkerProcessDataIterable, MPParams["ChunkSize"])
    else:
        MiscUtil.PrintError("The value, %s, specified for \"--inputDataMode\" is not supported." % (MPParams["InputDataMode"]))
    SetSMILESMolProps = OptionsInfo["OutfileParams"]["SetSMILESMolProps"]
    (MolCount, ValidMolCount, SaltsMolCount) = [0] * 3
    FirstMol = True
    for Result in Results:
        MolCount += 1
        MolIndex, EncodedMol, SaltyStatus = Result
        if EncodedMol is None:
            continue
        ValidMolCount += 1
        Mol = RDKitUtil.MolFromBase64EncodedMolString(EncodedMol)
        if FirstMol:
            FirstMol = False
            # Copy molecule properties into the SMILES writer header once.
            if SetSMILESMolProps:
                RDKitUtil.SetWriterMolProps(Writer, Mol)
        if SaltyStatus:
            SaltsMolCount += 1
        WriteMolecule(Writer, Mol, Compute2DCoords)
    return (MolCount, ValidMolCount, SaltsMolCount)
def InitializeWorkerProcess(*EncodedArgs):
    """Initialize a worker process: restore Options/OptionsInfo from their
    base64-encoded pickles and build the per-process salt remover."""
    global Options, OptionsInfo
    MiscUtil.PrintInfo("Starting process (PID: %s)..." % os.getpid())
    # Decode Options and OptionInfo...
    Options = MiscUtil.ObjectFromBase64EncodedString(EncodedArgs[0])
    OptionsInfo = MiscUtil.ObjectFromBase64EncodedString(EncodedArgs[1])
    # Set up salt remover...
    OptionsInfo["SaltRemover"] = SetupSaltRemover()
def WorkerProcess(EncodedMolInfo):
    """Strip salts from one encoded molecule in a worker process.

    Returns [mol index, base64-encoded stripped mol or None, salty status].
    """
    MolIndex, EncodedMol = EncodedMolInfo
    if EncodedMol is None:
        return [MolIndex, None, False]
    Mol = RDKitUtil.MolFromBase64EncodedMolString(EncodedMol)
    if RDKitUtil.IsMolEmpty(Mol):
        MolName = RDKitUtil.GetMolName(Mol, (MolIndex + 1))
        MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
        return [MolIndex, None, False]
    Mol, SaltyStatus = RemoveMolSalts(Mol, OptionsInfo["SaltRemover"], (MolIndex + 1))
    # Re-encode with public and private properties so the name survives.
    EncodedMol = RDKitUtil.MolToBase64EncodedMolString(Mol, PropertyPickleFlags = Chem.PropertyPickleOptions.MolProps | Chem.PropertyPickleOptions.PrivateProps)
    return [MolIndex, EncodedMol, SaltyStatus]
def RemoveMolSalts(Mol, Remover, MolNum):
    """Remove salts from mol; return (unsalted mol, salty status).

    With a SMARTS-based *Remover*, defined salts are stripped; without one
    (ByComponent mode), the largest disconnected fragment is kept instead.
    """
    UnsaltedMol = Mol
    SaltyStatus = False
    if Remover is not None:
        KeptMol, DeletedMols = Remover.StripMolWithDeleted(Mol, dontRemoveEverything = False)
        if len(DeletedMols) >= 1:
            SaltyStatus = True
        if RDKitUtil.IsMolEmpty(KeptMol):
            if len(DeletedMols) >= 1:
                # Everything was stripped; fall back to the largest deleted fragment
                UnsaltedMol = GetLargestMol(DeletedMols)
    else:
        # Use largest fragment as unsalted molecule...
        MolFrags = Chem.GetMolFrags(Mol, asMols = True)
        if len(MolFrags) > 1:
            # Keep the largest fragment as unsalted molecule...
            SaltyStatus = True
            UnsaltedMol = GetLargestMol(MolFrags)
    if SaltyStatus:
        Chem.SanitizeMol(UnsaltedMol)
        # Preserve the original molecule name on the stripped copy.
        MolName = RDKitUtil.GetMolName(Mol, MolNum)
        if len(MolName):
            UnsaltedMol.SetProp("_Name", MolName)
    return (UnsaltedMol, SaltyStatus)
def GetLargestMol(Mols):
    """Return the molecule with the most atoms (first one on ties); None for an empty list."""
    return max(Mols, key=lambda Mol: Mol.GetNumAtoms(), default=None)
def SetupSaltRemover():
    """Set up a salt remover; None in ByComponent mode, where the largest
    fragment is kept instead of stripping SMARTS-defined salts."""
    Remover = None
    if OptionsInfo["SaltsByComponentsMode"]:
        return Remover
    # Exactly one of SaltsFile/SaltsSMARTS is non-None, as set by ProcessOptions
    # depending on BySMARTSFile vs BySMARTS mode.
    return SaltRemover(defnFilename = OptionsInfo["SaltsFile"], defnData = OptionsInfo["SaltsSMARTS"], defnFormat = InputFormat.SMARTS)
def WriteMolecule(Writer, Mol, Compute2DCoords):
    """Write out a molecule, optionally generating 2D coordinates first.

    A no-op in count mode, where no output file is produced.
    """
    if OptionsInfo["CountMode"]:
        return
    if Compute2DCoords:
        AllChem.Compute2DCoords(Mol)
    Writer.write(Mol)
def SetupMoleculeWriter():
    """Set up a molecule writer for the output file; None in count mode."""
    Writer = None
    if OptionsInfo["CountMode"]:
        return Writer
    Writer = RDKitUtil.MoleculesWriter(OptionsInfo["Outfile"], **OptionsInfo["OutfileParams"])
    if Writer is None:
        MiscUtil.PrintError("Failed to setup a writer for output fie %s " % OptionsInfo["Outfile"])
    MiscUtil.PrintInfo("Generating file %s..." % OptionsInfo["Outfile"])
    return Writer
def ProcessOptions():
    """Process and validate command line arguments and options, populating
    the module-level OptionsInfo dictionary.

    Bug fix: the BySMARTSFile branch previously assigned
    ``SaltsBySMARTSFileMode = False``, so OptionsInfo["SaltsBySMARTSFileMode"]
    could never become True; it now correctly sets the flag.
    """
    MiscUtil.PrintInfo("Processing options...")
    # Validate options...
    ValidateOptions()
    OptionsInfo["Infile"] = Options["--infile"]
    OptionsInfo["InfileParams"] = MiscUtil.ProcessOptionInfileParameters("--infileParams", Options["--infileParams"], Options["--infile"])
    OptionsInfo["Outfile"] = Options["--outfile"]
    OptionsInfo["OutfileParams"] = MiscUtil.ProcessOptionOutfileParameters("--outfileParams", Options["--outfileParams"], Options["--infile"], Options["--outfile"])
    OptionsInfo["Overwrite"] = Options["--overwrite"]
    OptionsInfo["CountMode"] = False
    if re.match("^count$", Options["--mode"], re.I):
        OptionsInfo["CountMode"] = True
    OptionsInfo["MPMode"] = True if re.match("^yes$", Options["--mp"], re.I) else False
    OptionsInfo["MPParams"] = MiscUtil.ProcessOptionMultiprocessingParameters("--mpParams", Options["--mpParams"])
    # Translate the salts mode into three mutually-exclusive flags.
    SaltsByComponentsMode = False
    SaltsBySMARTSFileMode = False
    SaltsBySMARTSMode = False
    if re.match("^ByComponent$", Options["--saltsMode"], re.I):
        SaltsByComponentsMode = True
    elif re.match("^BySMARTSFile$", Options["--saltsMode"], re.I):
        SaltsBySMARTSFileMode = True
    elif re.match("^BySMARTS$", Options["--saltsMode"], re.I):
        SaltsBySMARTSMode = True
    else:
        MiscUtil.PrintError("The salts mode specified, %s, using \"--saltsMode\" option is not valid." % Options["--saltsMode"])
    OptionsInfo["SaltsByComponentsMode"] = SaltsByComponentsMode
    OptionsInfo["SaltsBySMARTSFileMode"] = SaltsBySMARTSFileMode
    OptionsInfo["SaltsBySMARTSMode"] = SaltsBySMARTSMode
    # SaltsFile is only meaningful in BySMARTSFile mode; "auto" means use the
    # remover's built-in definitions (None is passed through).
    SaltsFile = None
    if re.match("^BySMARTSFile$", Options["--saltsMode"], re.I):
        if not re.match("^auto$", Options["--saltsFile"], re.I):
            SaltsFile = Options["--saltsFile"]
    OptionsInfo["SaltsFile"] = SaltsFile
    # SaltsSMARTS is only meaningful in BySMARTS mode; whitespace-separated
    # patterns are split onto separate lines for the SaltRemover.
    SaltsSMARTS = None
    if re.match("^BySMARTS$", Options["--saltsMode"], re.I):
        if not Options["--saltsSMARTS"]:
            MiscUtil.PrintError("No salts SMARTS pattern specified using \"--saltsSMARTS\" option during \"BySMARTS\" value of \"-s, --saltsMode\" option")
        SaltsSMARTS = Options["--saltsSMARTS"].strip(" ")
        if not len(SaltsSMARTS):
            MiscUtil.PrintError("Empty SMARTS pattern specified using \"--saltsSMARTS\" option during \"BySMARTS\" value of \"-s, --saltsMode\" option")
        if re.search(" ", SaltsSMARTS):
            SaltsSMARTS = re.sub('[ ]+', '\n', SaltsSMARTS)
    OptionsInfo["SaltsSMARTS"] = SaltsSMARTS
def RetrieveOptions():
    """Retrieve command line arguments and options into the global Options
    dictionary; handles --workingdir and the --examples early exit."""
    # Get options...
    global Options
    Options = docopt(_docoptUsage_)
    # Set current working directory to the specified directory...
    WorkingDir = Options["--workingdir"]
    if WorkingDir:
        os.chdir(WorkingDir)
    # Handle examples option...
    if "--examples" in Options and Options["--examples"]:
        MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))
        sys.exit(0)
def ValidateOptions():
    """Validate command line option values, aborting via MiscUtil.PrintError
    on the first invalid value encountered."""
    # Input file must exist and carry a supported extension...
    InfileName = Options["--infile"]
    MiscUtil.ValidateOptionFilePath("-i, --infile", InfileName)
    MiscUtil.ValidateOptionFileExt("-i, --infile", InfileName, "sdf sd smi txt csv tsv")

    # Output file checks apply only when one was specified...
    OutfileName = Options["--outfile"]
    if OutfileName:
        MiscUtil.ValidateOptionFileExt("-o, --outfile", OutfileName, "sdf sd smi")
        MiscUtil.ValidateOptionsOutputFileOverwrite("-o, --outfile", OutfileName, "--overwrite", Options["--overwrite"])
        MiscUtil.ValidateOptionsDistinctFileNames("-i, --infile", InfileName, "-o, --outfile", OutfileName)

    MiscUtil.ValidateOptionTextValue("-m, --mode", Options["--mode"], "remove count")
    # "remove" mode writes molecules out, so an output file is mandatory...
    if re.match("^remove$", Options["--mode"], re.I) and not Options["--outfile"]:
        MiscUtil.PrintError("The outfile must be specified using \"-o, --outfile\" during \"remove\" value of \"-m, --mode\" option")

    MiscUtil.ValidateOptionTextValue("--mp", Options["--mp"], "yes no")

    MiscUtil.ValidateOptionTextValue("--saltsMode", Options["--saltsMode"], "ByComponent BySMARTSFile BySMARTS")
    # A custom salts file only needs to exist when it is not the default "auto"...
    if re.match("^BySMARTSFile$", Options["--saltsMode"], re.I):
        if not re.match("^auto$", Options["--saltsFile"], re.I):
            MiscUtil.ValidateOptionFilePath("--saltsFile", Options["--saltsFile"])
# Setup a usage string for docopt...
_docoptUsage_ = """
RDKitRemoveSalts.py - Remove salts
Usage:
RDKitRemoveSalts.py [--infileParams <Name,Value,...>] [--mode <remove or count>]
[--mp <yes or no>] [--mpParams <Name.Value,...>] [--outfileParams <Name,Value,...> ]
[--overwrite] [--saltsMode <ByComponent, BySMARTSFile, BySMARTS>]
[--saltsFile <FileName or auto>] [--saltsSMARTS <SMARTS>]
[-w <dir>] [-o <outfile>] -i <infile>
RDKitRemoveSalts.py -h | --help | -e | --examples
Description:
Remove salts from molecules or simply count the number of molecules containing
salts. Salts are identified and removed based on either SMARTS strings or by selecting
the largest disconnected components in molecules as non-salt portion of molecules.
The supported input file formats are: SD (.sdf, .sd), SMILES (.smi., csv, .tsv, .txt)
The supported output file formats are: SD (.sdf, .sd), SMILES (.smi)
Options:
-e, --examples
Print examples.
-h, --help
Print this help message.
-i, --infile <infile>
Input file name.
--infileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for reading
molecules from files. The supported parameter names for different file
formats, along with their default values, are shown below:
SD: removeHydrogens,yes,sanitize,yes,strictParsing,yes
SMILES: smilesColumn,1,smilesNameColumn,2,smilesDelimiter,space,
smilesTitleLine,auto,sanitize,yes
Possible values for smilesDelimiter: space, comma or tab.
-m, --mode <remove or count> [default: remove]
Specify whether to remove salts from molecules and write out molecules
or or simply count the number of molecules containing salts.
--mp <yes or no> [default: no]
Use multiprocessing.
By default, input data is retrieved in a lazy manner via mp.Pool.imap()
function employing lazy RDKit data iterable. This allows processing of
arbitrary large data sets without any additional requirements memory.
All input data may be optionally loaded into memory by mp.Pool.map()
before starting worker processes in a process pool by setting the value
of 'inputDataMode' to 'InMemory' in '--mpParams' option.
A word to the wise: The default 'chunkSize' value of 1 during 'Lazy' input
data mode may adversely impact the performance. The '--mpParams' section
provides additional information to tune the value of 'chunkSize'.
--mpParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for to
configure multiprocessing.
The supported parameter names along with their default and possible
values are shown below:
chunkSize, auto
inputDataMode, Lazy [ Possible values: InMemory or Lazy ]
numProcesses, auto [ Default: mp.cpu_count() ]
These parameters are used by the following functions to configure and
control the behavior of multiprocessing: mp.Pool(), mp.Pool.map(), and
mp.Pool.imap().
The chunkSize determines chunks of input data passed to each worker
process in a process pool by mp.Pool.map() and mp.Pool.imap() functions.
The default value of chunkSize is dependent on the value of 'inputDataMode'.
The mp.Pool.map() function, invoked during 'InMemory' input data mode,
automatically converts RDKit data iterable into a list, loads all data into
memory, and calculates the default chunkSize using the following method
as shown in its code:
chunkSize, extra = divmod(len(dataIterable), len(numProcesses) * 4)
if extra: chunkSize += 1
For example, the default chunkSize will be 7 for a pool of 4 worker processes
and 100 data items.
The mp.Pool.imap() function, invoked during 'Lazy' input data mode, employs
'lazy' RDKit data iterable to retrieve data as needed, without loading all the
data into memory. Consequently, the size of input data is not known a priori.
It's not possible to estimate an optimal value for the chunkSize. The default
chunkSize is set to 1.
The default value for the chunkSize during 'Lazy' data mode may adversely
impact the performance due to the overhead associated with exchanging
small chunks of data. It is generally a good idea to explicitly set chunkSize to
a larger value during 'Lazy' input data mode, based on the size of your input
data and number of processes in the process pool.
The mp.Pool.map() function waits for all worker processes to process all
the data and return the results. The mp.Pool.imap() function, however,
returns the the results obtained from worker processes as soon as the
results become available for specified chunks of data.
The order of data in the results returned by both mp.Pool.map() and
mp.Pool.imap() functions always corresponds to the input data.
-o, --outfile <outfile>
Output file name.
--outfileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for writing
molecules to files. The supported parameter names for different file
formats, along with their default values, are shown below:
SD: compute2DCoords,auto,kekulize,no
SMILES: kekulize,no,smilesDelimiter,space, smilesIsomeric,yes,
smilesTitleLine,yes,smilesMolName,yes,smilesMolProps,no
Default value for compute2DCoords: yes for SMILES input file; no for all other
file types.
--overwrite
Overwrite existing files.
-s, --saltsMode <ByComponent, BySMARTSFile, BySMARTS> [default: ByComponent]
Specify whether to identify and remove salts based on SMARTS strings or
by selecting the largest disconnected component as non-salt portion of a
molecule. Possible values: ByComponent, BySMARTSFile or BySMARTS.
--saltsFile <FileName or auto> [default: auto]
Specify a file name containing specification for SMARTS corresponding to salts or
use default salts file, Salts.txt, available in RDKit data directory. This option is only
used during 'BySMARTSFile' value of '-s, --saltsMode' option.
RDKit data format: Smarts<tab>Name(optional)
For example:
[Cl,Br,I]
[N](=O)(O)O
[CH3]C(=O)O Acetic acid
--saltsSMARTS <SMARTS text>
Space delimited SMARTS specifications to use for salts identification instead
their specifications in '--saltsFile'. This option is only used during 'BySMARTS'
value of '-s, --saltsMode' option.
-w, --workingdir <dir>
Location of working directory which defaults to the current directory.
Examples:
To remove salts from molecules in a SMILES file by keeping largest disconnected
components as non-salt portion of molecules and write out a SMILES file, type:
% RDKitRemoveSalts.py -i Sample.smi -o SampleOut.smi
To remove salts from molecules in a SMILES file by keeping largest disconnected
components as non-salt portion of molecules, perform salt removal in multiprocessing
mode on all available CPUs without loading all data into memory, and write out a
SMILES file, type:
% RDKitRemoveSalts.py --mp yes -i Sample.smi -o SampleOut.smi
To remove salts from molecules in a SMILES file by keeping largest disconnected
components as non-salt portion of molecules, perform salt removal in multiprocessing
mode on all available CPUs by loading all data into memory, and write out a
SMILES file, type:
% RDKitRemoveSalts.py --mp yes --mpParams "inputDataMode,InMemory"
-i Sample.smi -o SampleOut.smi
To remove salts from molecules in a SMILES file by keeping largest disconnected
components as non-salt portion of molecules, perform salt removal in multiprocessing
mode on specific number of CPUs and chunk size without loading all data into memory,
and write out a SMILES file, type:
% RDKitRemoveSalts.py --mp yes --mpParams "inputDataMode,Lazy,
numProcesses,4,chunkSize,8" -i Sample.smi -o SampleOut.smi
To count number of molecule containing salts from in a SD file, using largest
components as non-salt portion of molecules, without generating any output
file, type:
% RDKitRemoveSalts.py -m count -i Sample.sdf
To remove salts from molecules in a SMILES file using SMARTS strings in default
Salts.txt distributed with RDKit to identify salts and write out a SMILES file, type:
% RDKitRemoveSalts.py -m remove -s BySMARTSFile -i Sample.smi
-o SampleOut.smi
To remove salts from molecules in a SD file using SMARTS strings in a local
CustomSalts.txt to identify salts and write out a SMILES file, type:
% RDKitRemoveSalts.py -m remove -s BySMARTSFile --saltsFile
CustomSalts.txt -i Sample.sdf -o SampleOut.smi
To remove salts from molecules in a SD file using specified SMARTS to identify
salts and write out a SD file, type:
% RDKitRemoveSalts.py -m remove -s BySMARTS --saltsSMARTS
'[Cl,Br,I] [N](=O)(O)O [N](=O)(O)O'
-i Sample.sdf -o SampleOut.smi
To remove salts form molecules from a CSV SMILES file, SMILES strings in column 1,
name in column 2, and generate output SD file, type:
% RDKitRemoveSalts.py --infileParams
"smilesDelimiter,comma,smilesTitleLine,yes,smilesColumn,1,
smilesNameColumn,2" --outfileParams "compute2DCoords,yes"
-i SampleSMILES.csv -o SampleOut.sdf
Author:
Manish Sud(msud@san.rr.com)
See also:
RDKitConvertFileFormat.py, RDKitRemoveDuplicateMolecules.py,
RDKitRemoveInvalidMolecules.py, RDKitSearchFunctionalGroups.py,
RDKitSearchSMARTS.py
Copyright:
Copyright (C) 2020 Manish Sud. All rights reserved.
The functionality available in this script is implemented using RDKit, an
open source toolkit for cheminformatics developed by Greg Landrum.
This file is part of MayaChemTools.
MayaChemTools is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
"""
# Run the script only when invoked from the command line, not when imported...
if __name__ == "__main__":
    main()
| sirimullalab/redial-2020 | mayachemtools/bin/RDKitRemoveSalts.py | RDKitRemoveSalts.py | py | 25,643 | python | en | code | 5 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.