seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
def input_li():
    """Read one stdin line and return its whitespace-separated integers as a list."""
    return [int(tok) for tok in input().split()]


def input_int():
    """Read one stdin line and return it as a single integer."""
    return int(input())
N, M = input_li()

# Read the N-row haystack grid, then the M-row needle grid.
A_LI = [input() for _ in range(N)]
B_LI = [input() for _ in range(M)]

# Slide the M x M pattern over every valid offset of the N x N grid and
# report whether it matches anywhere.
for i in range(N - M + 1):
    for j in range(N - M + 1):
        if all(B_LI[r][c] == A_LI[i + r][j + c]
               for r in range(M) for c in range(M)):
            print('Yes')
            exit()
print('No')
| Aasthaengg/IBMdataset | Python_codes/p03804/s734935461.py | s734935461.py | py | 612 | python | en | code | 0 | github-code | 90 |
74769273896 | from vpython import *
# Create the Sun and the planets (comments translated from Portuguese)
sun = sphere(pos=vector(0, 0, 0), radius=2, color=color.yellow)
earth = sphere(pos=vector(10, 0, 0), radius=1, color=color.blue)
mars = sphere(pos=vector(15, 0, 0), radius=0.8, color=color.red)
venus = sphere(pos=vector(7, 0, 0), radius=0.9, color=color.orange)
mercury = sphere(pos=vector(3, 0, 0), radius=0.5, color=color.white)
jupiter = sphere(pos=vector(20, 0, 0), radius=2.5, color=color.cyan)

# Create the coordinate axes
x_axis = cylinder(pos=vector(0, 0, 0), axis=vector(25, 0, 0), radius=0.1, color=color.white)
y_axis = cylinder(pos=vector(0, 0, 0), axis=vector(0, 25, 0), radius=0.1, color=color.white)
z_axis = cylinder(pos=vector(0, 0, 0), axis=vector(0, 0, 25), radius=0.1, color=color.white)

# Set the planets' initial velocities
earth.velocity = vector(0, 0, 2)
mars.velocity = vector(0, 0, 1.5)
venus.velocity = vector(0, 0, 1.8)
mercury.velocity = vector(0, 0, 2.2)
jupiter.velocity = vector(0, 0, 0.8)

# Simulation time step
dt = 0.01

# Advance each planet along its (constant) velocity forever,
# capped at 100 frames per second.
planets = [earth, mars, venus, mercury, jupiter]
while True:
    rate(100)
    for planet in planets:
        planet.pos = planet.pos + planet.velocity * dt
| becegato/modelo-3D-de-um-sistema-solar-simples | sis_solar_3D.py | sis_solar_3D.py | py | 1,349 | python | en | code | 0 | github-code | 90 |
36841346747 | from django.shortcuts import render,redirect
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from .models import Gallery
fs = FileSystemStorage()
# Create your views here.
def home(request):
    """Render the home page listing every Gallery instance."""
    all_galleries = Gallery.objects.all()
    context = {"context": all_galleries}
    return render(request, "multipleimagesapp/home.html", context)
def uploadhandler(request):
    """Create a Gallery (or extend an existing one) from uploaded files.

    POST fields: optional ``image_id`` (pk of an existing Gallery to append
    to), ``title`` (used only when creating), and ``images`` (file list).
    The Gallery's ``images`` field is maintained as a single comma-separated
    string of storage URLs.
    """
    if request.method == "POST":
        images_id = request.POST.get("image_id")
        if images_id:
            # Update request: append to an existing gallery.
            gallery_instance = Gallery.objects.get(pk=images_id)
        else:
            title = request.POST.get('title')
            gallery_instance = Gallery.objects.create(title=title)
        files = request.FILES.getlist('images')
        if files:
            for count, file in enumerate(files):
                try:
                    saved_file_instance = fs.save(file.name, file)
                except Exception as e:
                    # NOTE(review): failures are only printed, not reported to the user.
                    print(f"Exception : {e}")
                else:
                    # Re-read the accumulated CSV string each iteration
                    # (it was saved at the end of the previous one).
                    image_field = gallery_instance.images
                    if images_id and image_field and count == 0:  # its update request and image field is not empty
                        image_field = image_field + ',' + fs.url(saved_file_instance)
                    else:
                        # assumes Gallery.images defaults to an empty string —
                        # a None default would raise TypeError here; TODO confirm.
                        image_field = image_field + fs.url(saved_file_instance)
                    if count < len(files) - 1:
                        image_field = image_field + ","
                    gallery_instance.images = image_field
                    gallery_instance.save()
    # NOTE(review): reconstructed at function level so GET also gets a
    # response; confirm against the original file's indentation.
    return redirect(reverse("multipleimagesapp:home"))
def gallery(request, pk):
    """Render the detail page for one Gallery, splitting its CSV image list."""
    gallery_instance = Gallery.objects.filter(pk=pk)
    data = dict()
    if gallery_instance:
        data["title"] = gallery_instance[0].title
        if gallery_instance[0].images:
            data["images"] = gallery_instance[0].images.split(",")
        return render(request, "multipleimagesapp/gallery.html", {'data_uid': pk, "data": data})
    # NOTE(review): `message` is assigned but never shown to the user.
    message = "Either instance not exist or wrong request made"
    return redirect(reverse("multipleimagesapp:home"))
def delete(request):
    """Remove one image URL from a Gallery's CSV field and delete the file.

    POST fields: ``uid`` (Gallery pk) and ``image`` (the stored URL to remove).
    """
    if request.method == "POST":
        uid = request.POST["uid"]
        file_name = request.POST["image"]
        gallery_instance = Gallery.objects.filter(pk=uid)
        if gallery_instance:
            try:
                file_name_delete = file_name
                # Strip the neighbouring comma: the leading one when the URL
                # is the last (but not only) entry, otherwise the trailing one.
                if gallery_instance[0].images.split(",")[-1] == file_name:
                    if len(gallery_instance[0].images.split(",")) != 1:
                        file_name_delete = "," + file_name_delete
                else:
                    file_name_delete = file_name_delete + ","
                # NOTE(review): each `gallery_instance[0]` subscript re-evaluates
                # the queryset, so the attribute assignment and the .save() may
                # act on different instances — the edit can be silently lost.
                # Materialize once (obj = gallery_instance[0]) to be safe.
                gallery_instance[0].images = gallery_instance[0].images.replace(file_name_delete, "")
                gallery_instance[0].save()
            except Exception as e:
                print(f"Exception : {e}")
            else:
                # Convert the URL back to a storage filename before deleting.
                file_name = file_name.split("/")[-1]
                file_name = file_name.replace("%20", " ")
                try:
                    # NOTE(review): stray `pass` statement left behind.
                    pass
                    fs.delete(file_name)
                except Exception as e:
                    print(f"Exception : {e}")
        return redirect(reverse('multipleimagesapp:gallery', args=(uid,)))
    return redirect(reverse("multipleimagesapp:home"))
def update(request):
    """Replace one image of a Gallery with a newly uploaded file.

    POST fields: ``uid`` (Gallery pk), ``image`` (URL being replaced) and a
    single uploaded ``file``. On success the old storage file is deleted.
    """
    if request.method == "POST":
        uid = request.POST['uid']
        image = request.POST['image']
        file = request.FILES['file']
        try:
            gallery_instance = Gallery.objects.get(pk=uid)
            saved_file_instance = fs.save(file.name, file)
            # Swap the old URL for the new one inside the CSV string.
            gallery_instance.images = gallery_instance.images.replace(image, fs.url(saved_file_instance))
        except Exception as e:
            print(f"Exception : {e}")
        else:
            gallery_instance.save()
            # Convert the old URL back to a storage filename and remove it.
            file_name = image.split("/")[-1]
            file_name = file_name.replace("%20", " ")
            fs.delete(file_name)
        return redirect(reverse('multipleimagesapp:gallery', args=(uid,)))
    return redirect(reverse("multipleimagesapp:home"))
| skprasad117/Multiple_Image_Upload | multipleimagesapp/views.py | views.py | py | 4,169 | python | en | code | 0 | github-code | 90 |
32798381577 | #basic modules
import requests
import datetime
import json
import time
import sys
#sqlalchemy essentials
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func
#modules from requests specifically
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
#customized modules
from src.coinDB.model import *
from src.coinDB.config import *
from src.coinDB.db import *
#######################################################
## ==== run time parameter for database update ===== ##
## 18.92251205444336 seconds to download data ##
## 182.5061640739441 seconds to update database ##
## ================================================= ##
#######################################################
class CoinMetrics:
    """Client for the coinmetrics.io v1 API that mirrors per-asset metrics
    into the local SQLAlchemy database (models from src.coinDB)."""

    URL_BASE = "https://coinmetrics.io/api/v1/"

    def __init__(self, api_base_url=URL_BASE, asset=["btc", "bch", "ltc", "eth", "etc"]):
        # NOTE(review): mutable default argument for `asset` — the list is
        # shared across calls; safer would be asset=None (the None fallback
        # below already exists but is unreachable with this default).
        self.api_base_url = api_base_url
        self.timeout = 120  # per-request time limit (seconds)
        self.current_time = int(time.time())
        self.APIsession = requests.Session()
        DBSession = sessionmaker(bind=ENGINE)
        self.DBsession = DBSession()
        # Retry transient gateway errors with exponential backoff.
        retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[502, 503, 504])
        self.APIsession.mount("http://", HTTPAdapter(max_retries=retries))
        # Resume downloads from the newest timestamp already stored (0 on an empty DB).
        self.prev_time = self.DBsession.query(func.max(coin_date.unix_date)).scalar()
        if self.prev_time is None:
            self.prev_time = 0
        if asset is None:
            self.avail_asset = ["btc", "bch", "ltc", "eth", "etc"]
        else:
            self.avail_asset = asset

    def __request(self, url):
        """GET `url`, decode the JSON payload, and raise on HTTP or API errors."""
        try:
            response = self.APIsession.get(url, timeout=self.timeout)
            response.raise_for_status()
            content = json.loads(response.content.decode('utf-8'))
            if 'error' in content:
                raise ValueError(content['error'])
            else:
                return content
        except Exception as e:
            raise

    def get_supported_asset(self):
        """Return the API's list of supported assets."""
        url = '{}get_supported_assets'.format(self.api_base_url)
        return self.__request(url)

    def get_available_data_type_for_asset(self, asset):
        """Return the metric names available for `asset`."""
        url = '{}get_available_data_types_for_asset/{}'.format(self.api_base_url, asset)
        return self.__request(url)

    def get_asset_data_for_time_range(self, asset, data_type, begin, end):
        """Return `data_type` samples for `asset` between unix times begin/end."""
        url = '{}get_asset_data_for_time_range/{}/{}/{}/{}'.format(self.api_base_url, asset, data_type, begin, end)
        return self.__request(url)

    def get_assets_everything(self, asset, begin, end):
        """Fetch every available metric for `asset` in [begin, end].

        Returns {timestamp: {feature: value}}; samples with a None value
        are skipped.
        """
        feature = self.get_available_data_type_for_asset(asset=asset)
        d = {}
        # print(feature['result'])
        for f in feature["result"]:
            # print(f)
            # f=f.replace("(usd)", "")
            tmp_array = self.get_asset_data_for_time_range(asset=asset, data_type=f, begin=begin, end=end)
            for response in tmp_array["result"]:
                # dictionary structure : dictionary[timestamp][feature] = value
                if response[1] is None:
                    continue
                if response[0] in d:
                    d[response[0]][f] = response[1]
                else:
                    d[response[0]] = {}
                    d[response[0]][f] = response[1]
        return d

    def get_all_asset_data_for_time_range(self, asset=None, begin=0, end=0):
        """Fetch everything for each configured asset.

        NOTE(review): the `asset` parameter is immediately overwritten with
        self.avail_asset, so the following None check is dead code.
        """
        d = {}
        # print("asset: ", asset)
        asset = self.avail_asset
        if asset is None:
            raise ValueError("Desired cryptocoin type not specified")
        for a in asset:
            print("grabbing asset: {}".format(a))
            d[a] = self.get_assets_everything(asset=a, begin=begin, end=end)
        return d

    def insert_database(self, value=None, entry_id=None, feature=None):
        """Stage one (entry_id, value) row into the table matching `feature`.

        Rows are added to the session only; update_database() commits.
        """
        if value is None or entry_id is None or feature is None:
            print(feature, value, entry_id)
            raise ValueError("missing essential value for database update. feature: {}, value: {}, entry_id: {}".format(feature, value, entry_id))
        # One branch per known API feature name -> ORM model.
        if feature == "activeaddresses":
            new_row = active_address(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "adjustedtxvolume(usd)":
            new_row = adjusted_tx_volume(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "averagedifficulty":
            new_row = avg_difficulty(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "blockcount":
            new_row = block_count(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "blocksize":
            new_row = block_size(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "exchangevolume(usd)":
            new_row = exchange_volume(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "fees":
            new_row = fees(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "generatedcoins":
            new_row = generated_coins(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "marketcap(usd)":
            new_row = market_cap(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "medianfee":
            new_row = median_fee(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "mediantxvalue(usd)":
            new_row = median_tx_value(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "paymentcount":
            new_row = payment_count(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "price(usd)":
            new_row = price(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "realizedcap(usd)":
            new_row = realized_cap(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "txcount":
            new_row = tx_count(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        elif feature == "txvolume(usd)":
            new_row = tx_volume(entry_id=entry_id, value=value)
            self.DBsession.add(new_row)
        else:
            raise ValueError("unexpected feature to insert into database: %s" % (feature))

    def update_database(self):
        """Download all data newer than prev_time and bulk-insert it."""
        print("update sequence initiated.")
        print("targeted crypto coin: %s" % (self.avail_asset))
        print("downloading data..")
        start_time = time.time()
        self.coin = self.get_all_asset_data_for_time_range(begin=self.prev_time, end=self.current_time)
        time_used = time.time() - start_time
        print("completed. time used to download data: ", time_used)
        print("inserting new data..")
        start_time = time.time()
        for coin_abb in self.coin:
            print("processing %s...." % (coin_abb))
            current_coin_code = COIN_CODE[coin_abb]
            for timestamp in self.coin[coin_abb]:
                # One coin_date row per (coin, timestamp); flush to obtain
                # its autogenerated entry_id for the per-feature rows.
                new_row = coin_date(coin_type=current_coin_code, unix_date=timestamp)
                self.DBsession.add(new_row)
                self.DBsession.flush()  # get id number
                current_entry_id = new_row.entry_id
                for feature in self.coin[coin_abb][timestamp]:
                    self.insert_database(value=self.coin[coin_abb][timestamp][feature],
                                         entry_id=current_entry_id,
                                         feature=feature)
        self.DBsession.commit()  # commit at last to save time
        time_used = time.time() - start_time
        print("completed. time used to update data: ", time_used)
if __name__ == "__main__":
    # Library module: refuse direct execution with a non-zero exit code.
    print("this script is not meant to be executed directly. Exiting..")
    sys.exit(1)
# d = cm.get_available_data_type_for_asset(asset="btc")
# d = cm.get_assets_everything(asset="btc", begin=0, end=int(time.time()))
# print(d)
| wolflex888/CryptoDB | src/CoinMetrics.py | CoinMetrics.py | py | 8,367 | python | en | code | 1 | github-code | 90 |
12976710314 | import os
from tqdm import tqdm
from time import time
import requests
from lxml import html
from bs4 import BeautifulSoup
from utils import get_path_of_all_xml_file, walkData
# Scan a batch of ClinicalTrials.gov study pages and record the NCT ids
# whose page advertises automatically indexed publications.
input_file_lst = get_path_of_all_xml_file()
nctid_lst = [file.split('/')[-1].split('.')[0] for file in input_file_lst]

# Marker text shown on a study page when publications are indexed to it.
tag = "Publications automatically indexed to this study by ClinicalTrials.gov Identifier (NCT Number):"
url_prefix = 'https://clinicaltrials.gov/ct2/show/study/'

# Batch to scan on this run. (Earlier runs covered 50K-300K in 50K steps;
# the leftover reassignments after the loop were dead code and are removed.)
start_idx, end_idx = 300000, 350000
# Loop-invariant output-file suffix, e.g. "300K_350K" — hoisted out of the loop.
suffix = str(start_idx)[:-3] + 'K_' + str(end_idx)[:-3] + 'K'

for nctid in tqdm(nctid_lst[start_idx:end_idx]):
    url = url_prefix + nctid
    page = requests.get(url)
    if tag.lower() in page.text.lower():
        # Append each matching trial id to the batch's output file.
        with open("ctgov_data/nctid_with_publication" + suffix + ".txt", 'a') as fout:
            fout.write(nctid + '\n')
        print(nctid)
# # page=requests.Session().get(url)
# page=requests.get(url)
# ## <class 'requests.models.Response'>
# tree=html.fromstring(page.text)
# result=tree.xpath('//td[@class="title"]//a/text()')
# text = page.text
# for idx, i in enumerate(text.split('\n')):
# if "publications automatically" in i.lower():
# idx_o = idx
# break
# for i in range(idx_o, idx_o + 5):
# print(text.split('\n')[i])
| futianfan/HINT | src/collect_publication.py | collect_publication.py | py | 1,510 | python | en | code | 0 | github-code | 90 |
18262017089 | import sys
N, M = map(int, input().split())

# digits[i] holds the digit (as a string) fixed for position i, or "-1" if free.
digits = ["-1"] * N

if N == 1 and M == 0:
    # A single unconstrained digit: 0 is the smallest valid number.
    print(0)
    sys.exit()

# Read all M (position, digit) constraints first, then resolve them.
constraints = [tuple(map(int, input().split())) for _ in range(M)]

for pos, digit in constraints:
    if pos == 1 and digit == 0 and N != 1:
        # A multi-digit number cannot start with 0.
        print(-1)
        sys.exit()
    if digits[pos - 1] == "-1":
        digits[pos - 1] = str(digit)
    elif digits[pos - 1] != str(digit):
        # Two constraints disagree on the same position.
        print(-1)
        sys.exit()

# Fill the free positions with the smallest legal digits.
if digits[0] == "-1":
    digits[0] = "1"
for k in range(1, N):
    if digits[k] == "-1":
        digits[k] = "0"

print(int("".join(digits)))
8367357728 | import logging
import math
import json
from dynamodb_json import json_util as jsonDB
import boto3
from datetime import datetime, timedelta
from zoneinfo import ZoneInfo
# Needed because Lambda runs on a different timezone than the stored data.
time_zone = ZoneInfo("Canada/Eastern")

# DynamoDB table per (weather provider, city) pair.
table_names = ['Weather_API_toronto', 'Weather_API_kingston', 'Weather_API_innisfil',
               'Open_Weather_toronto', 'Open_Weather_kingston', 'Open_Weather_innisfil',
               'Accu_Weather_toronto']

logger = logging.getLogger()
logger.setLevel(logging.INFO)

dynamodb = boto3.client('dynamodb')
def get_value(table_name, date):
    """Fetch the item keyed by `date` from `table_name` and deserialize its DynamoDB JSON."""
    raw = dynamodb.get_item(TableName=table_name, Key={'Date': {'S': date}})
    return jsonDB.loads(raw)['Item']
def update_table(table_name, date, var, update):
    """Set attribute `var` (a DynamoDB map) on the item keyed by `date`.

    Logs when DynamoDB reports a non-200 status.
    """
    response = dynamodb.update_item(
        TableName=table_name,
        Key={'Date': {'S': date}},
        UpdateExpression=f'set {var} = :r',
        ExpressionAttributeValues={':r': {"M": update}},
        ReturnValues="UPDATED_NEW",
    )
    status = response['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        logger.info(f"Issue updating {date}-{var} in table {table_name}")
def put_value(table_name, update):
    """Serialize `update` into DynamoDB JSON and put it into `table_name`."""
    item = json.loads(jsonDB.dumps(update))
    dynamodb.put_item(TableName=table_name, Item=item)
def date_format(date: datetime):
    """Format a datetime as the table key 'YYYY-MM-DDTHH' (hour precision)."""
    return f"{date:%Y-%m-%dT%H}"
def new_hour(cur_hour, diff):
    """Return the hour of day `diff` hours before `cur_hour`, wrapped to 0-23.

    Bug fix: the original returned ``new_hour - 24`` when the difference was
    non-positive, pushing the value further negative instead of wrapping it
    around midnight; ``% 24`` performs the intended wrap.
    """
    return (cur_hour - diff) % 24
def temp_score(temp):
    """Exponential penalty for a temperature difference (0 at temp=0, 1 at temp=7)."""
    return 2 ** (temp / 7) - 1
def cond_score(cond):
    """Exponential penalty for a condition difference (0 at cond=0, 1 at cond=3)."""
    return 2 ** (cond / 3) - 1
def cloud_score(cloud):
    """Exponential penalty for a cloud-cover difference (0 at cloud=0, 1 at cloud=105)."""
    return 2 ** (cloud / 105) - 1
def hour_accuracy(temp, cond, cloud):
    """Accuracy score (0-100) for an hourly forecast from its error components.

    Gaussian of the combined temp+condition penalty plus a Gaussian of the
    cloud penalty, shifted so a perfect forecast scores 100.
    """
    thermal_term = math.exp(-(temp_score(temp) + cond_score(cond)) ** 2)
    cloud_term = math.exp(-cloud_score(cloud) ** 2)
    return round(thermal_term + cloud_term - 1, 4) * 100
def date_accuracy(max, min, cond):
    """Accuracy score (0-100) for a daily forecast from max/min temp and condition errors.

    Parameter names shadow the builtins ``max``/``min`` but are kept for
    interface compatibility with existing callers.
    """
    combined = temp_score(max) + temp_score(min) + cond_score(cond)
    return round(math.exp(-combined ** 2), 4) * 100
def lambda_handler(event, context):
    """Score past forecasts against the current observation for every table.

    For each (provider, city) table: compare the current hour's weather with
    the forecasts made 1-12 hours ago, store each per-forecast score back on
    the originating item, and maintain a running average per horizon.
    """
    # Get current date and convert to table key format.
    cur_date = datetime.now(tz=time_zone)
    fcur_date = date_format(cur_date)
    for table_name in table_names:
        try:  # Get current weather and running average data
            logger.info(f"Calculating statistics for {table_name} at {fcur_date}")
            avgs = get_value(table_name, 'AverageForcast')
            cur_weather = get_value(table_name, fcur_date)['cur']
        except:  # NOTE(review): bare except also hides programming errors
            logger.info(f"No Data entry for {fcur_date} in {table_name}")
            continue
        # Incase I missed a condition mapping
        # Or incase they actually use one of the ones I didn't know how to classify.
        if cur_weather['Condition'] == -1:
            logger.info(f"{fcur_date} missing condition data")
            continue
        # Compare with forecasted data from previous hours.
        for hour in range(1, 13):
            # Item that carried the wanted forecast horizon.
            new_date = cur_date - timedelta(hours=hour)
            fnew_date = date_format(new_date)
            try:  # Get forecasted value for current hour
                data = get_value(table_name, fnew_date)
                cur_forcast, forcast_scores = data[f'f{hour}'], data['forcasts']
            except:  # NOTE(review): bare except, same caveat as above
                logger.info(f'Forcast {hour} does not exist for {fnew_date} in {table_name}')
                continue
            if cur_forcast['Condition'] == -1:
                logger.info(f"{fnew_date} missing condition data for {hour} hour forcast ")
                continue
            # Calculate temperature and condition differences.
            t_diff = round(abs(cur_weather['Temp'] - cur_forcast['Temp']), 2)
            d_diff = round(abs(cur_weather['Condition'] - cur_forcast['Condition']), 2)
            # c_diff = round(abs(cur_weather['Cloud Cov'] - cur_forcast['Cloud Cov']),2)
            c_diff = 0  # Cloud score was providing WAY too much variance, also kinda covered in condition
            # Calculate accuracy score.
            score = round(hour_accuracy(t_diff, d_diff, c_diff), 4)
            # Save score for this horizon back onto the item that forecast it.
            forcast_scores[f'f{hour}'] = score
            update_table(table_name, fnew_date, 'forcasts', forcast_scores)
            # Update the running average for this horizon (incremental mean).
            forcast_avg = avgs[f'f{hour}']
            cur_avg, cur_count = forcast_avg['avg'], forcast_avg['count']
            cur_count += 1
            new_avg = round(cur_avg + (score - cur_avg) / cur_count, 2)
            avgs[f'f{hour}'] = {'avg': new_avg, 'count': cur_count}
        # Persist the updated running averages for this table.
        put_value(table_name, avgs)
    return {
        'statusCode': 200,
        'body': json.dumps('Cowboy')  # Yehaw
    }
| Randerd/Weather-statistics | AWS/dynamoDB/get_statistics.py | get_statistics.py | py | 5,242 | python | en | code | 0 | github-code | 90 |
34813565410 | # Cara mengakses nilai Entry
from tkinter import*
def Click():
    """Read the Entry's current text, echo it as a new Label and to stdout."""
    entry_text = inputen.get()
    echo_label = Label(root, text=entry_text)
    echo_label.pack()
    print(entry_text)


root = Tk()

prompt_label = Label(root, text='Masukan inputan anda !')
prompt_label.pack()

# StringVar bound to the Entry so its value can be read back later.
inputen = StringVar()
entry_widget = Entry(root, width=30, textvariable=inputen)
entry_widget.pack()

submit_button = Button(root, text='Click Here', command=Click)
submit_button.pack()

root.mainloop()
# Steps
# 1. Build the GUI program as usual with a Label, an Entry, and a Button together with the function that executes it --> the exact GUI contents are optional
# 2. When creating the Entry, define a variable through the textvariable option --> see line 17 (any variable name is fine)
# 3. Define the Entry's textvariable as a StringVar(), indicating that the Entry's textvariable holds text/string data --> see line 16
# 4. Then, inside the Click command function, define a new variable (any name) with the value "<textvariable>.get()" --> see line 5
# 5. Display the output or value of the Entry, e.g. with a Label -->> line 6, or print it in the run console -->> line 8, or
# in any other way you like (up to you)
#========= DONE ===============
| ekawahanaputra/Belajar_Python | 3_Tkinter/8_Akses_Nilai_Entry.py | 8_Akses_Nilai_Entry.py | py | 1,283 | python | id | code | 0 | github-code | 90 |
11799894491 | from PIL import Image
from cStringIO import StringIO
class ImageRotater(object):
    """Rotate an image supplied as a raw encoded byte string.

    Note: Python 2 era code — relies on the module-level ``cStringIO``
    import and PIL's in-memory file interface.
    """

    def __init__(self, raw_data, quality=65):
        # raw_data: encoded image bytes; quality: save quality for re-encoding.
        self.raw_data = raw_data
        self._quality = quality

    @classmethod
    def from_raw_string(cls, raw_data):
        """Alternate constructor from raw bytes.

        Bug fix: the original wrapped this call in ``try/except IOError:
        raise`` — a no-op guard that only re-raised, while the constructor
        itself performs no I/O. The pointless handler is removed.
        """
        return cls(raw_data)

    def _image_to_raw_data(self, image, format):
        """Serialize a PIL image back to a byte string in the given format."""
        image_buffer = StringIO()
        image.save(image_buffer, format, quality=self._quality)
        image_buffer.seek(0)
        return image_buffer.read()

    def rotate(self, angle):
        """Return the raw image rotated by ``angle`` degrees.

        An angle of 0 returns the original bytes untouched (no re-encode).
        """
        if angle == 0:
            return self.raw_data
        image = Image.open(StringIO(self.raw_data))
        rotated = image.rotate(angle)
        return self._image_to_raw_data(rotated, image.format)
| slobdell/blimp-client | blimp_client/common/image_rotater.py | image_rotater.py | py | 832 | python | en | code | 0 | github-code | 90 |
46371473283 | import numpy as np
import pickle
import unmask
import ann
import pca
from PIL import Image, ImageFilter
import os
# Must be odd so each pixel sits at the exact center of its context window!
region_dim = 7

img_dither_folder = "./data/dither/"  # inputs: dithered images
img_orig_folder = "./data/orig/"      # targets: original images
def mirror_load(img_in):
    """Load an image and pad it by region_dim-1 pixels using edge mirroring.

    The padding lets every original pixel be the center of a full
    region_dim x region_dim window. Edges and corners are copied (not
    flipped) from the matching border strips.
    """
    img_file = Image.open(img_in)
    img_file = unmask.unmask(img_file)
    width, height = img_file.size
    half = int(region_dim / 2)
    outputimage = Image.new('RGB', (width + region_dim - 1, height + region_dim - 1), 0)
    # Paste center
    outputimage.paste(img_file, box=(half, half))
    # Paste top
    outputimage.paste(img_file.crop((0, 0, width, half)), box=(half, 0))
    # Paste bottom
    outputimage.paste(img_file.crop((0, height - half, width, height)), box=(half, half + height))
    # Paste left
    outputimage.paste(img_file.crop((0, 0, half, height)), box=(0, half))
    # Paste right
    outputimage.paste(img_file.crop((width - half, 0, width, height)), box=(half + width, half))
    # Paste top left corner
    outputimage.paste(img_file.crop((0, 0, half, half)), box=(0, 0))
    # Paste top right corner
    outputimage.paste(img_file.crop((width - half, 0, width, half)), box=(width + half, 0))
    # Paste bottom right corner
    outputimage.paste(img_file.crop((width - half, height - half, width, height)), box=(width + half, height + half))
    # Paste bottom left corner
    outputimage.paste(img_file.crop((0, height - half, half, height)), box=(0, half + height))
    img_file.close()
    return outputimage
def crop_and_copy(img_border_file, lxi, lyi):
    """Crop an inner-sized window at offset (lxi, lyi) from a mirror-padded image.

    The returned crop has the dimensions of the original (unpadded) image.
    """
    padded_w, padded_h = img_border_file.size
    inner_w = padded_w - (region_dim - 1)
    inner_h = padded_h - (region_dim - 1)
    box = (lxi, lyi, inner_w + lxi, inner_h + lyi)
    return img_border_file.crop(box)
def img_to_vecs(img_dither):
    """Build the per-pixel feature matrix for a dithered image.

    Returns (data, width, height) where each row of `data` holds the
    normalized RGB context window for one pixel.
    """
    # load
    img_dither_file = mirror_load(img_dither)
    width, height = img_dither_file.size
    width -= (region_dim - 1)
    height -= (region_dim - 1)
    half_region = int(region_dim / 2)
    data = np.zeros((height * width, 3 * region_dim * region_dim), float)
    for lyi in range(0, region_dim):
        for lxi in range(0, region_dim):
            temp = crop_and_copy(img_dither_file, lxi, lyi)
            # NOTE(review): with k = lyi*region_dim+lxi the channels write to
            # columns k, 2*k and 3*k — these collide (k=0 puts all three
            # channels in column 0) and leave many columns untouched; the
            # likely intent was k, k+D*D, k+2*D*D (D=region_dim). The same
            # scheme is used in imgs_to_x_y_vecs, so both must be changed
            # together (and any model retrained) — left as-is here.
            data[:, lyi * region_dim + lxi] = np.array(list(temp.getdata(0))) / 255.0
            data[:, (lyi * region_dim + lxi) * 2] = np.array(list(temp.getdata(1))) / 255.0
            data[:, (lyi * region_dim + lxi) * 3] = np.array(list(temp.getdata(2))) / 255.0
            temp.close()
    img_dither_file.close()
    return data, width, height
def imgs_to_x_y_vecs(img_dither, img_orig, keep):
    """Sample `keep` random pixels from a (dithered, original) image pair.

    Each returned row is [context-window features ..., orig R, orig G, orig B],
    with features normalized to 0-1 and targets kept in 0-255.
    """
    # load
    img_dither_file = mirror_load(img_dither)
    img_orig_file = Image.open(img_orig)
    width, height = img_orig_file.size
    half_region = int(region_dim / 2)
    # Random subset of pixel indices to keep.
    indicies = np.arange(width * height)
    np.random.shuffle(indicies)
    indicies = indicies[:keep]
    data = np.zeros((len(indicies), 3 * (region_dim * region_dim + 1)), float)
    for lyi in range(0, region_dim):
        for lxi in range(0, region_dim):
            temp = crop_and_copy(img_dither_file, lxi, lyi)
            # print(list(temp.getdata(0)))
            # NOTE(review): columns k, 2*k, 3*k collide for the three channels
            # (see the identical scheme in img_to_vecs); both functions must be
            # fixed in lockstep, so the layout is preserved here.
            data[:, lyi * region_dim + lxi] = (np.array(list(temp.getdata(0))) / 255.0)[indicies]
            data[:, (lyi * region_dim + lxi) * 2] = (np.array(list(temp.getdata(1))) / 255.0)[indicies]
            data[:, (lyi * region_dim + lxi) * 3] = (np.array(list(temp.getdata(2))) / 255.0)[indicies]
            temp.close()
    # Targets: raw RGB of the original image at the sampled pixels.
    data[:, region_dim * region_dim * 3] = (np.array(list(img_orig_file.getdata(0))))[indicies].tolist()
    data[:, region_dim * region_dim * 3 + 1] = (np.array(list(img_orig_file.getdata(1))))[indicies].tolist()
    data[:, region_dim * region_dim * 3 + 2] = (np.array(list(img_orig_file.getdata(2))))[indicies].tolist()
    img_dither_file.close()
    img_orig_file.close()
    # np.random.shuffle(data)
    return data
def load_data(percentage):
    """Build the training matrices from a random `percentage` of image pairs.

    Returns (X, Y): context-window features and target RGB values, with
    rows shuffled across all images.
    """
    # We will presume the files in both directories have the same names.
    files = os.listdir(img_orig_folder)
    data = np.empty((0, 3 * (region_dim * region_dim + 1)), float)
    count = 0
    # Keep 1% of a 1000x1000 image's pixels per image pair.
    keep = int(1000 * 1000 * 0.01)
    np.random.shuffle(files)
    files = files[:int(percentage * len(files))]
    for name in files:
        count += 1
        print("Getting {} ({} of {})".format(name, count, len(files)))
        # NOTE(review): np.append reallocates the full array each iteration
        # (quadratic); collecting into a list and vstack-ing once would be faster.
        data = np.append(data, imgs_to_x_y_vecs(img_dither_folder + name, img_orig_folder + name, keep), axis=0)
    np.random.shuffle(data)
    X = data[:, :region_dim * region_dim * 3]
    Y = data[:, region_dim * region_dim * 3:]
    return X, Y
def train(X, Y):
    """Create a LiamANN with one hidden layer (half the input width) and fit it."""
    reg = ann.LiamANN(layers=(len(X[0]), int(0.5 * len(X[0]))), tol=0.1, alpha=1e-3, max_iter=200, X=X, Y=Y)
    # reg = pickle.load( open( "reg.p", "rb" ) )
    reg.fit(X, Y)
    return reg
def train_epoch(X, Y, pca):
    """Resume a pickled network and train epoch-by-epoch, rendering each iteration.

    NOTE(review): the ``while 1==1`` loop never terminates, so the final
    ``return reg`` is unreachable — stop the process manually.
    """
    # reg = ann.LiamANN(layers=(len(X[0]),int(0.5*len(X[0]))),tol=0.1,alpha=0.5,max_iter=200,X=X,Y=Y)
    reg = pickle.load(open("reg.p", "rb"))
    count = 0
    while 1 == 1:
        count += 1
        reg.fit_epoch(X, Y)
        # Snapshot the current quality on a fixed test image.
        undither("./data/dither/1.gif", "./resolved-iteration({}).png".format(count), reg, pca)
        # print(reg.coefs_)
    return reg
def undither(img_dither, img_out, reg, pca):
    """Predict an undithered image from `img_dither` and save it to `img_out`.

    NOTE(review): reads the module-level `no_components` that is only defined
    by the script body below — calling this before that runs raises NameError.
    """
    data, width, height = img_to_vecs(img_dither)
    data = pca.transform(data, no_components)
    outputimage = Image.new('RGB', (width, height), 0)
    raw = reg.predict(data).astype(int)
    # Pack predicted channels into (R, G, B) tuples, one per pixel.
    formatd = list(zip(raw[:, 0], raw[:, 1], raw[:, 2]))
    outputimage.putdata(formatd)
    # outputimage = outputimage.filter(ImageFilter.Kernel((3,3), [1]*9))
    # outputimage = outputimage.filter(ImageFilter.SMOOTH)
    outputimage.save(img_out)
# Script body: load data, reduce with PCA, train, persist, and smoke-test.
print("Loading data...")
X, Y = load_data(1.0)

print("Running PCA...")
# Keep region_dim**2 components of the 3*region_dim**2 raw features.
no_components = region_dim * region_dim
# NOTE(review): rebinds the imported `pca` module name to a LiamPCA instance.
pca = pca.LiamPCA()
pca.fit(X)
X = pca.transform(X, no_components)

print("Dumping PCA...")
pickle.dump( pca, open( "pca.p", "wb" ) )

print("Training...")
reg = train(X, Y)

print("Dumping network...")
pickle.dump( reg, open( "reg.p", "wb" ) )

print("Test...")
undither("./data/dither/2.gif", "./test-result.png", reg, pca)
| liampulles/WITS_Repo | brain_undither/code/brain.py | brain.py | py | 6,118 | python | en | code | 0 | github-code | 90 |
73261279018 | import nextcord
from nextcord.ext import commands
from config import teal,msglogs
import datetime
class Delete(commands.Cog):
    """Cog that mirrors deleted user messages into the configured log channel."""

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        # Ignore deletions of bot messages.
        if message.author.bot:
            return
        # Ignore purge-command invocations (mass deletions).
        elif "?purge" in message.content:
            return
        # Removes carl bot from logs
        embeder = nextcord.Embed(title=f"Message deleted in #{message.channel}", description=f"{message.content}", color=teal)
        embeder.set_author(name=f"{message.author.name}", icon_url=f"{message.author.avatar}")
        embeder.timestamp = datetime.datetime.utcnow()
        # Forward the embed to the message-log channel from config.
        target = self.client.get_channel(msglogs)
        await target.send(embed=embeder)
def setup(client):
    # nextcord extension entry point: register the cog when the module loads.
    client.add_cog(Delete(client))
18961909201 | from django.contrib import admin
from .models import Repeater, RepeaterLocation, RepeaterDigitalModes, RepeaterLinkModes
class RepeaterLocationInlineAdmin(admin.TabularInline):
    """Edit a repeater's locations inline on the Repeater admin page."""
    model = RepeaterLocation
    extra = 0  # no blank extra rows by default
class RepeaterLinkModesInlineAdmin(admin.TabularInline):
    """Edit a repeater's link modes inline on the Repeater admin page."""
    model = RepeaterLinkModes
    extra = 0  # no blank extra rows by default
class RepeaterDigitalModesInlineAdmin(admin.TabularInline):
    """Edit a repeater's digital modes inline on the Repeater admin page."""
    model = RepeaterDigitalModes
    extra = 0  # no blank extra rows by default
class RepeaterAdmin(admin.ModelAdmin):
    """Admin for Repeater with its related records editable inline."""
    model = Repeater
    list_display = ('callsign', 'output_frequency')  # columns on the changelist
    inlines = [RepeaterLocationInlineAdmin, RepeaterDigitalModesInlineAdmin, RepeaterLinkModesInlineAdmin]
# Expose Repeater in the admin site using the customized admin class.
admin.site.register(Repeater, RepeaterAdmin)
| kamodev/repeater_list | repeaters/admin.py | admin.py | py | 703 | python | en | code | 0 | github-code | 90 |
73875791978 | import argparse
import sys
import os
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import shapenet_core13_loader
import shapenet_core55_loader
from model import PointCapsNet
import segmentation as seg
import open3d as o3d
import matplotlib.pyplot as plt
from chamfer_distance import ChamferDistance
CD = ChamferDistance()

## MONKEY PATCHING
# Short aliases for the namespaced open3d API used throughout this script.
PointCloud = o3d.geometry.PointCloud
Vector3dVector = o3d.utility.Vector3dVector
draw_geometries = o3d.visualization.draw_geometries

viz = o3d.visualization.Visualizer()
image_id = 0
USE_CUDA = True  # wrap the model in DataParallel and move it to the GPU
def show_points(points_tensor):
    """Display a (3, N) point tensor in an interactive open3d viewer window."""
    # print("showing tensor of shape", points_tensor.size())
    # Transpose to (N, 3) and move to CPU for open3d.
    prc_r_all = points_tensor.transpose(1, 0).contiguous().data.cpu()
    prc_r_all_point = PointCloud()
    prc_r_all_point.points = Vector3dVector(prc_r_all)
    draw_geometries([prc_r_all_point])
def main():
    """Load a trained PointCapsNet, decode saved latent capsules, and visualize.

    Shows the raw reconstruction and the part-segmented view for a single
    latent-capsule file, then stops (the loop breaks after one iteration).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
    if opt.model != '':
        print(opt.model)
        capsule_net.load_state_dict(torch.load(opt.model))
    else:
        print('pls set the model path')
    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
        capsule_net.to(device)
    capsule_net.eval()  # CRUICIAL
    for i in range(opt.batch_size):
        # LATENT_FILENAME = "tmp_lcs/cbvae_latcaps_airplane_%03d.pt"%i
        LATENT_FILENAME = "tmp_lcs/generated_capsules.pt"
        print("[INFO] Opening", LATENT_FILENAME)
        slc = torch.load(LATENT_FILENAME)  # single latent capsule
        # Add a batch dimension if the file stored a single capsule set.
        if slc.dim() == 2: slc = slc.unsqueeze(0)
        # NOTE(review): .module assumes the DataParallel wrap above ran
        # (i.e. USE_CUDA is True) — confirm before disabling CUDA.
        reconstruction = capsule_net.module.caps_decoder(slc)
        print("[INFO] Showing raw reconstruction.")
        show_points(reconstruction[0])
        print("[INFO] Showing reconstruction with part segmentation.")
        seg.seg_and_viz(slc, reconstruction)
        break  # only showing single latent capsule
if __name__ == "__main__":
    from open3d import *
    import matplotlib.pyplot as plt
    import numpy as np

    # Command-line options; defaults match the pretrained checkpoint layout.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=8, help='input batch size')
    parser.add_argument('--n_epochs', type=int, default=300, help='number of epochs to train for')
    parser.add_argument('--prim_caps_size', type=int, default=1024, help='number of primary point caps')
    parser.add_argument('--prim_vec_size', type=int, default=16, help='scale of primary point caps')
    parser.add_argument('--latent_caps_size', type=int, default=64, help='number of latent caps')
    parser.add_argument('--latent_vec_size', type=int, default=64, help='scale of latent caps')
    parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
    parser.add_argument('--model', type=str, default='checkpoints/shapenet_part_dataset_ae_200.pth', help='model path')
    parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset: shapenet_part, shapenet_core13, shapenet_core55')
    opt = parser.parse_args()
    print(opt)
    main()
7542740918 | import math
import csv
import pandas as pd
import matplotlib.pyplot as plt
# Deposition-dose model: N particles evaporate from point sources spread across
# the gap between nanowires and re-deposit along the wire length.
N = 30  # total number of evaporated particles
L = 900  # ML - nanowire length
dL_count = 20  # number of segments the wire length is split into
dL = L/dL_count  # length of one wire-length segment
a = 70  # a.u. - distance between the nanowires
dx_count = 70  # number of point sources across the gap
dx = a/dx_count  # spacing between neighbouring point sources
n = N/dx_count  # evaporation rate of a single point source

# Dose collected by the first (lowest) wire segment, summed over all sources.
N_dL1 = sum(n/math.pi * math.atan(dL/(i*dx)) for i in range(1, dx_count+1))

X = [dL]  # abscissa for the dose-vs-height plot
N_dL = [N_dL1]  # deposited dose per wire segment
summ = N_dL1
for j in range(1, dL_count):
    # Dose on segment j, accumulated over every point source (same term order
    # as the per-source loop it replaces, so the float result is unchanged).
    segment_dose = sum(
        n/math.pi*(math.pi - math.acos((dL*dL - 2*i*i*dx*dx - j*j*dL*dL - pow((j+1)*dL, 2))/(2*math.sqrt(j*j*dL*dL+i*i*dx*dx)*math.sqrt(pow((j+1)*dL, 2)+i*i*dx*dx))))
        for i in range(1, dx_count+1)
    )
    summ = summ + segment_dose
    X.append((j+1)*dL)
    N_dL.append(segment_dose)

print(summ)
print(N_dL)

plt.plot(X, N_dL, ':o')
plt.grid()
plt.xlabel('Высота, МС')
plt.ylabel('Осажденная доза, ат.')
plt.show()
| nastalla/Reevaporation | Reevaporation.py | Reevaporation.py | py | 1,711 | python | ru | code | 0 | github-code | 90 |
23855385118 | # ( (C1 ^ (p-1-d) mod p) * (C2 mod p) ) mod p = m
def blockDecrypt(cpair, keys):
    """
    Decrypt a single ElGamal ciphertext block.

    :param cpair: tuple (c1, c2) of ciphertext ints
    :param keys: dictionary with 'p' (prime modulus) and 'd' (private exponent)
    :return: one block of plaintext in integer form
    """
    cOne, cTwo = cpair
    p = keys['p']
    d = keys['d']
    # By Fermat's little theorem, c1^(p-1-d) is the modular inverse of c1^d,
    # so m = c1^(p-1-d) * c2 (mod p) recovers the plaintext block.
    # (The original also printed the raw ciphertext pair -- debug noise that
    # leaked ciphertext to stdout -- which has been removed.)
    return (pow(cOne, p - 1 - d, p) * (cTwo % p)) % p
def decrypt(ciphertext, keys):
    """
    Decrypt a sequence of ElGamal ciphertext blocks into an ASCII string.

    :param ciphertext: list of (c1, c2) int tuples
    :param keys: dictionary {'p': <prime modulus>, 'd': <private exponent>}
    :return: ascii string of plaintext
    """
    pieces = []
    for block in ciphertext:
        # decrypt one block to its integer form
        m = blockDecrypt(block, keys)
        # Convert the integer back to its ASCII bytes.  The previous
        # hex(m)[2:] / bytes.fromhex() round-trip raised ValueError whenever
        # the leading byte was < 0x10 (odd-length hex string); int.to_bytes
        # sidesteps that entirely.
        text = m.to_bytes((m.bit_length() + 7) // 8, "big").decode("ASCII")
        pieces.append(text)
    return "".join(pieces)
| laurenschneider/Public-Key-Crypto | decrypt.py | decrypt.py | py | 956 | python | en | code | 0 | github-code | 90 |
74094140456 | import matplotlib.pyplot as plt
from time import time
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
from gem.evaluation import evaluate_graph_reconstruction as gr
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
import networkx as nx
from gem.embedding.teammate import Teammate
from gem.embedding.sdne import SDNE
from argparse import ArgumentParser
if __name__ == '__main__':
    ''' Sample usage
    python run_karate.py -node2vec 1
    '''
    # Single CLI flag toggling the node2vec baseline on/off.
    parser = ArgumentParser(description='Graph Embedding Experiments on Roller Derby graphs')
    parser.add_argument('-node2vec', '--node2vec',
                        help='whether to run node2vec (default: False)')
    args = vars(parser.parse_args())
    try:
        # NOTE(review): bare except silently maps any bad/missing value to False.
        run_n2v = bool(int(args["node2vec"]))
    except:
        run_n2v = False
    # File that contains the edges. Format: source target
    # Optionally, you can add weights as third column: source target weight
    edge_tot = '../../Data/AllTeamsFullLTGraphNormalized.edgelist'
    edge_train = '../../Data/AllTeamsLTGraphTrainNormalized.edgelist'
    edge_test = '../../Data/AllTeamsLTGraphTestNormalized.edgelist'
    edge_val = '../../Data/AllTeamsLTGraphValNormalized.edgelist'
    # Specify whether the edges are directed
    isDirected = True
    # Load graph. Have to prune manually to keep number of nodes fixed
    G = nx.read_weighted_edgelist(edge_tot, nodetype=int)
    G_test_dummy = nx.read_weighted_edgelist(edge_test, nodetype=int)
    G_train_dummy = nx.read_weighted_edgelist(edge_train, nodetype=int)
    G_val_dummy = nx.read_weighted_edgelist(edge_val, nodetype=int)
    G = G.to_directed()
    # Train/val/test start as full copies of G and are pruned edge-by-edge
    # below, so all three splits keep exactly the same node set as G.
    G_train = G.copy()
    G_val = G.copy()
    G_test = G.copy()
    for edge in G.edges():
        if edge not in G_train_dummy.edges(): G_train.remove_edge(*edge)
        if edge not in G_test_dummy.edges(): G_test.remove_edge(*edge)
        if edge not in G_val_dummy.edges(): G_val.remove_edge(*edge)
    # len(graph) is the node count; number_of_edges() shows the split sizes.
    print(len(G_train))
    print(len(G_test))
    print(len(G_val))
    print(G.number_of_edges())
    print(G_train.number_of_edges())
    print(G_val.number_of_edges())
    print(G_test.number_of_edges())
    #print(G_train_dummy.nodes)
    models = []
    # Load the models you want to run
    #models.append(GraphFactorization(d=2, max_iter=50000, eta=1 * 10**-4, regu=1.0))
    #models.append(HOPE(d=4, beta=0.03))
    #models.append(LaplacianEigenmaps(d=2))
    #models.append(LocallyLinearEmbedding(d=2))
    if run_n2v:
        models.append(
            node2vec(d=4, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1)
        )
    #alpha = 0 to have "traditional" second order loss
    models.append(Teammate(d=4, alpha=1e-5, nu1=0, nu2=0, K=2,n_units=[50,15], rho=0.99, n_iter=25, xeta=0.01, n_batch=50,
                modelfile=['enc_model_teammate.json', 'dec_model_teammate.json'],
                weightfile=['enc_weights_teammate.hdf5', 'dec_weights_teammate.hdf5']))
    '''
    models.append(SDNE(d=4, alpha=1e-5, beta=4, nu1=1e-6, nu2=1e-6, K=2,n_units=[50,15], rho=0.99, n_iter=25, xeta=0.01, n_batch=50,
                modelfile=['enc_model_sdneb4.json', 'dec_model_sdneb4.json'],
                weightfile=['enc_weights_sdneb4.hdf5', 'dec_weights_sdneb4.hdf5']))
    models.append(SDNE(d=4, alpha=1e-5, beta=5, nu1=1e-6, nu2=1e-6, K=2,n_units=[50,15], rho=0.99, n_iter=100, xeta=0.01, n_batch=50,
                modelfile=['enc_model_sdneb5.json', 'dec_model_sdneb5.json'],
                weightfile=['enc_weights_sdneb5.hdf5', 'dec_weights_sdneb5.hdf5']))
    '''
    # For each model, learn the embedding and evaluate on graph reconstruction and visualization
    for num,embedding in enumerate(models):
        print ('Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges()))
        t1 = time()
        # Learn embedding - accepts a networkx graph or file with edge list
        Y, t = embedding.learn_embedding(graph=G_train,valgraph=G_val,edge_f=None, is_weighted=True, no_python=True)
        print (embedding._method_name+':\n\tTraining time: %f' % (time() - t1))
        # Evaluate on graph reconstruction:train
        MANE, avgrecpred, avgrectrue, err, err_baseline = gr.evaluateStaticGraphReconstruction(G_train, embedding, Y, None, is_weighted=True, is_undirected=False)
        print("MANE train is ",MANE)
        print("avgrec 10 pred train is ",avgrecpred)
        print("avgrec 10 true is ",avgrectrue)
        # NOTE(review): err^2 / |E| is printed as MSE, implying err is an
        # RMS-like total -- confirm against gem's evaluateStaticGraphReconstruction.
        print("MSE train is ",pow(err,2)/G_train.number_of_edges())
        #print(("\tMAP: {} \t precision curve: {}\n\n\n\n"+'-'*100).format(MAP,prec_curv[:5]))
        #viz.plot_embedding2D(embedding.get_embedding(), di_graph=G_train, node_colors=None)
        #plt.show()
        #plt.clf()
        # Evaluate on graph reconstruction:val
        MANE, avgrecpred, avgrectrue, err, err_baseline = gr.evaluateStaticGraphReconstruction(G_val, embedding, Y, None, is_weighted=True, is_undirected=False)
        print("MANE val is ",MANE)
        print("avgrec 10 pred val is ",avgrecpred)
        print("avgrec 10 true val is ",avgrectrue)
        print("MSE val is ",pow(err,2)/G_val.number_of_edges())
        #print(("\tMAP: {} \t precision curve: {}\n\n\n\n"+'-'*100).format(MAP,prec_curv[:5]))
        #viz.plot_embedding2D(embedding.get_embedding(), di_graph=G_val, node_colors=None)
        #plt.show()
        #plt.clf()
        """
        # Evaluate on graph reconstruction:val
        MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G_test, embedding, Y, None, is_weighted=True, is_undirected=False)
        print(("\tMAP: {} \t precision curve: {}\n\n\n\n"+'-'*100).format(MAP,prec_curv[:5]))
        viz.plot_embedding2D(embedding.get_embedding(), di_graph=G_test, node_colors=None)
        plt.show()
        plt.clf()
        """
| GarrettMerz/Projects | RollerDerby/GEM/examples/run_derby.py | run_derby.py | py | 6,090 | python | en | code | 0 | github-code | 90 |
18265293509 | n, k = map(int, input().split(" "))
def calc(n, k):
    """Return n written in base k: the remainder digits concatenated
    least-significant-first, then reversed character-by-character."""
    digits = []
    while n > 0:
        n, remainder = divmod(n, k)
        digits.append(str(remainder))
    return ''.join(digits)[::-1]
# The answer is the number of base-k digits of n.
print(len(calc(n, k)))
24830754832 | from django.urls import path
from . import views
# URL routes for the accounts app.
urlpatterns = [
    path('signup/', views.SignUp.as_view(), name='signup'),
    path('superuser_required/', views.SuperuserRequired.as_view(), name="superuser_required"),
    # Email-verification link: uidb64 identifies the user, token proves ownership.
    path('verify_account/<uidb64>/<token>/', views.verify_account, name="verify_account"),
    # path('reset_password/', views.reset_password, name='reset_password') # in default auth
]
| p-flis/dinofood | accounts/urls.py | urls.py | py | 410 | python | en | code | 2 | github-code | 90 |
4683642066 | import numpy as np
import cv2
# Load the image file in color
org_img = cv2.imread('yorkie.png', cv2.IMREAD_COLOR)
# Copy with cv2.copyTo (mask of all 255s copies every pixel)
mask = np.full(org_img.shape, 255, np.uint8)
cv_copy_img = cv2.copyTo(org_img, mask)
# Copy with numpy.ndarray.copy (independent deep copy)
numpy_copy_img = org_img.copy()
# Shallow copy: just another reference to the same array
shallow_copy_img = org_img
# Fill the top-left corner of the source image with a white rectangle;
# only the shallow copy should show this change.
cv2.rectangle(org_img, (0, 0), (100, 100), (255, 255, 255), thickness=-1)
# Display each copy in its own window
cv2.imshow('cv_copy_img', cv_copy_img)
cv2.imshow('numpy_copy_img', numpy_copy_img)
cv2.imshow('shallow_copy_img', shallow_copy_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| ghmagazine/opencv_dl_book | ch3/3.2/copy_image.py | copy_image.py | py | 730 | python | ja | code | 33 | github-code | 90 |
2234017928 | # Start, mid, end
# Every iteration
# End, and mid will move
#
# abbaa
# <>abbaa
# <a>bbaa
# <a|b>baa
# <abb>aa
# <ab|ba>a
# <abbaa>
# a<bb|aa>
# ab<baa>
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count palindromic substrings of s with Manacher's algorithm.

        The string is transformed to '$#a#b#...#@' so that every palindrome
        (odd or even length) becomes odd-length; the '$'/'@' sentinels stop
        the expansion loop without explicit bounds checks.
        """
        s = '$#' + '#'.join(list(s)) + '#@'
        # c/r: center and right edge of the rightmost known palindrome;
        # mir: mirror of i about c; pal[i]: palindrome radius at center i.
        i, c, r, mir = 1, 1, 1, 1
        n = len(s)
        pal_count = 0
        pal = [0] * n
        while i < n - 1:
            mir = 2 * c - i
            if i < r:
                # Seed the radius from the mirrored center, capped at r - i.
                pal[i] = min(r - i, pal[mir])
            # Expand while the characters around the current radius match.
            while s[i - pal[i] - 1] == s[i + pal[i] + 1]:
                pal[i] += 1
            if i + pal[i] > r:
                c = i
                r = c + pal[i]
            if pal[i]:
                # A radius of pal[i] in the transformed string corresponds to
                # (pal[i] + 1) // 2 distinct palindromic substrings of s.
                pal_count += (pal[i] + 1) // 2
            i += 1
        return pal_count
# Manacher's Algorithm
# Time: O(n)
# Space: O(n)
# Runtime: 44 ms, faster than 97.91% of Python3 online submissions for Palindromic Substrings.
# Memory Usage: 14 MB, less than 50.00% of Python3 online submissions for Palindromic Substrings.
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count palindromic substrings by expanding around every center.

        A string of length n has 2n - 1 centers (each character and each gap
        between characters); every successful outward expansion from a center
        is exactly one palindromic substring.
        """
        n = len(s)
        total = 0
        for center in range(2 * n - 1):
            left = center // 2
            right = left + center % 2
            while left >= 0 and right < n and s[left] == s[right]:
                total += 1
                left -= 1
                right += 1
        return total
# Time: O(n^2)
# Space: O(1)
# Runtime: 224 ms, faster than 52.46% of Python3 online submissions for Palindromic Substrings.
# Memory Usage: 14.3 MB, less than 73.46% of Python3 online submissions for Palindromic Substrings.
| vyshor/LeetCode | Palindromic Substrings.py | Palindromic Substrings.py | py | 1,663 | python | en | code | 0 | github-code | 90 |
5911538370 | import datetime
import io
import pathlib
from faker import Faker
import pytest
from isic.stats.models import GaMetrics, ImageDownload
from isic.stats.tasks import (
_cdn_access_log_records,
collect_google_analytics_metrics_task,
collect_image_download_records_task,
)
fake = Faker()  # shared generator for realistic fixture values
data_dir = pathlib.Path(__file__).parent / "data"  # static fixture files used by these tests
@pytest.mark.django_db
def test_collect_google_analytics_task(mocker, settings):
    """The GA collection task should persist one GaMetrics row with the
    country codes expanded to full ISO-3166 records."""
    # only have one VIEW_ID, otherwise the counts will be multiplied
    settings.ISIC_GOOGLE_ANALYTICS_VIEW_IDS = ["just_one"]
    settings.ISIC_GOOGLE_API_JSON_KEY = "something"
    # Stub out the Google API client and the report fetch so no network is hit.
    mocker.patch("isic.stats.tasks._initialize_analyticsreporting", mocker.MagicMock)
    mocker.patch(
        "isic.stats.tasks._get_google_analytics_report",
        return_value={
            "num_sessions": 10,
            "sessions_per_country": {
                "US": 3,
                "CA": 5,
            },
        },
    )
    collect_google_analytics_metrics_task()
    assert GaMetrics.objects.count() == 1
    assert GaMetrics.objects.first().num_sessions == 10
    assert GaMetrics.objects.first().sessions_per_country == [
        {
            "country_name": "United States",
            "country_numeric": "840",
            "country_alpha_2": "US",
            "sessions": 3,
        },
        {
            "country_name": "Canada",
            "country_numeric": "124",
            "country_alpha_2": "CA",
            "sessions": 5,
        },
    ]
def test_cdn_access_log_parsing(mocker):
    """A gzipped CloudFront access log should parse into structured records."""
    def get_object(*args, **kwargs):
        # Serve a canned gzipped CloudFront log as the S3 response body.
        with open(data_dir / "cloudfront_log.gz", "rb") as f:
            return {"Body": io.BytesIO(f.read())}
    records = list(
        _cdn_access_log_records(mocker.MagicMock(get_object=get_object), mocker.MagicMock())
    )
    assert len(records) == 24
    # Spot-check the first record field by field.
    assert records[0] == {
        "download_time": datetime.datetime(2022, 3, 16, 3, 28, tzinfo=datetime.timezone.utc),
        "path": "22f1e9e4-bd31-4053-9362-f8891a2b307d/17.jpg",
        "ip_address": "112.208.241.149",
        "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36", # noqa: E501
        "request_id": "PLFnSMEVjigrLG1hv_9OOOQUUUslSn6oo0ih_cmAbMp_tlK-ZNK1yA==",
        "status": 200,
    }
@pytest.mark.django_db
def test_collect_image_download_records_task(
    mocker, eager_celery, image_factory, django_capture_on_commit_callbacks
):
    """Only 200-status records whose path matches an existing image blob
    should be turned into ImageDownload rows; everything else is ignored."""
    # TODO: overriding the blob name requires passing the size manually.
    image = image_factory(
        accession__blob="some/exists.jpg", accession__blob_name="exists.jpg", accession__blob_size=1
    )
    def mock_client(*args, **kwargs):
        # Stand-in boto3 client whose delete_objects succeeds with an empty response.
        return mocker.MagicMock(delete_objects=lambda **_: {})
    mocker.patch("isic.stats.tasks.boto3", mocker.MagicMock(client=mock_client))
    mocker.patch("isic.stats.tasks._cdn_log_objects", return_value=[{"Key": "foo"}])
    mocker.patch(
        "isic.stats.tasks._cdn_access_log_records",
        return_value=[
            # 200 + existing path: the only record expected to count.
            {
                "download_time": fake.date_time(tzinfo=fake.pytimezone()),
                "path": "some/exists.jpg",
                "ip_address": "1.1.1.1",
                "user_agent": fake.user_agent(),
                "request_id": fake.uuid4(),
                "status": 200,
            },
            # 200 but unknown path: no matching image, skipped.
            {
                "download_time": fake.date_time(tzinfo=fake.pytimezone()),
                "path": "some/doesnt-exist.jpg",
                "ip_address": "1.1.1.1",
                "user_agent": fake.user_agent(),
                "request_id": fake.uuid4(),
                "status": 403,
            },
            # Non-200 statuses: skipped regardless of path.
            {
                "download_time": fake.date_time(tzinfo=fake.pytimezone()),
                "path": "some/exists-2.jpg",
                "ip_address": "1.1.1.1",
                "user_agent": fake.user_agent(),
                "request_id": fake.uuid4(),
                "status": 403,
            },
            {
                "download_time": fake.date_time(tzinfo=fake.pytimezone()),
                "path": "some/doesnt-exist-2.jpg",
                "ip_address": "1.1.1.1",
                "user_agent": fake.user_agent(),
                "request_id": fake.uuid4(),
                "status": 403,
            },
        ],
    )
    with django_capture_on_commit_callbacks(execute=True):
        collect_image_download_records_task()
    assert ImageDownload.objects.count() == 1
    assert image.downloads.count() == 1
    # TODO: assert file is deleted with boto, this is tricky to do with mocking
| ImageMarkup/isic | isic/stats/tests/test_tasks.py | test_tasks.py | py | 4,598 | python | en | code | 3 | github-code | 90 |
18235706199 | from collections import Counter
def solve():
    """Read N and the color string S from stdin; return the number of index
    triples i < j < k with three distinct colors, excluding evenly spaced
    triples (where k - j == j - i)."""
    n = int(input())
    s = input()
    counts = Counter(s)
    # Every way to pick one R, one G and one B, ignoring positions.
    total = counts['R'] * counts['G'] * counts['B']
    # Subtract the arithmetic-progression triples (i, j, 2j - i) that the
    # product above counted but the problem forbids.
    for i in range(n):
        for j in range(i + 1, n):
            k = 2 * j - i
            if k >= n:
                break
            if s[i] != s[j] and s[j] != s[k] and s[i] != s[k]:
                total -= 1
    return total
# Read the input, solve, and print the answer.
print(solve())
| Aasthaengg/IBMdataset | Python_codes/p02714/s452562527.py | s452562527.py | py | 320 | python | en | code | 0 | github-code | 90 |
20206491579 | from gf import Gf
from block_gf import BlockGf
from block2_gf import Block2Gf
def map_block(fun, G):
    """
    Map function f(Gf)->Gf to every element of a BlockGf or Block2Gf

    Returns a new BlockGf/Block2Gf when ``fun`` yields Gf objects, otherwise
    the raw list (or list of lists) of results.
    NOTE(review): the result-type check inspects block_list[0], so an empty
    block structure would raise IndexError -- confirm inputs are non-empty.
    """
    if isinstance(G, BlockGf):
        block_list = [fun(bl) for name, bl in G]
        # If fun produced Green's functions, rewrap them with the same names.
        if isinstance(block_list[0], Gf):
            return BlockGf(name_list = list(G.indices), block_list = block_list)
        else:
            return block_list
    elif isinstance(G, Block2Gf):
        # Build one row of results per first-index block name.
        block_list = []
        for bn1 in G.indices1:
            block_list.append([fun(G[bn1,bn2]) for bn2 in G.indices2])
        if isinstance(block_list[0][0], Gf):
            return Block2Gf(name_list1 = list(G.indices1), name_list2 = list(G.indices2), block_list = block_list)
        else:
            return block_list
    else:
        raise Exception('map_block only applicable for BlockGf and Block2Gf')
| parcollet/mda1 | pytriqs/gf/map_block.py | map_block.py | py | 890 | python | en | code | 0 | github-code | 90 |
27367895742 | # -*- coding: utf-8 -*-
from src.utilities import time_modification
from src.market_understanding import futures_analysis
def test_combining_individual_futures_analysis():
    """A perpetual priced above the index should be analyzed as contango
    with rebates (negative maker commission)."""
    index_price = 10.0
    expiration = "2023-02-01 00:00:00"
    expiration_timestamp = time_modification.convert_time_to_unix(expiration)
    # Negative maker commission means the exchange pays a rebate.
    future = {"maker_commission": -0.01, "expiration_timestamp": expiration_timestamp}
    ticker = {"instrument_name": "BTC-PERPETUAL", "mark_price": 10.1}
    expected = [
        {
            "instrument_name": "BTC-PERPETUAL",
            "with_rebates": True,
            "market_expectation": "contango",
            "mark_price": 10.1,
            "ratio_price_to_index": 0.009999999999999964,
            # NOTE(review): this field is listed but never asserted below.
            "remaining_active_time_in_hours": -5.91,
        }
    ][0]
    fut_analysis = futures_analysis.combining_individual_futures_analysis(
        index_price, future, ticker
    )[0]
    assert fut_analysis["instrument_name"] == expected["instrument_name"]
    assert fut_analysis["with_rebates"] == expected["with_rebates"]
    assert fut_analysis["market_expectation"] == expected["market_expectation"]
    assert fut_analysis["ratio_price_to_index"] == expected["ratio_price_to_index"]
| venoajie/MyApp | tests/test_mkt_undrstg_fut_anlys.py | test_mkt_undrstg_fut_anlys.py | py | 1,214 | python | en | code | 2 | github-code | 90 |
42842064090 | __author__ = 'dev'
# for i in range(10):
# print('i is now {}'.format(i))
# i = 0
# while i < 10:
# print('i is now {}'.format(i))
# i +=1
# availableExits = ['east', 'north east', 'south']
#
# chosenExits =''
# while chosenExits not in availableExits:
# chosenExits = input('Please choose a direction: ')
# if chosenExits == 'quit':
# print('Game Over')
# break
#
# else:
# print("Aren't you glad you got out of there!")
import random
highest = 10
answer = random.randint(1, highest)
# print('Please guess a number between 1 and {}: '.format(highest))
# if guess != answer:
# if guess < answer:
# guess = int(input('Please guess higher: '))
# else:
# guess = int(input('Please guess lower: '))
# if guess == answer:
# print('Well done! You guessed it!')
# # else:
# # print('Sorry, you have not guessed correctly')
# else:
# print('You got it on the first try!')
# # My solution:
# guess = int(input('Please guess a number between 1 and {} or enter 0 to exit: '.format(highest)))
# if guess == answer:
# print('You got it on the first try!')
#
# else:
# while guess != answer and guess != 0:
# if guess < answer:
# guess = int(input('Please guess higher: '))
# else:
# else:
# guess = int(input('Please guess lower: '))
# if guess == answer:
# print('Well done! You guessed it!')
#
# if guess == 0:
# print('Thanks for playing!')
# Tim's Solution
print('Please guess a number between 1 and {}: '.format(highest))
guess = 0
while guess != answer:
guess = int(input())
if guess < answer:
print('Please guess higher: ')
elif guess > answer:
print('Please guess lower: ')
else:
print('Well done! You guessed it!')
| ChristopherDaigle/Learning_and_Development | Udemy/Learn_Python_Programming_Masterclass/While/while.py | while.py | py | 1,830 | python | en | code | 0 | github-code | 90 |
209574958 | import os
import subprocess
import sys
import time
import uuid
import boto3
import yaml
def upload_video():
"""
Upload the video to S3 Bucket.
"""
s3_client = boto3.client("s3")
# Create the bucket if not exists (idempotent).
s3_client.create_bucket(Bucket=s3_bucket_name)
s3_client.upload_file(video_path, s3_bucket_name, uniq_id)
def get_subtitle():
"""
Get the subtitle from the video by Amazon Transcribe.
"""
transcribe_client = boto3.client("transcribe")
transcribe_client.start_transcription_job(
TranscriptionJobName=uniq_id,
Media={
'MediaFileUri': f"s3://{s3_bucket_name}/{uniq_id}"
},
OutputBucketName=s3_bucket_name,
OutputKey=uniq_id,
LanguageCode='en-US',
Subtitles={
'Formats': [
'srt'
],
'OutputStartIndex': 1
}
)
while True:
status = transcribe_client.get_transcription_job(TranscriptionJobName=uniq_id)
if status['TranscriptionJob']['TranscriptionJobStatus'] in ['COMPLETED', 'FAILED']:
break
print("In progress...")
time.sleep(5)
s3_client = boto3.client("s3")
# Download the generated subtitle file from S3 Bucket.
s3_client.download_file(s3_bucket_name, f"{uniq_id}.srt", f"{video_path_prefix}.srt")
def burn_subtitle():
"""
Burn the subtitle to generate a new video.
"""
cmd = f"ffmpeg -i {video_path} -filter:v subtitles={video_path_prefix}.srt {video_path_prefix}-sub.mp4"
proc = subprocess.Popen(cmd, shell=True)
proc.wait()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: python dolphin.py [video path]")
sys.exit()
video_path = sys.argv[1]
video_path_prefix = os.path.splitext(video_path)[0]
# Use a unique id to identify files to avoid collisions.
uniq_id = uuid.uuid4().hex
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
s3_bucket_name = config["s3-bucket-name"]
upload_video()
get_subtitle()
burn_subtitle()
| ileoyang/dolphin-subtitle-generator | dolphin.py | dolphin.py | py | 2,110 | python | en | code | 0 | github-code | 90 |
31850250810 | from vpython import color, cross, gcurve, graph, mag, norm, rate, sphere, vector
G = 6.67e-11
RES = 1.5e11
MS = 2e30
ME = 5e29
ve = 3.4e4
g1 = graph(xtitle="t [s]", ytitle="Lz [kg*m^2/s]", width=450, height=200, ymin=0)
fp = gcurve(color=color.blue)
fs = gcurve(color=color.red)
ft = gcurve(color=color.green)
sun = sphere(pos=vector(0, 0, 0), radius=RES/10, color=color.yellow)
planet = sphere(pos=vector(RES, 0, 0), radius=RES/20, color=color.cyan, make_trail=True)
sun.m = MS
planet.m = ME
planet.p = planet.m*vector(0, ve, 0)
sun.p = -planet.p
point_x = vector(2*RES, 0, 0)
t = 0
dt = 36000
Lnorm = mag(cross(planet.pos, planet.p))
while t < 3e8:
rate(1000)
r = planet.pos - sun.pos
Fe = -G*sun.m*planet.m*norm(r)/mag(r)**2
planet.p = planet.p + Fe*dt
sun.p = sun.p - Fe*dt
planet.pos = planet.pos + planet.p*dt/planet.m
sun.pos = sun.pos + sun.p*dt/sun.m
rpx = planet.pos - point_x
rsx = sun.pos - point_x
planet.L = cross(rpx, planet.p)
sun.L = cross(rsx, sun.p)
t = t +dt
fp.plot(t, planet.L.z/Lnorm)
fs.plot(t, sun.L.z/Lnorm)
ft.plot(t, (sun.L.z+planet.L.z)/Lnorm)
| marcbaetica/Physics-Simulations | planetary_angular_momentum/planetary_angular_momentum.py | planetary_angular_momentum.py | py | 1,140 | python | en | code | 0 | github-code | 90 |
36985159372 |
class SLNode:
    """A single node of a singly linked list: a value plus a link to the next node."""
    def __init__(self, value):
        self.value = value
        self.next = None


class SList:
    """A singly linked list tracking only its head node."""
    def __init__(self):
        self.head = None

    def addFront(self, val):
        """Add a new node holding val to the beginning of the list."""
        new_node = SLNode(val)
        new_node.next = self.head
        self.head = new_node

    def removeFront(self):
        """Remove and return the first node (None if the list is empty).

        Bug fix: the original dereferenced self.head.next even when the list
        was empty (AttributeError) and returned the *new* head rather than
        the removed node its docstring promised.
        """
        if self.head is None:
            print("The list is empty nothing to delete")
            return None
        removed = self.head
        self.head = self.head.next
        return removed

    def addBack(self, val):
        """Add a new node holding val to the end of the list.

        Bug fix: the original fell through after setting the head on an empty
        list, so the lone node ended up pointing at itself (a cycle).
        """
        new_node = SLNode(val)
        if self.head is None:
            self.head = new_node
            return
        lastNode = self.head
        while lastNode.next is not None:
            lastNode = lastNode.next
        lastNode.next = new_node

    def removeBack(self):
        """Remove and return the last node (None if the list is empty).

        Bug fix: the original raised NameError on empty and single-element
        lists (returned an unbound local) and otherwise returned the
        second-to-last node instead of the removed one.
        """
        if self.head is None:
            print("The list is empty nothing to delete")
            return None
        if self.head.next is None:
            removed = self.head
            self.head = None
            return removed
        runner = self.head
        while runner.next.next is not None:
            runner = runner.next
        removed = runner.next
        runner.next = None
        return removed

    def container(self, val):
        """Return True if val is in the list, False otherwise."""
        if self.head is None:
            print("List is empty")
        runner = self.head
        while runner is not None:
            if runner.value == val:
                print("Value found")
                return True
            runner = runner.next
        print("Value not found")
        return False

    def printValues(self):
        """Print every value in the list on one comma-separated line."""
        if self.head is None:
            print("List is empty")
        runner = self.head
        while runner is not None:
            print(runner.value, end=",")
            runner = runner.next
        print("")
# Exercise the list: builds andy -> jim -> dany -> dany.
my_list = SList()
my_list.addFront('jim')
my_list.addFront('andy')
my_list.addBack('dany')
my_list.addBack('dany')
# Recursive Fibonacci
# ------------------------------------------------------------------------------------------------------------------------
# Write rFib(num). Recursively compute and return the numth Fibonacci value. As earlier, treat the first two (num = 0, num = 1) Fibonacci values as 0 and 1. Thus:
# rFib(2) = 1 (0+1)
# rFib(3) = 2 (1+1)
# rFib(4) = 3 (1+2)
# rFib(5) = 5 (2+3)
# rFib(3.65) = rFib(3) = 2
# rFib(-2) = rFib(0) = 0.
def rFib(num):
    """Recursively compute the num-th Fibonacci value, with fib(0) = 0 and
    fib(1) = 1.  Non-integer inputs truncate toward zero; values below zero
    are treated as zero (so rFib(3.65) == rFib(3) and rFib(-2) == rFib(0))."""
    n = int(num)
    if n < 1:
        return 0
    if n == 1:
        return 1
    return rFib(n - 2) + rFib(n - 1)
# Both print 2: 3.65 truncates to 3 inside rFib.
print(rFib(3))
print(rFib(3.65))
# rListLength
# ------------------------------------------------------------------------------------------------------------------------
# Given the first node of a singly linked list, create a recursive function that returns the number of nodes in that list. You can assume the list contains no loops, and that it is short enough that you will not ‘blow your stack’.
def rListLength(head):
    """Recursively count the nodes in a singly linked list starting at head.

    Assumes the list is acyclic and short enough not to exhaust the stack.
    """
    return 0 if head is None else 1 + rListLength(head.next)
print(rListLength(my_list.head)) | SaudiWebDev2020/Wijdan_Kuddah | algorithms/weekSix/dayTwo.py | dayTwo.py | py | 3,454 | python | en | code | 0 | github-code | 90 |
74014288296 | #!/usr/bin/env python
import xml.etree.ElementTree as ET
import ipdb
# Module-level streaming parse of the dump; consumed exactly once by main().
it = ET.iterparse("full_data/simplewiki.xml")
# Namespace prefix that MediaWiki export XML puts on every element tag.
tagprefix = "{http://www.mediawiki.org/xml/export-0.10/}"
class Ctx:
    """Holds the running index used to number the output article files."""
    def __init__(self):
        self.idx = 0
    def fname(self, page):
        # NOTE(review): `page` is accepted but unused -- the filename depends
        # only on the counter; confirm before removing the parameter.
        return f"full_data/articles/article_{self.idx}.txt"
def main():
    """Stream the dump, extract each <page>, and write it to its own file."""
    ctx = Ctx()
    # `it` yields (event, element) pairs from the module-level iterparse.
    for _, el in it:
        if tag(el) == "page":
            page = process_page(el)
            # process_page returns False for pages worth skipping.
            if not page:
                continue
            save_page(ctx, page)
            ctx.idx += 1
def tag(el):
    """Strip the MediaWiki XML namespace prefix from an element's tag name."""
    return el.tag[len(tagprefix):]
def process_page(el):
    """Extract (title, text) from a MediaWiki <page> element.

    Returns False (after printing a warning) when the title or text is
    missing, or the text has five words or fewer.
    """
    title = None
    text = None
    for c in el.iter():
        # Cache the stripped tag once; the original assigned it to an unused
        # variable and then recomputed tag(c) for every comparison.
        t = tag(c)
        if t == "title":
            title = c.text
        elif t == "text":
            text = c.text
    if title is None or text is None or len(text.split()) <= 5:
        print(f"warn: title={title} text={text}")
        return False
    return (title, text)
def save_page(ctx, page):
    """Write one (title, text) pair to the next numbered article file:
    the title on the first line, then the article text."""
    title, text = page
    with open(ctx.fname(page), "w") as f:
        f.write(title)
        f.write("\n")
        f.write(text)
# Run the extraction when the module is executed.
main()
| rsepassi/chat | wikidump.py | wikidump.py | py | 1,138 | python | en | code | 1 | github-code | 90 |
36507673617 | """
This module contains the selenium master functionality that controls the selenium
web drivers.
"""
import typing as t
import logging
import abc
import time
from seproxer.selenium_extensions import webdriver_factory
from seproxer.selenium_extensions import states
from seproxer.selenium_extensions import validators
import seproxer.selenium_extensions.states.managers
import seproxer.selenium_extensions.validators.managers
import seproxer.options
from selenium.webdriver.remote import webdriver as remote_webdriver
import selenium.common.exceptions as selenium_exceptions
logger = logging.getLogger(__name__)
class ControllerError(Exception):
    """
    Generic error related to a controller operation
    """


class ControllerResultsFailed(ControllerError):
    """
    Exception occurred when a controller failed to get results, typically caused by
    a webdriver error.

    Bug fix: this previously inherited from the builtin ``ConnectionError``
    (an easy mix-up with ``ControllerError``), so ``except ControllerError``
    handlers never caught it, unlike its sibling ``ControllerWaitTimeout``.
    """


class ControllerWaitTimeout(ControllerError):
    """
    Error raised when a controller wait object could not reach its desired wait state
    """
class ControllerUrlResult:
    """Aggregates the state results and validator results produced for one URL."""
    # __slots__ keeps instances small; one of these is created per audited URL.
    __slots__ = ("state_results", "validator_results")
    def __init__(self,
                 state_results: t.List[states.managers.StateResult],
                 validator_results: validators.PageValidatorResults) -> None:
        self.state_results = state_results
        self.validator_results = validator_results
class ControllerWait:
    """Polls a subclass-defined condition until it holds or a timeout elapses.

    NOTE(review): `check` is marked @abc.abstractmethod but the class does not
    use an ABC metaclass, so instantiating the base is not actually prevented;
    the base `check` returns None (falsy), which would poll until timeout.
    """
    def __init__(self, timeout: float=20.0) -> None:
        self._timeout = timeout
    @abc.abstractmethod
    def check(self) -> bool:
        """
        Implement this method to return True when the desired condition is reached
        """
    def wait_until(self, timeout: t.Optional[float]=None):
        """
        Continuously waits until the `check` method returns True

        :param timeout: seconds to wait; defaults to the timeout given at
            construction time.
        :raises ControllerWaitTimeout: Occurs when the check method does not return True
            after the specified timeout period.
        """
        if timeout is None:
            timeout = self._timeout
        start_time = time.time()
        # Poll every 200 ms until check() succeeds or the deadline passes.
        while not self.check():
            time.sleep(0.2)
            if start_time and (time.time() - start_time) >= timeout:
                raise ControllerWaitTimeout(
                    "Timed out waiting for {}.check".format(self.__class__.__name__)
                )
class DriverController:
    """
    The purpose of this class is to drive the WebDriver and perform the
    appropriate validators on URLs once the defined state(s) are reached
    """
    def __init__(self,
                 driver: remote_webdriver.WebDriver,
                 loaded_state_manager: states.managers.LoadedStateManager,
                 validator_manager: validators.managers.PageValidatorManager) -> None:
        self._webdriver = driver
        self._loaded_state_manager = loaded_state_manager
        self._validator_manager = validator_manager
    def get_results(self,
                    url: str,
                    controller_wait: t.Optional[ControllerWait]=None) -> ControllerUrlResult:
        """Navigate to ``url`` and collect state and validator results.

        :param url: the page to load in the controlled webdriver
        :param controller_wait: optional wait object polled before auditing;
            a wait timeout is logged as a warning, not raised
        :raises ControllerResultsFailed: wraps any WebDriverException
        """
        try:
            self._webdriver.get(url)
            # If we have a specified controller wait, let's wait until the desired state is reached
            # before auditing states and validators
            if controller_wait:
                try:
                    controller_wait.wait_until()
                except ControllerWaitTimeout:
                    logger.warning("ControllerWait state not reached for {}".format(url))
            # Perform our auditors -- also block until certain states are reached
            state_results = self._loaded_state_manager.get_state_results(self._webdriver)
            # After our the page reaches a testable state, now let's run all our validators on it
            # TODO: Consider dependant graphs for validators based on states
            validator_results = self._validator_manager.validate(self._webdriver)
        except selenium_exceptions.WebDriverException as e:
            # NOTE(review): uses the root `logging` module here rather than the
            # module-level `logger` used above -- confirm which is intended.
            logging.exception("Failed result attempt for {}".format(url))
            raise ControllerResultsFailed(e)
        return ControllerUrlResult(state_results, validator_results)
    def done(self):
        """Shut down the underlying webdriver session."""
        self._webdriver.quit()
    @staticmethod
    def from_options(options: seproxer.options.Options) -> "DriverController":
        """Build a fully wired DriverController from seproxer options."""
        driver = webdriver_factory.get_webdriver(options)
        loaded_state_manager = states.managers.LoadedStateManager.from_options(options)
        validator_manager = validators.managers.PageValidatorManager.from_options(options)
        return DriverController(
            driver=driver,
            loaded_state_manager=loaded_state_manager,
            validator_manager=validator_manager,
        )
| Rastii/seproxer | seproxer/selenium_extensions/controller.py | controller.py | py | 4,736 | python | en | code | 8 | github-code | 90 |
40319783059 | import numpy as np
from scipy.stats.mstats import gmean
from gnuradio import gr
import random
class SynchronizeAndEstimate(gr.sync_block):
    """GNU Radio sync block: OFDM frame synchronisation, channel estimation, equalisation.

    Slides over the incoming complex stream, correlates the FFT of candidate
    synch symbols against a (optionally shuffled) Zadoff-Chu reference to find
    frame starts, estimates the channel from the matched synch symbols, then
    equalises the following data symbols and writes the frequency-domain result
    to the output buffer.
    """

    def __init__(self, case, num_bins, diagnostics, freq_offset, bin_selection, buffer_on, buffer_size, seed_value):
        """Build the bin maps, reference sequence and buffers for the selected profile.

        :param case: Selects one of the two hard-coded ``sdr_profile`` entries.
        :param num_bins: Requested number of occupied bins (stored as float; unused below).
        :param diagnostics: When True, ``bin_selection`` overrides the computed bin set.
        :param freq_offset: Frequency offset (Hz) applied to the input stream in work().
        :param bin_selection: Explicit bin indices used in diagnostics mode.
        :param buffer_on: Buffering flag (stored; not used in this block).
        :param buffer_size: Size of the internal complex scratch buffers.
        :param seed_value: Non-zero seeds a deterministic shuffle of the Zadoff-Chu sequence.
        """
        # NOTE(review): dead assignment — immediately overwritten by the next line.
        self.case = 0
        self.case = case
        self.num_bins = float(num_bins)
        self.diagnostics = diagnostics
        self.freq_offset = freq_offset
        self.bin_selection = bin_selection
        self.buffer_size = buffer_size
        self.buffer_on = buffer_on
        self.seed_value = seed_value
        # Two hard-coded radio profiles: 0 = SISO 4G/5G-like (LTE-TU), 1 = 2x2 WiFi-like.
        sdr_profile = {0: {'system_scenario': '4G5GSISO-TU',
                           'diagnostic': 1,
                           'wireless_channel': 'Fading',
                           'channel_band': 0.97*960e3,
                           'bin_spacing': 15e3,
                           'channel_profile': 'LTE-TU',
                           'CP_type': 'Normal',
                           'num_ant_txrx': 1,
                           'param_est': 'Estimated',
                           'MIMO_method': 'SpMult',
                           'SNR': 5,
                           'ebno_db': [24],
                           'num_symbols': [48],
                           'stream_size': 1},
                       1: {'system_scenario': 'WIFIMIMOSM-A',
                           'diagnostic': 0,
                           'wireless_channel': 'Fading',
                           'channel_band': 0.9 * 20e6,
                           'bin_spacing': 312.5e3,
                           'channel_profile': 'Indoor A',
                           'CP_type': 'Extended',
                           'num_ant_txrx': 2,
                           'param_est': 'Ideal',
                           'MIMO_method': 'SpMult',
                           'SNR': 50,
                           'ebno_db': [6, 7, 8, 9, 10, 14, 16, 20, 24],
                           'num_symbols': [10, 10, 10, 10, 10, 10, 10, 10, 10],
                           'stream_size': 2}}
        self.system_scenario = sdr_profile[self.case]['system_scenario']
        self.diagnostic = sdr_profile[self.case]['diagnostic']
        self.wireless_channel = sdr_profile[self.case]['wireless_channel']
        self.channel_band = sdr_profile[self.case]['channel_band']
        self.bin_spacing = sdr_profile[self.case]['bin_spacing']
        self.channel_profile = sdr_profile[self.case]['channel_profile']
        self.CP_type = sdr_profile[self.case]['CP_type']
        self.num_ant_txrx = sdr_profile[self.case]['num_ant_txrx']
        self.param_est = sdr_profile[self.case]['param_est']
        self.MIMO_method = sdr_profile[self.case]['MIMO_method']  # Make this 0 (or something) for single antenna
        self.SNR = sdr_profile[self.case]['SNR']
        self.ebno_db = sdr_profile[self.case]['ebno_db']
        self.num_symbols = sdr_profile[self.case]['num_symbols']
        self.stream_size = sdr_profile[self.case]['stream_size']
        self.sig_datatype = 'Complex'
        self.phy_chan = 'Data'
        self.modulation_type = 'QPSK'
        self.bits_per_bin = 2
        # [number of synch symbols, number of data symbols] per frame.
        self.synch_data_pattern = np.array([1, 3])
        self.SNR_type = 'Digital'  # Digital, Analog
        self.ref_sigs = 0.0
        # FFT length: next power of two covering the occupied band.
        self.NFFT = int(2**(np.ceil(np.log2(round(self.channel_band / self.bin_spacing)))))
        self.fs = self.bin_spacing * self.NFFT
        # Cyclic-prefix length: one quarter of the FFT length.
        self.len_CP = int(round(self.NFFT / 4))
        self.num_bins0 = np.floor(self.channel_band / self.bin_spacing)
        num_bins0 = self.num_bins0  # Max number of occupied bins for data
        num_bins1 = 4 * np.floor(num_bins0 / 4)  # Make number of bins a multiple of 4 for MIMO
        if self.diagnostics is True:
            all_bins = np.array(self.bin_selection)
        else:
            all_bins = np.array(list(range(-int(num_bins1 / 2), 0)) + list(range(1, int(num_bins1 / 2) + 1)))
        # positive and negative bin indices
        ref_bins0 = np.random.randint(1, int(num_bins1 / 2) + 1, size=int(np.floor(num_bins1 * self.ref_sigs / 2)))
        ref_bins = np.unique(ref_bins0)
        # positive and negative bin indices
        ref_only_bins = np.sort(np.concatenate((-ref_bins, ref_bins)))  # Bins occupied by pilot (reference) signals
        # positive and negative bin indices - converted to & replaced by positive only in MultiAntennaSystem class
        data_only_bins = np.setdiff1d(all_bins, ref_only_bins)  # Actual bins occupied by data
        self.num_data_bins = len(data_only_bins)
        # Map signed bin indices into non-negative FFT bin positions.
        self.used_bins_data = ((self.NFFT + all_bins) % self.NFFT).astype(int)
        num_sync_data_patterns = int(np.ceil(self.num_symbols[0] / sum(self.synch_data_pattern)))
        # 0 marks a synch symbol, 1 marks a data symbol.
        symbol_pattern0 = np.concatenate((np.zeros(self.synch_data_pattern[0]), np.ones(self.synch_data_pattern[1])))
        self.symbol_pattern = np.tile(symbol_pattern0, num_sync_data_patterns)
        self.symbol_length = self.NFFT + self.len_CP
        gr.sync_block.__init__(self,
                               name="SynchronizeAndEstimate",
                               in_sig=[np.complex64],
                               out_sig=[np.complex64])
        self.num_sync_bins = self.NFFT - 2
        self.num_of_synchs_and_synch_bins = np.array([self.synch_data_pattern[0], self.num_sync_bins])
        # NOTE(review): np.product is deprecated in recent NumPy (use np.prod).
        self.total_num_synch_bins = np.product(self.num_of_synchs_and_synch_bins)
        # Zadoff-Chu root index.
        self.prime = 23
        synch_bin_index_from_0 = np.array(range(0, int(self.total_num_synch_bins)))
        synch_bin_index_from_1 = np.array(range(1, int(self.total_num_synch_bins) + 1))
        # Zadoff-Chu reference sequence; even and odd lengths use different formulas.
        if self.total_num_synch_bins % 2 == 0:
            self.zadoff_chu = np.exp(-1j * (2 * np.pi / self.total_num_synch_bins) * self.prime * (synch_bin_index_from_0 ** 2 / 2))
        else:
            self.zadoff_chu = np.exp(-1j * (2 * np.pi / self.total_num_synch_bins) * self.prime * (synch_bin_index_from_0 * synch_bin_index_from_1) / 2)
        if self.seed_value != 0:
            # Deterministically shuffle the reference sequence: the seed acts as
            # a lightweight scrambling key shared with the transmitter.
            index_zadoff_chu = list(range(self.zadoff_chu.shape[0]))
            map_index_position = list(zip(index_zadoff_chu, self.zadoff_chu[:]))
            random.seed(self.seed_value)
            random.shuffle(map_index_position)
            index, self.zadoff_chu = zip(*map_index_position)
        self.used_bin_index = list(range(int(-self.num_sync_bins / 2), 0)) + list(
            range(1, int(self.num_sync_bins / 2) + 1))
        self.used_bins = ((self.NFFT + np.array(self.used_bin_index)) % self.NFFT)
        self.used_bins_synch = self.used_bins.astype(int)  # Same as Caz.used_bins.astype(int) #i
        self.synch_reference = self.zadoff_chu  # i (import file)
        # window: CP to end of symbol
        self.ptr_o = np.array(range(int(self.len_CP), int(self.len_CP + self.NFFT))).astype(int)
        self.ptr_i = self.ptr_o - np.ceil(int(self.len_CP / 2)).astype(int)
        # Capacity limits for the estimate arrays (max synch observations / data symbols).
        lmax_s = 20
        lmax_d = int(sum(self.symbol_pattern))
        # self.time_synch_ref = np.zeros((self.num_ant_txrx, lmax_s, 2))  # ONE OF THESE 2 WILL BE REMOVED
        self.est_chan_freq_p = np.zeros((self.num_ant_txrx, lmax_s, int(self.NFFT)), dtype=complex)
        self.est_chan_freq_n = np.zeros((self.num_ant_txrx, lmax_s, len(self.used_bins_synch)), dtype=complex)
        self.est_chan_time = np.zeros((self.num_ant_txrx, lmax_s, 3), dtype=complex)
        self.est_synch_freq = np.zeros((self.num_ant_txrx, lmax_s, len(self.used_bins_synch)), dtype=complex)
        if self.num_ant_txrx == 1:
            self.est_data_freq = np.zeros((self.num_ant_txrx, 1, len(self.used_bins_data)), dtype=complex)
        elif self.num_ant_txrx == 2 and self.MIMO_method == 'STCode':
            pass
        elif self.num_ant_txrx == 2 and self.MIMO_method == 'SPMult':
            # NOTE(review): the profile stores 'SpMult' (mixed case), so this
            # comparison can never match — confirm the intended spelling.
            pass
        # Max length of channel impulse is CP
        self.est_chan_impulse = np.zeros((self.num_ant_txrx, lmax_s, int(self.NFFT)), dtype=complex)
        self.num_of_synchs_and_synch_bins = self.num_of_synchs_and_synch_bins.astype(int)
        self.synch_state = 0
        self.case = case
        self.stride_val = None
        self.correlation_observations = None
        self.start_sample = None
        self.del_mat = None
        self.time_synch_ref = np.zeros((self.num_ant_txrx, 250, 3))  # There are two more in the init.
        self.time_series_data_window = np.zeros(self.NFFT, dtype=complex)
        self.rx_buffer_time_data = None
        self.samp_freq = self.NFFT * self.bin_spacing
        self.samp_period = 1/self.samp_freq
        # Buffer Pointers
        self.start_ptr = 0
        self.end_ptr = buffer_size - 1
        self.current_ptr = 0
        self.current_end_ptr = 0
        self.data_buffer = np.zeros((1, buffer_size)) + 1j * np.zeros((1, buffer_size))
        self.inout = np.zeros((1, buffer_size)) + 1j * np.zeros((1, buffer_size))
        # Last few correlation-peak delay indices (used by the optional averaging path).
        self.dmax_ind_buffer = np.array([0])

    def work(self, input_items, output_items):
        """Synchronise to the input stream, estimate the channel, and emit equalised data bins.

        Returns ``len(output_items[0])`` as required by the gr.sync_block contract.
        """
        in0 = input_items[0]  # input buffer
        out = output_items[0]  # output buffer
        # Start from the middle of the CP
        if self.num_ant_txrx == 1:
            self.est_data_freq = np.zeros((self.num_ant_txrx, 1, len(self.used_bins_data)), dtype=complex)
        elif self.num_ant_txrx == 2 and self.MIMO_method == 'STCode':
            pass
        elif self.num_ant_txrx == 2 and self.MIMO_method == 'SPMult':
            pass
        input_time_series_data = in0
        input_with_frequency_offset = input_time_series_data
        # Apply the configured frequency offset sample-by-sample.
        # NOTE(review): no copy is made, so this writes through to the
        # scheduler's input buffer — confirm that mutation is intended.
        for index in range(input_time_series_data.shape[0]):
            input_with_frequency_offset[index] = input_time_series_data[index] * np.exp(
                1j * 2 * np.pi * self.freq_offset * self.samp_period * index)
        # num_loops = (len(input_data) - self.window_len) / self.stride_val + 1  # number of windows across rx data
        self.time_synch_ref = np.zeros((self.num_ant_txrx, 250, 3))
        self.stride_val = np.ceil(self.len_CP / 2)
        ptr_frame = 0
        b = 0
        xp = []
        for m in range(1):
            self.correlation_observations = -1
            self.start_sample = (self.len_CP - 4) - 1
            total_loops = int(np.ceil(input_with_frequency_offset.shape[0] / self.stride_val))
            max_correlation_value_buffer = np.zeros(total_loops)
            ptr_adj, loop_count, symbol_count = 0, 0, 0
            tap_delay = 3
            x = np.zeros(tap_delay)
            ptr_synch0 = np.zeros(1000)
            while loop_count <= total_loops:
                # Choose the next candidate frame pointer: raster-scan before the
                # first lock, hop frame-by-frame for the first few observations,
                # then predict from the least-squares fit below.
                if self.correlation_observations == -1:
                    ptr_frame = loop_count * self.stride_val + self.start_sample + ptr_adj
                elif self.correlation_observations < 5:
                    ptr_frame += sum(self.synch_data_pattern) * (int(self.NFFT) + self.len_CP)
                else:
                    ptr_frame = (np.ceil(np.dot(xp[-1:], b) - self.len_CP / 4))[0]
                # Only proceed when the full synch window fits in the input.
                if (self.num_of_synchs_and_synch_bins[0] - 1) * self.symbol_length + int(self.NFFT) + ptr_frame < input_with_frequency_offset.shape[0]:
                    # Copy the candidate synch symbol(s) into the working window.
                    for i in range(self.num_of_synchs_and_synch_bins[0]):
                        start = int(i * self.symbol_length + ptr_frame)
                        fin = int(i * self.symbol_length + ptr_frame + int(self.NFFT))
                        self.time_series_data_window[i * int(self.NFFT): (i + 1) * int(self.NFFT)] = input_with_frequency_offset[
                            start:fin]
                    # Take FFT of the window
                    fft_vec = np.zeros((self.num_of_synchs_and_synch_bins[0], int(self.NFFT)), dtype=complex)
                    for i in range(self.num_of_synchs_and_synch_bins[0]):
                        start = i * int(self.NFFT)
                        fin = (i + 1) * int(self.NFFT)
                        fft_vec[i, 0:int(self.NFFT)] = np.fft.fft(self.time_series_data_window[start: fin], int(self.NFFT))
                    synch_symbol_freq_data = fft_vec[:, self.used_bins_synch]
                    synch_symbol_freq_data_vector = np.reshape(synch_symbol_freq_data, (1, synch_symbol_freq_data.shape[0] * synch_symbol_freq_data.shape[1]))
                    # Normalise to unit average power before correlating.
                    pow_est = sum(sum(synch_symbol_freq_data_vector * np.conj(synch_symbol_freq_data_vector))) / synch_symbol_freq_data_vector.shape[1]  # Altered
                    synch_data_normalized = synch_symbol_freq_data_vector / (np.sqrt(pow_est) + 1e-10)
                    # Correlate against the reference over all candidate CP delays.
                    bins = self.used_bins_synch[:, None]
                    cp_dels = np.array(range(int(self.len_CP + 1)))[:, None]
                    p_mat0 = np.exp(1j * 2 * (np.pi / self.NFFT) * np.dot(bins, cp_dels.T))
                    p_mat = np.tile(p_mat0, (self.num_of_synchs_and_synch_bins[0], 1))
                    self.del_mat = np.dot(np.conj(self.synch_reference)[None, :], np.dot(np.diag(synch_data_normalized[0]), p_mat))
                    dd = abs(self.del_mat[0, :])
                    max_correlation_value, max_correlation_index = dd.max(0), dd.argmax(0)
                    max_correlation_value_buffer[loop_count] = max_correlation_value
                    # Accept the peak once it is strong enough, or once locked.
                    if max_correlation_value > 0.5 * synch_data_normalized.shape[1] or self.correlation_observations > -1:
                        if max_correlation_index > np.ceil(0.75 * self.len_CP):
                            # Peak too deep into the CP: nudge the pointer and redo the correlation.
                            if self.correlation_observations == -1:  # 0
                                ptr_adj += np.ceil(0.5 * self.len_CP)
                                ptr_frame = loop_count * self.stride_val + self.start_sample + ptr_adj
                            elif self.correlation_observations < 5:
                                ptr_frame += np.ceil(0.5 * self.len_CP)
                            # Take FFT of the window
                            # NOTE(review): the window is not refilled from the
                            # adjusted ptr_frame before this second FFT — confirm.
                            fft_vec = np.zeros((self.num_of_synchs_and_synch_bins[0], int(self.NFFT)), dtype=complex)
                            for i in range(self.num_of_synchs_and_synch_bins[0]):
                                start = i * int(self.NFFT)
                                fin = (i + 1) * int(self.NFFT)
                                fft_vec[i, 0:int(self.NFFT)] = np.fft.fft(
                                    self.time_series_data_window[start: fin], int(self.NFFT))
                            synch_symbol_freq_data = fft_vec[:, self.used_bins_synch]
                            synch_symbol_freq_data_vector = np.reshape(synch_symbol_freq_data, (1, synch_symbol_freq_data.shape[0] * synch_symbol_freq_data.shape[1]))
                            pow_est = sum(sum(synch_symbol_freq_data_vector * np.conj(synch_symbol_freq_data_vector))) / synch_symbol_freq_data_vector.shape[1]
                            synch_data_normalized = synch_symbol_freq_data_vector / (np.sqrt(pow_est) + 1e-10)
                            bins = self.used_bins_synch[:, None]
                            cp_dels = np.array(range(self.len_CP + 1))[:, None]
                            p_mat0 = np.exp(1j * 2 * (np.pi / int(self.NFFT)) * np.dot(bins, cp_dels.T))
                            p_mat = np.tile(p_mat0, (self.num_of_synchs_and_synch_bins[0], 1))
                            # maybe replace index 0 with m
                            self.del_mat = np.dot(np.conj(self.synch_reference)[None, :],
                                                  np.dot(np.diag(synch_data_normalized[0]), p_mat))
                            dd = abs(self.del_mat[0, :])
                            max_correlation_value, max_correlation_index = dd.max(0), dd.argmax(0)
                            max_correlation_value_buffer[loop_count] = max_correlation_value
                        time_synch_ind = self.time_synch_ref[m, max(self.correlation_observations, 1), 0]
                        # Register a new observation only if it is at least a full
                        # symbol past the previous one (or it is the first).
                        if ptr_frame - time_synch_ind > (2 * self.len_CP + int(self.NFFT)) or self.correlation_observations == -1:
                            self.correlation_observations += 1
                            self.time_synch_ref[m, self.correlation_observations, 0] = ptr_frame
                            self.time_synch_ref[m, self.correlation_observations, 1] = max_correlation_index
                            self.time_synch_ref[m, self.correlation_observations, 2] = max_correlation_value
                            ptr_synch0[symbol_count % tap_delay] = sum(self.time_synch_ref[m, self.correlation_observations, 0:2])
                            x[symbol_count % tap_delay] = symbol_count * sum(self.synch_data_pattern)  # No need for +1 on lhs
                            symbol_count += 1
                            x2 = x[0:min(self.correlation_observations, tap_delay)]
                            x_plus = np.concatenate((x2, np.atleast_1d(symbol_count * sum(self.synch_data_pattern))))
                            xp = np.zeros((len(x_plus), 2))
                            xp[:, 0] = np.ones(len(x_plus))
                            xp[:, 1] = x_plus
                            if self.correlation_observations > 3:
                                # Least-squares line through the recent synch
                                # positions; used above to predict the next frame.
                                y = ptr_synch0[0:min(tap_delay, self.correlation_observations)]
                                xl = np.zeros((len(x2), 2))
                                xl[:, 0] = np.ones(len(x2))
                                xl[:, 1] = x2
                                b = np.linalg.lstsq(xl, y)[0]
                            # Keep only the last three correlation-peak delays.
                            if self.correlation_observations == 0:
                                self.dmax_ind_buffer = np.append(self.dmax_ind_buffer, max_correlation_index)
                                self.dmax_ind_buffer = np.delete(self.dmax_ind_buffer, 0, 0)
                            else:
                                self.dmax_ind_buffer = np.append(self.dmax_ind_buffer, max_correlation_index)
                                if self.dmax_ind_buffer.shape[0] > 3:
                                    self.dmax_ind_buffer = self.dmax_ind_buffer[-3:]
                            # Hard-disabled alternative delay-averaging path.
                            dmax_ind_processing = 0
                            if dmax_ind_processing == 1:
                                if self.dmax_ind_buffer.shape[0] >= 3:
                                    current_avg_buffer = self.dmax_ind_buffer
                                    average_delay = gmean(current_avg_buffer)
                                    average_delay = np.round(average_delay)
                                    best_index = np.argmin(average_delay)
                                    data_recov0 = np.dot(np.diag(synch_data_normalized[0]), p_mat[:, int(current_avg_buffer[best_index])])  # -1
                                else:
                                    data_recov0 = np.dot(np.diag(synch_data_normalized[0]), p_mat[:, max_correlation_index])
                            else:
                                data_recov0 = np.dot(np.diag(synch_data_normalized[0]), p_mat[:, max_correlation_index])
                            # recovered data with delay removed - DataRecov in MATLAB code
                            h_est1 = np.zeros((int(self.NFFT), 1), dtype=complex)
                            # TmpV1 in MATLAB code
                            # self.SNR += 1e-10
                            data_recov = (data_recov0 * np.conj(self.synch_reference)) / (1 + (1 / self.SNR))
                            h_est00 = np.reshape(data_recov, (data_recov.shape[0], self.num_of_synchs_and_synch_bins[0]))
                            h_est0 = h_est00.T
                            # Average the per-synch-symbol estimates into one channel estimate.
                            h_est = np.sum(h_est0, axis=0) / (self.num_of_synchs_and_synch_bins[0] + 1e-10)
                            h_est1[self.used_bins_synch, 0] = h_est
                            self.est_chan_freq_p[m, self.correlation_observations, 0:len(h_est1)] = h_est1[:, 0]
                            self.est_chan_freq_n[m, self.correlation_observations, 0:len(h_est)] = h_est
                            h_est_time = np.fft.ifft(h_est1[:, 0], int(self.NFFT))
                            self.est_chan_impulse[m, self.correlation_observations, 0:len(h_est_time)] = h_est_time
                            # MMSE-equalise the synch symbol with the channel estimate.
                            h_est_ext = np.tile(h_est, (1, self.num_of_synchs_and_synch_bins[0])).T
                            synch_equalized = (data_recov0 * np.conj(h_est_ext[:, 0])) / (
                                (np.conj(h_est_ext[:, 0]) * h_est_ext[:, 0]) + (1 / (self.SNR + 1e-10)) + 1e-10)
                            self.est_synch_freq[m, self.correlation_observations,
                                                0:len(self.used_bins_synch) * self.num_of_synchs_and_synch_bins[0]] = synch_equalized
                loop_count += 1
        # Data demodulation pass: equalise every data symbol after each synch hit.
        if self.num_ant_txrx == 1:
            m = 0  # Just an antenna index
            for p in range(self.correlation_observations):
                for data_sym in range(self.synch_data_pattern[1]):
                    if sum(self.time_synch_ref[m, p, :]) + self.NFFT < input_with_frequency_offset.shape[0]:
                        data_ptr = int(self.time_synch_ref[m, p, 0] + (data_sym + 1) * self.symbol_length)
                        self.rx_buffer_time_data = input_with_frequency_offset[data_ptr: data_ptr + self.NFFT]  # -1
                        fft_vec = np.fft.fft(self.rx_buffer_time_data, self.NFFT)
                        freq_dat0 = fft_vec[self.used_bins_data]
                        p_est = sum(freq_dat0 * np.conj(freq_dat0)) / len(freq_dat0)
                        data_recov0 = freq_dat0 / np.sqrt(p_est)
                        h_est = self.est_chan_freq_p[m, p, self.used_bins_data]
                        # Undo the timing-offset phase ramp, then MMSE-equalise.
                        del_rotate = np.exp(
                            1j * 2 * (np.pi / self.NFFT) * self.used_bins_data * self.time_synch_ref[m, p, 1])
                        data_recov = np.dot(np.diag(data_recov0), del_rotate)
                        data_equalized = (data_recov * np.conj(h_est)) / (
                            (np.conj(h_est) * h_est) + (1 / self.SNR))
                        if p * self.synch_data_pattern[1] + data_sym == 0:
                            self.est_data_freq[m, p, :] = self.est_data_freq[m, p, :] + data_equalized
                        else:
                            # Grow the estimate array by one symbol row.
                            self.est_data_freq = np.vstack((self.est_data_freq[m, :], data_equalized))
                            self.est_data_freq = self.est_data_freq[np.newaxis, :, :]
                        data = self.est_data_freq[m, p, 0:len(self.used_bins_data)]
                        p_est1 = sum(data * np.conj(data)) / (len(data) + 1e-10)
                        self.est_data_freq[
                            m, p * self.synch_data_pattern[1] + data_sym, 0:len(self.used_bins_data)] /= np.sqrt(p_est1)
                        data_out = self.est_data_freq[m, p * self.synch_data_pattern[1] + data_sym,
                                                      0:len(self.used_bins_data)]
                        # NOTE(review): every symbol overwrites the same region of
                        # `out`; only the last equalised symbol survives — confirm.
                        out[0:len(data_out)] = data_out
        return len(output_items[0])
| akyerr/5GWifi_GNURadio | gr-RX_OFDM/python/SynchronizeAndEstimate.py | SynchronizeAndEstimate.py | py | 22,846 | python | en | code | 2 | github-code | 90 |
# Read T test cases of two integers each, then print every pair's sum
# (Baekjoon 10950 style: collect all results first, print them afterwards).
case_count = int(input(''))
sums = []
for _ in range(case_count):
    first, second = input().split()
    sums.append(int(first) + int(second))
for total in sums:
    print(total)
'''
c언어도 마음만 먹으면 배열로 지정하여
한번에 입력받고, 한번에 출력할 수 있지만
파이썬 리스트로 구현해보고 싶어서 리스트를 활용함.
'''
# Whitelist of licence plates authorised to use the car park.
guesTList = ['00-CC-00','01-CC-01','02-CC-02','03-CC-03','04-CC-04', '05-CC-05','06-CC-06','07-CC-07','08-CC-08','09-CC-09']
# Plates currently inside the park.
parklist = []
# Running count of completed entries.
countEntrada = 0
# Last plate typed by the user (also the loop sentinel below).
matricula = ""
def parkManager(matricula, movimento):
    """Apply a validated movement: register an entry ("E") or an exit ("S").

    Updates the module-level ``parklist`` and ``countEntrada`` and prints the
    current park state plus the running entry count.
    """
    global countEntrada
    move = movimento.upper()
    if move == "E":
        parklist.append(matricula)
        countEntrada += 1
        print(parklist)
    elif move == "S":
        parklist.remove(matricula)
        print("Saída concluída")
        print(parklist)
    print("Entradas: {0}".format(countEntrada))
def parkvalidator(matricula, movimento):
    """Validate a requested movement and delegate to parkManager when it is legal.

    ``mov`` takes three values: True (legal), False (illegal), "" (unknown
    movement letter). Fix: the original printed "Movimento inserido inválido"
    for every non-False outcome, i.e. even for perfectly valid movements; the
    message is now printed only when the movement letter is neither E nor S.
    """
    mov = ""
    if matricula not in guesTList:
        print("Matrícula não autorizada")
        mov = False
    elif movimento.upper() == "E":
        # Entry: plate must be authorised and not already inside.
        if matricula in guesTList and matricula not in parklist:
            mov = True
        else:
            mov = False
    elif movimento.upper() == "S":
        # Exit: plate must currently be inside.
        if matricula in parklist:
            mov = True
        else:
            mov = False
    if mov is False:
        print("Não é possível fazer esse movimento!")
    elif mov == "":
        print("Movimento inserido inválido")
    if mov is True:
        parkManager(matricula, movimento)
# Interactive loop; the sentinel plate "00-00-00" ends the program.
# NOTE(review): the sentinel is still passed to parkvalidator once before the
# loop condition is re-checked — confirm that final call is intended.
while matricula != "00-00-00":
    matricula = input("Qual a sua matricula? ")
    movimento = input("Qual movimento deseja fazer? (Entrada-E; Saída-S) ")
    parkvalidator(matricula,movimento)
31109925067 | from base import Animation
import math
class Positional(Animation):
    """LED animation lighting a pixel block whose position maps to the note played.

    NOTE(review): uses ``xrange`` — this file targets Python 2; the float
    division idioms below rely on Python 2 semantics as well.
    """

    def __init__(self, config):
        super(Positional, self).__init__(config)
        self.brightness = 1.0
        # Look at the range of notes assigned to this animation
        # in order to determine the min and max.
        # (self.notes, self.length and self.msg are assumed to come from the
        # Animation base / config — notes = assigned note range, length = strip
        # size, msg = the triggering MIDI message. TODO confirm.)
        self.min = min(self.notes)
        self.max = max(self.notes)
        # The min and max can then be used to determine the correct
        # position for a set of LEDs, depending on the note pressed.
        self.width = round((1. / (self.max - self.min)) * self.length)
        self.start = (self.msg.note - self.min) * self.width
        self.end = self.start + self.width
        self.midpoint = self.start + (self.width // 2)

    def run(self, deltaMs):
        """Advance the animation *deltaMs* seconds after note-on."""
        width = self.find_width(deltaMs)
        if width == 1.0 or self.oscillation == 0.0:
            self.refresh_params()
        # Fade in over the attack period, then hold at full brightness.
        if deltaMs < self.attack:
            self.brightness = deltaMs / self.attack
        else:
            self.brightness = 1.0
        self.draw(width)

    def find_width(self, deltaMs):
        """Return the current block width; a non-zero oscillation makes it pulse."""
        if self.oscillation != 0:
            oscillation = self.normalize(self.oscillation, 0, 10)
            return 30 * round(self.width * abs(math.sin(deltaMs * oscillation)))
        return self.width

    def draw(self, width):
        """Paint a brightness-faded block of *width* pixels centred on the midpoint."""
        try:
            rgb = self.hsb_to_rgb(self.hue, self.saturation, self.brightness)
            self.pixels[self.midpoint] = rgb * 0.9 * self.master
            # Fade linearly from the midpoint outwards in both directions.
            for px in reversed(xrange(1, int(round(width // 2)))):
                factor = 1 - (px / (width // 2))
                self.pixels[self.midpoint + px] = rgb * factor * self.master
                if self.midpoint - px >= 0:
                    self.pixels[self.midpoint - px] = rgb * factor * self.master
        except IndexError:
            # Pixels that fall off the end of the strip are silently skipped.
            pass

    def off(self, deltaMs):
        """Fade the block out over the decay period after note-off."""
        width = self.find_width(deltaMs)
        if deltaMs < self.decay:
            self.brightness = 1 - (deltaMs / self.decay)
        else:
            self.brightness = 0.0
        self.draw(width)
| mykolasmith/sierra | animation/positional.py | positional.py | py | 2,065 | python | en | code | 4 | github-code | 90 |
35372275723 | from Nodes import Nodes
class HillClimbing:
def __init__(self, state):
super().__init__()
self.start_node = Nodes(state)
def first_choice(self, max_sidesteps=0):
current_node = self.start_node
current_cost = current_node.get_cost()
moves = 0; side_steps = 0
while True:
next_child, next_cost = current_node.first_choice_child()
if(next_cost > current_cost):
return current_node.state, current_cost, (next_cost == current_cost), moves
if(next_cost == current_cost):
side_steps += 1
if side_steps > max_sidesteps:
return current_node.state, current_cost, (next_cost == current_cost), moves
else:
side_steps = 0
current_node, current_cost = next_child, next_cost
moves += 1
# Demo entry point: run both search variants on a fixed initial state.
# NOTE(review): hill.steepest_ascent() is not defined on HillClimbing anywhere
# in this file — as written this raises AttributeError; confirm the method
# exists (it may have been removed or live elsewhere).
if __name__ == "__main__":
    # Initial State
    start_state = (4,5,6,3,4,5,6,5)
    print("Initial state:")
    Nodes.visualize(start_state)
    hill = HillClimbing(start_state)
    print("Running Steepest-Ascent:")
    end_state, end_cost, is_plateau, moves = hill.steepest_ascent()
    status = "Plateau reached!" if is_plateau else "Local Minima reached!"
    print(status+" state:{} cost:{}".format(end_state, end_cost))
    Nodes.visualize(end_state)
    print("Running First-Choice (with 100 sidesteps):")
    end_state, end_cost, is_plateau, moves = hill.first_choice(100)
    status = "Plateau reached!" if is_plateau else "Local Minima reached!"
    print(status+" state:{} cost:{}".format(end_state, end_cost))
    Nodes.visualize(end_state)
19385839829 | import json
import github_util
import requests_cache
import requests
# Parsed contents of config.json; populated by carregar_ambiente().
config = None
# Module holding the repository "getter" classes, resolved by name at runtime.
getters = __import__("repository_getters")
# GitHub access token; populated by carregar_ambiente().
token = None
def carregar_ambiente():
    """Load config.json, ensure a valid GitHub token, and enable the request cache.

    Returns True on success, False when the configuration file contains null.
    Fix: previously, when the stored token was already valid, the function still
    referenced the undefined local ``access_token`` (NameError) and rewrote
    config.json unconditionally; the token is now taken from the config and the
    file is only rewritten when a new token was entered.
    """
    global config, token
    with open('config.json') as config_file:
        config = json.load(config_file)
    if config is None:
        print("[!] Arquivo de configuração não definido.")
        return False
    # Checar token
    access_token = config.get('github_token', '')
    if not github_util.check_token(access_token):
        access_token = input("Token invalido. Cole seu token do github: ")
        while not github_util.check_token(access_token):
            access_token = input("Token invalido. Cole seu token do github: ")
        # Persist the newly obtained token.
        config['github_token'] = access_token
        with open("config.json", "w") as config_file:
            config_file.write(json.dumps(config, sort_keys=True, indent=4))
    token = access_token
    requests_cache.install_cache('main_cache', expire_after=None)
    return True
def get_repositories():
    """Collect repositories from every getter configured in config.json."""
    carregar_ambiente()
    repositories = []
    for getter_spec in config['getters']:
        getter_class = getattr(getters, getter_spec["name"])
        repositories.extend(getter_class(getter_spec['data']).list())
    return repositories
def ver_historico_arquivo():
    """Prompt for a file path and print its commit history across all repositories."""
    filepath = input("Digite o caminho do arquivo: ")
    print("[...] Gerando historico de arquivo")
    for repo in get_repositories():
        changes = github_util.list_file_history(repo, token, filepath)
        print(f'--------\nrepositório {repo}, mudanças no arquivo {filepath}.\n--------')
        for change in changes:
            print(f"{change['commit']['author']['name']}: {change['commit']['message']}")
| pabloufrn/visual-artefatos | terminal_interface.py | terminal_interface.py | py | 1,601 | python | en | code | 0 | github-code | 90 |
42785883519 | from tkinter import messagebox
import roll
import stats_and_mods
from roll import Roller, roll_skill, roll_initiative, roll_damage, roll_to_hit
from gui_helpers import toggle_active_disabled, autocheck_checkboxes, depress_button, \
release_button, display_roll_result
import skill_check
# TODO: main menu: display stats, check (leads to menu or dropdown menu to select skill)
# TODO: display stats
import tkinter as tk
class Menu:
roller = Roller()
def __init__(self):
self.window = tk.Tk()
self.window.title("Main Menu")
self.window.geometry("450x350")
self.skill_check_menu = skill_check.Skill_Check_Menu()
# self.roll_menu = roll.Roll_Menu()
# etc...
self.skill_check_button = tk.Button(
self.window,
text="Skill Check",
command=lambda: self.skill_check_menu.display(self.window, self.roller) # TODO: remove lambda if no params?
)
self.skill_check_button.pack()
self.roll_initiative_button = tk.Button(self.window, text="Roll Initiative", command=self.roll_initiative_menu)
self.roll_initiative_button.pack()
self.roll_to_hit_button = tk.Button(self.window, text="Roll to Hit", command=self.roll_to_hit_menu)
self.roll_to_hit_button.pack()
self.roll_for_damage_button = tk.Button(self.window, text="Roll for Damage", command=self.roll_for_damage_menu)
self.roll_for_damage_button.pack()
self.update_character_stats_button = tk.Button(self.window, text="Character Stats...",
command=self.character_stats_menu)
self.update_character_stats_button.pack()
self.current_roll_result = None
def main_menu(self):
self.window.mainloop()
def get_selected_skill(self, skill_var):
selected_skill = skill_var.get()
print(selected_skill)
def roll_initiative_menu(self):
print("Roll Initiative button clicked")
roll_initiative_menu = tk.Toplevel(self.window)
roll_initiative_menu.title("Skill Check")
roll_initiative_menu.geometry("300x200")
output_box = tk.Text(roll_initiative_menu, width=15, height=4)
roll_button = tk.Button(
roll_initiative_menu,
text="Roll!",
command=lambda: display_roll_result(output_box, lambda: roll_initiative(advantage=False), self.roller)
)
roll_button.grid(row=0, sticky="nsew", pady=10, padx=100)
output_box.grid(row=1, sticky="nsew", pady=20, padx=15)
roll_initiative_menu.grid_rowconfigure(0, weight=0)
roll_initiative_menu.grid_rowconfigure(1, weight=3)
roll_initiative_menu.grid_columnconfigure(0, weight=1)
def roll_to_hit_menu(self):
# TODO: implement with weapon field and advantage, disadvantage checkboxes
roll_to_hit_menu = tk.Toplevel(self.window)
roll_to_hit_menu.title("Roll to hit")
roll_to_hit_menu.geometry("550x300")
weapon_label = tk.Label(roll_to_hit_menu, text="Enter weapon used:")
weapon = tk.StringVar(value="None")
weapon.set("None")
weapon_dropdown = tk.OptionMenu(roll_to_hit_menu, weapon, *stats_and_mods.weapons_stats.keys())
advantage_var = tk.BooleanVar()
advantage_checkbutton = tk.Checkbutton(
roll_to_hit_menu,
text="Advantage",
variable=advantage_var,
command=lambda: toggle_active_disabled(advantage_var, [disadvantage_checkbutton])
)
disadvantage_var = tk.BooleanVar()
disadvantage_checkbutton = tk.Checkbutton(
roll_to_hit_menu,
text="Disadvantage",
variable=disadvantage_var,
command=lambda: toggle_active_disabled(disadvantage_var, [advantage_checkbutton])
)
output_box = tk.Text(roll_to_hit_menu, width=40, height=15)
try:
roll_button = tk.Button(
roll_to_hit_menu,
text="Roll!",
command = lambda: display_roll_result(
output_box,
lambda: roll_to_hit(
weapon.get(),
advantage=advantage_var.get(),
disadvantage=disadvantage_var.get()
),
self.roller
)
)
except KeyError as e: # TODO test this out
messagebox.showerror("Error", "Please enter a valid skill name.")
weapon_label.grid(row=0, pady=10)
weapon_dropdown.grid(row=1, column=0, padx=10, pady=15, sticky="w")
advantage_checkbutton.grid(row=2, column=0, pady=10, sticky="w")
disadvantage_checkbutton.grid(row=3, column=0, pady=10, sticky="w")
roll_button.grid(row=4, column=0, pady=20, sticky="w")
output_box.grid(row=0, rowspan=6, column=1)
def roll_for_damage_menu(self):
roll_for_damage_menu = tk.Toplevel(self.window)
roll_for_damage_menu.title("Roll for damage")
roll_for_damage_menu.geometry("550x300")
weapon_label = tk.Label(roll_for_damage_menu, text="Enter weapon used:")
weapon = tk.StringVar(value="None")
weapon.set("None")
weapon_dropdown = tk.OptionMenu(roll_for_damage_menu, weapon, *stats_and_mods.weapons_stats.keys())
advantage_var = tk.BooleanVar()
advantage_checkbutton = tk.Checkbutton(
roll_for_damage_menu,
text="Advantage",
variable=advantage_var,
command=lambda: toggle_active_disabled(
advantage_var,
[disadvantage_checkbutton]
)
)
sneak_var = tk.BooleanVar()
sneak_checkbutton = tk.Checkbutton(
roll_for_damage_menu,
text="Sneak",
variable=sneak_var,
command=lambda: self.combined_functions([
lambda: autocheck_checkboxes(
sneak_var,
[advantage_checkbutton]
),
lambda: toggle_active_disabled(
sneak_var,
[disadvantage_checkbutton]
)
])
)
disadvantage_var = tk.BooleanVar()
disadvantage_checkbutton = tk.Checkbutton(
roll_for_damage_menu,
text="Disadvantage",
variable=disadvantage_var,
command=lambda: toggle_active_disabled(disadvantage_var, [advantage_checkbutton, sneak_checkbutton])
)
output_box = tk.Text(roll_for_damage_menu, width=40, height=15)
roll_button = tk.Button(roll_for_damage_menu,
text="Roll!",
command = lambda: self.combined_functions([lambda: display_roll_result(
output_box,
lambda: roll_damage(
weapon.get(),
advantage=advantage_var.get(),
disadvantage=disadvantage_var.get(),
sneak=sneak_var.get()
),
self.roller
), lambda: print(f"disadvantage_var: {disadvantage_var.get()}, advantage_var: {advantage_var.get()}, snear_var: {sneak_var.get()}")]) # TODO: ............. /????
)
weapon_dropdown.grid()
advantage_checkbutton.grid()
disadvantage_checkbutton.grid()
roll_button.grid()
weapon_label.grid(row=0, pady=10)
weapon_dropdown.grid(row=1, column=0, padx=15)
advantage_checkbutton.grid(row=2, column=0, padx=15, pady=10, sticky="w")
sneak_checkbutton.grid(row=3, column=0, padx=15, pady=10, sticky="w")
disadvantage_checkbutton.grid(row=4, column=0, padx=15, pady=10, sticky="w")
roll_button.grid(row=5, column=0)
output_box.grid(row=0, rowspan=6, column=1, padx=15)
# Can I sneak attack? Opens new menu to check conditions.
sneak_eligibility_label = tk.Label(roll_for_damage_menu, text="Can I sneak attack?", cursor="hand2", fg="blue")
sneak_eligibility_label.bind("<Button-1>", self.sneak_eligibility_menu)
sneak_eligibility_label.place(relx=0.22, rely=0.9, anchor="e", bordermode="outside")
# def sneak_eligibility_menu(self, weapon):
def sneak_eligibility_menu(self, empty):
sneak_menu = tk.Toplevel(self.window)
sneak_menu.title("Can I sneak attack?")
sneak_menu.geometry("550x300")
weapon_label = tk.Label(sneak_menu, text="Enter weapon used:")
weapon_label.pack(pady=10)
weapon = tk.StringVar(value="None")
weapon.set("None")
weapon_dropdown = tk.OptionMenu(sneak_menu, weapon, *stats_and_mods.weapons_stats.keys())
weapon_dropdown.pack()
# Are you disadvantaged? Yes / No
disadvantage_var = tk.BooleanVar()
disadvantage_label = tk.Label(sneak_menu, text="Are you disadvantaged?")
disadvantage_label.pack(pady=10)
yes_disadvantaged_button = tk.Button(sneak_menu, text="Yes", command=lambda: self.combined_functions(
# [lambda: depress_button(yes_disadvantaged_button[1], [no_disadvantaged_button]),
[lambda: disadvantage_var.set(True),
lambda: depress_button(empty, yes_disadvantaged_button),
lambda: release_button(empty, no_disadvantaged_button),
lambda: toggle_active_disabled(disadvantage_var, [yes_advantaged_button, no_advantaged_button])]
# lambda: autodisable_checkbox(disadvantage_var, no_advantaged_button)]
))
no_disadvantaged_button = tk.Button(sneak_menu, text="No", command=lambda: self.combined_functions(
[lambda: disadvantage_var.set(False),
lambda: depress_button(empty, no_disadvantaged_button),
release_button(empty, yes_disadvantaged_button),
lambda: toggle_active_disabled(disadvantage_var, [yes_advantaged_button, no_advantaged_button])]
# lambda: autodisable_checkbox(disadvantage_var, no_advantaged_button)]
))
yes_disadvantaged_button.pack(side="left")
no_disadvantaged_button.pack(side="left")
# Do you have advantage? Yes / No
advantage_var = tk.BooleanVar()
advantage_label = tk.Label(sneak_menu, text="Do you have advantage?")
advantage_label.pack(pady=10)
yes_advantaged_button = tk.Button(sneak_menu, text="Yes", command=lambda: self.combined_functions([lambda: depress_button(yes_advantaged_button, [no_advantaged_button]), lambda: advantage_var.set(True)]))
no_advantaged_button = tk.Button(sneak_menu, text="No", command=lambda: self.combined_functions([lambda: depress_button(no_advantaged_button, [yes_advantaged_button]), lambda: advantage_var.set(False)]))
yes_advantaged_button.pack(side="left")
no_advantaged_button.pack(side="left")
# Are you and another enemy of the target flanking the target? Yes / No
flanking_var = tk.BooleanVar()
flanking_label = tk.Label(sneak_menu, text="Are you and another enemy of the target flanking the target?")
flanking_label.pack(pady=10)
yes_flanking_button = tk.Button(sneak_menu, text="Yes", command=lambda: self.combined_functions(
[lambda: depress_button(yes_flanking_button, [no_flanking_button]), lambda: flanking_var.set(True)]))
no_flanking_button = tk.Button(sneak_menu, text="No", command=lambda: self.combined_functions(
[lambda: depress_button(no_flanking_button, [yes_flanking_button]), lambda: flanking_var.set(False)]))
yes_flanking_button.pack(side="left")
no_flanking_button.pack(side="left")
def display_sneak_result():
print(roll.get_sneak_eligibility(weapon, advantage=advantage_var, disadvantage=disadvantage_var, flanking=flanking_var))
evaluate_button = tk.Button(sneak_menu, text="Evaluate", command=display_sneak_result)
evaluate_button.pack()
    def character_stats_menu(self):
        """Placeholder: will open the character-stats editor; currently only logs."""
        print("Update Character Stats button clicked")
    def show_roll_history(self):
        """Placeholder: will display past roll results; currently only logs."""
        print("Roll history button clicked")
        # Sketch of the eventual dialog, kept for reference:
        # roll_initiative_menu = tk.Toplevel(self.window)
        # roll_initiative_menu.title("Skill Check")
        # roll_initiative_menu.geometry("550x300")
        #
        # roll_button = tk.Button(
        #     roll_initiative_menu,
        #     text="Roll!",
        #     command=lambda: self.display_roll_result(roll_initiative_menu, lambda: roll_initiative(advantage=False))
        # )
        # roll_button.pack(pady=20)
def combined_functions(self, func_list):
for f in func_list: f()
| bdyson556/DnDRoller | gui.py | gui.py | py | 12,914 | python | en | code | 0 | github-code | 90 |
73088523177 | import itertools as it
from dataclasses import dataclass
from functools import cache, reduce
from typing import TextIO, override
from advent.common import BaseAdventDay
cached_ord = cache(ord)
@dataclass
class Day3(BaseAdventDay[list[str]]):
    """Advent of Code 2022, day 3: rucksack item priorities."""

    def get_score(self, letter: str) -> int:
        """Priority of an item: a-z -> 1..26, A-Z -> 27..52."""
        code = ord(letter)
        if cached_ord("a") <= code <= cached_ord("z"):
            return code - cached_ord("a") + 1
        if cached_ord("A") <= code <= cached_ord("Z"):
            return code - cached_ord("A") + 27
        raise ValueError(f"Invalid letter {letter}")

    @override
    def parse_input(self, input: TextIO) -> list[str]:
        return [line.strip() for line in input]

    @override
    def _run_1(self, input: list[str]) -> int:
        # Part 1: for each rucksack, score the item shared by both halves.
        total = 0
        for sack in input:
            mid = len(sack) // 2
            common = frozenset(sack[:mid]) & frozenset(sack[mid:])
            total += max(self.get_score(item) for item in common)
        return total

    @override
    def _run_2(self, input: list[str]) -> int:
        # Part 2: score the single badge item shared by each group of three.
        total = 0
        for trio in it.batched(input, 3):
            shared = reduce(set[str].__and__, map(set, trio))
            assert shared
            total += self.get_score(shared.pop())
        return total
| DavideCanton/advent-of-code-2022 | advent/day3.py | day3.py | py | 1,305 | python | en | code | 0 | github-code | 90 |
18318130489 | h,w,k=map(int,input().split())
# Read the grid; ans[i][j] will hold the piece number (as a string) for cell (i, j).
s = []
ans = []
for i in range(h):
    s.append(list(input()))
    ans.append(['1'] * w)
# cnt: next piece id to hand out; flag: last piece id used on the current
# strawberry row; flag2: count of consecutive strawberry-free rows whose
# assignment is deferred until the next strawberry row (or the bottom edge).
cnt = 0
flag = 0
flag2 = 0
for i in range(h):
    if flag2 == 0:
        cnt += 1
    if s[i].count('#') > 0:
        # Row contains strawberries: it consumes ids cnt .. cnt + #berries - 1.
        flag = s[i].count('#') + cnt - 1
        for j in range(w):
            ans[i][j] = str(cnt)
            if flag2 > 0:
                # Copy this row's ids into the pending empty rows above.
                for k in range(1, flag2 + 1):
                    ans[i - k][j] = str(cnt)
            if s[i][j] == '#':
                # Advance to the next id after each berry except the last one.
                if cnt != flag:
                    cnt += 1
        flag2 = 0
    else:
        # Empty row: defer its assignment.
        flag2 += 1
    if i == h - 1:
        # Trailing empty rows copy the last assigned row's ids.
        for j in range(w):
            for k in range(flag2):
                ans[i - k][j] = ans[i - flag2][j]
for i in range(h):
print(' '.join(ans[i])) | Aasthaengg/IBMdataset | Python_codes/p02855/s635012935.py | s635012935.py | py | 741 | python | en | code | 0 | github-code | 90 |
34244434047 | # -*- coding: utf-8 -*-
"""
.. module:: TorController
:synopsis: Small wrapper for sending requests to website using Twisted
over the tor network (optionally)
.. moduleauthor:: Adam Drakeford <adamdrakeford@gmail.com>
"""
import txtorcon
import functools
from twisted.python import log
from mamba.utils import config
from twisted.internet import reactor
class TorController(object):
    """Launches and supervises a tor process via txtorcon."""

    def __init__(self):
        super(TorController, self).__init__()
        # Optional zero-argument callback invoked once tor has bootstrapped.
        self.on_finish = None

    def spawn(self, on_finish=None):
        """Spawn a new instance of tor; call *on_finish* when it is ready.

        The callback is stored *before* launching: launch_tor is
        asynchronous, so registering it afterwards raced with
        setup_complete.
        """
        if on_finish is not None:
            self.on_finish = on_finish
        tor_config = txtorcon.TorConfig()
        tor_config.SOCKSPort = config.Application().tor_socks_port
        tor_config.ControlPort = config.Application().tor_control_port
        d = txtorcon.launch_tor(
            tor_config, reactor, progress_updates=self.updates, timeout=60)
        d.addCallback(functools.partial(self.setup_complete, tor_config))
        d.addErrback(self.setup_failed)

    def updates(self, prog, tag, summary):
        """Log the bootstrap progress reported by tor."""
        log.msg("{}%: {}".format(prog, summary))

    def setup_complete(self, tor_config, proto):
        """Called when the setup has completed successfully."""
        log.msg("setup complete:", proto)
        # Guard: previously raised TypeError when spawn() was called
        # without an on_finish callback.
        if self.on_finish is not None:
            self.on_finish()

    def setup_failed(self, arg):
        """Called if tor failed to start."""
        log.msg("SETUP FAILED", arg)
| dr4ke616/LazyTorrent | application/lib/ext_services/tor_controller.py | tor_controller.py | py | 1,618 | python | en | code | 0 | github-code | 90 |
19454771155 | from PIL import Image, ImageDraw
from random import randint
def stega_encrypt():
    """Hide a text message in random pixels of an image.

    Each character's code point is written into the green channel of a
    randomly chosen pixel; the pixel coordinates are appended to
    text.txt (one per line) so the message can be recovered later.
    """
    text = input('text: ')
    img = Image.open(input("img: "))
    draw = ImageDraw.Draw(img)
    width = img.size[0]
    height = img.size[1]
    pix = img.load()
    # 'with' guarantees the key file is closed even if drawing fails
    # (the original opened/closed it manually; it also had an unused
    # `keys` list, now removed).
    with open('text.txt', 'w') as key_file:
        for code in (ord(ch) for ch in text):
            key = (randint(1, width - 10), randint(1, height - 10))
            r, g, b = pix[key][:3]
            # Store the character code in the green channel.
            draw.point(key, (r, code, b))
            key_file.write(str(key) + '\n')
    img.save("supermemc.png", "PNG")
stega_encrypt()
| jeka10293847/img | Hide text.py | Hide text.py | py | 597 | python | en | code | 0 | github-code | 90 |
18109833029 | n,q = map(int, input().split())
from collections import deque

# Round-robin scheduling (time quantum q): each process runs at most q
# units per turn; unfinished processes re-enter the back of the queue.
# deque gives O(1) pops from the front; the original list.pop(0) was O(n).
queue = deque()
for _ in range(n):
    proc_name, proc_time = input().split()
    queue.append((proc_name, int(proc_time)))
elapsed = 0
finished = []  # (name, completion time) in completion order
while queue:
    proc_name, remaining = queue.popleft()
    if remaining <= q:
        elapsed += remaining
        finished.append((proc_name, elapsed))
    else:
        elapsed += q
        queue.append((proc_name, remaining - q))
for proc_name, done_at in finished:
    print(proc_name, done_at)
| Aasthaengg/IBMdataset | Python_codes/p02264/s531381968.py | s531381968.py | py | 515 | python | en | code | 0 | github-code | 90 |
33783736648 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import os
import sys
import requests
import traceback
from selenium import webdriver
from multiprocessing import Pool, cpu_count, freeze_support
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def validatetitle(title):
    """Strip filesystem-forbidden characters and spaces from *title*."""
    forbidden = r'[\/\\\:\*\?\"\<\>\|]'
    cleaned = re.sub(forbidden, "", title)
    return cleaned.replace(' ', '')
class Chapter():
    """One downloadable chapter of a comic hosted on manhua.dmzj.com."""

    def __init__(self, comic_title, comic_dir, chapter_title, chapter_url):
        self.comic_title, self.comic_dir, self.chapter_title, self.chapter_url = comic_title, comic_dir, chapter_title, chapter_url
        # Page images are stored under <comic_dir>/<sanitized chapter title>/.
        self.chapter_dir = os.path.join(self.comic_dir, validatetitle(self.chapter_title))
        if not os.path.exists(self.chapter_dir):
            os.mkdir(self.chapter_dir)
        # List of (page_number, image_url) tuples, filled by get_pages().
        self.pages = []

    def get_pages(self):
        """Scrape the chapter page with PhantomJS and collect page image URLs.

        Returns a list of (page_number, url) tuples; empty on failure.
        """
        r_slt = r'onchange="select_page\(\)">([\s\S]*?)</select>'
        r_p = r'<option value="(.*?)".*?>第(\d*?)页<'
        try:
            # Disable image loading: only the rendered HTML is needed.
            dcap = dict(DesiredCapabilities.PHANTOMJS)
            dcap['phantomjs.page.settings.loadImages'] = False
            driver = webdriver.PhantomJS(desired_capabilities=dcap)
            driver.get(self.chapter_url)
            text = driver.page_source
            st = re.findall(r_slt, text)[0]
            self.pages = [(int(p[-1]), p[0]) for p in re.findall(r_p, st)]
        except Exception:
            traceback.print_exc()
            self.pages = []
        except KeyboardInterrupt:
            # KeyboardInterrupt is a BaseException, so the clause above
            # does not swallow it; re-raise to abort cleanly.
            raise KeyboardInterrupt
        finally:
            # NOTE(review): if PhantomJS construction itself raised,
            # `driver` is unbound here and this line raises NameError.
            driver.quit()
        print('Got %d pages in chapter %s' %
              (len(self.pages), self.chapter_title))
        return self.pages

    def download_chapter(self):
        """Download all pages of this chapter concurrently via a process pool."""
        results = []
        if not self.pages:
            print('No page')
            return None
        mp = Pool(min(8, max(cpu_count(), 4)))
        for page in self.pages:
            results.append(mp.apply_async(self.download_page, (page,)))
        mp.close()
        mp.join()
        # Each worker returns 1 on success, 0 on error.
        num = sum([result.get() for result in results])
        print('Downloaded %d pages' % num)

    def download_page(self, page):
        """Download one (page_number, url) tuple; return 1 on success, 0 on error."""
        # NOTE(review): header name 'use-agent' is misspelled ('user-agent');
        # it is a runtime string, so it is preserved as-is here.
        headers = {
            'use-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
            'referer': self.chapter_url
        }
        n = page[0]
        url = page[-1]
        if not os.path.exists(self.chapter_dir):
            os.mkdir(self.chapter_dir)
        path = os.path.join(self.chapter_dir, '%s.%s' % (str(n), url.split('.')[-1]))
        try:
            print('Downloading page %s into file %s' % (n, path))
            # Chapter URLs are scheme-relative; prepend https.
            res = requests.get('https:%s' % url, headers=headers)
            data = res.content
            with open(path, 'wb') as f:
                f.write(data)
        except Exception:
            e = traceback.format_exc()
            print('Got eorr when downloading picture\n %s' % e)
            return 0
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        else:
            return 1
class Comic():
    """A comic on manhua.dmzj.com: scraped metadata plus its chapters."""

    def __init__(self, comic_url, comic_title=None, comic_dir=None):
        self.comic_url = comic_url
        # Scrape title/description/cover and the chapter list up front.
        n_comic_title, self.des, self.cover, self.chapter_urls = self.get_info()
        self.chapter_num = len(self.chapter_urls)
        self.comic_title = (comic_title if comic_title else n_comic_title)
        self.comic_dir = os.path.abspath((comic_dir if comic_dir else validatetitle(self.comic_title)))
        if not os.path.exists(self.comic_dir):
            os.mkdir(self.comic_dir)
        print('There are %s chapters in comic %s' % (self.chapter_num, self.comic_title))
        # Map chapter title -> Chapter helper object.
        self.chapters = {
            info[0]: Chapter(self.comic_title, self.comic_dir, *info) for info in self.chapter_urls
        }
        self.pages = []

    def get_info(self):
        """Fetch the comic page and extract (title, description, cover URL,
        [(chapter_title, chapter_url), ...])."""
        headers = {
            'use-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
            'Referer': 'http://manhua.dmzj.com/tags/s.shtml'
        }
        root = 'http://manhua.dmzj.com'
        r_title = r'<span class="anim_title_text"><a href=".*?"><h1>(.*?)</h1></a></span>'
        r_des = r'<meta name=\'description\' content=".*?(介绍.*?)"/>'
        r_cover = r'src="(.*?)" id="cover_pic"/></a>'
        r_cb = r'<div class="cartoon_online_border" >([\s\S]*?)<div class="clearfix"></div>'
        r_cs = r'<li><a title="(.*?)" href="(.*?)" .*?>.*?</a>'
        try:
            text = requests.get(self.comic_url, headers=headers).text
        except ConnectionError:
            # NOTE(review): this catches the builtin ConnectionError, not
            # requests.exceptions.ConnectionError -- confirm which is intended.
            traceback.print_exc()
            raise ConnectionError
        title = re.findall(r_title, text)[0]
        cb = re.findall(r_cb, text)[0]
        # Chapter links are relative; '#@page=1' starts at the first page.
        chapter_urls = [(c[0], '%s%s#@page=1' % (root, c[1])) for c in re.findall(r_cs, cb)]
        cover_url = re.findall(r_cover, text)[0]
        des = re.findall(r_des, text)
        return title, des, cover_url, chapter_urls

    def download_all_chapters(self):
        """Download every chapter of this comic."""
        print('Downloading all chapters of comic %s into dir %s' % (self.comic_title, self.comic_dir))
        for title in self.chapters.keys():
            self.download_chapter(title)

    def download_chapter(self, key, flag=True):
        """Download one chapter by its title *key* (no-op if unknown)."""
        if not key in self.chapters:
            print('No such chapter %s\nThere are chapters:\n%s' % (key, '\n'.join(self.chapters.keys())))
            return None
        # Lazily scrape the page list the first time this chapter is asked for.
        if not self.chapters[key].pages:
            self.pages += self.chapters[key].get_pages()
        self.chapters[key].download_chapter()
if __name__ == '__main__':
    # freeze_support() is required for multiprocessing in frozen Windows builds.
    if sys.platform.startswith('win'):
        freeze_support()
    # Guard against a missing CLI argument: sys.argv[1] previously raised
    # IndexError when the script was run with no argument at all.
    if len(sys.argv) < 2 or not sys.argv[1]:
        print('without start url')
    else:
        path = sys.argv[1]
        print('Download comics based on file %s' % path)
        print('Using multi threads...')
        comic = Comic(path)
        comic.download_all_chapters()
| Packedcat/comic-crawler | comic.py | comic.py | py | 6,012 | python | en | code | 0 | github-code | 90 |
21179647576 |
def solve(n, a):
    """Return 'YES' if *a* can be fully sorted by swapping only
    same-parity elements, else 'NO'.

    Equivalent check: distribute the sorted odd values over the odd
    slots and the sorted even values over the even slots (keeping slot
    positions); the result must equal sorted(a).

    *n* is kept for interface compatibility; the whole of *a* is used.
    The original sorted each parity list twice (sorted() followed by
    .sort()); one sort suffices.
    """
    odd_iter = iter(sorted(x for x in a if x % 2))
    even_iter = iter(sorted(x for x in a if x % 2 == 0))
    rebuilt = [next(odd_iter) if x % 2 else next(even_iter) for x in a]
    return 'YES' if rebuilt == sorted(a) else 'NO'
t = int(input())
for _ in range(t):
n = int(input())
a = list(map(int,input().split()))
print(solve(n,a)) | Tettey1/A2SV | contest_11/C_Parity_Sort.py | C_Parity_Sort.py | py | 630 | python | en | code | 0 | github-code | 90 |
18541064239 | N = int(input())
Ai = list(map(int,input().split()))
#print(Ai)
def create_list():
    """Build prefix sums over the module-level Ai: sn[i] = Ai[0] + ... + Ai[i-1].

    sn[0] is 0 and the list has N + 1 entries.  (The unused `judge`
    local and the `MAXI` alias for N were removed.)
    """
    sn = [0] * (N + 1)
    for i in range(1, N + 1):
        sn[i] = sn[i - 1] + Ai[i - 1]
    return sn
def iCj(i, j):
    """Binomial coefficient C(i, j), computed with exact integer arithmetic.

    The original returned int(chi/mot): true division goes through a
    float and loses precision once the operands exceed 2**53.  Floor
    division is exact here because the product of j consecutive
    integers is always divisible by j!.
    """
    numer = 1
    denom = 1
    for x in range(j):
        numer *= i - x
        denom *= x + 1
    return numer // denom
# Count subarrays with sum 0: a zero-sum subarray (l, r] corresponds to a
# pair of equal prefix sums sn[l] == sn[r].  After sorting, count the C(k, 2)
# pairs inside each run of k equal values.
base = create_list()
count = 0
base.sort()
# (An earlier O(N^2) brute-force pair count was left here commented out;
# condensed away for readability.)
i = 0
while(i < N + 1):
    # Measure the length of the run of values equal to base[i].
    this = 1
    j = i + 1
    while(j < N + 1 and base[j] == base[i]):
        this += 1
        j += 1
    if(this >= 2):
        # C(run, 2) pairs of equal prefix sums within this run.
        count += iCj(this, 2)
    i += this
print(count) | Aasthaengg/IBMdataset | Python_codes/p03363/s884270247.py | s884270247.py | py | 862 | python | en | code | 0 | github-code | 90 |
18352301559 | a1=int(input())
a2 = input()
tokens = a2.split()
# Filter with a comprehension: the original called res.remove(x) while
# iterating res, which skips the element after each removal.
valid = [x for x in tokens if 1 <= int(x) <= 1000]
sum_all = 0
# Constraints: 1 <= a1 <= 100 terms, each term in [1, 1000].
if 1 <= a1 <= 100:
    if len(tokens) == len(valid):
        for x in valid:
            sum_all = sum_all + (1 / int(x))
# Prints 1 / (sum of reciprocals); raises ZeroDivisionError on invalid
# input, matching the original behavior.
print((1 / sum_all))
22139229602 | import numpy as np
import matplotlib.pyplot as plt
import cv2
# fungsi histogram
def histogram(name, img):
    """Plot a 256-bin intensity histogram of *img* in a window titled *name*."""
    # 256 bins over the [0, 256) intensity range.
    plt.figure(name)
    plt.title(name)
    plt.hist(img.ravel(), 256, [0, 256])
    # plt.savefig('{}.png'.format(name.lower()))
    return plt.show()
# histogram untuk plot array 1D
def histogram2(name, hist):
    """Bar-plot a precomputed 256-entry 1-D histogram array *hist*."""
    bin = [i for i in range(256)]
    plt.figure(name)
    plt.title(name)
    plt.bar(bin, hist)
    # plt.savefig('{}.png'.format(name.lower()))
    return plt.show()
# fungsi convert grayscale dengan perkalian matriks #np.dot
def bgr2gray(img):
    """Convert a BGR image array to grayscale using ITU-R 601 luma weights."""
    blue = img[:, :, 0]
    green = img[:, :, 1]
    red = img[:, :, 2]
    return 0.114 * blue + 0.587 * green + 0.299 * red
# fungsi untuk Contrast Stretching img
def contrastStretching(img):
    """Linearly rescale pixel intensities to the full [0, 255] range.

    Returns a float32 array of the same shape.  Fixes two issues in the
    original: a constant image caused a division by zero, and the
    per-pixel double Python loop is replaced by a vectorized expression.
    """
    img = np.asarray(img)
    fmin = float(img.min())
    fmax = float(img.max())
    span = fmax - fmin
    if span == 0:
        # Constant image: there is no contrast to stretch.
        return np.zeros(img.shape, dtype=np.float32)
    return ((img - fmin) / span * 255).astype(np.float32)
# Fungsi" untuk melakukan histogram equalization
# hit jumlah kemunculan tiap nilai pixel
def jmlh_kemunculan(img):
    """Count occurrences of each intensity 0..255 in *img*.

    Returns a float32 array of 256 counts.  np.bincount replaces the
    original O(rows*cols) Python double loop.
    """
    counts = np.bincount(np.asarray(img).ravel(), minlength=256)
    return counts[:256].astype(np.float32)
# normalisasi histogram
def normalizedProb(muncul, img):
    """Normalize a 256-bin histogram by the total pixel count of *img*."""
    rows, cols = img.shape
    total = rows * cols
    prob = np.zeros(256)
    for bin_idx in range(256):
        prob[bin_idx] = muncul[bin_idx] / total
    return prob
# hit histogram komulatif
def histogram_komulatif(prob):
    """Cumulative distribution over the first 256 histogram bins."""
    # np.cumsum performs the same sequential accumulation as the
    # original running-total loop.
    return np.cumsum(np.asarray(prob, dtype=np.float64)[:256])
# equalized, kali dengan 255
def equalized(hist, img):
    """Map each pixel through the scaled cumulative histogram.

    *hist* is the cumulative distribution in [0, 1]; returns a uint8
    image of the same shape.  Fixes: np.int was removed in NumPy 1.24
    (replaced with int64), and the per-pixel double loop is replaced by
    a fancy-indexed lookup table.
    """
    lut = (np.asarray(hist) * 255).astype(np.int64)
    return lut[np.asarray(img)].astype(np.uint8)
# Lakukan histogram equalization
def histogramEqualization(img):
    """Full pipeline: counts -> normalized pdf -> cdf -> equalized image.

    Returns (pdf, cdf, equalized_image).
    """
    pdf = normalizedProb(jmlh_kemunculan(img), img)
    cdf = histogram_komulatif(pdf)
    return pdf, cdf, equalized(cdf, img)
# read image
img = cv2.imread('C:/Users/TOBI/Documents/Belajar_Python/PCD_prak/p3/car.png')
# convert grayscale
img_gray = bgr2gray(img)
img_gray = img_gray.astype(np.uint8)
# contrast Stretching
img_contrast = contrastStretching(img_gray)
img_contrast = img_contrast.astype(np.uint8)
# histogram equalization
norm, kom, img_equalized = histogramEqualization(img_gray)
# plot Histogram contrast Stretching
Himg_gray = histogram("gambar grayscale", img_gray)
Himg_contrast = histogram("hasil Contrast Stretching", img_contrast)
# plot Histogram histogram Equalization
Hnorm = histogram2('normalisasi', norm)
Hkomulatif = histogram2('komulatif', kom)
Himg_histEqualiz = histogram("hasil \nhistogram equalization", img_equalized)
print(img.shape)
print(img_gray.shape)
print(img_contrast.shape)
# display
cv2.imshow("Gambar grayscale", img_gray)
cv2.imshow("Hasil contrast streching", img_contrast)
cv2.imshow("Hasil histogram equalization", img_equalized)
# cv2.imwrite('melon_grayscale.png', img_gray)
# cv2.imwrite('melon_contrast strech.png', img_contrast)
# cv2.imwrite('melon_histogram equalization.png', img_equalized)
cv2.waitKey(0)
cv2.destroyAllWindows() | tobialbertino/belajar-code | Belajar_Python/PCD_prak/p3/LKP3.py | LKP3.py | py | 3,493 | python | en | code | 2 | github-code | 90 |
36120346046 | import streamlit as st
import pandas as pd
import datetime as dt
from utils.data_gather import get_tdg_latest_version
def display_tdg_stats(df_tdg: pd.DataFrame):
    """Render deployment statistics for the Transport Data Gouv IRVE dataset.

    Expects columns 'id_pdc_itinerance' (charge-point id) and
    'nom_amenageur' (operator name) -- both are counted with nunique().
    """
    st.write("### Statistiques de déploiement de bornes en France")
    st.write("#### WORK IN PROGRESS")
    # Number of distinct charge points in the dataset.
    irve_count = df_tdg["id_pdc_itinerance"].nunique()
    st.write("Nombre de bornes présentes sur Transport Data Gouv : ", str(irve_count))
    # Coverage relative to the estimated national total (~100000 points).
    total_irve = 100000
    irve_coverage = irve_count / total_irve * 100
    irve_coverage = round(irve_coverage, 2)
    st.write("Couverture par rapport au réseau total: ", str(irve_coverage), "%")
    st.write("*(Nombre de bornes recensées en France ~= 100000)*")
    # TODO: add a pie chart?
    # TODO: map with charge points or stations.
    # Number of distinct operators.
    cpo_count = df_tdg["nom_amenageur"].nunique()
    st.write("Nombre d'aménageurs ressensés sur Transport Data Gouv : ", str(cpo_count))
    return
def write():
st.markdown(
"""
## A propos
Qualicharge est un projet visant à analyser les données des bornes de recharges de véhicules électriques afin de mieux comprendre
les problèmes actuels du réseaux de bornes français et pousser les acteurs vers le haut.
"""
)
st.markdown("""Sur cette page vous découvrirez : \
- Des informations générales sur le déploiement des bornes de recharges de véhicules électriques en france \
- Des informations sur la saturation du réseau pendant des weekends très chargés de Mai 2023
""")
tdg_gdf = get_tdg_latest_version(as_gdf=True)
display_tdg_stats(tdg_gdf)
if __name__ == "__main__":
st.set_page_config(
layout="wide", page_icon="⚡️", page_title="Qualicharge -- DataViz"
)
write() | MTES-MCT/qualicharge-geoviz-public | src/assets/pages/About.py | About.py | py | 1,852 | python | fr | code | 0 | github-code | 90 |
18240854859 | import sys
def input(): return sys.stdin.readline().strip()
def mapint(): return map(int, input().split())
sys.setrecursionlimit(10**9)
K = int(input())
ans = set()
def dfs(last, lis):
    """Collect every 'lunlun' number (adjacent digits differ by at most 1)
    into the module-level set `ans`.

    *lis* is the digit string built so far; *last* is its final digit.
    """
    ans.add(int(''.join(lis)))
    if len(lis) == 11:
        # 11 digits is enough to cover the K-th lunlun number for this problem.
        return
    # Extend with a digit equal to, one below, or one above the last digit.
    dfs(last, lis + str(last))
    if last != 0:
        dfs(last - 1, lis + str(last - 1))
    if last != 9:
        dfs(last + 1, lis + str(last + 1))
for i in range(1, 10):
dfs(i, str(i))
ans = list(ans)
ans.sort()
print(ans[K-1]) | Aasthaengg/IBMdataset | Python_codes/p02720/s851328816.py | s851328816.py | py | 475 | python | en | code | 0 | github-code | 90 |
25687874984 | """
Test / Example file for OpenDistro Secuity API
TODO : Implement with unittest
"""
import os
import sys
this_file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(this_file_dir+'/../lib/opendistrosecurity'))
from opendistrosecurity import *
from tenants import *
from roles import *
from rolesmapping import *
import json
from pprint import pprint
## Get Env vars
ODHOST = os.environ.get('ODHOST')
ODPORT= os.environ.get('ODPORT')
ODUSER = os.environ.get('ODUSER')
ODPWD = os.environ.get('ODPWD')
# If nothing, ask
if(ODHOST is None):
print("OpenDistro Address : ")
ODHOST = input()
if(ODPORT is None):
print("OpenDistro Port : ")
ODPORT = input()
if(ODUSER is None):
print("OpenDistro User : ")
ODUSER = input()
if(ODPWD is None):
print("OpenDistro Password : ")
ODPWD = input()
#Create OpenDistro Connection
od = OpenDistro(host=ODHOST,
port=ODPORT,
user=ODUSER,
pwd=ODPWD)
#Check the connection
if (od.check_connection()):
print(">>> We are Connected ...")
else:
print(f"Problem with connecting to {od.host}:{od.port} with user {od.user}")
exit(1)
# TENANTS
#Create a Tenant Client for direct lowlevel objects creations
tenants_client = TenantsClient(od)
test_tenant_name_lowlevel = "_test_tenant_lowlevel"
test_tenant_name_highlevel = "_test_tenant_highlevel"
test_tenant_name_highlevel_updated = "_updated_test_tenant_highlevel"
print(">>> TENANTS >>>")
print(">>> Creating a tenant with the lowlevel methods")
tenants_client.create_tenant(tenant=test_tenant_name_lowlevel,body='{"description":"This tenant was created for testing purpose with the lowlevel API"}')
print(">>> Creating tenant with Objects (High level API)")
tenant_object = OpenDistroTenant(name=test_tenant_name_highlevel,
description="This tenant was created for testing purposes with the High Level API")
print(">>> Display the created tenant :")
tenant_object.display()
print(">>> Saving the created tenant to OpenDistro")
tenant_object.save(tenants_client)
print(">>> Updating this tenant's name and decription")
tenant_object.description = "This tenant was created for testing purposes with the High Level API - Updated"
tenant_object.name = test_tenant_name_highlevel_updated
tenant_object.save(tenants_client)
print(">>> Display the updatedtenant :")
tenant_object.display()
print(">>> Listing tenants from the server:")
created_tenant_sets = (test_tenant_name_lowlevel,
test_tenant_name_highlevel,
test_tenant_name_highlevel_updated)
tenants_dict = tenants_client.get_tenants()
if all (tenant in tenants_dict for tenant in created_tenant_sets):
print(" >>> Success : All created tenants found")
else:
print(" >>> Error : Not found our tenants :(")
print(">>> Print every tenant we find : ")
pprint(tenants_dict.keys())
print(">>> Deleting created test tenants (with low level api)")
[ tenants_client.delete_tenant(tenant) for tenant in created_tenant_sets ]
print(">>> Checking that everything has been deleted")
tenants_dict = tenants_client.get_tenants()
if any(tenant in tenants_dict for tenant in created_tenant_sets):
print(" >>> Error : A test tenant has not been deletedi :(")
else:
print(" >>> Success : Not found any of ourtenants :)")
print(">>> Print every tenant we find : ")
pprint(tenants_dict.keys())
# ROLES
#Create a Role Client for direct lowlevel objects creations
roles_client = RolesClient(od)
test_role_name_lowlevel = "_test_role_lowlevel"
test_role_name_highlevel = "_test_role_highlevel"
test_role_name_highlevel_updated = "_updated_test_role_highlevel"
index_permission1 = IndexPermission()
index_permission1.addindexpattern("index1.1*")
index_permission1.addindexpattern("index1.2*")
index_permission1.addindexpattern("index1.3*")
index_permission1.adddls('{"term" : {"field1.1":"true"}}')
index_permission1.addfls("~filter_me")
index_permission1.addmaskedfield("mask_me")
index_permission1.removeindexpattern("index1.3*")
index_permission1.addallowedaction("allowed_action1");
index_permission2 = IndexPermission()
index_permission2.addindexpattern("index2.1*")
index_permission2.addindexpattern("index2.2*")
index_permission2.addindexpattern("index2.3*")
index_permission2.adddls('{"term" : {"field2.1":"true"}}')
index_permission2.addfls("~filter_me")
index_permission2.addmaskedfield("mask_me")
index_permission2.removeindexpattern("index2.3*")
index_permission2.addallowedaction("allowed_action2");
tenant_permission1 = TenantPermission()
tenant_permission1.addtenantpattern("tenant1*")
tenant_permission1.addtenantpattern("tenant2*")
tenant_permission1.addtenantpattern("tenant3*")
tenant_permission1.addallowedaction("allowed_action1")
r = OpenDistroRole(name=test_role_name_highlevel,
index_permissions=[index_permission1 , index_permission2],
tenant_permissions=[tenant_permission1]
)
print(r._object_dict)
print(">>> ROLES >>>")
print(">>> Creating a role with the low level methods")
roles_client.create_role(role=test_role_name_lowlevel,body='{"description":"This role was created for testing purpose with the lowlevel API"}')
print(">>> Creating a role with the high level methods")
r.save(roles_client)
r.delete(roles_client)
rolesmapping_client = RolesMappingClient(od)
rm = rolesmapping_client.get_rolesmappings()
rm = OpenDistroRoleMapping(role_name="tests")
print(rm.__dict__)
rm.adduser("plop")
rm.addbackendrole("ohyeah")
rm.addhost("host")
rm.save(rolesmapping_client)
| chrousto/opendistrosecurity-py | tests/examples.py | examples.py | py | 5,624 | python | en | code | 0 | github-code | 90 |
37370289518 | from datetime import datetime
from django.conf import settings
from django.db import models
from .excel_models import CarrierExcel
HELP_TEXT = '''
<h3>Only xls files with following structure</h3>
#6. column: Company <br/>
#15. column: Town <br/>
'''
class MatchingRequest(models.Model):
    """An uploaded carrier spreadsheet awaiting matching."""

    # User who uploaded the spreadsheet.
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Uploaded .xls file; HELP_TEXT documents the expected column layout.
    uploaded = models.FileField(
        upload_to='matching',
        help_text=HELP_TEXT,
        max_length=250,
    )
    created_at = models.DateTimeField(default=datetime.now)

    @property
    def carrier_excel(self):
        """Parse the uploaded spreadsheet into a CarrierExcel wrapper.

        NOTE(review): re-reads the file on every access; consider caching
        if accessed repeatedly.
        """
        return CarrierExcel(self.uploaded.read())
| rsiera/matcher | matcher/carriermatcher/models.py | models.py | py | 623 | python | en | code | 0 | github-code | 90 |
33721223660 | import charts
import pandas as pa
def run():
    """Prompt for a continent and render a population pie chart for it.

    Reads data.csv, which must contain 'Continent', 'Country' and
    'World Population Percentage' columns.
    """
    continent = input('Continent ==> ')
    df = pa.read_csv('data.csv')
    # Keep only the rows belonging to the requested continent.
    df = df[df['Continent'] == continent]
    countries = df['Country'].values
    percentages = df['World Population Percentage'].values
    charts.generate_pie_chart(countries, percentages)
    print('Generado para el continente: ', continent)
if __name__ == '__main__':
run() | krif07/curso-python-pip | app/main.py | main.py | py | 388 | python | en | code | 0 | github-code | 90 |
25275325830 | # import flask
from flask import Flask
# tell flask this is the file where it launches from
app = Flask(__name__)
# Create a function that displays 'hello world' on our home page
# The @app decorator tells the function the path to launch from
# "/" means lauching from the home page
# Instead of writing html code inside our python code, we could create a templates folder
# This folder shall carry all html content that we wish to display on a webpage
# We shall also change the hello_word which is our homepage function to index which is the default
# in the templates, we create a file index.html which carry the home page content
# Since templates are now separate, we have to render them. We have to import the
# module render_template
# render_template can take any no. *args e.g. berlin="berlin", name="samson" etc
from flask import render_template
@app.route("/")
def index():
    """Render the home page from templates/index.html."""
    return render_template("index.html")
# Create a function that takes the user to recommended movies
@app.route("/recommender")
def recommender():
some_movies = ["movie1", "movie2", "movie3", "movie4"]
# We render the recommender.html template but also make it dynamic
# by carrying the movies variable
return render_template("recommender.html", movies=some_movies)
# If you go to the recommender url (http://127.0.0.1:<port>/recommender) you should see the movies there
# To run and debug from only this script:
if __name__ == "__main__":
app.run(debug=True, port=5000) | karianjahi/thinema | application.py | application.py | py | 1,557 | python | en | code | 0 | github-code | 90 |
5813154306 | import json
from typing import Optional, Self, Callable
from fastapi import FastAPI
tags_metadata = [
{
"name": "auth",
"description": "Авторизация/регистрация пользователей.",
},
{
"name": "admin",
"description": "Администрирование ресурса"
},
{
"name": "categories",
"description": "Доступ к категориям"
},
{
"name": "secure",
"description": "Методы для аутентификации"
},
{
"name": "users",
"description": "Доступ к пользователям"
},
{
"name": "podcasts",
"description": "Доступ к подкастам"
}
]
class ApiSingleton:
    """Process-wide holder for the single FastAPI application instance."""

    __instance_ptr: Optional[Self] = None

    @classmethod
    def instance(cls) -> Self:
        """Return the shared instance, creating it on first use.

        NOTE(review): not thread-safe; fine as long as it is first
        accessed before any worker threads start.
        """
        if cls.__instance_ptr is None:
            cls.__instance_ptr = ApiSingleton()
        return cls.__instance_ptr

    def __init__(self):
        # The app is created with the module-level tags_metadata for docs.
        self.__api = FastAPI(openapi_tags=tags_metadata)

    def register_route(self, path: str, endpoint: Callable, methods: list[str],
                       tags: Optional[list[str]] = None) -> None:
        """Attach *endpoint* to the app at *path* for the given HTTP methods."""
        if tags is None:
            tags = []
        self.__api.add_api_route(path, endpoint, methods=methods, tags=tags)

    def get_api(self) -> FastAPI:
        """Expose the underlying FastAPI application (e.g. for uvicorn)."""
        return self.__api
| KalbinVV/PodcastAPI | api_singleton.py | api_singleton.py | py | 1,429 | python | en | code | 0 | github-code | 90 |
73309926057 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta
from openerp.exceptions import UserError, ValidationError
class FinancieraComision(models.Model):
    """Commission definition: what it applies to (loan or installment),
    how the amount is computed, and how it is invoiced."""
    _name = 'financiera.comision'

    name = fields.Char('Nombre')
    # active = fields.Boolean("Activa", default=True)
    # Lifecycle: borrador (draft) -> confirmada (in force) -> obsoleta.
    state = fields.Selection([('borrador', 'Borrador'), ('confirmada', 'Confirmada'), ('obsoleta', 'Obsoleta')], string='Estado', readonly=True, default='borrador')
    # Global commissions apply to every entity; otherwise entidad_id is set.
    comision_global = fields.Boolean('Comision global', default=True)
    entidad_id = fields.Many2one('financiera.entidad', 'Entidad')
    # Validity window (end_date may be empty = open-ended).
    start_date = fields.Date('Fecha desde')
    end_date = fields.Date('Fecha hasta')
    # Whether the commission applies to the whole loan or to each installment.
    sobre = fields.Selection([('prestamo', 'Prestamo'), ('cuota', 'Cuota')], string='Aplica sobre', default='prestamo')
    comision_prestamo = fields.Selection([('monto_solicitado', 'Tasa sobre Monto Solicitado'), ('monto_fijo', 'Monto Fijo')], string='Opciones sobre Prestamo')
    comision_cuota = fields.Selection([('monto_cuota', 'Tasa sobre Monto de la Cuota'), ('monto_fijo', 'Monto Fijo')], string='Opciones sobre Cuota')
    tasa = fields.Float('Tasa a aplicar', digits=(16,4))
    monto = fields.Float('Monto a aplicar', digits=(16,2))
    # If set, the commission only triggers for payments using these journals.
    journal_ids = fields.Many2many('account.journal', 'financiera_comision_journal_rel', 'comision_id', 'journal_id', string='Metodo de Pago/Cobro', domain="[('type', 'in', ('cash', 'bank'))]")
    partner_id = fields.Many2one('res.partner', 'Facturara', domain="[('supplier', '=', True)]")
    account_payment_term_id = fields.Many2one('account.payment.term', 'Plazo de pago')
    iva = fields.Boolean('Calcular IVA')
    iva_incluido = fields.Boolean('IVA incluido')
    vat_tax_id = fields.Many2one('account.tax', 'Tasa de IVA', domain="[('type_tax_use', '=', 'purchase')]")
    journal_id = fields.Many2one('account.journal', 'Diario', domain="[('type', '=', 'purchase')]")
    detalle_factura = fields.Char('Detalle en linea de factura')
    company_id = fields.Many2one('res.company', 'Empresa', required=False, default=lambda self: self.env['res.company']._company_default_get('financiera.comision'))

    @api.one
    @api.onchange('sobre')
    def _onchange_sobre(self):
        # Reset type-specific options when switching loan <-> installment.
        self.comision_prestamo = None
        self.comision_cuota = None
        self.tasa = 0
        self.monto = 0

    @api.one
    @api.onchange('entidad_id')
    def _onchange_entidad_id(self):
        # Default the invoicing partner to the entity's supplier partner.
        if len(self.entidad_id) > 0 and len(self.entidad_id.partner_id) > 0:
            self.partner_id = self.entidad_id.partner_id.id

    @api.one
    @api.onchange('comision_global')
    def _onchange_comision_global(self):
        self.entidad_id = None

    @api.one
    @api.onchange('name')
    def _onchange_name(self):
        # Invoice-line description defaults to the commission name.
        self.detalle_factura = self.name

    @api.one
    def confirmar_comision(self):
        self.state = 'confirmada'

    @api.one
    def depreciar_comision(self):
        self.state = 'obsoleta'

    @api.one
    def editar_comision(self):
        self.state = 'borrador'
class ExtendsFinancieraSucursal(models.Model):
    """Adds supplier/banking fields to financiera.entidad.

    NOTE(review): the class name says "Sucursal" but it extends
    'financiera.entidad' -- confirm this is intentional.
    """
    _inherit = 'financiera.entidad'
    _name = 'financiera.entidad'

    # Supplier partner that invoices commissions for this entity.
    partner_id = fields.Many2one('res.partner', 'Proveedor', domain="[('supplier', '=', True)]")
    cuit = fields.Char('CUIT')
    cbu = fields.Char('CBU')
    banco_id = fields.Many2one('res.bank', 'Banco')
    nro_de_cuenta = fields.Char('Nro de cuenta')
class ExtendsResPartner(models.Model):
    """Force partners created from commission/entity screens to be suppliers."""
    _inherit = 'res.partner'
    _name = 'res.partner'

    @api.model
    def default_get(self, values):
        """Default new partners to supplier=True / customer=False when the
        form was opened from a commission or entity view (detected via the
        context's active_model)."""
        rec = super(ExtendsResPartner, self).default_get(values)
        context = dict(self._context or {})
        active_model = context.get('active_model')
        if active_model in ['financiera.grupo.comision', 'financiera.entidad']:
            rec.update({
                'supplier': True,
                'customer': False,
            })
        return rec

    @api.model
    def create(self, values):
        """Same supplier/customer forcing at creation time.

        NOTE(review): the update() after create() issues a second DB write;
        injecting the flags into *values* before super() would avoid it.
        """
        rec = super(ExtendsResPartner, self).create(values)
        context = dict(self._context or {})
        active_model = context.get('active_model')
        # current_uid is unused -- presumably leftover from debugging.
        current_uid = context.get('uid')
        if active_model in ['financiera.grupo.comision', 'financiera.entidad']:
            rec.update({
                'supplier': True,
                'customer': False,
            })
        return rec
class ExtendsAccountInvoice(models.Model):
    """Link invoices back to the loan / installment / payment that
    generated the commission."""
    _inherit = 'account.invoice'
    _name = 'account.invoice'

    comision_prestamo_id = fields.Many2one('financiera.prestamo', 'Comision Prestamo')
    comision_cuota_id = fields.Many2one('financiera.prestamo.cuota', 'Comision Cuota')
    payment_comision_id = fields.Many2one('account.payment', 'Pago generador comision')
class ExtendsAccountPayment(models.Model):
    """Cancel commission invoices together with their originating payment."""
    _inherit = 'account.payment'
    _name = 'account.payment'

    invoice_comisiones_ids = fields.One2many('account.invoice', 'payment_comision_id', 'Facturas de Comisiones')

    @api.multi
    def cancel(self):
        """Cancel the payment, then cancel every commission invoice it
        generated via the invoice_cancel workflow signal."""
        res = super(ExtendsAccountPayment, self).cancel()
        for invoice_id in self.invoice_comisiones_ids:
            invoice_id.signal_workflow('invoice_cancel')
        # Propagate super()'s result (the original silently dropped it).
        return res
class ExtendsFinancieraPrestamo(models.Model):
    """Extends the loan model so supplier commission invoices are generated
    when a loan payment is confirmed."""
    _inherit = 'financiera.prestamo'
    _name = 'financiera.prestamo'

    # Commission invoices generated for this loan.
    invoice_comisiones_ids = fields.One2many('account.invoice', 'comision_prestamo_id', 'Facturas de Comisiones')
    # Commissions that apply to this loan.
    comisiones_ids = fields.Many2many('financiera.comision', 'financiera_prestamo_comision_rel', 'prestamo_id', 'comision_id', string='Comisiones que Aplican')

    def comisiones_prestamo(self):
        """Search the confirmed commissions that apply to this loan, link
        them to the loan and return the matching commission ids."""
        cr = self.env.cr
        uid = self.env.uid
        entidad_id = None
        entidad_id = self.sucursal_id.id
        journal_id = -1
        if len(self.payment_ids) > 0:
            # Use the journal of the most recent payment for matching.
            indice_ultimo_pago = len(self.payment_ids)-1
            journal_id = self.payment_ids[indice_ultimo_pago].journal_id.id
        # NOTE(review): old-API pool/search usage mixed with new-API env.
        comisiones_obj = self.pool.get('financiera.comision')
        # Confirmed 'prestamo' commissions that are either global or tied to
        # this entity, match the payment journal (or have none configured)
        # and are valid on the loan date.
        domain = [
            ('sobre', '=', 'prestamo'),
            ('state', '=', 'confirmada'),
            '|', ('comision_global', '=', True), ('entidad_id', '=', entidad_id),
            '|', ('journal_ids', '=', False), ('journal_ids', 'in', [journal_id]),
            ('start_date', '<=', self.fecha),
            '|', ('end_date', '=', False), ('end_date', '>=', self.fecha),
            ('company_id', '=', self.company_id.id)]
        comisiones_ids = comisiones_obj.search(cr, uid, domain)
        for _id in comisiones_ids:
            # (4, id) = link command: add the commission to the m2m.
            self.comisiones_ids = [(4, _id)]
        return comisiones_ids

    @api.one
    def generar_comision(self, comision_id):
        """Create the supplier invoice for one commission on this loan.

        Skips invoicing when a non-cancelled commission invoice for the
        same amount already exists (re-payment case).
        """
        vat_tax_id = None
        invoice_line_tax_ids = None
        price_unit = 0
        flag_facturar = True
        ail_ids = []
        if comision_id.iva and len(comision_id.vat_tax_id) > 0:
            # Commission is taxed: build the (6, 0, ids) tax command.
            vat_tax_id = comision_id.vat_tax_id.id
            invoice_line_tax_ids = [(6, 0, [vat_tax_id])]
        else:
            vat_tax_id = None
            invoice_line_tax_ids = None
        journal_id = None
        if len(comision_id.journal_id) > 0:
            journal_id = comision_id.journal_id
        else:
            raise UserError("Debe definir el diario de Proveedor en Comisiones -> Configuracion.")
        if comision_id.comision_prestamo == 'monto_solicitado':
            # Percentage of the latest payment amount.
            comision_tasa = comision_id.tasa / 100
            monto = 0
            if len(self.payment_ids) > 0:
                indice_ultimo_pago = len(self.payment_ids)-1
                monto = self.payment_ids[indice_ultimo_pago].amount
            price_unit = monto * comision_tasa
        elif comision_id.comision_prestamo == 'monto_fijo':
            price_unit = comision_id.monto
        if len(self.payment_ids) > 0:
            # If there were previous payments and a commission invoice for
            # the same amount already exists, do not invoice it again.
            for invoice_id in self.invoice_comisiones_ids:
                if invoice_id.state != 'cancel' and invoice_id.amount_total == price_unit:
                    flag_facturar = False
        if comision_id.iva and comision_id.iva_incluido:
            # Tax-inclusive amount: back out the VAT from the unit price.
            price_unit = price_unit / (1+(comision_id.vat_tax_id.amount/100))
        if flag_facturar:
            # Create invoice line
            ail = {
                'name': comision_id.detalle_factura,
                'quantity': 1,
                'price_unit': price_unit,
                # 'vat_tax_id': vat_tax_id,
                'invoice_line_tax_ids': invoice_line_tax_ids,
                'report_invoice_line_tax_ids': invoice_line_tax_ids,
                'account_id': journal_id.default_debit_account_id.id,
                'company_id': comision_id.company_id.id,
            }
            ail_ids.append((0, 0, ail))
            # Supplier invoice header for the commission's partner.
            account_invoice_supplier = {
                'description_financiera': comision_id.detalle_factura,
                'account_id': comision_id.partner_id.property_account_payable_id.id,
                'partner_id': comision_id.partner_id.id,
                'journal_id': journal_id.id,
                'currency_id': self.currency_id.id,
                'company_id': comision_id.company_id.id,
                'date': datetime.now(),
                'date_invoice': datetime.now(),
                'invoice_line_ids': ail_ids,
                'type': 'in_invoice',
                'payment_term_id': comision_id.account_payment_term_id.id,
                'sucursal_id': self.sucursal_id.id,
            }
            new_invoice_id = self.env['account.invoice'].create(account_invoice_supplier)
            # NOTE(review): assigning a plain id list to a One2many --
            # confirm a (4, id) link command is not intended here.
            self.invoice_comisiones_ids = [new_invoice_id.id]
            return new_invoice_id

    @api.one
    def confirmar_pagar_prestamo(self, payment_date, payment_amount, payment_journal_id, payment_communication):
        """After confirming the loan payment, generate every applicable
        commission invoice and attach it to the last payment."""
        rec = super(ExtendsFinancieraPrestamo, self).confirmar_pagar_prestamo(payment_date, payment_amount, payment_journal_id, payment_communication)
        comisiones_ids = self.comisiones_prestamo()
        for _id in comisiones_ids:
            comision_id = self.env['financiera.comision'].browse(_id)
            # generar_comision is @api.one, so it returns a list.
            invoice_id = self.generar_comision(comision_id)
            self.payment_last_id.invoice_comisiones_ids = [invoice_id[0].id]
class ExtendsFinancieraPrestamoCuota(models.Model):
    """Extends the loan installment model so supplier commission invoices
    are generated when an installment is collected.

    NOTE(review): this mirrors ExtendsFinancieraPrestamo almost line for
    line; consider factoring the shared invoicing logic.
    """
    _inherit = 'financiera.prestamo.cuota'
    _name = 'financiera.prestamo.cuota'

    # Commission invoices generated for this installment.
    invoice_comisiones_ids = fields.One2many('account.invoice', 'comision_cuota_id', 'Facturas de Comisiones')
    # Commissions that apply to this installment.
    comisiones_ids = fields.Many2many('financiera.comision', 'financiera_cuota_comision_rel', 'cuota_id', 'comision_id', string='Comisiones que Aplican')

    def comisiones_cuota(self):
        """Search the confirmed commissions that apply to this installment,
        link them to it and return the matching commission ids."""
        cr = self.env.cr
        uid = self.env.uid
        entidad_id = None
        entidad_id = self.sucursal_id.id
        journal_id = -1
        payment_date = None
        if len(self.payment_ids) > 0:
            # Use journal and date of the most recent collection payment.
            indice_ultimo_pago = len(self.payment_ids)-1
            journal_id = self.payment_ids[indice_ultimo_pago].journal_id.id
            payment_date = self.payment_ids[indice_ultimo_pago].payment_date
        comisiones_obj = self.pool.get('financiera.comision')
        # Confirmed 'cuota' commissions, global or tied to this entity,
        # matching the payment journal and valid on the payment date.
        domain = [
            ('sobre', '=', 'cuota'),
            ('state', '=', 'confirmada'),
            '|', ('comision_global', '=', True), ('entidad_id', '=', entidad_id),
            '|', ('journal_ids', '=', False), ('journal_ids', 'in', [journal_id]),
            ('start_date', '<=', payment_date),
            '|', ('end_date', '=', False), ('end_date', '>=', payment_date),
            ('company_id', '=', self.company_id.id)]
        comisiones_ids = comisiones_obj.search(cr, uid, domain)
        for _id in comisiones_ids:
            # (4, id) = link command: add the commission to the m2m.
            self.comisiones_ids = [(4, _id)]
        return comisiones_ids

    @api.one
    def generar_comision(self, comision_id):
        """Create the supplier invoice for one commission on this
        installment; skips invoicing when a non-cancelled invoice for the
        same amount already exists."""
        vat_tax_id = None
        invoice_line_tax_ids = None
        price_unit = 0
        flag_facturar = True
        ail_ids = []
        if comision_id.iva and len(comision_id.vat_tax_id) > 0:
            vat_tax_id = comision_id.vat_tax_id.id
            invoice_line_tax_ids = [(6, 0, [vat_tax_id])]
        else:
            vat_tax_id = None
            invoice_line_tax_ids = None
        # NOTE(review): unlike the loan variant, no UserError is raised
        # when the commission has no journal configured -- confirm.
        journal_id = comision_id.journal_id
        if comision_id.comision_cuota == 'monto_cuota':
            # Percentage of the latest collection amount.
            comision_tasa = comision_id.tasa / 100
            monto = 0
            if len(self.payment_ids) > 0:
                indice_ultimo_pago = len(self.payment_ids)-1
                monto = self.payment_ids[indice_ultimo_pago].amount
            price_unit = monto * comision_tasa
        elif comision_id.comision_cuota == 'monto_fijo':
            price_unit = comision_id.monto
        if len(self.payment_ids) > 0:
            # If there were previous payments and a commission invoice for
            # the same amount already exists, do not invoice it again.
            for invoice_id in self.invoice_comisiones_ids:
                if invoice_id.state != 'cancel' and invoice_id.amount_total == price_unit:
                    flag_facturar = False
        if comision_id.iva and comision_id.iva_incluido:
            # Tax-inclusive amount: back out the VAT from the unit price.
            price_unit = price_unit / (1+(comision_id.vat_tax_id.amount/100))
        if flag_facturar:
            # Create invoice line
            ail = {
                'name': comision_id.detalle_factura,
                'quantity': 1,
                'price_unit': price_unit,
                # 'vat_tax_id': vat_tax_id,
                'invoice_line_tax_ids': invoice_line_tax_ids,
                'report_invoice_line_tax_ids': invoice_line_tax_ids,
                'account_id': journal_id.default_debit_account_id.id,
                'company_id': comision_id.company_id.id,
            }
            ail_ids.append((0, 0, ail))
            # Supplier invoice header for the commission's partner.
            account_invoice_supplier = {
                'description_financiera': comision_id.detalle_factura,
                'account_id': comision_id.partner_id.property_account_payable_id.id,
                'partner_id': comision_id.partner_id.id,
                'journal_id': journal_id.id,
                'currency_id': self.currency_id.id,
                'company_id': comision_id.company_id.id,
                'date': datetime.now(),
                'date_invoice': datetime.now(),
                'invoice_line_ids': ail_ids,
                'type': 'in_invoice',
                'payment_term_id': comision_id.account_payment_term_id.id,
                'sucursal_id': self.sucursal_id.id,
            }
            new_invoice_id = self.env['account.invoice'].create(account_invoice_supplier)
            self.invoice_comisiones_ids = [new_invoice_id.id]
            return new_invoice_id

    @api.one
    def confirmar_cobrar_cuota(self, payment_date, journal_id, payment_amount, multi_cobro_id, payment_close=False):
        """After confirming the installment collection, generate every
        applicable commission invoice and attach it to the last payment."""
        super(ExtendsFinancieraPrestamoCuota, self).confirmar_cobrar_cuota(payment_date, journal_id, payment_amount, multi_cobro_id, payment_close)
        comisiones_ids = self.comisiones_cuota()
        for _id in comisiones_ids:
            comision_id = self.env['financiera.comision'].browse(_id)
            # generar_comision is @api.one, so it returns a list.
            invoice_id = self.generar_comision(comision_id)
            self.payment_last_id.invoice_comisiones_ids = [invoice_id[0].id]
| levislibra/financiera_comision | models/models.py | models.py | py | 13,246 | python | en | code | 0 | github-code | 90 |
"""
This file defines the Troop class and the functions tied to it. Troop
handles both offensive and defensive units, and their strategies.
"""
from typing import Callable
from workplace import *
class Troop:
    """A collection of military units sharing one target and behaviour."""
    __target: Point2D  # Common target for all units in troop

    # ___Job_list___
    marines: List[Unit]  # All marines in this troop
    tanks: List[Unit]  # All siege tanks in this troop
    bunkers: Dict[Unit, List[Unit]]  # All bunkers in this troop and the marines within
    others: List[Unit]  # All other units in this troop
    # --------------

    # Class constants
    marines_capacity: int = 8  # How many marines a defending troop is asking for
    tanks_capacity: int = 2  # How many tanks a defending troop is asking for
    marines_capacity_atk: int = 12  # How many marines an attacking troop is asking for
    tanks_capacity_atk: int = 4  # How many tanks an attacking troop is asking for
    target_radius: int = 7  # How close a unit must be to a target to be there
    leash_radius: int = 4  # How close a unit must be to leader when leash is active
    leash_stretch: int = 5  # How far away a unit can be from leader at most when leash is active
    under_attack_wait: int = 200  # How many on_steps the troop waits before
    # declaring not under_attack (if not attacked)

    # Unit lists for those in special states
    not_reached_target: List[Unit]  # All units that have not reached target
    already_idle: List[Unit]  # All units that have been noticed as idle
    tanks_siege: List[Unit]  # All siege tanks in siegemode in this troop
    repair_these: Dict[Unit, List[Unit]]  # All damaged repairable units and who are repairing it
    foes_to_close: List[Unit]  # All foes that are within proximity

    # State handlers
    __order: Callable  # A function that moves a unit as demanded
    under_attack: int  # Countdown of on_steps the troop remains "under attack"
    is_attackers: bool  # If troop is attacking or not
    prohibit_refill: bool  # If troop will request more troops or not

    # Follow leader
    __leash: Optional[Callable]  # A function that moves unit towards leader position
    leader: Optional[Unit]  # When marching as one, follow this unit

    # (Attacking) Troop targets -- class-level registries shared by all troops.
    # NOTE(review): __init__ also sets an instance-level ``enemy_bases`` that
    # shadows this class attribute, while the classmethods use the class-level
    # one; confirm that split is intended.
    enemy_bases: List[BaseLocation] = []  # All potential enemy bases for attackers to attack
    enemy_structures: Dict[Tuple[float, float], bool] = {}  # All known enemy structures
    # that need to be destroyed to win, and whether they are currently visible

    # ---------- EVENTS ----------
    # These are functions triggered by different events. Most are
    # triggered from MyAgent.

    def __init__(self, position: Point2D, is_attackers: bool = False):
        """Called when a new troop is being created. Note that no units are
        required for making a troop, rather it is why they need to be created.
        """
        self.__order = self.__march_order
        self.marines = []
        self.tanks = []
        self.tanks_siege = []
        self.bunkers = {}
        self.others = []
        self.not_reached_target = []
        self.already_idle = []
        self.under_attack = 0
        self.is_attackers = is_attackers
        self.prohibit_refill = False
        self.enemy_bases = []
        self.__leash = None
        self.leader = None
        self.foes_to_close = []
        self.repair_these = {}
        if is_attackers:
            # Attacking troops ask for a larger complement of units.
            self.marines_capacity = self.marines_capacity_atk
            self.tanks_capacity = self.tanks_capacity_atk
        self.set_target(position)

    def on_step(self, bot: IDABot) -> None:
        """Called each on_step() of IDABot."""
        # Remove all non idle units from the idle list
        self.already_idle = list(filter(
            lambda unit: unit.is_idle, self.already_idle))

        if self.under_attack:
            self.under_attack -= 1
            # If no foe is close by or troop not damaged for a while, then calm down
            if not self.foes_to_close or self.under_attack == 0:
                self.under_attack = 0
                self.foes_to_close = []
                self.already_idle = []
                self.not_reached_target = self.get_units()
            # If not moving (shouldn't attack) attack attackers.
            elif self.__order != self.__move_order:
                if self.foes_to_close:
                    for unit in self.get_units():
                        if not (unit.has_target and unit.target in self.foes_to_close):
                            targeted_foe = self.get_suitable_to_close_foe_for(unit)
                            if targeted_foe:
                                unit.attack_unit(targeted_foe)
        elif self.__leash:
            # March as one: units keep within leash range of the leader and
            # the leader waits for stragglers.
            left_behind = False
            for unit in self.get_units():
                if unit != self.leader and not self.nearby_target(unit):
                    if self.nearby_leader(unit):
                        if unit.has_target and unit.target == self.leader:
                            self.__order(unit)
                    else:
                        if not unit.has_target or unit.target != self.leader:
                            self.__leash(unit)
                        if self.losing_leader(unit):
                            left_behind = True
            if not self.leader.is_idle and left_behind:
                self.leader.stop()
            elif self.leader.is_idle and not left_behind:
                self.__order(self.leader)

        if not self.is_attackers and not self.under_attack:
            # Defenders fortify their position with a bunker.
            if not self.bunkers.keys():
                self.build_bunker(bot, self.target_pos)

    def on_idle(self, unit: Unit, bot: IDABot) -> None:
        """Called each time a member is idle."""
        # NOTE(review): repair dispatch is disabled; see have_unit_repair().
        # if unit.unit_type.unit_typeid in repairer_TYPEIDS and self.repair_these:
        #     self.have_unit_repair(unit)
        if unit not in self.already_idle:
            self.already_idle.append(unit)
            self.on_just_idle(unit, bot)

    def on_just_idle(self, unit: Unit, bot: IDABot) -> None:
        """Called each time a member just became idle."""
        if self.under_attack and self.__order != self.__move_order:
            targeted_foe = self.get_suitable_to_close_foe_for(unit)
            if targeted_foe:
                unit.attack_unit(targeted_foe)
            else:
                print(unit, " just panicked!")
        elif self.nearby_target(unit):
            if unit in self.not_reached_target:
                self.not_reached_target.remove(unit)
                self.on_member_reach_target(unit, bot)
        elif not self.nearby_target(unit):
            if unit in self.tanks_siege:
                # Unsiege so the tank can move back towards the target.
                unit.ability(ABILITY_ID.MORPH_UNSIEGE)
                self.tanks_siege.remove(unit)
            elif not self.__leash or not self.leader == unit:
                self.unit_execute_order(unit)

    def on_member_reach_target(self, unit: Unit, bot: IDABot) -> None:
        """A member reaches target for first time."""
        if self.have_all_reached_target and self.prohibit_refill and self.is_attackers:
            # Whole attack wave has arrived: pick the next objective.
            if Troop.has_enemy_structure_as_target(self.target_pos):
                self.lost_enemy_structure(self.target_pos, bot)
            else:
                self.try_to_win(bot)
        elif unit in self.tanks and unit not in self.tanks_siege \
                and not (unit.has_target and unit.target == PLAYER_ENEMY):
            # NOTE(review): comparing unit.target against PLAYER_ENEMY looks
            # like a unit-vs-player mixup -- confirm.
            unit.ability(ABILITY_ID.MORPH_SIEGEMODE)
            self.tanks_siege.append(unit)
        elif unit in self.marines:
            for bunker, occupants in self.bunkers.items():
                if len(occupants) < 4:
                    unit.right_click(bunker)
                    self.bunkers[bunker].append(unit)
                    # BUGFIX: without this break the marine was registered as
                    # an occupant of every non-full bunker.
                    break

    def on_damaged_member(self, unit: Unit, bot: IDABot) -> None:
        """A member takes damage (might be dead)."""
        self.need_repair(unit)
        # Register every enemy close enough to have dealt the damage.
        for foe in bot.get_all_units():
            if foe.player != PLAYER_ENEMY:
                continue
            if foe not in self.foes_to_close \
                    and max(foe.unit_type.attack_range + foe.radius + unit.radius,
                            10)**2 > foe.position.squared_dist(unit.position):
                self.foes_to_close.append(foe)
        if not self.foes_to_close:
            # Damaged by something unseen -- scan the area.
            bot.try_to_scan(unit.position)
        self.under_attack = self.under_attack_wait

    # --------- ORDERS ---------
    # Handles how units advance to target and the execution of it.

    def __march_order(self, unit: Unit) -> None:
        """Have a member attack-move to the troop target."""
        unit.attack_move(self.__target)

    def __move_order(self, unit: Unit) -> None:
        """Move a unit to the troop target (no attacking on the way)."""
        unit.move(self.__target)

    def __attack_order(self, unit: Unit) -> None:
        """Have a unit attack the troop target (a Unit)."""
        unit.attack_unit(self.__target)

    def __follow_leader(self, unit: Unit) -> None:
        """Have unit follow leader."""
        unit.right_click(self.leader)

    def march_units(self, position: Point2D) -> None:
        """Have troop and all its units attack-move to given position."""
        self.__leash = None
        self.__order = self.__march_order
        self.set_target(position)
        self.all_execute_orders()

    def march_together_units(self, position: Point2D) -> None:
        """Attack-move to given position but stay close to leader."""
        self.__leash = self.__follow_leader
        self.__order = self.__march_order
        self.set_target(position)
        self.all_execute_orders()

    def move_units(self, position: Point2D) -> None:
        """Move troop and all its units to given position."""
        self.__leash = None
        self.__order = self.__move_order
        self.set_target(position)
        self.all_execute_orders()

    def attack_units(self, target: Unit) -> None:
        """Have all units attack given unit."""
        self.__leash = self.__follow_leader
        self.__order = self.__attack_order
        self.set_target(target)
        self.all_execute_orders()

    def defend_workplace(self, work: Workplace, bot: IDABot) -> None:
        """Have units defend given workplace from enemies."""
        # TODO: Not yet fully implemented, fix or remove
        for unit in bot.get_all_units():
            if unit.player == PLAYER_ENEMY \
                    and work.within_proximity(unit.position):
                self.foes_to_close.append(unit)

    def all_execute_orders(self) -> None:
        """Have all members execute the current order."""
        for trooper in self.get_units():
            self.__order(trooper)

    def unit_execute_order(self, trooper: Unit) -> None:
        """Have a single member execute the current order."""
        self.__order(trooper)

    # ---------- BASIC HANDLERS ----------
    # Handles basic functions such as adding and removing units

    def add(self, units: Union[Unit, Sequence[Unit]]) -> None:
        """Adds unit(s) to troop, sorting them into their job lists."""
        if isinstance(units, Unit):
            units = [units]
        for unit in units:
            # Attackers never adopt buildings.
            if unit.unit_type.is_building and self.is_attackers:
                continue
            if unit.unit_type.unit_typeid == UNIT_TYPEID.TERRAN_MARINE:
                self.marines.append(unit)
            elif unit.unit_type.unit_typeid in siege_tanks_TYPEIDS:
                self.tanks.append(unit)
                if unit.unit_type.unit_typeid == UNIT_TYPEID.TERRAN_SIEGETANKSIEGED:
                    self.tanks_siege.append(unit)
            elif unit.unit_type.unit_typeid == UNIT_TYPEID.TERRAN_BUNKER:
                # Only adopt bunkers built at the troop's own target.
                if self.nearby_target(unit):
                    self.bunkers[unit] = []
                    self.have_soldiers_enter(unit)
                else:
                    continue
            else:
                self.others.append(unit)
            self.not_reached_target.append(unit)
            if self.satisfied and self.is_attackers:
                # A fully staffed attack wave never refills.
                self.prohibit_refill = True
            if not unit.unit_type.is_building:
                self.try_assigning_leader(unit)

    def remove(self, unit: Unit) -> None:
        """Handles units that are to be removed from troop."""
        if unit in self.already_idle:
            self.already_idle.remove(unit)
        if unit in self.not_reached_target:
            self.not_reached_target.remove(unit)
        for bunker, occupants in self.bunkers.items():
            if unit in occupants:
                # Empty the whole bunker; occupants will be re-registered.
                bunker.ability(ABILITY_ID.UNLOADALL)
                self.bunkers[bunker] = []
        if unit in self.marines:
            self.marines.remove(unit)
        elif unit in self.tanks:
            self.tanks.remove(unit)
            if unit in self.tanks_siege:
                unit.ability(ABILITY_ID.MORPH_UNSIEGE)
                self.tanks_siege.remove(unit)
        elif unit in self.bunkers:
            if unit.is_alive and self.bunkers[unit]:
                unit.ability(ABILITY_ID.UNLOADALL)
            del self.bunkers[unit]
        elif unit in self.others:
            self.others.remove(unit)
        if unit == self.leader:
            # Lost the leader: elect a new one among the remaining units.
            self.leader = None
            for candidate in self.get_units():
                self.try_assigning_leader(candidate)

    def get_units(self) -> List[Unit]:
        """Get all units in troop."""
        return (self.marines
                + self.tanks
                + self.others
                + list(self.bunkers.keys()))

    def has_unit(self, unit: Unit) -> bool:
        """Check if troop has unit."""
        return unit in self.get_units()

    def set_target(self, target: Union[Point2D, Unit]) -> None:
        """Sets target of troop and resets per-target bookkeeping."""
        self.__target = target
        self.not_reached_target = self.get_units()
        self.already_idle = []
        # BUGFIX: iterate over a snapshot -- remove() deletes from
        # self.bunkers, which would otherwise raise RuntimeError
        # (dictionary changed size during iteration).
        for bunker in list(self.bunkers):
            if not self.nearby_target(bunker):
                self.remove(bunker)

    def flush_troop(self) -> List[Unit]:
        """Remove and return all non-building units in troop."""
        units = self.get_units().copy()
        free = []
        while units:
            unit = units.pop()
            if not unit.unit_type.is_building:
                self.remove(unit)
                free.append(unit)
        return free

    # ---------- MISC ----------
    # Other needed functions.

    def build_bunker(self, bot: IDABot, location) -> None:  # AW
        """Builds a bunker near the given location when possible."""
        bunker = UnitType(UNIT_TYPEID.TERRAN_BUNKER, bot)
        workplace = closest_workplace(location)
        if can_afford(bot, bunker) \
                and not currently_building(bot, UNIT_TYPEID.TERRAN_BUNKER) \
                and bot.have_one(UNIT_TYPEID.TERRAN_BARRACKS) \
                and not workplace.is_building_unittype(bunker) \
                and not self.bunkers:
            position = bot.building_placer.get_build_location_near(
                location.to_i(), bunker)
            workplace.have_worker_construct(bunker, position)

    def nearby_target(self, at: Union[Unit, Point2D]) -> bool:
        """Check if a unit/point is within target_radius of the target."""
        if isinstance(at, Unit):
            return at.position.dist(self.target_pos) <= self.target_radius
        elif isinstance(at, Point2D):
            return at.dist(self.target_pos) <= self.target_radius
        else:
            raise Exception("Can't do that!")

    def nearby_leader(self, at: Union[Unit, Point2D]) -> bool:
        """Check if a unit/point is within leash_radius of the leader."""
        if isinstance(at, Unit):
            return at.position.squared_dist(self.leader.position) \
                   <= self.leash_radius**2
        elif isinstance(at, Point2D):
            return at.squared_dist(self.leader.position) \
                   <= self.leash_radius**2
        else:
            raise Exception("Can't do that!")

    def losing_leader(self, at: Union[Unit, Point2D]) -> bool:
        """Check if a unit/point is not nearby (with margin) the leader."""
        if isinstance(at, Unit):
            return at.position.squared_dist(self.leader.position) \
                   > (self.leash_radius + self.leash_stretch) ** 2
        elif isinstance(at, Point2D):
            return at.squared_dist(self.leader.position) \
                   > (self.leash_radius + self.leash_stretch) ** 2
        else:
            raise Exception("Can't do that!")

    def try_assigning_leader(self, unit: Unit) -> None:
        """Try to set new leader to given unit for troop.

        Preference: any leader over none, then a bigger unit of the same
        flying-ness, then any ground unit over a flyer.
        """
        if not unit.unit_type.is_building:
            if not self.leader:
                self.leader = unit
            elif self.leader.is_flying == unit.is_flying:
                if unit.radius > self.leader.radius:
                    self.leader = unit
            elif not unit.is_flying:
                self.leader = unit

    def have_soldiers_enter(self, bunker: Unit) -> None:
        """Have up to four marines enter the bunker."""
        for marine in self.marines[:4]:
            marine.right_click(bunker)
            self.bunkers[bunker].append(marine)

    def get_suitable_to_close_foe_for(self, unit: Unit) -> Optional[Unit]:
        """Returns a suitable target for units if they're defending
        themselves from attackers. Prefers non-building foes."""
        best_aggressor = get_closest(
            [(foe.position, foe) for foe in self.foes_to_close
             if not foe.unit_type.is_building],
            unit.position)
        if best_aggressor:
            return best_aggressor
        else:
            return get_closest(
                [(foe.position, foe) for foe in self.foes_to_close
                 if foe.unit_type.is_building],
                unit.position)

    def need_repair(self, unit: Unit) -> None:
        """Have a unit request repairs and remember this."""
        if unit not in self.repair_these:
            self.repair_these[unit] = []

    def have_unit_repair(self, unit: Unit) -> None:
        """Try to have the unit repair a target that needs repairs."""
        fixed = []
        for repair_this, repairers in self.repair_these.items():
            # BUGFIX: the original treated damaged units (hp difference
            # truthy) as "fixed" and repaired healthy ones; a unit is done
            # only when back at full hit points.
            if repair_this.hit_points == repair_this.max_hit_points:
                fixed.append(repair_this)
            elif len(repairers) < 3:
                # NOTE(review): the unit is never appended to ``repairers``,
                # so the <3 cap never fills -- confirm intended.
                unit.repair(repair_this)
                break
        for done in fixed:
            del self.repair_these[done]

    def try_to_win(self, bot: IDABot) -> None:
        """Attackers will try to kill all enemy units."""
        if self.enemy_structures:
            # Attack closest known structure first.
            self.march_together_units(get_closest(
                [(Point2D(pos[0], pos[1]), Point2D(pos[0], pos[1]))
                 for pos in self.enemy_structures],
                self.leader.position if self.leader else self.target_pos))
        elif bot.remember_enemies:
            # No structures left: hunt remembered mobile enemies, dropping
            # the one we just reached.
            found = None
            for unit in bot.remember_enemies:
                if unit.position == self.target_pos:
                    found = unit
                    break
            if found:
                bot.remember_enemies.remove(found)
            self.march_together_units(get_closest(
                [(unit.position, unit.position) for unit in bot.remember_enemies],
                self.leader.position if self.leader else self.target_pos))

    # ---------- PROPERTIES ----------
    # Values that are trivial calculations but important for the object

    @property
    def satisfied(self) -> bool:
        """Return True if the troop does NOT want any more units."""
        return (self.prohibit_refill or
                (self.wants_marines <= 0 and self.wants_tanks <= 0))

    @property
    def is_terminated(self) -> bool:
        """Return True if the troop is empty and can't refill."""
        return self.prohibit_refill and not self.get_units()

    @property
    def have_all_reached_target(self) -> bool:
        """Returns true if all members are close to target."""
        return not self.not_reached_target or \
            all([unit.position.squared_dist(self.target_pos)
                 <= self.target_radius**2
                 for unit in self.get_units()])

    @property
    def wants_marines(self) -> int:
        """Return required amount of marines to satisfy capacity."""
        return max(self.marines_capacity - len(self.marines), 0) \
            if not self.under_attack and not self.prohibit_refill else 0

    @property
    def wants_tanks(self) -> int:
        """Return required amount of tanks to satisfy capacity."""
        return max(self.tanks_capacity - len(self.tanks), 0) \
            if not self.under_attack and not self.prohibit_refill else 0

    @property
    def has_enough(self) -> bool:
        """Check if the capacity is satisfied for all unit types."""
        return 0 >= self.wants_marines and 0 >= self.wants_tanks

    @property
    def target_pos(self) -> Point2D:
        """Returns the target position (works for Unit targets too)."""
        return self.__target if isinstance(self.__target, Point2D) \
            else self.__target.position

    # ---------- CLASS METHODS ----------
    # Methods relevant to the class rather than any instance of it.
    # Focused on handling enemy targets for troops.

    @classmethod
    def found_enemy_structure(cls, unit: Unit, bot: IDABot) -> None:
        """Adds target structure to Troop targets."""
        # NOTE(review): gating on ``unit.is_cloaked`` (rather than
        # ``not unit.is_cloaked``) looks inverted -- confirm.
        if unit.is_cloaked and unit.is_alive:
            cls.enemy_structures[(unit.position.x, unit.position.y)] = True
            for base in bot.base_location_manager.base_locations:
                if base.contains_position(unit.position) \
                        and base not in cls.enemy_bases:
                    cls.enemy_bases.append(base)
            # Try to attack closest first
            for troop in attackers:
                if cls.has_enemy_structure_as_target(troop.target_pos):
                    troop.try_to_win(bot)

    @classmethod
    def check_validity_enemy_structures(cls, bot: IDABot) -> None:
        """Confirm that enemy_structures are still valid targets."""
        remove_these = []
        for target, visible in cls.enemy_structures.items():
            if visible:
                if not bot.map_tools.is_visible(round(target[0]), round(target[1])):
                    cls.enemy_structures[target] = False
            else:
                if bot.map_tools.is_visible(round(target[0]), round(target[1])):
                    cls.enemy_structures[target] = True
                    # Target just became visible again: verify the structure
                    # actually still stands.
                    found = None
                    for unit in bot.get_all_units():
                        if unit.player == PLAYER_ENEMY:
                            if unit.position.x == target[0] \
                                    and unit.position.y == target[1]:
                                if unit.is_alive:
                                    found = unit
                                    break
                    if not found:
                        remove_these.append(target)
        for target in remove_these:
            cls.lost_enemy_structure(Point2D(target[0], target[1]), bot)

    @classmethod
    def lost_enemy_structure(cls, at: Union[Unit, Point2D], bot: IDABot) -> None:
        """Removes target structure from Troop targets."""
        if isinstance(at, Unit):
            at = at.position
        if cls.has_enemy_structure_as_target(at):
            del cls.enemy_structures[(at.x, at.y)]
            # Drop bases that the enemy no longer occupies.
            for base in cls.enemy_bases.copy():
                if base.contains_position(at) and \
                        not base.is_occupied_by_player(PLAYER_ENEMY):
                    cls.enemy_bases.remove(base)
            # Re-target any attacker that was heading there.
            for troop in all_troops():
                if troop.is_attackers \
                        and troop.target_pos.x == at.x \
                        and troop.target_pos.y == at.y:
                    troop.try_to_win(bot)

    @classmethod
    def has_enemy_structure_as_target(cls, at: Union[Point2DI, Unit]) -> bool:
        """Returns True if troops have given target in enemy structures
        targets."""
        if isinstance(at, Unit):
            at = at.position
        for enemy_structure in cls.enemy_structures:
            if enemy_structure[0] == at.x and enemy_structure[1] == at.y:
                return True
        return False
# ========== END OF TROOP ==========
# All troops!
defenders: List[Troop] = []  # Troops that hold and defend positions
attackers: List[Troop] = []  # Troops sent to assault the enemy
def create_troop_defending(point: Point2D) -> None:
"""Create a new troop with given target that are suppose to defend."""
defenders.append(Troop(point))
def create_troop_attacking(point: Point2D) -> None:
"""Creates a new troop with given target that are suppose to attack."""
attackers.append(Troop(point, True))
def remove_terminated_troops() -> None:
    """Drop every terminated troop from both troop registries.

    Filters with in-place slice assignment so external references to
    ``attackers``/``defenders`` keep seeing the updated lists. Replaces
    the original index-juggling while/pop(i) loops, which were quadratic
    (each pop(i) shifts the tail of the list).
    """
    attackers[:] = [troop for troop in attackers if not troop.is_terminated]
    defenders[:] = [troop for troop in defenders if not troop.is_terminated]
def all_troops():
    """Return every troop: attackers first, then defenders."""
    combined = [*attackers, *defenders]
    return combined
def marine_seeks_troop(position: "Point2D") -> "Optional[Troop]":
    """Find the troop that needs a marine the most.

    Priority tiers:
      0. defending troops that still want marines,
      1. attacking troops that still want marines,
      2. any other refillable troop (plain closest).

    Within tiers 0 and 1 the score is distance divided by how many marines
    the troop still wants, so needier troops win.

    BUGFIX: the original stored the raw distance while comparing the
    need-weighted distance against it, so later candidates were judged
    against the wrong baseline. The stored score now matches the compared
    score. (Annotations are quoted so the module imports lazily.)
    """
    best = [None, None, None]
    best_score = [0.0, 0.0, 0.0]
    for troop in all_troops():
        if troop.prohibit_refill:
            continue
        dist = troop.target_pos.dist(position)
        if troop.wants_marines > 0:
            tier = 1 if troop.is_attackers else 0
            score = dist / troop.wants_marines
        else:
            tier = 2
            score = dist
        if best[tier] is None or score < best_score[tier]:
            best[tier] = troop
            best_score[tier] = score
    return best[0] if best[0] else best[1] if best[1] else best[2]
def tank_seeks_troop(position: "Point2D") -> "Optional[Troop]":
    """Find the troop that needs a tank the most.

    Priority tiers:
      0. defending troops that still want tanks,
      1. attacking troops that still want tanks,
      2. any other refillable troop (plain closest).

    Within tiers 0 and 1 the score is distance divided by how many tanks
    the troop still wants, so needier troops win.

    BUGFIX: same inconsistency as marine_seeks_troop -- the original
    stored the raw distance while comparing the need-weighted distance.
    (Annotations are quoted so the module imports lazily.)
    """
    best = [None, None, None]
    best_score = [0.0, 0.0, 0.0]
    for troop in all_troops():
        if troop.prohibit_refill:
            continue
        dist = troop.target_pos.dist(position)
        if troop.wants_tanks > 0:
            tier = 1 if troop.is_attackers else 0
            score = dist / troop.wants_tanks
        else:
            tier = 2
            score = dist
        if best[tier] is None or score < best_score[tier]:
            best[tier] = troop
            best_score[tier] = score
    return best[0] if best[0] else best[1] if best[1] else best[2]
def bunker_seeks_troop(position: Point2D) -> Optional[Troop]:
    """Return the first defending troop whose target is near the position,
    or None when no troop qualifies."""
    return next(
        (troop for troop in all_troops()
         if not troop.is_attackers and troop.nearby_target(position)),
        None)
def find_unit_troop(unit: Unit) -> Optional[Troop]:
    """Return the troop this unit belongs to, or None if it has none."""
    return next(
        (troop for troop in all_troops() if troop.has_unit(unit)),
        None)
def closest_troop(pos: Point2D) -> Optional[Troop]:
    """Finds the closest troop to a position.

    Compares troops by their target position using the get_closest helper.
    """
    return get_closest([(troop.target_pos, troop) for troop in all_troops()], pos)
| antwg/tdde25 | armies.py | armies.py | py | 28,084 | python | en | code | 0 | github-code | 90 |
import ctypes
import sys
import pygame as pg
import sdl2
import sdl2.ext
import os
from sdl2 import surface, SDL_GetColorKey, SDL_SetColorKey
from sdl2.ext.compat import isiterable
from sdl2.sdlimage import IMG_Load
from PIL import Image
class SoftwareRenderer(sdl2.ext.SoftwareSpriteRenderSystem):
    """Software sprite renderer that clears the window to black each frame."""

    def __init__(self, window):
        super().__init__(window)

    def render(self, components):
        # Wipe the frame to black, then let the base class draw the sprites.
        sdl2.ext.fill(self.surface, sdl2.ext.Color(0, 0, 0))
        super().render(components)
class song():
    """Placeholder for song/beatmap data; not implemented yet."""
    pass
class game_process():
    """Runs one play-through: spawns the notes, starts the music and pumps
    the SDL event loop until the player quits or presses R."""

    def __init__(self, world):
        # Hard-coded beatmap: [hit_time_in_seconds, x, y] per note.
        # (A file-based map was planned; see the map.txt remnant.)
        # f = map(open("map.txt").read().split(), int)
        f = [[1, 300, 100], [3, 600, 450], [5, 400, 200], [7, 900, 500], [6, 300, 500]]
        ar = 3  # approach window in seconds for each note
        timer = Timer()
        # 1x1 black placeholder texture shown until a note becomes active.
        image2 = Image.new("RGB", (1, 1), (0, 0, 0))
        image2.save("pix.png")
        for i in f:
            note = Note(i[0], ar)
            world.add_system(note)
            texture = sdl2.ext.load_image("pix.png")
            factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
            note_pic = factory.from_surface(texture)
            note_sp = note_sprite(world, note_pic, posx=i[1], posy=i[2])
            note_sp.timer = timer
            note.note = note_sp
        pg.mixer.music.play()
        pg.mixer.music.set_volume(0.1)
        running = True
        while running:
            events = sdl2.ext.get_events()
            for event in events:
                # BUGFIX: the original read event.key.keysym.sym on EVERY
                # event. SDL_Event is a C union, so on mouse/quit events the
                # key field holds garbage and could spuriously match SDLK_r.
                # Only inspect the key field on actual keyboard events.
                if event.type == sdl2.SDL_KEYDOWN \
                        and event.key.keysym.sym == sdl2.SDLK_r:
                    running = False
                    break
                if event.type == sdl2.SDL_QUIT:
                    running = False
                    break
            world.process()
class note_sprite(sdl2.ext.Entity):
    """Sprite entity for one note, placed at a fixed position.

    NOTE: sdl2.ext.Entity registers components via __setattr__, so
    assigning self.sprite attaches the sprite component to the world.
    """
    def __init__(self, world, sprite, posx=100, posy=100):
        self.sprite = sprite
        self.sprite.position = posx, posy
class Timer(object):
    """Tracks elapsed SDL ticks since the moment of construction."""

    def __init__(self):
        super().__init__()
        self.status = True
        self.paused = False
        # Tick count (milliseconds) at the moment the timer started.
        self.startTicks = sdl2.timer.SDL_GetTicks()

    def stop(self):
        """Mark the timer as stopped and not paused."""
        self.paused = False
        self.status = False

    def get_ticks(self):
        """Return whole seconds elapsed since the timer was started."""
        elapsed_ms = sdl2.timer.SDL_GetTicks() - self.startTicks
        return elapsed_ms // 1000
class Note(sdl2.ext.Applicator):
    """Applicator driving one hit-circle: shows it at `time` seconds,
    checks mouse hits, and removes it after the approach window `ar`.
    """
    def __init__(self, time, ar):
        super().__init__()
        self.componenttypes = Timer, note_sprite, sdl2.ext.Sprite
        # Back-reference to the note_sprite entity; set by the caller.
        self.note = None
        # Second at which the circle appears.
        self.time = time
        self.is_active = False
        # Approach window in seconds; shortened once the circle is hit.
        self.ar = ar
        # Mouse coordinates filled in by SDL_GetMouseState.
        self.x, self.y = ctypes.c_int(0), ctypes.c_int(0)
        # flag: note still alive; flag1: note not yet hit.
        self.flag, self.flag1 = True, True
        self.circle_im = self.draw_circle()
    def check(self):
        """Register a hit when the cursor is inside the circle.

        NOTE(review): 70 looks like the sprite's half-size and 1400 the
        squared hit radius — confirm against the circle image dimensions.
        """
        rx = self.x.value - (self.note.sprite.x + 70)
        ry = self.y.value - (self.note.sprite.y + 70)
        if (rx ** 2 + ry ** 2) < 1400:
            self.note.sprite.surface = sdl2.ext.load_image("hit300.png")
            # Shrink the remaining lifetime so the note disappears soon.
            self.ar = (self.note.timer.get_ticks() + 1) - self.time
            self.flag1 = False
    def process(self, world, componentsets):
        """Per-frame update: activate, poll the mouse, and expire the note."""
        if self.flag:
            if self.time == self.note.timer.get_ticks() and not self.is_active:
                self.is_active = True
                self.note.sprite.surface = sdl2.ext.load_image(self.circle_im)
            if self.is_active:
                if self.time + self.ar == self.note.timer.get_ticks():
                    self.is_active = False
                    self.note.world.delete(self.note)
                    self.flag = False
                elif self.flag1:
                    buttonstate = sdl2.mouse.SDL_GetMouseState(ctypes.byref(self.x), ctypes.byref(self.y))
                    if buttonstate:
                        self.check()
    def draw_circle(self):
        """Copy approachcircle.png pixel-by-pixel to approachcircle2.png and
        return the new file name (strips alpha by writing into an RGB image).
        """
        sp = []
        image = Image.open('approachcircle.png')
        size = image.size
        pix = image.load()
        image2 = Image.new("RGB", size)
        for x in range(size[0]):
            for y in range(size[1]):
                image2.putpixel([x, y], pix[x, y])
        image2.save("approachcircle2.png")
        return "approachcircle2.png"
class Menu_app(sdl2.ext.Applicator):
    """Applicator for menu entities; per-frame processing is a no-op."""

    def __init__(self):
        super().__init__()
        self.componenttypes = Menu_sp, sdl2.ext.Sprite

    def process(self, world, componentsets):
        # Menu items are static for now — nothing to update each frame.
        pass
class Menu_sp(sdl2.ext.Entity):
    """Menu entity wrapping a sprite positioned at (posx, posy).

    NOTE(review): sdl2.ext.Entity intercepts construction/attribute access
    and expects the world as the first argument — keep the signature as-is.
    """
    def __init__(self, world, sprite, posx, posy):
        self.sprite = sprite
        self.sprite.position = posx, posy
def run():
    """Create the window and worlds, show the menu, and pump the event loop.

    Pressing 'q' starts a gameplay session; closing the window exits.
    Returns 0 on normal exit (used as the process exit status).
    """
    sdl2.ext.init()
    window = sdl2.ext.Window("Osu", size=(1600, 900))
    menu = sdl2.ext.World()
    gameplay = sdl2.ext.World()

    spriterenderer = SoftwareRenderer(window)
    menu.add_system(spriterenderer)
    gameplay.add_system(spriterenderer)
    window.show()
    running = True

    lvl_1 = Menu_app()
    menu.add_system(lvl_1)
    texture = sdl2.ext.load_image("1.png")
    factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
    menu_pic = factory.from_surface(texture)
    # Renamed from `Menu_sp`, which shadowed the Menu_sp class defined above.
    menu_entry = note_sprite(menu, menu_pic, 50, 50)
    lvl_1.sp = menu_entry

    pg.init()
    pg.mixer.music.load('audio.wav')
    while running:
        events = sdl2.ext.get_events()
        for event in events:
            # Only key events carry a valid `key` union member; the original
            # code read event.key on every event type (stale union data).
            if event.type == sdl2.SDL_KEYDOWN and event.key.keysym.sym == sdl2.SDLK_q:
                game_process(gameplay)
            if event.type == sdl2.SDL_QUIT:
                running = False
                break
        menu.process()
    return 0
# Script entry point: run the menu loop and exit with its status code.
if __name__ == "__main__":
    sys.exit(run())
40959838967 | from dotenv import load_dotenv
import os
load_dotenv()
import base64
import json
from termcolor import colored
import requests
from requests import post
def get_spotify_credentials():
    """Read the Spotify client credentials from the environment.

    Returns a (client_id, client_secret) tuple; entries are None when the
    corresponding variable is unset.
    """
    return os.getenv('CLIENT_ID'), os.getenv('CLIENT_SECRET')
def get_token(client_id, client_secret):
    """Request a client-credentials access token from Spotify.

    Returns the token string, or None (after printing the error) when the
    HTTP request fails.
    """
    credentials = f'{client_id}:{client_secret}'
    auth_base64 = str(base64.b64encode(credentials.encode('utf-8')), "utf-8")

    url = 'https://accounts.spotify.com/api/token'
    headers = {
        'Authorization': 'Basic ' + auth_base64,
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    data = {"grant_type": "client_credentials"}

    try:
        result = post(url, headers=headers, data=data)
        result.raise_for_status()  # Raise exception for HTTP errors
        return result.json()["access_token"]
    except requests.exceptions.RequestException as e:
        print(colored(f"Error: {e}", 'red'))
        return None
def search_tracks(query, token):
    """Search Spotify for tracks matching `query`.

    Returns up to 10 track dicts, or an empty list (after printing the
    error) when the request fails.
    """
    headers = {'Authorization': f'Bearer {token}'}
    params = {'q': query, 'type': 'track', 'limit': 10}
    try:
        response = requests.get('https://api.spotify.com/v1/search',
                                params=params, headers=headers)
        response.raise_for_status()  # Raise an exception for HTTP errors
        payload = response.json()
        return payload.get('tracks', {}).get('items', [])
    except requests.exceptions.RequestException as e:
        print(colored(f"Error: {e}", 'red'))
        return []
def display_tracks(tracks):
    """Pretty-print a list of Spotify track dicts with color highlighting."""
    if not tracks:
        print(colored("No tracks found.", 'yellow'))
        return

    print(colored(f"Found {len(tracks)} tracks:", 'green'))
    position = 1
    for track in tracks:
        artist_names = ', '.join(a['name'] for a in track['artists'])
        track_name = track['name']
        track_id = track['id']
        print(colored(f"{position}.", 'blue'), end=' ')
        print(f"{colored(track_name, 'yellow')} by {colored(artist_names, 'cyan')} (Track ID: {colored(track_id, 'magenta')})")
        position += 1
def main():
    """Interactive CLI loop: authenticate, then search tracks until 'exit'."""
    client_id, client_secret = get_spotify_credentials()
    if not client_id or not client_secret:
        print(colored("Client ID and Client Secret not found in environment variables.", 'red'))
        return

    token = get_token(client_id, client_secret)
    if token:
        while True:
            query = input("Enter the song name to search for (or 'exit' to quit): ")
            if query.lower() == 'exit':
                break

            tracks = search_tracks(query, token)
            display_tracks(tracks)
    else:
        print(colored("Failed to retrieve access token.", 'red'))
# Run the interactive search CLI when executed as a script.
if __name__ == "__main__":
    main()
25310650992 | import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
'''
Question 1
Import the data from fraud_data.csv. What percentage of the observations in the dataset are instances of fraud?
This function should return a float between 0 and 1.
'''
def answer_one():
    """Return the fraction of observations in fraud_data.csv that are fraud
    (Class == 1), as a float between 0 and 1.
    """
    df = pd.read_csv('fraud_data.csv')
    fraud_count = (df['Class'] == 1).sum()
    return fraud_count / df['Class'].size
# print(answer_one())
from sklearn.model_selection import train_test_split
# Shared train/test split used by all answer_* functions below.
df = pd.read_csv('fraud_data.csv')
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
# random_state=0 keeps the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
'''
Question 2
Using X_train, X_test, y_train, and y_test (as defined above), train a dummy classifier that classifies everything as the majority class of the training data. What is the accuracy of this classifier? What is the recall?
This function should a return a tuple with two floats, i.e. (accuracy score, recall score).
'''
def answer_two():
    """Train a majority-class dummy classifier and return
    (accuracy score, recall score) on the test set.
    """
    from sklearn.dummy import DummyClassifier
    from sklearn.metrics import accuracy_score, recall_score

    clf = DummyClassifier(strategy='most_frequent').fit(X_train, y_train)
    predictions = clf.predict(X_test)
    return (accuracy_score(y_test, predictions),
            recall_score(y_test, predictions))
# print(answer_two())
'''
Question 3
Using X_train, X_test, y_train, y_test (as defined above), train a SVC classifer using the default parameters. What is the accuracy, recall, and precision of this classifier?
This function should a return a tuple with three floats, i.e. (accuracy score, recall score, precision score).
'''
def answer_three():
    """Train an SVC with default parameters and return
    (accuracy score, recall score, precision score) on the test set.
    """
    from sklearn.metrics import accuracy_score, recall_score, precision_score
    from sklearn.svm import SVC

    clf = SVC().fit(X_train, y_train)
    predictions = clf.predict(X_test)
    return (accuracy_score(y_test, predictions),
            recall_score(y_test, predictions),
            precision_score(y_test, predictions))
# print(answer_three())
'''
Question 4
Using the SVC classifier with parameters {'C': 1e9, 'gamma': 1e-07}, what is the confusion matrix when using a threshold of -220 on the decision function. Use X_test and y_test.
This function should return a confusion matrix, a 2x2 numpy array with 4 integers.
'''
def answer_four():
    """Fit SVC(C=1e9, gamma=1e-07) and return the confusion matrix obtained
    by thresholding the decision function at -220 on the test set.
    """
    from sklearn.metrics import confusion_matrix
    from sklearn.svm import SVC

    clf = SVC(C=1e9, gamma=1e-07).fit(X_train, y_train)
    # Predict positive whenever the decision score exceeds -220.
    thresholded = clf.decision_function(X_test) > -220
    return confusion_matrix(y_test, thresholded)
print(answer_four())
'''
Question 5
Train a logisitic regression classifier with default parameters using X_train and y_train.
For the logisitic regression classifier, create a precision recall curve and a roc curve using y_test and the probability estimates for X_test (probability it is fraud).
Looking at the precision recall curve, what is the recall when the precision is 0.75?
Looking at the roc curve, what is the true positive rate when the false positive rate is 0.16?
This function should return a tuple with two floats, i.e. (recall, true positive rate).
'''
def answer_five():
    """Plot precision-recall and ROC curves for a default LogisticRegression
    and return the (recall at precision 0.75, TPR at FPR 0.16) tuple.

    NOTE(review): the curves below are built from hard 0/1 predictions, not
    probability scores, and the return value is hard-coded — confirm against
    the assignment text, which asks for probability estimates.
    """
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import precision_recall_curve, roc_curve
    import matplotlib.pyplot as plt

    linear_reg_clf = LogisticRegression().fit(X_train, y_train)
    # print(linear_reg_clf)

    # y_scores = linear_reg_clf.score(X_test, y_test)
    # y_scores = linear_reg_clf.decision_function(X_test)
    # print(y_scores)
    y_prediction_scores = linear_reg_clf.predict(X_test)
    # print(y_prediction_scores)
    precision, recall, thresholds = precision_recall_curve(y_test, y_prediction_scores)
    fpr, tpr, _ = roc_curve(y_test, y_prediction_scores)
    # Two stacked panels: PR curve on top, ROC curve below.
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    plt.xlim([-0.01, 1.01])
    plt.ylim([-0.01, 1.01])
    # Mark the point on the PR curve closest to threshold zero.
    closest_zero = np.argmin(np.abs(thresholds))
    closest_zero_p = precision[closest_zero]
    closest_zero_r = recall[closest_zero]
    ax1.plot(precision, recall, label='Precision-Recall Curve')
    ax1.plot(closest_zero_p, closest_zero_r, 'o', markersize = 12, fillstyle = 'none', c='r', mew=3)
    ax1.set_xlabel('Precision', fontsize=16)
    ax1.set_ylabel('Recall', fontsize=16)
    # plt.axes().set_aspect('equal')
    ax2.plot(fpr, tpr, lw=3, label='LogRegr')
    ax2.set_xlabel('False Positive Rate', fontsize=16)
    ax2.set_ylabel('True Positive Rate', fontsize=16)
    plt.show()
    # Values read off the plotted curves.
    return (0.83, 0.94)
# print(answer_five())
'''
Question 6
Perform a grid search over the parameters listed below for a Logisitic Regression classifier, using recall for scoring and the default 3-fold cross validation.
'penalty': ['l1', 'l2']
'C':[0.01, 0.1, 1, 10, 100]
From .cv_results_, create an array of the mean test scores of each parameter combination. i.e.
l1 l2
0.01 ? ?
0.1 ? ?
1 ? ?
10 ? ?
100 ? ?
This function should return a 5 by 2 numpy array with 10 floats.
Note: do not return a DataFrame, just the values denoted by '?' above in a numpy array. You might need to reshape your raw result to meet the format we are looking for.
'''
def answer_six():
    """Grid-search LogisticRegression over penalty {'l1','l2'} and
    C {0.01, 0.1, 1, 10, 100}, scoring by recall with 3-fold CV.

    Returns a 5x2 numpy array of mean test recall: rows follow the C values
    in ascending order, columns follow the penalties ['l1', 'l2'].
    """
    from sklearn.model_selection import GridSearchCV
    from sklearn.linear_model import LogisticRegression

    param_grid = {'C': [0.01, 0.1, 1, 10, 100], 'penalty': ['l1', 'l2']}
    grid_clf_logreg = GridSearchCV(LogisticRegression(), param_grid=param_grid,
                                   scoring='recall', cv=3)
    grid_clf_logreg.fit(X_train, y_train)

    # cv_results_ follows the parameter grid order (C varies slowest), so a
    # (5, 2) reshape yields rows = C values, columns = penalties.
    mean_test_score = grid_clf_logreg.cv_results_['mean_test_score']
    # The original version only printed this and fell through, returning
    # None despite the documented contract — return the array.
    return mean_test_score.reshape(5, 2)
print(answer_six()) | PavelBLab/machine_learning | assignment_3/assignment_3.py | assignment_3.py | py | 7,146 | python | en | code | 0 | github-code | 90 |
14285121003 | # _*_ coding : UTF-8 _*_
# 开发人员 : ChangYw
# 开发时间 : 2019/7/19 10:40
# 文件名称 : Test.PY
# 开发工具 : PyCharm
city = {
"北京" : {
"朝阳区" : ["朝阳公园","工体","朝阳大厦"],
"海淀区" : ["颐和园","香山公园","玉泉山"],
"丰台区" : ["园博园","卢沟桥文化旅游区","世界公园"]
},
"上海" : {
"杨浦区": ["杨浦公园", "工体", "朝阳大厦"],
"虹口区": ["虹口足球场","鲁迅公园","上海大厦"],
"浦东新区": ["迪士尼","野生动物园","东方明珠"]
}
}
print(list(city.get("北京"))[0])
print(city.keys())
print(list(city.values()))
print(type(city.values())) | wenzhe980406/PythonLearning | day05/Test.py | Test.py | py | 726 | python | en | code | 0 | github-code | 90 |
18463850479 | from sys import stdin, setrecursionlimit as srl
from threading import stack_size
srl(int(1e9)+7)
stack_size(int(1e8))
def get(i, value, vis):
    """Memoized DFS: longest path (in edges) starting at node i of the
    module-level DAG `adj`; results are cached in `value`, `vis` marks
    already-computed nodes.
    """
    if vis[i]:
        return value[i]
    vis[i] = True
    best = 0
    for nxt in adj[i]:
        candidate = 1 + get(nxt, value, vis)
        if candidate > best:
            best = candidate
    value[i] = best
    return best
# Read the DAG: n nodes, m directed edges (1-indexed on input).
n, m = map(int, input().split())
adj = {}
for i in range(n):
    adj[i] = []
for i in range(m):
    x, y = map(int,input().split())
    adj[x-1].append(y-1)
# Compute the longest path starting from every node, then print the maximum.
vis = [False for i in range(n)]
value = [0 for i in range(n)]
for i in range(n):
    value[i] = get(i, value, vis)
x = 0
for i in value:
    x = max(x, i)
print(x)
33888963993 | import string
import json
import urllib
import urllib2
import ssl
import certifi
import requests
import datetime
# Channel identity and artwork resources.
NAME = 'KIJK 2.0'
ICON = 'icon-default.png'
ART = 'art-default.jpg'
PREFIX = '/video/kijk'
# Broadcasters available on kijk.nl; `slug` matches the API's channel field.
CHANNELS = [
    {
        'name': 'Net5',
        'slug': 'net5',
    },
    {
        'name': 'SBS6',
        'slug': 'sbs6'
    },
    {
        'name': 'Veronica',
        'slug': 'veronicatv'
    },
    {
        'name': 'SBS9',
        'slug': 'sbs9'
    }
]
# Menu icons.
ICON_MISSED = 'missed.png'
ICON_POPULAR_EPISODES = 'popular_episodes.png'
ICON_POPULAR_PROGRAMS = 'popular_programs.png'
ICON_PROGRAMS = 'programs.png'
ICON_SEARCH = 'search.png'
AZ_UPPER = string.ascii_uppercase
AZ_LOWER = string.ascii_lowercase
DIGS = string.digits
# KIJK API endpoints (v1 and v2) and the video playback backends.
API_URL_V1 = 'https://api.kijk.nl/v1/'
API_URL_V2 = 'https://api.kijk.nl/v2/'
RE_SERIES = 'http://kijk.nl/(.*?)/(.*?)'
KIJKEMBED_API_URL = "https://embed.kijk.nl/api/video/"
BRIGHTCOVE_API_URL = "https://edge.api.brightcove.com/playback/v1/accounts/585049245001/videos/"
# Maximum entries shown per listing.
PROGRAMS_LIMIT = 20
EPISODES_LIMIT = 20
####################################################################################################
def Start():
    """Plex plugin entry point: set channel-wide defaults, view groups,
    HTTP cache time, and a desktop browser User-Agent.
    """
    ObjectContainer.title1 = NAME
    ObjectContainer.art = R(ART)
    ObjectContainer.view_group = 'Details'
    DirectoryObject.thumb = R(ICON)
    DirectoryObject.art = R(ART)
    VideoClipObject.thumb = R(ICON)
    VideoClipObject.art = R(ART)
    Plugin.AddViewGroup("Details", viewMode="InfoList", mediaType="items")
    Plugin.AddViewGroup("List", viewMode="List", mediaType="items")
    HTTP.CacheTime = CACHE_1HOUR
    HTTP.Headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
####################################################################################################
@handler(PREFIX, NAME, thumb=ICON, art=ART)
def MainMenu():
    """Top-level channel menu: missed, popular, programs, search, settings."""
    oc = ObjectContainer()
    oc.add(DirectoryObject(
        title = L("MISSED"),
        thumb = R(ICON_MISSED),
        art = R(ART),
        key = Callback(MissedDayList, title2=L("MISSED"))
        #https://api.kijk.nl/v2/templates/page/missed/all/20180208
    ))
    oc.add(DirectoryObject(
        title = L("POPULAR_EPISODES"),
        thumb = R(ICON_POPULAR_EPISODES),
        art = R(ART),
        key = Callback(PopularEpisodes, title2=L("POPULAR_EPISODES"))
        #https://api.kijk.nl/v2/default/sections/popular_PopularVODs
    ))
    oc.add(DirectoryObject(
        title = L("POPULAR_PROGRAMS"),
        thumb = R(ICON_POPULAR_PROGRAMS),
        art = R(ART),
        key = Callback(PopularPrograms, title2=L("POPULAR_PROGRAMS"))
        #https://api.kijk.nl/v2/default/sections/popular_PopularFormats
    ))
    oc.add(DirectoryObject(
        title = L("PROGRAMS_LIST"),
        thumb = R(ICON_PROGRAMS),
        art = R(ART),
        key = Callback(ProgramsList, title2=L("PROGRAMS_LIST"))
    ))
    oc.add(InputDirectoryObject(
        title = L("SEARCH"),
        thumb = R(ICON_SEARCH),
        art = R(ART),
        prompt = L("SEARCH_PROMPT"),
        key = Callback(Search, title2=L("SEARCH"))
    ))
    oc.add(PrefsObject(
        title = L("SETTINGS"),
        thumb = R(ICON),
        art = R(ART)
    ))
    return oc
####################################################################################################
@route(PREFIX + '/missedDayList')
def MissedDayList(title2='', path=''):
    """List the last seven days (today first) linking to each day's missed
    episodes.  `path` is unused but kept for route compatibility.
    """
    oc = ObjectContainer(title2=title2)
    dayStrings = [L("MONDAY"), L("TUESDAY"), L("WEDNESDAY"), L("THURSDAY"), L("FRIDAY"), L("SATURDAY"), L("SUNDAY")]
    now = datetime.datetime.today()
    for index in range(0, 7):
        # Walk backwards one day at a time from today.
        dayDate = now - datetime.timedelta(index)
        dayName = dayStrings[dayDate.weekday()]
        dayDateString = dayDate.strftime("%d-%m-%Y")
        dayPath = "default/sections/missed-all-"+dayDate.strftime("%Y%m%d")+"?limit=350&offset=0"
        if(index == 0):
            dayName = L("TODAY")
        if(index == 1):
            dayName = L("YESTERDAY")
        oc.add(DirectoryObject(
            title = dayName+": "+dayDateString,
            thumb = R(ICON),
            art = R(ART),
            key = Callback(MissedEpisodesList, title2=dayName, path=dayPath)
        ))
    return oc
####################################################################################################
@indirect
@route(PREFIX + '/missedEpisodesList')
def MissedEpisodesList(title2='', path=''):
    """List the playable episodes for one missed-day API path.

    Skips unavailable items (and, on MP4-only clients, items without a
    Brightcove id); missing metadata fields default to empty strings.
    """
    oc = ObjectContainer(title2=title2, art=R(ART))
    try:
        jsonObj = getFromAPI(path=path)
    except:
        return errorMessage(L("ERROR_EPISODES_RETREIVING"))
    try:
        elements = jsonObj["items"]
    except:
        return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
    for e in elements:
        try: available = e["available"]
        except: available = False
        if onlyMP4() and "brightcoveId" not in e:
            available = False
        if not available:
            continue
        # Prefer the Brightcove stream; fall back to the KIJK embed player.
        try: newPath = BRIGHTCOVE_API_URL+e["brightcoveId"]
        except: newPath = KIJKEMBED_API_URL+e["id"]
        try: title = e["title"]
        except: title = ''
        try: seasonLabelShort = e["seasonLabelShort"]
        except: seasonLabelShort = ''
        try: episode = e["episode"]
        except: episode = ''
        try: episodeLabel = e["episodeLabel"]
        except: episodeLabel = ''
        try: summary = e["synopsis"]
        except: summary = ''
        try: thumbUrl = e["images"]["nonretina_image"]
        except: thumbUrl = ''
        try: artUrl = e["images"]["nonretina_image_pdp_header"]
        except: artUrl = ''
        thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
        art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
        try: millis = e["durationSeconds"]*1000
        except: millis = 0
        oc.add(VideoClipObject(
            title = title+" - "+seasonLabelShort+"E"+episode+": "+episodeLabel,
            thumb = thumb,
            summary = summary,
            art = art,
            duration = millis,
            url = newPath
        ))
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
####################################################################################################
@indirect
@route(PREFIX + '/popularEpisodes')
def PopularEpisodes(title2=''):
    """List the most popular episodes (v2 API), capped at EPISODES_LIMIT.

    Skips unavailable items (and, on MP4-only clients, items without a
    Brightcove id); missing metadata fields default to empty strings.
    """
    oc = ObjectContainer(title2=title2, art=R(ART))
    try:
        jsonObj = getFromAPI2(path='default/sections/popular_PopularVODs?limit=20&offset=0')
    except:
        return errorMessage(L("ERROR_EPISODES_RETREIVING"))
    try:
        elements = jsonObj["items"]
    except:
        return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
    for ei, e in enumerate(elements):
        if ei == EPISODES_LIMIT:
            break
        try: available = e["available"]
        except: available = False
        if onlyMP4() and "brightcoveId" not in e:
            available = False
        if not available:
            continue
        # Prefer the Brightcove stream; fall back to the KIJK embed player.
        try: newPath = BRIGHTCOVE_API_URL+e["brightcoveId"]
        except: newPath = KIJKEMBED_API_URL+e["id"]
        try: title = e["title"]
        except: title = ''
        try: seasonLabelShort = e["seasonLabelShort"]
        except: seasonLabelShort = ''
        try: episode = e["episode"]
        except: episode = ''
        try: episodeLabel = e["episodeLabel"]
        except: episodeLabel = ''
        try: summary = e["synopsis"]
        except: summary = ''
        try: thumbUrl = e["images"]["nonretina_image"]
        except: thumbUrl = ''
        try: artUrl = e["images"]["nonretina_image_pdp_header"]
        except: artUrl = ''
        thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
        art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
        try: millis = e["durationSeconds"]*1000
        except: millis = 0
        oc.add(VideoClipObject(
            title = title+" - "+seasonLabelShort+"E"+episode+": "+episodeLabel,
            thumb = thumb,
            summary = summary,
            art = art,
            duration = millis,
            url = newPath
        ))
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
####################################################################################################
@indirect
@route(PREFIX + '/popularPrograms')
def PopularPrograms(title2=''):
    """List popular programs (v2 API), de-duplicated by id and capped at
    PROGRAMS_LIMIT; each entry links to that program's EpisodeList.
    """
    oc = ObjectContainer(title2=title2, art=R(ART))
    shown = [];
    try:
        jsonObj = getFromAPI2(path='default/sections/popular_PopularFormats?offset=0')
    except:
        return errorMessage(L("ERROR_PROGRAMS_RETREIVING"))
    try:
        elements = jsonObj["items"]
    except:
        return errorMessage(L("ERROR_PROGRAMS_NO_RESULTS"))
    for e in elements:
        if len(shown) == PROGRAMS_LIMIT:
            break
        try: id = e["id"]
        except: id = ''
        # The feed may repeat programs; show each id only once.
        if id in shown:
            continue
        shown.append(id)
        try: available = e["available"]
        except: available = False
        if not available:
            continue
        try: title = e["title"]
        except: title = ''
        try: summary = e["synopsis"]
        except: summary = ''
        try: thumbUrl = e["images"]["nonretina_image"]
        except: thumbUrl = ''
        try: artUrl = e["images"]["nonretina_image_pdp_header"]
        except: artUrl = ''
        thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
        art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
        try: millis = int(e["duration"].replace(' min.', ''))*60*1000
        except: millis = 0
        oc.add(DirectoryObject(
            title = title,
            thumb = thumb,
            summary = summary,
            art = art,
            duration = millis,
            key = Callback(EpisodeList, title2=title, path=e["_links"]["self"], art=art)
        ))
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_PROGRAMS_NO_RESULTS"))
####################################################################################################
@indirect
@route(PREFIX + '/programsList')
def ProgramsList(title2=''):
    """Alphabetical A-Z program list built from the 'abc' page template,
    sorted case-insensitively by title.
    """
    oc = ObjectContainer(title2=title2, art=R(ART))
    #try:
    jsonObj = getFromAPI2(path='templates/page/abc')
    #except:
    # return errorMessage(L("ERROR_PROGRAMS_RETREIVING"))
    #try:
    components = jsonObj["components"]
    #except:
    # return errorMessage(L("ERROR_PROGRAMS_RETREIVING"))
    # Drill down: page components -> letter program lists -> letters -> programs.
    for comp in components:
        try: objType = comp["type"]
        except: objType = ''
        if objType == "letter_programs_list":
            pageProgList = comp["data"]["items"]
            for programslist in pageProgList:
                try: objType = programslist["type"]
                except: objType = ''
                if objType == "letter_programs":
                    letters = programslist["data"]["items"]
                    for letter in letters:
                        elements = letter["data"]["items"]
                        for e in elements:
                            try: available = e["available"]
                            except: available = False
                            # if onlyMP4() and "brightcoveId" not in e:
                            # available = False
                            if not available:
                                continue
                            try: title = e["title"]
                            except: title = ''
                            try: summary = e["synopsis"]
                            except: summary = ''
                            try: thumbUrl = e["images"]["nonretina_image"]
                            except: thumbUrl = ''
                            try: artUrl = e["images"]["nonretina_image_pdp_header"]
                            except: artUrl = ''
                            thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
                            art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
                            try: millis = int(e["duration"].replace(' min.', ''))*60*1000
                            except: millis = 0
                            oc.add(DirectoryObject(
                                title = title,
                                thumb = thumb,
                                summary = summary,
                                art = art,
                                duration = millis,
                                key = Callback(EpisodeList, title2=title, path=e["_links"]["self"], art=art)
                            ))
    oc.objects.sort(key = lambda obj: obj.title.lower())
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_PROGRAMS_NO_RESULTS"))
####################################################################################################
@indirect
@route(PREFIX + '/episodeList')
def EpisodeList(title2='', path='', art=R(ART)):
    """List the episodes of one program page.

    First harvests the 'horizontal-single' section; if it signals more
    items, additionally walks the nested 'slider' -> 'vertical' sections.
    `shown` tracks ids already listed so the two passes don't duplicate.
    """
    oc = ObjectContainer(title2=title2, art=art)
    shown = [];
    try:
        jsonObj = getFromAPI(path=path)
        sections = jsonObj["sections"]
    except:
        return errorMessage(L("ERROR_EPISODES_RETREIVING"))
    hasMoreItems = False
    for s in sections:
        try: objType = s["type"]
        except: objType = ''
        if objType == "horizontal-single":
            try: hasMoreItems = s["hasMoreItems"]
            # NOTE(review): this resets objType, not hasMoreItems — looks
            # like a typo; confirm the intent was `hasMoreItems = False`.
            except: objType = False
            try:
                elements = s["items"]
            except:
                return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
            for e in elements:
                if len(shown) == EPISODES_LIMIT:
                    break
                try: id = e["id"]
                except: id = ''
                shown.append(id)
                try: available = e["available"]
                except: available = False
                if onlyMP4() and "brightcoveId" not in e:
                    available = False
                if not available:
                    continue
                # Prefer the Brightcove stream; fall back to the embed player.
                try: newPath = BRIGHTCOVE_API_URL+e["brightcoveId"]
                except: newPath = KIJKEMBED_API_URL+e["id"]
                try: seasonLabelShort = e["seasonLabelShort"]
                except: seasonLabelShort = ''
                try: episode = e["episode"]
                except: episode = ''
                try: episodeLabel = e["episodeLabel"]
                except: episodeLabel = ''
                try: summary = e["synopsis"]
                except: summary = ''
                try: thumbUrl = e["images"]["nonretina_image"]
                except: thumbUrl = ''
                try: artUrl = e["images"]["nonretina_image_pdp_header"]
                except: artUrl = ''
                thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
                art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
                try: millis = e["durationSeconds"]*1000
                except: millis = 0
                oc.add(VideoClipObject(
                    title = seasonLabelShort+"E"+episode+": "+episodeLabel,
                    thumb = thumb,
                    summary = summary,
                    art = art,
                    duration = millis,
                    url = newPath
                ))
    if hasMoreItems:
        # Second pass through the paginated slider tabs.
        for s in sections:
            try: objType = s["type"]
            except: objType = ''
            if objType == "slider":
                try:
                    sliderSections = s["sections"]
                except:
                    continue
                for sliderSection in sliderSections:
                    try:
                        sliderTabSections = sliderSection["sections"]
                    except:
                        continue
                    for sliderTabSection in sliderTabSections:
                        try: objType = sliderTabSection["type"]
                        except: objType = ''
                        if objType == "vertical":
                            try:
                                elements = sliderTabSection["items"]
                            except:
                                continue
                            for e in elements:
                                if len(shown) == PROGRAMS_LIMIT:
                                    break
                                try: id = e["id"]
                                except: id = ''
                                shown.append(id)
                                try: available = e["available"]
                                except: available = False
                                if onlyMP4() and "brightcoveId" not in e:
                                    available = False
                                if not available:
                                    continue
                                try: newPath = BRIGHTCOVE_API_URL+e["brightcoveId"]
                                except: newPath = KIJKEMBED_API_URL+e["id"]
                                try: seasonLabelShort = e["seasonLabelShort"]
                                except: seasonLabelShort = ''
                                try: episode = e["episode"]
                                except: episode = ''
                                try: episodeLabel = e["episodeLabel"]
                                except: episodeLabel = ''
                                try: summary = e["synopsis"]
                                except: summary = ''
                                try: thumbUrl = e["images"]["nonretina_image"]
                                except: thumbUrl = ''
                                try: artUrl = e["images"]["nonretina_image_pdp_header"]
                                except: artUrl = ''
                                thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
                                art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
                                try: millis = e["durationSeconds"]*1000
                                except: millis = 0
                                oc.add(VideoClipObject(
                                    title = seasonLabelShort+"E"+episode+": "+episodeLabel,
                                    thumb = thumb,
                                    summary = summary,
                                    art = art,
                                    duration = millis,
                                    url = newPath
                                ))
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_EPISODES_NO_RESULTS"))
####################################################################################################
@indirect
@route(PREFIX + '/search')
def Search(title2='', query=''):
    """Search the grouped-results endpoint for `query`.

    Series results become directories linking to their EpisodeList; each
    result's episodes become directly playable video clips.  The summary
    shown is the channel name resolved from CHANNELS by slug.
    """
    oc = ObjectContainer(title2=title2, art=R(ART))
    try:
        encodedQuery = urllib.quote_plus(query)
        jsonObj = getSearchResult(path='default/searchresultsgrouped?search='+encodedQuery)
    except:
        return errorMessage(L("ERROR_SEARCH_RETREIVING"))
    try:
        elements = jsonObj["results"]
    except:
        return errorMessage(L("ERROR_PROGRAMS_NO_RESULTS"))
    for e in elements:
        try: objType = e["type"]
        except: objType = ''
        if objType == "series":
            try: title = e["title"]+": "+e["subtitle"]
            except: title = ''
            try: summary = next((item for item in CHANNELS if item['slug'] == e["channel"]), None)["name"]
            except: summary = ''
            try: thumbUrl = e["images"]["nonretina_image"]
            except: thumbUrl = ''
            try: artUrl = e["images"]["nonretina_image_pdp_header"]
            except: artUrl = ''
            thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
            art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
            oc.add(DirectoryObject(
                title = title,
                thumb = thumb,
                summary = summary,
                art = art,
                key = Callback(EpisodeList, title2=title, path='default/pages/series-'+e["_links"]["self"], art=art)
            ))
        try: episodes = e["episodes"]
        except: episodes = []
        for episode in episodes:
            # Prefer the Brightcove stream; fall back to the embed player.
            try: newPath = BRIGHTCOVE_API_URL+episode["brightcoveId"]
            except: newPath = KIJKEMBED_API_URL+episode["id"]
            try: title = episode["title"]+": "+episode["subtitle"]
            except: title = ''
            try: summary = next((item for item in CHANNELS if item['slug'] == e["channel"]), None)["name"]
            except: summary = ''
            try: thumbUrl = episode["images"]["nonretina_image"]
            except: thumbUrl = ''
            try: artUrl = episode["images"]["nonretina_image_pdp_header"]
            except: artUrl = ''
            thumb = Resource.ContentsOfURLWithFallback(thumbUrl, R(ICON))
            art = Resource.ContentsOfURLWithFallback(artUrl, R(ART))
            oc.add(VideoClipObject(
                title = title,
                thumb = thumb,
                summary = summary,
                art = art,
                url = newPath
            ))
    if len(oc) > 0:
        return oc
    else:
        return errorMessage(L("ERROR_SEARCH_NO_RESULTS"))
####################################################################################################
@indirect
def getFromAPI(path=''):
    """GET `path` from the v1 KIJK API and return the parsed JSON body."""
    url = API_URL_V1 + path
    Log("GetAPIV1Result")
    Log(url)
    response = requests.get(url, headers=HTTP.Headers, verify=certifi.where())
    Log(response)
    return response.json()
@indirect
def getSearchResult(path=''):
    """GET `path` from the v2 KIJK API and wrap the raw JSON array in a
    {"results": [...]} object so callers can treat it like other responses.

    Returns the parsed JSON dict.
    """
    Log("GetSearchResult")
    # Log the URL actually requested: the original logged API_URL_V1 while
    # requesting API_URL_V2, which made debugging misleading.
    Log(API_URL_V2+path)
    receivedJson = requests.get(API_URL_V2+path, headers=HTTP.Headers, verify=certifi.where())
    Log(receivedJson)
    receivedJson = "{\"results\": "+receivedJson.text+"}"
    jsonObj = json.loads(receivedJson)
    return jsonObj
####################################################################################################
@indirect
def getFromAPI2(path=''):
    """GET `path` from the v2 KIJK API and return the parsed JSON body."""
    url = API_URL_V2 + path
    Log("GetAPIV2Result")
    Log(url)
    response = requests.get(url, headers=HTTP.Headers, verify=certifi.where())
    Log(response)
    return response.json()
####################################################################################################
def onlyMP4():
    """Return True when the client can only play MP4 (Samsung smart TVs)."""
    return Client.Platform == 'Samsung'
####################################################################################################
def errorMessage(message = ''):
    """Build an ObjectContainer that shows an error dialog with `message`."""
    return ObjectContainer(header=L("ERROR"), message=message)
| mentosmenno2/Kijk.bundle | Contents/Code/__init__.py | __init__.py | py | 18,416 | python | en | code | 2 | github-code | 90 |
70243711977 | import time
from numpy import random
import matplotlib.pyplot as plt
pivot_index = []
def swap(arr, a, b):
    """Exchange arr[a] and arr[b] in place."""
    tmp = arr[a]
    arr[a] = arr[b]
    arr[b] = tmp
def findMedian(arr, l, n):
    """Return the median (upper middle after sorting) of arr[l:l+n].

    Works on a sorted copy, so `arr` itself is not mutated.
    """
    group = sorted(arr[l:l + n])
    return group[n // 2]
def partitions(arr, l, r, x):
    """Lomuto partition of arr[l..r] around the value x.

    First moves an occurrence of x to arr[r], then rearranges so every
    element <= x precedes it.  Returns the final index of x.
    """
    # Bring the chosen pivot value to the end of the range.
    for idx in range(l, r):
        if arr[idx] == x:
            swap(arr, r, idx)
            break
    pivot = arr[r]
    store = l
    for scan in range(l, r):
        if arr[scan] <= pivot:
            swap(arr, store, scan)
            store += 1
    swap(arr, store, r)
    return store
def medians_of_median(arr, l, r, k, p = 5, t = False):
    """Return the k-th smallest element of arr[l..r] (1-based k) using the
    median-of-medians selection algorithm with group size `p`.

    When `t` is True, every pivot position chosen on arr is appended to the
    module-level `pivot_index` list (used by the plotting script below).
    Returns float('inf') when k is out of range.  Partitions `arr` in place.
    """
    global pivot_index
    # If k is smaller than number of
    # elements in array
    if (k > 0 and k <= r - l + 1):
        # Number of elements in arr[l..r]
        n = r - l + 1
        # Divide arr[] in groups of size 5,
        # calculate median of every group
        # and store it in median[] array.
        median = []
        i = 0
        while (i < n // p):
            median.append(findMedian(arr, l + i * p, p))
            i += 1
        # For last group with less than 5 elements
        if (i * p < n):
            median.append(findMedian(arr, l + i * p, n % p))
            i += 1
        # Find median of all medians using recursive call.
        # If median[] has only one element, then no need
        # of recursive call
        if i == 1:
            medOfMed = median[i - 1]
        else:
            medOfMed = medians_of_median(median, 0,i - 1, i // 2,p)
        # Partition the array around a medOfMed
        # element and get position of pivot
        # element in sorted array
        pos = partitions(arr, l, r, medOfMed)
        if t:
            pivot_index.append(pos)
        # If position is same as k
        if (pos - l == k - 1):
            return arr[pos]
        if (pos - l > k - 1): # If position is more,
            # recur for left subarray
            return medians_of_median(arr, l, pos - 1, k,p,t)
        # Else recur for right subarray
        return medians_of_median(arr, pos + 1, r,k - pos + l - 1,p,t)
    # If k is more than the number of
    # elements in the array
    return float('inf')
# For each input distribution, select the median of 50k samples and plot
# how far each recursion's pivot landed from the target index.
dataSets = {'uniform': random.uniform , 'normal' : random.normal}
n = 50000
mid = n//2
plt.rcParams["figure.autolayout"] = True
plt.xlabel('Recursion depth')
plt.ylabel('pivot element distance')
for e,key in enumerate(dataSets):
    x = []
    distance = []
    # Rebinding the module-level list resets the pivot trace for this run;
    # medians_of_median appends to it via its `global` declaration.
    pivot_index = []
    arr = dataSets[key](size = n)
    medians_of_median(arr,0,n-1,mid,5,True)
    for num,i in enumerate(pivot_index):
        x.append(num+1)
        distance.append(abs(mid - i - 1))
    # print(distance)
    plt.plot(x,distance,'o',label = key)
plt.legend(loc='best')
plt.show()
73968071335 | import numpy as np
class Realization:
    """One realization of a hawk (predator) / prey population model.

    The trajectory is stored as three parallel lists -- ``timeOfEvent``,
    ``hawkNumber`` and ``preyNumber`` -- filled in either by a stochastic
    Gillespie simulation or by a deterministic forward-Euler integration of
    the Lotka-Volterra equations.
    """

    def __init__(self, preyBirthRate, hawkHuntingRate, hawkDeathRate):
        # Trajectory recorded so far (parallel lists).
        self.timeOfEvent = []
        self.hawkNumber = []
        self.preyNumber = []
        # Model rate constants.
        self.preyBirthRate = preyBirthRate
        self.hawkHuntingRate = hawkHuntingRate
        self.hawkDeathRate = hawkDeathRate

    def gillespieSimulation(self, initialHawks, initialPrey, timeLimit):
        """Run a stochastic (Gillespie) simulation up to ``timeLimit``.

        Reactions: prey birth, hunting (prey -> hawk), hawk death.
        """
        currentTime = 0
        hawks = initialHawks
        prey = initialPrey
        self.timeOfEvent = [currentTime]
        self.hawkNumber = [hawks]
        self.preyNumber = [prey]
        while currentTime < timeLimit:
            # Propensities of the three reactions at the current state.
            preyBirthRate = self.preyBirthRate * prey
            hawkHuntingRate = self.hawkHuntingRate * prey * hawks
            hawkDeathRate = self.hawkDeathRate * hawks
            totalRate = preyBirthRate + hawkHuntingRate + hawkDeathRate
            if totalRate <= 0:
                # Absorbing state: no reaction can fire.  Advance time by a
                # fixed step without drawing an event.  (The original still
                # tried to choose an event here, dividing by totalRate == 0.)
                currentTime += 0.02
                self.timeOfEvent.append(currentTime)
                self.hawkNumber.append(hawks)
                self.preyNumber.append(prey)
                continue
            # Exponentially distributed waiting time until the next event.
            timeUntilNextEvent = -np.log(np.random.rand()) / totalRate
            # Choose which reaction fires, proportionally to its propensity.
            randomNumber = np.random.rand()
            if randomNumber < preyBirthRate / totalRate:
                prey += 1
            elif randomNumber < (preyBirthRate + hawkHuntingRate) / totalRate:
                prey -= 1
                hawks += 1
            else:
                hawks -= 1
            # Update time and record the new state.
            currentTime += timeUntilNextEvent
            self.timeOfEvent.append(currentTime)
            self.hawkNumber.append(hawks)
            self.preyNumber.append(prey)

    def getRealization(self):
        """Return the (times, hawks, prey) trajectory lists."""
        return self.timeOfEvent, self.hawkNumber, self.preyNumber

    def getState(self, time):
        """Return (hawks, prey) as of the last event strictly before the
        first event at/after ``time``; None when ``time`` is past the end.

        Fixes the original's index-(-1) wraparound, which returned the *last*
        recorded state whenever ``time`` preceded the first event.
        """
        for index, eventTime in enumerate(self.timeOfEvent):
            if eventTime >= time:
                previous = max(index - 1, 0)
                return self.hawkNumber[previous], self.preyNumber[previous]
        return None

    def convertToRegularSteps(self, maximumTime, stepsize):
        """Resample the irregular event trajectory onto a regular time grid.

        The Gillespie process produces random event times; ensemble averages
        need states sampled at common times, so this rewrites the three
        trajectory lists in place on a grid of ``stepsize`` increments.
        """
        regularHawksNumber = []
        regularPreyNumber = []
        regularTime = [step * stepsize for step in range(round(maximumTime / stepsize) + 1)]
        for currentTimeStep in regularTime:
            if currentTimeStep == 0:
                regularHawksNumber.append(self.hawkNumber[0])
                regularPreyNumber.append(self.preyNumber[0])
            else:
                # Single lookup per grid point (the original scanned twice).
                hawksAt, preyAt = self.getState(currentTimeStep)
                regularHawksNumber.append(hawksAt)
                regularPreyNumber.append(preyAt)
        self.hawkNumber = regularHawksNumber
        self.preyNumber = regularPreyNumber
        self.timeOfEvent = regularTime

    def getNumberOfTimeSteps(self):
        """Return how many states have been recorded."""
        return len(self.timeOfEvent)

    def lotkaVolterraSimulation(self, initialHawks, initialPrey, timeLimit, timeStep):
        """Deterministic forward-Euler integration of the Lotka-Volterra ODEs."""
        currentTime = 0
        hawks = initialHawks
        prey = initialPrey
        self.timeOfEvent = [currentTime]
        self.hawkNumber = [hawks]
        self.preyNumber = [prey]
        while currentTime < timeLimit:
            # Both increments are computed from the *current* state before
            # either population is updated.
            preyChange = (self.preyBirthRate * prey - self.hawkHuntingRate * prey * hawks) * timeStep
            hawkChange = (self.hawkHuntingRate * prey * hawks - self.hawkDeathRate * hawks) * timeStep
            hawks += hawkChange
            prey += preyChange
            currentTime += timeStep
            self.timeOfEvent.append(currentTime)
            self.hawkNumber.append(hawks)
            self.preyNumber.append(prey)
| mark1ry/advanced_statistical | gillespie_algorithm/realization.py | realization.py | py | 4,286 | python | en | code | 0 | github-code | 90 |
41654009449 | import math
def is_prime(num):
    """Return True when num is a prime number.

    Fixes the original, which reported 0, 1 and negative numbers as prime
    (its loop body never ran, leaving the flag at True), and bounds trial
    division at sqrt(num) instead of num.
    """
    if num < 2:
        return False
    for candidate in range(2, int(num ** 0.5) + 1):
        if num % candidate == 0:
            return False
    return True
def first_divisor(num, limit=2000):
    """Return the smallest non-trivial divisor of num below ``limit``.

    The previous version hard-coded the 2000 search bound; it is now a
    parameter (defaulting to 2000, so existing callers are unchanged).
    Returns None when no divisor in [2, limit) is found.
    """
    for f in range(2, limit):
        if num % f == 0:
            return f
    return None
def is_jamcoin(base):
for k in range(2, 11):
if is_prime(base[k]):
return False
else:
return True
# Google Code Jam "jamcoins": find J numbers of N binary-looking digits
# (starting and ending in 1) whose base-2..10 interpretations are all
# composite, and print a non-trivial divisor for each base.
text_file = open("C-large.in", "r")
lines = text_file.readlines()
# NOTE(review): lines[0] (the case count) is skipped; lines[1] holds "N J".
que = lines[1]
z1, z2 = que.split(' ')
text_file.close()
N = int(z1)
J = int(z2)
count = 0
# Smallest/largest N-digit numbers made of decimal digits; converting their
# decimal string with base=2 maps them into the range of N-bit integers.
n = (10**(N-1))+1
max_n = (10**N)
b = int(str(n), base=2)
max_b = int(str(max_n), base=2)
i2=0
ab = []
# Enumerate integers i and reinterpret their binary digit string as a decimal
# number: this generates exactly the numbers whose digits are all 0/1.
for i in range(b, max_b):
    i2+=1
    # Keep only candidates whose last digit is 1 (jamcoins must end in 1).
    if int((format(i, 'b')), base=10) % 10 != 0:
        ab.append(int(format(i, 'b'), base=10))
    if i2==5000:  # cap the candidate pool; 5000 is ample for J outputs
        break
print(ab)
print(len(ab))
a = ab
print(a)
print(len(a))
text_file = open("Output.txt", "a")
text_file.write("Case #1:\n")
text_file.close()
for j in range(len(a)):
    base = []
    divisor = []
    # added these line so base value and the list position value is same.
    base.append(0)
    base.append(0)
    divisor.append(0)
    divisor.append(0)
    # base[k] = value of the candidate's digit string interpreted in base k.
    for k in range(2, 11):
        base.append(0)
        a2 = a[j]
        for f in range(0, N):
            digit = a2 % 10
            a2 = a2//10
            base[k] += digit * (k ** f)
    print(base)
    # divisor[k] = smallest non-trivial divisor of base[k] (None if prime-ish).
    for k in range(2, 11):
        divisor.append(0)
        divisor[k] = first_divisor(base[k])
        #print(k)
    print(divisor)
    # A candidate is a jamcoin only if every base produced a divisor.
    if not None in divisor:
        text_file = open("Output.txt", "a")
        text_file.write("%s" % a[j])
        # print(a[j], ' ', end="")
        for f in range(2, 11):
            text_file.write(" %s" % divisor[f])
        text_file.write("\n")
        text_file.close()
        count += 1
    if count == J:
        break
| DaHuO/Supergraph_exp | test_input/CJ_0/16_0_3_adityamohta21_main.py | 16_0_3_adityamohta21_main.py | py | 1,993 | python | en | code | 0 | github-code | 90 |
10583624448 | from django.contrib.auth.views import LoginView, LogoutView
from django.urls import path
from accountapp.views import AccountCreateView, AccountDetailView, AccountUpdateView, AccountDeleteView
app_name = "accountapp"
urlpatterns = [
    # Unlike create (which subclasses a view and configures parameters),
    # login/logout need nothing fancy, so the built-in auth views are used
    # directly.  LoginView needs a template (the screen shown at login).
    path("login/", LoginView.as_view(template_name='accountapp/login.html'), name='login'),
    path("logout/", LogoutView.as_view(), name='logout'),
    # Class-based views must be wired up with .as_view().
    path("create/", AccountCreateView.as_view(), name='create'),
    path("update/<int:pk>", AccountUpdateView.as_view(), name='update'),
    # To fetch a specific user's info we need their primary key:
    # detail/<int:pk> captures an int named pk from the URL.
    path("detail/<int:pk>", AccountDetailView.as_view(), name='detail'),
    path("delete/<int:pk>", AccountDeleteView.as_view(), name='delete'),
]
| dooli1971039/Pinterest_django | accountapp/urls.py | urls.py | py | 1,180 | python | ko | code | 0 | github-code | 90 |
43965908511 | import os, shutil, cv2
import numpy as np
import argparse
def randomTransform(img):
    """Add weak Gaussian noise (sigma = 1.5) to an image, returning uint8.

    The noisy values are clipped to [0, 255] before the uint8 cast.  The
    original cast directly, so pixels pushed below 0 or above 255 wrapped
    around and produced salt-and-pepper artefacts.
    """
    x, y, c = img.shape
    noisy = img + 1.5 * np.random.randn(x, y, c)
    return np.clip(noisy, 0, 255).astype(np.uint8)
def sharp(img):
    """Apply a mild 3x3 sharpening kernel to the image."""
    sharpen_kernel = np.array(
        [[0, -0.05, 0],
         [-0.05, 1.2, -0.05],
         [0, -0.05, 0]], np.float32)
    return cv2.filter2D(img, -1, kernel = sharpen_kernel)
def average(img):
    """Apply a 3x3 box (mean) blur to the image."""
    return cv2.blur(img, (3, 3))
def gaussian(img):
    """Apply a 3x3 Gaussian blur (sigma derived from the kernel size)."""
    return cv2.GaussianBlur(img, (3, 3), 0)
if __name__ == '__main__':
    # Apply one of the transforms above to every HR image and save the result.
    parser = argparse.ArgumentParser()
    parser.add_argument('--HR_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/CelebA/VALHR",
                        help = 'Path to val HR.')
    parser.add_argument('--Save_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/CelebA/VALHR_Transform",
                        help = 'Path to new HR.')
    parser.add_argument('--Transform', type = str, default = "random", help = 'Type of transform')
    args = parser.parse_args()
    HR_Root = args.HR_Root
    Save_Root = args.Save_Root
    trans = args.Transform
    # Map CLI names to transforms and fail fast on an unknown name; the
    # original if/elif chain fell through and crashed later with a NameError.
    transforms = {
        'random': randomTransform,
        'sharp': sharp,
        'average': average,
        'gaussian': gaussian,
    }
    if trans not in transforms:
        raise ValueError('Unknown --Transform value: %s' % trans)
    # exist_ok replaces the original bare try/except that silently swallowed
    # *all* makedirs errors (including permission problems).
    os.makedirs(Save_Root, exist_ok=True)
    for i, hr_name in enumerate(os.listdir(HR_Root)):
        hr_path = os.path.join(HR_Root, hr_name)
        img = cv2.imread(hr_path)
        new_img = transforms[trans](img)
        save_path = os.path.join(Save_Root, hr_name)
        cv2.imwrite(save_path, new_img)
        print(i)
22696772611 | file = open('puzzle3.in')
map = []
slopes = [
[1, 1],
[3, 1],
[5, 1],
[7, 1],
[1, 2]
]
for line in file:
map.append(line.strip())
treeproduct = 1;
for slope in slopes:
right, down = slope
trees = 0
x = 0
y = 0
while y < len(map):
if map[y][x] == '#':
trees += 1
y = y + down
x = (x + right) % (len(line))
treeproduct *= trees
print(len(map))
print(treeproduct)
| vimtaai/aoc | 2020/day03/puzzle3.py | puzzle3.py | py | 412 | python | en | code | 0 | github-code | 90 |
42785835099 |
import json
import logging
import os
import urllib.request
from datetime import datetime
import aiofiles
from mojang_api import Player
logger = logging.getLogger('utils.prices')
# Skyblock Price Table
# Features autocorrect, string eval, full sanitizer
# Three methods of loading files for prices.
def lJVL(fname, absolute=False):
    """Load and return the JSON value stored in a file.

    When ``absolute`` is False the path is resolved relative to the current
    working directory.  Uses json.load on the open handle instead of the
    original read-then-loads round trip, and os.path.join instead of manual
    '/' concatenation.
    """
    path = fname if absolute else os.path.join(os.getcwd(), fname)
    with open(path, "r") as file_handle:
        return json.load(file_handle)
class PricesTable:
    """Maintains the scammer list persisted in database/scammer.json.

    NOTE(review): the default for ``JFSc`` is evaluated ONCE, at class
    definition time, so every PricesTable() created without an explicit
    mapping shares the same dict loaded at import -- confirm this sharing
    is intended.
    """
    def __init__(self, *, JFSc: dict = lJVL('database/scammer.json')):
        # In-memory cache of the scammer mapping (uuid -> record).
        self.scammer = JFSc
    async def addScammer(self, *, username, reason, responsible_staff):
        """Resolve ``username`` via Mojang and record them as a scammer.

        Returns None (and records nothing) when the Mojang lookup fails.
        """
        try:
            players = Player(username=username)
            uuid = players.uuid
            name = players.username  # resolved but unused here
        except:
            # Best-effort: any lookup/API failure is treated as "no player".
            return None
        content = self.scammer
        content[uuid] = {'uuid': uuid, 'reason': reason, 'operated_staff': responsible_staff}
        jsonwrite = json.dumps(content, indent=4, sort_keys=True)
        async with aiofiles.open(os.path.join('database', 'scammer.json'), 'w') as f:
            await f.write(jsonwrite)
            # NOTE(review): redundant -- the async with already closes f.
            await f.close()
        self.scammer = content
    async def removeScammer(self, *, username):
        """Remove a scammer entry; returns 'good' on success, None on a
        failed Mojang lookup.

        NOTE(review): content.pop(uuid) raises KeyError when the uuid is not
        listed -- confirm callers guard against that.
        """
        try:
            players = Player(username=username)
            uuid = players.uuid
            name = players.username  # resolved but unused here
        except:
            return None
        content = self.scammer
        content.pop(uuid)
        jsonwrite = json.dumps(content, indent=4, sort_keys=True)
        async with aiofiles.open(os.path.join('database', 'scammer.json'), 'w') as f:
            await f.write(jsonwrite)
            await f.close()
        self.scammer = content
        return 'good'
    async def queryScammer(self, username):
        """Look up ``username`` (or a raw uuid) in the scammer file.

        Returns 'INVPLY' for an unresolvable player, 'NOTSCM' when the uuid
        is not listed, otherwise [uuid, reason, staff, name].

        NOTE(review): unlike add/remove, this reads the JSON file directly
        instead of using the self.scammer cache -- confirm the divergence is
        deliberate.
        """
        try:
            players = Player(username=username)
            uuid = players.uuid
            name = players.username
        except Exception:
            # Fall back to treating the argument as a uuid.
            try:
                players = Player(uuid=username)
                uuid = username
                name = players.username
            except Exception:
                return 'INVPLY'
        async with aiofiles.open(os.path.join('database', 'scammer.json'), 'r') as f:
            content = json.loads(await f.read())
        if uuid not in content:
            return 'NOTSCM'
        scammerinfo = content[uuid]
        # NOTE(review): f is already closed by the async with at this point.
        await f.close()
        return [scammerinfo['uuid'], scammerinfo['reason'], scammerinfo['operated_staff'], name]
| DjagaMC/Pit-scammer-list | utils/prices.py | prices.py | py | 2,599 | python | en | code | 0 | github-code | 90 |
39186405359 | #!/usr/bin/python
from psana import *
import numpy as np
import argparse
import sys
import os
def test_args(args):
"""Checks a couple of the args to make sure they are correct"""
assert(args.year in [2015,2016])
assert(args.run is not None)
def get_processing_by_year(year):
    """Simple database of parameters for these two experiments.

    Fixes the original, which ignored its ``year`` parameter and read the
    module-level ``args.year`` instead (and left the result names unbound
    for any other year).

    year: int year of the experiment (2015 or 2016)
    Returns (experiment name, detector name, ADU per photon, output dir).
    """
    if year == 2016:
        exp_name = 'cxil2316'
        det_name = 'Sc1Epix'
        adu_per_photon = 35
        outDir = '/reg/d/psdm/cxi/cxil2316/scratch/scott/'
    elif year == 2015:
        exp_name = 'cxij4915'
        det_name = 'Sc1Xes'
        outDir = '/reg/d/psdm/cxi/cxij4915/scratch/scott/'
        adu_per_photon = 35
    else:
        raise ValueError('unsupported year: %r' % (year,))
    return(exp_name, det_name, adu_per_photon, outDir )
def get_num_events(args, run):
    """Number of events (x-ray pulses) to process for this run.

    An explicit --end argument wins; otherwise every recorded event time in
    the run is processed.
    """
    if args.end is not None:
        return args.end
    return np.shape(run.times())[0]
def num_jobs(numEvents, args):
    """Number of batch jobs needed to cover all events.

    Ceiling division of the event count by the per-job batch size
    (args.numEventsPerRun).
    """
    events_per_job = float(args.numEventsPerRun)
    return int(np.ceil(numEvents / events_per_job))
def create_output_dir(outDir, exp_name, run):
    """Create (if needed) and return the per-run output directory path.

    outDir: base scratch directory (must end with a path separator).
    exp_name: experiment name used in the directory name.
    run: run number, zero-padded to four digits.
    """
    outputDir = outDir + '%s_r%04d/' % (exp_name, run)
    if not os.path.isdir(outputDir):
        try:
            os.makedirs(outputDir)
            print ('Creating directory: ' + outputDir)
        except OSError:
            # Another process may have created it between the isdir check
            # and makedirs (the jobs here run concurrently on a cluster);
            # only re-raise if the directory genuinely could not be made.
            if not os.path.isdir(outputDir):
                raise
    return(outputDir)
def submit_jobs(numJobs, args, numEvents, exp_name, out_dir):
    """Submit the processing jobs to the LCLS batch system via bsub.

    numJobs: how many jobs to submit.
    args: parsed CLI arguments (run, queue, year, numEventsPerRun).
    numEvents: total number of events (x-ray pulses) to process.
    exp_name: experiment name as stored on the servers.
    out_dir: directory that receives the processed h5 files and logs.
    """
    for jobNumber in range(numJobs):
        startVal = args.numEventsPerRun * jobNumber
        # Last job is clamped so it never runs past the final event.
        endVal = min(args.numEventsPerRun * (jobNumber + 1) - 1, numEvents - 1)
        # NOTE(review): outputFile is computed but never used -- kept for
        # parity with the original behaviour.
        outputFile = out_dir + '%s_r%04d_%04d_%04d-PCNOHITS.h5' % (exp_name, args.run, startVal, endVal)
        logName = out_dir + 'bsub_%s_r%04d_%04d_%04d.log' % (exp_name, args.run, startVal, endVal)
        command = ('bsub -q %s -o %s -n 1 python get_xes_photon_counting_no_hits.py'
                   ' -r %d -s %d -e %d -o %s -y %d'
                   % (args.queue, logName, args.run, startVal, endVal, out_dir, args.year))
        print (command)
        os.system(command)
def main(args):
    """Validate arguments, open the psana data source and submit the jobs."""
    test_args(args)
    exp_name, det_name, adu_per_photon, outDir = get_processing_by_year(args.year)
    # Open the psana data source in indexed (:idx) mode and grab the run.
    ds = DataSource('exp=%s:run=%d:idx' % (exp_name, args.run))
    run = ds.runs().next()
    # Work out how many events to process and how many jobs that takes.
    numEvents = get_num_events(args, run)
    numJobs = num_jobs(numEvents, args)
    print ('Submitting %d jobs of %d events each' % (numJobs, args.numEventsPerRun)) #Print how many jobs will be run
    out_dir = create_output_dir(outDir, exp_name, args.run)#Create output dir
    submit_jobs(numJobs, args, numEvents, exp_name, out_dir)
submit_jobs(numJobs, args, numEvents, exp_name, out_dir)
# CLI definition.  Note that parse_args() runs at import time (module level),
# because main(args) below reads the resulting global.
parser = argparse.ArgumentParser(description='Submit batch jobs for processing from xtc to summed XES data')
parser.add_argument('-n', '--numEventsPerRun', help='Number of events per job submitted', type=int, default=500)
parser.add_argument('-r', '--run', help='run number', type=int)
parser.add_argument('-L', '--logName', help='log from bsub', default='bsub.log', type=str)
parser.add_argument('-q', '--queue', help='queue to submit job to', default='psanaq')
parser.add_argument('-e', '--end', help='Number of event number to run', type=int)
parser.add_argument('-x', '--xtcav_recon', help='minimum XTCAV reconstruction agreement for use in summing', type=float)
parser.add_argument('-y', '--year', help='year data was taken', type=int)
args = parser.parse_args()
if __name__ == "__main__":
main(args) | scott-c-jensen/LCLS_Analysis | Step1_Process_Raw_Data_MnCl2/submitBatchPhotonCounting.py | submitBatchPhotonCounting.py | py | 4,420 | python | en | code | 0 | github-code | 90 |
7345695658 | import numpy as np
import cv2
import cv2 as cv
from glob import glob
import os
# Walk ./sidharth, detect the face in every image and save a padded crop.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
count = 0
for root, dirs, files in os.walk("./sidharth"):
    for filename in files:
        count = count + 1
        # Read the walked file itself.  The original hard-coded a placeholder
        # path here and ignored `filename`, so every iteration re-read the
        # same (non-existent) image.
        img = cv.imread(os.path.join(root, filename))
        if img is None:
            # Not an image cv2 can decode; the original crashed in cvtColor.
            continue
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            # Crop with 50px of extra margin on the right/bottom edges.
            cropped_face = img[y:y+h+50, x:x+w+50]
            cv2.imwrite(f'cropped_sidharth/{count}.jpg', cropped_face)
| milangeorge2000/face_recognition | face_detection.py | face_detection.py | py | 728 | python | en | code | 0 | github-code | 90 |
18362152339 | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def divisors(n):
    """Return every divisor of n (unsorted: each small divisor is followed
    immediately by its cofactor)."""
    found = []
    for small in range(1, int(n ** 0.5) + 1):
        if n % small:
            continue
        found.append(small)
        partner = n // small
        if partner != small:
            found.append(partner)
    return found
def main():
    """For each divisor d of sum(A) (ascending), check whether at most K
    +1/-1 transfers can make every element divisible by d; print the best d.

    The answer must divide sum(A) since transfers preserve the total.
    """
    from itertools import accumulate
    n, k = map(int, readline().split())
    a = list(map(int, readline().split()))
    s = sum(a)
    divs = divisors(s)
    divs.sort()
    ans = 1
    for div in divs:
        rem = [0] * n
        rem2 = [0] * n
        for i, x in enumerate(a):
            rem[i] = x % div
        # Sort the remainders: the optimal split decreases the small
        # remainders to 0 and raises the large ones to div.
        rem.sort()
        for i in range(n):
            rem2[i] = div - rem[i]
        acc = list(accumulate(rem))    # cost of zeroing the first i+1 remainders
        acc2 = list(accumulate(rem2))  # cost of topping up to div
        for i in range(n):
            # Split point i: elements 0..i go down, i+1..n-1 go up; the two
            # costs must match, so the required ops are the larger of the two.
            cnt = max(acc[i], acc2[-1] - acc2[i])
            if cnt <= k:
                ans = div
                break
    print(ans)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02955/s191021998.py | s191021998.py | py | 1,044 | python | en | code | 0 | github-code | 90 |
23430453816 | '''
String Rotation: Assume you have a method isSubstring which
checks if one word is a substring
of another. Given two strings, s1 and s2, write code to
check if s2 is a rotation of s1 using only one
call to isSubstring (e.g., "waterbottle" is a rotation of "erbottlewat").
'''
import unittest
def isSubstring(s1, s2):
    """Return True when s2 is a rotation of s1.

    Uses the classic trick: every rotation of s1 appears inside s1 + s1.
    Adds the length check the original was missing -- without it,
    isSubstring('ab', 'abab') was wrongly reported True because 'abab'
    occurs in 'abab' even though it is not a rotation of 'ab'.
    Empty strings are not considered rotations (matching the original).
    """
    if not s1 or not s2:
        return False
    if len(s1) != len(s2):
        return False
    return s2 in s1 + s1
class Test(unittest.TestCase):
    """Unit tests for isSubstring's rotation check."""
    def test_1(self):
        # 'ishd' is 'dish' rotated by one position.
        self.assertTrue(isSubstring('dish', 'ishd'))
    def test_2(self):
        # 'ishd' shares letters with 'disk' but is not a rotation of it.
        self.assertFalse(isSubstring('disk', 'ishd'))
unittest.main(verbosity=2) | sharonpamela/coding_challenges | ctci/1_string_rotation.py | 1_string_rotation.py | py | 1,101 | python | en | code | 0 | github-code | 90 |
18180606929 | n = int(input())
x = list(input())
def popcount(n):
    """Return the number of set bits in the non-negative integer n.

    Uses bin(n).count('1') instead of the original hand-rolled loop that
    summed the digits of the binary string.
    """
    return bin(n).count('1')
# For every bit position i of the N-bit input X, flip that bit and count how
# many "amari -> amari % popcount(amari)" steps reach 0 (problem f(n)).
cnt = 0
for i in range(n):
    if x[i] == '1':
        cnt += 1
# plus[idx]  = 2^idx modulo (cnt + 1)  (popcount after flipping a 0 to 1)
plus = [0 for i in range(n)]
# minus[idx] = 2^idx modulo (cnt - 1)  (popcount after flipping a 1 to 0)
minus = [0 for i in range(n)]
if cnt == 0:
    plus[0] = 0  # cnt + 1 == 1, and anything mod 1 is 0
else:
    plus[0] = 1
if cnt != 1:
    minus[0] = 1
for i in range(1, n):
    plus[i] = (plus[i-1]*2) % (cnt+1)
    if cnt != 1:
        # cnt - 1 == 0 would divide by zero, hence the guard.
        minus[i] = (minus[i-1]*2) % (cnt-1)
origin = int(''.join(x), base=2)
amariplus = origin % (cnt+1)
if cnt != 1:
    amariminus = origin % (cnt-1)
for i in range(n):
    if x[i] == '0':
        # Flipping bit n-i-1 on: popcount becomes cnt + 1.
        amari = (amariplus + plus[n-i-1]) % (cnt+1)
    else:
        if cnt != 1:
            # Flipping bit n-i-1 off: popcount becomes cnt - 1.
            amari = (amariminus - minus[n-i-1]) % (cnt-1)
        else:
            # Flipping the only 1 off gives value 0, which needs 0 steps.
            print(0)
            continue
    # First reduction done above; iterate the map until the value hits 0.
    ans = 1
    while amari != 0:
        ans += 1
        amari = amari % popcount(amari)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02609/s210975603.py | s210975603.py | py | 1,026 | python | en | code | 0 | github-code | 90 |
14993107492 | import numba
import numpy as np
from utils.base import BaseWorker
@numba.jit(nopython=True)
def _mandel(real: np.float64, imag: np.float64, max_iterations: np.int8) -> np.int8:
    """Escape-time test for one point c = real + imag*i.

    Iterates z -> z**2 + c from z = 0 and returns the iteration index at
    which |z| first reaches 2 (tested as |z|**2 >= 4 to avoid a sqrt), or
    -1 when the point survives max_iterations iterations (i.e. is treated
    as belonging to the Mandelbrot set).
    """
    zr = 0.0
    zi = 0.0
    for iteration in range(max_iterations):
        zr, zi = (
            zr * zr - zi * zi + real,
            2 * zr * zi + imag,
        )
        if (zr * zr + zi * zi) >= 4:
            return iteration
    return -1
@numba.jit(parallel=True, nopython=True)
def _numba_calculate(
    pixels_x: np.int64,
    pixels_y: np.int64,
    max_x: np.float64,
    min_x: np.float64,
    min_y: np.float64,
    max_y: np.float64,
    max_iterations: np.int8,
):
    """Render the escape-iteration image for the given complex-plane window.

    Returns an int8 array of shape (pixels_y, pixels_x) holding _mandel's
    result per pixel (-1 marks points that never escaped).
    NOTE(review): int8 caps usable max_iterations at 127 -- confirm callers
    never pass more.
    """
    # Size of one pixel in the complex plane along each axis.
    step_x = (max_x - min_x) / pixels_x
    step_y = (max_y - min_y) / pixels_y
    image = np.ones(shape=(pixels_y, pixels_x), dtype=np.int8)
    # numba.prange parallelises the pixel loops across threads.
    for x_i in numba.prange(0, pixels_x):
        for y_i in numba.prange(0, pixels_y):
            iterations = _mandel(
                # Sample at the pixel centre, hence the +0.5 offsets.
                real=min_x + (x_i + 0.5) * step_x,
                imag=min_y + (y_i + 0.5) * step_y,
                max_iterations=max_iterations,
            )
            image[y_i, x_i] = iterations
    return image
class Worker(BaseWorker):
    """
    Numba python implementation of Mandelbrot set calculation
    """
    def _calculate(self) -> np.array:
        # Forward the render parameters to the jitted kernel.  The window
        # bounds, iteration cap and pixel counts are presumably attributes
        # populated by BaseWorker -- confirm against utils.base.
        return _numba_calculate(
            max_x=self.max_x,
            min_x=self.min_x,
            max_y=self.max_y,
            min_y=self.min_y,
            max_iterations=self.max_iterations,
            pixels_x=self.pixels_x,
            pixels_y=self.pixels_y,
        )
| jimhendy/mandelbrot_cython | src/workers/numba_python/worker.py | worker.py | py | 1,829 | python | en | code | 0 | github-code | 90 |
42384591935 | import unittest
from unittest import TestCase
from rosetta.rosetta_validations import Validations
class TestsPlugins(TestCase):
    """Tests for the rosetta Validations helpers.

    Each assertion uses the dict-view subset operator
    (expected.items() <= actual.items() reversed here as
    actual.items() <= expected.items()): the validator's returned mapping
    must be a subset of the expected one, so extra keys in ``expected``
    are tolerated.  Fixture strings (asset types, currencies) are Hebrew
    domain values and must not be altered.
    """
    def test_not_null(self):
        validators = Validations()
        assert validators.not_null(val='').items() <= ({'result': False, 'msg': "Invalid empty string value"}).items()
        assert validators.not_null(val=None).items() <= ({'result': False, 'msg': "Invalid 'None' value"}).items()
    def test_asset_type(self):
        validators = Validations()
        asset_types = ['הלוואות', 'ניירות ערך סחירים', 'ניירות ערך לא סחירים', 'מזומנים', 'זכויות', 'השקעות אחרות']
        # Positive value tests.
        for asset in asset_types:
            assert validators.asset_type(asset).items() <= ({'result': True}).items()
        # Negative value tests.
        assert validators.asset_type(val='junk value').items() <= (
            {'result': False, 'msg': "unrecognized asset type"}).items()
        assert validators.asset_type(val=None).items() <= (
            {'result': False, 'msg': "unrecognized asset type"}).items()
        assert validators.asset_type(val='').items() <= (
            {'result': False, 'msg': "unrecognized asset type"}).items()
    def test_decimal_positive(self):
        validators = Validations()
        assert validators.decimal_positive(val=1.11).items() <= ({'result': True, 'msg': ''}).items()
        assert validators.decimal_positive(val=1).items() <= (
            {'result': False, 'msg': "The value 1 must have 2 numbers after decimal point"}).items()
        assert validators.decimal_positive(val=1.111).items() <= (
            {'result': False, 'msg': "The value 1.111 must have 2 numbers after decimal point"}).items()
        assert validators.decimal_positive(val='abc').items() <= (
            {'result': False, 'msg': "The value abc not a decimal or not defined"}).items()
        assert validators.decimal_positive(val=-1.11).items() <= (
            {'result': False, 'msg': "The value -1.11 must be positive decimal"}).items()
        assert validators.decimal_positive(val=None).items() <= (
            {'result': False, 'msg': "The value None not a decimal or not defined"}).items()
    def test_decimal_negative(self):
        # Mirror of test_decimal_positive for the negative-decimal validator.
        validators = Validations()
        assert validators.decimal_negative(val=-1.11).items() <= ({'result': True, 'msg': ''}).items()
        assert validators.decimal_negative(val=-1).items() <= (
            {'result': False, 'msg': "The value -1 must have 2 numbers after decimal point"}).items()
        assert validators.decimal_negative(val=-1.111).items() <= (
            {'result': False, 'msg': "The value -1.111 must have 2 numbers after decimal point"}).items()
        assert validators.decimal_negative(val='abc').items() <= (
            {'result': False, 'msg': "The value abc not a decimal or not defined"}).items()
        assert validators.decimal_negative(val=1.11).items() <= (
            {'result': False, 'msg': "The value 1.11 must be negative decimal"}).items()
        assert validators.decimal_negative(val=None).items() <= (
            {'result': False, 'msg': "The value None not a decimal or not defined"}).items()
    def test_is_numeric(self):
        validators = Validations()
        assert validators.is_numeric(val="234").items() <= ({'result': True, 'msg': ''}).items()
        assert validators.is_numeric(val="pizza").items() <= (
            {'result': False, 'msg': 'The provided value is not an integer.'}).items()
    def test_is_positive(self):
        validators = Validations()
        assert validators.is_positive(val=1).items() <= ({'result': True}).items()
        assert validators.is_positive(val=-1).items() <= ({'result': False, 'msg': "Not a positive number"}).items()
    def test_is_float(self):
        validators = Validations()
        assert validators.is_float(val=1.2313).items() <= ({'result': True}).items()
        assert validators.is_float(val=1).items() <= ({'result': False, 'msg': "Not a float"}).items()
    def test_valid_currency(self):
        validators = Validations()
        currencies_list = ['דולר אוסטרליה', 'ריאל ברזילאי', 'דולר קנדי', 'פרנק שוויצרי', 'פסו ציליאני',
                           'יואן סיני', 'כתר דני', 'אירו', 'ליש"ט', 'דולר הונג קונג', 'פורינט הונגרי',
                           'רופי הודי', 'יין יפני', 'פזו מכסיקני', 'שקל חדש ישראלי', 'כתר נורווגי',
                           'ניו זילנד דולר', 'זלוטי פולני', 'רובל רוסי', 'כתר שוודי', 'דולר סינגפורי',
                           'לירה טורקית', 'דולר טיוואני', 'דולר ארהב', 'רנד דרא"פ', 'UNKNOWN',
                           ]
        # Positive value tests.
        for currency in currencies_list:
            assert validators.valid_currency(currency).items() <= ({'result': True}).items()
        # Negative value tests.
        assert validators.valid_currency(val='junk value').items() <= (
            {'result': False, 'msg': "currency junk value not recognized"}).items()
        assert validators.valid_currency(val=None).items() <= (
            {'result': False, 'msg': "currency None not recognized"}).items()
        assert validators.valid_currency(val='').items() <= (
            {'result': False, 'msg': "currency  not recognized"}).items()
    def test_date_format(self):
        validators = Validations()
        assert validators.date_format("06/25/1989", "%d/%m/%Y").items() <= (
            {'result': False, 'msg': "Incorrect date format, should be DD/MM/YYYY"}).items()
        assert validators.date_format("25/06/1989", "%d/%m/%Y").items() <= (
            {'result': True}).items()
    def test_digits_amount(self):
        validators = Validations()
        # Positive value tests.
        assert validators.digits_amount(1000, 2).items() <= (
            {'result': True}).items()
        # Negative value tests.
        assert validators.digits_amount(10000, 10).items() <= (
            {'result': False, 'msg': "Value exceeded digits boundary"}).items()
    def test_number_in_range(self):
        validators = Validations()
        assert validators.number_in_range(50, 20, 60).items() <= ({'result': True}).items()
        assert validators.number_in_range(20, 50, 60).items() <= (
            {'result': False, 'msg': "Value is not in the correct range."}
        ).items()
    def test_instrument_sub_type(self):
        validators = Validations()
        # Positive value tests.
        assert validators.instrument_sub_type("תעודות התחייבות ממשלתיות").items() <= (
            {'result': True}).items()
        # Negative value tests.
        assert validators.instrument_sub_type("Pizza").items() <= (
            {'result': False, 'msg': "unrecognized asset type"}).items()
if __name__ == "__main__":
unittest.main()
| RoySegall/BismarckValidator | rosetta/tests/test_validations.py | test_validations.py | py | 7,090 | python | fa | code | 1 | github-code | 90 |
17964554199 | n=int(input())
s1=input()
s2=input()
l=[]
old="hoge"
for i in range(n):
if s1[i]==s2[i]:
l.append(0)
elif s1[i]!=old:
l.append(1)
old=s1[i]
ans=1
flag=2
INF=10**9+7
for i in l:
if flag==1:
if i==1:ans=ans*3%INF
elif flag==0:ans=ans*2%INF
else:
if i==0:ans*=3
else:ans*=6
flag=i
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03626/s637285365.py | s637285365.py | py | 356 | python | en | code | 0 | github-code | 90 |
16645951084 | import os
import Skills_extract
from flask import *
app = Flask(__name__)
@app.route('/')
def home_page():
    """Render the resume-upload landing page."""
    return render_template('index.html')
@app.route('/results', methods=['GET', 'POST'])
def results_skills():
    """Extract and display the skills found in an uploaded resume.

    The text is pulled from a pre-existing copy of the file in the local
    Resumes directory (keyed by the uploaded file's name), then fed to
    Skills_extract for keyword extraction.
    """
    if request.method == 'POST':
        # basename() strips any directory components from the client-supplied
        # filename, preventing path traversal out of the Resumes directory.
        f_path = os.path.basename(request.files['file'].filename)
        path = r"D:\MajorProject\Resumes\\" + f_path
        split_tup = os.path.splitext(path)
        # extract the extension
        file_extension = split_tup[1]
        if file_extension == '.pdf':
            # PDFs yield one text chunk per page; join them with spaces.
            resume_text = ''.join(
                ' ' + page for page in Skills_extract.extract_text_from_pdf(path)
            ).lower()
        else:
            resume_text = Skills_extract.extract_text_from_doc(path).lower()
        output_skills = Skills_extract.extract_skills(resume_text)
        return render_template('results.html', skills=output_skills)
    # GET: the original fell through and returned None (a 500 error);
    # send the user back to the upload form instead.
    return render_template('index.html')
if __name__ == '__main__':
    # Honour a PORT environment variable (e.g. from a PaaS); default 4545.
    port = int(os.environ.get("PORT", 4545))
    app.run(debug=False, threaded=True,port=port)
| gagankarthik/MajorProject | app.py | app.py | py | 1,036 | python | en | code | 0 | github-code | 90 |
19018318655 | class Solution:
    def optimalStrategyOfGame (self, arr, N):
        """Optimal Strategy for a Game: both players alternately take a coin
        from either end of arr; return the maximum the first player can
        guarantee, using O(N) rolling rows instead of a full N x N table.

        dp[0] is the row currently being filled; dp[1]/dp[2] hold the two
        previously completed diagonals and are rotated into place by the
        swaps at the end of each outer iteration.
        """
        dp = [[0 for i in range(N + 1)] for j in range(3)]
        # Build intervals [i, j] from the right end of the array outward.
        for i in range(N - 1, -1, -1):
            dp[0][i] = arr[i]  # single-coin interval: take it
            for j in range(i + 1, N):
                # Take the left coin; opponent then plays optimally on the
                # remainder, leaving us the worse of the two sub-results.
                take_left = arr[i] + min(dp[2][j], dp[1][j - 1])
                # Symmetric case for taking the right coin.
                take_right = arr[j] + min(dp[0][j - 2], dp[1][j - 1])
                dp[0][j] = max(take_left, take_right)
            # Rotate the rows: current -> previous, previous -> two-back.
            dp[1], dp[2] = dp[2], dp[1]
            dp[0], dp[1] = dp[1], dp[0]
        # Value of the full interval [0, N-1] after the final rotation.
        return dp[1][-2]
| Tejas07PSK/lb_dsa_cracker | Dynamic Programming/Optimal Strategy for a Game/solution3.py | solution3.py | py | 522 | python | en | code | 2 | github-code | 90 |
20974607554 | # 10-dars. If-else
# Exercise: build the list cars = ['toyota', 'mazda', 'hyundai', 'gm', 'kia']
# and print each name with its first letter capitalised; print GM in all caps.
cars = ['toyota', 'mazda', 'hyundai', 'gm', 'kia']
for car in cars:
    if car == "gm":
        print(car.upper())
    else:
        print(car.title())
# Same exercise, but solved with the not-equal (!=) operator.
for car in cars:
    if car != "gm":
        print(car.title())
    else:
        print(car.upper())
# Ask for a login name.  For "admin" greet the administrator and offer the
# user list; otherwise greet the user by name.  (Prompts stay in Uzbek.)
kirish = input("Loginingizni yozing: ")
if kirish.lower() == "admin":
    print("Xush kelibsiz, Admin. Foydalanuvchilar ro'yxatini ko'rasizmi?")
else:
    print("Xush kelibsiz,", kirish, "!")
# Ask for two numbers and report whether they are equal.
# NOTE(review): the inputs are compared as strings, so "1" != "01" -- confirm
# whether numeric comparison was intended.
a = input("1-sonni kiriting: ")
b = input("2-sonni kiriting: ")
if a==b:
    print("Sonlar teng")
else:
    print("Sonlar teng emas")
# Ask for any number and print whether it is positive or negative.
a = int(input("Istalgan son kiriting: "))
if a >= 0:
    print("Musbat son")
else:
    print("Manfiy son")
# Ask for a number; if positive print its square root, otherwise ask for a
# positive number.
a = int(input("Istalgan son kiriting: "))
# NOTE(review): "a >- 0" parses as "a > -0" (i.e. a > 0); ">= 0" was likely
# intended -- confirm before changing, since it alters the a == 0 case.
if a >- 0:
    print(a**(1/2))
else:
print("Musbat son kiriting!") | javaxirabdullayev/python.dasturlash-asoslari | 10-dars. If-else.py | 10-dars. If-else.py | py | 1,793 | python | en | code | 0 | github-code | 90 |
18499094859 | import sys
input = sys.stdin.readline
def main():
    """Read K and print the number of (odd, even) pairs drawable from 1..K.

    Among 1..K there are K // 2 even numbers and (K + 1) // 2 odd numbers;
    the answer is their product.  Replaces the original O(K) counting loop
    with this closed form.
    """
    K = int(input())
    n_odd = (K + 1) // 2
    n_even = K // 2
    print(n_odd * n_even)
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p03264/s853418541.py | s853418541.py | py | 299 | python | en | code | 0 | github-code | 90 |
6741462549 | """
Even & Odd
Create a program in Python that will accept a positive integer from the user and determine if that number is even or odd.
The program will keep asking the user for a number until they enter Q for quit. When a number is determined to be even or odd,
the program needs to print that to the screen.
The input prompt for the number does not need a prompt. Therefore your Python code will look like the following:
num = int(input())
The input prompt for the question asking the user if they wish to continue will look like:
Continue:
With no spaces after the colon.
When a number is judged to be even the program needs to print:
even
When a number is judged to be odd the program needs to print:
odd
"""
# Repeatedly read integers and report each as even or odd until the user
# answers Q at the "Continue:" prompt.
answer = str()
num = int()
modulo=int()
while answer != 'Q':
    num = int(input())
    modulo= num % 2
    if modulo ==0:
        print ("even")
        print("Continue:")
    else :
        print("odd")
        print("Continue:")
    # ask the user if they want to enter another batch
    answer = input("")
| VictorOwinoKe/UoM-DESIGN-THINKING- | Advanced Loops/even_odd.py | even_odd.py | py | 1,053 | python | en | code | 1 | github-code | 90 |
21334583505 | from sys import argv, stdout
import struct, zlib
f=file(argv[1]).read()
if len(argv)>2:
out=file(argv[2], 'w')
else:
out=stdout
l=struct.unpack('<I', f[:4])[0]
doc=zlib.decompress(f[4:l+4])
out.write(doc) | gic888/MIEN | tools/extract_xml.py | extract_xml.py | py | 208 | python | en | code | 2 | github-code | 90 |
18215531199 | #
import sys
input=sys.stdin.readline
def main():
    """Follow K teleporter hops from town 0 over the functional graph A,
    shortcutting around the cycle once it is re-entered (ABC-style
    "Teleporter" problem); prints the final town (1-based)."""
    N,K=map(int,input().split())
    # Convert towns to 0-based indices.
    A=list(map(lambda x: int(x)-1,input().split()))
    # latest[v] = step number at which v was first reached (-1 = unseen).
    latest=[-1]*N
    latest[0]=0
    now=0
    while(K>0):
        K-=1
        to=A[now]
        if latest[A[now]]!=-1:
            # Destination already visited: the walk is on a cycle of length
            # latest[now] - latest[A[now]] + 1, so the remaining hops can be
            # reduced modulo that length.
            K%=latest[now]-latest[A[now]]+1
        latest[A[now]]=latest[now]+1
        now=to
    print(now+1)
if __name__=="__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p02684/s495551540.py | s495551540.py | py | 417 | python | en | code | 0 | github-code | 90 |
6185409882 | # 1부터 10까지의 정수 중에서 짝수의 총합과 홀수의 총합을 각각 구해 보세요.
# Exercise: among the integers 1..10, compute the sum of the evens and the
# sum of the odds separately.  (Output labels stay in Korean.)
odd = even = 0
for idx in range(1, 11):
    if idx % 2 == 0:
        even += idx
    else:
        odd += idx
print(f'홀수 총합 : {odd}')
print(f'짝수 총합 : {even}')
# Exercise: sum of 1..50 excluding multiples of 3, computed as the
# difference of two partial sums:
#   sumA = 1 + 2 + 4 + 5 + ... + 50   (non-multiples of 3)
#   sumB = 3 + 6 + 9 + ... + 48       (multiples of 3)
# result = sumA - sumB
sumA = sumB = 0
for idx in range(1, 51):
    if idx % 3 == 0:
        sumB += idx
    else:
        sumA += idx
result = sumA - sumB
print(f'결과 : {sumA} - {sumB} = {result}')
| super1947/AICourse | DAY03/for02.py | for02.py | py | 571 | python | ko | code | 0 | github-code | 90 |
1400058671 | # -*- coding: utf-8 -*-
import ast
class Flake8Deprecated(object):
    """Flake8 plugin reporting calls to deprecated method aliases (code D001)."""

    name = 'flake8_deprecated'
    version = '1.2'
    message = 'D001 found {0:s} replace it with {1:s}'
    # Mapping of the current name to the tuple of deprecated aliases for it.
    checks = {
        'assertEqual': ('failUnlessEqual', 'assertEquals', ),
        'assertNotEqual': ('failIfEqual', ),
        'assertTrue': ('failUnless', 'assert_', ),
        'assertFalse': ('failIf', ),
        'assertRaises': ('failUnlessRaises', ),
        'assertAlmostEqual': ('failUnlessAlmostEqual', ),
        'assertNotAlmostEqual': ('failIfAlmostEqual', ),
        'AccessControl.ClassSecurityInfo.protected': ('declareProtected', ),
        'AccessControl.ClassSecurityInfo.private': ('declarePrivate', ),
        'AccessControl.ClassSecurityInfo.public': ('declarePublic', ),
        'zope.interface.provider': ('directlyProvides', ),
        'zope.interface.implementer': ('classImplements', 'implements', ),
        'self.loadZCML(': ('xmlconfig.file', ),
        'zope.component.adapter': ('adapts', ),
    }

    def __init__(self, tree):
        """Store the parsed module *tree* and precompute the flat alias list."""
        self.flat_checks = self._flatten_checks()
        self.tree = tree

    def run(self):
        """Yield a flake8 error tuple for every attribute call on a deprecated alias."""
        for node in ast.walk(self.tree):
            if not isinstance(node, ast.Call):
                continue
            if not isinstance(node.func, ast.Attribute):
                continue
            for replacement, alias in self.flat_checks:
                if node.func.attr == alias:
                    yield (
                        node.lineno,
                        node.col_offset,
                        self.message.format(alias, replacement),
                        type(self),
                    )

    def _flatten_checks(self):
        """Expand ``checks`` into a flat list of (new_name, old_alias) pairs."""
        return [
            (new_version, alias)
            for new_version, old_aliases in self.checks.items()
            for alias in old_aliases
        ]
| dougcpr/deep-purple | .local/lib/python3.6/site-packages/flake8_deprecated.py | flake8_deprecated.py | py | 1,797 | python | en | code | 0 | github-code | 90 |
18483834489 | from collections import deque
N = int(input())
A = [int(input()) for _ in range(N)]
A.sort()
# Build an arrangement inside a deque, seeded with the smallest value.
q = deque([A[0]])
# i / j scan the remaining sorted values from the small end / large end.
i = 1
j = N-1
while i <= j:
    # Of the four ways to attach an extreme remaining value (A[i] or A[j])
    # to either end of the deque, take the one with the largest |difference|.
    temp = max(abs(A[i]-q[0]), abs(A[j]-q[0]), abs(A[i]-q[-1]), abs(A[j]-q[-1]))
    if temp == abs(A[i]-q[0]):
        q.appendleft(A[i])
        i += 1
    elif temp == abs(A[j]-q[0]):
        q.appendleft(A[j])
        j -= 1
    elif temp == abs(A[i]-q[-1]):
        q.append(A[i])
        i += 1
    else:
        q.append(A[j])
        j -= 1
# Score of the final arrangement: sum of |differences| of adjacent items.
L = list(q)
ans = 0
for i in range(N-1):
    ans += abs(L[i+1]-L[i])
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03229/s931860545.py | s931860545.py | py | 568 | python | en | code | 0 | github-code | 90 |
33626799434 | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from Validation import Validation
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import *
import pygame
import smtplib
import os
import pymysql
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
images = []
class Ui_Mail(object):
    """PyQt5 window for mailing suspect reports with image attachments.

    Tab 1 ("Mail") composes and sends an e-mail whose body is built from a
    MySQL `blockacess` record; tab 2 ("View") shows the `view` table and a
    stored photo for a given ID.
    """

    # Number of image attachments chosen so far.  The first `self.count += 1`
    # in setImage() creates an instance attribute that shadows this class
    # attribute from then on.
    count = 0
    def setupUi(self, MainWindow):
        """Create and lay out all widgets (Qt-Designer-style generated code),
        wire the button signals, and trigger the initial table load."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(581, 425)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 581, 451))
        self.tabWidget.setStyleSheet("*{\n"
"    \n"
"\n"
"    background:url(:/background/wallpaper/background_purple.jpg);\n"
"}\n"
"")
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setStyleSheet("QLineEdit{\n"
"    font-size:18px;\n"
"    background:transparent;\n"
"    border:none;\n"
"    color:rgb(238, 238, 236);\n"
"    border-bottom: 1px solid #717072;\n"
"    padding-bottom: 10px;\n"
"}")
        self.tab.setObjectName("tab")
        self.lineEdit_reci = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_reci.setGeometry(QtCore.QRect(60, 90, 351, 41))
        self.lineEdit_reci.setObjectName("lineEdit_reci")
        self.lineEdit_sub = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_sub.setGeometry(QtCore.QRect(60, 150, 451, 41))
        self.lineEdit_sub.setObjectName("lineEdit_sub")
        self.lineEdit_msg = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_msg.setGeometry(QtCore.QRect(60, 210, 451, 41))
        self.lineEdit_msg.setObjectName("lineEdit_msg")
        self.lineEdit_fn = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_fn.setGeometry(QtCore.QRect(60, 270, 351, 41))
        self.lineEdit_fn.setStyleSheet("QLineEdit{\n"
"    font-size:18px;\n"
"    background:transparent;\n"
"    border:1px solid white;\n"
"    color:rgb(238, 238, 236);\n"
"    padding-bottom: 5px;\n"
"    padding-left:3px;\n"
"}")
        self.lineEdit_fn.setObjectName("lineEdit_fn")
        self.btn_fn = QtWidgets.QPushButton(self.tab)
        self.btn_fn.setGeometry(QtCore.QRect(430, 270, 31, 41))
        self.btn_fn.setStyleSheet("QPushButton{\n"
"    background:transparent;\n"
"    border: 1px solid white;\n"
"    border-radius: 10px;\n"
"    color:white;\n"
"}\n"
"QPushButton:hover{\n"
"    color: rgb(115,210,22);\n"
"    border: 1px solid rgb(115, 210, 22);\n"
"}")
        self.btn_fn.setObjectName("btn_fn")
        self.btn_clr = QtWidgets.QPushButton(self.tab)
        self.btn_clr.setGeometry(QtCore.QRect(480, 270, 31, 41))
        self.btn_clr.setStyleSheet("QPushButton{\n"
"    background:transparent;\n"
"    border: 1px solid white;\n"
"    border-radius: 10px;\n"
"    color:white;\n"
"}\n"
"QPushButton:hover{\n"
"    border: 1px solid rgb(204, 0, 0);\n"
"    color: rgb(204,0,0);\n"
"}")
        self.btn_clr.setObjectName("btn_clr")
        self.btn_snd = QtWidgets.QPushButton(self.tab)
        self.btn_snd.setGeometry(QtCore.QRect(410, 340, 111, 31))
        self.btn_snd.setStyleSheet("QPushButton{\n"
"    font-size:17px;\n"
"    border: 1px solid rgb(52, 101, 164);\n"
"    color:white;\n"
"    border-radius: 20px;\n"
"}\n"
"QPushButton:hover{\n"
"    background:rgb(52, 101, 164);\n"
"}")
        self.btn_snd.setObjectName("btn_snd")
        self.lineEdit_id = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_id.setGeometry(QtCore.QRect(60, 40, 41, 31))
        self.lineEdit_id.setObjectName("lineEdit_id")
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.tableWidget = QtWidgets.QTableWidget(self.tab_2)
        self.tableWidget.setGeometry(QtCore.QRect(160, 20, 411, 351))
        self.tableWidget.setRowCount(10)
        self.tableWidget.setColumnCount(7)
        self.tableWidget.setStyleSheet("QTableWidget{\n"
"    color:white;\n"
"}")
        self.tableWidget.setObjectName("tableWidget")
        item = QtWidgets.QTableWidgetItem()
        item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(3, item)
        self.tableWidget.verticalHeader().setVisible(False)
        self.tableWidget.verticalHeader().setHighlightSections(True)
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(20, 100, 121, 161))
        self.label.setStyleSheet("QLabel{\n"
"    border:1px solid rgb(136, 138, 133);\n"
"}")
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit.setGeometry(QtCore.QRect(30, 60, 101, 25))
        self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit.setObjectName("lineEdit")
        self.pushButton = QtWidgets.QPushButton(self.tab_2)
        self.pushButton.setGeometry(QtCore.QRect(30, 290, 101, 25))
        self.pushButton.setStyleSheet("QPushButton{\n"
"    color:white;\n"
"}")
        self.pushButton.setObjectName("pushButton")
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Populate the "View" table immediately and hook up the buttons.
        self.load()
        self.btn_snd.clicked.connect(self.sndmail)
        self.btn_clr.clicked.connect(self.clear)
        self.btn_fn.clicked.connect(self.setImage)
        self.pushButton.clicked.connect(self.loadimg)
    def loadimg(self):
        """Look up the ID typed in the search box and show the stored photo.

        NOTE(review): the DB credentials and the image directory are
        hard-coded (incl. an absolute home path) — move them to config.
        The query is built with %-formatting; ID is forced to int here,
        but parameterized queries would still be safer.
        """
        ID = int(self.lineEdit.text())
        connection = pymysql.connect("localhost", "root", "rootpass", "project")
        cursor = connection.cursor()
        select_query = "select * from blockacess where id =%d"%(ID)
        cursor.execute(select_query)
        row = cursor.fetchone()
        #self.image_name = cv2.imread('/home/anonymous/Desktop/Project-test/Registered/' + row[0] + str('.jpg'), 1)
        # row[1] is used as the image file name (presumably the person's name
        # column — confirm against the blockacess schema).
        pixmap = QtGui.QPixmap('/home/anonymous/Desktop/Project-test/Registered/' + row[1] + '.jpg')
        pixmap = pixmap.scaled(self.label.width(), self.label.height(), QtCore.Qt.KeepAspectRatio)
        self.label.setPixmap(pixmap)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
    def load(self):
        """Fill the table widget with every row of the `view` table."""
        connection = pymysql.connect("localhost","root","rootpass","project")
        cursor = connection.cursor()
        cursor.execute('''SELECT * FROM view''')
        self.tableWidget.setRowCount(0)
        for row, form in enumerate(cursor):
            self.tableWidget.insertRow(row)
            for column, item in enumerate(form):
                print(str(item))
                self.tableWidget.setItem(row, column, QtWidgets.QTableWidgetItem(str(item)))
    def setImage(self):
        """Let the user pick an image file and queue it as an attachment.

        The chosen file name is appended to the module-level ``images`` list
        and to the comma-separated text shown in ``lineEdit_fn``.  Only the
        directory of the most recent pick is remembered (``self.path``), so
        all attachments are assumed to come from the same folder.
        """
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Select Image", "","Image Files (*.png *.jpg *jpeg *.bmp)") # Ask for filez
        inputFilepath = fileName
        filename_w_ext = os.path.basename(inputFilepath)
        filename, file_extension = os.path.splitext(filename_w_ext)
        # filename = foobar
        # file_extension = .txt
        self.path, self.filename = os.path.split(fileName)
        print(self.path)
        print(self.filename)
        self.count += 1
        if self.count == 1:
            self.name_list = self.filename
            self.lineEdit_fn.setText(self.name_list)
            images.append(self.filename)
        else:
            self.name_list= self.name_list+', '+str(self.filename)
            self.lineEdit_fn.setText(self.name_list)
            images.append(self.filename)
        # path = path/to/file
        # filename = foobar.txt
    def clear(self):
        """Reset every input field and drop the queued attachments."""
        self.lineEdit_reci.setText('')
        self.lineEdit_sub.setText('')
        self.lineEdit_msg.setText('')
        self.lineEdit_fn.setText('')
        self.lineEdit_id.setText('')
        images.clear()
    def sndmail(self):
        """Validate the form, build the suspect report, and e-mail it.

        NOTE(review): `email` and `password` are blank — the credentials were
        apparently stripped before committing.  As written, `server.login()`
        is called with the *recipient* address and an empty password, and
        `sendmail()` uses the recipient as both sender and receiver; this
        needs the sender account restored (from config, not hard-coded).
        """
        obj = Validation()
        mail = self.lineEdit_reci.text()
        connnection = pymysql.connect("localhost", "root", "rootpass", "project")
        cursor = connnection.cursor()
        ID = int(self.lineEdit_id.text())
        # Fetch the suspect record and the visit count for the report body.
        select_query1 = "select * from blockacess where id =%d" % (ID)
        cursor.execute(select_query1)
        row = cursor.fetchone()
        select_query2 = "select count(*) from view where id =%d" % (ID)
        cursor.execute(select_query2)
        row2 = cursor.fetchone()
        name = 'Name : ' +row[1]+', '
        age = 'Age : ' +row[3]+ ', '
        gender = 'Gender : ' +row[4]+ ', '
        citizen = 'Nationality : '+row[5]+', '
        other = 'OtherInfo : ' + row[6] +', '
        visit = 'Visited : '+str(row2[0])+'.'
        address = 'Address : Goregaon(W),Patkar College'
        table = 'Suspect Information : '+name+age+gender+citizen+other+visit
        if self.lineEdit_id.text() != '' and self.lineEdit_reci.text()!='' and self.lineEdit_sub.text()!='' and self.lineEdit_msg.text()!='' and self.lineEdit_fn.text()!='':
            if obj.check_email(mail):
                email = ''
                password = ''
                send_to_email = str(self.lineEdit_reci.text())
                subject = str(self.lineEdit_sub.text())
                message = table+' Message : '+(self.lineEdit_msg.text())
                dir_path = self.path
                files = images
                msg = MIMEMultipart()
                msg['To'] = send_to_email
                msg['From'] = email
                msg['Subject'] = subject
                body = MIMEText(message, 'html', 'utf-8')
                msg.attach(body) # add message body (text or html)
                for f in files: # add files to the message
                    file_path = os.path.join(dir_path, f)
                    attachment = MIMEApplication(open(file_path, "rb").read(), _subtype="txt")
                    attachment.add_header('Content-Disposition', 'attachment', filename=f)
                    msg.attach(attachment)
                server = smtplib.SMTP('smtp.gmail.com', 587)
                server.starttls()
                server.login(send_to_email, password)
                text = msg.as_string()
                server.sendmail(send_to_email, send_to_email, text)
                server.quit()
                self.sound(0)
                self.qmsg('Mail has been successfully send',1)
                self.clear()
            else:
                self.sound(1)
        else:
            self.sound(1)
            self.qmsg('Error !!! Check Entries Again .Make Sure No Filed Is Empty.', 1)
    def qmsg(self, msg, check):
        """Show *msg* in a styled message box; info when check == 0, else critical."""
        qmsgBox = QMessageBox()
        qmsgBox.move(((qmsgBox.width()) // 2 + 60), ((qmsgBox.height()) // 2 - 50))
        qmsgBox.setStyleSheet(
            'QMessageBox {background-color: #2b5b84; color: white;}\nQLabel{color: white;}\nQPushButton{color: white; font-size: 16px; background-color: #1d1d1d; border-radius: 10px; padding: 10px; text-align: center;}\n QPushButton:hover{color: #2b5b84;}')
        if check == 0:
            QMessageBox.information(qmsgBox, 'PyQt5 message', msg)
        else:
            QMessageBox.critical(qmsgBox, 'PyQt5 message', msg)
    def sound(self, check):
        """Play the success sound when check == 0, otherwise the error sound."""
        if check == 0:
            pygame.mixer.init()
            pygame.mixer.music.load('Sound/login.mp3')
            pygame.mixer.music.play(0)
        else:
            pygame.mixer.init()
            pygame.mixer.music.load('Sound/error.mp3')
            pygame.mixer.music.play(0)
    def retranslateUi(self, MainWindow):
        """Apply all translatable user-visible strings (titles, placeholders, headers)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Mail"))
        self.lineEdit_reci.setPlaceholderText(_translate("MainWindow", "Recipient"))
        self.lineEdit_sub.setPlaceholderText(_translate("MainWindow", "Subject"))
        self.lineEdit_msg.setPlaceholderText(_translate("MainWindow", "Message"))
        self.lineEdit_fn.setPlaceholderText(_translate("MainWindow", "Choose image"))
        self.btn_fn.setText(_translate("MainWindow", "..."))
        self.btn_clr.setText(_translate("MainWindow", "X"))
        self.btn_snd.setText(_translate("MainWindow", "Send"))
        self.lineEdit_id.setPlaceholderText(_translate("MainWindow", "ID"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Mail"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "ID"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "DATE"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "TIME"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "VISIT"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.lineEdit.setPlaceholderText(_translate("MainWindow", "Enter ID"))
        self.pushButton.setText(_translate("MainWindow", "Search ID"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "View"))
import img
| saudshaikh724/Smart-Security | SendMail.py | SendMail.py | py | 13,709 | python | en | code | 0 | github-code | 90 |
13010418702 | import random
import torch
import datasets
from transformers import AutoModel, GlueDataset, GlueDataTrainingArguments, AutoTokenizer, AutoFeatureExtractor
from transformers.testing_utils import torch_device
def make_config(config_class, **kwargs):
    """Return a staticmethod that builds a ``config_class`` with fixed kwargs."""
    def _factory():
        return config_class(**kwargs)
    return staticmethod(_factory)
class AdapterTestBase:
    """Common helpers shared by adapter test mixins.

    Subclasses supply ``config``/``tokenizer_name``; ``model_class`` defaults
    to ``AutoModel`` when not overridden.
    """

    # Model class instantiated by get_model(); AutoModel unless overridden.
    model_class = AutoModel
    # Default shape for randomly generated input ids.
    default_input_samples_shape = (3, 64)

    def get_model(self):
        """Build the model from ``self.config()`` and move it to the test device."""
        if self.model_class == AutoModel:
            model = AutoModel.from_config(self.config())
        else:
            model = self.model_class(self.config())
        model.to(torch_device)
        return model

    def get_input_samples(self, shape=None, vocab_size=5000, config=None):
        """Generate a dict of random ``input_ids`` (plus decoder ids for seq2seq)."""
        shape = shape or self.default_input_samples_shape
        total = 1
        for dim in shape:
            total *= dim
        values = [random.randint(0, vocab_size - 1) for _ in range(total)]
        input_ids = torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
        # Models such as BART require the EOS token to appear only at the end.
        if config and config.eos_token_id is not None and config.eos_token_id < vocab_size:
            input_ids[input_ids == config.eos_token_id] = random.randint(0, config.eos_token_id - 1)
            input_ids[:, -1] = config.eos_token_id
        in_data = {"input_ids": input_ids}
        if config and config.is_encoder_decoder:
            in_data["decoder_input_ids"] = input_ids.clone()
        return in_data

    def add_head(self, model, name, **kwargs):
        """Attach a classification head and return its number of labels."""
        model.add_classification_head(name, **kwargs)
        return model.heads[name].config["num_labels"]

    def dataset(self, tokenizer=None):
        """Load the MRPC fixture as a training ``GlueDataset``."""
        if tokenizer is None:
            tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name, use_fast=False)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
        glue_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        return GlueDataset(glue_args, tokenizer=tokenizer, mode="train")

    def assert_adapter_available(self, model, adapter_name):
        """Fail unless *adapter_name* is configured on the model and has weights."""
        self.assertTrue(adapter_name in model.config.adapters)
        self.assertGreater(len(model.get_adapter(adapter_name)), 0)

    def assert_adapter_unavailable(self, model, adapter_name):
        """Fail if *adapter_name* is still configured on the model or has weights."""
        self.assertFalse(adapter_name in model.config.adapters)
        self.assertEqual(len(model.get_adapter(adapter_name)), 0)
class VisionAdapterTestBase(AdapterTestBase):
    """Adapter test base for vision models: pixel-value inputs and image heads."""

    default_input_samples_shape = (3, 3, 224, 224)

    def get_input_samples(self, shape=None, config=None):
        """Generate a dict with random float ``pixel_values`` of the given shape."""
        shape = shape or self.default_input_samples_shape
        total = 1
        for dim in shape:
            total *= dim
        values = [random.random() for _ in range(total)]
        pixel_values = torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
        return {"pixel_values": pixel_values}

    def add_head(self, model, name, **kwargs):
        """Attach an image-classification head (10 labels unless specified)."""
        kwargs.setdefault("num_labels", 10)
        model.add_image_classification_head(name, **kwargs)
        return model.heads[name].config["num_labels"]

    def dataset(self, feature_extractor=None):
        """Load the CIFAR-10 fixture, transformed on the fly by the extractor."""
        if feature_extractor is None:
            feature_extractor = AutoFeatureExtractor.from_pretrained(self.feature_extractor_name)

        def transform(batch):
            inputs = feature_extractor([image for image in batch["img"]], return_tensors="pt")
            inputs["labels"] = batch["label"]
            return inputs

        ds = datasets.load_dataset(
            "./tests_adapters/fixtures/samples/cifar10",
            data_dir="./tests_adapters/fixtures/samples/cifar10",
            split="train",
        )
        return ds.with_transform(transform)
| adapter-hub/adapter-transformers | tests_adapters/test_adapter.py | test_adapter.py | py | 4,205 | python | en | code | 1,700 | github-code | 90 |
31921950066 | # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
import argparse
import copy
import datetime
import json
import os
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from utils import (configure_cudnn, configure_wandb, get_model,
load_checkpoint, prepare_dataloaders, save_checkpoint,
set_seed, test, train)
def parse_arg():
    """Define and parse the command-line arguments for a training run."""
    ap = argparse.ArgumentParser()
    ap.add_argument('exp_id', type=int)
    ap.add_argument('--model', choices=['mobilenetv2'], default='mobilenetv2')
    # 'normal' trains without quantization; 'qat' enables quantization-aware training.
    ap.add_argument('--mode', choices=['normal', 'qat'], default='normal')
    ap.add_argument('--replace_relu', action='store_true')
    ap.add_argument('--fuse_model', action='store_true')
    ap.add_argument('--quantization_backend', choices=['qnnpack', 'fbgemm'], default='fbgemm')
    ap.add_argument('--pretrained', default='imagenet')
    ap.add_argument('--epochs', type=int, default=300)
    ap.add_argument('--lr_drop_epochs', type=int, nargs='+', default=[210, 270])
    ap.add_argument('--lr', type=float, default=0.005)
    ap.add_argument('--batch_size', type=int, default=64)
    # The huge defaults mean observer/BN freezing never kicks in unless lowered.
    ap.add_argument('--observer_update_epochs', default=100000, type=int,
                    help='number of total epochs to update observers')
    ap.add_argument('--bn_update_epochs', default=100000, type=int,
                    help='number of total epochs to update batch norm stats')
    ap.add_argument('--resume', action='store_true')
    ap.add_argument('--device', default=None)
    ap.add_argument('--seed', type=int, default=1000)
    ap.add_argument('--model_dir', default='models')
    return ap.parse_args()
def main():
    """End-to-end training loop with optional quantization-aware training (QAT).

    Creates the experiment directory, dumps the parsed config as JSON, trains
    for ``args.epochs`` epochs with multi-step LR decay, evaluates every epoch
    (using the converted quantized model when QAT is enabled), keeps the best
    checkpoint, and logs metrics to wandb.
    """
    args = parse_arg()
    torch.backends.quantized.engine = args.quantization_backend
    enable_qat = args.mode == 'qat'
    # fix random seed
    set_seed(args.seed)
    configure_cudnn(deterministic=True, benchmark=False)
    exp_id = f'exp_{args.exp_id:04d}'
    exp_dir = os.path.join(args.model_dir, exp_id)
    # prepare directory to save model
    if args.resume:
        assert os.path.exists(
            os.path.join(exp_dir, 'checkpoint_latest.pth')
        ), 'Failed to resume training. Cannot find checkpoint file.'
    else:
        os.makedirs(exp_dir, exist_ok=False)
    # dump config
    time_str = datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S')
    with open(os.path.join(exp_dir, f'{time_str}.json'), mode='w') as f:
        json.dump(args.__dict__, f, indent=4)
    # Device selection: --device overrides the automatic cuda/cpu choice.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if args.device is not None:
        device = torch.device(args.device)
    print(f'device: {device}')
    print('Preparing dataset...')
    train_dataloader, test_dataloader = prepare_dataloaders(args.batch_size)
    print('Preparing model...')
    if args.pretrained == '':
        args.pretrained = None
    model = get_model(args.model,
                      pretrained=args.pretrained,
                      replace_relu=args.replace_relu,
                      fuse_model=args.fuse_model,
                      eval_before_fuse=False)
    if enable_qat:
        # Insert fake-quantization observers so training simulates int8 inference.
        model.qconfig = torch.quantization.get_default_qat_qconfig(
            args.quantization_backend)
        torch.quantization.prepare_qat(model, inplace=True)
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=args.lr_drop_epochs,
                                               gamma=0.1)
    start_epoch = 0
    best_accuracy = -1
    best_accuracy_epoch = -1
    if args.resume:
        model, optimizer, scheduler, start_epoch, best_accuracy, best_accuracy_epoch = load_checkpoint(
            os.path.join(exp_dir, 'checkpoint_latest.pth'), model, optimizer,
            scheduler, start_epoch, best_accuracy, best_accuracy_epoch)
        start_epoch += 1
    configure_wandb(project='pytorch_model_quantization',
                    group=exp_id,
                    config=args)
    if enable_qat:
        model.apply(torch.quantization.enable_observer)
        model.apply(torch.quantization.enable_fake_quant)
    # train loop
    for epoch in range(start_epoch, args.epochs):
        logs = {'epoch': epoch}
        lr = scheduler.get_last_lr()[0]
        print(f'\nEpoch: {epoch} / {args.epochs}, lr: {lr:.9f}')
        logs['lr'] = lr
        if enable_qat:
            # Optionally freeze observers / BN statistics late in training.
            if epoch >= args.observer_update_epochs:
                print('Disabling observer for subseq epochs, epoch = ', epoch)
                model.apply(torch.quantization.disable_observer)
            if epoch >= args.bn_update_epochs:
                print('Freezing BN for subseq epochs, epoch = ', epoch)
                model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
        # train
        loss_epoch = train(model, optimizer, scheduler, criterion, device,
                           train_dataloader)
        print('loss: %.8f' % loss_epoch)
        logs['train/loss'] = loss_epoch
        # test
        accuracy = test(model, device, test_dataloader)
        print('accuracy: %.4f' % accuracy)
        logs['test/accuracy'] = accuracy
        if enable_qat:
            # test with quantized model (convert a CPU copy; this accuracy
            # overwrites the float one for checkpoint selection and logging)
            print('Evaluating quantized model...')
            model_quantized = copy.deepcopy(model)
            model_quantized.to(torch.device('cpu'))
            model_quantized = torch.quantization.convert(
                model_quantized.eval(), inplace=False)
            accuracy = test(model_quantized, torch.device('cpu'),
                            test_dataloader)
            print('accuracy (quantized): %.4f' % accuracy)
            logs['test/accuracy'] = accuracy
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_accuracy_epoch = epoch
            print('Best accuracy updated. Saving models...')
            model_path = os.path.join(exp_dir, 'best_model.pth')
            # Save on CPU so the checkpoint loads regardless of GPU availability.
            model.to(torch.device('cpu'))
            torch.save(model.state_dict(), model_path)
            model.to(device)  # back model from cpu to `device`
        # save checkpoint
        save_checkpoint(os.path.join(exp_dir, 'checkpoint_latest.pth'), model,
                        optimizer, scheduler, epoch, best_accuracy,
                        best_accuracy_epoch)
        logs['test/best_accuracy'] = best_accuracy
        logs['test/best_accuracy_epoch'] = best_accuracy_epoch
        wandb.log(logs)
    # NOTE(review): "accuract" is a typo in the final message (string left as-is).
    print('Reached best accuract %.4f at epoch %d' %
          (best_accuracy, best_accuracy_epoch))
    wandb.finish()
if __name__ == '__main__':
    main()
| motokimura/pytorch_quantization | train.py | train.py | py | 7,173 | python | en | code | 0 | github-code | 90 |
def print_value_and_type(items: list) -> None:
    """Print every element of *items* followed by its type.

    A separator line of 80 dashes is printed once after the whole list.

    :param items: list of objects to display
    :return: None
    """
    for element in items:
        print(element)
        print(type(element))
    print('-' * 80)
# Plain Cyrillic string literals.
VAR_1 = 'разработка'
VAR_2 = 'сокет'
VAR_3 = 'декоратор'
STR_LIST = [VAR_1, VAR_2, VAR_3]
print_value_and_type(STR_LIST)
# The same three words written with explicit \u escape sequences; the escapes
# decode to identical text, so the printed values and types match the above.
VAR_UNICODE_1 = '\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430'
VAR_UNICODE_2 = '\u0441\u043e\u043a\u0435\u0442'
VAR_UNICODE_3 = '\u0434\u0435\u043a\u043e\u0440\u0430\u0442\u043e\u0440'
UNICODE_LIST = [VAR_UNICODE_1, VAR_UNICODE_2, VAR_UNICODE_3]
print_value_and_type(UNICODE_LIST)
| AnastasiaYurko/Client-server_apps | lesson1/test.py | test.py | py | 751 | python | en | code | 0 | github-code | 90 |
17956174849 | n,m,r=map(int,input().split())
# n (towns) and m (roads) were unpacked on the previous line; r lists the R
# towns that must be visited (1-based).
r=list(map(int,input().split()))
# NOTE(review): `sys` is imported here but never used.
import sys
INF=float('inf')
# Adjacency matrix of road lengths; INF means no direct road.
road=[[INF]*n for _ in range(n)]
for _ in range(m):
    a,b,c=map(int,input().split())
    # Undirected road of length c (towns are 1-based in the input).
    road[a-1][b-1]=c
    road[b-1][a-1]=c
# Floyd–Warshall all-pairs shortest paths: k is the intermediate town.
for k in range(n):
    # s: start town
    for s in range(n):
        # g: goal town
        for g in range(n):
            road[s][g]=min(road[s][g],road[s][k]+road[k][g])
import itertools
# Try every visiting order of the R required towns and keep the cheapest
# total shortest-path distance along that order.
rlist=list(itertools.permutations(r))
result=10**9
for item in rlist:
    tmp=0
    from_town=item[0]
    for to_town in item[1:]:
        tmp+=road[from_town-1][to_town-1]
        from_town=to_town
    result=min(result,tmp)
print(result)
| Aasthaengg/IBMdataset | Python_codes/p03608/s197547926.py | s197547926.py | py | 658 | python | en | code | 0 | github-code | 90 |
7738737092 | # -*- encoding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def _layer_init(layer, w_scale=1.0):
# nn.init.orthogonal_(layer.weight.data)
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
layer.weight.data.uniform_(-lim, lim)
layer.weight.data.mul_(w_scale)
nn.init.constant_(layer.bias.data, 0)
return layer
class Actor(nn.Module):
    """MLP policy network: ELU hidden layers, optional output gate (tanh by default)."""

    def __init__(self, in_size, hidden_units, out_size, out_gate=nn.Tanh):
        super().__init__()
        self.in_size = in_size
        self.out_size = out_size
        modules = []
        prev_width = in_size
        for width in hidden_units:
            modules.append(_layer_init(nn.Linear(prev_width, width)))
            modules.append(nn.ELU(inplace=True))
            prev_width = width
        # Output layer uses the small 3e-3 init scale.
        modules.append(_layer_init(nn.Linear(prev_width, out_size), 3e-3))
        if out_gate is not None:
            modules.append(out_gate())
        self.fc_body = nn.Sequential(*modules)

    def forward(self, states):
        """Map a batch of states to actions."""
        return self.fc_body(states)
class Critic(nn.Module):
    """MLP Q-network: states pass through one layer, then actions are concatenated."""

    def __init__(self, in_size, full_action_size, hidden_units=(400, 300)):
        super().__init__()
        # First stage embeds the state alone.
        self.fc_body = nn.Sequential(
            nn.Linear(in_size, hidden_units[0]),
            nn.ELU(inplace=True),
        )
        modules = []
        prev_width = hidden_units[0] + full_action_size
        for width in hidden_units[1:]:
            modules.append(_layer_init(nn.Linear(prev_width, width)))
            modules.append(nn.ELU(inplace=True))
            prev_width = width
        # Scalar Q-value head with the small 3e-3 init scale.
        modules.append(_layer_init(nn.Linear(prev_width, 1), 3e-3))
        self.critic_body = nn.Sequential(*modules)

    def forward(self, full_states, full_actions):
        """Return Q(s, a) for a batch of state features and joint actions."""
        hidden = self.fc_body(full_states)
        joined = torch.cat((hidden, full_actions), dim=1)
        return self.critic_body(joined)
if __name__ == "__main__":
    # Smoke test: when executed directly, overfit the critic towards a fixed
    # target value and print the shrinking loss.
    import torch.nn.functional as F
    # Random single-sample batches: 48-dim full state, 24-dim actor state,
    # 4-dim joint action, and a constant target Q-value of 10.
    full_states = torch.from_numpy(np.random.rand(48).reshape(-1, 48)).float()
    actor_states = torch.from_numpy(np.random.rand(24).reshape(-1, 24)).float()
    full_actions = torch.from_numpy(np.random.rand(4).reshape(-1, 4)).float()
    target_value = torch.tensor(10.).view(-1, 1).float()
    actor = Actor(24, hidden_units=(256, 256), out_size=2)
    critic = Critic(48, 4, hidden_units=(256, 256))
    optimizer = torch.optim.Adam(critic.parameters())
    '''
    torch.onnx.export(actor, (actor_states, ), "actor.onnx", verbose=False,
                      training=False,
                      input_names=['actor_state', 'a', 'b', 'c', 'd', 'e', 'f'],
                      output_names=['action'])
    torch.onnx.export(critic, (full_states, full_actions), "critic.onnx", verbose=False,
                      training=False,
                      input_names=['full_states', 'full_actions', 'a', 'b', 'c', 'd', 'e', 'f'],
                      output_names=['q'])
    '''
    steps = 100
    for i_step in range(steps):
        predict_value = critic(full_states, full_actions)
        loss = F.mse_loss(predict_value, target_value)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f"Step: {i_step},\tLoss: {loss},\tPredict: {predict_value}")
| moliqingwa/DRLND | p3_collab-compet/model.py | model.py | py | 3,642 | python | en | code | 1 | github-code | 90 |
44762261569 | from flask import Flask
from os.path import join, dirname
from dotenv import load_dotenv
import firebase_admin
import pyrebase
import ast
import os
# Resolve the package-local .env file and load its variables into the process
# environment before anything below reads them.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# FIREBASE_CREDS holds a Python-dict literal; literal_eval parses it safely.
certi = ast.literal_eval(os.environ["FIREBASE_CREDS"])
# Pyrebase (client SDK) configuration, driven entirely by the environment.
PYREBASE_CONFIG = {
    "apiKey"           : os.getenv('FIREBASE_API_KEY'),
    "authDomain"       : os.getenv('AUTH_DOMAIN'),
    "databaseURL"      : os.getenv('DATABASE_URL'),
    "projectId"        : os.getenv('PROJECT_ID'),
    "storageBucket"    : os.getenv('STORAGE_BUCKET'),
    "messagingSenderId": os.getenv('MESSAGING_SENDER_ID'),
    "appId"            : os.getenv('APP_ID'),
    "serviceAccount"   : certi
}
firebase = pyrebase.initialize_app(PYREBASE_CONFIG)
auth = firebase.auth()
db = firebase.database()
# Firebase Admin SDK app (named 'storage') used for storage-bucket access.
cred = firebase_admin.credentials.Certificate(certi)
app_fb = firebase_admin.initialize_app(cred, {'storageBucket': os.getenv('STORAGE_BUCKET'),}, name='storage')
# NOTE(review): Flask('__name__') passes the literal string "__name__", not the
# module's __name__ — likely unintended; confirm before changing, since it
# affects Flask's root path / resource lookup.
app = Flask('__name__')
app.config['SECRET_KEY'] = os.getenv("SECRET_KEY")
# from flask_mail import Mail
# app.config["MAIL_SERVER"] = "smtp.gmail.com"
# app.config["MAIL_PORT"] = 465
# app.config["MAIL_USE_SSL"] = True
# app.config["MAIL_USERNAME"] = 'saumya.bhatt106@gmail.com'
# app.config["MAIL_PASSWORD"] = 'bla'
# mail = Mail()
# mail.init_app(app)
from modules import routes | Movies-By-the-Sea/mbts-archives | Frontend/v3.0/modules/__init__.py | __init__.py | py | 1,346 | python | en | code | 1 | github-code | 90 |
40374771956 | from datetime import timedelta
from datetime import datetime
import time
from pymongo import MongoClient
import atexit
client = MongoClient("localhost", 27017)
db = client.WDMOV

def exit_handler():
    """Close the MongoDB client when the interpreter shuts down."""
    client.close()

atexit.register(exit_handler)

def build_current_avg_pipeline():
    """Build the aggregation pipeline for the average punctuality of fresh data.

    The $match cutoff must be recomputed on every call: according to the
    standard, every journey should update at least once a minute, so we play
    it safe and discard any data older than 5 minutes.
    """
    return [
        {
            "$match": {
                "last_update": {
                    "$gte": datetime.now() - timedelta(minutes=5)
                }
            }
        },
        {
            "$group": {
                "_id": None,
                "avg_punctuality": {
                    "$avg": "$punctuality"
                }
            }
        }
    ]

while True:
    # Rebuild the pipeline each iteration so the 5-minute window tracks "now".
    # (Previously the cutoff was computed once at startup, so the window never
    # advanced and the query kept matching an ever-staler time range.)
    results = list(db.realtime.aggregate(build_current_avg_pipeline()))
    current_avg_time = results[0]["avg_punctuality"]
    print("%s\tCurrent average delay:\t %f" % (
        datetime.now().strftime("%H:%M:%S"), current_avg_time
    ))
    time.sleep(2)
| 8uurg/WDMOV | mongo/streaming/current-avg-delay.py | current-avg-delay.py | py | 1,153 | python | en | code | 0 | github-code | 90 |
36806293360 | from twilio.rest import Client
import time
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
# Make sure to add security // I assume these will require an API call and be given based on the user permissions
# SECURITY: live Twilio credentials are hard-coded and committed here — rotate
# them immediately and load them from environment variables or a secrets
# manager instead of source control.
account_sid = 'ACb3cc029d14a5c2dccfaa71b9036309bc'
auth_token = '57f5a6b74554f21b815ca61b597a6fbf'
client = Client(account_sid, auth_token)
# Phone numbers in E.164 format.
rodda = '+19176134279'
jacob = '+16514922091'
ny_num = '+19175400288'
# We can only send max one message per second
def send_messages(voters, text, number):
    """Send *text* by SMS to every voter, throttled to about one message/second.

    Twilio accepts at most one outbound message per second per number, so each
    loop iteration is padded out to ``delay`` seconds before the next send.

    :param voters: iterable of objects exposing a ``.number`` attribute
    :param text: message body to send
    :param number: sending phone number (the ``from_`` field)
    :return: dict echoing the inputs plus per-message status/error/price/date lists
    """
    dates = []
    statuses = []
    errors = []
    prices = []
    delay = 1.01
    for voter in voters:
        start_time = time.time()
        message = client.messages.create(
            body=text,
            from_=number,
            to=voter.number
        )
        statuses.append(message.status)
        errors.append(message.error_code)
        prices.append(message.price)
        dates.append(message.date_sent)
        # Sleep only for the remainder of the throttle window.  Clamp at zero:
        # time.sleep() raises ValueError for negative durations, which happened
        # whenever the API call itself took longer than `delay` seconds.
        time.sleep(max(0.0, delay - (time.time() - start_time)))
    return {'voters': voters,
            'number': number,
            'text': text,
            'status': statuses,
            'error': errors,
            'price': prices,
            'date': dates
            }
| jhatkins999/autodialer_tests | send_sms.py | send_sms.py | py | 1,267 | python | en | code | 0 | github-code | 90 |
42648354578 | import argparse
import os
from transformers import AutoTokenizer
from config import ICLPretrainConfig, ParseKwargs
from data_icl import ICLPretrainDataForEncDec
from trainer import Trainer
from utils import seed_everything, init_logger, load_dataset_names, expand_dataset_to_prompts
def run(logger, config):
    """Pretrain an encoder-decoder model for in-context learning.

    Builds the trainer and model from `config`, loads the matching
    tokenizer, expands the T0 training datasets into per-prompt
    identifiers, tensorizes the training split, and launches training
    (no dev set).
    """
    # Trainer owns model construction / checkpoint loading and the loop.
    trainer = Trainer(config, logger)
    model = trainer.load_model(path=config.init_checkpoint)

    # Tokenizer must match the model; the trainer needs it for padding.
    tok = AutoTokenizer.from_pretrained(config.model)
    trainer.tokenizer = tok
    trainer.pad_token_id = tok.pad_token_id

    # Expand every T0 training dataset into its per-prompt identifiers.
    prompts = expand_dataset_to_prompts(load_dataset_names("t0", "train"))

    # Build and tensorize the training data pipeline.
    train_data = ICLPretrainDataForEncDec(
        logger=logger,
        config=config,
        tokenizer=tok,
        datasets=prompts,
        data_split="train",
        is_training=True,
    )
    train_data.load_raw_data()
    train_data.load_dataset()
    train_data.load_dataloader()

    trainer.do_train(model, train_data, dev_data=None)
if __name__=='__main__':
    # CLI: one or more YAML/JSON config files plus ad-hoc key=value overrides.
    parser = argparse.ArgumentParser("Training EncDec Models for In-context Learning")
    parser.add_argument("-c", "--config_files", default=None)
    parser.add_argument("-k", "--kwargs", nargs="*", action=ParseKwargs, default={})
    args = parser.parse_args()

    config = ICLPretrainConfig(args.config_files, args.kwargs)

    # exist_ok=True replaces the original check-then-create pattern
    # (`if not os.path.exists(...): os.makedirs(...)`), which is racy when
    # several jobs start concurrently.
    os.makedirs(config.out_dir, exist_ok=True)
    os.makedirs(config.tensorize_dir, exist_ok=True)

    seed_everything(config.train_seed)
    logger = init_logger(config)
    logger.info(config.to_json())
run(logger, config) | INK-USC/FiD-ICL | encdec/run_icl.py | run_icl.py | py | 1,755 | python | en | code | 10 | github-code | 90 |
24384588747 | #!/usr/bin/env python3.3
import argparse
import fastn
# Command-line interface: required input file, output prefix, and per-file
# base cap, plus an optional cap on sequences per output file.
arg_parser = argparse.ArgumentParser(
    description='Splits a fasta/q file into separate files. Does not split sequences. Puts up to max_bases into each split file. The exception is that any sequence longer than max_bases is put into its own file. No sequences are split.',
    usage='%(prog)s [options] <fasta/q in> <prefix of output files> <max_bases>')
arg_parser.add_argument('infile', help='Name of input fasta/q file to be split')
arg_parser.add_argument('outprefix', help='Name of output fasta/q file')
arg_parser.add_argument('max_bases', type=int, help='Max bases in each output split file', metavar='max_bases')
arg_parser.add_argument('--max_seqs', type=int, help='Max number of sequences in each output split file [no limit]', metavar='INT')
opts = arg_parser.parse_args()

# Delegate the actual splitting to the fastn library.
fastn.split_by_base_count(opts.infile, opts.outprefix, opts.max_bases, opts.max_seqs)
| MagdalenaZZ/Python_ditties | fastn_split_by_seq_sizes.py | fastn_split_by_seq_sizes.py | py | 925 | python | en | code | 0 | github-code | 90 |
18241535429 |
def resolve():
def sub(s):
cur = 0
last = -(C + 1)
res = [0] * (N + 1)
for i in range(N):
if i - last > C and s[i] == "o":
cur += 1
last = i
res[i + 1] = cur
return res
N, K, C = map(int, input().split())
S = input()
left = sub(S)
T = S[::-1]
right = sub(T)
for i in range(N):
if S[i] == "x":
continue
if left[i] + right[N - i - 1] < K:
print(i + 1)
if __name__ == "__main__":
resolve()
| Aasthaengg/IBMdataset | Python_codes/p02721/s725402807.py | s725402807.py | py | 560 | python | en | code | 0 | github-code | 90 |
29940700351 | from bs4 import BeautifulSoup
import requests
import os
import dotenv
# Load configuration from a local .env file so the scrape target (and any
# request headers) stay out of source control.
dotenv.load_dotenv()
URL = os.getenv('URL')  # product page to scrape; None if the var is unset
headers = os.getenv('HEADERS')  # NOTE(review): loaded but never used below — confirm intent
def check():
    """Fetch the product page at the module-level URL and print its name
    and current price.

    Output goes to stdout; nothing is returned. Raises AttributeError if
    either CSS class is missing from the page (find() returns None).
    """
    # NOTE(review): the module-level `headers` value is never sent with this
    # request, and requests.get has no timeout (it can hang indefinitely) —
    # confirm intent before shipping.
    page_html = requests.get(URL).text
    soup = BeautifulSoup(page_html, 'html.parser')
    name = soup.find('h1', class_='rf-pdp-title').text
    price = soup.find('div', class_='rf-pdp-currentprice').text
    # Dead local `s = []` from the original removed; print spacing fixed
    # (output is unchanged).
    print(name, price)

check()
| ironnicko/online-price-tracker | new_iphone_price.py | new_iphone_price.py | py | 403 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.