seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17998434749 | import sys
from collections import Counter
read=sys.stdin.read
# n is the element count on the first line; m is parsed but not used again
# in this snippet. The remaining whitespace-separated tokens, read in bulk
# from stdin, form the array.
n,m=map(int,input().split())
a=list(map(int,read().split()))
c=Counter(a)
ans="YES"
# Answer is YES iff every distinct value occurs an even number of times.
for i in c.values():
    if i%2:
        ans="NO"
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03724/s595818377.py | s595818377.py | py | 201 | python | en | code | 0 | github-code | 90 |
43793506404 | """intron_health_migration_script
Revision ID: 7e6b8fa444d3
Revises:
Create Date: 2020-02-02 16:36:25.380587
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '7e6b8fa444d3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``Users`` table (auto-generated by Alembic)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('Users',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('salary', sa.Integer(), nullable=False),
    sa.Column('time_created', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    # Every user name must be unique.
    sa.UniqueConstraint('name')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert ``upgrade``: drop the ``Users`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('Users')
    # ### end Alembic commands ###
| oloyedeolad/health_task | intron_health_migrations/versions/7e6b8fa444d3_intron_health_migration_script.py | 7e6b8fa444d3_intron_health_migration_script.py | py | 1,020 | python | en | code | 0 | github-code | 90 |
def B10N(X, B):
    """Render non-negative integer X in base B (B <= 10) as a digit string."""
    digits = []
    while True:
        digits.append(str(X % B))
        X //= B
        if X == 0:
            break
    return ''.join(reversed(digits))
N,A,B,C = (int(X) for X in input().split())
Li = [int(input()) for X in range(0,N)]
CostM = pow(10,9)
for T in range(0,pow(4,N)):
Cho = list(B10N(T,4).zfill(N))
if Cho.count('1')>0 and Cho.count('2')>0 and Cho.count('3')>0:
Cost = 0
ALi = [Li[X] for X,Y in enumerate(Cho) if Y=='1']
BLi = [Li[X] for X,Y in enumerate(Cho) if Y=='2']
CLi = [Li[X] for X,Y in enumerate(Cho) if Y=='3']
SyC = (len(ALi)+len(BLi)+len(CLi)-3)*10
LeC = abs(sum(ALi)-A)+abs(sum(BLi)-B)+abs(sum(CLi)-C)
if SyC+LeC<CostM: CostM = SyC+LeC
print(CostM) | Aasthaengg/IBMdataset | Python_codes/p03111/s553944943.py | s553944943.py | py | 668 | python | en | code | 0 | github-code | 90 |
26848096196 | import streamlit as st
import os
from PyPDF2 import PdfReader
from PyPDF2 import PdfFileReader
import PyPDF2
import docx
import pandas as pd
from io import StringIO
import string
import re
import nltk
from nltk.corpus import stopwords
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
def stemming(text):
    """Preprocess Indonesian text and return the list of stemmed tokens.

    Pipeline: strip punctuation/whitespace -> lowercase -> drop digits ->
    tokenize -> remove stopwords -> stem (Sastrawi). Every intermediate
    result is also rendered into the Streamlit UI as a side effect.
    """
    # Remove punctuation, tabs/newlines and surrounding whitespace.
    whitespace = text.translate(str.maketrans('','',string.punctuation)).replace("\t", "").replace("\n", "").replace("\r", "").strip()
    # Convert to lowercase.
    lowercase = whitespace.lower()
    # Remove digits.
    removed_angka = re.sub(r"\d+", "", lowercase)
    hasil = removed_angka
    st.text_area("Hasil Preprocessing: ", hasil, disabled=True)
    # Tokenizing: split the text into individual words.
    tokenized = nltk.tokenize.word_tokenize(hasil)
    st.text_area("Hasil Tokenizing: ", tokenized, disabled=True)
    # Stopword filtering: remove words that carry no meaning.
    # Indonesian stopword list from the nltk library.
    listStopword = nltk.corpus.stopwords.words('indonesian')
    # Project-specific extra stopwords.
    addstop = ['ku', 'pel', 'uru', 'mem', 'astikan', 's', 'iapa', 'koyak']
    listStopword.extend(addstop)
    # Filtering step.
    filtering = [words for words in tokenized if not words in listStopword]
    st.text_area("Hasil Stopword: ", filtering, disabled=True)
    # Stemming reduces each word to its root form.
    stemmer = StemmerFactory().create_stemmer()
    sentence_doc = ' '.join(map(str, filtering))
    stemming_doc = stemmer.stem(sentence_doc)
    result_doc = nltk.tokenize.word_tokenize(stemming_doc)
    st.text_area("Hasil Stemming: ", result_doc, disabled=True)
    return result_doc
st.title("IR Apps - Mencari data dari query")
st.subheader("Kelompok 2")
data = []
text =[]
# Get the directory path from the user
try:
dir_path = st.text_input("Masukkan path folder: ")
# Get a list of all the files in the directory
files = os.listdir(dir_path)
os.chdir(dir_path)
# Iterate over the list of files
for file in files:
file_path = os.path.join(dir_path, file) # construct the full path to the file
file_details = os.stat(file_path) # get the file's details
file_name, file_ext = os.path.splitext(file)
data.append((file_name, file_ext))
# table_data = {'Nama File': file, 'Size': file_details.st_size, 'Last modified:': file_details.st_mtime, 'Extention: ': file_extention[1]}
df = pd.DataFrame(data, columns=['Nama File', 'Ekstensi File'])
st.write(df)
except:
st.write(" ")
if st.button("Stemming"):
for i, ekstrak in enumerate(files):
#mengambil ektensi
file_name, file_ext = os.path.splitext(ekstrak)
if file_ext == '.docx':
doc = docx.Document(ekstrak)
fulldoc = []
# all_parag_doc = doc.paragraphs
st.subheader("Hasil ekstrak dokumen ke-"+ f"{i}" + " (DOCX)")
for parag in doc.paragraphs:
fulldoc.append(parag.text)
test = ' '.join(map(str, fulldoc))
st.write(test)
hasilstem_docx = stemming(test)
elif file_ext == '.pdf':
with open(ekstrak, 'rb') as file:
# Baca file PDF
reader = PyPDF2.PdfFileReader(file)
# Ambil jumlah halaman
jumlah_halaman = reader.getNumPages()
# Iterasi melalui setiap halaman
for halaman in range(jumlah_halaman):
# Baca halaman ke-i
hal = reader.getPage(halaman)
# Dapatkan teks dari halaman
teks = hal.extractText()
st.subheader("Hasil ekstrak dokumen ke-"+f"{i}"+ " (PDF)")
st.write(teks)
hasilstem_pdf = stemming(teks)
else:
st.write("Tipe file salah, hanya file pdf dan docx yang dapat dilakukan stemming")
| MahzuzH/document-comparison | test/reading_path.py | reading_path.py | py | 3,897 | python | en | code | 0 | github-code | 90 |
20614680251 | from django.shortcuts import render
from rest_framework.views import APIView
from django.contrib.auth import get_user_model
from StudentHouse.organization.models import *
from rest_framework.response import Response
from StudentHouse.organization.serializers import *
# Create your views here.
class UserDetails(APIView):
    """Resolve a user's profile (teacher, institute or mzazi) from a user id.

    The POST body must contain ``id`` — the primary key of the auth user.
    The profile type is determined in order: a ``MwalimuProfile`` whose
    e-mail matches the user's, then an ``institute`` relation, then a
    ``mzazi`` relation.
    """

    def post(self, request, *args, **kwargs):
        id = request.data['id']
        user = get_user_model().objects.get(id=id)
        # For organization and mzazi the profile hangs off the user object;
        # teachers are matched by e-mail against the MwalimuProfile table.
        teacher = MwalimuProfile.objects.filter(email=user.email)
        if teacher.exists():
            # The user is a teacher. BUG FIX: the original passed the
            # QuerySet straight to the serializer; a single instance must
            # be serialized (or many=True supplied), otherwise DRF raises
            # at serialization time.
            serializer = MwalimuProfileSerializer(teacher.first())
            return Response(serializer.data)
        elif hasattr(user, 'institute'):
            profile = user.institute
            serializer = OrganizationSerializer(profile)
            return Response(serializer.data)
        elif hasattr(user, 'mzazi'):
            profile = user.mzazi
            serializer = MzaziSerializer(profile)
            return Response(serializer.data)
        else:
            return Response({"error": "Unexpected error occurred"})
user_details = UserDetails.as_view() | root123-bot/STUDENT-HOUSE | StudentHouse/user/views.py | views.py | py | 1,288 | python | en | code | 0 | github-code | 90 |
2510237892 | from pyspark.sql import SparkSession, DataFrame, Row
import pyspark.sql.types as st
import pyspark.sql.functions as sf
# Local SparkSession using all available cores.
spark = (
    SparkSession
    .builder
    .master("local[*]")
    .getOrCreate()
)

# Explicit schema: a constant label column, a currency code and its rate.
schema = st.StructType([
    st.StructField("stable_column", st.StringType(), True),
    st.StructField("currency", st.StringType(), True),
    st.StructField("cur_value", st.DoubleType(), True),
])

cur_df = spark.createDataFrame(
    [
        ('value', 'EUR', 1.19),
        ('value', 'UAH', 32.54),
        ('value', 'EUR', 1.0),
        ('value', 'UAH', 32.54),
    ],
    schema=schema
)
cur_df.show()

# Demonstrates sf.when/otherwise: flag rows whose currency is EUR.
df_with_is_eur = cur_df.withColumn(
    'is_Eur',
    sf.when(
        sf.col('currency') == 'EUR',
        sf.lit(True)
    ).otherwise(False)
)
df_with_is_eur.show()
df_with_is_eur.printSchema()
| VladyslavPodrazhanskyi/learn_spark | code/my_practice/21.when.py | 21.when.py | py | 816 | python | en | code | 0 | github-code | 90 |
10353516537 | DUMMYMODE = True # False for gaze contingent display, True for dummy mode (using mouse or joystick)
# DISPLAY
SCREENNR = 0 # number of the screen used for displaying experiment
DISPTYPE = 'psychopy' # either 'psychopy' or 'pygame'
DISPSIZE = (1920,1080) # canvas size
MOUSEVISIBLE = False # mouse visibility
BGC = (125,125,125) # backgroundcolour
FGC = (0,0,0) # foregroundcolour
FONTSIZE = 32 # font size
# INPUT
KEYLIST = ['space', 'return'] # None for all keys; list of keynames for keys of choice (e.g. ['space','9',':'] for space, 9 and ; keys)
KEYTIMEOUT = 1 # None for no timeout, or a value in milliseconds
# EYETRACKER
# general
TRACKERTYPE = 'eyelink' # either 'smi', 'eyelink' or 'dummy' (NB: if DUMMYMODE is True, trackertype will be set to dummy automatically)
SACCVELTHRESH = 35 # degrees per second, saccade velocity threshold
SACCACCTHRESH = 9500 # degrees per second, saccade acceleration threshold
# EyeLink only
# SMI only
SMIIP = '127.0.0.1'
SMISENDPORT = 4444
SMIRECEIVEPORT = 5555
| NEUREM3/recording-code-for-eyetracked-multi-modal-translation | src/defaults.py | defaults.py | py | 1,007 | python | en | code | 1 | github-code | 90 |
16566741429 |
import os
from io import BytesIO
import requests
from uuid import uuid4
from pathlib import Path
import json
import utils.config as cfg
from utils.helper import Singleton
#
# EVENTS
#
EVENT_SUCCESS = 0
EVENT_FAILED = 1
# Auth events
EVENT_DEVICE_TOKEN_FAILED = 2
EVENT_USER_TOKEN_FAILED = 3
EVENT_ONETIMECODE_NEEDED = 4
#
# CONSTANTS
#
USER_AGENT = "remapy"
BASE_URL = "https://document-storage-production-dot-remarkable-production.appspot.com"
DEVICE_TOKEN_URL = "https://webapp-production-dot-remarkable-production.appspot.com/token/json/2/device/new"
USER_TOKEN_URL = "https://webapp-production-dot-remarkable-production.appspot.com/token/json/2/user/new"
DEVICE = "mobile-android"
SERVICE_MGR_URL = "https://service-manager-production-dot-remarkable-production.appspot.com"
LIST_DOCS_URL = BASE_URL + "/document-storage/json/2/docs"
UPDATE_STATUS_URL = BASE_URL + "/document-storage/json/2/upload/update-status"
UPLOAD_REQUEST_URL = BASE_URL + "/document-storage/json/2/upload/request"
DELETE_ENTRY_URL = BASE_URL + "/document-storage/json/2/delete"
#
# CLIENT
#
class RemarkableClient():
""" Client to connect to rm cloud via REST
"""
class SignInListenerHandler(metaclass=Singleton):
def __init__(self):
self.sign_in_listener = []
def listen_sign_in_event(self, subscriber):
""" Sends a signal (true) if successfully signed in
and (false) if login was not possible in rm cloud.
"""
self.sign_in_listener.append(subscriber)
def publish(self, code=EVENT_SUCCESS, data=None):
for subscriber in self.sign_in_listener:
try:
subscriber.sign_in_event_handler(code, data)
except Exception as e:
print("(Warning) Failed to publish subscriber.")
print(e)
def __init__(self):
self.test = True
self.listener_handler = self.SignInListenerHandler()
def listen_sign_in_event(self, subscriber):
self.listener_handler.listen_sign_in_event(subscriber)
def sign_in(self, onetime_code=None):
""" Load token. If not available the user must provide a
one time code from https://my.remarkable.com/connect/remarkable
"""
try:
# Get device token if not stored local
device_token = cfg.get("authentication.device_token")
if device_token == None:
if onetime_code is None or onetime_code == "":
self.listener_handler.publish(EVENT_ONETIMECODE_NEEDED)
return
device_token = self._get_device_token(onetime_code)
if device_token is None:
self.listener_handler.publish(EVENT_DEVICE_TOKEN_FAILED)
return
# Renew the user token.
user_token = self._get_user_token(device_token)
if user_token is None:
self.listener_handler.publish(EVENT_USER_TOKEN_FAILED)
return
# Save tokens to config
auth = {"device_token": device_token,
"user_token": user_token}
cfg.save({"authentication": auth})
# Inform all subscriber
self.listener_handler.publish(EVENT_SUCCESS, auth)
except:
auth={}
self.listener_handler.publish(EVENT_FAILED, auth)
return auth
def get_item(self, id):
response = self._request("GET", LIST_DOCS_URL, params={
"doc": id,
"withBlob": True
})
if response.ok:
items = response.json()
return items[0]
return None
def delete_item(self, id, version):
response = self._request("PUT", DELETE_ENTRY_URL, body=[{
"ID": id,
"Version": version
}])
if response.ok:
return True
return False
def list_items(self):
response = self._request("GET", LIST_DOCS_URL)
if response.ok:
items = response.json()
# Logging only
# items_str = json.dumps(items, indent=4)
# with open("all_files.json", "wt") as f:
# f.write(items_str)
return items
return None
def get_raw_file(self, blob_url):
stream = self._request("GET", blob_url, stream=True)
zip_io = BytesIO()
for chunk in stream.iter_content(chunk_size=8192):
zip_io.write(chunk)
return zip_io.getbuffer()
def upload(self, id, metadata, zip_file):
response = self._request("PUT", "/document-storage/json/2/upload/request",
body=[{
"ID": id,
"Type": "DocumentType",
"Version": 1
}])
if not response.ok:
print("(Error) Upload request failed")
return
response = response.json()
blob_url = response[0].get("BlobURLPut", None)
response = self._request("PUT", blob_url, data=zip_file.getvalue())
zip_file.seek(0)
if not response.ok:
print("(Error) Upload request failed")
return
return self.update_metadata(metadata)
def update_metadata(self, metadata):
response = self._request("PUT", UPDATE_STATUS_URL, body=[metadata])
if not response.ok:
print("(Error) Upload request failed")
return
return self.get_item(metadata["ID"])
def _get_device_token(self, one_time_code):
""" Create a new device for a given one_time_code to be able to
connect to the rm cloud
"""
body = {
"code": one_time_code,
"deviceDesc": DEVICE,
"deviceID": str(uuid4()),
}
response = self._request("POST", DEVICE_TOKEN_URL, body=body)
if response.ok:
device_token = response.text
return device_token
return None
def _get_user_token(self, device_token):
""" This is the second step of the authentication of the Remarkable Cloud.
Before each new session, you should fetch a new user token.
User tokens have an unknown expiration date.
"""
if device_token is None or device_token == "":
return None
try:
response = self._request("POST", USER_TOKEN_URL, None, headers={
"Authorization": "Bearer %s" % device_token
})
except:
return None
if response.ok:
user_token = response.text
return user_token
return None
def _request(self, method, path,
data=None, body=None, headers=None,
params=None, stream=False):
"""Creates a request against the Remarkable Cloud API
This function automatically fills in the blanks of base
url & authentication.
Credit: https://github.com/subutux/rmapy/blob/master/rmapy/api.py
Args:
method: The request method.
path: complete url or path to request.
data: raw data to put/post/...
body: the body to request with. This will be converted to json.
headers: a dict of additional headers to add to the request.
params: Query params to append to the request.
stream: Should the response be a stream?
Returns:
A Response instance containing most likely the response from
the server.
"""
config = cfg.load()
if headers is None:
headers = {}
if not path.startswith("http"):
if not path.startswith('/'):
path = '/' + path
url = "%s%s" % (BASE_URL, path)
else:
url = path
_headers = {
"user-agent": USER_AGENT,
}
user_token = cfg.get("authentication.user_token")
if user_token != None:
_headers["Authorization"] = "Bearer %s" % user_token
for k in headers.keys():
_headers[k] = headers[k]
r = requests.request(method, url,
json=body,
data=data,
headers=_headers,
params=params,
stream=stream,
timeout=60*2)
return r
| peerdavid/remapy | api/remarkable_client.py | remarkable_client.py | py | 8,589 | python | en | code | 172 | github-code | 90 |
18355669439 | k, x = map(int, input().split())
dif = k - 1
if dif == 0:
print(x)
else:
mini = x - dif
maxi = x + dif
a = []
for i in range(mini, maxi + 1):
a.append(str(i))
s = ' '.join(a)
print(s) | Aasthaengg/IBMdataset | Python_codes/p02946/s092963894.py | s092963894.py | py | 219 | python | en | code | 0 | github-code | 90 |
8323093704 | import cv2
import numpy as np
from os import listdir
from os.path import isfile, join
def getC(u, v):
    """Combined 2-D DCT normalization factor C(u)*C(v).

    0.5 when both frequency indices are zero, 1/sqrt(2) when exactly one
    of them is zero, and 1 otherwise.
    """
    if u == 0 and v == 0:
        return 0.5
    if (u == 0 and v > 0) or (u > 0 and v == 0):
        return 1 / np.sqrt(2)
    return 1
def dct(Color, N=16):
    """Forward 2-D DCT-II of an N x N block, keeping only the 16
    largest-magnitude coefficients (row-major tie-break) and zeroing
    the rest.

    Generalizes the original hard-coded 16x16 version: the 1/8 scale
    factor is 2/N, and the combined normalization getC(u, v) factors as
    norm[u] * norm[v] with norm[0] = 1/sqrt(2). The O(N^4) Python loop
    is replaced by two matrix multiplications with the same values.

    Args:
        Color: N x N array-like of sample values.
        N: block size (default 16, matching the rest of this script).

    Returns:
        numpy array of shape (N, N) with at most 16 non-zero entries.
    """
    block = np.asarray(Color, dtype=float)
    # basis[k, n] = cos(k * pi * (2n + 1) / (2N)), computed once.
    idx = np.arange(N)
    basis = np.cos(np.outer(idx, 2 * idx + 1) * np.pi / (2.0 * N))
    norm = np.ones(N)
    norm[0] = 1 / np.sqrt(2)
    F = np.outer(norm, norm) * (2.0 / N) * (basis @ block @ basis.T)
    # Magnitude of the 16th-largest coefficient is the keep threshold.
    tmp = np.sort(np.abs(F).ravel())[::-1]
    cut = tmp[15]
    # Keep the first 16 coefficients (row-major) whose magnitude reaches
    # the threshold; zero everything else.
    cnt = 0
    for i in range(N):
        for j in range(N):
            if cnt == 16 or abs(F[i][j]) < cut:
                F[i][j] = 0
            else:
                cnt += 1
    return F
def idct(F, N=16):
    """Inverse 2-D DCT of an N x N coefficient block.

    Mirrors dct(): the 1/8 scale is 2/N and the per-index normalization
    factors as norm[u] * norm[v] with norm[0] = 1/sqrt(2). Vectorized to
    two matrix multiplications instead of an O(N^4) Python loop.

    Returns a list of lists (as the original did) so callers can index
    and assign S[x][y].
    """
    coeffs = np.asarray(F, dtype=float)
    idx = np.arange(N)
    basis = np.cos(np.outer(idx, 2 * idx + 1) * np.pi / (2.0 * N))
    norm = np.ones(N)
    norm[0] = 1 / np.sqrt(2)
    S = (2.0 / N) * (basis.T @ (np.outer(norm, norm) * coeffs) @ basis)
    return S.tolist()
N = 16
# 1(256*256) 2(720*960) 3(720*960)
img_path = 'test2'
files = [ f for f in listdir(img_path) if isfile(join(img_path,f)) ]
max_j = 256
max_k = 256
for i in range(3):
img = cv2.imread(join(img_path,files[i]),1)
(B, G, R) = cv2.split(img)
if i>0 :
max_j = 720
max_k = 960
for j in range(0,max_j,16):
for k in range(0,max_k,16):
tmp = dct(R[j:j+16,k:k+16])
tmp = idct(tmp)
for p in range(16):
for q in range(16):
R[j+p][k+q] = tmp[p][q]
tmp = dct(G[j:j+16,k:k+16])
tmp = idct(tmp)
for p in range(16):
for q in range(16):
G[j+p][k+q] = tmp[p][q]
tmp = dct(B[j:j+16,k:k+16])
tmp = idct(tmp)
for p in range(16):
for q in range(16):
B[j+p][k+q] = tmp[p][q]
print("%d is done!!" % k)
print("[j]%d is done!!" % j)
for j in range(max_j):
for k in range(max_k):
if R[j][k] < 0: R[j][k] = 0
elif R[j][k] >255: R[j][k] = 255
if G[j][k] < 0: G[j][k] = 0
elif G[j][k] >255: G[j][k] = 255
if B[j][k] < 0: B[j][k] = 0
elif B[j][k] >255: B[j][k] = 255
merged = cv2.merge([B,G,R])
cv2.imshow("Merged", merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
| sgu0927/Numerical-Analysis | DCT encoding/DCT_encoding.py | DCT_encoding.py | py | 2,875 | python | en | code | 0 | github-code | 90 |
18458212239 | import bisect
def solve():
    """Return the minimum number of indices of A that must be rewritten
    so the per-index deficits against B can be covered, or -1 when the
    total of A is smaller than the total of B (impossible).

    Reads from stdin: N, then the N values of A, then the N values of B.
    """
    N = int(input())
    A = list(map(int, input().split()))
    B = list(map(int, input().split()))
    if sum(A) < sum(B):
        # Not enough total capacity; no rewriting can fix it.
        return -1
    # C[i] > 0 means index i has surplus, C[i] < 0 a deficit.
    C = [a - b for a, b in zip(A, B)]
    C.sort()
    # mid = number of deficit positions; each one must be rewritten.
    mid = bisect.bisect_left(C, 0)
    minus = sum(C[:mid])
    if minus >= 0:
        # No deficits at all: nothing needs to change.
        return 0
    cnt = mid
    # Greedily rewrite the largest-surplus positions until the combined
    # deficit is covered. Since sum(C) >= 0 here, the loop is guaranteed
    # to return before it re-enters the deficit region of the sorted C.
    for i in reversed(range(N)):
        minus += C[i]
        cnt += 1
        if minus >= 0:
            return cnt
    return -1
print(solve()) | Aasthaengg/IBMdataset | Python_codes/p03151/s220294753.py | s220294753.py | py | 503 | python | en | code | 0 | github-code | 90 |
73795556458 | """empty message
Revision ID: f1aaf899424d
Revises:
Create Date: 2018-04-14 00:03:16.490169
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1aaf899424d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``user`` table and the ``mining_data`` table that
    references it (auto-generated by Alembic)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('character_id', sa.BigInteger(), autoincrement=False, nullable=False),
    sa.Column('character_owner_hash', sa.String(length=255), nullable=True),
    sa.Column('character_name', sa.String(length=200), nullable=True),
    sa.Column('access_token', sa.String(length=100), nullable=True),
    sa.Column('access_token_expires', sa.DateTime(), nullable=True),
    sa.Column('refresh_token', sa.String(length=100), nullable=True),
    sa.Column('latest_seen', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('character_id')
    )
    # One row per character / day / solar system / ore type.
    op.create_table('mining_data',
    sa.Column('character_id', sa.BigInteger(), nullable=False),
    sa.Column('date', sa.Date(), nullable=False),
    sa.Column('solar_system_id', sa.BigInteger(), nullable=False),
    sa.Column('type_id', sa.BigInteger(), nullable=False),
    sa.Column('quantity', sa.BigInteger(), nullable=True),
    sa.Column('ore_name', sa.String(length=100), nullable=True),
    sa.Column('volume', sa.Float(), nullable=True),
    sa.ForeignKeyConstraint(['character_id'], [u'user.character_id'], ),
    sa.PrimaryKeyConstraint('character_id', 'date', 'solar_system_id', 'type_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert ``upgrade``: drop ``mining_data`` first (it holds the FK),
    then ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('mining_data')
    op.drop_table('user')
    # ### end Alembic commands ###
| paulsulli/MiningLedger | migrations/versions/f1aaf899424d_.py | f1aaf899424d_.py | py | 1,763 | python | en | code | 0 | github-code | 90 |
36127469166 | """Make team enrollment revisable
Revision ID: a0c708394373
Revises: 19efd09533ca
Create Date: 2020-03-19 17:12:42.598485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "a0c708394373"
down_revision = "19efd09533ca"
branch_labels = None
depends_on = None
def upgrade():
    """Add a self-referencing ``revisee_id`` column to ``team_enrollment``
    so an enrollment can point at the enrollment it revises."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "team_enrollment", sa.Column("revisee_id", sa.Integer(), nullable=True)
    )
    op.create_index(
        op.f("ix_team_enrollment_revisee_id"),
        "team_enrollment",
        ["revisee_id"],
        unique=False,
    )
    # The foreign key is created without an explicit name (None).
    op.create_foreign_key(
        None, "team_enrollment", "team_enrollment", ["revisee_id"], ["id"]
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert ``upgrade``: drop the FK, the index and the column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint with name=None only works on
    # backends where Alembic can infer the auto-generated name — confirm.
    op.drop_constraint(None, "team_enrollment", type_="foreignkey")
    op.drop_index(
        op.f("ix_team_enrollment_revisee_id"), table_name="team_enrollment"
    )
    op.drop_column("team_enrollment", "revisee_id")
    # ### end Alembic commands ###
| MTES-MCT/mobilic-api | migrations/versions/a0c708394373_make_team_enrollment_revisable.py | a0c708394373_make_team_enrollment_revisable.py | py | 1,133 | python | en | code | 1 | github-code | 90 |
20902157862 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
import math
import warnings
def initial_type(name,
input,
op_type,
fan_out,
init="google",
use_bias=False,
filter_size=0,
stddev=0.02):
if init == "kaiming":
if op_type == 'conv':
fan_in = input.shape[1] * filter_size * filter_size
elif op_type == 'deconv':
fan_in = fan_out * filter_size * filter_size
else:
if len(input.shape) > 2:
fan_in = input.shape[1] * input.shape[2] * input.shape[3]
else:
fan_in = input.shape[1]
bound = 1 / math.sqrt(fan_in)
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.Uniform(
low=-bound, high=bound))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + '_offset',
initializer=fluid.initializer.Uniform(
low=-bound, high=bound))
else:
bias_attr = False
elif init == 'google':
n = filter_size * filter_size * fan_out
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=math.sqrt(2.0 / n)))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset", initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
else:
param_attr = fluid.ParamAttr(
name=name + "_weights",
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev))
if use_bias == True:
bias_attr = fluid.ParamAttr(
name=name + "_offset", initializer=fluid.initializer.Constant(0.0))
else:
bias_attr = False
return param_attr, bias_attr
def cal_padding(img_size, stride, filter_size, dilation=1):
    """Return the (begin, end) padding for "SAME"-style convolution.

    Total padding is the filter overhang left after the final full
    stride; when it is odd the extra pixel goes on the trailing edge.
    ``dilation`` is accepted for signature compatibility but unused here.
    """
    remainder = img_size % stride
    overhang = filter_size - (stride if remainder == 0 else remainder)
    total = max(overhang, 0)
    begin = total // 2
    return begin, total - begin
def init_batch_norm_layer(name="batch_norm"):
param_attr = fluid.ParamAttr(
name=name + '_scale', initializer=fluid.initializer.Constant(1.0))
bias_attr = fluid.ParamAttr(
name=name + '_offset', initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def init_fc_layer(fout, name='fc'):
n = fout # fan-out
init_range = 1.0 / math.sqrt(n)
param_attr = fluid.ParamAttr(
name=name + '_weights', initializer=fluid.initializer.UniformInitializer(
low=-init_range, high=init_range))
bias_attr = fluid.ParamAttr(
name=name + '_offset', initializer=fluid.initializer.Constant(value=0.0))
return param_attr, bias_attr
def norm_layer(input, norm_type='batch_norm', name=None):
    """Apply batch or instance normalization to ``input``.

    Args:
        input: feature-map variable (NCHW; instance norm reduces over
            dims 2 and 3).
        norm_type: 'batch_norm' or 'instance_norm'.
        name: prefix for created parameters. Required for 'batch_norm';
            optional for 'instance_norm' (auto-generated names are used
            when omitted).

    Raises:
        NotImplementedError: for any other ``norm_type``.
    """
    if norm_type == 'batch_norm':
        param_attr = fluid.ParamAttr(
            name=name + '_weights', initializer=fluid.initializer.Constant(1.0))
        bias_attr = fluid.ParamAttr(
            name=name + '_offset', initializer=fluid.initializer.Constant(value=0.0))
        return fluid.layers.batch_norm(
            input,
            param_attr=param_attr,
            bias_attr=bias_attr,
            moving_mean_name=name + '_mean',
            moving_variance_name=name + '_variance')
    elif norm_type == 'instance_norm':
        helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
        dtype = helper.input_dtype()
        epsilon = 1e-5
        # Per-sample, per-channel statistics over the spatial dims.
        mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
        var = fluid.layers.reduce_mean(
            fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
        # BUG FIX: the original defined scale_name/offset_name only when
        # ``name`` was given but used them unconditionally below, raising
        # NameError for name=None. Fall back to auto-generated names.
        if name is not None:
            scale_name = name + "_scale"
            offset_name = name + "_offset"
        else:
            scale_name = None
            offset_name = None
        scale_param = fluid.ParamAttr(
            name=scale_name,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        offset_param = fluid.ParamAttr(
            name=offset_name,
            initializer=fluid.initializer.Constant(0.0),
            trainable=True)
        scale = helper.create_parameter(
            attr=scale_param, shape=input.shape[1:2], dtype=dtype)
        offset = helper.create_parameter(
            attr=offset_param, shape=input.shape[1:2], dtype=dtype)
        # (x - mean) * scale / sqrt(var + eps) + offset, broadcast over C.
        tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
        tmp = tmp / fluid.layers.sqrt(var + epsilon)
        tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
        return tmp
    else:
        raise NotImplementedError("norm type: [%s] is not supported" % norm_type)
def conv2d(input,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding=0,
groups=None,
name="conv2d",
norm=None,
act=None,
relufactor=0.0,
use_bias=False,
padding_type=None,
initial="normal",
use_cudnn=True):
if padding != 0 and padding_type != None:
warnings.warn(
'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
)
param_attr, bias_attr = initial_type(
name=name,
input=input,
op_type='conv',
fan_out=num_filters,
init=initial,
use_bias=use_bias,
filter_size=filter_size,
stddev=stddev)
def get_padding(filter_size, stride=1, dilation=1):
padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
return padding
need_crop = False
if padding_type == "SAME":
top_padding, bottom_padding = cal_padding(input.shape[2], stride,
filter_size)
left_padding, right_padding = cal_padding(input.shape[2], stride,
filter_size)
height_padding = bottom_padding
width_padding = right_padding
if top_padding != bottom_padding or left_padding != right_padding:
height_padding = top_padding + stride
width_padding = left_padding + stride
need_crop = True
padding = [height_padding, width_padding]
elif padding_type == "VALID":
height_padding = 0
width_padding = 0
padding = [height_padding, width_padding]
elif padding_type == "DYNAMIC":
padding = get_padding(filter_size, stride)
else:
padding = padding
conv = fluid.layers.conv2d(
input,
num_filters,
filter_size,
groups=groups,
name=name,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=param_attr,
bias_attr=bias_attr)
if need_crop:
conv = conv[:, :, 1:, 1:]
if norm is not None:
conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")
if act == 'relu':
conv = fluid.layers.relu(conv, name=name + '_relu')
elif act == 'leaky_relu':
conv = fluid.layers.leaky_relu(
conv, alpha=relufactor, name=name + '_leaky_relu')
elif act == 'tanh':
conv = fluid.layers.tanh(conv, name=name + '_tanh')
elif act == 'sigmoid':
conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
elif act == 'swish':
conv = fluid.layers.swish(conv, name=name + '_swish')
elif act == None:
conv = conv
else:
raise NotImplementedError("activation: [%s] is not support" %act)
return conv | PaddlePaddle/Research | CV/PaddleReid/reid/model/layers.py | layers.py | py | 7,959 | python | en | code | 1,671 | github-code | 90 |
34292703405 | # -*- coding: utf-8 -*-
# @Author: Blakeando
# @Date: 2020-08-13 14:24:11
# @Last Modified by: Blakeando
# @Last Modified time: 2020-08-13 14:24:11
import asyncio
import hashlib
import io
import json
import random
import re
import urllib.parse
import aiohttp
import aiosqlite
import discord
import inflect
from bs4 import BeautifulSoup
from discord.ext import commands
import core
import core.exceptions
import core.utils
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
home_page = "https://danbooru.donmai.us/"
class DanbooruClient:
def __init__(self, bot):
self.cache = core.utils.Cache(bot)
bot.loop.create_task(self.update_catalog_task())
async def get_images(self, tag):
images = list()
if await self.cache.get(tag) is None:
async with aiohttp.ClientSession() as session:
async with session.get(
"https://danbooru.donmai.us/posts.json",
params={"tags": tag, "limit": 100},
headers=headers,
) as response:
if response.status == 200:
data = await response.json()
await self.cache.add(tag, data, delay=500)
if data is not None:
for post in data:
try:
if post["file_url"].split(".")[-1] in [
"mp4",
"webm",
]:
pass
else:
images.append(
{
"file_url": post["file_url"],
"post_url": f"https://danbooru.donmai.us/posts/{post['id']}",
}
)
except:
pass
else:
data = await self.cache.get(tag)
for post in data:
try:
if post["file_url"].split(".")[-1] in ["mp4", "webm"]:
pass
else:
images.append(
{
"file_url": post["file_url"],
"post_url": f"https://danbooru.donmai.us/posts/{post['id']}",
}
)
except:
pass
return images
async def get_latest_posts(self):
images = dict()
async with aiohttp.ClientSession() as session:
async with session.get(
home_page + "/posts?tags=rating%3Aexplicit+&ms=1", headers=headers
) as response:
if response.status == 200:
page_html = await response.text()
soup = BeautifulSoup(page_html, "lxml")
div = soup.find("div", id="posts")
for post in div.find_all("article"):
if post.get("data-id", None) is not None:
tags = post.get("data-tags", "").split(" ")
source = f"{home_page}posts/{post.get('data-id', None)}"
img = post.get("data-file-url", None)
async with session.get(source, headers=headers) as response:
if response.status == 200:
page_html = await response.text()
soup = BeautifulSoup(page_html, "lxml")
title = (
soup.find("title")
.string.replace(" | Danbooru", "")
.replace("'", "")
.replace('"', "")
)
images[title] = {
"name": title,
"type": "image",
"product": img,
"price": 1.5 * len(tags),
"quantity": random.randint(5, 10),
"permanent": False,
"nsfw": True,
}
return images
async def update_catalog_task(self):
while True:
content = await self.get_latest_posts()
if not "danbooru" in list(await core.utils.shop_catalog()):
await core.utils.shop_catalog(
catalog="danbooru", nsfw=True, content=content, mode="c"
)
else:
await core.utils.shop_catalog(
catalog="danbooru", content=content, mode="w"
)
await asyncio.sleep(3600)
# async def image_fill_task():
# while True:
# for tag in list(core.danbooru_nsfw_images):
# core.danbooru_nsfw_images[tag]['images'] = await get_images(core.danbooru_nsfw_images[tag]['tag'])
# await asyncio.sleep(5)
# await asyncio.sleep(1800)
| kapsikkum/MechaDon | nsfw/danbooru.py | danbooru.py | py | 5,568 | python | en | code | 0 | github-code | 90 |
18452243129 | n = int(input())
# Three strings of length n (n was read above). Count the minimum number
# of single-character changes needed to make all three equal column by
# column.
a = input()
b = input()
c = input()
ans = 0
for i in range(n):
    if a[i] == b[i] and b[i] == c[i]:
        # All three already match: no operation needed.
        continue
    if a[i] != b[i] and a[i] != c[i] and b[i] != c[i]:
        # All three differ: two operations are needed.
        ans = ans + 2
        continue
    # Exactly two match: one operation fixes the odd one out.
    ans = ans + 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03140/s801569917.py | s801569917.py | py | 357 | python | en | code | 0 | github-code | 90 |
28241309071 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Room, Message
@login_required
def rooms(request):
    """Render the room index page listing every chat room."""
    return render(request, 'room/rooms.html', {'rooms': Room.objects.all()})
@login_required
def room(request, slug):
    """Render a single chat room with its first 25 stored messages.

    Uses get_object_or_404 so an unknown slug yields a 404 instead of an
    unhandled Room.DoesNotExist (500), matching get_messages in this file.
    """
    room = get_object_or_404(Room, slug=slug)
    messages = Message.objects.filter(room=room)[0:25]
    return render(request, 'room/room.html', {'room': room, 'messages': messages})
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
@login_required
def get_messages(request, slug):
    """Return every message in the room identified by *slug* as JSON,
    oldest first."""
    room = get_object_or_404(Room, slug=slug)
    ordered = Message.objects.filter(room=room).order_by('date_added')
    payload = [
        {
            'user': msg.user.username,
            'content': msg.content,
            'date_added': msg.date_added.strftime('%Y-%m-%d %H:%M:%S'),
        }
        for msg in ordered
    ]
    return JsonResponse({'messages': payload})
@login_required
@csrf_exempt  # Bypass the CSRF token requirement for the chat AJAX POSTs.
def send_message(request):
    """Persist a chat message POSTed via AJAX and echo it back as JSON.

    Expects POST fields 'room' (room slug) and 'message' (text).
    Returns {'status': 'success'|'error', 'message': ...}.
    """
    print("entered")
    # request.is_ajax() was deprecated in Django 3.1 and removed in 4.0;
    # test the X-Requested-With header it used to inspect instead.
    is_ajax = request.headers.get('x-requested-with') == 'XMLHttpRequest'
    if request.method == 'POST' and is_ajax:
        user = request.user
        room_name = request.POST.get('room')
        content = request.POST.get('message')
        if room_name and content:
            try:
                room = Room.objects.get(slug=room_name)
                message = Message.objects.create(room=room, user=user, content=content)
                # Echo the stored message so the client can render it.
                message_data = {
                    'user': message.user.username,
                    'content': message.content,
                    # 'date_added': message.date_added.strftime('%Y-%m-%d %H:%M:%S')
                }
                print("Message Saved Successfully:", message_data)
                return JsonResponse({'status': 'success', 'message': message_data})
            except Room.DoesNotExist:
                print("Room not found for room_name:", room_name)
                return JsonResponse({'status': 'error', 'message': 'Room not found'})
        else:
            print("Invalid data - room_name:", room_name, "content:", content)
            return JsonResponse({'status': 'error', 'message': 'Invalid data'})
    print("Invalid request method or not AJAX")
    return JsonResponse({'status': 'error', 'message': 'Invalid request'})
70841377896 | import subprocess
import os
import re
TCP_LIMIT = 1000 # max TCP packets seen from one source IP before it is blocked
UDP_LIMIT = 10000 # max UDP packets seen from one source IP before it is blocked
INTERFACE = 'eth0'  # network interface tcpdump captures on
tcp_ip_counts = {}  # source IP -> number of TCP packets observed so far
udp_ip_counts = {}  # source IP -> number of UDP packets observed so far
blocked_ips = set()  # IPs already handed to iptables, to avoid duplicate DROP rules
# Never block the IP of the SSH session running this script (if any).
ssh_client_ip = os.environ.get('SSH_CLIENT', '').split(' ')[0] if 'SSH_CLIENT' in os.environ else None
# Dotted-quad matcher; \b keeps it from matching inside longer digit runs.
ip_pattern = re.compile(r'\b(?:\d{1,3}\.){3}\d{1,3}\b')


def extract_attacker_ip(row):
    """Return the first IPv4 address found in *row*, or None if there is none.

    In tcpdump output the first address on the line is the packet's source.
    """
    matches = ip_pattern.findall(row)
    if matches:
        return matches[0]
    return None
# tcpdump prints one line per captured packet; count packets per source IP
# and install an iptables DROP rule once an IP crosses its protocol's limit.
process = None
try:
    process = subprocess.Popen(
        ['tcpdump', '-n', '-l', '-i', INTERFACE, 'ip', 'and', '(tcp or udp)'],
        stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    with process:
        for row in iter(process.stdout.readline, ''):
            attacker_ip = extract_attacker_ip(row)
            # Skip lines without an IP and never block our own SSH client.
            if not attacker_ip or attacker_ip == ssh_client_ip:
                continue
            if 'TCP' in row:
                tcp_ip_counts[attacker_ip] = tcp_ip_counts.get(attacker_ip, 0) + 1
                limit_reached = tcp_ip_counts[attacker_ip] > TCP_LIMIT
            elif 'UDP' in row:
                udp_ip_counts[attacker_ip] = udp_ip_counts.get(attacker_ip, 0) + 1
                limit_reached = udp_ip_counts[attacker_ip] > UDP_LIMIT
            else:
                continue  # ignore other packet types
            if limit_reached and attacker_ip not in blocked_ips:
                subprocess.run(['iptables', '-A', 'INPUT', '-s', attacker_ip, '-j', 'DROP'])
                blocked_ips.add(attacker_ip)
                print(f"IP {attacker_ip} bloccato.")
except KeyboardInterrupt:
    print("Interrotto dall'utente.")
finally:
    # Guarded cleanup: the original unconditionally called process.terminate(),
    # which raised NameError when Popen itself failed and targeted an
    # already-reaped child after the `with` block exited.
    if process is not None and process.poll() is None:
        process.terminate()
| Loki-it/Packet-Limiter | main.py | main.py | py | 1,675 | python | en | code | 0 | github-code | 90 |
36437556073 | import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import re
import numpy
#########################################################################################
#### ####
#### GMPIComputation ###################################################################
#### ####
#########################################################################################
class GMPIComputation(ScriptedLoadableModule):
  """Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def __init__(self, parent):
    # Populate the metadata Slicer shows in its module selector and Help tab.
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "3. GMPIComputation" # TODO make this more human readable by adding spaces
    self.parent.categories = ["SEEGA"]
    self.parent.dependencies = []
    self.parent.contributors = ["Gabriele Arnulfo (Univ. Genoa) & Massimo Narizzano (Univ. Genoa)"]
    # helpText / acknowledgementText are rendered in the module's Help panel.
    self.parent.helpText = """
    Compute the Gray Matter Proximity Index starting .....
    """
    self.parent.acknowledgementText = """
    """
#########################################################################################
#### ####
#### GMPIComputationWidget ########## ###################################################
#### ####
#########################################################################################
class GMPIComputationWidget(ScriptedLoadableModuleWidget):
  """GUI for the GMPI module: four surface selectors, a fiducial selector
  and an Apply button that runs GMPIComputationLogic."""

  def _createSurfaceSelector(self, tooltip):
    """Return a scene-backed combo box listing vtkMRMLModelNode surfaces.

    All four pial/white selectors share this exact configuration; only the
    tooltip differs (factored out of setup(), which repeated it four times).
    """
    selector = slicer.qMRMLNodeComboBox()
    selector.nodeTypes = ( ("vtkMRMLModelNode"), "" )
    selector.selectNodeUponCreation = True
    selector.addEnabled = False
    selector.removeEnabled = False
    selector.noneEnabled = True
    selector.showHidden = False
    selector.showChildNodeTypes = False
    selector.setMRMLScene( slicer.mrmlScene )
    selector.setToolTip( tooltip )
    return selector

  def setup(self):
    """Build the module GUI and pre-select known FreeSurfer scene nodes."""
    self.developerMode = True
    ScriptedLoadableModuleWidget.setup(self)

    # Inspect the current scene for FreeSurfer surfaces (?h_pial / ?h_white)
    # and the 'recon' fiducial list; any node found pre-fills its selector.
    lhPialNode = slicer.mrmlScene.GetNodesByName('lh_pial').GetItemAsObject(0)
    rhPialNode = slicer.mrmlScene.GetNodesByName('rh_pial').GetItemAsObject(0)
    lhWhiteNode = slicer.mrmlScene.GetNodesByName('lh_white').GetItemAsObject(0)
    rhWhiteNode = slicer.mrmlScene.GetNodesByName('rh_white').GetItemAsObject(0)
    reconFileNode = slicer.mrmlScene.GetNodesByName('recon').GetItemAsObject(0)

    self.gmpiCB = ctk.ctkCollapsibleButton()
    self.gmpiCB.text = "GMPI Computation"
    self.layout.addWidget(self.gmpiCB)
    self.gmpiFL = qt.QFormLayout(self.gmpiCB)

    # Surface selectors.  The white-surface tooltips used to read
    # "Pick the ... pial." (copy/paste error); fixed here.
    self.leftPialCBox = self._createSurfaceSelector("Pick the left pial.")
    self.gmpiFL.addRow("Left Pial: ", self.leftPialCBox)
    self.leftWhiteCBox = self._createSurfaceSelector("Pick the left white.")
    self.rightPialCBox = self._createSurfaceSelector("Pick the right pial.")
    self.gmpiFL.addRow("Right Pial: ", self.rightPialCBox)
    self.rightWhiteCBox = self._createSurfaceSelector("Pick the right white.")
    # Original row order kept: Right White appears above Left White.
    self.gmpiFL.addRow("Right White: ", self.rightWhiteCBox)
    self.gmpiFL.addRow("Left White: ", self.leftWhiteCBox)

    # Fiducials list combo box (electrode contact positions).
    self.fiducialsCBox = slicer.qMRMLNodeComboBox()
    self.fiducialsCBox.nodeTypes = ( ("vtkMRMLMarkupsFiducialNode"), "" )
    self.fiducialsCBox.selectNodeUponCreation = False
    self.fiducialsCBox.addEnabled = False
    self.fiducialsCBox.removeEnabled = False
    self.fiducialsCBox.noneEnabled = True
    self.fiducialsCBox.setMRMLScene( slicer.mrmlScene )
    self.fiducialsCBox.setToolTip("Select a fiducial list")
    self.gmpiFL.addRow("Fiducial : ", self.fiducialsCBox)

    # Pre-select whatever scene nodes were found above.
    if lhWhiteNode:
      self.leftWhiteCBox.setCurrentNode(lhWhiteNode)
    if rhWhiteNode:
      self.rightWhiteCBox.setCurrentNode(rhWhiteNode)
    if rhPialNode:
      self.rightPialCBox.setCurrentNode(rhPialNode)
    if lhPialNode:
      self.leftPialCBox.setCurrentNode(lhPialNode)
    if reconFileNode:
      self.fiducialsCBox.setCurrentNode(reconFileNode)

    # GMPI threshold controls (built but not added to the layout; the
    # montage-creation feature that uses them is currently disabled).
    self.gmpiSlider = qt.QSlider(qt.Qt.Horizontal)
    self.gmpiSlider.setMinimum(-9)
    self.gmpiSlider.setMaximum(9)
    self.gmpiSlider.setValue(-3)
    self.gmpiSpinBox = qt.QDoubleSpinBox()
    self.gmpiSpinBox.setRange(-1, 1)
    self.gmpiSpinBox.setSingleStep(0.1)
    self.gmpiSpinBox.setValue(float(self.gmpiSlider.value)/10)

    # Run button.
    self.gmpiPB = qt.QPushButton("Apply")
    self.gmpiPB.toolTip = "Run the algorithm."
    self.gmpiPB.enabled = True
    # Montage button (created but never shown; feature disabled).
    self.montagePB = qt.QPushButton("Create Montage")
    self.montagePB.toolTip = "Create Montage"
    self.montagePB.enabled = True
    self.gmpiFL.addRow(self.gmpiPB)

    # connections
    self.gmpiPB.connect('clicked(bool)', self.onGMPIComputation)

  def onGMPIComputation(self):
    """Run the GMPI computation on the currently selected nodes."""
    slicer.util.showStatusMessage("START GMPI Computation")
    print ("RUN GMPI Computation")
    GMPIComputationLogic().runGMPIComputation(self.fiducialsCBox.currentNode(), \
                                              self.leftPialCBox.currentNode(), \
                                              self.rightPialCBox.currentNode(), \
                                              self.leftWhiteCBox.currentNode(), \
                                              self.rightWhiteCBox.currentNode())
    print ("END GMPI Computation")
    slicer.util.showStatusMessage("END GMPI Computation")

  def onSliderValueChange(self):
    """Mirror the slider (integer tenths) into the spin box."""
    self.gmpiSpinBox.setValue(float(self.gmpiSlider.value)/10)

  def onSpinBoxValueChange(self):
    """Mirror the spin box back into the slider (integer tenths)."""
    self.gmpiSlider.setValue(float(self.gmpiSpinBox.value)*10)
#########################################################################################
#### ####
#### GMPIComputationLogic ##############################################################
#### ####
#########################################################################################
class GMPIComputationLogic(ScriptedLoadableModuleLogic):
  """Computes the Gray Matter Proximity Index (GMPI) for electrode contacts.

  For each selected fiducial the nearest white-matter vertex is found and
  GMPI = (contact - white) . (pial - white) / |pial - white|^2 is appended
  to the fiducial's description field (0 at the white surface, 1 at the pial).
  """

  def __init__(self):
    # Progress bar displayed while looping over the fiducial list.
    self.pb = qt.QProgressBar()

  def findNearestVertex(self, contact, surfaceVertices):
    """Return (vertex, index) of the vertex in *surfaceVertices* nearest to *contact*.

    contact is a 3-vector; surfaceVertices is an (N, 3) numpy array.
    """
    dist = numpy.sqrt(numpy.sum((contact - surfaceVertices)**2, axis=1))
    nearestIdx = dist.argmin()  # computed once (the original called argmin twice)
    return (surfaceVertices[nearestIdx, :], nearestIdx)

  def computeGmpi(self, contact, pial, white):
    """Return the GMPI of *contact* relative to paired pial/white vertices.

    NaN when the two vertices coincide (projection undefined).
    """
    if (numpy.linalg.norm((pial - white)) ** 2 == 0):
      return float('NaN')
    return (numpy.dot((contact - white), (pial - white)) / numpy.linalg.norm((pial - white)) ** 2)

  def runGMPIComputation(self, fids, leftPial, rightPial, leftWhite, rightWhite):
    """Compute GMPI for every selected fiducial and append it to its description.

    fids -- vtkMRMLMarkupsFiducialNode holding the electrode contacts
    leftPial/rightPial/leftWhite/rightWhite -- vtkMRMLModelNode surfaces
    Does nothing (beyond a status message) when any input is missing.
    """
    if (fids == None):
      slicer.util.showStatusMessage("Error, No Fiducial selected")
      return
    if ((leftPial == None) or (rightPial == None) or \
        (leftWhite == None) or (rightWhite == None)):
      slicer.util.showStatusMessage("Error, please select the four surfaces!")
      return
    # Progress bar over the contact list.
    self.pb.setRange(0, fids.GetNumberOfFiducials())
    self.pb.show()
    self.pb.setValue(0)
    slicer.app.processEvents()
    for i in range(fids.GetNumberOfFiducials()):
      self.pb.setValue(i + 1)
      slicer.app.processEvents()
      # Only active (selected) fiducial points get a GMPI.
      if fids.GetNthFiducialSelected(i) != True:
        continue
      # Hemisphere inferred from the label: primed labels (e.g. A'1) are
      # left, unprimed (e.g. A1) are right.  Raw string fixes the invalid
      # \w / \d escape warnings of the original literal.
      # NOTE(review): contact naming varies between centres; the sign of
      # the RAS x coordinate would be a more robust criterion.
      chLabel = fids.GetNthFiducialLabel(i)
      if re.search(r'^\w\d+', chLabel) is None:
        pial = leftPial.GetPolyData()
        white = leftWhite.GetPolyData()
      else:
        pial = rightPial.GetPolyData()
        white = rightWhite.GetPolyData()
      # Convert VTK point arrays to numpy to compute distances vectorized.
      pialVertices = vtk.util.numpy_support.vtk_to_numpy(pial.GetPoints().GetData())
      whiteVertices = vtk.util.numpy_support.vtk_to_numpy(white.GetPoints().GetData())
      currContactCentroid = [0, 0, 0]
      fids.GetNthFiducialPosition(i, currContactCentroid)
      # Pair the contact with its nearest white vertex and the pial vertex
      # at the same index (FreeSurfer surfaces share vertex correspondence).
      [whiteNearVtx, whiteNearIdx] = self.findNearestVertex(currContactCentroid, whiteVertices)
      pialNearVtx = pialVertices[whiteNearIdx]
      gmpi = float("{0:.3f}".format(self.computeGmpi(currContactCentroid, pialNearVtx, whiteNearVtx)))
      print (fids.GetNthFiducialLabel(i) + " gmpi: " + str(gmpi))
      descr = fids.GetNthControlPointDescription(i)  # local; original stored it on self needlessly
      if descr[-1:] == ',':
        fids.SetNthControlPointDescription(i, ' '.join([descr, 'GMPI,', str(gmpi)]))
      else:
        fids.SetNthControlPointDescription(i, ' '.join([descr, ', GMPI,', str(gmpi)]))
# def runMontageCreation(self,fids,gmpiThreshold):
#
# class Implant:
# def __init__(self, electrodes=None):
# if electrodes:
# self.electrodes = electrodes
# self.distances = numpy.ndarray(len(electrodes))
# else:
# self.electrodes = list()
# self.distances = numpy.ndarray((1,1))
#
# def append(self,electrode):
# self.electrodes.append(electrode)
#
# def computeDistances(self):
# # re-allocare distances matrix
# self.distances = numpy.ndarray( ( len(self.electrodes),len(self.electrodes) ) )
# i = 0
# j = 0
# for el1 in self.electrodes:
# for el2 in self.electrodes:
# # need to check whether the two channels are recording
# # from the same hemisphere ... WHY? I mean volume conduction is
# # volume conduction ... so does it matter much if ref is in the other hemisphere
# # would it be better to reduce ref-ch distance instead of preserving
# # laterality?
#
# if (el1.isRight and el2.isRight) or (el1.isLeft and el2.isLeft):
# self.distances[i,j] = numpy.sqrt(numpy.sum((numpy.array(el1.chpos) - numpy.array(el2.chpos))**2))
# else:
# self.distances[i,j] = 1000
#
# i += 1
# j += 1
# i = 0
#
# def find(self,electrode):
# try:
# return self.electrodes.index(electrode)
# except ValueError:
# return None
#
# def buildWhiteChannelsList(self):
#
# whiteReferenceChannels = [ind for ind in xrange(0,len(self.electrodes)) \
# if self.electrodes[ind].gmpi < -0.3 and self.electrodes[ind].ptd < 0 \
# and not self.electrodes[ind].isSubCtx]
# return whiteReferenceChannels
#
# def findWhiteReference(self,electrode,gmpiThreshold):
# srcIdx = self.electrodes.index(electrode)
# if not electrode.isSubCtx:
# refIndices = self.buildWhiteChannelsList()
# refIdx = refIndices[self.distances[srcIdx,refIndices].argmin()]
# return self.electrodes[refIdx]
# else:
# refIndices = self.buildWhiteChannelsList()
# refIdx = refIndices[self.distances[srcIdx,refIndices].argmin()]
# return self.electrodes[refIdx]
#
#
#
#
# class Electrode:
# def __init__(self, label, chpos=None, gmpi=None, ptd=None, isSubCtx=False):
# self.label = label
# self.gmpi = gmpi
# self.ptd = ptd
# self.chpos = chpos
# self.isSubCtx = isSubCtx
# self.isRight = re.search('[A-Z]\d+',label)
# self.isLeft = re.search('[A-Z]\'\d+',label)
#
# def __add__(self, other):
# currElecName = re.match("[A-Z]+[']?",self.label).group(0)
# currChanNum = re.search("\d+", self.label).group(0)
# refLabel = currElecName + str(int(currChanNum) + 1)
# refElec = Electrode(refLabel)
# return refElec
#
# def __eq__(self,other):
# if self.label == other.label:
# return True
# else:
# return False
#
# def __str__(self):
# if self.isRight:
# side = 'r'
# else:
# side = 'l'
# return self.label+' '+side
#
#
# # create table for BP and add it to the active scene
# bpTableNode = slicer.vtkMRMLTableNode()
# bpTableNode.SetName("BP")
# bpTableNode.AddColumn()
# bpTableNode.AddColumn()
# bpTableNode.AddColumn()
# slicer.mrmlScene.AddNode(bpTableNode)
#
# cwTableNode = slicer.vtkMRMLTableNode()
# cwTableNode.SetName("CW")
# cwTableNode.AddColumn()
# cwTableNode.AddColumn()
# cwTableNode.AddColumn()
# slicer.mrmlScene.AddNode(cwTableNode)
#
#
# # read fids and populate a dictionary
# # implantDict = dict()
# implant = Implant()
#
# for elIdx in xrange(0,fids.GetNumberOfFiducials()):
# chpos = [0.0, 0.0, 0.0]
# fids.GetNthFiducialPosition(elIdx,chpos)
# desc = fids.GetNthMarkupDescription(elIdx)
# desc = re.split(',', desc)
# descDict = dict()
# for k,v in zip(desc[::2],desc[1::2]):
# descDict[k.strip()] = float(v)
#
# if descDict.has_key('GMPI'):
# gmpi = descDict['GMPI']
# else:
# gmpi = numpy.nan
# if descDict.has_key('PTD'):
# ptd = descDict['PTD']
# else:
# ptd = numpy.nan
#
# # we need to separate the anatomical names to differentiate between subcortical
# # and cortical channels.
# isSubCtx = any([descDict.has_key(x) for x in ('Hip','Put','Amy','Cau','Tal')])
#
# # implantDict[fids.GetNthFiducialLabel(elIdx)] = (chpos, gmpi, ptd, isSubCtx)
# implant.append(Electrode(fids.GetNthFiducialLabel(elIdx),chpos,gmpi,ptd,isSubCtx))
#
# implant.computeDistances()
#
# # Create bipolar first
# row = 0
# for srcElec in implant.electrodes:
#
# refElec = srcElec+1
#
# if implant.find(refElec):
# bpLabel = srcElec.label+'-'+refElec.label
# bpTableNode.AddEmptyRow()
# bpTableNode.SetCellText(row, 0, str(bpLabel))
# bpTableNode.SetCellText(row, 1, str(srcElec.label))
# bpTableNode.SetCellText(row, 2, str('-'+refElec.label))
# row += 1
#
# # Create Closest White scheme
# row = 0
# for srcElec in implant.electrodes:
#
# refElec = implant.findWhiteReference(srcElec,gmpiThreshold.value)
# if refElec and srcElec.gmpi > gmpiThreshold.value:
# cwLabel = srcElec.label + '-' + refElec.label
# cwTableNode.AddEmptyRow()
# cwTableNode.SetCellText(row, 0, str(cwLabel))
# cwTableNode.SetCellText(row, 1, str(srcElec.label))
# cwTableNode.SetCellText(row, 2, str('-' + refElec.label))
# row += 1
| mnarizzano/SEEGA | GMPIComputation/GMPIComputation.py | GMPIComputation.py | py | 22,278 | python | en | code | 25 | github-code | 90 |
39916350224 | from app.models import db, Location
def seed_locations():
    """Seed 20 demo listings: five hosts (user_id 1-5), each with one listing
    in Tampa, Miami Beach, Charlotte and Charleston.

    The original seed built descriptions with backslash line continuations
    inside the string literals, which leaked long runs of indentation spaces
    into the stored text; descriptions are stored here as clean,
    single-spaced prose.
    """
    # (user_id, city, state, name, amenities, description, price)
    # All listings are in the United States; prices are stored as strings
    # to match the original seed data.
    listings = [
        (1, 'Tampa', 'Florida', 'Cozy AF Tiny-House Oasis',
         '2 guests, 1 bedroom, 1 bed, 1 bath',
         'Awarded a *Unique Stay* by, we welcome you to stay at this Rustic '
         'Tiny-House which use to be an old storage container that traveled '
         'the world. This now cottage has so much unique and fun details '
         'awaiting to be discovered.',
         '161'),
        (1, 'Miami Beach', 'Florida', 'Fontainebleau Deluxe Junior Suite - Garden View',
         '4 guests, Studio, 2 beds, 1 bath',
         'This is one of the deluxe junior suites in the luxurious '
         'Fontainebleau Hotel with access to hotel amenities. The suite is '
         'located in the newly renovated all-condo Sorrento Tower and is 30 '
         'percent bigger than other standard junior suites.',
         '212'),
        (1, 'Charlotte', 'North Carolina', 'Uptown Charlotte Sky Rise overlooking the city!',
         '6 guests, 1 bedroom, 1 bed, 1 bath',
         'Your family will be close to everything when you stay at this '
         'centrally-located place. Bars in walking distance and a thriving '
         'night life.',
         '83'),
        (1, 'Charleston', 'South Carolina', 'KING ST DISTRICT - Walk Everywhere!',
         '5 guests, 2 bedrooms, 2 beds, 1 bath',
         'Two bedroom condo/townhome comfortable for a place to stay away '
         'from home. Close to downtown Charleston and the beautiful beaches. '
         'Less than 5 miles to MUSC. Less than 7 miles to the pier on Folly '
         'Beach.',
         '213'),
        (2, 'Tampa', 'Florida', 'Condo with Sea views & Heated Pool',
         '4 guests, 1 bedroom, 2 beds, 1 bath',
         'Step into the comfort of this beautiful 1-bedroon apartment, with '
         'outstanding facilities in Tampa. The property is situated ideally '
         'nearby the beach so you can enjoy the best views over the Sea '
         'within Tampa Bay area.',
         '85'),
        (2, 'Miami Beach', 'Florida', 'COSY STUDIO IN BOUTIQUE HOTEL MIAMI BEACH',
         '2 guests, Studio, 1 bed, 1 bath',
         'LOCATED IN THE HEART OF MIAMIBEACH THE NEW HOTEL HAS A PERFECT '
         'GEOGRAPHICAL SITUATION AT TWO MINUTES FROM THE BEACH OF THE PARK '
         'OF THE CONCERT PLACE, BOUTIQUES OR SOME OF THE FAMOUS MALL OF BAL '
         'HARBOR…',
         '99'),
        (2, 'Charlotte', 'North Carolina', 'Stylish & Modern | High Rise Lux | Stunning Views',
         '3 guests, 1 bedroom, 1 bed, 1 bath',
         'Our studio apartment w/ 1 queen bed, couch & air mattress has '
         'everything you need to enjoy your stay in a prime location! The '
         'apartment is new and offers a modern kitchen w/ granite '
         'countertops, natural hardwood flooring, floor-to-ceiling windows, '
         '9\' ceilings and stainless steel appliances. Enjoy coffee from our '
         'Keurig. Arguably one of the best views in the city. Absolute '
         'luxury!',
         '92'),
        (2, 'Charleston', 'South Carolina', 'Serene Cottage with Beautiful Garden and Heated Pool',
         '4 guests, 1 bedroom, 1 bed, 1.5 baths',
         'This unique inviting light filled space boasts beautiful french '
         'doors throughout with an unmatched view into the lush courtyard. '
         'Plus 1 off-street parking spot!',
         '269'),
        (3, 'Tampa', 'Florida', 'Tampa Bay Breeze -sunset views over crystal waters',
         '4 guests, 1 bedroom, 2 beds, 1 bath',
         'Tropical Paradise is only minutes away! Tampa\'s only '
         'all-waterfront, all-suite resort is perfectly positioned for those '
         'traveling for business or simply to relax.',
         '154'),
        (3, 'Miami Beach', 'Florida', 'OCEANFRONT, BALCONY, WIFI, PARKING',
         '3 guests, Studio, 2 beds, 1 bath',
         'Come and spend your vacation in Miami in an idyllic setting. Your '
         'accommodation is located on the seafront with a lovely, equipped '
         'terrace . In the morning, go for a walk on the beach and enjoy the '
         'ocean. In addition, your property is part of a luxury condo 50s, '
         'recently renovated, and offering a wide range of facilities such '
         'as swimming pool, sauna, garden, restaurants and a gym.',
         '109'),
        (3, 'Charlotte', 'North Carolina', 'Serene *Walkable *Trendy Plaza Midwood near Uptown',
         '5 guests, 2 bedrooms, 2 beds, 1 bath',
         'Experience walkable urban amenities and full-house privacy with '
         'free off-street parking in the heart of Charlotte\'s trendy & '
         'historic Plaza Midwood neighborhood. Enjoy a leisurely walk to '
         'dining and entertainment, coffee or wine by the impressive stone '
         'fireplace or simply enjoy the gentle call of the native barred owl '
         'from the cozy comfort of the home\'s revered screened porch.',
         '245'),
        (3, 'Charleston', 'South Carolina', 'Waterfront Privacy, heated infinity pool, location',
         '8 guests, 4 bedrooms, 4 beds, 3 baths',
         'Be prepared for a truly eye opening experience of this one of a '
         'kind Lowcountry experience. Booking this home will change the way '
         'you view your next vacation-literally! Every detail in this home '
         'has been laid out for you to arrive and immediately go into '
         'relaxation mode. There are amenities included for the adventurous '
         'outdoor lover and the unrivaled night life scene Charleston '
         'proudly boasts. This home is centrally located to Folly Beach and '
         'Downtown Charleston.',
         '993'),
        (4, 'Tampa', 'Florida', 'Clean, Modern Home minutes from Downtown Tampa',
         '4 guests, 2 bedrooms, 2 beds, 1 bath',
         'Beautiful, clean home minutes from downtown Tampa. Located in N '
         'Hyde Park you are centrally located to many of the best areas of '
         'Tampa Bay',
         '89'),
        (4, 'Miami Beach', 'Florida', 'Gorgeous Bay View Studio direct beach exit',
         '4 guests, Studio, 3 beds, 1 bath',
         'My place is over the well-known Collins Avenue and in front of the '
         'ocean with immediate access to the beach. Minutes away from many '
         'attractions that this city offers during day and night time.. '
         'You\'ll love my place because of its incredible views, the amazing '
         'beach and the comfy beds!',
         '81'),
        (4, 'Charlotte', 'North Carolina', '4-Story Luxury Home w/ Rooftop Hot Tub by Mansion',
         '8 guests, 3 bedrooms, 4 beds, 3.5 baths',
         'This Mansion property was built in 2019 and reflects the modern, '
         'luxury lifestyle within walking distance from the BofA Stadium, '
         'Uptown, and more. Complete with full kitchen, dining area, living '
         'room with an enormous 82 inch HDTV, 2-car garage, Tesla Charger & '
         'rooftop grill+hot tub this townhome is perfect for a visiting '
         'executive or family and friends looking to enjoy CLT together!',
         '301'),
        (4, 'Charleston', 'South Carolina', '*OUTDOOR OASIS* OAK NEST! Btwn CHARLESTON & BEACH',
         '2 guests, 1 bedroom, 1 bed, 2.5 baths',
         'GET OUTDOORS! Minutes to Sullivan\'s Island Beach, Shem Creek & '
         'Downtown Charleston. The Live Oak Nest is a stunning indulgence we '
         'all deserve. COUPLES & honeymooners OASIS. Circular driveway, '
         'private parking, bikes, beach essentials, flickering torches, '
         'courtyards, fountains, exquisite baths, chefs kitchen, exquisite '
         'decor & furnishings.',
         '285'),
        (5, 'Tampa', 'Florida', 'Beautiful & Brand New Cottage perfectly located!',
         '2 guests, 1 bedroom, 1 bed, 1 bath',
         'Enjoy this brand new, fully equipped 1 bedroom, living room, full '
         'kitchen, 1 bath, private cottage. Designed to make any length stay '
         'comfortable & convenient. It\'s 5 minutes from downtown Tampa & 25 '
         'minutes from the beaches.',
         '68'),
        (5, 'Miami Beach', 'Florida', 'Oceanfront 14th Floor Brand New Beachfront Flat',
         '3 guests, 1 bedroom, 2 beds, 1 bath',
         'A modern style and freshly renovated large beachfront studio '
         'apartment in Miami Beach with 180 degree ocean views of the '
         'crystal clear water and free parking.',
         '170'),
        (5, 'Charlotte', 'North Carolina', 'The Carolina Treehouse',
         '2 guests, 1 bed, 1 bath',
         'We built this place for dreamers to reset, reflect, and create. '
         'Designed with a \'slow\' pace in mind. Our hope is that you enjoy '
         'every part of your stay at our Carolina Treehouse; from enjoying '
         'your morning cup of pour over coffee to selecting your favorite '
         'record to play as the sun sets or curl up underneath on our swing '
         'bed that overlooks the private pond in this lodge-chic treehouse '
         'all while being surrounded by nature.',
         '337'),
        (5, 'Charleston', 'South Carolina', 'Roomy & Welcoming Waterfront Charleston House',
         '12 guests, 5 bedrooms, 8 beds, 5 baths',
         'Spacious coastal home on a large lot with creek access for paddle '
         'boarding & kayaking. Lots to do on-site, nearby park and beach, '
         'pool table, video games, theater room and swinging daybeds to '
         'watch one-of-a-kind sunsets.',
         '476'),
    ]
    db.session.add_all([
        Location(
            user_id=user_id,
            city=city,
            state=state,
            country='United States',  # identical for every seed row
            name=name,
            amenities=amenities,
            description=description,
            price=price,
        )
        for user_id, city, state, name, amenities, description, price in listings
    ])
    db.session.commit()
def undo_locations():
    """Empty the locations table, reset its autoincrement counter and
    cascade to dependent rows (raw SQL; PostgreSQL TRUNCATE syntax)."""
    db.session.execute('TRUNCATE locations RESTART IDENTITY CASCADE;')
    db.session.commit()
| mehendaleo/GetAway | app/seeds/location.py | location.py | py | 12,866 | python | en | code | 1 | github-code | 90 |
# 145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
# Find the sum of all numbers which are equal to the sum of the factorial of their digits.
# Note: as 1! = 1 and 2! = 2 are not sums they are not included.
import math

# Factorials of the digits 0-9, precomputed once.
_DIGIT_FACTORIALS = [math.factorial(d) for d in range(10)]

# A 7-digit number's digit-factorial sum is at most 7 * 9! = 2,540,160, and
# every 8-digit number exceeds 8 * 9! = 2,903,040, so no curious number can
# be larger than 2,540,160.  (The original scanned up to 10,000,000.)
_UPPER_BOUND = 7 * _DIGIT_FACTORIALS[9]


def digit_factorial_sum(m):
    """Return the sum of the factorials of the decimal digits of *m*."""
    return sum(_DIGIT_FACTORIALS[int(ch)] for ch in str(m))


def curious_number_sum(limit=_UPPER_BOUND + 1):
    """Return the sum of all curious numbers below *limit*.

    A curious number equals the sum of the factorials of its digits; 1 and 2
    are excluded (a single term is not a sum), so the scan starts at 3.
    """
    total = 0
    for i in range(3, limit):
        if digit_factorial_sum(i) == i:
            # Print only the hits (the original printed every iteration's
            # partial sum - 10 million debug lines).
            print(i)
            total += i
    return total


if __name__ == "__main__":
    result = curious_number_sum()
    print("result")
    print(result)  # 40730
6338306949 | #!/usr/bin/python3
""" Script that uses JSONPlaceholder API to get information about employee """
import requests
import sys
if __name__ == "__main__":
    base = 'https://jsonplaceholder.typicode.com/'
    employee_id = sys.argv[1]
    # Fetch the employee record first so the summary line can be printed.
    employee = requests.get('{}users/{}'.format(base, employee_id)).json()
    print("Employee {} is done with tasks".format(employee.get('name')), end="")
    # Then fetch all TODO items and keep only the completed ones.
    all_tasks = requests.get('{}todos?userId={}'.format(base, employee_id)).json()
    done_tasks = [task for task in all_tasks if task.get('completed') is True]
    print("({}/{}):".format(len(done_tasks), len(all_tasks)))
    for task in done_tasks:
        print("\t {}".format(task.get("title")))
| luischaparroc/holberton-system_engineering-devops | 0x15-api/0-gather_data_from_an_API.py | 0-gather_data_from_an_API.py | py | 738 | python | en | code | 153 | github-code | 90 |
33956324358 | """Adding balance and total_references to user
Revision ID: a9078a30a48e
Revises: 8355ab728b72
Create Date: 2020-02-10 11:41:28.821496
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a9078a30a48e'        # id of this migration
down_revision = '8355ab728b72'   # parent migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add referral-tracking columns (balance, total_referrals) to ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Both columns are nullable, so existing rows need no backfill/default.
    op.add_column('user', sa.Column('balance', sa.Integer(), nullable=True))
    op.add_column('user', sa.Column('total_referrals', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the referral-tracking columns (data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'total_referrals')
    op.drop_column('user', 'balance')
    # ### end Alembic commands ###
| Rencode/referral_program | alembic/versions/a9078a30a48e_adding_balance_and_total_references_to_.py | a9078a30a48e_adding_balance_and_total_references_to_.py | py | 811 | python | en | code | 1 | github-code | 90 |
17068668525 | import random
from xml.dom.minidom import Document,Node
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class Diagram:
    """
    Layout information for a diagram view of a L{psychsim.world.World}.

    Maps element keys to screen coordinates (``x``/``y``) and display
    colors, and round-trips that state through a minidom ``<diagram>``
    XML element.
    """
    def __init__(self, args=None):
        """
        :param args: optional minidom node to parse initial state from
        """
        self.x = {}
        self.y = {}
        self.color = {}
        if isinstance(args, Node):
            self.parse(args)

    def getX(self, key):
        """Return the stored x coordinate for ``key``, or None if unset."""
        return self.x.get(key)

    def getY(self, key):
        """Return the stored y coordinate for ``key``, or None if unset."""
        return self.y.get(key)

    def getColor(self, key):
        """
        Return the display color for ``key``.

        @warning: if no color exists, assigns (and remembers) a random color
        """
        if key not in self.color:
            self.color[key] = QColor(random.choice(QColor.colorNames()))
        return self.color[key]

    def setColor(self, key, value):
        """Set the color for ``key``; non-QColor values are coerced via QColor()."""
        if not isinstance(value, QColor):
            value = QColor(value)
        self.color[key] = value

    def clear(self):
        """Forget all coordinates. Colors are intentionally kept."""
        self.x.clear()
        self.y.clear()

    def __xml__(self):
        """Serialize coordinates and colors into a new minidom Document."""
        doc = Document()
        root = doc.createElement('diagram')
        for key, value in self.x.items():
            node = doc.createElement('x')
            node.setAttribute('key', key)
            node.appendChild(doc.createTextNode(str(value)))
            root.appendChild(node)
        for key, value in self.y.items():
            node = doc.createElement('y')
            node.setAttribute('key', key)
            node.appendChild(doc.createTextNode(str(value)))
            root.appendChild(node)
        for key, value in self.color.items():
            node = doc.createElement('color')
            # A missing "key" attribute denotes the world-level color (key None).
            if key:
                node.setAttribute('key', key)
            node.appendChild(doc.createTextNode(str(value.name())))
            root.appendChild(node)
        doc.appendChild(root)
        return doc

    def parse(self, root):
        """Load state from a ``<diagram>`` element as produced by __xml__."""
        assert root.tagName == 'diagram'
        node = root.firstChild
        while node:
            if node.nodeType == node.ELEMENT_NODE:
                # An empty/absent key attribute maps back to key None.
                key = str(node.getAttribute('key'))
                if not key:
                    key = None
                if node.tagName == 'x':
                    self.x[key] = float(node.firstChild.data)
                elif node.tagName == 'y':
                    self.y[key] = float(node.firstChild.data)
                elif node.tagName == 'color':
                    self.setColor(key, str(node.firstChild.data).strip())
                else:
                    raise NameError('Unknown element %s when parsing %s' %
                                    (node.tagName, self.__class__.__name__))
            node = node.nextSibling
| pynadath/psychsim | psychsim/ui/diagram.py | diagram.py | py | 2,725 | python | en | code | 26 | github-code | 90 |
18718741352 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
import statsmodels.api as sm
def pacf(x, tau):
x = x.to_numpy()
n = x.shape[0]
y0 = x[tau:]
y1 = x[:n-tau]
xx = np.zeros([n - tau, tau - 1])
for k in range(1, tau):
xx[:, k-1] = x[k:n-tau+k]
lm0 = linear_model.LinearRegression()
lm1 = linear_model.LinearRegression()
lm0.fit(xx, y0)
lm1.fit(xx, y1)
res0 = y0 - np.matmul(xx, lm0.coef_) - lm0.intercept_
res1 = y1 - np.matmul(xx, lm1.coef_) - lm1.intercept_
pause = 1
return np.corrcoef(res0, res1)[0, 1]
# Load the five candidate time series (first column of the sheet is dropped).
file = 'Assignment_time_series.xls'
df = pd.read_excel(file, sheet_name = None)['Time-series']
data = df.iloc[:, 1:]
# pause = 1
# check if the time series is from a AR(1) process -> 5
# AR(1) implies rho_2 = rho_1**2, so the two printed values should match.
for i in range(5):
    tdata = data.iloc[:, i]
    rho1 = np.corrcoef(tdata[1:], tdata[:-1])[0, 1]
    rho2 = np.corrcoef(tdata[2:], tdata[:-2])[0, 1]
    print('for the {}th time seires, rho2 = {:.2f}, rho1 squared = {:.2f}'.format(i+1, rho2, rho1**2))
print('\n')
# check if the time series is from a AR(2) process -> 1
# Yule-Walker: solve (phi1, phi2) from rho1, rho2, then predict rho3 and
# compare against the sample rho3.
for i in range(5):
    tdata = data.iloc[:, i]
    rho1 = np.corrcoef(tdata[1:], tdata[:-1])[0, 1]
    rho2 = np.corrcoef(tdata[2:], tdata[:-2])[0, 1]
    rho3 = np.corrcoef(tdata[3:], tdata[:-3])[0, 1]
    phi1 = rho1 * (1 - rho2) / (1 - rho1**2)
    phi2 = (rho2 - rho1**2) / (1 - rho1**2)
    rho3_est = phi1 * rho2 + phi2 * rho1
    print('for the {}th time seires, sample-based rho3 est = {:.2f}, analytical rho3 = {:.2f}'.format(i+1, rho3, rho3_est))
print('\n')
# check if the time series is from a MA(1) process -> 2
# MA(1) implies rho_2 = 0.
for i in range(5):
    tdata = data.iloc[:, i]
    rho2 = np.corrcoef(tdata[2:], tdata[:-2])[0, 1]
    print('for the {}th time seires, sample-based rho2 est = {:.2f}'.format(i+1, rho2))
print('\n')
# check if the time series is from a MA(2) process -> 3
# MA(2) implies rho_3 = 0.
for i in range(5):
    tdata = data.iloc[:, i]
    rho3 = np.corrcoef(tdata[3:], tdata[:-3])[0, 1]
    print('for the {}th time seires, sample-based rho3 est = {:.2f}'.format(i+1, rho3))
print('\n')
# check if the time series is from a ARMA(1,1) process -> 4
# ARMA(1,1) implies a constant decay ratio rho_{k+1}/rho_k for k >= 1.
for i in range(5):
    tdata = data.iloc[:, i]
    rho1 = np.corrcoef(tdata[1:], tdata[:-1])[0, 1]
    rho2 = np.corrcoef(tdata[2:], tdata[:-2])[0, 1]
    rho3 = np.corrcoef(tdata[3:], tdata[:-3])[0, 1]
    print('for the {}th time seires, (rho1/rho0, rho2/rho1, rho3/rho2) = ({:.2f}, {:.2f}, {:.2f})'.format(i+1, rho1, rho2/rho1, rho3/rho2))
# check my pacf function
# Cross-check the hand-rolled pacf() against statsmodels' sm.tsa.pacf.
for i in range(5):
    tdata = data.iloc[:, i]
    mp0 = 1
    mp1 = np.corrcoef(tdata[1:], tdata[:-1])[0, 1]
    mp2 = pacf(tdata, tau = 2)
    mp3 = pacf(tdata, tau = 3)
    pp = sm.tsa.pacf(tdata, nlags=3)
    print('for the {}th time seires, pacf of lag 1, 2, 3, 4 = ({:.2f} ({:.2f}), {:.2f} ({:.2f}), {:.2f} ({:.2f}), {:.2f} ({:.2f}))'.format(i+1, mp0, pp[0], mp1, pp[1], mp2, pp[2], mp3, pp[3]))
pause = 1 | cruiseryy/boot_camp | hw4/arma_test.py | arma_test.py | py | 2,967 | python | en | code | 1 | github-code | 90 |
3493325288 | from flask import Flask, render_template
from flask_wtf.csrf import CSRFProtect
from forms import QueryServices
import requests
app = Flask(__name__)
app.secret_key = b'ksdfglbvlsdfbos'
csrf = CSRFProtect(app)

# Base URL of the backing service container.
BASE = 'http://service:5001/'


@app.route("/", methods=['GET', 'POST'])
def home():
    """Home page; on a valid form submit, show data fetched from the service."""
    form = QueryServices()
    if not form.validate_on_submit():
        # Plain GET (or invalid submit): render the bare form.
        return render_template("home.html", form=form)
    payload = requests.get(BASE + "services1").json()
    return render_template("home.html", form=form, value=payload["data"])


if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0')
| johnlindsay93/istio_tutorial | homepage/app.py | app.py | py | 647 | python | en | code | 0 | github-code | 90 |
4536154818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: JianGe, created on: 2018/10/25
from .xpinyin import Pinyin
p = Pinyin()  # shared converter instance used by the helper functions below
def allPinyin(inputStr):
    """Return the full pinyin of ``inputStr`` with no separator between syllables.

    get_pinyin already returns a single joined string, so the previous
    character-by-character copy loop was redundant.
    """
    return p.get_pinyin(inputStr, u"")
def allInitials(inputStr):
    """Return the pinyin initials (first letters) of ``inputStr``, joined."""
    return p.get_initials(inputStr, u"")
if __name__ == "__main__":
str_input = u'WOmeneabac世界欢迎你2-9'
print(allPinyin('我去你大爷88的额'))
print(allPinyin(u'山东钢铁'))
print(allInitials(u'山东钢铁'))
| laozeng1982/workoutDB | utilities/Chinese.py | Chinese.py | py | 526 | python | en | code | 0 | github-code | 90 |
38321524550 | """
GravMag: 3D forward modeling of total-field magnetic anomaly using polygonal
prisms
"""
from fatiando import logger, mesher, gridder, gravmag
from fatiando.vis import mpl, myv

log = logger.get()
log.info(logger.header())
log.info(__doc__)

# Interactively draw one polygon that becomes a magnetized prism (0-2000 m).
log.info("Draw the polygons one by one")
bounds = [-5000, 5000, -5000, 5000, 0, 5000]
area = bounds[:4]
axis = mpl.figure().gca()
mpl.axis('scaled')
prisms = [
    mesher.PolygonalPrism(
        mpl.draw_polygon(area, axis, xy2ne=True),
        0, 2000, {'magnetization':2})]
# Calculate the total-field anomaly on a regular grid 500 m above the surface,
# with inclination 30 and declination -15 degrees.
shape = (100, 100)
xp, yp, zp = gridder.regular(area, shape, z=-500)
tf = gravmag.polyprism.tf(xp, yp, zp, prisms, 30, -15)
# and plot it (typo fixed in the title: "anomalyproduced" -> "anomaly produced")
mpl.figure()
mpl.axis('scaled')
mpl.title("Total field anomaly produced by prism model (nT)")
mpl.contourf(yp, xp, tf, shape, 20)
mpl.colorbar()
for p in prisms:
    mpl.polygon(p, '.-k', xy2ne=True)
mpl.set_area(area)
mpl.m2km()
mpl.show()
# Show the prisms in 3D with the axes scaled to km.
myv.figure()
myv.polyprisms(prisms, 'magnetization')
myv.axes(myv.outline(bounds), ranges=[i*0.001 for i in bounds])
myv.wall_north(bounds)
myv.wall_bottom(bounds)
myv.show()
| fatiando/v0.1 | _static/cookbook/gravmag_mag_polyprism.py | gravmag_mag_polyprism.py | py | 1,115 | python | en | code | 0 | github-code | 90 |
20382780731 | import argparse
import pyaudio
import wave
import numpy as np
from scipy.io import wavfile
import matplotlib.pyplot as plt
from pydub import AudioSegment
def main(args):
    """Dispatch on the parsed CLI flags: record, plot, or loudness-match WAVs.

    Flags are independent if-blocks, so several can run in one invocation.
    """
    if args.r:
        # Record 30 s of mono 44.1 kHz audio from the "Umik" input device.
        print("Record Sound")
        p = pyaudio.PyAudio()
        info = p.get_host_api_info_by_index(0)
        numdevices = info.get('deviceCount')
        for i in range(0, numdevices):
            if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
                if "Umik" in p.get_device_info_by_host_api_device_index(0, i).get('name'):
                    device_index=i
                    print("Input Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
        # NOTE(review): if no input device name contains "Umik", device_index
        # is never assigned and audio.open() below raises NameError.
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 44100
        CHUNK = 1024
        RECORD_SECONDS = 30
        WAVE_OUTPUT_FILENAME = ".\PDC Recordings\\Final_R.wav"
        audio = pyaudio.PyAudio()
        # start Recording
        stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK, input_device_index=device_index)
        print ("recording...")
        frames = []
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print ("finished recording")
        # stop Recording
        stream.stop_stream()
        stream.close()
        audio.terminate()
        # Write the captured chunks out as a 16-bit PCM WAV file.
        waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        waveFile.setnchannels(CHANNELS)
        waveFile.setsampwidth(audio.get_sample_size(FORMAT))
        waveFile.setframerate(RATE)
        waveFile.writeframes(b''.join(frames))
        waveFile.close()
    if args.p:
        # Plot the waveform(s) of the recording(s) listed in wavFile.
        print("Plot Sound")
        wavFile =["PDC Recordings\\Final_R.wav"]
        print("wavfile=",wavFile)
        plt.rcParams["figure.autolayout"] = True
        yff=[]
        xff=[]
        xt=[]
        yt=[]
        #fig, (ax1,ax2) = plt.plot(figsize=(15, 7))
        for i in range (len(wavFile)):
            wavposition= wavFile[i]
            wav_obj = wave.open(wavposition, 'rb')
            sample_freq = wav_obj.getframerate()
            n_samples = wav_obj.getnframes()
            t_audio = n_samples/sample_freq
            n_channels = wav_obj.getnchannels()
            signal_wave = wav_obj.readframes(n_samples)
            signal_array = np.frombuffer(signal_wave, dtype=np.int16)
            # NOTE(review): [0::1] is a full copy, not channel de-interleaving;
            # this only equals the "left channel" because recordings are mono.
            l_channel = signal_array[0::1]
            times = np.linspace(0, n_samples/sample_freq, len(l_channel))
            xt.append(times)
            yt.append(l_channel)
            plt.plot(xt[i],yt[i],alpha=0.5)
        plt.title('Left Channel')
        plt.ylabel('Signal Value')
        plt.xlabel('Time (s)')
        plt.show()
    if args.l:
        # Match 6HZ_F loudness to its re-recording by applying the dBFS delta.
        print("Loudness Check")
        sound = AudioSegment.from_file("PDC Recordings\\6HZ_F.wav")
        sound_RE = AudioSegment.from_file("PDC Recordings\\6HZ_F_RE.wav")
        loudness = sound.dBFS
        loudness_RE = sound_RE.dBFS
        diff = loudness-loudness_RE
        print(diff)
        # make left channel 6dB quieter and right channe 2dB louder
        sound_RE_adjusted = sound.apply_gain_stereo(diff, diff)
        sound_RE_adjusted.export("PDC Adjusted\\6HZ_F.wav", format="wav")
    if args.rl:
        # Rear channel: apply a fixed, previously measured gain offset.
        print("Loudness Check")
        sound = AudioSegment.from_file("PDC Recordings\\Const_R.wav")
        diff=-9.75
        sound_RE_adjusted = sound.apply_gain_stereo(diff, diff)
        sound_RE_adjusted.export("PDC Adjusted\\Const_R.wav", format="wav")
    if args.fl:
        # Front channel: apply a fixed, previously measured gain offset.
        print("Loudness Check")
        sound = AudioSegment.from_file("PDC Recordings\\Roger_F.wav")
        diff=-4.928
        sound_RE_adjusted = sound.apply_gain_stereo(diff, diff)
        sound_RE_adjusted.export("PDC Adjusted\\Roger_F.wav", format="wav")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # All options are independent boolean flags consumed by main().
    for flag, text in (
        ("-r", "record sound"),
        ("-p", "plot sound"),
        ("-l", "loudness match"),
        ("-rl", "rear loudness match, db-9.57"),
        ("-fl", "front loudness match, db-9.57"),
    ):
        parser.add_argument(flag, action="store_true", help=text)
    main(parser.parse_args())
# Walk the permutation, counting positions where the value equals its
# (1-based) index; each such clash is resolved by a swap with the next slot.
N = int(input())
P = list(map(int, input().split()))

count = 0
now = P[0]
for i in range(N - 1):
    nxt = P[i + 1]
    if now == i + 1:
        # Fixed point: swap with the next element (so the value in hand is
        # unchanged) and count one operation.
        count += 1
    else:
        now = nxt
if now == N:
    count += 1
print(count)
29292760126 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#CSV:xpelan03
"""
" Soubor: csv.py
" Datum: 2015/04/18
" Autor: Lukas Pelanek, xpelan03@stud.fit.vutbr.cz
" Projekt: CSV2XML
" Popis: Program nacte zdrojovy soubor zapsany ve formatu CSV a prevede jej do formatu XML
"""
import params
import sys
# This script is itself named csv.py, which would shadow the standard
# library's csv module.  Temporarily drop the script's own directory
# (sys.path[0]) so "import csv" resolves to the stdlib, then restore it.
path=sys.path[:]
sys.path=path[1:]
import csv as csvLib
sys.path=path[:]
import re

# Command-line options (False = not supplied); filled in by the params module.
parameters = {"help":False, "input":False, "output":False, "n":False, "r":False, "s":False, "h":False,
            "c":False, "l":False, "i":False, "start":False, "e":False, "missing":False, "all":False, "padding":False}
params.getParams(parameters)
params.paramsCheck(parameters)

cols = 0; # column counter within the current row
spaces = 0; # current indentation depth (number of tabs)
head = [] # first input row when -h is active (used as element names)
x = 0 # index into the header while emitting one row
rowCount = int(parameters["start"])
# Escape characters that are special in XML text content.
def replace(st):
    """Return ``st`` with &, ', >, " and < replaced by XML entities."""
    # str.translate performs all five substitutions in a single pass, so
    # the inserted entities themselves are never re-escaped.
    return st.translate(str.maketrans({
        "&": "&amp;",
        "'": "&apos;",
        ">": "&gt;",
        '"': "&quot;",
        "<": "&lt;",
    }))
# Replace characters that are invalid in an XML element name with the -h
# substitution character.  The first character is restricted to the XML
# NameStartChar set; the remaining characters additionally allow the
# NameChar extras (digits, '.', '-', middle dot, combining marks).
def nameReplace (st):
    if len(st) > 1:
        tmpStr = re.sub("[^:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]",parameters["h"],re.sub("\n",parameters['h']*2,st[0]))
        # NOTE(review): the literal '}' at the end of this character class is
        # also accepted as valid, which looks unintended -- confirm.
        tmpStr += re.sub("[^:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\.\-0-9\u00B7\u0300-\u036F\u203F-\u2040}]",parameters["h"],re.sub("\n",parameters['h']*2,st[1:]))
    else:
        tmpStr = re.sub("[^:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]",parameters["h"],re.sub("\n",parameters['h']*2,st))
    return tmpStr
# Padding extension: left-pad a column index with zeros for uniform width.
def getColIndex(colLen, number):
    """Return ``number`` zero-padded to the width of ``colLen`` when the
    --padding option is active; otherwise return it unchanged.

    Note: returns a str when padding is applied, otherwise the original value.
    """
    if parameters["padding"]:
        # zfill is a no-op when number is already at least as wide, matching
        # the behavior of the old hand-rolled "0"-prepending loop.
        return str(number).zfill(len(str(colLen)))
    return number
# Padding extension: left-pad a row index with zeros for uniform width.
def getRowIndex(rowLen, number):
    """Return ``number`` zero-padded to the width of the highest row index
    (start offset - 1 + rowLen) when --padding is active; otherwise unchanged.
    """
    if parameters["padding"]:
        maxIndex = int(parameters["start"]) - 1 + rowLen
        return str(number).zfill(len(str(maxIndex)))
    return number
# pokusim se otevrit vstupni soubor
if parameters["input"] != False:
try:
inputFile = open(parameters["input"], encoding = "utf-8", mode = "r")
except:
sys.exit(2)
else:
inputFile = sys.stdin
# pokusim se otevrit vystupni soubor
if parameters["output"] != False:
try:
outputFile = open(parameters["output"], encoding = "utf-8", mode = "w")
except:
sys.exit(3)
else:
outputFile = sys.stdout
reader = csvLib.reader(inputFile, delimiter=parameters["s"])
# ulozim vstup do promenne, protoze budu potrebovat projit vstup vicekrat
file = list(reader)
# pokud je vstupni soubor prazdny, pridam do souboru prazdny retezec
if len(file) == 0:
file.append("")
# ulozim si pocet sloupcu
colsLength = len(file[0])
rowsLength = len(file)
#pokud neni zadan prepinac -e a neodpovida pocet sloupcu -> chyba
if parameters["e"] == False:
for tmp in file:
if colsLength != len(tmp):
exit(32)
# pokud je aktivni prepinac -h, projdu hlavicku, nahradim neplatne znaky a znakontroluji validitu
if parameters["h"] != False:
head = file[0]
for b, i in enumerate(head):
head[b] = nameReplace(i);
if not params.checkName(head[b]):
exit(31)
file = file[1:]
# pokud neni aktivni prepinac -n, generuji XML hlavicku
if parameters["n"] == False:
outputFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
# pokud je aktivni prepinac -r, generuji root element
if parameters["r"] != False:
spaces += 1
outputFile.write("<%s>\n" % parameters["r"])
actualCol = colsLength
for row in file:
# pokud je aktivni prepinac -i, vlozim atribut index
if parameters["i"] == True:
outputFile.write("\t"*spaces + "<%s index=\"" % parameters["l"])
outputFile.write("%s\">\n" % getRowIndex(rowsLength, rowCount))
rowCount += 1
else:
outputFile.write("\t"*spaces + "<%s>\n" % parameters["l"])
spaces += 1
# zotaveni z chybneho poctu sloupcu
if len(row) < colsLength:
for a in range(0, (colsLength - len(row))):
# chybejici sloupce vyplnim znakem "<" pro identifikaci, tento symbol se mi na vstupu nikdy nemuze objevit
row.append("<")
for col in row:
if parameters["all"]:
actualCol = len(row)
cols += 1
# pokud neni aktivni prepinac --all-columns, prebyvajici sloupce ignoruji
if cols > colsLength:
if parameters["all"] == False:
break
# pokud je aktivni prepinac -h, tisknu elementy podle hlavicky, jinak podle parametru -c
if parameters["h"] != False and x < len(head):
outputFile.write("\t"*spaces + "<%s>\n" % head[x])
else:
outputFile.write("\t"*spaces + "<%s" % parameters["c"])
outputFile.write("%s>\n" % getColIndex(actualCol, cols))
spaces +=1
# pokud je pocet sloupcu mensi nebo roven nez na prvnim radku a sloupec nema hodnotu a zaroven je aktivni prepinac --missing-value, doplnim hodnotu, jinak prazdne pole
if cols <= colsLength and col == "<":
if parameters["missing"] != False:
outputFile.write("\t"*spaces + replace(parameters["missing"]) + "\n")
else:
outputFile.write("\t"*spaces + replace(col) + "\n")
spaces -=1
# pokud je aktivni prepinac -h, tisknu elementy podle hlavicky, jinak podle parametru -c
if parameters["h"] != False and x < colsLength:
outputFile.write("\t"*spaces + "</%s>\n" % head[x])
x += 1
else:
outputFile.write("\t"*spaces + "</%s" % parameters["c"])
outputFile.write("%s>\n" % getColIndex(actualCol, cols))
x = 0
cols = 0
spaces -= 1
outputFile.write("\t"*spaces + "</%s>\n" % parameters["l"])
# pokud je aktivni prepinac -r, tisknu root element
if parameters["r"] != False:
spaces -= 1
outputFile.write("</%s>\n" % parameters["r"])
| Mike-CZ/VUT-FIT | 2BIT/IPP/Projekt 2/csv.py | csv.py | py | 6,849 | python | cs | code | 0 | github-code | 90 |
17064092835 | import logging
from yolodeck.buttons.base_button import BaseButton
class BaseScreen(BaseButton):
    """A deck screen: a button that owns and dispatches to child buttons.

    Lifecycle events (suspend/resume/dispose) are forwarded to every child;
    key events (pressed/released) go to the button bound to that key.  A
    child that is itself a screen triggers navigation on release instead.
    """

    def __init__(self, key_no):
        self._logger = logging.getLogger('yolodeck')
        self._screen_manager = None
        self.buttons = {}
        # Subclasses populate self.buttons by overriding screen_buttons().
        self.screen_buttons()
        super().__init__(key_no)

    def suspend(self):
        """Suspend every child button."""
        # .values() -- the key numbers are not needed here.
        for button in self.buttons.values():
            button.suspend()

    def resume(self):
        """Resume every child button."""
        for button in self.buttons.values():
            button.resume()

    def dispose(self):
        """Dispose of every child button."""
        for button in self.buttons.values():
            button.dispose()

    def register_button(self, key_no, button_class):
        """Instantiate ``button_class`` and bind it to deck key ``key_no``."""
        self.buttons[key_no] = button_class(key_no)

    def screen_buttons(self):
        """Hook for subclasses to call register_button(); default: no buttons."""
        pass

    def render(self, screen_manager):
        """Draw this screen: reset the deck, then initialize every button."""
        self._logger.info("Rendering screen: %s", type(self).__name__)
        self._screen_manager = screen_manager
        screen_manager.reset_screen()
        for key_no, button in self.buttons.items():
            button.set_screen_manager(screen_manager)
            try:
                button.initialize()
            except Exception as ex:
                # A faulty button must not prevent the rest of the screen
                # from rendering.
                self._logger.error(
                    "Key %s (%s) initialize() raised an unhandled exception: "
                    "%s",
                    key_no, type(self).__name__, str(ex))

    def pressed(self, key_no):
        """Forward a key-down event to the bound button.

        Screen-type buttons are ignored here; navigation happens on release.
        """
        if key_no not in self.buttons:
            return
        if issubclass(type(self.buttons[key_no]), BaseScreen):
            return
        self._logger.info("Key %s pressed on %s", key_no, type(self).__name__)
        try:
            self.buttons[key_no].pressed()
        except Exception as ex:
            self._logger.error(
                "Key %s (%s) pressed() raised an unhandled exception: %s",
                key_no, type(self).__name__, str(ex))

    def released(self, key_no):
        """Forward a key-up event; a screen-type button navigates instead."""
        if key_no not in self.buttons:
            return
        if issubclass(type(self.buttons[key_no]), BaseScreen):
            self._screen_manager.set_active_screen(self.buttons[key_no])
            return
        self._logger.info("Key %s released on %s", key_no,
                          type(self).__name__)
        try:
            self.buttons[key_no].released()
        except Exception as ex:
            self._logger.error(
                "Key %s (%s) released() raised an unhandled exception: %s",
                key_no, type(self).__name__, str(ex))
        # Pop happens even after a button error, matching the original flow.
        self._screen_manager.pop_active_screen()
| lamaral/yolodeck | yolodeck/screens/base_screen.py | base_screen.py | py | 2,513 | python | en | code | 1 | github-code | 90 |
74457547817 | from graphviz import Digraph
class BlockDiagram():
    """Graphviz block-diagram view of a chips Chip object.

    Python 2 code (uses dict.iteritems).  Each instance becomes a record
    node with input ports on the left and output ports on the right; wires
    become edges from the producing port to the consuming port.
    """
    def __init__(self, chip):
        self.chip = chip
        g = Digraph(self.chip.name, graph_attr={"rankdir": "LR"})
        # Wire id -> "node:port" endpoints, filled in while visiting nodes.
        sources = {}
        sinks = {}
        for instance in self.chip.instances:
            for port, wire in instance.inputs.iteritems():
                sinks[str(id(wire))] = str(id(instance)) + ":" + port
            for port, wire in instance.outputs.iteritems():
                sources[str(id(wire))] = str(id(instance)) + ":" + port
            # Record-shape label: {inputs}|component|{outputs}, with <port>
            # markers so edges can attach to individual ports.
            inputs = "|".join(["<%s> %s" % (i, i)
                               for i in instance.inputs.keys()])
            outputs = "|".join(["<%s> %s" % (i, i)
                                for i in instance.outputs.keys()])
            label = "{{%s}|%s|{%s}}" % (
                inputs,
                instance.component_name,
                outputs
            )
            g.node(str(id(instance)), label=label, shape="record")
        # Chip-level inputs/outputs appear as standalone named nodes.
        for input_ in self.chip.inputs.values():
            sources[str(id(input_))] = str(id(input_))
            g.node(str(id(input_)), label=input_.name, shape="record")
        for output_ in self.chip.outputs.values():
            sinks[str(id(output_))] = str(id(output_))
            g.node(str(id(output_)), label=output_.name, shape="record")
        # Connect every wire's producer to its consumer.
        for wire, source in sources.iteritems():
            sink = sinks[wire]
            g.edge(source, sink)
        self.g = g
    def render(self, *args, **vargs):
        """Delegate to graphviz.Digraph.render (writes the output file)."""
        return self.g.render(*args, **vargs)
    def view(self, *args, **vargs):
        """Delegate to graphviz.Digraph.view (render and open a viewer)."""
        return self.g.view(*args, **vargs)
if __name__ == "__main__":
from chips.api.api import *
from chips.components.components import *
c = Chip("my_chip")
a = Input(c, "a")
b = Input(c, "b")
d = Input(c, "d")
e = Input(c, "e")
x, y = tee(c, add(c, add(c, a, b), add(c, d, e)))
discard(c, x)
discard(c, y)
b = BlockDiagram(c)
b.view()
| dawsonjon/Chips-2.0 | chips/utils/block_diagram.py | block_diagram.py | py | 1,969 | python | en | code | 225 | github-code | 90 |
# Among all substrings of S with length <= k, print the k-th smallest
# (lexicographically, distinct).  Only substrings starting with one of the
# k smallest characters can be candidates.
S, k = open(0).read().split()
k = int(k)
smallest_chars = set(sorted(S)[:min(k, len(S))])
# Slicing clips at the end of the string, so S[i:i + k] matches the old
# bounded character-append loop exactly.
candidates = {S[i:i + k] for i, ch in enumerate(S) if ch in smallest_chars}
print(sorted(candidates)[k - 1])
44855394670 | from typing import List
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup, InlineKeyboardButton, InlineKeyboardMarkup
from aiogram.utils.keyboard import InlineKeyboardBuilder
from aiogram.filters.callback_data import CallbackData
from core import schemas
class BotFactory(CallbackData, prefix="bot"):
    """Callback payload for picking one bot from the projects list."""
    bot_id: int


class MenuBotFactory(CallbackData, prefix="menu_bot"):
    """Callback payload for an action inside a single bot's menu."""
    action: str
    bot_id: int


# Persistent main-menu reply keyboard.
menu = ReplyKeyboardMarkup(
    keyboard=[
        [
            KeyboardButton(text="Добавить проект"),
            KeyboardButton(text="Мои проекты")
        ],
    ],
    resize_keyboard=True
)
# Single-button keyboard used to abort the current dialog state.
cancel = ReplyKeyboardMarkup(keyboard=[[KeyboardButton(text="Отмена")]],
                             resize_keyboard=True)
def get_bots(bots: List[schemas.BotOut]):
    """Inline keyboard listing the user's bots plus an 'add bot' entry."""
    kb = InlineKeyboardBuilder()
    for bot in bots:
        kb.button(text=f"{bot.name} ({bot.ques_count})", callback_data=BotFactory(bot_id=bot.bot_id))
    kb.add(InlineKeyboardButton(text="Добавить бота", callback_data="add_bot"))
    kb.adjust(1)  # one button per row
    return kb.as_markup()
def get_bot(bot_id):
    """Inline keyboard with the per-bot menu actions."""
    kb = InlineKeyboardBuilder()
    kb.add(
        InlineKeyboardButton(
            text="Изменить приветствие",
            callback_data=MenuBotFactory(action="change_hello_msg", bot_id=bot_id).pack()),
        InlineKeyboardButton(
            text="Удалить проект",
            callback_data=MenuBotFactory(action="delete", bot_id=bot_id).pack()),
    )
    kb.adjust(1)  # one button per row
    return kb.as_markup()
def get_delete(bot_id):
    """Yes/No confirmation keyboard for deleting a bot."""
    kb = InlineKeyboardBuilder()
    for label, action in (("Да", "sure_delete"), ("Нет", "cancel_delete")):
        kb.button(text=label, callback_data=MenuBotFactory(action=action, bot_id=bot_id))
    return kb.as_markup()
| HungryStudent/multi_support_bot | keyboards/user.py | user.py | py | 1,951 | python | en | code | 0 | github-code | 90 |
import cv2
import numpy as np

# Play "run1.mp4" as a half-size grayscale preview until the clip ends or
# the user presses 'q'.
capture = cv2.VideoCapture("run1.mp4")
while capture.isOpened():
    ok, frame = capture.read()
    # if Frame is read correctly ok is True
    if not ok:
        print('Cant find the file check it once again..')
        break
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    grayscale = cv2.resize(grayscale, (0, 0), fx=0.5, fy=0.5)
    cv2.imshow("Gray frame", grayscale)
    if cv2.waitKey(1) == ord('q'):
        break

capture.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
# Simple in-memory marksheet: menu-driven add / search / delete of student
# records keyed by roll number.
results = {}
while True:
    print("0-Exit,1-Add, 2-Search, 3-Delete")
    n = int(input("Option\n"))
    if n == 0:
        break
    elif n == 1:
        print("Add")
        rollno = int(input("Roll no\n"))
        getter = results.get(rollno)
        if getter is not None:
            print('Already exists')
        else:
            name = input("name\n")
            phy = int(input("phy\n"))
            chem = int(input("chem\n"))
            math = int(input("math\n"))
            # Fail if any subject is below the 33-mark pass threshold.
            value = 'Pass'
            if phy < 33 or chem < 33 or math < 33:
                value = "Fail"
            total = phy + chem + math
            percentage = total / 3
            results[rollno] = {"name": name, "total": total, "Result": value, "Percentage": percentage,
                               "physics": phy, "Chemistry": chem, "Math": math}
    elif n == 2:
        print("Search")
        rollno = int(input("Roll no\n"))
        result = results.get(rollno, "Not found")
        print(result)
    elif n == 3:
        print("Delete")
        rollno = int(input("Roll no\n"))
        # Bug fix: the old code popped the literal key "rollno" from the
        # student's record (a no-op that never deleted anything) and crashed
        # with AttributeError when the roll number was missing.  Remove the
        # record itself instead.
        if results.pop(rollno, None) is None:
            print('not found')
        else:
            print("Deleted")
    else:
        print('Invalid option')
| Varanasi-Software-Junction/pythoncodecamp | dictionaries/marksheet.py | marksheet.py | py | 1,242 | python | en | code | 10 | github-code | 90 |
34845057314 | import pandas as pd
import re
import json
from typing import Optional,Tuple
import numpy as np
import random
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import os
# NOTE(review): suppresses every warning process-wide; consider narrowing
# to the specific categories that are actually noisy here.
warnings.filterwarnings('ignore')
class CallfunctionalFi():
    """Per-functional-category allele counts, optionally bootstrapped.

    Classifies missense variants by Grantham score, then sums the
    population-specific HET/HOM frequency columns per functional category,
    either once over the whole table or over ``boostrap_n`` random
    contiguous windows of ``fix_sites`` rows.
    """
    def __init__(self,
                df,
                fix_sites: Optional[int] = None,
                boostrap_n: Optional[int] = 100
                ) -> None:
        """
        :param df: variant table; expected to carry 'functional', 'Gscores'
            and the HET_AB/HOM_AB/HET_BA/HOM_BA columns read below
        :param fix_sites: window size for bootstrap; defaults to n_rows / 5
        :param boostrap_n: number of bootstrap windows; 0 disables bootstrap
        """
        if fix_sites:
            self.fix_sites = fix_sites
        else:
            self.fix_sites = int(df.shape[0]/5)
        self.boostrap_n = boostrap_n
        self.main(df)
    def trans_data(self,df):
        """Split 'missense_variant' rows into missense_del / missense_ben
        using a Grantham-score threshold of 150 (missing scores -> 0)."""
        ## filtered frequency
        #df_di = pd.read_csv(file,sep='\t')
        # df[f'{self.popA}_pop_freq'] = df[f'{self.popA}.het_alt.freq'] + df[f'{self.popA}.het_alt.freq']
        # df[f'{self.popB}_pop_freq'] = df[f'{self.popB}.het_alt.freq'] + df[f'{self.popB}.het_alt.freq']
        #df_di = df[(df[f'{self.popA}_pop_freq'] <= self.max_freq) & (df[f'{self.popA}_pop_freq'] <= self.max_freq)]
        ## category delerious and ben
        df['Gscores'] = df['Gscores'].fillna(0)
        df['Gscores'] = df['Gscores'].apply(lambda x:int(x))
        df.loc[((df['functional']=='missense_variant') & (df['Gscores']>=150 )),'functional'] = 'missense_del'
        df.loc[((df['functional']=='missense_variant') & (df['Gscores']<150 )),'functional'] = 'missense_ben'
        return df
    def call_spec_pop_freq(self, df):
        """Sum HET/HOM/All frequencies per functional category, for both
        population directions (AB and BA).  Returns (df_fi_AB, df_fi_BA)."""
        ## pop specific frequency
        df_fi_AB = pd.DataFrame()
        df_fi_AB['All'] = (df['HET_AB'] + df['HOM_AB']).groupby(df['functional']).sum()
        df_fi_AB['HET'] = df['HET_AB'].groupby(df['functional']).sum()
        df_fi_AB['HOM'] = df['HOM_AB'].groupby(df['functional']).sum()
        df_fi_BA = pd.DataFrame()
        df_fi_BA['All'] = (df['HET_BA'] + df['HOM_BA']).groupby(df['functional']).sum()
        df_fi_BA['HET'] = df['HET_BA'].groupby(df['functional']).sum()
        df_fi_BA['HOM'] = df['HOM_BA'].groupby(df['functional']).sum()
        return df_fi_AB,df_fi_BA
    def _jackknifes(self,fix_sites, all_sites):
        """
        Pick a random window [start, start + fix_sites) by rejection sampling.

        @param fix_sites: window size (number of sites)
        @param all_sites: total number of sites
        NOTE(review): recurses until a valid window is drawn; fix_sites close
        to (or above) all_sites risks deep recursion -- confirm inputs.
        """
        start = random.randint(0, all_sites) # get start from 0 and all_sites
        end = start + fix_sites
        if end > all_sites:
            start, end = self._jackknifes(fix_sites, all_sites) # redrawn start, end
        return start, end
    #def get_regs(self,df):
        #regs = []
        #for idx,val in df[['#CHROM','POS']].groupby('#CHROM'):
        #    regs.append(f'{idx}:{min(val["POS"])}-{max(val["POS"])}')
        #return ','.join(regs)
    def _boostrap_run(self,df):
        """Run call_spec_pop_freq over boostrap_n random windows; columns of
        replicate k are suffixed with _k, then all replicates are joined."""
        n = 0
        fi_AB_infos = []
        fi_BA_infos = []
        #reg_n = []
        while n<self.boostrap_n:
            n = n + 1
            start,end = self._jackknifes(self.fix_sites, df.shape[0])
            df_di_n = df.iloc[start:end,]
            #reg_n.append([n,self.get_regs(df_di_n)])
            df_fi_AB,df_fi_BA = self.call_spec_pop_freq(df_di_n)
            df_fi_AB.columns = [i+ f'_{n}' for i in df_fi_AB.columns]
            df_fi_BA.columns = [i+ f'_{n}' for i in df_fi_BA.columns]
            fi_AB_infos.append(df_fi_AB)
            fi_BA_infos.append(df_fi_BA)
        # Categories missing from some windows become 0 rather than NaN.
        df_fi_AB_bs = pd.concat(fi_AB_infos,axis=1).fillna(0)
        df_fi_BA_bs = pd.concat(fi_BA_infos,axis=1).fillna(0)
        #df_reg_bs = pd.DataFrame(reg_n,columns=['N','Regions'])
        return df_fi_AB_bs,df_fi_BA_bs
    def main(self,df):
        """Classify variants, then compute AB/BA tables (bootstrapped or not)."""
        self.df_di = self.trans_data(df)
        if self.boostrap_n > 0:
            self.AB, self.BA = self._boostrap_run(self.df_di)
        else:
            self.AB, self.BA = self.call_spec_pop_freq(self.df_di)
class CallBurdenRisk():
    """Relative burden risk (A over B) per variant class, normalized by a
    neutral reference category (default: intergenic_region)."""
    def __init__(self,
                fi_AB,
                fi_BA,
                norm_item: Optional[str] = 'intergenic_region',
                ) -> None:
        """
        :param fi_AB: per-category counts for direction A->B (CallfunctionalFi.AB)
        :param fi_BA: per-category counts for direction B->A (CallfunctionalFi.BA)
        :param norm_item: category (or list) used as the neutral normalizer
        """
        self.fi_AB = fi_AB
        self.fi_BA = fi_BA
        self.norm_item = norm_item
        self.main()
    def call_norm_risk(self, df_fi_AB, df_fi_BA, in_item, norm_item):
        """Return (AB sum / AB norm) / (BA sum / BA norm) for the given
        category or list of categories; absent categories are skipped."""
        if not isinstance(in_item,list):
            in_item = [in_item]
        if not isinstance(norm_item,list):
            norm_item = [norm_item]
        # Intersect with the index so requested-but-absent categories do not
        # raise a KeyError.
        AB = (df_fi_AB.loc[list(set(in_item) & set(df_fi_AB.index))].sum()/df_fi_AB.loc[norm_item].sum())
        BA = (df_fi_BA.loc[list(set(in_item) & set(df_fi_BA.index))].sum()/df_fi_BA.loc[norm_item].sum())
        return AB/BA
    def norm_risk(self,
                  df_fi_AB,
                  df_fi_BA):
        """Build the risk table for missense_del, missense_ben and LOF
        (loss-of-function = start/stop/splice consequence classes)."""
        lof_items=['start_lost','splice_acceptor_variant','stop_gained','stop_lost','splice_donor_variant']
        df_risk = pd.DataFrame()
        df_risk['missense_del'] = self.call_norm_risk(df_fi_AB,df_fi_BA,'missense_del',self.norm_item)
        df_risk['missense_ben'] = self.call_norm_risk(df_fi_AB,df_fi_BA,'missense_ben',self.norm_item)
        df_risk['LOF'] = self.call_norm_risk(df_fi_AB,df_fi_BA,lof_items,self.norm_item)
        return df_risk
    def main(self):
        """Compute df_risk and split bootstrap column labels 'Group_N' into
        Group and Bs_N; non-bootstrap labels have no '_N' suffix, in which
        case the split fails and only Group is set."""
        self.df_risk = self.norm_risk(self.fi_AB,
                                      self.fi_BA).reset_index()
        try:
            self.df_risk[['Group','Bs_N']] = self.df_risk['index'].apply(lambda x:x.split('_')).apply(pd.Series)
        except:
            # NOTE(review): bare except also hides unrelated errors; the
            # intended case is labels without a bootstrap suffix.
            self.df_risk['Group'] = self.df_risk['index'].apply(lambda x:x)
def get_gscores(x, Gscores):
    """Grantham score for a missense HGVS.p string such as 'p.Ala123Gly'.

    Looks up Gscores[ref][alt]; since the table is stored one-way, falls
    back to the transposed entry Gscores[alt][ref] (the score is symmetric).

    :param x: HGVS protein change with three-letter amino-acid codes
    :param Gscores: nested dict of amino-acid pair scores
    :raises KeyError: if neither orientation is present in the table
    """
    aa1, aa2 = re.search(r'p\.(\w{3})\d+(\w{3})', x).groups()
    try:
        return Gscores[aa1][aa2]
    except KeyError:
        # Narrowed from a bare except: only a missing key should trigger the
        # transposed lookup.  The print is kept as a trace of fallbacks.
        print(x)
        return Gscores[aa2][aa1]
def plot_burden_risk(df,kind='boxplot'):
    """Plot relative burden risk per group as grouped box- or bar-plots.

    Expects *df* to carry 'missense_del', 'missense_ben', 'LOF' and 'Group'
    columns (as produced by ``CallBurdenRisk``).  Draws onto the current
    matplotlib figure; returns nothing.

    NOTE(review): if *kind* is neither 'boxplot' nor 'barplot', ``g`` is
    never assigned and the axis-labelling below raises NameError.
    """
    # Long-to-wide melt: one row per (Group, variant-class) pair.
    df_mt = df[['missense_del','missense_ben','LOF','Group']].melt(id_vars=['Group'])
    # Global styling tweaks (affects subsequent plots in the session too).
    plt.rcParams['xtick.labelsize']=20
    plt.rcParams['ytick.labelsize']=20
    plt.rcParams['legend.fontsize']=16
    plt.rcParams['legend.title_fontsize']=16
    plt.rcParams['font.family'] = 'Helvetica'
    fig,ax = plt.subplots(1,1,figsize=(6,6))
    if kind == 'boxplot':
        g = sns.boxplot(x='Group',y='value',hue='variable'
                   ,data=df_mt,
                   hue_order=['LOF','missense_del','missense_ben'],
                   ax=ax,palette=['red','yellow','lightgreen'])
    elif kind == 'barplot':
        g = sns.barplot(x='Group',y='value',hue='variable'
                   ,data=df_mt,
                   hue_order=['LOF','missense_del','missense_ben'],
                   ax=ax,palette=['red','yellow','lightgreen'])
    # Replace the raw column names with human-readable legend labels.
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles=handles, labels = ['LOF','Missense deleterious','Missense benign'],
               bbox_to_anchor=(1,0.65),frameon=False)
    g.set_ylabel('Relative Burden Risk$_{A/B}$',fontsize=20,labelpad=10)
    g.set_xlabel('')
    # Reference line at risk == 1 (no difference between populations).
    plt.axhline(y=1,ls='--',color='r')
    sns.despine()
def derived_allele(df, outgrp):
    """Return rows of *df* whose alternative allele is absent (hom-alt and
    het-alt frequency both 0) in at least one of the outgroups.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain ``'<grp>.hom_alt.freq'`` and ``'<grp>.het_alt.freq'``
        columns for every group in *outgrp*.
    outgrp : str or list of str
        Outgroup name(s).

    Bug fix: the per-outgroup masks are now actually OR-combined.  The
    original assigned the combined mask to an unused ``flag`` variable, so
    only the first outgroup ever took effect.
    NOTE(review): OR matches the operator in the original (dead) code,
    i.e. a site qualifies if absent in *any* outgroup — confirm that AND
    (absent in all outgroups) is not the intended semantics.
    """
    if isinstance(outgrp, str):
        outgrp = [outgrp]
    flag_info = [(df[f'{grp}.hom_alt.freq'] == 0) & (df[f'{grp}.het_alt.freq'] == 0)
                 for grp in outgrp]
    is_di = flag_info[0]
    for each in flag_info[1:]:
        is_di = np.logical_or(is_di, each)
    df_info_di = df[is_di]
    return df_info_di
def add_Gscores(df):
    """Annotate missense rows of *df* with their Grantham score.

    Loads ``Grantham_Scores.json`` from the directory containing this
    module and writes a ``'Gscores'`` column for every row whose
    ``'functional'`` value is ``'missense_variant'``.  Returns *df*.
    """
    script_dir = os.path.split(os.path.realpath(__file__))[0]
    with open(f'{script_dir}/Grantham_Scores.json', 'r') as fh:
        grantham = json.load(fh)
    is_missense = df['functional'] == 'missense_variant'
    df.loc[is_missense, 'Gscores'] = df.loc[is_missense, 'hgv.p'].apply(
        lambda hgvp: get_gscores(hgvp, grantham))
    return df
def pop_hom_het_freq(df,popA,popB):
## filter poplation A and B with hom freq == 1
df = df[~((df[f'{popA}.hom_alt.freq'] == 1) |
(df[f'{popB}.hom_alt.freq'] == 1))]
df['HET_AB'] = df[f'{popA}.het_alt.freq']*(1 - df[f'{popB}.het_alt.freq'])
df['HOM_AB'] = df[f'{popA}.hom_alt.freq']*(1 - df[f'{popB}.hom_alt.freq'])
df['HET_BA'] = df[f'{popB}.het_alt.freq']*(1 - df[f'{popA}.het_alt.freq'])
df['HOM_BA'] = df[f'{popB}.hom_alt.freq']*(1 - df[f'{popA}.hom_alt.freq'])
return df | ZhengCQ/brrAB | bin/callrisk.py | callrisk.py | py | 8,420 | python | en | code | 1 | github-code | 90 |
13471579010 | import sys, json
import csv
from matching.games import HospitalResident
def parse_student_csv(file_p):
    """Parse a preference CSV into matching-game inputs.

    Returns a 3-tuple:
      student_preferences: dict(str(student_id) -> [section, ...]) in
        preference order (up to four choices per student)
      section_preferences: dict(section_name -> [student_id, ...]) listing
        the students who ranked that section, in row order
      section_capacities: dict(section_name -> int) splitting the students
        as evenly as possible across the sections

    Bug fix: the remainder-distribution loop indexed ``dict.keys()``, which
    is not subscriptable in Python 3 (TypeError); the keys are materialised
    into a list first.  The four copy-pasted per-column branches were also
    collapsed into a single loop.
    """
    no_str = 'I have a schedule conflict which prevents me from attending other section times except for the one(s) listed as higher preferences'
    pref_columns = ['First preference', 'Second preference',
                    'Third preference', 'Fourth preference']
    student_pref = {}
    section_pref = {}
    section_capa = {}
    with open(file_p, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        fields = next(csvreader)
        inv_fields = {v: i for i, v in enumerate(fields)}
        # extracting each data row one by one
        for i, row in enumerate(csvreader):
            if not row[0]:
                continue
            student_id = str(i)
            student_pref[student_id] = []
            for col in pref_columns:
                choice = row[inv_fields[col]]
                # Skip empty cells and the "schedule conflict" sentinel.
                if choice and choice != no_str:
                    student_pref[student_id].append(choice)
                    section_pref.setdefault(choice, []).append(student_id)
    # Split students as evenly as possible over the sections; the first
    # ``remain`` sections (in insertion order) absorb the remainder.
    student_size = len(student_pref)
    section_size = len(section_pref)
    common = student_size // section_size
    remain = student_size % section_size
    for s in section_pref.keys():
        section_capa[s] = common
    for s in list(section_pref.keys())[:remain]:
        section_capa[s] += 1
    return student_pref, section_pref, section_capa
def parse_input_pref(pref_in):
    """Convert raw preference records into matching-game inputs.

    Returns:
      student_pref: dict(SID -> [section, ...]) of sections the student can
        attend
      section_pref: dict(section -> [SID, ...]) of students who can attend
      sid_name_map: dict(SID -> Name)

    The three acceptable answers were handled by three byte-identical
    branches; they are folded into one membership test.  All three answers
    were (and still are) treated identically — no preference weighting.
    """
    acceptable = ('Highly prefer', 'Can make it',
                  'Do not prefer but can make it')
    student_pref = {}
    section_pref = {}
    sid_name_map = {}
    for student in pref_in:
        name = student['Name']
        sid = student['SID']
        sid_name_map[sid] = name
        student_pref[sid] = []
        for k in student['Sections']:
            if student['Sections'][k] in acceptable:
                student_pref[sid].append(k)
                section_pref.setdefault(k, []).append(sid)
    return student_pref, section_pref, sid_name_map
def format_output(matching, sid_name_map):
    """Expand each matched SID into a ``{'Name': ..., 'SID': ...}`` record,
    keyed by section name."""
    return {
        section: [{'Name': sid_name_map[sid], 'SID': sid} for sid in sids]
        for section, sids in matching.items()
    }
def allocated_unmatched(matching, section_capa, student_lst):
    """Distribute students who got no match into sections with spare
    capacity.  Mutates and returns *matching*.

    NOTE: the leftover pool comes from a set difference, so its ordering is
    arbitrary when more than one student is unmatched.
    """
    already_placed = {s for students in matching.values() for s in students}
    leftovers = list(set(student_lst) - already_placed)
    cursor = 0
    for section, students in matching.items():
        spare = section_capa[section] - len(students)
        if spare > 0:
            students.extend(leftovers[cursor:cursor + spare])
            cursor += spare
    return matching
def stringfy_result(matching):
    """Replace matching-library objects with their ``.name`` strings on
    both sides of the section -> students mapping."""
    return {section.name: [student.name for student in students]
            for section, students in matching.items()}
# Script entry: read one JSON line from stdin, solve the hospital/resident
# matching, balance leftovers, and print the result as JSON on stdout.
# pref_data = json.loads(next(sys.stdin).strip())
# capacity = json.loads(next(sys.stdin).strip())
data = json.loads(next(sys.stdin).strip())
pref_data = data["prefData"]
capacity = {k: int(v) for k, v in data["capacity"].items()}
# with open("output.txt", "a") as f:
#     f.write(repr(pref_data) + "\n")
#     f.write(repr(capacity) + "\n")
student_pref, section_pref, sid_name_map = parse_input_pref(pref_data)
game = HospitalResident.create_from_dictionaries(student_pref, section_pref, capacity)
matching = game.solve(optimal="resident") #return a Dict(section_name: List[student_id])
# NOTE(review): these sanity checks disappear under ``python -O``; raise
# explicitly if they must always run.
assert game.check_validity()
assert game.check_stability()
str_matching = stringfy_result(matching)
balanced_matching = allocated_unmatched(str_matching, capacity, student_pref.keys())
output = format_output(balanced_matching, sid_name_map)
print(json.dumps(output))
| MohamedElgharbawy/p-cubed | python/run_assign.py | run_assign.py | py | 6,690 | python | en | code | 0 | github-code | 90 |
42363991910 | import re
from lark import Token
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckResult, CheckCategories
class ForbiddenResources(BaseResourceCheck):
    """Checkov custom check: flag every ``google_project_iam_policy``
    resource as a failure (the resource type itself is forbidden)."""
    def __init__(self):
        # ``name`` and ``id`` shadow builtins here, but are only used as
        # keyword arguments to the base-class constructor.
        name = "Make sure no forbidden resources are being created"
        id = "GC_04"
        supported_resources = ['google_project_iam_policy']
        # CheckCategories are defined in models/enums.py
        categories = [CheckCategories.BACKUP_AND_RECOVERY]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
    def scan_resource_conf(self, conf):
        """
        Always fail: any occurrence of a supported (forbidden) resource
        type is reported, regardless of its configuration *conf*.
        :return: <CheckResult>
        """
        return CheckResult.FAILED
scanner = ForbiddenResources()
| jensskott/tf-compliance | checkov/GC_04_forbidden_resource.py | GC_04_forbidden_resource.py | py | 837 | python | en | code | 0 | github-code | 90 |
33687090409 | # Here is the a TIP CALCULATOR that uses various Arithmetic Operator.
print("==>","\033[35m","TIPS CALCULATOR","\033[0m","<==")
print()
bill = float(input("How much did you Spend: "))
percent = float(input("What percentage do you want to tip: "))
people = float(input("How many people in your group: "))
ps = (percent / 100) * bill
sum = bill + ps
div = sum / people
div = round(div, 2)
print("You each owe $", div)
| Innocentsax/Python_Series | Day_10.py | Day_10.py | py | 417 | python | en | code | 29 | github-code | 90 |
13607991602 | from nose.tools import assert_equal
import scrumble
cases = [
["1/1990", {'year': 1990, 'month': 1}],
["31 January 2013", {'year': 2013, 'month': 1, 'day': 31}],
["2012 10", {'year': 2012, 'month': 10}],
["Mar 2012", {'year': 2012, 'month': 3}],
[None, {}],
["", {}],
["not a date", None]
]
def test_basic():
for p in cases:
yield do_basic, p
def do_basic(p):
try:
return assert_equal(p[1], dict(scrumble.as_date(p[0])))
except scrumble.DateutilParseError:
assert p[1] is None
| scraperwiki/scrumble | test/test_real_dates.py | test_real_dates.py | py | 546 | python | en | code | 4 | github-code | 90 |
23856157148 | import cv2
import winsound
import argparse
from extraction_original import extract_parameters
from segmentation import segment_image
from extraction_single_image import search_borders, get_smallest_shape_scrambled
from solution import solve_puzzle
from result import *
from matching import *
# play background sound
winsound.PlaySound("test.wav", winsound.SND_ASYNC | winsound.SND_ALIAS)
def main(image_name):
image = cv2.imread(image_name)
total_pieces, method, type = extract_parameters(image_name, image)
all_array = segment_image(image, total_pieces, method, type)
# print_array(all_array)
border_types = search_borders(all_array)
array_c, array_e, array_m, array_c_types, array_e_types, array_m_types = sort_images(all_array, border_types)
print("containers with borders")
# print_array(array_c)
print(array_c_types)
print("--------")
# print_array(array_e)
print(array_e_types)
print("----------")
# print_array(array_m)
print(array_m_types)
print("----------")
result_array = solve_puzzle(total_pieces, method, array_c, array_e, array_m, array_c_types, array_e_types, array_m_types)
# print_array(result_array)
length, width = get_smallest_shape_scrambled(result_array)
final_image = show_puzzle(total_pieces, length, width, result_array)
return final_image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Project multimedia')
parser.add_argument("--method",
help="method name: (shuffled, rotated, scrambled)",
default="shuffled")
parser.add_argument('--dataset', help='dataset name: (2x2, 2x3, 3x3, 4x4, 5x5)', default="2x2")
args = parser.parse_args()
name = "images/jigsaw_" + args.method + "/jigsaw_" + args.method + "_" + args.dataset + "_0"
for i in range(9):
i = str(i)
print("puzzle" + i)
try:
image = main(name + i + ".png")
cv2.imshow("puzzle" + i, image)
except:
print("Program failed, possibly wrong method name or dataset name")
print("--------------------------------------------------------")
winsound.PlaySound(None, winsound.SND_ASYNC) # adding winning music
add_music()
cv2.waitKey()
cv2.destroyAllWindows()
| Laurens-VG/Puzzle-Solver | code/main.py | main.py | py | 2,316 | python | en | code | 0 | github-code | 90 |
28355693244 | from prettytable import PrettyTable
x = PrettyTable()
class TreeNode:
def __init__(self,data):
self.data=data
self.children=[]
self.child=None
self.parent=None
def add_children(self, children):
children.parent = self
self.children.append(children)
def add_child(self,child):
child.parent = self
self.child = child
def get_level(self):
level = 0
p = self.parent
while p:
level += 1
p = p.parent
return level
def insert_node(self,heightoftree,q,attributes,newvalues):
length = len(q)
for i in range(length):
p=q[i]
level=1
while(level<heightoftree):
p=p.child
level+=1
p.add_child(TreeNode(newvalues[i]))
def print_tree(self):
spaces = ' ' * self.get_level() * 3
prefix = spaces + "|--" if self.parent else ""
print(prefix + str(self.data))
if self.children:
for children in self.children:
children.print_tree()
if self.child:
self.child.print_tree()
def saved_tree(self,q,tablename,heightoftree):
saved2=[]
saved2.append(len(q))
saved2.append(heightoftree)
saved2.append(tablename)
while(len(q)!=0):
z=q[0]
if (z!=None):
saved2.append(z.data)
q.append(z.child)
q.remove(z)
else:
break
n = saved2[0]
updated_list = saved2[3:]
z = [updated_list[i:i + n] for i in range(0, len(updated_list), n)]
x.field_names = z[0]
data_rows = z[1:]
for i in range(len(data_rows)):
x.add_row(data_rows[i])
print(x)
x.clear_rows()
return saved2
def insert_operation(saved,attributes,newvalues):
width=saved[0]
heightoftree=saved[1]
root = TreeNode(saved[2])
for i in range(3,width+3):
root.add_children(TreeNode(saved[i]))
for a in root.children:
index=root.children.index(a)
interval=index+width+3
a.add_child(TreeNode(saved[interval]))
temp=a.child
n=width
while(interval+n<len(saved)):
b=saved[interval+n]
temp.add_child(TreeNode(b))
temp=temp.child
n+=width
q=root.children
root.insert_node(heightoftree,q,attributes,newvalues)
heightoftree+=1
tablename = root.data
final_list = root.saved_tree(q,tablename,heightoftree)
return final_list
if __name__ == '__main__':
attributes =['StudentID', 'Name', 'Grade', 'Contact']
condition='='
conattributes='Contact'
convalues=23546
newvalues=['T123', 'TPiyush', 'TABC', 123546]
saved = [4, 5,'Student', 'StudentID', 'Name', 'Grade', 'Contact', '123', 'Piyush', 'ABC', 23546, '234', 'Nirmal', 'ACB', 54556, '345', 'Falgun', 'ACA', 74656, '476', 'Test', 'dfb', 23546]
insert_operation(saved,attributes,newvalues) | FalgunMakadia/project-data-warehousing | Project/InsertOperation.py | InsertOperation.py | py | 3,081 | python | en | code | 0 | github-code | 90 |
40098167258 | class Ticket:
tikcount = 0
def __init__(self, name, numlist):
self.name = name
self.num1 = numlist[0]
self.num2 = numlist[1]
self.num3 = numlist[2]
self.num4 = numlist[3]
self.num5 = numlist[4]
self.num6 = numlist[5]
Ticket.tikcount += 1
| mcain84/powerball_project | ticket.py | ticket.py | py | 323 | python | en | code | 0 | github-code | 90 |
41978329299 | import ctypes
import pytest
c_lib = ctypes.CDLL('../solutions/1512-good-pair/good-pair.so')
@pytest.mark.parametrize('function', [c_lib.numIdenticalPairsSpace,
c_lib.numIdenticalPairsTime])
def test_good_pair(function):
array = [1,2,3,1,1,3]
arr = (ctypes.c_int * len(array))(*array)
out = function(arr, len(arr))
assert out == 4
| msztylko/2020ify-leetcoding | tests/test_1512.py | test_1512.py | py | 386 | python | en | code | 0 | github-code | 90 |
15069221041 | import time
from collections import namedtuple
TrackedSegment = namedtuple('TrackedSegment', ['expectedACKNum', 'sendTime']) # Named tuple for currently tracked segment
class Timer:
    """Retransmission timer with an adaptive RTO (EWMA of sampled RTTs,
    in the style of TCP's estimator).

    At most one in-flight segment is tracked at a time for RTT sampling;
    retransmissions and out-of-order ACKs discard the sample (Karn's
    algorithm style).  NOTE(review): reading ``isTimedOut`` before
    ``start()`` has ever run raises TypeError because ``lastSentTime`` is
    still None.
    """
    def __init__(self, estimatedRTT=0.5, devRTT=0.25, gamma=4):
        # Default values
        self.estimatedRTT = estimatedRTT
        self.devRTT = devRTT
        self.alpha = 0.125
        self.beta = 0.25
        self.gamma = gamma
        # Variables to calculate SampleRTTs
        self.trackedSegment = None
        self.lastSentTime = None
        # Min and max RTOs
        self.minRTO = 0.2
        self.maxRTO = 60
    @property
    def isTimedOut(self): # Checks if timer has timed out
        return time.time() - self.lastSentTime >= self.RTO
    @property
    def RTO(self): # Returns RTO if its between the min and max
        # RTO = estimatedRTT + gamma * devRTT, clamped to [minRTO, maxRTO].
        RTO = self.estimatedRTT + self.gamma * self.devRTT
        if RTO < self.minRTO:
            return self.minRTO
        elif RTO > self.maxRTO:
            return self.maxRTO
        else:
            return RTO
    def start(self, RXT=False, nextSeqNum=None): # Starts timer
        """Record the send time; begin tracking this segment's RTT unless it
        is a retransmission or another segment is already tracked."""
        self.lastSentTime = time.time() # Records time to last sent segment
        if RXT: # If this segment is a retransmission, currently tracked RTT is delayed - discard it
            return self.discard()
        elif self.trackedSegment is None: # If no currently tracked segment, track this one
            print('====\n TRACKING: Seg Num: {}\n===='.format(nextSeqNum))
            self.trackedSegment = TrackedSegment(expectedACKNum=nextSeqNum, sendTime=self.lastSentTime)
    def update(self, ackNum): # Updates timer on each received ACK
        """Fold the ACK into the RTT estimate when it matches the tracked
        segment; discard the sample on a cumulative (higher) ACK."""
        if self.trackedSegment: # If a segment is currently being tracked
            expectedACKNum = self.trackedSegment.expectedACKNum
            if ackNum == expectedACKNum: # If ACKnum is for the currently tracked segment - update RTT and then discard currently tracked segment
                self._updateRTT(time.time() - self.trackedSegment.sendTime)
                self.discard()
            elif ackNum > expectedACKNum: # ACKnum is not for currently tracked segment, then currently tracked RTT is delayed - discard it
                self.discard()
    def discard(self): # Discards currently tracked segment
        self.trackedSegment = None
    def _updateRTT(self, sampleRTT): # Updates Estimated and Dev RTTs for use in RTO calculation
        # NOTE(review): devRTT is computed from the *already updated*
        # estimatedRTT; RFC 6298 updates devRTT first — confirm intended.
        print('===\nSampleRTT: {}'.format(sampleRTT))
        self.estimatedRTT = (1 - self.alpha) * self.estimatedRTT + self.alpha * sampleRTT
        self.devRTT = (1 - self.beta) * self.devRTT + self.beta * abs(sampleRTT - self.estimatedRTT)
        print('NEW RTO: {}\n===='.format(self.RTO))
| AntFace/COMP3331-assignment | ass/timer.py | timer.py | py | 2,714 | python | en | code | 0 | github-code | 90 |
32800092340 | from selenium import webdriver
import os
from time import sleep
GOOGLE_CHROME_BIN = '/app/.apt/usr/bin/google_chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
def run(df, msg):
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome('/path-to/chromedriver')
# driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
driver.get('https://web.whatsapp.com')
sleep(5)
numbers = df.values.tolist()
data = {}
for num in numbers:
url = "https://web.whatsapp.com/send?phone=91" + str(num[1]) + "&text=" + "Hi " + num[0] + "! " + msg
driver.get(url)
sleep(3)
for i in range(20):
try:
button = driver.find_element_by_xpath("//span[@data-icon='send']")
button.click()
driver.execute_script("window.onbeforeunload = function() {};")
data[num[1]] = 1
break
except:
print("not sent yet")
data[num[1]] = 0
sleep(1)
print('Done ' + str(num[1]))
driver.quit()
return data
| debajyotiguha11/AutoText | AutoScrips/scripts.py | scripts.py | py | 1,406 | python | en | code | 1 | github-code | 90 |
74065744616 | import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as vmodels
from base import BaseModel
import copy
class MnistModel(BaseModel):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    ``forward`` returns per-class log-probabilities (log-softmax over
    ``num_classes``).
    """
    def __init__(self, num_classes=10):
        super().__init__()
        # Submodule names are kept stable so existing checkpoints load.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, num_classes)
    def forward(self, x):
        feat = F.relu(F.max_pool2d(self.conv1(x), 2))
        feat = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(feat)), 2))
        flat = feat.view(-1, 320)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
class Resnet_fc(nn.Module):
def __init__(self, nb_classes=0, toFreeze=False):
super(Resnet_fc, self).__init__()
base_model = vmodels.resnet50(pretrained=True)
base_model_copy = copy.deepcopy(base_model)
self.feature_extractor = nn.Sequential(*list(base_model_copy.children())[:-2])
if toFreeze:
for param in self.feature_extractor.parameters():
param.requires_grad = False
else:
for param in self.feature_extractor.parameters():
param.requires_grad = True
self.gap = nn.AvgPool2d(7, 1)
self.linear = nn.Linear(2048, nb_classes)
def forward(self, inputs):
x = self.feature_extractor(inputs)
x = self.gap(x).squeeze(-1).squeeze(-1)
x = self.linear(x)
return x
| learningman7777/CNN-CatClassification | model/model.py | model.py | py | 1,601 | python | en | code | 0 | github-code | 90 |
3225619020 | # coding: utf-8
# 导入相关函数库
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# CNN 前向传播参数
num_channels = 1
conv1_size = 3
conv1_deep = 32
conv2_size = 3
conv2_deep = 64
fc1_nodes = 128
num_classes = 10
# CNN 前向传播过程
def cnn_inference(input_x):
    """CNN forward pass: conv -> pool -> conv -> pool -> fc -> fc (logits).

    Args:
        input_x: float tensor of shape [batch, 28, 28, num_channels].
    Returns:
        Unscaled class logits of shape [batch, num_classes].

    Fixes relative to the original:
      * The fully-connected layers used ``tf.multiply`` (element-wise)
        instead of ``tf.matmul`` (matrix product), which is wrong for
        dense layers and broadcasts to the wrong shape.
      * The flattened size came from ``tf.shape`` (a dynamic tensor),
        which cannot be used as a ``tf.get_variable`` shape; the static
        graph shape is used instead.
    """
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable('weights', [conv1_size, conv1_size, num_channels, conv1_deep],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable('biases', [conv1_deep], initializer=tf.zeros_initializer)
        conv1 = tf.nn.conv2d(input_x, conv1_weights, strides=[1, 2, 2, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weights', [conv2_size, conv2_size, conv1_deep, conv2_deep],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('biases', [conv2_deep], initializer=tf.zeros_initializer)
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 2, 2, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Flatten using the statically-known spatial dimensions.
    pool2_shape = pool2.get_shape().as_list()
    total_nodes = pool2_shape[1] * pool2_shape[2] * pool2_shape[3]
    pool2_flat = tf.reshape(pool2, (-1, total_nodes))
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weights', [total_nodes, fc1_nodes], initializer=tf.truncated_normal_initializer)
        fc1_biases = tf.get_variable('biases', [fc1_nodes], initializer=tf.zeros_initializer)
        fc1 = tf.matmul(pool2_flat, fc1_weights) + fc1_biases
        relu3 = tf.nn.relu(fc1)
    with tf.variable_scope('layer6-output'):
        output_weights = tf.get_variable('weights', [fc1_nodes, num_classes], initializer=tf.truncated_normal_initializer)
        output_biases = tf.get_variable('biases', [num_classes], initializer=tf.zeros_initializer)
        logit = tf.matmul(relu3, output_weights) + output_biases
    return logit
def data_reshape(data):
    """Reshape flat MNIST vectors into NHWC image batches of 28x28x1."""
    return np.reshape(data, [-1, 28, 28, 1])
# load data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# define placeholders for NHWC images and one-hot labels
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, 10])
# inference
y_ = cnn_inference(x)
# define loss function (softmax cross-entropy, clipped to avoid log(0))
scores = tf.nn.softmax(y_)
cross_entropy = -tf.reduce_sum(y*tf.log(tf.clip_by_value(scores,1e-30,1.0)),1)
loss = tf.reduce_mean(cross_entropy)
learning_rate = 0.001
global_step = tf.Variable(1, trainable=False)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
# initializing all variables and training the model
batch_size = 300
training_epochs = 3000
display_epoch = 10
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        train_x, train_y = mnist.train.next_batch(batch_size)
        _, training_loss = sess.run([optimizer, loss], feed_dict={x: data_reshape(train_x), y: train_y})
        if (epoch+1) % display_epoch == 0:
            # Bug fix: the format string has two placeholders but was given
            # only one argument, raising IndexError at the first report.
            print('After {} epochs, loss on training data is {}'.format(epoch+1, training_loss))
354332007 | from aiohttp import web
routes = web.RouteTableDef()
@routes.get('/root', name='root')
async def handler(request):
return web.Response(text='Whats up?')
url = request.app.router['user-info'].url_for(user='john_doe')
url_with_qs = url.with_query("a=b")
assert url_with_qs == '/john_doe/info?a=b'
app = web.Application()
app.add_routes(routes)
if __name__ == '__main__':
web.run_app(app)
| JaviMerino-11/tutorial_API | named_resources.py | named_resources.py | py | 404 | python | en | code | 0 | github-code | 90 |
434426678 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 16:31:59 2019
@author: agos6
"""
import numpy as np
inputFile = "C:/Users/agos6/Desktop/Aurore/SD/texte.txt"
class node():
"Define tree by root, nodes, leaves, and links, which are splitting criteria"
nodes_numbers = 0
def __init__(self):
self.value = None
self.attribute= None
self.data = None
self.classe = None
self.gain = None
self.parent=[]
self.haschildren = 0
self.left=None
self.right = None
self.level = None
def D(inputFile):
f = open(inputFile,'r')
D = []
for lines in f :
D.append(lines.split())
A = D[0]
del A[-1]#la colonne classe n'est pas un attribut donc on le retire de la liste des attributs
del D[0]
A = [A]
print(D)
def getMaxLevel(T):
return T.children[-1].level
def copyTree(rootTree):
P = rootTree
return P
def PostPrune(T,alpha, minNum, d):
levelMaximum = getMaxLevel(T)
for level in range(levelMaximum, 1, -1) :
for node in T.children :
if node is not None :
gen_T = gen_error(T.data, T, alpha)
P = copyTree(T)
if (node.level == level):
node.haschildren = 0
node.classe = determineClass(node, minNum, d)
new_P, listRemove, listIndex = deleteChildren(node, P)
gen_P = gen_error([],new_P,alpha)
if (gen_P<gen_T):
T = new_P
else : #we need to rebuild the tree as before to bring no changes
rebuildChildren(listRemove, listIndex, T)
return T
def deleteChildren(node, tree): #delete the children of a node in the tree
listToRemove=[]
listIndex = []
for nodee in tree.children :
for parent in nodee.parent :
if (parent == node):
listToRemove.append(nodee)
listIndex.append(tree.children.index(nodee))
for children in listToRemove :
tree.children.remove(children)
return tree, listToRemove, listIndex
def rebuildChildren(listToRemove, listIndex, tree):
for k in range(len(listToRemove)):
tree.children.insert(listIndex[k], listToRemove[k])
return tree
def determineClass(node, minNum,d):
D = node.data
if(len(D)!=0):
numRecords = len(D)-1
classIndex = len(D[0])-1
#First case : all records have same class
classe=D[0][classIndex]
classIndex = len(D[0])-1
somme = 0
for i in range(0,len(D)):
if (D[i][classIndex]==classe):
somme+=1
node.classe = D[0][classIndex]
if (somme == numRecords):
return classe
#Second case : number of records in D < minNum
elif(numRecords < minNum):
return d
#Third case : classe determined by majority
else :
classe = majorityClass(node)
return classe
def majorityClass(node):
dataSet = node.data
classIndex = len(dataSet[0])-1
num0 = countOccurenceClass(dataSet,0,classIndex)
num1 = countOccurenceClass(dataSet, 1, classIndex)
if (num0>=num1):
return 0
else :
return 1
def gen_error(dataTest, inputTree, alpha):
numLeaves, error = leavesCount(inputTree, alpha) #counts the number of leaves of a tree
dataTrain= inputTree.data
N = len(dataTrain) #works out the number of records
generalisationError = (error + numLeaves*alpha)/N
return generalisationError
def leavesCount(inputTree, alpha):
count = 0
error = 0
for nodee in inputTree.children:
if nodee.haschildren==0 :
count+=1
classe = nodee.classe
data = nodee.data
error+=countOccurenceClass(data, 1-int(classe), len(data[0])-1)
return count, error
def countOccurenceClass(listsplit, valueClass, classIndex):
    """Count records whose class column (at *classIndex*) equals
    *valueClass*.

    Values are coerced with ``int`` before comparison, matching the mixed
    str/int class labels used elsewhere in this module.
    """
    return sum(1 for record in listsplit
               if int(record[classIndex]) == valueClass)
| auroregosmant/SD201---Mining-of-Large-Datasets | decision trees q3.py | decision trees q3.py | py | 4,436 | python | en | code | 0 | github-code | 90 |
18364947249 | n = int(input())
a, b = [list(map(int, input().split())) for _ in range(2)]
tmp, cnt = 0, 0
for i in range(n):
tmp = min(a[i] - tmp, b[i])
cnt += tmp
tmp = min(a[i + 1], b[i] - tmp)
cnt += tmp
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p02959/s646368610.py | s646368610.py | py | 220 | python | en | code | 0 | github-code | 90 |
35716364381 | """
Simple Code Example on how to use the CompanyIndexReader
"""
import pandas as pd
from secfsdstools.c_index.companyindexreading import CompanyIndexReader
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def indexreader():
""" CompanyIndexReader example"""
apple_cik = 320193
apple_index_reader = CompanyIndexReader.get_company_index_reader(cik=apple_cik)
print(apple_index_reader.get_latest_company_filing())
print(apple_index_reader.get_all_company_reports_df(forms=["10-K"]))
def run():
"""launch method"""
indexreader()
if __name__ == '__main__':
run()
| HansjoergW/sec-fincancial-statement-data-set | secfsdstools/x_examples/example_companyindexreader.py | example_companyindexreader.py | py | 672 | python | en | code | 12 | github-code | 90 |
72201307818 |
# | [2235](https://leetcode.com/problems/add-two-integers/description/) | [Add Two Integers](/LeetCode/Easy/2235.%20Add%20Two%20Integers/) | [Python](/LeetCode/Easy/2235.%20Add%20Two%20Integers/2235.%20Add%20Two%20Integers.py) | [Facebook](/Facebook/), [Google](/Google/), [Amazon](/Amazon/), [Apple](/Apple/)| Math | |
# | [2235](https://leetcode.com/problems/add-two-integers/description/) | [Add Two Integers](/LeetCode/Easy/2235.%20Add%20Two%20Integers/) | Hard | [Python](/LeetCode/Easy/2235.%20Add%20Two%20Integers/2235.%20Add%20Two%20Integers.py) | Math | |
#######################################
#######################################
hardest = 'Easy' # Medium, Hard
title = "1640. Check Array Formation Through Concatenation"
companies = [3] ## '1. Amazon', '2. Apple', '3. Facebook', '4. Google', '5. Microsoft'
url = 'https://leetcode.com/problems/check-array-formation-through-concatenation/description/'
topic = ['Array', 'Hashmap']
related = ''
#######################################
#######################################
hashmap = {1 : '[Amazon](/Amazon/)', 2 : '[Apple](/Apple/)', 3 : '[Facebook](/Facebook/)', 4 : '[Google](/Google/)', 5 : '[Microsoft](/Microsoft/)'}
title = title.strip()
titleNum = title.split('.')[0]
titleWords = title.split('.')[1].strip()
titlepath = '%20'.join([titleNum + '.'] + titleWords.split(' '))
path1 = '/LeetCode/' + hardest + '/' + titlepath + '/'
path2 = path1 + titlepath + '.py'
sec1 = '| [' + titleNum + ']' + '(' + url + ') '
sec2 = ' [' + titleWords + ']' + '(' + path1 + ') '
sec3 = ' ' + hardest + ' '
sec4 = ' [Python]' + '(' + path2 + ') '
sec5Array = [hashmap[x] for x in companies]
sec5 = ' ' + (', ').join(sec5Array) + ' '
sec6 = ' ' + (', ').join(topic) + ' '
sec7 = ' ' + related + ' |'
overallReadme = '|'.join([sec1, sec2, sec4, sec5, sec6, sec7])
companyReadme = '|'.join([sec1, sec2, sec3, sec4, sec6, sec7])
import os
os.mkdir('LeetCode/' + hardest + '/' + title + '/')
with open('LeetCode/' + hardest + '/' + title + '/' + title + '.py', 'w') as f:
f.write('# ' + hardest)
def prepend_line(file_name, insertline):
    """
    Insert `insertline` into a README-style markdown file, keeping the
    problem table sorted by problem number, and bump the problem count
    in the '# <difficulty> (N problems)' header line.

    Rewrites the file via a temporary '<file>.bak' copy, then replaces
    the original.

    NOTE: reads the module globals `titleNum` (the new problem's number)
    and `hardest` (difficulty label) rather than taking them as
    parameters.
    """
    # define name of temporary dummy file
    dummy_file = file_name + '.bak'
    # open original file in read mode and dummy file in write mode
    with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:
        # Read lines from original file one by one and append them to the dummy file
        prev = ''
        insertNum = int(titleNum)
        flag = True
        # NOTE(review): duplicate re-initialisation of `prev`; harmless.
        prev = ''
        for line in read_obj:
            # Header line like '# Easy (N problems)': increment N.
            if line[0] == '#' and line[2].isalpha():
                totalNumber = line.split('(')[1].split(' ')[0]
                totalNumber = str(int(totalNumber) + 1)
                #print (totalNumber)
                newLine = '# ' + hardest + ' (' + totalNumber + ' problems)'
                prev = newLine
                write_obj.write(newLine + '\n')
                continue
            # Table row like '| [123]...': insert before the first row
            # whose problem number exceeds ours (keeps table sorted).
            if line[0] == '|' and line[2] == '[' and flag:
                curNum = int(line[3:8].split(']')[0])
                if insertNum < curNum:
                    write_obj.write(insertline + '\n')
                    flag = False
            # Past the last table row and still not inserted: append now.
            elif flag and prev and prev[0] == '|' and prev[2] == '[':
                write_obj.write(insertline + '\n')
                flag = False
            prev = line
            write_obj.write(line)
    # remove original file
    os.remove(file_name)
    # Rename dummy file as the original file
    os.rename(dummy_file, file_name)
prepend_line('LeetCode/' + hardest + '/README.md', overallReadme)
# LeetCode/Easy/README.md
hashmap2 = {1 : 'Amazon/', 2 : 'Apple/', 3 : 'Facebook/', 4 : 'Google/', 5 : 'Microsoft/'}
for company in companies:
comName = hashmap2[company]
tempPath = comName + 'README.md'
# Amazon/README.md
prepend_line(tempPath, companyReadme)
| ArmanTursun/coding_questions | generate_readme.py | generate_readme.py | py | 3,926 | python | en | code | 0 | github-code | 90 |
22719441242 | from copy import deepcopy
from typing import Dict
import cv2
import numpy as np
import open3d as o3d
def get_aruco_masks(image: np.ndarray) -> Dict[int, np.ndarray]:
    """
    Detect 6x6 ArUco markers (DICT_6X6_50) in `image` and return one
    binary mask per detected marker, keyed by marker id.

    Each mask has the image's height/width, dtype uint8, with 1 inside
    the marker quadrilateral and 0 elsewhere. Returns an empty dict when
    no markers are found.
    """
    # Define the ArUco dictionary and detector parameters
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50)
    parameters = cv2.aruco.DetectorParameters()
    detector = cv2.aruco.ArucoDetector(dictionary, parameters)
    # Detect the markers in the image
    corners, ids, rejected = detector.detectMarkers(image)
    masks = {}
    # If markers are detected
    if ids is not None:
        for i, corner in enumerate(corners):
            current_id = ids[i][0]
            # Collapse the (1, 4, 2) corner array to a (4, 2) integer contour.
            contour = np.squeeze(corner).astype(int)  # Reshape corner list
            current_mask = np.zeros_like(image[:, :, 0], dtype=np.uint8)
            cv2.fillPoly(
                current_mask, [contour], 1
            )  # Fill the detected marker with white color on the mask
            masks[current_id] = current_mask
    return masks
def colorize_mask(mask: np.ndarray) -> np.ndarray:
    """Turn a binary (0/1) mask into a displayable 3-channel uint8 image."""
    scaled = mask * 255
    three_channel = np.stack((scaled, scaled, scaled), axis=-1)
    return three_channel.astype(np.uint8)
def get_marker_pcd(
    pcd_o3d: o3d.geometry.PointCloud, masks: Dict[int, np.ndarray], width: int, height: int
) -> Dict[int, o3d.geometry.PointCloud]:
    """
    Extract, for each marker mask, the sub-point-cloud covered by that mask.

    Assumes `pcd_o3d` is an organized cloud with exactly height*width
    points in row-major image order (the reshape below relies on this —
    TODO confirm against the capture pipeline).

    :param pcd_o3d: full-frame point cloud aligned with the RGB image
    :param masks: marker id -> binary (H, W) mask from get_aruco_masks()
    :param width: image width in pixels
    :param height: image height in pixels
    :return: marker id -> point cloud containing only the masked points
    """
    marker_pcds = {}
    # Convert the o3d.geometry.PointCloud points to numpy ndarray with the given width and height
    pcd_points = np.asarray(pcd_o3d.points).reshape((height, width, 3))
    # Convert the o3d.geometry.PointCloud colors to numpy ndarray with the same shape
    pcd_colors = np.asarray(pcd_o3d.colors).reshape((height, width, 3))
    for m_id, mask in masks.items():
        # Find the bounding box of the mask
        rows, cols = np.where(mask)
        min_row, max_row = np.min(rows), np.max(rows)
        min_col, max_col = np.min(cols), np.max(cols)
        # Crop the point cloud data
        cropped_pcd_points = pcd_points[min_row : max_row + 1, min_col : max_col + 1]
        # Crop the color data
        cropped_pcd_colors = pcd_colors[min_row : max_row + 1, min_col : max_col + 1]
        # Crop the mask and expand its dimensions
        cropped_mask_expanded = mask[min_row : max_row + 1, min_col : max_col + 1, np.newaxis]
        # Apply the mask to the cropped point cloud and color data using broadcasting
        masked_pcd_points = cropped_pcd_points * cropped_mask_expanded
        masked_pcd_colors = cropped_pcd_colors * cropped_mask_expanded
        # Extract the valid points and colors
        valid_points = masked_pcd_points[cropped_mask_expanded[..., 0].astype(bool)]
        valid_colors = masked_pcd_colors[cropped_mask_expanded[..., 0].astype(bool)]
        # Create an Open3D point cloud, assign points and colors, and add to dictionary
        new_pcd_o3d = o3d.geometry.PointCloud()
        new_pcd_o3d.points = o3d.utility.Vector3dVector(valid_points)
        new_pcd_o3d.colors = o3d.utility.Vector3dVector(valid_colors)
        marker_pcds[m_id] = new_pcd_o3d
    return marker_pcds
def project_points_onto_plane(
    pcd: o3d.geometry.PointCloud, plane_model: list
) -> o3d.geometry.PointCloud:
    """
    Orthogonally project every point of `pcd` onto the plane
    ax + by + cz + d = 0 described by `plane_model` = [a, b, c, d].

    Returns a new point cloud (colors are not copied).
    """
    # Extract plane parameters
    a, b, c, d = plane_model
    plane_normal = np.array([a, b, c])
    plane_normal /= np.linalg.norm(plane_normal)  # Normalize the plane normal
    # For the point on the plane, we can arbitrarily pick z=0 to get the x and y.
    # If c is not zero: -d/c will give z coordinate
    # This is just an arbitrary point on the plane.
    # NOTE(review): when c == 0 the fallback (0, 0, 0) lies on the plane
    # only if d == 0 — confirm callers never pass vertical planes.
    x0, y0, z0 = 0, 0, -d / c if c != 0 else 0
    point_on_plane = np.array([x0, y0, z0])
    # Project each point in the point cloud onto the plane:
    # p' = p - ((p - p0) . n) * n  with n the unit normal.
    points = np.asarray(pcd.points)
    projected_points = (
        points - np.dot((points - point_on_plane), plane_normal)[:, np.newaxis] * plane_normal
    )
    # Create a new point cloud for the projected points
    projected_pcd = o3d.geometry.PointCloud()
    projected_pcd.points = o3d.utility.Vector3dVector(projected_points)
    return projected_pcd
def get_marker_transformation_matrix(
    plane_pcd: o3d.geometry.PointCloud, plane_model: np.ndarray
) -> np.ndarray:
    """
    Build a 4x4 homogeneous pose for a planar marker point cloud.

    The frame's z-axis is the plane normal, the origin is the cloud
    centroid projected onto the plane, and x/y are taken from the
    minimal oriented bounding box (the candidate closer to world x is
    chosen as x, then re-orthogonalised against z).

    :param plane_pcd: points belonging to the marker plane
    :param plane_model: [a, b, c, d] plane coefficients
    :return: 4x4 transformation matrix (columns: x, y, z axes, origin)
    """
    centroid = np.mean(np.asarray(plane_pcd.points), axis=0)
    a, b, c, d = plane_model
    # Signed distance from the centroid to the plane; assumes (a, b, c)
    # is already unit length (true for segment_plane output — TODO confirm).
    distance_to_plane = (a * centroid[0] + b * centroid[1] + c * centroid[2] + d) / np.sqrt(
        a**2 + b**2 + c**2
    )
    centroid_on_plane = centroid - distance_to_plane * np.array([a, b, c])
    # Normalize z_axis (normal of the plane)
    z_axis = np.array(plane_model[:3])
    z_axis /= np.linalg.norm(z_axis)
    # Extract the x_axis and y_axis directly from the rotation matrix of the bounding box.
    bb = plane_pcd.get_minimal_oriented_bounding_box()
    bb_r = np.asarray(bb.R).copy()
    x_axis_candidate = bb_r[:, 0]
    y_axis_candidate = bb_r[:, 1]
    world_x_axis = np.array([1, 0, 0])
    # Pick whichever bounding-box axis is better aligned (up to sign)
    # with world x to serve as the frame's x-axis.
    angle_x = np.arccos(np.dot(world_x_axis, x_axis_candidate))
    angle_y = np.arccos(np.dot(world_x_axis, y_axis_candidate))
    if angle_y < angle_x or angle_y < (np.pi - angle_x):
        x_axis_candidate, y_axis_candidate = y_axis_candidate, x_axis_candidate
    # Gram-Schmidt: remove the z component so x is exactly in-plane.
    x_axis = x_axis_candidate - np.dot(x_axis_candidate, z_axis) * z_axis
    x_axis /= np.linalg.norm(x_axis)
    y_axis = np.cross(z_axis, x_axis)
    transformation_matrix = np.eye(4)
    transformation_matrix[:3, 0] = x_axis
    transformation_matrix[:3, 1] = y_axis
    transformation_matrix[:3, 2] = z_axis
    transformation_matrix[:3, 3] = centroid_on_plane
    return transformation_matrix
# cv2.namedWindow("find_marker", cv2.WINDOW_NORMAL)
rgb = cv2.imread("/home/v/capture/marker_rgb.png")
# cv2.imshow("find_marker", rgb)
# cv2.waitKey(0)
pcd = o3d.io.read_point_cloud("/home/v/capture/marker_pcd.ply")
o3d.visualization.draw_geometries([deepcopy(pcd).remove_non_finite_points()])
masks = get_aruco_masks(rgb)
# for _, mask in masks.items(): # Extract just the mask from the dictionary item
# cv2.imshow("find_marker", np.vstack([rgb, colorize_mask(mask)]))
# cv2.waitKey(0)
# cv2.destroyAllWindows()
height = rgb.shape[0]
width = rgb.shape[1]
marker_pcds = get_marker_pcd(pcd, masks, width, height)
marker_pcds = {m_id: pcd_o3d.remove_non_finite_points() for m_id, pcd_o3d in marker_pcds.items()}
o3d.visualization.draw_geometries(list(marker_pcds.values()))
for _, marker in marker_pcds.items():
plane_model, inliers = marker.segment_plane(
distance_threshold=0.001, ransac_n=3, num_iterations=1000
)
# marker = project_points_onto_plane(marker, plane_model)
marker_tf = get_marker_transformation_matrix(marker, plane_model)
tf_vis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1).transform(marker_tf)
o3d.visualization.draw_geometries(
[
marker.select_by_index(inliers).paint_uniform_color([1.0, 0, 0]),
marker.select_by_index(inliers, invert=True),
tf_vis,
]
)
| vinceHuyghe/MRAC_ur_commander | capture_manager/scripts/marker.py | marker.py | py | 7,003 | python | en | code | 2 | github-code | 90 |
34731092857 | #!/usr/bin/env python3
""" Trains a Deep Q-Network (DQN) to play Atari's "Breakout." """
import gym
from keras import layers, models, optimizers
from rl import agents, memory, policy
total_training_steps = 10_000
memory_limit = 100_000
game_environment = gym.make(
"ALE/Breakout-v5",
disable_env_checker=True,
obs_type='grayscale',
#render_mode='human',
)
# The number of actions available to take in the game:
action_count = game_environment.action_space.n
# The resolution of the game screen in pixels:
game_x_resolution, game_y_resolution = game_environment.observation_space.shape
# The number of color channels (only used with "rbg" observation type):
# color_channels = 3
# The number of frames in an input window:
frames = 4
input_shape = (frames, game_x_resolution, game_y_resolution)
model = models.Sequential([
layers.Input(shape=input_shape),
layers.Conv2D(32, 8, strides=4, activation="relu",
data_format='channels_first'), # Channels are first when grayscale
layers.Conv2D(64, 4, strides=2, activation="relu",
data_format='channels_first'),
layers.Conv2D(64, 3, strides=1, activation="relu",
data_format='channels_first'),
layers.Flatten(),
layers.Dense(512, activation="relu"),
layers.Dense(action_count, activation="linear"),
])
agent = agents.DQNAgent(
model=model, # This works with keras-rl2 & TF2
nb_actions=action_count,
policy=policy.EpsGreedyQPolicy(),
memory=memory.SequentialMemory(memory_limit, window_length=frames)
)
agent.compile(optimizers.Adam(learning_rate=0.00025, clipnorm=1))
if __name__ == '__main__':
agent.fit(game_environment, total_training_steps, verbose=2)
agent.save_weights('policy.h5')
| keysmusician/holbertonschool-machine_learning | reinforcement_learning/0x01-deep_q_learning/train.py | train.py | py | 1,733 | python | en | code | 1 | github-code | 90 |
# Competitive-programming solution: from a multiset of stick lengths,
# print the largest rectangle area formable from two pairs of equal
# sticks (a count >= 4 allows a square), or 0 if impossible.
import collections
_=input()
a=list(map(int,input().split()))
c=collections.Counter(a)
# Distinct lengths with their counts, sorted ascending by length.
l=sorted(c.items(), key=lambda x: x[0])
x=0  # longest length seen so far that has at least one pair
for i in l[::-1]:  # scan lengths from largest to smallest
    if i[1]>3:
        # Four or more sticks of this length: either pair it with the
        # longer pair found earlier, or use it alone as a square.
        if x:
            print(i[0]*x)
            exit(0)
        else:
            print(i[0]*i[0])
            exit(0)
    elif i[1]>1:
        # Exactly one pair available at this length.
        if x:
            print(i[0]*x)
            exit(0)
        else:
            x=i[0]
print(0)
# Reads a macro split (fat/protein/carb percentages), a total calorie
# target and a water percentage, then prints the calories per gram of
# the prepared food after accounting for water weight.
fat_percent=int(input())/100
proteins_percent=int(input())/100
carbons_percent=int(input())/100
calories_total=int(input())
water_percentage=int(input())/100
# Grams per macro: fat is 9 kcal/g, protein and carbs are 4 kcal/g.
fat_grams=(fat_percent*calories_total)/9
proteins_grams=(proteins_percent*calories_total)/4
carbons_grams=(carbons_percent*calories_total)/4
sum_grams=fat_grams+proteins_grams+carbons_grams
one_gram_calories=calories_total/sum_grams
# Water contributes weight but no calories, diluting the density.
print_calories=one_gram_calories-water_percentage*one_gram_calories
print(format(print_calories, ".4f"))
class SegmentTree(object):
    """
    0-indexed segment tree over the bitwise-OR monoid.

    Supports point assignment and half-open range queries, each in
    O(log N).
    """
    def __op(self, lhs, rhs):
        """Binary operation folded over segments (bitwise OR)."""
        return lhs | rhs

    def __init__(self, init_ele, N:int):
        """
        Build a tree with `N` leaves, all set to the identity `init_ele`.
        """
        self.__identity = init_ele
        size = 1
        while size < N:
            size *= 2
        self.__size = size
        self.__tree = [init_ele for _ in range(2 * size)]

    def update(self, k:int, x):
        """Assign value x to leaf k, then recompute its ancestors."""
        node = k + self.__size - 1
        self.__tree[node] = x
        while node > 0:
            node = (node - 1) // 2
            left = self.__tree[2 * node + 1]
            right = self.__tree[2 * node + 2]
            self.__tree[node] = self.__op(left, right)

    def query(self, p:int, q:int):
        """Fold the operation over the interval [p, q)."""
        if q <= p:
            return self.__identity
        return self.__fold(p, q, 0, 0, self.__size)

    def __fold(self, p, q, node, lo, hi):
        """Recursively combine tree nodes covering [p, q) within [lo, hi)."""
        if q <= lo or hi <= p:
            return self.__identity
        if p <= lo and hi <= q:
            return self.__tree[node]
        mid = (lo + hi) // 2
        left_val = self.__fold(p, q, 2 * node + 1, lo, mid)
        right_val = self.__fold(p, q, 2 * node + 2, mid, hi)
        return self.__op(left_val, right_val)

    def get_num(self, k:int):
        """Return the current value stored at leaf k."""
        return self.__tree[k + self.__size - 1]
############################################################
N = int(input())
ST = SegmentTree(0, N)
for i, s in enumerate(list(input())):
ST.update(i, 1 << (ord(s) - ord('a')))
Q = int(input())
for _ in range(Q):
x,y,z = map(str,input().split())
x = int(x); y = int(y)
if x == 1:
ST.update(y - 1, 1 << (ord(z) - ord('a')))
else:
z = int(z)
print(bin(ST.query(y - 1, z)).count("1")) | Aasthaengg/IBMdataset | Python_codes/p02763/s369010697.py | s369010697.py | py | 2,013 | python | ja | code | 0 | github-code | 90 |
18509292649 | import sys
def input():
    # Shadows the builtin for faster reads; drops the final character of
    # each line (the trailing '\n' — would eat a real character if the
    # stream's last line lacks a newline).
    return sys.stdin.readline()[:-1]
def main():
    """Read a rating from stdin and print the matching contest tier."""
    rating = int(input())
    if rating < 1200:
        tier = "ABC"
    elif rating < 2800:
        tier = "ARC"
    else:
        tier = "AGC"
    print(tier)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03288/s500898753.py | s500898753.py | py | 298 | python | en | code | 0 | github-code | 90 |
38853106205 | import numpy as np
from sources.base_source import BaseSource
from models import modelw2v, W2V_SIZE
class Word2VecAvgSource(BaseSource):
    """Data source that represents each text as the average of its
    word2vec word embeddings."""

    def get_generator(self, X, y, batch_size=32):
        """
        Build an infinite batch generator over (X, y).

        Each sample becomes the mean word2vec vector of its in-vocabulary
        words (zero vector if none are known). Cycles over the data
        forever; yields (batch_x, batch_y) arrays of length `batch_size`.

        NOTE(review): returns the generator *function*, not an iterator —
        callers apparently invoke it themselves; confirm at call sites.
        """
        def generator():
            idx = 0
            batch_x = []
            batch_y = []
            while True:
                x_current = X[idx]
                y_current = y[idx]
                y_vec = y_current
                # Average the embeddings of all known words in the sample.
                x_vec = np.zeros(shape=(W2V_SIZE,))
                word_cnt = 0
                for word in x_current:
                    if word in modelw2v:
                        x_vec += modelw2v[word]
                        word_cnt += 1
                if word_cnt != 0:
                    x_vec /= word_cnt
                batch_x.append(x_vec)
                batch_y.append(y_vec)
                # Emit only full batches; a partial batch is carried over
                # into the next pass through the data.
                if len(batch_x) == batch_size:
                    yield np.array(batch_x), np.array(batch_y)
                    batch_x = []
                    batch_y = []
                idx += 1
                if idx == len(X):
                    idx = 0
        return generator
5026431525 | from api_manager.Project.Database import Worker as worker
class DatabaseManager:
    """
    Facade over DatabaseWorker that caches product/request data and
    provides the query helpers used by the service layer.

    NOTE(review): all queries are built by string interpolation; if
    `product`/`request` can carry untrusted input this is SQL injection —
    switch to the worker's parameter binding if it has one.
    """

    def __init__(self):
        self.worker = worker.DatabaseWorker()
        self.products = []        # cached rows from "Product"
        self.request_texts = ""   # newline-joined texts from "Request"
        self.updateProducts()
        self.updateRequestTexts()

    def updateProducts(self):
        """Refresh the cached product rows from the "Product" table."""
        data = self.worker.select('SELECT name FROM "Product"')
        for row in data:
            # Rows are appended as returned (1-tuples); kept as-is since
            # existing callers may rely on that shape.
            self.products.append(row)

    def updateRequestTexts(self):
        """Rebuild the cached newline-joined string of all request texts."""
        data = self.worker.select('SELECT text FROM "Request"')
        self.request_texts = ""
        for row in data:
            self.request_texts += '\n' + row[0]

    def addRequest(self, product, request):
        """
        Insert a new request text and link it to `product`.

        Bug fix: the association insert previously targeted a mis-quoted
        table name ('"Product_Request "', trailing space) and re-inserted
        the request text instead of the (request_id, product_id) pair
        that updateRequestStatistics() queries against.
        """
        self.worker.insert('INSERT INTO "Request" '
                           '(text, count_received) VALUES '
                           f'(\'{request}\', 1);')
        request_id = self.worker.select('SELECT id FROM "Request" '
                                        f'WHERE text=\'{request}\'').pop()[0]
        product_id = self.worker.select('SELECT id FROM "Product" '
                                        f'WHERE name=\'{product}\'').pop()[0]
        self.worker.insert('INSERT INTO "Product_Request" '
                           '(request_id, product_id, count_received) VALUES '
                           f'({request_id}, {product_id}, 1);')
        self.updateRequestTexts()

    def updateRequestStatistics(self, product, request):
        """Increment the received counters for an already-known request."""
        self.worker.update('UPDATE "Request" SET '
                           'count_received=count_received+1 '
                           f'WHERE text=\'{request}\'')
        request_id = self.worker.select('SELECT id FROM "Request" '
                                        f'WHERE text=\'{request}\'')
        request_id = request_id.pop()[0]
        product_id = self.worker.select('SELECT id FROM "Product" '
                                        f'WHERE name=\'{product}\'')
        product_id = product_id.pop()[0]
        self.worker.update('UPDATE "Product_Request" SET '
                           'count_received=count_received+1 '
                           f'WHERE request_id={request_id} AND product_id={product_id}')

    def getMostPopularRequests(self, limit, product=""):
        """
        Return up to `limit` request texts ordered by popularity, optionally
        restricted to `product`.

        Bug fixes: sort is now DESC (ascending returned the *least*
        popular requests) and the product branch honours `limit` instead
        of a hard-coded LIMIT 5.
        """
        if product == "":
            query = self.worker.select('SELECT text FROM "Request" '
                                       'ORDER BY "Request".count_received DESC '
                                       f'LIMIT {limit}')
        else:
            query = self.worker.select('SELECT "Request".text FROM "Request" '
                                       f'INNER JOIN "Product" ON "Product".name=\'{product}\' '
                                       'INNER JOIN "Product_Request" ON "Product".id="Product_Request".product_id '
                                       'WHERE "Request".id="Product_Request".request_id '
                                       'ORDER BY "Request".count_received DESC '
                                       f'LIMIT {limit}')
        return query

    def getLowSellingProducts(self, limit):
        """Return `limit` product names — currently a random sample,
        presumably a placeholder until sales data exists (TODO confirm)."""
        query = self.worker.select('SELECT name FROM "Product" '
                                   'ORDER BY random() '
                                   f'LIMIT {limit}')
        return query

    def getSimilarProducts(self, limit, product):
        """Return up to `limit` random other products of the same type."""
        product_type = self.worker.select('SELECT type FROM "Product" '
                                          f'WHERE name=\'{product}\'')[0][0]
        # Bug fix: a space was missing before ORDER BY in the original
        # concatenation ("...type='x'ORDER BY...").
        query = self.worker.select('SELECT name FROM "Product" '
                                   f'WHERE name!=\'{product}\' AND type=\'{product_type}\' '
                                   'ORDER BY random() '
                                   f'LIMIT {limit}')
        return query
| Sunests/ml_system_design_2023 | api_manager/Project/Database/Manager.py | Manager.py | py | 3,310 | python | en | code | 0 | github-code | 90 |
26217498327 | import smtplib
import pprint
def send_email(message):
    """
    Send `message` from the hard-coded Gmail account to itself over
    SMTP+STARTTLS. Returns a status string instead of raising.

    SECURITY(review): credentials are hard-coded in source — move them
    to environment variables / a secrets store and rotate this password.
    """
    sender = "ivankosarev07@gmail.com"
    pasword = "310970qq"
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    try:
        server.login(sender, pasword)
        # Sender and recipient are the same address.
        server.sendmail(sender,sender,message)
        return "отправлено"
    except Exception as _ex:
        # Any failure (auth, network, ...) is reported via the return value.
        return f"{_ex}\жопа"
def main():
    """Prompt for a message on stdin and print the send outcome."""
    text = input("сообщениe: ")
    outcome = send_email(message=str(text))
    print(outcome)

if __name__ == "__main__":
    main()
| vankosarev/vankosarev | nachalo.py | nachalo.py | py | 561 | python | en | code | 0 | github-code | 90 |
# Competitive-programming solution: on an H x W grid with M targets,
# choose the cell (any cell) whose row+column covers the most targets.
# Answer = max row count + max column count, minus 1 only when every
# (best row, best column) intersection already holds a target.
import sys
input = sys.stdin.buffer.readline
H, W, M = map(int, input().split())
X = [0] * W  # targets per column
Y = [0] * H  # targets per row
Map = []
for _ in range(M):
    h, w = map(int, input().split())
    h -= 1
    w -= 1
    Y[h] += 1
    X[w] += 1
    Map.append((h, w))
MX = max(X)
MY = max(Y)
ans = MX + MY
# Collect all columns/rows achieving the maximum counts.
Xans = set()
Yans = set()
for i, x in enumerate(X):
    if x == MX:
        Xans.add(i)
for i, y in enumerate(Y):
    if y == MY:
        Yans.add(i)
# Count targets sitting on a (best row, best column) intersection; if
# every such intersection is occupied, one target is double-counted.
cnt = 0
for h, w in Map:
    if h in Yans and w in Xans:
        cnt += 1
if cnt == len(Xans) * len(Yans):
    ans -= 1
print(ans)
25238868181 | # This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from tqdm import tqdm
import time
import progressbar
from tkinter import *
from tkinter import ttk
import time
import threading
from tkinter.ttk import *
import tkinter
import timeit
#https://www.youtube.com/watch?v=0WRMYdOwHYE
def start():
tasks = 10 #In unserem Beispiel könnten das die Rechenoperationen sein, welche wir zu der Zeit durchlaufen
x = 0 #x stellt die Rechenoperation dar, welche gerade ausgeführt wird
button['state']='disabled'
while (x < tasks):
time.sleep(1)
bar['value'] += 10 #die zehn sind hier ein Beispiel, wenn es 5 Operationen wären bspw 20
x+=1 #inkrementieren nach jeder ausgeführten Rechenoperation
percent.set(str((x/tasks)*100)+"%")
window.update_idletasks()
def quit():
window.destroy()
window = Tk()
window.title("Stauraumplanung")
### Window size
window.width = 500
window.height = 50
window.eval('tk::PlaceWindow . center')
percent = StringVar()
bar = Progressbar(window,orient=HORIZONTAL,length=400)
bar.pack(pady=10)
percentlabel = Label(window,textvariable=percent).pack()
button = Button(window,text="Download",command=start)
button2 = Button(window,text="Close",command=quit)
button.pack() #evtl pack(pady=10)
button2.pack()
window.mainloop()
########################################################################################################
#Sammlung Lara
#gute Beispiele für mip: https://python-mip.readthedocs.io/en/latest/examples.html
#https://python-mip.readthedocs.io/en/latest/classes.html
from sys import stdout
from mip import Model, xsum, minimize, maximize, BINARY
from itertools import product
import numpy as np
#______________________________________________________________________________________________________
#### Schach-Beispiel ####
# number of queens
"""n = 40
queens = Model()
x = [[queens.add_var('x({},{})'.format(i, j), var_type=BINARY)
for j in range(n)] for i in range(n)]
# one per row
for i in range(n):
queens += xsum(x[i][j] for j in range(n)) == 1, 'row({})'.format(i)
# one per column
for j in range(n):
queens += xsum(x[i][j] for i in range(n)) == 1, 'col({})'.format(j)
# diagonal \
for p, k in enumerate(range(2 - n, n - 2 + 1)):
queens += xsum(x[i][i - k] for i in range(n)
if 0 <= i - k < n) <= 1, 'diag1({})'.format(p)
# diagonal /
for p, k in enumerate(range(3, n + n)):
queens += xsum(x[i][k - i] for i in range(n)
if 0 <= k - i < n) <= 1, 'diag2({})'.format(p)
queens.optimize()
if queens.num_solutions:
stdout.write('\n')
for i, v in enumerate(queens.vars):
stdout.write('O ' if v.x >= 0.99 else '. ')
if i % n == n-1:
stdout.write('\n')"""
"""#Abfragen zum Testen im Notebook
queens.vars[17].x
queens.vars[40*40-1] #letzte Zahl innerhalb der range bei n=40
queens.vars[7*40+1].x
queens.vars[3*40-1].x
queens.vars[7*40+1]
queens.vars['x(7,1)']
print(queens.vars['x(7,1)'],'\t',queens.vars['x(7,1)'].x)
print(queens.vars[7*40+1],'\t',queens.vars[7*40+1].x)"""
#______________________________________________________________________________________________________
#### Unser Projekt ####
model = Model()
#Parameter definieren - P, h und m sind hier für die Tour 04587 aus den Novemberdaten
P=46
Q=max(0,P-33)
I=range(P)
J=range(2)
K=range(11)
L=range(3)
n=np.arange(P)
h=np.zeros(P)
h[10]=1 #hier vereinfacht nur eine Hochpalette angenommen
m=[891.2, 376.04, 376.04, 891.2, 141.032, 600.8, 600.8, 600.8, 376.04, 376.04, 196.392, 153.074, 362.078, 362.078, 376.04, 376.04, 376.04, 376.04, 376.04, 367.472, 323.03, 344.146, 258.46, 156.054, 276.918, 29.059, 113.13, 376.04, 376.04, 335.94, 313.952, 274.056, 189.256, 367.472, 278.1, 106.94, 525.008, 424.568, 377.743, 164.288, 367.472, 213.34, 376.04, 202.532, 378.2, 376.04]
M=100
#Variablen hinzufügen
"""x = [[[[model.add_var('x({},{},{},{})'.format(i,j,k,l), var_type=BINARY)
for l in range(1,3+1)] for k in range(1,4+1)] for j in range(1,2+1)] for i in range(1,n+1)]
# model.vars['x(5,2,4,3)'] entspricht x[4][1][3][2] - deshalb var-Erstellung nochmal geändert"""
x = [[[[model.add_var('x({},{},{},{})'.format(i,j,k,l), var_type=BINARY)
for l in L] for k in K] for j in J] for i in I]
# jetzt entspricht model.vars['x(4,1,3,2)'] auch x[4][1][3][2]
GL = model.add_var(name='GL', lb=0, var_type='C')
GR = model.add_var(name='GR', lb=0, var_type='C')
#Nebenbedingungen hinzufügen
#NB jede Pal einen Platz
for i in I:
model += xsum(x[i][j][k][l] for j in J for k in K for l in L) == 1
#NB jeder Platz max. eine Pal
for j,k,l in product(J,K,L):
model += xsum(x[i][j][k][l] for i in I) <= 1
#NB Hochpal muss unten stehen
model += xsum(h[i]*x[i][1][k][l] for i in I for k in K for l in L) == 0
#NB Reihe über Hochpal frei
for i,k,l in product(I,K,L):
model += h[i]*x[i][0][k][l] + h[i]*xsum(x[i_s][1][k][l_s] for l_s in L for i_s in I if i_s != i) <= 1+M*(1-x[i][0][k][l])
#NB Ladebalken Reihe oben max. 2t
for k in K:
model += xsum(m[i]*x[i][1][k][l] for i in I for l in L) <= 2000
#NB zuerst unten voll
model += xsum(x[i][1][k][l] for i in I for k in K for l in L) == Q
#NB unten nur hinten an der Tür frei
for k,l in product(K[1:],L):
model += xsum(x[i][0][k-1][l_s] for i in I for l_s in L) >= xsum(x[i][0][k][l] for i in I)*3
#NB oben kein Freiraum vorne, Y
#NB oben kein Freiraum hinten, Y
#NB oben kein Freiraum vorne, W
#NB oben kein Freiraum hinten, W
#NB Restreihe: bei zwei Paletten muss eine in der Mitte stehen
for j,k in product(J,K):
model += M*xsum(x[i][j][k][1] for i in I) >= xsum(x[i][j][k][l] for i in I for l in L)-1
#NB Restreihe: einzelne Palette darf nicht in der Mitte stehen
for j,k in product(J,K):
model += 2*(xsum(x[i][j][k][0] for i in I)+xsum(x[i][j][k][2] for i in I)) >= xsum(x[i][j][k][l] for i in I for l in L)
#NB Ladungsschwerpunkt Untergrenze
#NB Ladungsschwerpunkt Obergrenze
#NB Auslieferungsreihenfolge
for i,i_s,j,j_s,k,l,l_s in product(I,I,J,J,K[1:],L,L):
model += x[i][j][k][l]*n[i] <= x[i_s][j_s][k-1][l_s]*n[i_s] + (1-x[i_s][j_s][k-1][l_s])*M
"""#alternativ (falls Freiräume mittig zugelassen werden):
for i,i_s,j,j_s,k,k_s,l,l_s in product(I,I,J,J,K,K,L,L):
if k_s<k:
model += x[i][j][k][l]*n[i] <= x[i_s][j_s][k_s][l_s]*n[i_s] + (1-x[i_s][j_s][k_s][l_s])*M"""
#NB kühl-trocken
#NB zur Vorbereitung der Zielfunktion
model += GL >= xsum(x[i][j][k][0]*m[i] for i in I for j in J for k in K) - xsum(x[i][j][k][2]*m[i] for i in I for j in J for k in K)
model += GR >= xsum(x[i][j][k][2]*m[i] for i in I for j in J for k in K) - xsum(x[i][j][k][0]*m[i] for i in I for j in J for k in K)
#Zielfunktion
model.objective = minimize(GL+GR)
#Zielfunktion vereinfacht: Summe aller x (=P)
#model.objective = maximize(xsum(x[i][j][k][l] for i in I for j in J for k in K for l in L))
#Optimierung mit Abbruchkriterien
model.max_mip_gap_abs = 0.1
#model.max_solutions = 1
status = model.optimize() #max_seconds_same_incumbent=60 max_nodes=25
if model.num_solutions: #nur wenn überhaupt eine Lösung gefunden wurde
print('\nLösung gefunden, Status:',status)
#print('Summe über alle x =',xsum(x[i][j][k][l] for i in I for j in J for k in K for l in L).x)
print('ZFW =',GL.x+GR.x)
print(model.num_solutions)
print('\n')
for i,j,k,l in product(I,J,K,L):
if x[i][j][k][l].x >= 0.99:
print('x({},{},{},{})'.format(i,j,k,l))
else:
print('\n\nnichts gefunden')
print('Letzter Status:',status,'\n')
| Mortimo1996/Stauraumplanung_vcs | Archiv/main.py | main.py | py | 7,685 | python | de | code | 0 | github-code | 90 |
2149818242 | import random
from sys import argv
from PyDictionary import PyDictionary
from rearrange import anagramizer
def get_file_lines(filename):
file = open(filename, 'r')
all_lines = file.readlines()
all_lines = [line.strip() for line in all_lines]
file.close()
return all_lines
def random_dict_word(dictionary):
rand_index = random.randint(0, len(dictionary)-1)
return dictionary[rand_index]
def random_word_game(filename):
dictionary = get_file_lines(filename)
dictionary = random_dict_word(dictionary)
print(dictionary)
input(f"What does {dictionary} mean?")
pydictionary = PyDictionary(random_dict_word(dictionary))
print(dictionary)
print(pydictionary.printMeanings())
print(dictionary)
def autocomplete(filename):
files = get_file_lines(filename)
letter_input = input('what letter do you want all the words for? ')
words = list()
for line in files:
if letter_input in line[0]:
words.append(line)
return words
def better_anagram(filename):
files = get_file_lines(filename)
word_input = input('what word do you want all the words for? ')
words_anagram = anagramizer(word_input)
words = []
for line in files:
if line in words_anagram:
words.append(line)
return words
if __name__ == "__main__":
filename = '/usr/share/dict/words'
files = get_file_lines(filename)
number = argv[1:]
words = [random_dict_word(files) for _ in range(int(number[0]))]
print(words)
# random_word_game(filename)
autocomplete = autocomplete(filename)
print(autocomplete)
print(len(autocomplete))
better_anagram = better_anagram(filename)
print(better_anagram)
| omarsagoo/tweet_gen_app | static/code/dictionary_words.py | dictionary_words.py | py | 1,747 | python | en | code | 0 | github-code | 90 |
36518105590 | import time
from scraper import Scraper
start = time.time()
facebook = Scraper(
'Facebook', 'https://www.facebook.com/careers/jobs/?page=1&results_per_page=100&offices[0]=Dublin%2C%20Ireland#search_result')
data = facebook.start()
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
end = "Scraper took", "{:0>2}:{:0>2}:{:05.2f}".format(
int(hours), int(minutes), seconds), "to run"
print(end)
| ConanKeaveney/JobHub-Scrapers | companies/Facebook/test.py | test.py | py | 446 | python | en | code | 0 | github-code | 90 |
17922226966 | #!/usr/bin/env python
import os
from trackutil.confutil import get_config
from trackutil.ioutil import jsonload
from trackutil.logger import INFO
from trackutil.pathutil import get_datafiles_in_dir, mkdir
from trackutil.pathutil import get_storyline_module_dir
def main():
    """Resolve the storyline reformat/summarize directories from config
    and run the batch summarization."""
    cfg = get_config()
    root = cfg['data']['outdir']
    root = os.path.join(root, cfg['storyline']['datadir'])
    inputdir = os.path.join(root, cfg['storyline']['reformat']['datadir'])
    outputdir = os.path.join(root, cfg['storyline']['summarize']['datadir'])
    summarize(inputdir, outputdir)
def text_summary(inputfn, outputfn):
    """Load the bucket list from a JSON file and write its plain-text
    summary; silently does nothing when the file fails to load."""
    buckets = jsonload(inputfn)
    if buckets is None:
        return
    text_summary_buckets(buckets, outputfn)
def text_summary_buckets(buckets, outputfn):
    """
    Write a human-readable report of `buckets` to `outputfn`.

    Each bucket prints its signature, type (I/T), optional likelihood,
    its tweets, and any attached Instagram posts.

    NOTE(review): the .encode('utf-8', 'ignore') calls suggest Python 2;
    under Python 3 they write b'...' reprs into the report — verify the
    target interpreter.
    """
    processed = 0
    with open(outputfn, 'w') as f:
        for b in buckets:
            processed += 1
            f.write('==================================\n')
            f.write('{0}. {1}\n'.format(processed, b['signature']))
            # Classify the bucket by which source flags are set.
            if b['i_bucket'] and b['t_bucket']:
                bucket_type_str = 'I-bucket and T-bucket'
            elif b['i_bucket']:
                bucket_type_str = 'I-bucket'
            elif b['t_bucket']:
                bucket_type_str = 'T-bucket'
            else:
                bucket_type_str = 'Error'
            f.write('Bucket type: {}\n'.format(bucket_type_str))
            if 'likelihood' in b:
                f.write('Likelihood: {}\n'.format(b['likelihood']))
            f.write('\n')
            # Tweets: (author, text) pairs.
            for t in b['twitter']:
                f.write('\t*** <{}> {}\n'.format(
                    t[0], t[1].encode('utf-8', 'ignore')))
            if len(b['instagram']) > 0:
                f.write('\n\t\t----------Instagram----------\n\n')
                for i in b['instagram']:
                    f.write('\t+++ ({}) [{}]\n'.format(
                        b['instagram'].index(i),
                        i['name'].encode('utf-8', 'ignore')))
                    f.write('\t\t tags: {}\n'.format(
                        (', '.join(i['tags']).encode('utf-8', 'ignore'))))
                    f.write('\t\t url: {}\n'.format(
                        i['url'].encode('utf-8', 'ignore')))
            f.write('\n')
def summarize(inputdir, outputdir):
    """Summarize every data file in `inputdir` into a same-named .txt
    report in `outputdir`, logging progress every 10 files."""
    mkdir(outputdir)
    fn_list = get_datafiles_in_dir(inputdir)
    processed = 0
    total = len(fn_list)
    # fnlist = ['{}.json'.format(fn) for fn in range(610, 626)]
    for fn in fn_list:
        inputfn = os.path.join(inputdir, fn)
        # Swap the 4-char extension ('json') for 'txt'.
        outfn = fn[:-4] + 'txt'
        outputfn = os.path.join(outputdir, outfn)
        text_summary(inputfn, outputfn)
        processed += 1
        if processed % 10 == 0:
            INFO("summarized {0}/{1}".format(processed, total))
def summarize_new(ts, input_module, output_module, cfg=None):
    """Summarize the single timestamped bucket file `<ts>.json` from
    `input_module`'s directory into `<ts>.txt` under `output_module`'s
    directory; loads config lazily when not provided."""
    INFO('[Tbucket Summarize] {}'.format(ts))
    if cfg is None:
        cfg = get_config()
    inputdir = get_storyline_module_dir(cfg, input_module)
    outputdir = get_storyline_module_dir(cfg, output_module)
    mkdir(outputdir)
    inputfn = os.path.join(inputdir, '{}.json'.format(ts))
    outputfn = os.path.join(outputdir, '{}.txt'.format(ts))
    text_summary(inputfn, outputfn)
if __name__ == "__main__":
main()
# inputfn = '/home/shiguang/Projects/evtrack/data/workshop/storyline/absorb/610.json'
# outputfn = '/home/shiguang/absorbed_610.txt'
# text_summary(inputfn, outputfn)
| shiguangwang/storyline | storyline/summarize.py | summarize.py | py | 3,419 | python | en | code | 0 | github-code | 90 |
34731364547 | #!/usr/bin/env python3
""" Defines `inception_network` """
import tensorflow.keras as K
inception_block = __import__('0-inception_block').inception_block
def inception_network():
    """
    Builds the inception network as described in Going Deeper with Convolutions
    (2014).

    Returns: A Keras Model of the Inception network.
    """
    inputs = K.layers.Input((224, 224, 3))

    # Stem: 7x7/2 conv -> pool -> 3x3/1 conv -> pool.
    net = K.layers.Conv2D(64, 7, strides=2, padding='same',
                          activation='relu')(inputs)
    net = K.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(net)
    net = K.layers.Conv2D(192, 3, strides=1, padding='same',
                          activation='relu')(net)
    net = K.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(net)

    # Inception stage filter configurations, in order; `None` marks where
    # a 3x3/2 max-pooling layer is inserted between stages.
    stages = [
        [64, 96, 128, 16, 32, 32],
        [128, 128, 192, 32, 96, 64],
        None,
        [192, 96, 208, 16, 48, 64],
        [160, 112, 224, 24, 64, 64],
        [128, 128, 256, 24, 64, 64],
        [112, 144, 288, 32, 64, 64],
        [256, 160, 320, 32, 128, 128],
        None,
        [256, 160, 320, 32, 128, 128],
        [384, 192, 384, 48, 128, 128],
    ]
    for filters in stages:
        if filters is None:
            net = K.layers.MaxPool2D(pool_size=3, strides=2,
                                     padding='same')(net)
        else:
            net = inception_block(net, filters)

    # Head: global 7x7 average pool, dropout, 1000-way softmax.
    net = K.layers.AveragePooling2D(pool_size=7, strides=1,
                                    padding='valid')(net)
    net = K.layers.Dropout(rate=0.4)(net)
    outputs = K.layers.Dense(1000, activation='softmax')(net)
    return K.Model(inputs, outputs)
| keysmusician/holbertonschool-machine_learning | supervised_learning/0x08-deep_cnns/1-inception_network.py | 1-inception_network.py | py | 2,073 | python | en | code | 1 | github-code | 90 |
31466939260 | import numpy as np
import os
import argparse
from tqdm import tqdm
from multiprocessing import Process, Queue
from importlib.machinery import SourceFileLoader
import logging
import pickle
import imp
import matplotlib.pyplot as plt
from PIL import Image
import collections
def get_array_of_modes(cf, seg):
    """
    Assemble an array holding all label modes.
    :param cf: config module
    :param seg: 4D integer array
    :return: 4D integer array of shape (cf.num_modes,) + seg.shape
    """
    switch_matrix = get_mode_statistics(cf.label_switches, exp_modes=cf.exp_modes)['switch']
    class_names = list(cf.label_switches.keys())

    # one relabeled copy of the ground truth per mode
    gt_seg_modes = np.zeros(shape=(cf.num_modes,) + seg.shape, dtype=np.uint8)
    for mode_ix in range(cf.num_modes):
        relabeled = seg.copy()
        for col, name in enumerate(class_names):
            if switch_matrix[mode_ix, col]:
                # flip every pixel of this class to its alternative label
                source_id = cf.name2trainId[name]
                target_id = cf.name2trainId[name + '_2']
                relabeled[relabeled == source_id] = target_id
        gt_seg_modes[mode_ix] = relabeled
    return gt_seg_modes
def get_array_of_samples(cf, img_key):
    """
    Assemble an array holding all segmentation samples for a given image.
    :param cf: config module
    :param img_key: string
    :return: 5D integer array of shape (num_samples, 1, 1, *patch_size)
    """
    samples = np.zeros(shape=(cf.num_samples, 1, 1) + tuple(cf.patch_size), dtype=np.uint8)
    path_template = os.path.join(cf.out_dir, img_key + '_sample{}_labelIds.npy')

    for ix in range(cf.num_samples):
        path = path_template.format(ix)
        try:
            samples[ix] = np.load(path)
        except:
            # a missing/corrupt file leaves this slot all-zero
            print('Could not load {}'.format(path))
    return samples
def get_array_of_samples_combined(cf, img_key):
    """
    Assemble an array holding all segmentation samples for a given image,
    read from a single combined '*_16sample_labelIds.npy' file.
    :param cf: config module
    :param img_key: string
    :return: 4D integer array of shape (num_samples, 1, *patch_size);
             all-zero if the combined file could not be loaded
    """
    seg_samples = np.zeros(shape=(cf.num_samples, 1) + tuple(cf.patch_size), dtype=np.uint8)
    sample_path = os.path.join(cf.out_dir, '{}_16sample_labelIds.npy'.format(img_key))
    try:
        total_seg_samples = np.load(sample_path)
    except (OSError, ValueError):
        # bug fix: previously a failed load still fell through to the copy
        # loop below and raised NameError on `total_seg_samples`; now the
        # all-zero array is returned instead.
        print('Could not load {}'.format(sample_path))
        return seg_samples
    for i in range(cf.num_samples):
        seg_samples[i] = total_seg_samples[i]
    return seg_samples
def get_mode_counts(d_matrix_YS):
    """
    Calculate image-level mode counts.

    Each sample is assigned to the ground-truth mode with the smallest
    class-averaged distance, and the assignments are tallied per mode.
    :param d_matrix_YS: 3D array (num_modes, num_samples, num_classes)
    :return: 1D integer array of length num_modes
    """
    # assign each sample to a mode
    mean_d = np.nanmean(d_matrix_YS, axis=-1)
    sampled_modes = np.argmin(mean_d, axis=-2)

    # count the modes
    # (bug fix: `np.int` was removed in NumPy 1.24; the builtin is equivalent)
    num_modes = d_matrix_YS.shape[0]
    mode_count = np.zeros(shape=(num_modes,), dtype=int)
    for sampled_mode in sampled_modes:
        mode_count[sampled_mode] += 1
    return mode_count
def get_pixelwise_mode_counts(data_loader, seg, seg_samples):
    """
    Calculate pixel-wise mode counts.

    For every switchable class, count how often the pixels that carry the
    original label in the ground truth are sampled as the original vs. the
    alternative ('_2') label.
    :param data_loader: data loader used for the model, must have a switcher
    :param seg: 4D array of integer labeled segmentations
    :param seg_samples: 5D array of integer labeled segmentations
    :return: array of shape (num switchable classes, 3) holding
             [total sampled pixels, original-label count, alternative-label count]
    """
    assert seg.shape == seg_samples.shape[1:]
    num_samples = seg_samples.shape[0]
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
    pixel_counts = np.zeros(shape=(len(data_loader.switcher._label_switches), 3), dtype=int)

    # iterate all switchable classes
    for i, c in enumerate(data_loader.switcher._label_switches.keys()):
        c_id = data_loader.switcher._name2id[c]
        # bug fix: was `switcher.name2id` (missing underscore), inconsistent
        # with the `_name2id` access on the line above
        alt_c_id = data_loader.switcher._name2id[c + '_2']
        c_ixs = np.where(seg == c_id)
        total_num_pixels = np.sum((seg == c_id).astype(np.uint8)) * num_samples
        pixel_counts[i, 0] = total_num_pixels

        # count the pixels of original class|original class and alternative class|original class
        for j in range(num_samples):
            sample = seg_samples[j]
            sampled_original_pixels = np.sum((sample[c_ixs] == c_id).astype(np.uint8))
            sampled_alternative_pixels = np.sum((sample[c_ixs] == alt_c_id).astype(np.uint8))
            pixel_counts[i, 1] += sampled_original_pixels
            pixel_counts[i, 2] += sampled_alternative_pixels
    return pixel_counts
def get_mode_statistics(label_switches, exp_modes=5):
    """
    Calculate a binary matrix of switches as well as a vector of mode probabilities.
    :param label_switches: dict specifying class names and their individual sampling probabilities
    :param exp_modes: integer, number of independently switchable classes
    :return: dict with 'switch' (uint8 array of shape (2**exp_modes, exp_modes))
             and 'mode_probs' (float32 vector summing to 1)
    """
    num_modes = 2 ** exp_modes

    # assemble a binary matrix of switch decisions
    # (bug fix: the column count was hard-coded to 5 instead of exp_modes)
    switch = np.zeros(shape=(num_modes, exp_modes), dtype=np.uint8)
    for i in range(exp_modes):
        switch[:, i] = 2 ** i * (2 ** (exp_modes - 1 - i) * [0] + 2 ** (exp_modes - 1 - i) * [1])

    # calculate the probability for each individual mode
    mode_probs = np.zeros(shape=(num_modes,), dtype=np.float32)
    for mode in range(num_modes):
        prob = 1.
        for i, c in enumerate(label_switches.keys()):
            if switch[mode, i]:
                prob *= label_switches[c]
            else:
                prob *= 1. - label_switches[c]
        mode_probs[mode] = prob
    # float32 accumulation: compare with a tolerance instead of exact equality
    assert np.isclose(np.sum(mode_probs), 1.)
    return {'switch': switch, 'mode_probs': mode_probs}
def get_energy_distance_components(gt_seg_modes, seg_samples, eval_class_ids, ignore_mask=None):
    """
    Calculates the components for the IoU-based generalized energy distance given an array holding all segmentation
    modes and an array holding all sampled segmentations.
    :param gt_seg_modes: N-D array in format (num_modes,[...],H,W)
    :param seg_samples: N-D array in format (num_samples,[...],H,W)
    :param eval_class_ids: integer or list of integers specifying the classes to encode, if integer range() is applied
    :param ignore_mask: N-D array in format ([...],H,W)
    :return: dict with keys 'YS', 'SS', 'YY' holding per-class (1 - IoU)
             distance matrices of shapes (modes, samples, C), (samples, samples, C)
             and (modes, modes, C) respectively
    """
    num_modes = gt_seg_modes.shape[0]
    num_samples = seg_samples.shape[0]

    if isinstance(eval_class_ids, int):
        eval_class_ids = list(range(eval_class_ids))

    d_matrix_YS = np.zeros(shape=(num_modes, num_samples, len(eval_class_ids)), dtype=np.float32)
    d_matrix_YY = np.zeros(shape=(num_modes, num_modes, len(eval_class_ids)), dtype=np.float32)
    d_matrix_SS = np.zeros(shape=(num_samples, num_samples, len(eval_class_ids)), dtype=np.float32)

    # iterate all ground-truth modes
    for mode in range(num_modes):

        ##########################################
        #   Calculate d(Y,S) = [1 - IoU(Y,S)],   #
        #   with S ~ P_pred, Y ~ P_gt            #
        ##########################################

        # iterate the samples S
        for i in range(num_samples):
            conf_matrix = calc_confusion(gt_seg_modes[mode], seg_samples[i],
                                         loss_mask=ignore_mask, class_ixs=eval_class_ids)
            iou = metrics_from_conf_matrix(conf_matrix)['iou']
            d_matrix_YS[mode, i] = 1. - iou

        ###########################################
        #   Calculate d(Y,Y') = [1 - IoU(Y,Y')],  #
        #   with Y,Y' ~ P_gt                      #
        ###########################################

        # iterate the ground-truth modes Y' while exploiting the pair-wise symmetries for efficiency
        for mode_2 in range(mode, num_modes):
            conf_matrix = calc_confusion(gt_seg_modes[mode], gt_seg_modes[mode_2],
                                         loss_mask=ignore_mask, class_ixs=eval_class_ids)
            iou = metrics_from_conf_matrix(conf_matrix)['iou']
            # distance is symmetric, so fill both (mode, mode_2) and (mode_2, mode)
            d_matrix_YY[mode, mode_2] = 1. - iou
            d_matrix_YY[mode_2, mode] = 1. - iou

    #########################################
    #   Calculate d(S,S') = 1 - IoU(S,S'),  #
    #   with S,S' ~ P_pred                  #
    #########################################

    # iterate all samples S
    for i in range(num_samples):
        # iterate all samples S' (upper triangle only; mirror the result)
        for j in range(i, num_samples):
            conf_matrix = calc_confusion(seg_samples[i], seg_samples[j],
                                         loss_mask=ignore_mask, class_ixs=eval_class_ids)
            iou = metrics_from_conf_matrix(conf_matrix)['iou']
            d_matrix_SS[i, j] = 1. - iou
            d_matrix_SS[j, i] = 1. - iou

    return {'YS': d_matrix_YS, 'SS': d_matrix_SS, 'YY': d_matrix_YY}
def calc_energy_distances(d_matrices, num_samples=None, source_probability_weighted=None, target_probability_weighted=None):
    """
    Calculate the generalized energy distance for each image based on matrices holding the combinatorial distances.

    :param d_matrices: dict with keys 'YS', 'SS', 'YY' holding 4D arrays of shape \
    (num_images, num_modes/num_samples, num_modes/num_samples, num_classes)
    :param num_samples: integer or None; if None, all samples present in 'SS' are used
    :param source_probability_weighted: None or array (num_images, num_samples) of sample probabilities
    :param target_probability_weighted: None or array (num_images, num_modes) of mode probabilities
    :return: 1D numpy array of energy distances, one per image
    """
    d_matrices = d_matrices.copy()  # shallow copy so the slicing below does not alter the caller's dict

    if num_samples is None:
        num_samples = d_matrices['SS'].shape[1]
    d_matrices['YS'] = d_matrices['YS'][:, :, :num_samples]
    d_matrices['SS'] = d_matrices['SS'][:, :num_samples, :num_samples]

    # perform a nanmean over the class axis so as to not factor in classes that are not present in
    # both the ground-truth mode as well as the sampled prediction
    if (target_probability_weighted is not None) and (source_probability_weighted is None):
        mode_probs = target_probability_weighted
        mean_d_YS = np.nanmean(d_matrices['YS'], axis=-1)  # average over classes
        mean_d_YS = np.mean(mean_d_YS, axis=2)  # average over samples, since no source probability is provided
        mean_d_YS = mean_d_YS * mode_probs
        d_YS = np.sum(mean_d_YS, axis=1)

        mean_d_SS = np.nanmean(d_matrices['SS'], axis=-1)
        d_SS = np.mean(mean_d_SS, axis=(1, 2))

        mean_d_YY = np.nanmean(d_matrices['YY'], axis=-1)
        mean_d_YY = mean_d_YY * mode_probs[:, :, np.newaxis] * mode_probs[:, np.newaxis, :]
        d_YY = np.sum(mean_d_YY, axis=(1, 2))

    elif (target_probability_weighted is None) and (source_probability_weighted is not None):
        mode_probs = source_probability_weighted
        mean_d_YS = np.nanmean(d_matrices['YS'], axis=-1)
        mean_d_YS = np.mean(mean_d_YS, axis=1)  # average over target modes, since no target probability is provided
        mean_d_YS = mean_d_YS * mode_probs
        d_YS = np.sum(mean_d_YS, axis=1)

        mean_d_YY = np.nanmean(d_matrices['YY'], axis=-1)
        d_YY = np.mean(mean_d_YY, axis=(1, 2))

        mean_d_SS = np.nanmean(d_matrices['SS'], axis=-1)
        mean_d_SS = mean_d_SS * mode_probs[:, :, np.newaxis] * mode_probs[:, np.newaxis, :]
        d_SS = np.sum(mean_d_SS, axis=(1, 2))

    elif (target_probability_weighted is not None) and (source_probability_weighted is not None):
        mode_probs_target = target_probability_weighted
        mode_probs_source = source_probability_weighted

        mean_d_YS = np.nanmean(d_matrices['YS'], axis=-1)
        mean_d_YS = mean_d_YS * mode_probs_target[:, :, np.newaxis] * mode_probs_source[:, np.newaxis, :]
        # consistency fix: use a tuple for the axis argument like every other branch
        d_YS = np.sum(mean_d_YS, axis=(1, 2))

        mean_d_SS = np.nanmean(d_matrices['SS'], axis=-1)
        mean_d_SS = mean_d_SS * mode_probs_source[:, :, np.newaxis] * mode_probs_source[:, np.newaxis, :]
        d_SS = np.sum(mean_d_SS, axis=(1, 2))

        mean_d_YY = np.nanmean(d_matrices['YY'], axis=-1)
        mean_d_YY = mean_d_YY * mode_probs_target[:, :, np.newaxis] * mode_probs_target[:, np.newaxis, :]
        d_YY = np.sum(mean_d_YY, axis=(1, 2))
    else:
        # unweighted case: plain means over modes and samples
        mean_d_YS = np.nanmean(d_matrices['YS'], axis=-1)
        d_YS = np.mean(mean_d_YS, axis=(1, 2))

        mean_d_SS = np.nanmean(d_matrices['SS'], axis=-1)
        d_SS = np.mean(mean_d_SS, axis=(1, 2))

        mean_d_YY = np.nanmean(d_matrices['YY'], axis=-1)
        d_YY = np.nanmean(mean_d_YY, axis=(1, 2))

    return 2 * d_YS - d_SS - d_YY
def eval_city(cf, file_list, data_loader, queue=None):
    """
    Perform evaluation w.r.t the generalized energy distance based on the IoU as well as image-level and pixel-level
    mode frequencies (using samples written to file).
    :param cf: config module
    :param file_list: list of '*_16prob.npy' prediction file paths
    :param data_loader: loader exposing get_gt_modes() and a label switcher
    :param queue: instance of multiprocessing.Queue; if given, results are put on it
    :return: NoneType (queue given) or dict of results
    """
    num_modes = cf.num_modes
    num_samples = cf.num_samples

    # evaluate only switchable classes, so a total of 10 here
    eval_class_names = list(data_loader.switcher._label_switches.keys()) + list(data_loader.switcher._switched_name2Id.keys())
    eval_class_ids = [data_loader.switcher._name2id[n] for n in eval_class_names]

    d_matrices = {'YS': np.zeros(shape=(len(file_list), num_modes, num_samples, len(eval_class_ids)),
                                 dtype=np.float32),
                  'YY': np.ones(shape=(len(file_list), num_modes, num_modes, len(eval_class_ids)),
                                dtype=np.float32),
                  'SS': np.ones(shape=(len(file_list), num_samples, num_samples, len(eval_class_ids)),
                                dtype=np.float32)}
    # bug fix: np.int was removed in NumPy 1.24; the builtin int is equivalent
    sampled_mode_counts = np.zeros(shape=(num_modes,), dtype=int)
    sampled_pixel_counts = np.zeros(shape=(len(cf.label_switches), 3), dtype=int)

    logging.info('Evaluating class names: {} (corresponding to labels {})'.format(eval_class_names, eval_class_ids))

    # iterate all validation images
    for img_n, file in enumerate(tqdm(file_list)):
        img_key = os.path.basename(file).replace("_16prob.npy", "")
        data_dict = data_loader.get_gt_modes(img_key)
        seg = data_dict['seg']
        seg = seg[np.newaxis, np.newaxis]
        ignore_mask = (seg == cf.ignore_label).astype(np.uint8)
        seg_samples = np.load(file.replace("prob.npy", "sample_labelIds.npy"))[:, None, ...]
        gt_seg_modes = data_dict['gt_modes']

        energy_dist = get_energy_distance_components(gt_seg_modes=gt_seg_modes, seg_samples=seg_samples,
                                                     eval_class_ids=eval_class_ids, ignore_mask=ignore_mask)
        sampled_mode_counts += get_mode_counts(energy_dist['YS'])
        sampled_pixel_counts += get_pixelwise_mode_counts(data_loader, seg, seg_samples)
        for k in d_matrices.keys():
            d_matrices[k][img_n] = energy_dist[k]

    results = {'d_matrices': d_matrices, 'sampled_pixel_counts': sampled_pixel_counts,
               'sampled_mode_counts': sampled_mode_counts, 'total_num_samples': len(file_list) * num_samples}
    if queue is not None:
        queue.put(results)
        return
    else:
        return results
def eval_LIDC(cf, file_list, data_loader, queue=None):
    """
    Accumulate generalized-energy-distance components for LIDC predictions.

    Loads the sampled segmentations written to disk for each file in
    file_list and compares them against the 4 ground-truth annotations
    provided by the data loader (foreground class id 1 only).
    :param cf: config module (uses cf.num_samples)
    :param file_list: list of '*_16prob.npy' file paths
    :param data_loader: object exposing get_gt_modes(img_key)
    :param queue: optional multiprocessing.Queue; if given, results are put
                  on the queue and None is returned
    :return: dict with key 'd_matrices', or None when a queue is used
    """
    # print(file_list)
    num_modes = 4
    num_samples = cf.num_samples
    #
    eval_class_ids = [1]
    d_matrices = {'YS': np.zeros(shape=(len(file_list), num_modes, num_samples, len(eval_class_ids)),
                                 dtype=np.float32),
                  'YY': np.ones(shape=(len(file_list), num_modes, num_modes, len(eval_class_ids)),
                                dtype=np.float32),
                  'SS': np.ones(shape=(len(file_list), num_samples, num_samples, len(eval_class_ids)),
                                dtype=np.float32)}

    # iterate all validation images
    for img_n, file in enumerate(tqdm(file_list)):
        img_key = os.path.basename(file).replace("_16prob.npy", "")
        # samples on disk have shape (num_samples, H, W); add a channel axis
        seg_samples = np.load(file.replace("prob.npy", "sample_labelIds.npy"))[:,None,...]
        # print(seg_samples.shape)
        gt_seg_modes = data_loader.get_gt_modes(img_key)['gt_modes'][:,None, None, ...]

        energy_dist = get_energy_distance_components(gt_seg_modes=gt_seg_modes, seg_samples=seg_samples,
                                                     eval_class_ids=eval_class_ids, ignore_mask=None)
        # print(energy_dist)
        for k in d_matrices.keys():
            d_matrices[k][img_n] = energy_dist[k]

    results = {'d_matrices': d_matrices}
    if queue is not None:
        queue.put(results)
        return
    else:
        return results
#### parallel running ####
def runInParallel(fns_args, queue):
    """Run functions in parallel processes and collect one queue result each.

    :param fns_args: list of tuples containing functions and a tuple of arguments each
    :param queue: instance of multiprocessing.Queue()
    :return: list of queue results (one per spawned process, in completion order)
    """
    proc = []
    for fn in fns_args:
        p = Process(target=fn[0], args=fn[1])
        p.start()
        proc.append(p)
    # drain the queue BEFORE joining: a child blocks on queue.put() while the
    # pipe buffer is full, so joining first could deadlock
    results = [queue.get() for p in proc]
    for p in proc:
        # bug fix: the child processes were never joined/reaped
        p.join()
    return results
def multiprocess_evaluation_city(cf):
    """Evaluate the energy distance in multiprocessing (5 workers, 100 files each)
    and pickle the aggregated results to <cf.out_dir>/eval_results.pkl.
    :param cf: config module"""
    q = Queue()
    results = runInParallel([(eval_city, (cf, cf.file_list[0:100], cf.data_loader, q)),
                             (eval_city, (cf, cf.file_list[100:200], cf.data_loader, q)),
                             (eval_city, (cf, cf.file_list[200:300], cf.data_loader, q)),
                             (eval_city, (cf, cf.file_list[300:400], cf.data_loader, q)),
                             (eval_city, (cf, cf.file_list[400:500], cf.data_loader, q)),
                             ],
                            queue=q)

    total_num_samples = 0
    # bug fix: np.int was removed in NumPy 1.24; the builtin int is equivalent
    sampled_mode_counts = np.zeros(shape=(cf.num_modes,), dtype=int)
    sampled_pixel_counts = np.zeros(shape=(len(cf.label_switches), 3), dtype=int)
    d_matrices = {'YS': [], 'SS': [], 'YY': []}

    # aggregate results from the queue
    for result_dict in results:
        for key in d_matrices.keys():
            d_matrices[key].append(result_dict['d_matrices'][key])
        sampled_pixel_counts += result_dict['sampled_pixel_counts']
        sampled_mode_counts += result_dict['sampled_mode_counts']
        total_num_samples += result_dict['total_num_samples']
    for key in d_matrices.keys():
        d_matrices[key] = np.concatenate(d_matrices[key], axis=0)

    # calculate frequencies
    print('pixel frequencies', sampled_pixel_counts)
    sampled_pixelwise_mode_per_class = sampled_pixel_counts[:, 1:]
    total_num_pixels_per_class = sampled_pixel_counts[:, 0:1]
    sampled_pixel_frequencies = sampled_pixelwise_mode_per_class / total_num_pixels_per_class
    sampled_mode_frequencies = sampled_mode_counts / total_num_samples
    print('sampled pixel frequencies', sampled_pixel_frequencies)
    print('sampled_mode_frequencies', sampled_mode_frequencies)

    results_dict = {'d_matrices': d_matrices, 'pixel_frequencies': sampled_pixel_frequencies,
                    'mode_frequencies': sampled_mode_frequencies}
    results_file = os.path.join(cf.out_dir, 'eval_results.pkl')
    with open(results_file, 'wb') as f:
        pickle.dump(results_dict, f, pickle.HIGHEST_PROTOCOL)
    logging.info('Wrote to {}'.format(results_file))
def multiprocess_evaluation_LIDC(cf):
    """Evaluate the energy distance in multiprocessing (8 workers over fixed
    slices of cf.file_list) and pickle the concatenated distance matrices to
    <cf.out_dir>/eval_results.pkl.
    :param cf: config module"""
    q = Queue()
    # NOTE(review): the slice boundaries assume len(cf.file_list) is about
    # 1700+; slices beyond the list length yield empty chunks — confirm this
    # matches the configured dataset size.
    results = runInParallel([
        (eval_LIDC, (cf, cf.file_list[0:250], cf.data_loader, q)),\
        (eval_LIDC, (cf, cf.file_list[250:500], cf.data_loader, q)), \
        (eval_LIDC, (cf, cf.file_list[500:750], cf.data_loader, q)), \
        (eval_LIDC, (cf, cf.file_list[750:1000], cf.data_loader, q)), \
        (eval_LIDC, (cf, cf.file_list[1000:1250], cf.data_loader, q)), \
        (eval_LIDC, (cf, cf.file_list[1250:1450], cf.data_loader, q)),
        (eval_LIDC, (cf, cf.file_list[1450:1700], cf.data_loader, q)), \
        (eval_LIDC, (cf, cf.file_list[1700:], cf.data_loader, q)),
        ],
        queue=q)
    # results = runInParallel([(eval_LIDC, (cf, cf.file_list[0:1], cf.data_loader, q)),
    #                          ],
    #                         queue=q)
    # print(results)
    d_matrices = {'YS':[], 'SS':[], 'YY':[]}

    # aggregate results from the queue
    for result_dict in results:
        for key in d_matrices.keys():
            d_matrices[key].append(result_dict['d_matrices'][key])
    for key in d_matrices.keys():
        d_matrices[key] = np.concatenate(d_matrices[key], axis=0)

    results_dict = {'d_matrices': d_matrices}
    results_file = os.path.join(cf.out_dir, 'eval_results.pkl')
    with open(results_file, 'wb') as f:
        pickle.dump(results_dict, f, pickle.HIGHEST_PROTOCOL)
    logging.info('Wrote to {}'.format(results_file))
##### utility functions #####
def softmax_2_onehot(arr):
    """Transform a numpy array of softmax values into a one-hot encoded array.

    Assumes classes are encoded in axis 1. Note: *arr* is overwritten in
    place and also returned.
    :param arr: ND array
    :return: ND array
    """
    winners = np.argmax(arr, axis=1)
    for class_ix in range(arr.shape[1]):
        arr[:, class_ix] = (winners == class_ix).astype(np.uint8)
    return arr
def numpy_one_hot(label_arr, num_classes):
    """One-hotify an integer-labeled numpy array. One-hot encoding is encoded in additional last axis.

    Labels >= num_classes are mapped to class 0.
    :param label_arr: ND array (left unmodified)
    :param num_classes: integer
    :return: (N+1)D array
    """
    label_arr = np.asarray(label_arr)
    # bug fix: replace out-of-range labels on a new array instead of
    # silently mutating the caller's array in place
    safe_labels = np.where(label_arr >= num_classes, 0, label_arr)
    res = np.eye(num_classes)[safe_labels.reshape(-1)]
    return res.reshape(list(label_arr.shape) + [num_classes])
def calc_confusion(labels, samples, class_ixs, loss_mask=None):
    """
    Compute a per-class confusion matrix (TP, FP, TN, FN) between two
    integer-labeled arrays. Pixels where loss_mask == 1 are excluded.
    :param labels: 4/5D array (1, num_class, h, w)
    :param samples: 4/5D array
    :param class_ixs: integer or list of integers specifying the classes to evaluate
    :param loss_mask: 4/5D array
    :return: 2D float32 array of shape (num_classes, 4)
    """
    if labels.shape != samples.shape:
        raise AssertionError('shape mismatch {} vs. {}'.format(labels.shape, samples.shape))

    if isinstance(class_ixs, int):
        num_classes = class_ixs
        class_ixs = range(class_ixs)
    elif isinstance(class_ixs, list):
        num_classes = len(class_ixs)
    else:
        raise TypeError('arg class_ixs needs to be int or list, not {}.'.format(type(class_ixs)))

    if loss_mask is None:
        shp = labels.shape
        loss_mask = np.zeros(shape=(shp[0], 1, shp[2], shp[3]))

    conf_matrix = np.zeros(shape=(num_classes, 4), dtype=np.float32)
    valid = (loss_mask != 1)
    for row, class_id in enumerate(class_ixs):
        in_pred = (samples == class_id)
        in_gt = (labels == class_id)
        conf_matrix[row, 0] = int((in_pred & in_gt & valid).sum())    # TP
        conf_matrix[row, 1] = int((in_pred & ~in_gt & valid).sum())   # FP
        conf_matrix[row, 2] = int((~in_pred & ~in_gt & valid).sum())  # TN
        conf_matrix[row, 3] = int((~in_pred & in_gt & valid).sum())   # FN
    return conf_matrix
def metrics_from_conf_matrix(conf_matrix):
    """
    Calculate IoU per class from a confusion matrix.

    A class absent from both the prediction and the ground truth (no TP, FP
    or FN) is scored with an IoU of 1.
    :param conf_matrix: 2D array of shape (num_classes, 4)
    :return: dict holding 1D-vectors of metrics
    """
    tps, fps, fns = conf_matrix[:, 0], conf_matrix[:, 1], conf_matrix[:, 3]

    iou = np.zeros_like(tps, dtype=np.float32)
    # iterate classes
    for class_ix in range(tps.shape[0]):
        denom = tps[class_ix] + fps[class_ix] + fns[class_ix]
        # unless both the prediction and the ground-truth is empty, calculate a finite IoU
        iou[class_ix] = tps[class_ix] / denom if denom != 0 else 1
    return {'iou': iou}
if __name__ == '__main__':
    # load the evaluation config as a module and run the full LIDC evaluation
    cf = imp.load_source('cf', 'LIDC_eval_config.py')
    multiprocess_evaluation_LIDC(cf)
    # conf_matrix = calc_confusion(np.concatenate([np.zeros([1,1, 10, 10]),np.ones([1,1, 10, 10])], axis=2),\
    #                               np.concatenate([np.zeros([1,1, 15, 10]),np.ones([1,1, 5, 10])], axis=2), \
    #                               [1], loss_mask=None)
    # print(conf_matrix)
    # print(metrics_from_conf_matrix(conf_matrix)['iou'])
19233814559 | from tanka.predule import Variable
def test_rosenbrock():
    """Minimize the Rosenbrock function with plain gradient descent.

    NOTE(review): this 'test' has no assertions — it only prints the
    trajectory; consider asserting convergence towards the minimum (1, 1).
    """
    def rosenbrock(x0, x1):
        # classic banana function; global minimum at (x0, x1) = (1, 1)
        y = 100 * (x1 - x0 ** 2) ** 2 + (1 - x0) ** 2
        return y

    x0 = Variable(0.0)
    x1 = Variable(2.0)
    lr = 0.001
    iters = 10_0  # 100 gradient-descent steps

    for _ in range(iters):
        print(x0, x1)
        y = rosenbrock(x0, x1)
        # reset accumulated gradients before each backward pass
        x0.zero_grad()
        x1.zero_grad()
        y.backward()
        # vanilla SGD update on the raw data
        x0.data -= lr * x0.grad.data
        x1.data -= lr * x1.grad.data
if __name__ == "__main__":
    # allow running the demo directly as a script
    test_rosenbrock()
| ashigirl96/tanka | tests/tanka/test_optimization.py | test_optimization.py | py | 512 | python | en | code | 0 | github-code | 90 |
43262091528 | from rest_framework.test import APITestCase
from content.models import Word
from content.tests.factory import TextFactory
class TextDetailTest(APITestCase):
    """Exercise GET /api/texts/<id>/ for a single Text object."""

    def setUp(self):
        # the first TextFactory-created object gets id 1 in the fresh test DB
        self.url = "/api/texts/1/"
        self.text = TextFactory()

    def test_GET(self):
        res = self.client.get(self.url)
        self.assertEqual(res.status_code, 200)
        # the serializer is expected to expose exactly id, name and body
        self.assertEqual(
            res.data,
            {
                "id": self.text.id,
                "name": self.text.name,
                "body": self.text.body,
            },
        )
class TextWordDetailTest(APITestCase):
    """Exercise GET /api/text_words/<id>/ listing the words linked to a text."""

    def setUp(self):
        self.url = "/api/text_words/1/"
        self.body = "test"
        # NOTE(review): creating a Text with body "test" apparently also
        # creates a Word row (fetched just below) — confirm this happens via
        # a signal/serializer hook in the content app.
        self.text = TextFactory(body=self.body)
        self.word = Word.objects.first()

    def test_GET(self):
        res = self.client.get(self.url)
        self.assertEqual(res.status_code, 200)
        # one TextWord entry linking the text to its single word
        self.assertEqual(
            res.json(),
            [
                {
                    "id": 1,
                    "text": self.text.id,
                    "word": self.word.name,
                    "status": self.word.status,
                },
            ],
        )
| charliewhu/Dj_Linguify | api/tests/test_endpoints.py | test_endpoints.py | py | 1,184 | python | en | code | 0 | github-code | 90 |
37070467415 | import json
import time
import uuid
import pytest
import requests
import redis
from allocation import config
def random_ref(prefix):
    """Return *prefix* plus a dash and ten random hex characters."""
    suffix = uuid.uuid4().hex[:10]
    return '{}-{}'.format(prefix, suffix)
def post_to_add_batch(ref, sku, qty, eta):
    """POST a new batch to the service API and assert it was created (201)."""
    url = config.get_api_url()
    r = requests.post(
        f'{url}/add_batch',
        json={'ref': ref, 'sku': sku, 'qty': qty, 'eta': eta}
    )
    assert r.status_code == 201
def post_to_allocate(orderid, sku, qty, expected_batch):
    """POST an allocation request; the service replies 202 (accepted, async).

    NOTE(review): *expected_batch* is currently unused — allocation happens
    asynchronously, so the chosen batch cannot be checked here; confirm
    whether the parameter should be dropped or asserted elsewhere.
    """
    url = config.get_api_url()
    r = requests.post(f'{url}/allocate', json={
        'orderid': orderid, 'sku': sku, 'qty': qty,
    })
    assert r.status_code == 202
assert r.status_code == 202
def wait_for(fn):
    """Poll *fn* every 0.1s for up to 3 seconds and return its first truthy result.

    Fails the surrounding pytest test if *fn* never returns anything truthy
    within the deadline.
    """
    timeout = time.time() + 3
    while time.time() < timeout:
        r = fn()
        if r:
            return r
        time.sleep(0.1)
    # bug fix: the message was the literal "'f{fn} ...'" — the f prefix was
    # inside the quotes, so {fn} was never interpolated
    pytest.fail(f'{fn} never returned anything truthy')
def wait_for_assertion(fn):
    """Retry *fn* every 0.1s for up to 3 seconds, swallowing AssertionErrors.

    Re-raises the last AssertionError once the deadline has passed.
    """
    deadline = time.time() + 3
    while True:
        try:
            fn()
        except AssertionError:
            if time.time() > deadline:
                raise
            time.sleep(0.1)
        else:
            return
def subscribe_to_allocated_events(r):
    """Subscribe to the 'line_allocated' redis channel and wait for the
    subscription confirmation message before returning the pubsub handle."""
    print('subscribing to allocated events')
    pubsub = r.pubsub()
    pubsub.subscribe('line_allocated')
    # the first message on a fresh subscription is the confirmation
    confirmation = wait_for(pubsub.get_message)
    assert confirmation['type'] == 'subscribe'
    return pubsub
@pytest.mark.usefixtures('postgres_db')
@pytest.mark.usefixtures('restart_api')
@pytest.mark.usefixtures('restart_redis_pubsub')
def test_change_batch_quantity_leading_to_reallocation():
    """End-to-end: shrinking the allocated batch forces reallocation to batch2."""
    # start with two batches and an order allocated to the earlier one
    orderid, sku = random_ref('o'), random_ref('s')
    batch1, batch2 = random_ref('b1'), random_ref('b2')
    post_to_add_batch(batch1, sku, 10, '2011-01-02')
    post_to_add_batch(batch2, sku, 10, '2011-01-03')
    post_to_allocate(orderid, sku, 10, expected_batch=batch1)

    # subscribe so we can observe the reallocation event
    r = redis.Redis(**config.get_redis_host_and_port())
    pubsub = subscribe_to_allocated_events(r)

    # shrink batch1 so it can no longer hold the 10-unit order
    print('sending change batch quantity for', batch1)
    r.publish('change_batch_quantity', json.dumps({
        'batchid': batch1, 'sku': sku, 'qty': 5
    }))

    # wait until a 'line_allocated' message shows the order moved to batch2
    print('waiting for reallocation event')
    messages = []
    def check_messages():
        messages.append(wait_for(pubsub.get_message))
        print(messages)
        data = json.loads(messages[-1]['data'])
        assert data['orderid'] == orderid
        assert data['batchid'] == batch2

    wait_for_assertion(check_messages)
| mchoplin/python-leap-exs | tests/e2e/test_external_events.py | test_external_events.py | py | 2,423 | python | en | code | 0 | github-code | 90 |
10425821051 | import json
from typing import Optional
from confluent_kafka import Consumer, KafkaError
from geniusrise import Spout, State, StreamingOutput
class Kafka(Spout):
    def __init__(self, output: StreamingOutput, state: State, **kwargs):
        r"""
        Initialize the Kafka class.

        Args:
            output (StreamingOutput): An instance of the StreamingOutput class for saving the data.
            state (State): An instance of the State class for maintaining the state.
            **kwargs: Additional keyword arguments.

        ## Using geniusrise to invoke via command line
        ```bash
        genius Kafka rise \
            streaming \
            --output_kafka_topic kafka_test \
            --output_kafka_cluster_connection_string localhost:9094 \
            none \
            listen \
            --args topic=my_topic group_id=my_group
        ```

        ## Using geniusrise to invoke via YAML file
        ```yaml
        version: "1"
        spouts:
            my_kafka_spout:
                name: "Kafka"
                method: "listen"
                args:
                    topic: "my_topic"
                    group_id: "my_group"
                output:
                    type: "streaming"
                    args:
                        output_topic: "kafka_test"
                        kafka_servers: "localhost:9094"
        ```
        """
        super().__init__(output, state)
        self.top_level_arguments = kwargs

    def _count(self, key: str) -> None:
        """Increment the success/failure counter for this spout in the state store."""
        current_state = self.state.get_state(self.id) or {
            "success_count": 0,
            "failure_count": 0,
        }
        current_state[key] += 1
        self.state.set_state(self.id, current_state)

    def listen(
        self,
        topic: str,
        group_id: str,
        bootstrap_servers: str = "localhost:9092",
        username: Optional[str] = None,
        password: Optional[str] = None,
    ):
        """
        📖 Start listening for data from the Kafka topic.

        Args:
            topic (str): The Kafka topic to listen to.
            group_id (str): The Kafka consumer group ID.
            bootstrap_servers (str): The Kafka bootstrap servers. Defaults to "localhost:9092".
            username (Optional[str]): The username for SASL/PLAIN authentication. Defaults to None.
            password (Optional[str]): The password for SASL/PLAIN authentication. Defaults to None.

        Raises:
            Exception: If unable to connect to the Kafka server.
        """
        config = {
            "bootstrap.servers": bootstrap_servers,
            "group.id": group_id,
            "auto.offset.reset": "earliest",
        }
        if username and password:
            config.update(
                {
                    "security.protocol": "SASL_PLAINTEXT",
                    "sasl.mechanisms": "PLAIN",
                    "sasl.username": username,
                    "sasl.password": password,
                }
            )

        consumer = Consumer(config)
        consumer.subscribe([topic])

        try:
            while True:
                try:
                    message = consumer.poll(1.0)

                    if message is None:
                        continue
                    if message.error():
                        if message.error().code() == KafkaError._PARTITION_EOF:
                            self.log.info(f"Reached end of topic {topic}, partition {message.partition()}")
                        else:
                            self.log.error(f"Error while consuming message: {message.error()}")
                    else:
                        # Use the output's save method
                        self.output.save(json.loads(message.value()))
                        self._count("success_count")
                except Exception as e:
                    self.log.error(f"Error processing Kafka message: {e}")
                    self._count("failure_count")
        finally:
            # bug fix: close() used to sit after the infinite loop and was
            # unreachable; the finally block guarantees the consumer is closed
            # on KeyboardInterrupt or any fatal error
            consumer.close()
| geniusrise/geniusrise-listeners | geniusrise_listeners/kafka.py | kafka.py | py | 4,288 | python | en | code | 1 | github-code | 90 |
195234569 | import pynput, numpy, sys, cv2, os
# run relative to this file's directory so local model/config paths resolve
# (the inner check skips the chdir when __file__ has no directory component)
if (os.getcwd() != os.path.dirname(__file__)):
    if (os.path.dirname(__file__).replace(" ", "").__len__()):
        os.chdir(os.path.dirname(__file__))
from typing import Callable, Iterator, Tuple, Union, Type, List
from keywizardUtilities.mob_utils.wizard_settings import WizardSettings, WizardUtils
from keywizardUtilities.mob_utils.wizard_utils2 import ModelManager
from keywizardUtilities.mob_utils.dungeon_utils import HalfangUtils
from pyautogui import screenshot as py_screenshot
from keyio.windowutils import WindowUtils
from keyio.keyutils import KeyUtils
class WizardPlayer:
instance_list = []
idle_function_list = []
window_controller = WindowUtils()
SELECT_WINDOW = False
    @classmethod
    def run_instances(class_) -> Type[ "WizardPlayer" ]:
        """Round-robin over all registered WizardPlayer instances until stopped.

        F9 toggles pause, F10 terminates (both via a pynput keyboard listener).
        Returns the class itself for chaining.
        """
        (unpaused, nonterminal) = (True, True)

        def fetch_instance() -> Iterator[ "WizardPlayer" ]:
            # endless generator cycling through the instances; the index only
            # advances while unpaused, so a paused loop re-yields the same one
            index = 0
            while True:
                yield (index, class_.instance_list[ index ], class_.idle_function_list[ index ])
                if (unpaused):
                    index = (index + 1) % class_.instance_list.__len__()

        def keyboard_on_press(key : Union[ pynput.keyboard.Key, pynput.keyboard.KeyCode ]) -> None:
            nonlocal unpaused, nonterminal
            if (key == pynput.keyboard.Key.f9):
                unpaused = not unpaused
            elif (key == pynput.keyboard.Key.f10):
                (unpaused, nonterminal) = (
                    False, False
                )

        WizardUtils.countdown(countdown_time = 3)
        keyboard_listener = pynput.keyboard.Listener(on_press = keyboard_on_press)
        keyboard_listener.start()
        for index, instance, idle_function in fetch_instance():
            if not (nonterminal):
                break
            if not (unpaused):
                # while paused, keep the status line refreshed
                class_.print_status("Paused",
                    instance.num_potions, instance.cur_health, instance.cur_mana, instance.health_score, instance.mana_score, instance.potion_patience
                )
            else:
                instance.execute_once(idle_function, verbose = True)
            WizardUtils.sleep(1)
        print("\nTerminated")
        keyboard_listener.stop()
        return class_
@classmethod
def initialize_instances(class_, instance_list : List[ "WizardSettings" ],
idle_function_list : List[ Callable ] = []) -> Type[ "WizardPlayer" ]:
for instance in instance_list:
class_.instance_list.append(WizardPlayer(instance))
class_.idle_function_list.append(None)
for index, idle_function in enumerate(idle_function_list):
class_.idle_function_list[ index ] = idle_function
return class_
    @classmethod
    def initialize(class_, book_model_name : str, pass_model_name : str, card_model_name : str,
        deck_model_name : str, orbH_model_name : str, orbM_model_name : str,
        digH_model_name : str, digM_model_name : str, digi_model_name : str) -> Type[ "WizardPlayer" ]:
        """Load all detection models through ModelManager.

        The nine names identify the per-UI-element models (book/pass buttons,
        cards, deck, health/mana orbs and digit readers). Returns the class
        itself for chaining.
        """
        ModelManager.initialize(
            book_model_name, pass_model_name, card_model_name,
            deck_model_name, orbH_model_name, orbM_model_name,
            digH_model_name, digM_model_name, digi_model_name
        )
        return class_
@staticmethod
def screenshot() -> numpy.ndarray:
return cv2.cvtColor(numpy.uint8(py_screenshot()), cv2.COLOR_RGB2BGR)
@staticmethod
def __two_points_center(point_1 : Tuple[ int ], point_2 : Tuple[ int ]) -> Tuple[ int ]:
return (
(point_1[0] + point_2[0]) // 2,
(point_1[1] + point_2[1]) // 2
)
    def __init__(self, player_settings : "WizardSettings") -> None:
        """Bind a WizardSettings config and precompute screen coordinates and
        runtime counters (health/mana/potion state) for this player."""
        self.player_settings = player_settings
        if (self.SELECT_WINDOW):
            # interactive window selection: F8 captures the currently focused
            # window's origin and stores it in the settings
            print("")
            sys.stdout.write("\r[ Press F8 to confirm selected window ]")
            sys.stdout.flush()
            key_controller = KeyUtils()
            keep_running = True
            @key_controller.monitor(KeyUtils.Key.KEY_F8)
            def monitor_F8(key_code : int, key_pressed : bool) -> None:
                nonlocal keep_running
                if (key_pressed):
                    (x, y, *_) = self.window_controller.get_foreground_window()
                    y += 30
                    self.player_settings.configurations["window"]["coord"] = [ x, y ]
                    keep_running = False
            key_controller.initialize_monitors()
            key_controller.start_thread()
            # busy-wait until F8 confirms the selection
            while (keep_running):
                pass
            key_controller.stop_thread()
            sys.stdout.write("\r[ Window Selected ]                    ")
            sys.stdout.flush()
            print("\n")
        # runtime counters start from the configured full/maximum values
        (self.health_score, self.mana_score, self.should_idle, self.num_potions, self.cur_mana, self.cur_health) = (
            0,
            0,
            True,
            self.player_settings.configurations["runtime"]["max_potions"],
            self.player_settings.configurations["runtime"]["full_mana"],
            self.player_settings.configurations["runtime"]["full_health"]
        )
        # absolute screen regions for the UI elements, resolved from config
        (self.book_bounding_box, self.pass_bounding_box, self.deck_bounding_box, self.orbs_bounding_box, self.card_bounding_box) = (
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "book"),
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "pass"),
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "deck"),
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "orbs"),
            WizardUtils.fetch_card_coordinates(self.player_settings.configurations)
        )
        (self.book_point, self.pass_point) = (
            WizardUtils.bounding_box_centroid(self.book_bounding_box),
            WizardUtils.bounding_box_centroid(self.pass_bounding_box)
        )
        # idle click target: midpoint between the book and pass buttons
        self.idle_point = self.__two_points_center(self.book_point, self.pass_point)
        self.red_frame = self.player_settings.get_colored_mask([ 0, 0, 255 ],
            *WizardUtils.take_screenshot(resize_ratio = 1.0, color_mode = cv2.COLOR_RGB2BGR).shape[ 0:2 ]
        )
        self.window_bounding_box = WizardUtils.fetch_absolute_bounding_box(
            self.player_settings.configurations, "window"
        )
        (self.digH_bounding_box, self.digM_bounding_box) = WizardUtils.fetch_number_coordinates(self.player_settings.configurations)
        (self.panelH_bounding_box, self.panelM_bounding_box) = (
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "health_panel"),
            WizardUtils.fetch_absolute_bounding_box(self.player_settings.configurations, "mana_panel")
        )
        self.detect_digits = self.player_settings.configurations["runtime"]["detect_digits"]
        self.potion_patience = self.player_settings.configurations["runtime"]["fill_patience"]
        # debug-display settings
        self.window_name = "Keywizard"
        self.display_quantity = 7
        self.show_window = False
	def drink_potion(self) -> "WizardPlayer":
		"""
		Consume a potion once low mana / low health has persisted for
		``fill_patience`` consecutive idle checks.  Only acts while
		``should_idle`` is set (i.e. between battles).  Returns self.
		"""
		if (self.should_idle):
			# mana score predicted by model indicates low mana
			low_mana_1 = (self.mana_score <= self.player_settings.configurations["runtime"]["potion_thresh"])
			# mana score tracked via counting indicates low mana
			low_mana_2 = (self.cur_mana <= self.player_settings.configurations["runtime"]["fill_mana"])
			# health score predicted by model indicates low health
			low_health = (self.health_score <= 0)
			if (((low_mana_1) and (low_mana_2)) or (low_health)):
				self.potion_patience -= 1
			elif (self.potion_patience != self.player_settings.configurations["runtime"]["fill_patience"]):
				self.potion_patience = self.player_settings.configurations["runtime"]["fill_patience"]
			if (self.potion_patience <= 0):
				def click_idle() -> None:
					# click a neutral spot to make sure the game window has focus
					WizardUtils.move_mouse(self.idle_point, time_interval = 0.20)
					WizardUtils.click_mouse(hold_time = 0.3)
				click_idle()
				if (self.num_potions > 0):
					# ctrl+'o' — presumably the in-game "drink potion" hotkey; TODO confirm keybinding
					pynput.keyboard.Controller().press(pynput.keyboard.Key.ctrl)
					WizardUtils.sleep(0.5)
					pynput.keyboard.Controller().press(pynput.keyboard.KeyCode(char = 'o'))
					WizardUtils.sleep(0.1)
					pynput.keyboard.Controller().release(pynput.keyboard.KeyCode(char = 'o'))
					WizardUtils.sleep(0.3)
					pynput.keyboard.Controller().release(pynput.keyboard.Key.ctrl)
					self.cur_mana = self.player_settings.configurations["runtime"]["full_mana"]
					self.num_potions = self.num_potions - 1
				elif (self.num_potions > -1):
					# out of potions: press End once as a fallback action
					# NOTE(review): counter keeps decrementing to -1 so this fires only once — confirm intended
					pynput.keyboard.Controller().press(pynput.keyboard.Key.end)
					WizardUtils.sleep(0.2)
					pynput.keyboard.Controller().release(pynput.keyboard.Key.end)
					self.num_potions = self.num_potions - 1
				self.potion_patience = self.player_settings.configurations["runtime"]["fill_patience"]
		return self
	def basic_idle(self, idle_time : Union[ float, int ] = 0.05) -> "WizardPlayer":
		"""
		Default between-battle idle routine: occasionally open/close the
		spell book (with probability ``book_rate``), then tap 'a' and 'd'
		for *idle_time* seconds each to move in place.  Clears
		``should_idle`` when done.  Returns self.
		"""
		if (self.should_idle):
			def click_idle() -> None:
				# click a neutral spot to make sure the game window has focus
				WizardUtils.move_mouse(self.idle_point, time_interval = 0.20)
				WizardUtils.click_mouse(hold_time = 0.3)
			if (numpy.random.uniform(0, 1) <= self.player_settings.configurations["runtime"]["book_rate"]):
				# open the spell book, wait, then close it again
				click_idle()
				WizardUtils.move_mouse(self.book_point, time_interval = 0.75)
				WizardUtils.click_mouse(hold_time = 0.3)
				WizardUtils.sleep(2.2)
				WizardUtils.move_mouse(self.book_point, time_interval = 0.50)
				WizardUtils.click_mouse(hold_time = 0.3)
			click_idle()
			pynput.keyboard.Controller().press('a')
			WizardUtils.sleep(idle_time)
			pynput.keyboard.Controller().release('a')
			WizardUtils.sleep(0.1)
			pynput.keyboard.Controller().press('d')
			WizardUtils.sleep(idle_time)
			pynput.keyboard.Controller().release('d')
			WizardUtils.sleep(0.4)
			self.should_idle = False
		return self
	def halfang_idle(self) -> "WizardPlayer":
		"""
		Idle routine for farming the Halfang dungeon: exit the cave, wait
		out the loading screen, re-enter the dungeon, and run back into
		combat.  Clears ``should_idle`` when done.  Returns self.
		"""
		def click_idle() -> None:
			# click a neutral spot to make sure the game window has focus
			WizardUtils.move_mouse(WizardUtils.point_by_offset(self.idle_point, (5, 5)), time_interval = 0.5)
			WizardUtils.click_mouse(hold_time = 0.2)
		def determine_state() -> int:
			"""
			Different States:
				[ 0 ] - IDLE
				[ 1 ] - COMBAT
				[ 2 ] - ANIMATION / SCREEN LOADING / WINDOW MINIMIZED
			"""
			# >> determine the current state >>
			screenshot = self.screenshot()
			current_state = ModelManager.determine_state(
				WizardUtils.crop_image(screenshot, self.book_bounding_box[0:2], self.book_bounding_box[2:4]),
				WizardUtils.crop_image(screenshot, self.pass_bounding_box[0:2], self.pass_bounding_box[2:4])
			)
			# << determine the current state <<
			return current_state
		def entrance_exit_success() -> bool:
			# whether or not the loading screen is present
			return (determine_state() == ModelManager.STATE_ANIMATION)
		def wait_success() -> bool:
			# whether or not the loading screen has disappeared
			return (determine_state() == ModelManager.STATE_IDLE)
		def battle_reentrance_success() -> bool:
			# whether or not we have entered combat
			return (determine_state() == ModelManager.STATE_COMBAT)
		if (self.should_idle):
			# >> turning around to exit the cave >>
			click_idle()
			HalfangUtils.exit_halfang_2(entrance_exit_success)
			# << turning around to exit the cave <<
			# >> waiting for loading screen to disappear >>
			while not (wait_success()):
				WizardUtils.sleep(0.1)
			WizardUtils.sleep(1.0)
			# << waiting for loading screen to disappear <<
			# >> pressing 'x' to re-enter the cave >>
			click_idle()
			HalfangUtils.enter_dungeon(entrance_exit_success)
			# << pressing 'x' to re-enter the cave <<
			# >> waiting for loading screen to disappear >>
			while not (wait_success()):
				WizardUtils.sleep(0.1)
			WizardUtils.sleep(0.75)
			# << waiting for loading screen to disappear <<
			# >> running to enter combat with Halfang >>
			click_idle()
			HalfangUtils.enter_battle(battle_reentrance_success)
			# << running to enter combat with Halfang
			self.should_idle = False
		return self
	def select_cards(self, verbose : bool = True) -> "WizardPlayer":
		"""
		One combat turn: count the cards on screen, pick an attack / heal /
		enchant combination via ModelManager, click the chosen card(s) (or
		the pass button when none are usable), and set ``should_idle`` so
		the next idle phase runs.  Returns self.
		"""
		def click_idle() -> None:
			# click a neutral spot to make sure the game window has focus
			WizardUtils.move_mouse(WizardUtils.point_by_offset(self.idle_point, (5, 5)), time_interval = 0.5)
			WizardUtils.click_mouse(hold_time = 0.2)
		def click_pass() -> None:
			WizardUtils.move_mouse(WizardUtils.point_by_offset(self.pass_point, (5, 2)), time_interval = 1.0)
			WizardUtils.click_mouse(hold_time = 0.3)
		def should_heal() -> bool:
			"""
			Criteria:
				[ 1 ] Activation by probability ( heal rate )
				[ 2 ] Health score lower than threshold ( heal thresh )
			"""
			# >> evaluates whether or not healing is necessary
			by_rate = ((numpy.random.uniform(0, 1) <= self.player_settings.configurations["runtime"]["heal_rate"] ))
			by_score = ((self.health_score <= self.player_settings.configurations["runtime"]["heal_thresh"]))
			# << evaluates whether or not healing is necessary
			return ((by_rate) and (by_score))
		def verbose_string(first_card : Union[ type(None), int ],
						   second_card : Union[ type(None), int ],
						   action_type : int ) -> None:
			# build and print the status-bar summary of the chosen action
			nonlocal card_quantity
			print_strings = [ "C-{}".format(card_quantity), "PASS" ]
			if (action_type == ModelManager.ACTION_ATTACK):
				if (second_card is None):
					print_strings[1] = "{}".format(first_card)
				else:
					print_strings[1] = "{}+{}".format(first_card, second_card)
			elif (action_type == ModelManager.ACTION_HEAL):
				print_strings[1] = "HEAL"
			self.print_status("Combat", self.num_potions, self.cur_health, self.cur_mana, self.health_score, self.mana_score, self.potion_patience, print_strings)
		def adjust_mana(first_card : Union[ type(None), int ],
						second_card : Union[ type(None), int ],
						action_type : int ) -> None:
			# when digit detection is off, track mana by subtracting configured costs
			if (action_type == ModelManager.ACTION_ATTACK):
				self.cur_mana -= self.player_settings.configurations["runtime"]["attack_mana"]
			elif (action_type == ModelManager.ACTION_HEAL):
				self.cur_mana -= self.player_settings.configurations["runtime"]["heal_mana"]
		screenshot = self.screenshot()
		# evaluates the number of cards present on the screen
		card_quantity = ModelManager.count_cards(
			WizardUtils.crop_image(screenshot, self.deck_bounding_box[0:2], self.deck_bounding_box[2:4])
		)
		click_idle()
		if (card_quantity == 0):
			# pass this round if no cards are present
			click_pass()
		else:
			((first_card, second_card), (first_type, second_type), action_type) = ModelManager.find_card_combination([
				WizardUtils.crop_image(screenshot, bounding_box[ 0:2 ], bounding_box[ 2:4 ])
				for bounding_box in self.card_bounding_box[ card_quantity - 1 ]
			], should_heal = should_heal, predictor = self.player_settings.configurations["runtime"]["card_predictor"])
			if (verbose):
				verbose_string(first_card, second_card, action_type)
			if (self.detect_digits == 0):
				adjust_mana(first_card, second_card, action_type)
			if ((first_card is None) and (second_card is None)):
				# pass this round if no cards are available
				click_pass()
			else:
				if (first_card is not None):
					# select the first card ( click to attack, heal or initiate enchantment )
					WizardUtils.click_card(self.card_bounding_box, card_quantity, first_card + 1)
				if (second_card is not None):
					# select the second card ( click to complete enchantment )
					WizardUtils.click_card(self.card_bounding_box, card_quantity, second_card + 1)
					# the second card shifts to the left if the first card was to its left
					if (first_card < second_card):
						second_card -= 1
					# select the second card after shifting ( click to attack )
					WizardUtils.click_card(self.card_bounding_box, card_quantity - 1, second_card + 1, move_time = 1.5)
		click_idle()
		self.should_idle = True
		return self
	def execute_once(self, idle_function : Union[ type(None), Callable ] = None, verbose : bool = True) -> "WizardPlayer":
		"""
		One main-loop iteration: screenshot the game, score health/mana,
		classify the current state (animation / idle / combat) and dispatch
		to drink_potion + *idle_function* (defaults to basic_idle) or
		select_cards accordingly.  Also drives the optional debug overlay
		window.  Returns self.
		"""
		def crop_multiple(screenshot : numpy.ndarray, bounding_boxes : List[ Tuple[ int ] ]) -> numpy.ndarray:
			# stack the crops for all bounding boxes into one array batch
			return numpy.stack([
				WizardUtils.crop_image(screenshot, bounding_box[ 0:2 ], bounding_box[ 2:4 ])
				for bounding_box in bounding_boxes
			])
		def determine_health_mana_values(screenshot : numpy.ndarray) -> Tuple[ int ]:
			# first classify how many digits each panel shows, then read them
			health_value = ModelManager.classify_digits(crop_multiple(screenshot, self.digH_bounding_box[
				ModelManager.determine_health_digit_count(crop_multiple(screenshot, [self.panelH_bounding_box])[0]) - 1]), combine_digits = True)
			mana_value = ModelManager.classify_digits(crop_multiple(screenshot, self.digM_bounding_box[
				ModelManager.determine_mana_digit_count(crop_multiple(screenshot, [self.panelM_bounding_box])[0]) - 1]), combine_digits = True)
			return (health_value, mana_value)
		if (idle_function is None):
			idle_function = self.basic_idle
		screenshot = self.screenshot()
		(self.health_score, self.mana_score) = ModelManager.detect_health_mana_scores(
			WizardUtils.crop_image(screenshot, self.orbs_bounding_box[ 0:2 ], self.orbs_bounding_box[ 2:4 ])
		)
		if (self.detect_digits == 1):
			(self.cur_health, self.cur_mana) = determine_health_mana_values(screenshot)
		else:
			# digit OCR disabled: health is unknown (printed as "?")
			self.cur_health = "?"
		current_state = ModelManager.determine_state(
			WizardUtils.crop_image(screenshot, self.book_bounding_box[ 0:2 ], self.book_bounding_box[ 2:4 ]),
			WizardUtils.crop_image(screenshot, self.pass_bounding_box[ 0:2 ], self.pass_bounding_box[ 2:4 ])
		)
		if (self.show_window):
			# overlay the configured mask in red and show the debug window;
			# keys '1'-'7' change how many card slots the mask displays
			target_mask = self.player_settings.generate_mask(
				self.red_frame.shape, card_quantity = self.display_quantity, resize_ratio = 1)
			display_frame = numpy.where(target_mask, self.red_frame, screenshot)
			display_frame = WizardUtils.crop_image(
				display_frame, self.window_bounding_box[ 0:2 ], self.window_bounding_box[ 2:4 ])
			cv2.imshow(self.window_name, display_frame)
			wait_key = cv2.waitKey(1)
			if (ord("1") <= wait_key <= ord("7")):
				self.display_quantity = (wait_key - ord("0"))
		elif (cv2.getWindowProperty(self.window_name, cv2.WND_PROP_VISIBLE)):
			cv2.destroyWindow(self.window_name)
		if (current_state == ModelManager.STATE_ANIMATION):
			if (verbose):
				self.print_status("Animation", self.num_potions, "?", "?", "?", "?", self.potion_patience)
		elif (current_state == ModelManager.STATE_IDLE):
			if (verbose):
				self.print_status("Idle", self.num_potions, self.cur_health, self.cur_mana, self.health_score, self.mana_score, self.potion_patience)
			self.drink_potion(); idle_function()
		elif (current_state == ModelManager.STATE_COMBAT):
			if (verbose):
				self.print_status("Combat", self.num_potions, self.cur_health, self.cur_mana, self.health_score, self.mana_score, self.potion_patience)
			self.select_cards(verbose = verbose)
		return self
@staticmethod
def print_status(state : str, num_potions : int, cur_health : int, cur_mana : int, health_score : int, mana_score : int, potion_patience : int, additional_strings : Union[ List[ str ], Tuple[ str ] ] = [], print_length : int = 80) -> None:
print_string = " ".join([
"[ {} ]".format(state),
"[ P-{} ]".format(num_potions),
"[ H-{} ]".format(cur_health),
"[ M-{} ]".format(cur_mana),
"[ R-{} ]".format(health_score),
"[ B-{} ]".format(mana_score),
"[ W-{} ]".format(potion_patience)
] + [
"[ {} ]".format(target_string)
for target_string in additional_strings
])
sys.stdout.write("\r" + WizardUtils.pad_string(print_string, print_length))
sys.stdout.flush()
	def run_bot(self, idle_function : Union[ type(None), Callable ] = None) -> "WizardPlayer":
		"""
		Main bot loop.  Global hotkeys (via pynput listener):
			F9  - pause / resume
			F10 - terminate the loop
			F11 - toggle the debug overlay window
		Runs execute_once(*idle_function*) once per second while unpaused,
		prints "Paused"/"Terminated" status lines otherwise.  Returns self.
		"""
		def keyboard_on_press(key : Union[ pynput.keyboard.Key, pynput.keyboard.KeyCode ]) -> None:
			nonlocal unpaused, nonterminal
			if (key == pynput.keyboard.Key.f9):
				unpaused = not unpaused
			elif (key == pynput.keyboard.Key.f10):
				(unpaused, nonterminal) = (
					False, False
				)
			elif (key == pynput.keyboard.Key.f11):
				self.show_window = not self.show_window
		(unpaused, nonterminal) = (True, True)
		keyboard_listener = pynput.keyboard.Listener(on_press = keyboard_on_press)
		keyboard_listener.start()
		WizardUtils.countdown(countdown_time = 3)
		while (nonterminal):
			while (unpaused):
				self.execute_once(idle_function)
				WizardUtils.sleep(1)
			# paused (or terminating): hide the overlay window if it is open
			if (cv2.getWindowProperty(self.window_name, cv2.WND_PROP_VISIBLE)):
				cv2.destroyWindow(self.window_name)
			self.print_status("Paused", self.num_potions, self.cur_health, self.cur_mana, self.health_score, self.mana_score, self.potion_patience)
			WizardUtils.sleep(0.25)
		self.print_status("Terminated", self.num_potions, self.cur_health, self.cur_mana, self.health_score, self.mana_score, self.potion_patience)
		print("\n")
		keyboard_listener.stop()
		cv2.destroyAllWindows()
		return self
def main(book_model_name : str,
		 pass_model_name : str,
		 deck_model_name : str,
		 card_model_name : str,
		 orbH_model_name : str,
		 orbM_model_name : str,
		 digH_model_name : str,
		 digM_model_name : str,
		 digi_model_name : str,
		 config_file : str) -> None:
	"""
	Interactive console entry point: loads the models once, then loops on a
	menu — [P]lay (optionally 'ph' for Halfang farming), [C]onfigure,
	[S]ave settings, [E]xit.
	"""
	player_settings = WizardSettings(config_file)
	WizardPlayer.initialize(
		book_model_name, pass_model_name, card_model_name,
		deck_model_name, orbH_model_name, orbM_model_name,
		digH_model_name, digM_model_name, digi_model_name
	)
	while True:
		print("--options")
		print("[ P ] Play [ append 'H' to farm Halfang dungeon ]")
		print("[ C ] Configure Settings")
		print("[ S ] Save Settings")
		print("[ E ] Exit")
		print("")
		user_choice = input("> ").lower()
		if (user_choice.startswith('p')):
			# ensure a usable configuration (in memory or loadable from disk)
			if not (player_settings.settings_ready() or player_settings.load()):
				player_settings.initialize_configurations().full_configure()
			wizard_player = WizardPlayer(player_settings = player_settings)
			idle_function = ((wizard_player.halfang_idle) if (user_choice.endswith("h")) else (None))
			wizard_player.run_bot(idle_function)
		elif (user_choice.startswith('c')):
			# reconfigure existing settings when available, otherwise start fresh
			# (deduplicated: settings_ready() / load() previously had identical branches)
			if (player_settings.settings_ready() or player_settings.load()):
				player_settings.configure()
			else:
				player_settings.initialize_configurations().full_configure()
		elif (user_choice.startswith('s')):
			player_settings.save()
		elif (user_choice.startswith('e')):
			return
if (__name__ == "__main__"):
	# paths to the frozen TensorFlow graphs used by ModelManager
	book_model_name = "./keywizardModel/book_predictor.pb"
	pass_model_name = "./keywizardModel/pass_predictor.pb"
	card_model_name = [ "./keywizardModel/meteor/card_predictor.pb", "./keywizardModel/tempest/card_predictor.pb" ]
	deck_model_name = "./keywizardModel/deck_predictor.pb"
	orbH_model_name = "./keywizardModel/orbH_predictor.pb"
	orbM_model_name = "./keywizardModel/orbM_predictor.pb"
	digH_model_name = "./keywizardModel/digH_predictor.pb"
	digM_model_name = "./keywizardModel/digM_predictor.pb"
	digi_model_name = "./keywizardModel/digi_predictor.pb"
	config_file = "./keywizardConfig/wizard_config.json"
	# test mode selector:
	#   1 - interactive CLI (main)
	#   2 - run two pre-configured instances side by side
	#   3 - interactive CLI with window selection enabled
	test = 3
	if (test == 1):
		main(
			book_model_name, pass_model_name, deck_model_name,
			card_model_name, orbH_model_name, orbM_model_name,
			digH_model_name, digM_model_name, digi_model_name,
			config_file
		)
	elif (test == 2):
		WizardPlayer.initialize(
			book_model_name, pass_model_name, card_model_name,
			deck_model_name, orbH_model_name, orbM_model_name,
			digH_model_name, digM_model_name, digi_model_name
		)
		instance_list = [
			WizardSettings("./keywizardConfig/wizard_config.json"),
			WizardSettings("./keywizardConfig/wizard_config_sub.json")
		]
		for index, _ in enumerate(instance_list):
			instance_list[ index ].load()
		WizardPlayer.initialize_instances(instance_list)
		WizardPlayer.run_instances()
	elif (test == 3):
		WizardPlayer.SELECT_WINDOW = True
		main(
			book_model_name, pass_model_name, deck_model_name,
			card_model_name, orbH_model_name, orbM_model_name,
			digH_model_name, digM_model_name, digi_model_name,
			config_file
) | keyywind/keywiz | Mob-Farmer.py | Mob-Farmer.py | py | 30,532 | python | en | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
from flask import request, jsonify
from application import app
from application.api import interface as InterfaceAPI
from application.api import use_case as UseCaseAPI
from application.util.parameter import search_parameter
from application.util.exception import try_except
from application.controller import login_required, user_real_name, cur_user
@app.route('/interface/add', methods=['POST'])
@login_required
@try_except
def add_interface():
    """Create a new interface record from the POSTed JSON body."""
    payload = request.get_json()
    # stamp the record with the real name of the logged-in creator
    payload['create_by'] = user_real_name()
    created = InterfaceAPI.add_interface(**payload)
    return jsonify({'success': True, 'res': created})
@app.route('/interface/info', methods=['POST'])
@login_required
@try_except
def get_interface():
    """
    Return the interfaces matching the POSTed filter rules (all interfaces
    when no rules are given).  Pagination: 1-based ``pageIndex`` (default 1)
    and ``pageSize`` (default 10); passing 0 for either disables paging and
    returns the full result list.
    """
    param_json = request.get_json()
    page_index = int(param_json.pop('pageIndex')) if 'pageIndex' in param_json else 1
    page_size = int(param_json.pop('pageSize')) if 'pageSize' in param_json else 10
    # remaining keys are passed through as filter criteria
    result = InterfaceAPI.get_interface(**param_json)
    if not (page_index and page_size):
        return jsonify({'success': True, 'res': result})
    return jsonify({'success': True, 'res': result[(page_index - 1) * page_size:page_index * page_size]})
@app.route('/interface/count', methods=['POST'])
@login_required
@try_except
def query_interface_count():
    """Return the total number of interfaces matching the POSTed filters."""
    total = InterfaceAPI.query_interface_count(**request.get_json())
    return jsonify({'success': True, 'res': total})
@app.route('/interface/update', methods=['POST'])
@login_required
@try_except
def modify_interface():
    """
    Update an interface's information.
    1. Fetch the interface's previous parameter info.
    2. Write the updated interface content to the database.
    3. If old and new parameters are identical, finish and return.
    4. Otherwise, synchronize the parameter records every use case passes
       to this interface (delete removed parameters, add new ones).
    """
    interface_id = request.get_json().get('id')
    interface_old_info = InterfaceAPI.get_interface(id=interface_id)[0]
    InterfaceAPI.modify_interface(**request.get_json())
    interface_new_info = InterfaceAPI.get_interface(id=interface_id)[0]
    relation_list = UseCaseAPI.get_relation(interface_id=interface_id)
    # parameters are extracted from the concatenated header + payload + url text
    old_analysis_str = ''.join([interface_old_info.get('interface_header'),
                                interface_old_info.get('interface_json_payload'),
                                interface_old_info.get('interface_url')])
    new_analysis_str = ''.join([interface_new_info.get('interface_header'),
                                interface_new_info.get('interface_json_payload'),
                                interface_new_info.get('interface_url')])
    old_param_list = search_parameter(old_analysis_str)
    new_param_list = search_parameter(new_analysis_str)
    # symmetric difference: parameters present in exactly one of old/new
    update_param_list = list(set(old_param_list) ^ set(new_param_list))
    if len(update_param_list) == 0:
        return jsonify({'success': True})
    else:
        for param in update_param_list:
            # a parameter may carry a default as "name==value"
            if '==' in param:
                parameter_value = param.split('==')[1]
                parameter_name = param.split('==')[0]
            else:
                parameter_value = ''
                parameter_name = param
            if param in old_param_list:
                # parameter was removed: drop it from every related use case
                for p_relation in relation_list:
                    UseCaseAPI.del_case_parameter_relation(parameter_name=parameter_name, relation_id=p_relation['id'])
            else: # newly added parameters are attached to every related use case
                for relation in relation_list:
                    kwargs = {'relation_id': relation['id'],
                              'parameter_name': parameter_name,
                              'parameter_value': parameter_value}
                    UseCaseAPI.add_case_parameter_relation(**kwargs)
        return jsonify({'success': True})
@app.route('/interface/delete', methods=['POST'])
@login_required
@try_except
def delete_interface():
    """
    Delete an interface.
    1. Mark the interface row as deleted in the database.
    2. Mark every use-case/interface relation as deleted.
    3. Mark every parameter record the use cases pass to this interface
       as deleted.
    Only super managers may delete an interface that still has linked use
    cases; other users must unlink the use cases first.
    """
    user_id = cur_user()
    interface_id = request.get_json().get('id')
    relation_list = UseCaseAPI.get_relation(interface_id=interface_id)
    if relation_list and user_id not in app.config['SUPER_MANAGER']:
        # error message (Chinese): "linked use cases exist; unlink them (or be an admin) to delete"
        return jsonify({'success': False, 'error': '存在关联用例, 需解除关联用例(或者管理员)删除'})
    for interface_relation in relation_list:
        parameter_info = UseCaseAPI.get_case_parameter_relation(id=interface_relation['id'])
        for s_prama_relation in parameter_info:
            UseCaseAPI.del_case_parameter_relation(id=s_prama_relation['id'])
        UseCaseAPI.del_relation(interface_relation['id'])
    InterfaceAPI.del_interface(**request.get_json())
    return jsonify({'success': True})
| cnboysliber/AutoTest | application/controller/interface.py | interface.py | py | 5,087 | python | en | code | 4 | github-code | 90 |
# -*- coding: utf-8 -*-
__author__ = "snake"
"""
Appium的二次封装
"""
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Config():
    """
    Desired-capability presets for AppiumDesktop test runs.
    Fix: three presets misspelled the capability key as ``platfrmVersion``;
    Appium silently ignores unknown keys, so the platform version was never
    applied.  The key is now ``platformVersion`` everywhere.
    """
    # LeiDian emulator, ApiDemos sample app
    LEIDIAN_API_DESIRED_CAPS = {
        "platformName":'Android',
        "platformVersion":"5.1.1",
        "deviceName":"vivo x6plus",
        "appPackage":"io.appium.android.apis",
        "appActivity":".ApiDemos",
        "automationName": "UIAutomator2",
        "unicodeKeyboard": True,
        "resetKeyboard": True
    }
    # LeiDian emulator, Zhihu app
    LEIDIAN_ZHIHU_DESIRED_CAPS = {
        "platformName":'Android',
        "platformVersion":"5.1.1",
        "deviceName":"vivo x6plus",
        "appPackage":"com.zhihu.android",
        "appActivity":".app.ui.activity.MainActivity",
        "automationName": "UIAutomator2",
        "unicodeKeyboard": True,
        "resetKeyboard": True
    }
    # Meizu M1 Note device, ApiDemos sample app
    M1NOTE_API_DESIRED_CAPS = {
        "platformName":'Android',
        "platformVersion":"5.1",
        "deviceName":"m1note",
        "appPackage":"io.appium.android.apis",
        "appActivity":".ApiDemos",
        "automationName": "UIAutomator2",
        "unicodeKeyboard": True,
        "resetKeyboard": True
    }
    # Meizu M1 Note device, Sichuan Mobile app
    M1NOTE_SCMCC_DESIRED_CAPS = {
        "platformName":'Android',
        "platformVersion":"5.1",
        "deviceName":"m1note",
        "appPackage":"com.sunrise.scmbhc",
        "appActivity":".ui.activity.home.HomeActivity",
        "automationName": "UIAutomator2",
        "unicodeKeyboard": True,
        "resetKeyboard": True
    }
class PyAppium():
    """
    A thin convenience wrapper around the Appium WebDriver.
    """
    def __init__(self, url="http://localhost:4723/wd/hub", desired_caps={}, timeout=10):
        """
        Open a remote Appium session at *url* with *desired_caps*;
        *timeout* (seconds) is the explicit wait used by the find helpers.
        """
        self._driver = webdriver.Remote(url, desired_caps)
        self._timeout = timeout
    def get_origina_driver(self):
        """
        Return the original (unwrapped) Appium driver.
        """
        return self._driver
    def find_element(self, locator):
        """
        Find a single element, waiting up to the configured timeout.
        Locator strategies (first tuple item):
            id: "id"
            xpath: "xpath"
            accessibility_id: "aid"
            android_uiautomator: "aui"
        """
        if not isinstance(locator, tuple) or len(locator) != 2:
            raise Exception("输入的参数必须是(by, value)格式!")
        # expand the shorthand strategy names
        if locator[0] == "aid":
            locator = (MobileBy.ACCESSIBILITY_ID, locator[1])
        if locator[0] == "aui":
            locator = (MobileBy.ANDROID_UIAUTOMATOR, locator[1])
        try:
            return WebDriverWait(self._driver, self._timeout).until(lambda s: s.find_element(*locator))
        except:
            raise Exception("未找到元素{}!".format(locator))
    def find_elements(self, locator):
        """
        Find multiple elements, waiting up to the configured timeout.
        Locator strategies (first tuple item):
            id: "id"
            xpath: "xpath"
            accessibility_id: "aid"
            android_uiautomator: "aui"
        """
        if not isinstance(locator, tuple) or len(locator) != 2:
            raise Exception("输入的参数必须是(by, value)格式!")
        # expand the shorthand strategy names
        if locator[0] == "aid":
            locator = (MobileBy.ACCESSIBILITY_ID, locator[1])
        if locator[0] == "aui":
            locator = (MobileBy.ANDROID_UIAUTOMATOR, locator[1])
        try:
            return WebDriverWait(self._driver, self._timeout).until(lambda s: s.find_elements(*locator))
        except:
            raise Exception("未找到元素{}!".format(locator))
    def type_zh(self, locator, keywords):
        """
        Text input that supports Chinese characters (uses send_keys).
        """
        self.find_element(locator).send_keys(keywords)
    def type(self, locator, keywords):
        """
        Fast text input (driver.set_value); does not support Chinese.
        """
        self._driver.set_value(self.find_element(locator), keywords)
    def click(self, locator):
        """
        Click the element identified by *locator*.
        """
        self.find_element(locator).click()
    def switch_to_alert(self):
        """
        Switch the Appium context to the alert.
        """
        self._driver.switch_to_alert()
    def switch_to_default_content(self):
        """
        Switch the Appium context back to the default content.
        """
        self._driver.switch_to_default_content()
    def does_exist(self, locator):
        """
        Check dynamically whether the element exists.
        - element exists: True
        - element does not exist: False
        """
        try:
            self.find_element(locator)
            return True
        except:
            return False
    def does_toast_exist(self, text=None):
        """
        Check whether a toast containing *text* is shown.
        - present: (element, True)
        - absent:  (None, False)
        Note: requires the launch capability "automationName": "UIAutomator2"!
        """
        try:
            toast_loc = ("xpath", ".//*[contains(@text,'{}')]".format(text))
            e = self.find_element(toast_loc)
            return e, True
        except:
            return None, False
if __name__ == "__main__":
    # desired_caps = {}
    # desired_caps['platformName'] = 'Android' # which platform to launch (fixed) -> Android
    # desired_caps['platformVersion'] = '5.1.1' # Android version: adb shell getprop ro.build.version.release
    # desired_caps['deviceName'] = 'vivo x6plus d' # phone/emulator model: adb shell getprop ro.product.model
    # desired_caps['appPackage'] = 'io.appium.android.apis' # app package: adb shell dumpsys activity | findstr "mFocusedActivity"
    # desired_caps['appActivity'] = '.ApiDemos' # app launch activity: adb shell dumpsys activity | findstr "mFocusedActivity"
    # desired_caps['unicodeKeyboard'] = True # enable Chinese input support
    # desired_caps['resetKeyboard'] = True # use Appium's built-in keyboard
    # pyappium = PyAppium(desired_caps=desired_caps)
    pyappium = PyAppium(desired_caps=Config.M1NOTE_API_DESIRED_CAPS)
    # 1. Locator usage examples
    # locator = ("id", "android:id/text1")
    # locator = ("xpath", '//android.widget.TextView[@content-desc="Accessibility"]')
    # locator = ("aid", "Accessibility")
    # locator = ("aui", 'new UiSelector().text("Accessibility")')
    # pyappium.find_element(locator).click()
    # 2. Element interaction: navigate ApiDemos App > Search > Invoke Search and type
    app = ("aid", "App")
    app_search = ("aid", "Search")
    app_search_invoke = ("aid", "Invoke Search")
    app_search_invoke_appdata = ("id", "io.appium.android.apis:id/txt_query_appdata")
    pyappium.click(app)
    pyappium.click(app_search)
    pyappium.click(app_search_invoke)
    pyappium.type(app_search_invoke_appdata, "hello appium!")
    pyappium.type_zh(app_search_invoke_appdata, "hello appium!")
    print(pyappium.does_exist(app))
| testjie/PyAppium | pyappium.py | pyappium.py | py | 7,175 | python | en | code | 1 | github-code | 90 |
# TASK:
#
# Write a random tester for the Queue class.
# The random tester should repeatedly call
# the Queue methods on random input in a
# semi-random fashion. for instance, if
# you wanted to randomly decide between
# calling enqueue and dequeue, you would
# write something like this:
#
# q = Queue(500)
# if (random.random() < 0.5):
# q.enqueue(some_random_input)
# else:
# q.dequeue()
#
# You should call the enqueue, dequeue,
# and checkRep methods several thousand
# times each.
import array
import random
class Queue:
    """Fixed-capacity FIFO queue of ints backed by a circular buffer.

    Invariant (see checkRep): head/tail are valid buffer indices and,
    together with size, consistently describe the occupied region.
    """
    def __init__(self, size_max):
        assert size_max > 0
        self.max = size_max
        self.head = 0
        self.tail = 0
        self.size = 0
        self.data = array.array('i', range(size_max))
    def empty(self):
        """Report whether the queue holds no elements."""
        return not self.size
    def full(self):
        """Report whether the queue is at capacity."""
        return self.size == self.max
    def enqueue(self, x):
        """Append x at the tail; return True on success, False when full."""
        if self.full():
            return False
        self.data[self.tail] = x
        self.tail = (self.tail + 1) % self.max  # wrap around the buffer end
        self.size += 1
        return True
    def dequeue(self):
        """Remove and return the head element, or None when empty."""
        if self.empty():
            return None
        x = self.data[self.head]
        self.head = (self.head + 1) % self.max  # wrap around the buffer end
        self.size -= 1
        return x
    def checkRep(self):
        """Assert the circular-buffer representation invariant."""
        assert 0 <= self.tail < self.max
        assert 0 <= self.head < self.max
        if self.tail > self.head:
            assert self.tail - self.head == self.size
        elif self.tail < self.head:
            assert self.head - self.tail == self.max - self.size
        else:
            # head == tail happens only when completely empty or full
            assert self.size in (0, self.max)
# Write a random tester for the Queue class.
def test():
    """Randomized stress test for Queue.

    For 100 random capacities, walk the queue through a full lifecycle
    (empty -> full -> empty), calling checkRep() after every operation and
    checking empty()/full() plus FIFO ordering against a shadow list.
    """
    N = 100
    for i in range(N):
        size = random.randint(1,10000)
        q = Queue(size)
        q.checkRep()
        is_empty = q.empty()
        q.checkRep()
        assert is_empty
        is_full = q.full()
        q.checkRep()
        assert not is_full
        # stress dequeue when empty
        for i in range(size+1):
            succ = q.dequeue()
            q.checkRep()
            assert succ is None
        is_empty = q.empty()
        q.checkRep()
        assert is_empty
        is_full = q.full()
        q.checkRep()
        assert not is_full
        # stress normal enqueue
        l = []
        for i in range(size):
            x = random.randint(1,1000000)
            succ = q.enqueue(x)
            q.checkRep()
            assert succ
            l.append(x)
        is_full = q.full()
        q.checkRep()
        assert is_full
        is_empty = q.empty()
        q.checkRep()
        assert not is_empty
        # stress enqueue when full
        for i in range(size+1):
            succ = q.enqueue(i)
            q.checkRep()
            assert not succ
        is_full = q.full()
        q.checkRep()
        assert is_full
        is_empty = q.empty()
        q.checkRep()
        assert not is_empty
        # stress normal dequeue
        for i in range(size):
            value = q.dequeue()
            q.checkRep()
            v = l.pop(0)
            assert value == v
            is_full = q.full()
            q.checkRep()
            assert not is_full
            is_empty = q.empty()
            q.checkRep()
            if i < size-1:
                assert not is_empty
            else:
                assert is_empty
        is_empty = q.empty()
        q.checkRep()
        assert is_empty
        is_full = q.full()
        q.checkRep()
        assert not is_full
        # stress dequeue when empty again
        for i in range(size+1):
            succ = q.dequeue()
            q.checkRep()
            assert succ is None
        is_empty = q.empty()
        q.checkRep()
        assert is_empty
        is_full = q.full()
        q.checkRep()
        assert not is_full
test()
| spanners/udacity-homeworks | cs258/unit3/bounded_queue.py | bounded_queue.py | py | 4,061 | python | en | code | 0 | github-code | 90 |
class solve:
    """Shell sort driver that records the gap sequence and shift count.

    (The class header line was garbled by a fused record id in the source;
    restored to valid Python.)

    Attributes set by shellsort():
        G   -- gap sequence used (descending, ends with 1)
        m   -- number of gaps
        cnt -- total number of element shifts performed
    """
    def __init__(self, n):
        # n is accepted for interface compatibility; no state is needed
        # before shellsort() runs.
        pass
    def insertionsort(self, A, n, g):
        """Insertion-sort A[0:n] over stride g, counting shifts in self.cnt."""
        for i in range(g, n):
            v = A[i]
            j = i - g
            while j >= 0 and A[j] > v:
                A[j + g] = A[j]
                j = j - g
                self.cnt += 1
            A[j + g] = v
    def shellsort(self, A, n):
        """Sort A in place using Knuth's gap sequence (1, 4, 13, ...)."""
        self.cnt = 0
        self.G = [1]
        f = 1
        # grow gaps via g' = 3*g + 1 while they still fit the array
        while f * 3 + 1 <= n:
            f = f * 3 + 1
            self.G.append(f)
        self.G = list(reversed(self.G))
        self.m = len(self.G)
        for g in self.G:
            self.insertionsort(A, n, g)
# Read "n" followed by n integers from stdin (fd 0).
n, *A = map(int, open(0).read().split())
solver = solve(n)
solver.shellsort(A, n)
# Output format: gap count, gap sequence, total shift count, sorted values.
print(solver.m)
print(' '.join(map(str, solver.G)))
print(solver.cnt)
print('\n'.join(map(str, A)))
| Aasthaengg/IBMdataset | Python_codes/p02262/s189015222.py | s189015222.py | py | 808 | python | en | code | 0 | github-code | 90 |
import os
import pickle

from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import IntegrityError
from django.shortcuts import redirect, render
from django.utils import timezone

import api.models
import dashboard.methodpack as mp
import dashboard.serializers as s

from .decorators import group_required
from .models import AccountFormP, AccountFormU
# Pages that do not require authentication:
def iletisim(request):
    """Render the public contact ("iletisim") page."""
    return render(request, 'dashboard/iletisim.html')
def lisans(request):
    """Render the license page; newlines in the bundled license text are
    converted to <br /> tags for HTML display."""
    license_path = os.path.join(settings.BASE_DIR, 'dashboard', 'licenses')
    with open(license_path, 'r') as license_file:
        raw_text = license_file.read()
    return render(request, 'dashboard/lisans.html',
                  {'licenses': raw_text.replace('\n', '<br />')})
# Pages that require authentication:
@login_required
def anasayfa(request):
    """Render the dashboard home page: latest box (koli) status, the five
    most recent box changes, and a six-month production/sales line chart."""
    data = {'title': 'Dashboard'}
    data['birinci_kalite_oran'], data['kalite1'], data['kalite2'], data['toplam_koli'] = mp.koli_son_durum_iterator(
        api.models.KoliSonDurum.objects.latest('tarih'))
    data['son_bes'] = api.models.KoliDegisiklik.objects.select_related(
        'kullanici__profile').order_by('-id')[:5]
    # TODO: Persist these month-by-month aggregates to the database;
    # recomputing them on every request wastes a lot of CPU.
    # Turkish month abbreviations used as chart labels (runtime data).
    aylar = ['Oca', 'Şub', 'Mar', 'Nis', 'May', 'Haz', 'Tem', 'Ağu', 'Eyl', 'Eki', 'Kas', 'Ara']
    tarih = timezone.localtime(timezone.now())
    # cizgi_grafik_veri = [labels, production series, sales series]
    data['cizgi_grafik_veri'] = [[], [], []]
    for i in range(6, 0, -1):
        ay = tarih.month-i
        yil = tarih.year
        if ay < 1:
            # month index underflowed: wrap into the previous year
            ay+=12
            yil-=1
        data['cizgi_grafik_veri'][0].append('{0}\'{1}'.format(aylar[ay-1], yil%100))
        uretim, satis = mp.monthly_production_and_sales(ay, yil)
        data['cizgi_grafik_veri'][1].append(uretim)
        data['cizgi_grafik_veri'][2].append(satis)
    return render(request, 'dashboard/anasayfa.html', data)
@login_required
def arama(request):
    """Render the search ("arama") page, passing through the optional
    ``?q=`` query string (empty string when absent)."""
    # QueryDict.get replaces the redundant membership test on GET.keys()
    # followed by indexing; behavior is identical.
    q = request.GET.get('q', '')
    return render(request, 'dashboard/arama.html', {'q': q})
@login_required
def hesap(request):
    """Account ("hesap") page: on GET show the user/profile forms, on a
    valid POST save them, crop/resize a newly uploaded profile photo using
    the posted croppie coordinates, and redirect home with a success message."""
    loggedin_user = User.objects.get(pk=request.user.id)
    if request.method == 'POST':
        formu = AccountFormU(request.POST, instance=loggedin_user)
        formp = AccountFormP(request.POST, request.FILES,
                             instance=loggedin_user.profile)
        if formu.is_valid() and formp.is_valid():
            formu.save()
            profile_obj = formp.save()
            if bool(formp.cleaned_data['photo']) and type(formp.cleaned_data['photo']) == InMemoryUploadedFile:
                # A new photo has been uploaded.
                # print(request.POST['croppieData'])
                croppieData = tuple(map(int, request.POST['croppieData'].split(',')))
                mp.profile_photo_resizer(profile_obj.photo.name, croppieData)
            messages.add_message(request, messages.SUCCESS,
                                 'Hesap bilgileriniz başarıyla kaydedilmiştir.')
            # from: https://groups.google.com/forum/#!topic/django-users/SLw6SrIC8wI
            return redirect('dashboard:anasayfa')
    elif request.method == 'GET':
        formu = AccountFormU(instance=loggedin_user)
        formp = AccountFormP(instance=loggedin_user.profile)
    return render(request, 'dashboard/hesap.html', {'formu': formu, 'formp': formp})
@login_required
def sifredegistir(request):
    """Password-change view: validate Django's PasswordChangeForm on POST,
    keep the session alive after saving, and show an unbound form on GET."""
    if request.method == 'GET':
        form = PasswordChangeForm(request.user)
    elif request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            saved_user = form.save()
            # Keep the user logged in after the password change.
            # from: https://simpleisbetterthancomplex.com/tips/2016/08/04/django-tip-9-password-change-form.html
            update_session_auth_hash(request, saved_user)
            messages.add_message(request, messages.SUCCESS,
                                 'Şifreniz başarıyla değiştirilmiştir.')
            return redirect('dashboard:anasayfa')
    return render(request, 'dashboard/sifredegistir.html', {'form': form})
@login_required
@group_required('editor')
def kolieklecikar(request):
    """Box stock add/remove view (editors only).

    Shows the last ten box-change records plus an entry form; POST stores a
    new KoliDegisiklik attributed to the current user.  An IntegrityError
    from the model layer (e.g. removing more stock than exists) is surfaced
    as an error message instead of a crash.
    """
    son_on = api.models.KoliDegisiklik.objects.select_related(
        'kullanici__profile').order_by('-id')[:10]
    if request.method == 'POST':
        form = api.models.KoliDegisiklikForm(request.POST)
        if form.is_valid():
            try:
                # from: https://stackoverflow.com/a/46941862
                # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ THANK YOU SO MUCH!!!
                # commit=False lets us attach the acting user before saving.
                koli_degisiklik_obj = form.save(commit=False)
                koli_degisiklik_obj.kullanici_id = request.user.id
                koli_degisiklik_obj.save()
                messages.add_message(
                    request, messages.SUCCESS, 'Kayıt başarılı.')
                return redirect('dashboard:kolieklecikar')
            except IntegrityError:
                messages.add_message(
                    request, messages.ERROR, 'Veritabanına geçersiz girdi yapmaya çalıştınız. Stokta var olandan daha fazla malzemeyi çıktı gibi göstermeye çalışıyor olabilirsiniz. Girdiğiniz verileri gözden geçirin.')
    elif request.method == 'GET':
        form = api.models.KoliDegisiklikForm()
    return render(request, 'dashboard/koli/kolieklecikar.html', {'form': form, 'son_on': son_on})
@login_required
@group_required('editor')
def kolieklecikar_sil(request, pk):
    """Delete a single KoliDegisiklik record (editors only).

    GET shows a confirmation page for the record; POST performs the delete.
    IntegrityError (deleting out of order / across days) becomes an error
    message rather than a 500.
    """
    if request.method == 'POST':
        # NOTE(review): DoesNotExist is not caught here (only in the GET
        # branch), so POSTing an invalid pk would raise — confirm intended.
        try:
            delet_dis = api.models.KoliDegisiklik.objects.get(pk=pk)
            delet_dis.delete()
            messages.add_message(
                request,
                messages.SUCCESS,
                'Silme işlemi başarılı.'
            )
        except IntegrityError:
            messages.add_message(
                request,
                messages.ERROR,
                'Kolideki değişiklikleri, kayıt sırasına göre, sondan başlayarak silmelisiniz. Ayrıca, başka gün içinde yapılan değişikliği silemezsiniz.'
            )
        return redirect('dashboard:kolieklecikar')
    elif request.method == 'GET':
        try:
            record = api.models.KoliDegisiklik.objects.get(pk=pk)
        except api.models.KoliDegisiklik.DoesNotExist:
            messages.add_message(request, messages.ERROR,
                                 'Böyle bir kayıt yok.')
            return redirect('dashboard:kolieklecikar')
        return render(request, 'dashboard/koli/kolieklecikar_sil.html', {'record': record})
@login_required
def kolirapor(request):
    """Box report view.

    The ``istek`` GET parameter switches to sub-reports: ``deg_tumu`` lists
    every change record; ``son_tumu`` shows the snapshot for a user-chosen
    day.  Without a recognised parameter the default dashboard (last ten
    changes plus the latest snapshot) is rendered.
    """
    if 'istek' in request.GET.keys():
        if request.GET['istek'] == 'deg_tumu':
            data = api.models.KoliDegisiklik.objects.select_related(
                'kullanici').order_by('-id')
            return render(request, 'dashboard/koli/kolirapor_degtumu.html', {'data': data})
        elif request.GET['istek'] == 'son_tumu':
            data = None
            if request.method == 'POST':
                form = api.models.GunGetirForm(request.POST)
                if form.is_valid():
                    try:
                        data = s.SingleRecordSerializer(
                            api.models.KoliSonDurum.objects.get(tarih=form.cleaned_data['gun']))
                    except api.models.KoliSonDurum.DoesNotExist:
                        messages.add_message(
                            request, messages.ERROR, 'Girilen güne ait veri bulunamadı.')
            elif request.method == 'GET':
                form = api.models.GunGetirForm()
            return render(request, 'dashboard/koli/kolirapor_sontumu.html', {'form': form, 'data': data})
    # ``tarih`` and ``detay`` sub-reports are not implemented yet.
    elif 'tarih' in request.GET.keys():
        pass
    elif 'detay' in request.GET.keys():
        pass
    # from: https://books.agiliq.com/projects/django-orm-cookbook/en/latest/select_some_fields.html
    # The only difference between 'only' and 'values' is 'only' also fetches the id
    # guess what: if you don't use 'only', get_mamul_model_display won't work. yeah, just try to live with that.
    koli_son_degisiklikler = api.models.KoliDegisiklik.objects.order_by(
        '-id').only('tarih', 'mamul_model', 'koli_turu', 'koli_adet')[:10]
    koli_son_durum_ = s.SingleRecordSerializer(
        api.models.KoliSonDurum.objects.latest('tarih'))
    return render(request, 'dashboard/koli/kolirapor.html', {'koli_son_degisiklikler': koli_son_degisiklikler, 'durum': koli_son_durum_})
@login_required
@group_required('editor')
def koliguncelle(request):
    """Bulk box-stock correction view (editors only).

    Each formset row states the *absolute* count for a product/box/quality
    combination; the view converts it to a delta against the latest
    KoliSonDurum snapshot and records it as a KoliDegisiklik.
    """
    # NOTE(review): this assigns the formset class onto api.models at every
    # request instead of using a module-level constant — confirm intended.
    api.models.KoliRestockFormset = forms.formset_factory(
        api.models.KoliRestockForm)
    if request.method == 'POST':
        formset = api.models.KoliRestockFormset(request.POST)
        if formset.is_valid():
            for form in formset:
                try:
                    record = api.models.KoliSonDurum.objects.latest('tarih')
                    # Snapshot columns hold pickled nested dicts keyed by
                    # kalite -> koli_turu -> kolideki_mamul_adet; the delta is
                    # requested count minus the snapshot count.
                    new_value = form.cleaned_data['koli_adet'] - pickle.loads(vars(record)[form.cleaned_data['mamul_model']])[
                        form.cleaned_data['kalite']][form.cleaned_data['koli_turu']][form.cleaned_data['kolideki_mamul_adet']]
                except (KeyError, EOFError, api.models.KoliSonDurum.DoesNotExist):
                    # No prior snapshot entry: the absolute count is the delta.
                    new_value = form.cleaned_data['koli_adet']
                api.models.KoliDegisiklik.objects.create(
                    mamul_model=form.cleaned_data['mamul_model'],
                    koli_turu=form.cleaned_data['koli_turu'],
                    kolideki_mamul_adet=form.cleaned_data['kolideki_mamul_adet'],
                    kalite=form.cleaned_data['kalite'],
                    koli_adet=new_value,
                    notlar='Bu değişiklik Koli Güncelleme ekranından yapılmıştır.',
                    kullanici=request.user
                )
            messages.add_message(request, messages.SUCCESS, 'Kayıt başarılı.')
            return redirect('dashboard:koliguncelle')
    elif request.method == 'GET':
        formset = api.models.KoliRestockFormset()
    return render(request, 'dashboard/koli/koliguncelle.html', {'formset': formset})
@login_required
@group_required('editor')
def hammaddeeklecikar(request):
    """Raw-material stock add/remove view (editors only).

    Mirrors ``kolieklecikar``: shows the last ten HammaddeDegisiklik records
    plus an entry form; POST saves a change attributed to the current user,
    mapping IntegrityError to an error message.
    """
    son_on = api.models.HammaddeDegisiklik.objects.select_related(
        'kullanici__profile').order_by('-id')[:10]
    if request.method == 'POST':
        form = api.models.HammaddeDegisiklikForm(request.POST)
        if form.is_valid():
            try:
                # commit=False lets us attach the acting user before saving.
                hammadde_degisiklik_obj = form.save(commit=False)
                hammadde_degisiklik_obj.kullanici_id = request.user.id
                hammadde_degisiklik_obj.save()
                messages.add_message(
                    request, messages.SUCCESS, 'Kayıt başarılı.')
                return redirect('dashboard:hammaddeeklecikar')
            except IntegrityError:
                messages.add_message(
                    request, messages.ERROR,
                    'Veritabanına geçersiz girdi yapmaya çalıştınız. Stokta var olandan daha fazla malzemeyi çıktı gibi göstermeye çalışıyor olabilirsiniz. Girdiğiniz verileri gözden geçirin.')
    elif request.method == 'GET':
        form = api.models.HammaddeDegisiklikForm()
    return render(request, 'dashboard/hammadde/hammaddeeklecikar.html', {'form': form, 'son_on': son_on})
@login_required
@group_required('editor')
def hammaddeeklecikar_sil(request, pk):
    """Delete a single HammaddeDegisiklik record (editors only).

    GET shows a confirmation page; POST performs the delete, mapping
    IntegrityError (change made on another day) to an error message.
    """
    if request.method == 'POST':
        # NOTE(review): DoesNotExist is only caught in the GET branch —
        # POSTing an invalid pk would raise; confirm intended.
        try:
            delet_dis = api.models.HammaddeDegisiklik.objects.get(pk=pk)
            delet_dis.delete()
            messages.add_message(request, messages.SUCCESS,
                                 'Silme işlemi başarılı.')
        except IntegrityError:
            messages.add_message(
                request, messages.ERROR, 'Başka bir gün içinde yapılmış değişikliği silemezsiniz.')
        return redirect('dashboard:hammaddeeklecikar')
    elif request.method == 'GET':
        try:
            record = api.models.HammaddeDegisiklik.objects.get(pk=pk)
        except api.models.HammaddeDegisiklik.DoesNotExist:
            messages.add_message(request, messages.ERROR,
                                 'Böyle bir kayıt yok.')
            return redirect('dashboard:hammaddeeklecikar')
        return render(request, 'dashboard/hammadde/hammaddeeklecikar_sil.html', {'record': record})
@login_required
def hammadderapor(request):
    """Raw-material report view.

    ``istek=deg_tumu`` lists all change records; ``istek=son_tumu`` shows a
    user-chosen day's HammaddeSonDurum snapshot.  Otherwise the default
    dashboard (last ten changes plus latest snapshot) is rendered.
    """
    if 'istek' in request.GET.keys():
        if request.GET['istek'] == 'deg_tumu':
            data = api.models.HammaddeDegisiklik.objects.select_related(
                'kullanici').order_by('-id')
            return render(request, 'dashboard/hammadde/hammadderapor_degtumu.html', {'data': data})
        elif request.GET['istek'] == 'son_tumu':
            data = None
            if request.method == 'POST':
                form = api.models.GunGetirForm(request.POST)
                if form.is_valid():
                    try:
                        data = s.SingleRecordSerializer(
                            api.models.HammaddeSonDurum.objects.get(tarih=form.cleaned_data['gun']))
                    except api.models.HammaddeSonDurum.DoesNotExist:
                        messages.add_message(
                            request, messages.ERROR, 'Girilen güne ait veri bulunamadı.')
            elif request.method == 'GET':
                form = api.models.GunGetirForm()
            return render(request, 'dashboard/hammadde/hammadderapor_sontumu.html', {'form': form, 'data': data})
    ham_son_degisiklikler = api.models.HammaddeDegisiklik.objects.order_by(
        '-id').only('tarih', 'madde', 'miktar')[:10]
    ham_son_durum = s.SingleRecordSerializer(
        api.models.HammaddeSonDurum.objects.latest('tarih'))
    return render(request, 'dashboard/hammadde/hammadderapor.html', {'ham_son_degisiklikler': ham_son_degisiklikler, 'durum': ham_son_durum})
@login_required
@group_required('editor')
def hammaddeguncelle(request):
    """Bulk raw-material correction view (editors only).

    Each formset row states the *absolute* amount for a material; the view
    converts it to a delta against the latest HammaddeSonDurum snapshot and
    records it as a HammaddeDegisiklik.
    """
    # NOTE(review): formset class is re-assigned onto api.models on every
    # request (same pattern as koliguncelle) — confirm intended.
    api.models.HammaddeRestockFormset = forms.formset_factory(
        api.models.HammaddeRestockForm)
    if request.method == 'POST':
        formset = api.models.HammaddeRestockFormset(request.POST)
        if formset.is_valid():
            for form in formset:
                try:
                    record = api.models.HammaddeSonDurum.objects.latest(
                        'tarih')
                    # Delta = requested absolute amount - snapshot amount.
                    new_value = form.cleaned_data['miktar'] - \
                        vars(record)[form.cleaned_data['madde']]
                except (KeyError, EOFError, api.models.HammaddeSonDurum.DoesNotExist):
                    # No prior snapshot entry: the absolute amount is the delta.
                    new_value = form.cleaned_data['miktar']
                api.models.HammaddeDegisiklik.objects.create(
                    madde=form.cleaned_data['madde'],
                    miktar=new_value,
                    notlar='Bu değişiklik Hammadde Güncelleme ekranından yapılmıştır.',
                    kullanici=request.user
                )
            messages.add_message(request, messages.SUCCESS, 'Kayıt başarılı.')
            return redirect('dashboard:hammaddeguncelle')
    elif request.method == 'GET':
        formset = api.models.HammaddeRestockFormset()
    return render(request, 'dashboard/hammadde/hammaddeguncelle.html', {'formset': formset})
@login_required
def yasak(request):
    """Render the access-denied ("yasak") page."""
    return render(request, 'dashboard/yasak.html')
@login_required
def kolirapor_tek_detay(request, urun):
    """Detail report for a single product model (``urun`` is the choice key).

    Unknown keys redirect back to the main box report; otherwise the latest
    snapshot column for the product is serialised together with its last ten
    change records.
    """
    data = {}
    data['urun'] = mp.find_full_name_from_choice(
        urun, api.models.KoliDegisiklik.MAMUL_SECENEKLERI)
    if data['urun'] is None:
        return redirect('dashboard:kolirapor')
    # vars(...) pulls the snapshot's per-product column by attribute name.
    data['detaylar'] = s.KoliSonDurumSerializer(
        vars(api.models.KoliSonDurum.objects.latest('tarih'))[urun])
    data['son_on'] = api.models.KoliDegisiklik.objects.select_related(
        'kullanici__profile').filter(mamul_model=urun).order_by('-id')[:10]
    return render(request, 'dashboard/koli/kolirapor_tek_detay.html', data)
| Tospaa/OzerLastikDjango | dashboard/views.py | views.py | py | 16,256 | python | tr | code | 0 | github-code | 90 |
34444118120 | import os
import sys
assert sys.version_info.major == 3, ('Python {0} is not supported, '
'please use Python 3.'.format(
sys.version_info.major))
def change_vcxproj(filename):
    """Insert a <PlatformToolset> element into the "Globals" PropertyGroup of
    the given .vcxproj file.

    The toolset name comes from the module-level ``toolset`` variable (parsed
    from argv below).  The file is rewritten in place.
    """
    # Bug fix: the format string had lost its placeholder ('Processing
    # (unknown)...') so the filename was never shown; print it explicitly.
    print('Processing {0}...'.format(filename))
    with open(filename, 'rt') as infile:
        contents = infile.read()
    # Splice the toolset element in just before the closing tag of the
    # Globals PropertyGroup.
    start = contents.find('<PropertyGroup Label="Globals">')
    end = contents.find('</PropertyGroup>', start)
    insert = '\t<PlatformToolset>{0}</PlatformToolset>\n\t'.format(toolset)
    contents = insert.join((contents[:end], contents[end:]))
    with open(filename, 'wt') as outfile:
        outfile.write(contents)
# Entry point: take the toolset name from the command line and patch every
# .vcxproj file in the known project directories.
assert len(sys.argv) == 2, 'Usage: visual_studio.py toolset'
toolset = sys.argv[1]

print('\nChanging project toolsets to "{0}"...'.format(toolset))
for path in ('windows/vs2010', ):
    for filename in os.listdir(path):
        (root, ext) = os.path.splitext(filename)
        if ext == '.vcxproj':
            change_vcxproj(os.path.join(path, filename))
print('Done.')
| Hoshino19680329/traKmeter | Builds/visual_studio_fix.py | visual_studio_fix.py | py | 1,086 | python | en | code | 0 | github-code | 90 |
70032305258 | import os
from PIL import Image
# Re-save every .jpg in the dataset directory as a .jpeg copy.  The RGB
# conversion normalises palette/alpha images; the original .jpg files are
# left in place (only a new file with the .jpeg extension is written).
directory = 'C:/Users/mousu/Documents/university/3.5 - Year 2022-23/Term 2/CV/Coursework/OwnData/ownDataset/ownTest/images'

for filename in os.listdir(directory):
    if filename.endswith('.jpg'):
        img_path = os.path.join(directory, filename)
        with Image.open(img_path) as img:
            img = img.convert('RGB')
            # img_path[:-3] drops "jpg" (keeping the dot) -> "<name>.jpeg".
            img.save(img_path[:-3] + 'jpeg')
| MousufCZ/face-mask-identification-models | Personal_Dataset/Data_Prep/changeJPGtoJPEG.py | changeJPGtoJPEG.py | py | 408 | python | en | code | 0 | github-code | 90 |
23642700271 | import logging
from json import JSONDecodeError
import requests
from django.conf import settings
from utils.exceptions import APIHttpException, APIJsonException
from .resources import (
ActionPlanMilestoneResource,
ActionPlanResource,
ActionPlanStakeholderResource,
ActionPlanTaskResource,
BarriersResource,
CommoditiesResource,
DocumentsResource,
EconomicAssessmentResource,
EconomicImpactAssessmentResource,
FeedbackResource,
GroupsResource,
MentionResource,
NotesResource,
NotificationExclusionResource,
PublicBarrierNotesResource,
PublicBarriersResource,
ReportsResource,
ResolvabilityAssessmentResource,
SavedSearchesResource,
StrategicAssessmentResource,
UsersResource,
)
logger = logging.getLogger(__name__)
class MarketAccessAPIClient:
    """Thin HTTP client for the Market Access API.

    Exposes one resource helper per API area (barriers, reports, action
    plans, ...) and wraps ``requests`` with bearer-token auth plus
    project-specific error translation (APIHttpException / APIJsonException).
    """

    def __init__(self, token=None, **kwargs):
        # Fall back to the trusted service token when no user token is given.
        self.token = token or settings.TRUSTED_USER_TOKEN
        self.barriers = BarriersResource(self)
        self.documents = DocumentsResource(self)
        self.economic_assessments = EconomicAssessmentResource(self)
        self.economic_impact_assessments = EconomicImpactAssessmentResource(self)
        self.groups = GroupsResource(self)
        self.commodities = CommoditiesResource(self)
        self.notes = NotesResource(self)
        self.public_barrier_notes = PublicBarrierNotesResource(self)
        self.public_barriers = PublicBarriersResource(self)
        self.reports = ReportsResource(self)
        self.resolvability_assessments = ResolvabilityAssessmentResource(self)
        self.strategic_assessments = StrategicAssessmentResource(self)
        self.saved_searches = SavedSearchesResource(self)
        self.users = UsersResource(self)
        self.mentions = MentionResource(self)
        self.notification_exclusion = NotificationExclusionResource(self)
        self.action_plans = ActionPlanResource(self)
        self.action_plan_milestones = ActionPlanMilestoneResource(self)
        self.action_plan_tasks = ActionPlanTaskResource(self)
        self.action_plan_stakeholders = ActionPlanStakeholderResource(self)
        self.feedback = FeedbackResource(self)

    def request(self, method, path, **kwargs):
        """Perform an HTTP request against the API.

        Raises APIHttpException (with the response attached) on any 4xx/5xx.
        """
        url = f"{settings.MARKET_ACCESS_API_URI}{path}"
        headers = {
            "Authorization": f"Bearer {self.token}",
            "X-User-Agent": "",
            "X-Forwarded-For": "",
        }
        # Dispatch to requests.get/post/... by method name.
        response = getattr(requests, method)(url, headers=headers, **kwargs)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            logger.warning(e)
            raise APIHttpException(e, response)
        return response

    def get(self, path, raw=False, **kwargs):
        """GET ``path``; return parsed JSON, or the raw response if ``raw``.

        Raises APIJsonException when the body is not valid JSON.
        """
        response = self.request("get", path, **kwargs)
        if raw:
            return response
        try:
            return response.json()
        except JSONDecodeError:
            raise APIJsonException(
                f"Non json response at '{response.url}'. "
                f"Response text: {response.text}"
            )

    def post(self, path, **kwargs):
        return self.request_with_results("post", path, **kwargs)

    def patch(self, path, **kwargs):
        return self.request_with_results("patch", path, **kwargs)

    def put(self, path, **kwargs):
        return self.request_with_results("put", path, **kwargs)

    def delete(self, path, **kwargs):
        return self.request("delete", path, **kwargs)

    def request_with_results(self, method, path, **kwargs):
        """Perform a request and unwrap the API's standard envelope."""
        response = self.request(method, path, **kwargs)
        return self.get_results_from_response_data(response.json())

    def get_results_from_response_data(self, response_data):
        """Unwrap ``{"response": {"success": ..., "result(s)": ...}}``.

        On success return the result/results payload; otherwise return the
        raw response data unchanged.
        """
        if response_data.get("response", {}).get("success"):
            return response_data["response"].get(
                "result", response_data["response"].get("results")
            )
        else:
            return response_data
| uktrade/market-access-python-frontend | utils/api/client.py | client.py | py | 3,986 | python | en | code | 5 | github-code | 90 |
def _is_prime(value):
    """Return True when *value* is prime (trial division up to sqrt(value))."""
    if value < 2:
        return False
    for divisor in range(2, int(value ** 0.5) + 1):
        if value % divisor == 0:
            return False
    return True


# First input line: how many numbers follow (consumed but not otherwise used,
# matching the original script).  Second line: the numbers themselves.
n = int(input())
nums = list(map(int, input().split(' ')))
# Print how many of the given numbers are prime.  The original counted all
# divisors in [2, i] per number (O(i) each); sqrt-bounded trial division is
# equivalent and far cheaper.
print(sum(1 for i in nums if _is_prime(i)))
73822511017 | # coding=utf-8
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd
from sklearn.preprocessing import StandardScaler
# Load the California housing dataset and split features/target 80/20.
# NOTE(review): no random_state is passed, so the split differs on every run.
cali = datasets.california_housing.fetch_california_housing()
X = cali['data']
Y = cali['target']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8)
def a_predict_origin_data():
    """Fit a KNN regressor on the raw (unscaled) features and print test MSE."""
    regressor = KNeighborsRegressor()
    regressor.fit(X_train, Y_train)
    predictions = regressor.predict(X_test)
    print("MAE=", mean_squared_error(Y_test, predictions))
# a_predict_origin_data()
def b_z_score():
    """KNN regression after z-score standardisation of the features.

    Bug fix: the scaler is now fitted on the training split only and applied
    to the test split with ``transform`` — the original called
    ``fit_transform`` on the test set, leaking test-set statistics into
    preprocessing and scaling the two splits inconsistently.
    """
    regressor = KNeighborsRegressor()
    scaler = StandardScaler()
    # Normalise the data with the z-scores method (fit on train only).
    X_train2 = scaler.fit_transform(X_train)
    X_test2 = scaler.transform(X_test)
    regressor.fit(X_train2, Y_train)
    Y_est = regressor.predict(X_test2)
    print("MAE=", mean_squared_error(Y_test, Y_est))
b_z_score()
def c_none_linear():
    """KNN regression with an extra sqrt-transformed feature, then z-scoring.

    Bug fix: the scaler is fitted on the extended training matrix only and
    applied to the test matrix with ``transform`` — the original called
    ``fit_transform`` on both, scaling train and test inconsistently.
    """
    # Apply a non-linear transformation to feature 5 and append it as a
    # new column.
    regressor = KNeighborsRegressor()
    scaler = StandardScaler()
    non_linear_feat = 5
    X_train_new_feat = np.sqrt(X_train[:, non_linear_feat])
    print(X_train_new_feat.shape)
    X_train_new_feat.shape = (X_train_new_feat.shape[0], 1)
    X_train_extend = np.hstack([X_train, X_train_new_feat])
    X_test_new_feat = np.sqrt(X_test[:, non_linear_feat])
    print(X_test_new_feat.shape)
    X_test_new_feat.shape = (X_test_new_feat.shape[0], 1)
    X_test_extend = np.hstack([X_test, X_test_new_feat])
    X_train_extend_transformed = scaler.fit_transform(X_train_extend)
    X_test_extend_transformed = scaler.transform(X_test_extend)
    regressor.fit(X_train_extend_transformed, Y_train)
    Y_est = regressor.predict(X_test_extend_transformed)
    print("MAE=", mean_squared_error(Y_test, Y_est))
c_none_linear()
| helloexp/ml | ml/data_scince/c_thrid/b_knn/knn_regressor.py | knn_regressor.py | py | 1,936 | python | en | code | 0 | github-code | 90 |
26437263184 | from typing import List, Any
from talipp.indicators.Indicator import Indicator
from talipp.ohlcv import OHLCV
class VWMA(Indicator):
    """
    Volume Weighted Moving Average

    Output: a list of floats
    """

    def __init__(self, period: int, input_values: List[OHLCV] = None):
        super().__init__()

        self.period = period

        self.initialize(input_values)

    def _calculate_new_value(self) -> Any:
        # Not enough bars yet to fill a full window.
        if len(self.input_values) < self.period:
            return None

        window = self.input_values[-self.period:]
        weighted_close = sum(bar.close * bar.volume for bar in window)
        total_volume = sum(bar.volume for bar in window)
        return weighted_close / total_volume
71868302057 | import socket
import json
# Get the local IP address of this machine.
def get_local_ip():
    """Return the primary local IPv4 address, or '' when it cannot be found.

    "Connecting" a UDP socket to a public address sends no packets but lets
    the OS pick the outbound interface, whose address is then read back.

    Bug fix: the original ran ``s.close()`` in ``finally`` unconditionally —
    if ``socket.socket()`` itself raised, ``s`` was unbound and the finally
    block raised NameError instead of returning ''.
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
        return ip
    except Exception:
        print("未联网或ip获取失败,ip将被置为空字符串")
        return ''
    finally:
        if s is not None:
            s.close()
# Try to parse a JSON string; on failure return a caller-supplied default.
def try_transto_json(json_str, default_value):
    """Parse *json_str* as JSON; return *default_value* if parsing fails.

    Bug fix: the original caught bare ``Exception``, which also swallowed
    unrelated programming errors.  ``json.loads`` raises ``ValueError``
    (``JSONDecodeError`` is a subclass) for malformed text and ``TypeError``
    for non-string input; anything else now propagates.
    """
    try:
        result = json.loads(json_str)
    except (TypeError, ValueError) as e:
        print(e)
        result = default_value
    return result
def resolve():
    """Read three lines of "A B" pairs (values 1..4) and print YES when,
    among the six values, exactly two distinct values occur twice and two
    occur once; otherwise print NO."""
    values = []
    for _ in range(3):
        x, y = map(int, input().split())
        values.append(x)
        values.append(y)
    counts = [values.count(v) for v in range(1, 5)]
    print("YES" if counts.count(2) == 2 and counts.count(1) == 2 else "NO")


resolve()
582629615 | from objectClass import *
import bisect
import os
import struct
from math import log10
import math
from huff import *
def computeScore(frequency, total, num):
    """TF-IDF style score: (1 + log10 tf) * log10(N / df)."""
    tf_weight = 1 + log10(frequency)
    idf_weight = log10(total / num)
    return tf_weight * idf_weight
class invertedIndex:
    """Query-side reader for a Huffman-compressed inverted index.

    The index lives in ``invertedIndexCompress.bin`` as a sequence of
    records: a 2-byte little-endian length, a 2-byte padding value, then the
    Huffman-encoded entry (word, file pointer, and doc/frequency pairs).
    Per-word position lists live in ``./invertedFiles/word<fptr>.bin``.

    NOTE(review): this looks like Python 2-era code — ``findTopKDocuments``
    calls ``dict.iteritems()`` (Python 2 only) and ``readFromFile`` compares
    binary reads against ``""`` rather than ``b""``; verify target runtime.
    """

    def __init__(self):
        """Load chunk size / document count from the config file and rebuild
        the Huffman decode tree from ``Hufftable.txt``."""
        self.invertedList = invertedObject()
        self.filename = "./invertedIndexCompress.bin"
        self.configFile = "./inverted.config"
        # Field sizes (bytes) of the on-disk record layout.
        self.numBytes = 2
        self.sizeChar = 1
        self.sizeDoc = 2
        self.sizeFptr = 2
        self.sizePad = 2
        self.distinctWords = 0
        self.totalDoc = 0
        self.memSize = 0
        # Current read offset into the compressed index file.
        self.offset = 0
        self.distinctWords = 0
        self.posSize = 2
        self.huffTable = {}
        f = open(self.configFile, "r")
        content = f.read()
        lists = content.split("\n")
        self.memSize = int(lists[0])
        self.totalDoc = int(lists[1])
        #self.distinctWords = int(lists[2])
        f.close()
        # Hufftable.txt: one "<symbol>\t<code>" pair per line.
        with open("Hufftable.txt", "r") as f:
            for line in f:
                lists = line.split('\t')
                self.huffTable[lists[0]] = lists[1].strip()
        self.decodeTree = createTree()
        self.decodeTree.recreateTree(self.huffTable)

    def readFromFile(self):
        """Read up to ``memSize`` bytes of records starting at ``self.offset``.

        Returns a compressObject whose ``compressString``/``pad`` lists hold
        the raw encoded entries and their padding values; advances
        ``self.offset`` past what was consumed.
        """
        f = open(self.filename, "rb")
        newObject = compressObject()
        f.seek(self.offset)
        currSize = 0
        numbytes = f.read(self.numBytes)
        # NOTE(review): on Python 3 f.read() returns bytes, so this ``== ""``
        # EOF check can never fire — confirm Python 2 runtime.
        if numbytes == "":
            f.close()
            return newObject
        numbytes = struct.unpack('<H', numbytes)
        numbytes = numbytes[0]
        while currSize < self.memSize:
            padding = struct.unpack('<H',f.read(self.sizePad))[0]
            readEntry = f.read(numbytes)
            newObject.compressString.append(readEntry)
            newObject.pad.append(padding)
            currSize += numbytes + self.sizePad + self.numBytes
            numbytes = f.read(self.numBytes)
            if numbytes == "":
                break
            numbytes = struct.unpack('<H', numbytes)
            numbytes = numbytes[0]
        self.offset += currSize
        f.close()
        return newObject

    def findPosList(self, fname, frequency):
        """Read per-document position lists for one word.

        ``fname`` is the word's file-pointer id; ``frequency`` gives, per
        document, how many 2-byte positions to read.  Returns a list of
        position lists aligned with the word's document list.
        """
        filename = "./invertedFiles/word" + str(fname) + ".bin"
        f = open(filename, "rb")
        posList = []
        for freq in frequency:
            newList = []
            for k in range(0, freq):
                newList.append(struct.unpack('<H',f.read(self.sizeDoc))[0])
            posList.append(newList)
        f.close()
        return posList

    def extractData(self, encodedString, padding):
        """Huffman-decode one entry into a wordObjectClass.

        The decoded text is space-separated: ``word fptr doc1 freq1 doc2
        freq2 ...``.  An empty decode yields an empty object.
        """
        newString = self.decodeTree.decodeHuff(encodedString, padding, self.huffTable)
        newObject = wordObjectClass()
        if newString == "":
            return newObject
        newList = newString.split(" ")
        newObject.word = newList[0]
        newObject.fptr = int(newList[1])
        for k in range(2,len(newList),2):
            newObject.docs.append(int(newList[k]))
            newObject.frequency.append(int(newList[k+1]))
        return newObject

    def findSequentialTerms(self, termDict, terms):
        """Intersect the terms' postings positionally (phrase matching).

        Keeps only documents where each successive term appears at position
        +1 of the previous term; returns the surviving document ids (empty
        list if any term is missing).
        """
        docList = []
        posList = []
        flag = True
        for term in terms:
            if term not in termDict:
                return []
            if flag == True:
                # First term seeds the candidate documents and positions.
                docList = termDict[term].docs
                for k in range(0, len(docList)):
                    posList.append(termDict[term].posList[k])
                flag = False
            else:
                removeElem = []
                for k in range(0,len(docList)):
                    if docList[k] in termDict[term].docs:
                        matchIndex = termDict[term].docs.index(docList[k])
                        newVector = []
                        # Keep positions where this term directly follows the
                        # phrase matched so far.
                        for i in range(0, len(posList[k])):
                            found = False
                            for j in range(0, len(termDict[term].posList[matchIndex])):
                                if posList[k][i] + 1 == termDict[term].posList[matchIndex][j]:
                                    found = True
                                    break
                            if found == True:
                                newVector.append(posList[k][i]+1)
                        if len(newVector) != 0:
                            posList[k] = newVector
                        else:
                            removeElem.append(k)
                    else:
                        removeElem.append(k)
                # Drop documents that lost all candidate positions.
                copyDoc = []
                copyPos = []
                for k in range(0, len(docList)):
                    if k not in removeElem:
                        copyDoc.append(docList[k])
                        copyPos.append(posList[k])
                docList = copyDoc
                posList = copyPos
        return docList

    def findDocumentsWithTerm(self, terms):
        """Boolean AND query: documents containing every term in ``terms``.

        Scans the whole compressed index; returns [] unless every term was
        found, otherwise the intersection of their document lists.
        """
        self.offset = 0
        docList = []
        flag = False
        allDocList = []
        while True:
            newObject = self.readFromFile()
            if len(newObject.pad) == 0:
                break
            for k in range(0, len(newObject.pad)):
                decodedData = self.extractData(newObject.compressString[k], newObject.pad[k])
                if decodedData.word in terms:
                    allDocList.append(decodedData.docs)
        if len(allDocList) != len(terms):
            return []
        docList = allDocList[0]
        for k in range(0, len(allDocList)):
            docList = list(set(docList).intersection(allDocList[k]))
        return docList

    def findDocumentsWithPhrase(self, terms):
        """Phrase query: documents containing ``terms`` consecutively.

        Gathers postings plus position lists for each term, then delegates
        the positional intersection to findSequentialTerms.
        """
        self.offset = 0
        termDict = {}
        while True:
            newObject = self.readFromFile()
            if len(newObject.pad) == 0:
                break
            for k in range(0, len(newObject.pad)):
                decodedData = self.extractData(newObject.compressString[k], newObject.pad[k])
                if decodedData.word in terms:
                    newInfo = wordInfo()
                    newInfo.docs = decodedData.docs
                    newInfo.posList = self.findPosList(decodedData.fptr, decodedData.frequency)
                    termDict[decodedData.word] = newInfo
        return self.findSequentialTerms(termDict, terms)

    def findTopKDocuments(self, terms, numResult):
        """Ranked query: top ``numResult`` documents by summed TF-IDF score.

        Accumulates computeScore over each matching term's postings and
        returns (doc, score) pairs sorted by descending score.
        """
        self.offset = 0
        termList = []
        docList = []
        freqList = []
        while True:
            newObject = self.readFromFile()
            if len(newObject.pad) == 0:
                break
            for k in range(0, len(newObject.pad)):
                decodedData = self.extractData(newObject.compressString[k], newObject.pad[k])
                if decodedData.word in terms:
                    termList.append(decodedData.word)
                    docList.append(decodedData.docs)
                    freqList.append(decodedData.frequency)
        scoreDoc = {}
        for k in range(0, len(termList)):
            size = len(docList[k])
            for i in range(0, size):
                if docList[k][i] not in scoreDoc:
                    scoreDoc[docList[k][i]] = computeScore(freqList[k][i], self.totalDoc, size)
                else:
                    scoreDoc[docList[k][i]] += computeScore(freqList[k][i], self.totalDoc, size)
        # iteritems() is Python 2 only (dict.items() on Python 3).
        newList = sorted(scoreDoc.iteritems(), key=lambda x:-x[1])[:min(numResult, len(scoreDoc))]
        return newList
| arikj/textIndexing | InvertedFile_compressed/invertedIndex.py | invertedIndex.py | py | 5,858 | python | en | code | 0 | github-code | 90 |
28524888951 | """preProcessing.py: some tools for doing preprocessing on OpenFOAM cases,
updating dictionaries and writing scripts to execute OpenFOAM cases on a computing cluster."""
import os
import numpy as np
def update_blockMeshDict(path, domain, mindist=None, nx=100, ny=100):
    """Replaces the minimum and maximum extents of the blockMeshDict found by the path according to xmin, xmax, ymin & ymax.
    If mindist is provided, calculate the required amount of cells so that two cells fit within mindist diagonally,
    otherwise nx and ny define the amount of cells.

    PARAMETERS
    ----------
    path : str
        Path to the folder where the blockMeshDict file is located.
    domain : dict
        Dictionary containing parameters of the modelling domain.
        Mandatory keywords:
            xmin : int, float
                Lower value of the domain size along the x-axis (mm).
            xmax : int, float
                Upper value of the domain size along the x-axis (mm).
            ymin : int, float
                Lower value of the domain size along the y-axis (mm).
            ymax : int, float
                Upper value of the domain size along the y-axis (mm).
            height : int, float
                Height of the domain (i.e. its thickness along the z-axis) (mm).
    mindist : float, int
        Minimum distance between grains in the model.
    nx : int
        Number of cells along x-axis if mindist is not used.
    ny : int
        Number of cells along y-axis if mindist is not used."""
    xmax, xmin, ymax, ymin = domain["xmax"], domain["xmin"], domain["ymax"], domain["ymin"]
    height = domain["height"]
    bmd_old = open("{0}{1}blockMeshDict".format(path, os.sep), "r")
    bmd_new = open("{0}{1}blockMeshDict_new".format(path, os.sep), "w")
    if mindist:
        # Calculate required amount of cells in x and y direction
        # (cellsize = mindist / sqrt(8) makes two cell diagonals span mindist).
        cellsize = mindist / np.sqrt(8)
        nx = int(np.ceil((xmax - xmin) / cellsize))
        ny = int(np.ceil((ymax - ymin) / cellsize))
    y_dist = ymax - ymin
    # State flags for the line scanner: track whether we are inside the
    # "top"/"bottom" patch definitions and whether those patches are cyclic,
    # so their separationVector can be rewritten for the new domain height.
    top_found = False
    bottom_found = False
    top_cyclic = False
    bottom_cyclic = False
    for line in bmd_old.readlines():
        if line.startswith("x_min"):
            line = "x_min\t{0};\n".format(xmin)
        elif line.startswith("x_max"):
            line = "x_max\t{0};\n".format(xmax)
        elif line.startswith("y_min"):
            line = "y_min\t{0};\n".format(ymin)
        elif line.startswith("y_max"):
            line = "y_max\t{0};\n".format(ymax)
        elif line.startswith("z_min"):
            line = "z_min\t0;\n"
        elif line.startswith("z_max"):
            line = "z_max\t{0};\n".format(height)
        elif line.startswith("nx"):
            line = "nx\t{0};\n".format(nx)
        elif line.startswith("ny"):
            line = "ny\t{0};\n".format(ny)
        elif line.strip().startswith("top"):
            top_found = True
        elif top_found and line.strip().startswith("type"):
            if line.strip().split()[-1] == "cyclic;" or line.strip().split()[-1] == "cyclicAMI;":
                top_cyclic = True
        elif top_found and top_cyclic and line.strip().startswith("separationVector"):
            # Top patch maps onto bottom: shift by -y_dist (mm -> m via e-3).
            line = "\t\tseparationVector (0 -{0}e-3 0);\n".format(y_dist)
            top_found = False
        elif line.strip().startswith("bottom"):
            bottom_found = True
        elif bottom_found and line.strip().startswith("type"):
            if line.strip().split()[-1] == "cyclic;" or line.strip().split()[-1] == "cyclicAMI;":
                bottom_cyclic = True
        elif bottom_found and bottom_cyclic and line.strip().startswith("separationVector"):
            line = "\t\tseparationVector (0 {0}e-3 0);\n".format(y_dist)
            bottom_found = False
        bmd_new.write(line)
    bmd_old.close()
    bmd_new.close()
    # Atomically swap the rewritten dict into place.
    os.replace("{0}{1}blockMeshDict_new".format(path, os.sep), "{0}{1}blockMeshDict".format(path, os.sep))
def update_snappyHexMeshDict(path, stl_filename, height, mindist, location_in_mesh, refinement=False, castellated_mesh=True, snap=True):
    """Update snappyHexMeshDict with new .stl filename and point in mesh.

    PARAMETERS
    ----------
    path : str
        Path to the folder where the snappyHexMeshDict is located.
    stl_filename : str
        Filename of the stl file which will be incorporated into the snappyHexMeshDict.
    height : int, float
        Height of the domain (i.e. its thickness along the z-axis) (mm).
    mindist : float, int
        Minimum distance between grains in the model (mm).
    location_in_mesh : array_like
        Array of length 3 containing the coordinates to a random location inside of the mesh.
    refinement : bool
        Whether or not refinement should be enabled in the snappyHexMeshDict.
    castellated_mesh : bool
        Whether or not castellatedMesh step should be enabled in the snappyHexMeshDict.
    snap : bool
        Whether or not the snap step should be enabled in the snappyHexMeshDict."""
    # Convert to meters
    height = height * 0.001
    mindist = mindist * 0.001
    # Calculate approximate minimum cell size along x and y-dimensions
    cellsize = mindist / np.sqrt(8)
    shmd_old = open("{0}{1}snappyHexMeshDict".format(path, os.sep), "r")
    shmd_new = open("{0}{1}snappyHexMeshDict_new".format(path, os.sep), "w")
    # Scanner state: geometry_found tracks the line after "geometry {" (the
    # stl entry); refinement_found tracks the refinementSurfaces block.
    geometry_found = False
    refinement_found = False
    for line in shmd_old.readlines():
        if line.startswith("geometry"):
            geometry_found = True
        elif line.startswith("{") and geometry_found:
            pass
        elif geometry_found:
            # First content line inside the geometry block: the stl filename.
            line = "\t{0}.stl\n".format(stl_filename)
            geometry_found = False
        if line.startswith("castellatedMesh") and not line.startswith("castellatedMeshControls"):
            line = "castellatedMesh\t{0};\n".format("true" if castellated_mesh else "false")
        if line.startswith("snap") and line.split()[0] == "snap":
            line = "snap\t\t\t{0};\n".format("true" if snap else "false")
        if line.strip().startswith("refinementSurfaces"):
            refinement_found = True
        elif line.strip().startswith("level") and refinement_found:
            line = "\t\t\tlevel ({0} {0});\n".format(1 if refinement else 0)
            refinement_found = False
        if line.strip().startswith("locationInMesh"):
            # Coordinates are given in mm; the e-3 suffix converts to meters.
            line = "\tlocationInMesh ({0}e-3 {1}e-3 {2}e-3);\n".format(*[coord for coord in location_in_mesh])
        if line.strip().startswith("minVol") and not line.strip().startswith("minVolRatio"):
            # Set minimum volume to a fraction of expected cell volume
            line = "\tminVol\t{0};\n".format(cellsize**2 * height * 0.0001)
        shmd_new.write(line)
    shmd_old.close()
    shmd_new.close()
    os.replace("{0}{1}snappyHexMeshDict_new".format(path, os.sep), "{0}{1}snappyHexMeshDict".format(path, os.sep))
def update_decomposeParDict(path, n_cores):
    """Updates decomposeParDict with the appropriate amount of cores.

    PARAMETERS
    ----------
    path : str
        Path to the folder where the decomposeParDict is located.
    n_cores : int
        Number of cores the case will be decomposed into."""
    dpd_old = open("{0}{1}decomposeParDict".format(path, os.sep), "r")
    dpd_new = open("{0}{1}decomposeParDict_new".format(path, os.sep), "w")
    # Find a nice distribution of cores over x and y:
    # nx is the smallest factor of n_cores that is >= sqrt(n_cores), so the
    # (nx, ny) grid is as square as possible (prime n_cores gives n_cores x 1).
    nx = int(np.ceil(np.sqrt(n_cores)))
    while not n_cores % nx == 0:
        nx += 1
    ny = n_cores // nx
    for line in dpd_old.readlines():
        if line.startswith("numberOfSubdomains"):
            line = "numberOfSubdomains\t{0};\n".format(n_cores)
        elif line.strip() and line.split()[0] == "n":
            # Decomposition is 2D: one layer of cells along z.
            line = "\tn\t\t\t\t({0} {1} 1);\n".format(nx, ny)
        dpd_new.write(line)
    dpd_old.close()
    dpd_new.close()
    os.replace("{0}{1}decomposeParDict_new".format(path, os.sep), "{0}{1}decomposeParDict".format(path, os.sep))
def update_extrudeMeshDict(path, height):
    """Updates extrudeMeshDict with correct domain height.

    PARAMETERS
    ----------
    path : str
        Path to the folder in which the extrudeMeshDict file is located.
    height : float, int
        Height of the model along the z-axis (mm); written in meters.

    Improvement over the original: file handles are managed with ``with`` so
    they are closed even if the rewrite raises partway through.
    """
    old_path = "{0}{1}extrudeMeshDict".format(path, os.sep)
    new_path = "{0}{1}extrudeMeshDict_new".format(path, os.sep)
    # Rewrite line by line so all unrelated settings are preserved verbatim.
    with open(old_path, "r") as emd_old, open(new_path, "w") as emd_new:
        for line in emd_old.readlines():
            if line.startswith("thickness"):
                # Convert mm to m for OpenFOAM.
                line = "thickness\t{0};\n".format(height * 0.001)
            emd_new.write(line)
    # Atomically swap the rewritten dict into place.
    os.replace(new_path, old_path)
def create_script_header(n_tasks, tasks_per_node, threads_per_core, partition, name):
    """Creates a header for a bash script to be submitted to a cluster running Slurm.

    PARAMETERS
    ----------
    n_tasks : int
        Maximum number of tasks to be launched by the script.
    tasks_per_node : int
        Amount tasks to be invoked per computing node.
    threads_per_core : int
        Restrict node selection to nodes with at least the specified number of threads per core.
    partition : str
        Which queue to submit the script to.
    name : str
        Name of the job.

    RETURNS
    -------
    header : str
        A header to be put at the top of a batch script to be submitted to a cluster running Slurm."""
    directives = [
        "#!/bin/bash",
        "#SBATCH --ntasks={0}".format(n_tasks),
        "#SBATCH --ntasks-per-node={0}".format(tasks_per_node),
        "#SBATCH --threads-per-core={0}".format(threads_per_core),
        "#SBATCH --partition={0}".format(partition),
        "#SBATCH -o {0}.%N.%j.out".format(name),
        "#SBATCH -e {0}.%N.%j.err".format(name),
        "#SBATCH --job-name {0}".format(name),
    ]
    # Trailing blank line separates the header from the commands that follow it.
    return "\n".join(directives) + "\n\n"
def create_script_modules(modules, scripts):
    """Creates the part of a bash script that loads modules and sources scripts.

    PARAMETERS
    ----------
    modules : list
        List of the names of modules to be loaded (as strings).
    scripts : list
        List of the scripts to be sourced (as strings).

    RETURNS
    -------
    module_string : str
        String to be added to bash script to load modules and source given scripts."""
    load_lines = ["module load {0}\n".format(module) for module in modules]
    source_lines = ["source {0}\n".format(script) for script in scripts]
    # A blank line closes the module/source section.
    return "".join(load_lines + source_lines) + "\n"
def create_pre_processing_script(case_name, n_tasks, tasks_per_node, threads_per_core, partition, modules, scripts, refinement=False):
    """Create a bash script to do pre-processing of a case.

    Writes "preprocessing.sh" to the current directory, or — when ``refinement``
    is True — splits the two snappyHexMesh phases over "preprocessing_0.sh" and
    "preprocessing_1.sh".

    PARAMETERS
    ----------
    case_name : str
        Name of the case for which the bash script is created.
    n_tasks : int
        Maximum number of tasks to be launched by the script.
    tasks_per_node : int
        Amount tasks to be invoked per computing node.
    threads_per_core : int
        Restrict node selection to nodes with at least the specified number of threads per core.
    partition : str
        Which queue to submit the script to.
    modules : list
        List of the names of modules to be loaded (as strings).
    scripts : list
        List of the scripts to be sourced (as strings).
    refinement : bool
        Whether or not snappyHexMesh should run a refinement phase."""
    header = create_script_header(n_tasks, tasks_per_node, threads_per_core, partition, "{0}_pre".format(case_name))
    module_string = create_script_modules(modules, scripts)
    commands = "blockMesh | tee blockMesh.log\n"
    if n_tasks > 1:
        commands += """decomposePar
mpirun -np {0} snappyHexMesh -parallel | tee snappyHexMesh_0.log
reconstructParMesh
rm -rf processor*
cp -rf {1}/polyMesh constant/
rm -rf 1 2\n""".format(n_tasks, "1" if refinement else "2")
    else:
        commands += "snappyHexMesh -overwrite | tee snappyHexMesh_0.log\n"
    commands += "extrudeMesh | tee extrudeMesh.log\n"
    # When refinement is active, everything so far goes into a first script of
    # its own; the refinement run is written to a second script below.
    if refinement:
        # 'with' guarantees the handle is closed even if a write fails.
        with open("preprocessing_0.sh", "w") as script:
            script.write(header)
            script.write(module_string)
            script.write(commands)
        # Start a fresh command list for the second (refinement) phase.
        commands = ""
    if n_tasks > 1:
        commands += """decomposePar
mpirun -np {0} snappyHexMesh -parallel | tee snappyHexMesh_1.log
reconstructParMesh
rm -rf processor*
cp -rf 1/polyMesh constant/
rm -rf 1\n""".format(n_tasks)
    else:
        # BUGFIX: the serial branch previously passed '-parallel' to
        # snappyHexMesh, which is invalid without mpirun (compare the serial
        # phase-0 command above).
        commands += "snappyHexMesh -overwrite | tee snappyHexMesh_1.log\n"
    with open("preprocessing{0}.sh".format("_1" if refinement else ""), "w") as script:
        script.write(header)
        script.write(module_string)
        script.write(commands)
def create_simulation_script(case_name, n_tasks, tasks_per_node, threads_per_core, partition, modules, scripts):
    """Create a bash script to run a prepared OpenFOAM case using simpleFoam solver and export to VTK.

    Writes "runSimulations.sh" to the current directory.

    PARAMETERS
    ----------
    case_name : str
        Name of the case for which the bash script is created.
    n_tasks : int
        Maximum number of tasks to be launched by the script.
    tasks_per_node : int
        Amount tasks to be invoked per computing node.
    threads_per_core : int
        Restrict node selection to nodes with at least the specified number of threads per core.
    partition : str
        Which queue to submit the script to.
    modules : list
        List of the names of modules to be loaded (as strings).
    scripts : list
        List of the scripts to be sourced (as strings).
    """
    header = create_script_header(n_tasks, tasks_per_node, threads_per_core, partition, "{0}_sim".format(case_name))
    module_string = create_script_modules(modules, scripts)
    if n_tasks > 1:
        commands = """decomposePar
mpirun -np {0} simpleFoam -parallel | tee simpleFoam.log
reconstructPar
rm -rf processor*\n""".format(n_tasks)
    else:
        commands = "simpleFoam | tee simpleFoam.log\n"
    commands += "foamToVTK -latestTime -ascii\n"
    # 'with' guarantees the handle is closed even if a write fails
    # (the original open()/close() pair leaked on error).
    with open("runSimulations.sh", "w") as script:
        script.write(header)
        script.write(module_string)
        script.write(commands)
def check_log(log_file):
    """Checks OpenFOAM log file to see if an OpenFOAM process ended properly or aborted due to an error.
    Returns True if log ended properly, else returns False.

    PARAMETERS
    ----------
    log_file : str
        Path to the log file to be checked.

    RETURNS
    -------
    status : bool
        True or False value depending on whether or not the OpenFOAM process ended properly, respectively."""
    # Read only the tail of the (possibly large) log file in pure Python
    # instead of shelling out to 'tail' via os.popen: no shell dependency,
    # no shell-quoting issues, and an empty log no longer raises IndexError.
    with open(log_file, "rb") as log:
        log.seek(0, os.SEEK_END)
        size = log.tell()
        log.seek(max(0, size - 4096))
        words = log.read().decode(errors="replace").split()
    # A log that ends with the word 'End' (or 'run') means the process
    # finished properly; anything else (including an empty log) is a failure.
    return bool(words) and words[-1] in ("End", "run")
| bramvanderhoek/GrainSizeAnalysis | Python_Lib/preProcessing.py | preProcessing.py | py | 15,103 | python | en | code | 0 | github-code | 90 |
71805399657 | from typing import Dict, List
import operation_loader
import sys
from gfsm.transition import Transition
from gfsm.state import State
from gfsm.action import fsm_action
class FsmBuilder():
  """Builds the pieces of a finite state machine from two dicts: a runtime
  `config` (user-action search paths, action wrapper, event names) and a
  structural `definition` (states, transitions, init action)."""
  def __init__(self, config: Dict, definition: Dict):
    self._config = config            # runtime configuration dict
    self._definition = definition    # FSM structure dict
    self._action_wrapper = fsm_action  # default wrapper applied to loaded actions
    self._first_state_name = ''
    self._init_action = None
    self._events: List[str] = []
    self._states: Dict[str, State] = {}
  # API
  @property
  def events(self) -> List[str]:
    """Event names collected from the configuration (filled by build())."""
    return self._events
  @property
  def states(self) -> Dict[str, State]:
    """Built State objects, keyed by state name (filled by build())."""
    return self._states
  @property
  def first_state_name(self) -> str:
    """Name of the FSM's initial state (set by build())."""
    return self._first_state_name
  @property
  def init_action(self):
    """Wrapped initialisation action loaded from the definition, or None."""
    return self._init_action
  # internal helpers
  @staticmethod
  def _is_correct_action_name(name: str) -> bool:
    """An action name is usable when it is non-blank, at least three
    characters long and contains a dot (module.function form)."""
    if name and name.strip() and len(name) >= 3 and '.' in name:
      return True
    return False
  @staticmethod
  def get_value(data: Dict, key: str) -> List[str] | str:
    """Return data[key] for a non-blank key present in data, else ''.

    NOTE(review): the fallback is always the empty string, even where
    callers iterate the result as a list — confirm that is intended."""
    if key and key.strip() and key in data:
      return data[key]
    return ''
  def _set_runtime_environment(self) -> None:
    """Extend sys.path with the configured user-action locations and, when
    configured, replace the default action wrapper with the user's own."""
    user_actions_paths = self.get_value(self._config, 'user-actions-paths')
    for path in user_actions_paths:
      sys.path.append(path)
    user_action_wrapper_path = \
      self.get_value(self._config, 'user-action-wrapper-path')
    if len(user_action_wrapper_path) > 1:
      sys.path.append(user_action_wrapper_path)
    user_action_wrapper_name = \
      self.get_value(self._config, 'user-action-wrapper-name')
    if self._is_correct_action_name(user_action_wrapper_name):
      self._action_wrapper = operation_loader.get(user_action_wrapper_name)
  def _load_action(self, action_name):
    """Load the named action via operation_loader and wrap it; returns None
    for blank/invalid names."""
    # Load the action from actions implementation by name
    if self._is_correct_action_name(action_name):
      if action_name.startswith('____'):
        # a '____' prefix forces the default wrapper instead of the
        # (possibly user-supplied) self._action_wrapper
        action_name = action_name[4:]
        return fsm_action(operation_loader.get(action_name))
      return self._action_wrapper(operation_loader.get(action_name))
    return None
  def _build_state(self, state_def: Dict, idx: int) -> None:
    """Create a State (with optional entry/exit actions) and register it
    in self._states under its name."""
    name = self.get_value(state_def, 'name')
    entry_action = self._load_action(self.get_value(state_def, 'entry-action'))
    exit_action = self._load_action(self.get_value(state_def, 'exit-action'))
    state = State(idx, name)
    state.entry_action = entry_action
    state.exit_action = exit_action
    self._states[state.name] = state
    return
  def _build_transition(self, tr_def: Dict) -> None:
    """Create a Transition (with optional start/end actions) and attach it
    to its source state under the triggering event."""
    tr_name = self.get_value(tr_def, 'name')
    tr_event = self.get_value(tr_def, 'event')
    target = self._states[self.get_value(tr_def, 'target')]
    tr_action = self.get_value(tr_def, 'action')
    action = self._load_action(tr_action)
    transition = Transition(tr_name, target.name, action)
    if 'start-action' in tr_def:
      tr_start_action = self.get_value(tr_def, 'start-action')
      start_action = self._load_action(tr_start_action)
      transition.start_action = start_action
    if 'end-action' in tr_def:
      tr_end_action = self.get_value(tr_def, 'end-action')
      end_action = self._load_action(tr_end_action)
      transition.end_action = end_action
    # associate the event with Transition via State
    # NOTE(review): _states.get() may return None for an unknown 'src',
    # which would raise AttributeError on the next line — confirm inputs.
    src = self._states.get(self.get_value(tr_def, 'src'))
    src.transitions[tr_event] = transition
    return
  def _build_state_transitions(self, state_def: Dict) -> None:
    """Build every transition declared on one state definition."""
    trs_def = self.get_value(state_def, 'transitions')
    for tr_def in trs_def:
      self._build_transition(tr_def)
    return
  def build(self) -> None:
    """Build events, then states, then transitions, and finally record the
    init action and the first state's name."""
    self._set_runtime_environment()
    print("FSM bulder. Build the fsm from: {}".format(self._config['info']))
    # build events
    events_def = self._config['events']
    for en in events_def:
      self._events.append(en)
    # build states (index doubles as the state id)
    states_def = self.get_value(self._definition, 'states')
    for i, state_def in enumerate(states_def):
      self._build_state(state_def, i)
    # build transitions and associate events with Transition via State
    for state_def in states_def:
      self._build_state_transitions(state_def)
    # get init action
    init_action = \
      self._load_action(self.get_value(self._definition, 'init-action'))
    # Setup FSM implementation
    self._init_action = init_action
    self._first_state_name = self.get_value(self._definition, 'first-state')
    return
| ekarpovs/gfsm | gfsm/fsm_builder/fsm_builder.py | fsm_builder.py | py | 4,490 | python | en | code | 0 | github-code | 90 |
class Solution(object):
    def checkValid(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: bool

        An n x n matrix is valid when every row and every column contains
        exactly the numbers 1..n.
        """
        # FIX: the original used Python-2-only xrange() and sorted the
        # caller's rows in place (mutating the input). Set comparison avoids
        # both: a length-n row/column equals {1..n} iff it is a permutation.
        expected = set(range(1, len(matrix) + 1))
        for row in matrix:
            if set(row) != expected:
                return False
        for column in zip(*matrix):
            if set(column) != expected:
                return False
        return True
return True | petrosDemetrakopoulos/Leetcode | code/Python/2133-CheckIfEveryRowAndColumnContainsAllNumbers.py | 2133-CheckIfEveryRowAndColumnContainsAllNumbers.py | py | 600 | python | en | code | 0 | github-code | 90 |
40307766672 | #!/usr/bin/env python-sirius
"""."""
import sys
from siriuspy.search import PSSearch
from siriuspy.namesys import SiriusPVName
from siriuspy.pwrsupply.data import PSData
def get_all_psnames():
    """Return every power supply name known to PSSearch, each wrapped as a
    SiriusPVName."""
    pss = PSSearch()
    return [SiriusPVName(name) for name in pss.get_psnames()]
def select_psnames(psgroup):
    """Return the sorted power supply names matched by `psgroup`.

    `psgroup` may be an exact PS name, 'all', a section name
    ('tb'/'bo'/'ts'/'si', case-insensitive) or one of the special aliases
    handled below ('bo-correctors', 'li', 'li-dipole', 'li-spectrometer',
    'li-quadrupoles', 'li-correctors')."""
    psnames = []
    allps = get_all_psnames()
    if psgroup in allps:
        # Exact power supply name.
        psnames.append(psgroup)
    elif psgroup == 'all':
        psnames = allps
    elif psgroup.lower() == 'tb':
        for ps in allps:
            if ps.sec == 'TB':
                psnames.append(ps)
    elif psgroup.lower() == 'bo':
        for ps in allps:
            if ps.sec == 'BO':
                psnames.append(ps)
    elif psgroup.lower() == 'ts':
        for ps in allps:
            if ps.sec == 'TS':
                psnames.append(ps)
    elif psgroup.lower() == 'si':
        for ps in allps:
            if ps.sec == 'SI':
                psnames.append(ps)
    # NOTE(review): this is 'if', not 'elif'. It still behaves correctly,
    # because 'bo-correctors' matches none of the branches above, but it is
    # evaluated even when an earlier branch already ran — consider 'elif'
    # for consistency with the rest of the chain.
    if psgroup.lower() == 'bo-correctors':
        for ps in allps:
            if ps.sec == 'BO' and (ps.dev == 'CH' or ps.dev == 'CV'):
                psnames.append(ps)
    elif psgroup.lower() == 'li':
        for ps in allps:
            if ps.sec == 'LA' and ps.sub == 'CN':
                psnames.append(ps)
            if ps.sec == 'LI':
                psnames.append(ps)
    elif psgroup.lower() in ('li-dipole', 'li-spectrometer'):
        for ps in allps:
            if ps.sec == 'LA' and ps.sub == 'CN' and ps.dis == 'H1DPPS':
                psnames.append(ps)
    elif psgroup.lower() in ('li-quadrupoles',):
        for ps in allps:
            if ps.sec == 'LA' and ps.sub == 'CN' and 'QPS' in ps.dis:
                psnames.append(ps)
    elif psgroup.lower() in ('li-correctors',):
        for ps in allps:
            if ps.sec == 'LA' and ps.sub == 'CN' and 'CPS' in ps.dis:
                psnames.append(ps)
    return sorted(psnames)
def print_pvs(psnames):
    """Print every property PV name ("<psname>:<property>") of each given
    power supply, one per line."""
    for psname in psnames:
        properties = PSData(psname).propty_database
        for prop in properties:
            print(psname + ':' + prop)
def run():
    """Collect the power supply groups named on the command line (default:
    'all') and print the PVs of every matching power supply, sorted."""
    groups = sys.argv[1:] or ['all']
    psnames = []
    for group in groups:
        psnames += select_psnames(group)
    print_pvs(sorted(psnames))
# Script entry point.
if __name__ == '__main__':
    run()
| lnls-sirius/scripts | bin/sirius-script-app-ps-pvsprint.py | sirius-script-app-ps-pvsprint.py | py | 2,476 | python | en | code | 0 | github-code | 90 |
932784150 | import requests
# go here to create a token for your app
# https://www.yammer.com/client_applications
# NOTE(review): hard-coded placeholder token — replace before running.
token = "MYTOKENGOESHERE"
# this is the community ID
groupID = "78686445568"
page = 1
pagesize = 50
# First page is fetched up front; each loop iteration prints the page it
# already holds and then fetches the next one.
endpoint = "https://www.yammer.com/api/v1/users/in_group/{}.json?page={}".format(groupID,page)
headers = {"Authorization": "Bearer " + token}
data = requests.get(endpoint, headers=headers).json()
# Number of full pages; +1 below covers the final partial page.
numberofpages = data['total_count']//pagesize
count = 0
for x in range(numberofpages+1):
    # print("THIS IS PAGE {}".format(page))
    page = page + 1
    # Print one "email;" line per user on the current page.
    for i in data['users']:
        count = count + 1
        print(i["email"] + ";")
    # Fetch the next page (the very last fetch is never printed — it is one
    # request more than needed).
    endpoint = "https://www.yammer.com/api/v1/users/in_group/{}.json?page={}".format(groupID,page)
    data = requests.get(endpoint, headers=headers).json()
print ("number of users: {}".format(count))
# Print on a file, run: python3 yammer_UsersInACommunity.py > email.txt
| vivamau/vivamau_scripts | yammer_UsersInACommunity.py | yammer_UsersInACommunity.py | py | 889 | python | en | code | 0 | github-code | 90 |
11552315040 | #! /usr/bin/python3
from collections import defaultdict
class Graph:
    """Weighted graph stored as nested adjacency dictionaries:
    graph[u][v] -> weight of edge u -> v."""

    def __init__(self):
        """Initialise an empty graph."""
        self.graph = defaultdict(dict)

    def addEdge(self, u, v, weight=1):
        """Add the edge u -> v with the given weight (1 by default)."""
        self.graph[u][v] = weight

    def dijkstra(self, src):
        """Return a dict mapping every vertex reachable from `src` to its
        shortest-path distance (Dijkstra's algorithm)."""
        permanent_list = {}          # vertex -> final shortest distance
        tentitive_list = {src: 0}    # frontier: vertex -> best distance so far
        # FIX: use a set instead of a list sized by len(self.graph). The old
        # list raised IndexError for vertices that only appear as edge
        # targets (no outgoing edges), since those are not keys of the graph.
        visited = set()
        # run till tentative list is empty
        while tentitive_list:
            # can use priority queue here; min() keeps the original O(V) scan
            tentitive_minimum = min(tentitive_list, key=tentitive_list.get)
            permanent_list[tentitive_minimum] = tentitive_list[tentitive_minimum]
            visited.add(tentitive_minimum)
            # relax all unvisited neighbours of the node just made permanent
            for neighbour, weight in self.graph[tentitive_minimum].items():
                if neighbour not in visited:
                    new_dist = tentitive_list[tentitive_minimum] + weight
                    # if new cost is less than the cumulative cost of the node
                    # in the tentative list, replace it
                    if new_dist < tentitive_list.get(neighbour, float('inf')):
                        tentitive_list[neighbour] = new_dist
            del tentitive_list[tentitive_minimum]
        return permanent_list
# Interactive driver: for each of q graphs, read the edge list from stdin,
# then print the shortest distances from a chosen source vertex.
if __name__ == "__main__":
    q = int(input("Enter number of graphs\n"))
    for a0 in range(q):
        g = Graph()
        print("Enter no. of vertices and edges")
        n, m = input().strip().split(' ')
        n, m = [int(n), int(m)]
        print("Enter %d edges u->v:w\n"%m) # both directions are inserted below, so the graph is effectively undirected
        for edges_i in range(m):
            u,v,w = [int(edges_temp) for edges_temp in input().strip().split(' ')]
            g.addEdge(u,v,w)
            g.addEdge(v,u,w)
        print("Enter edge 0-(%d) to start traversal"%(n-1))
        source = int(input().strip())
        # Despite the name, these are single-source distances from `source`.
        all_pair_shortest_path = g.dijkstra(source)
        print(all_pair_shortest_path)
| naveenrajm7/py-algo-ds | graph/dijkstra.py | dijkstra.py | py | 2,456 | python | en | code | 0 | github-code | 90 |
# FIX: removed dataset-extraction residue ("22665969340 | ") that was fused
# onto the assignment and made the module unparseable.
# Namelist entries shared between WPS and WRF (the "share" section).
SHARE_DEFAULT_FIELDS = [
    'dynamics.wrf_core', 'domains.max_dom',
    'domains.timespan.start_date',
    'domains.timespan.end_date',
    'running.input.interval_seconds',
    ('geogrid.io_form', 'io_form_geogrid'),
]
# Fields written into the WPS "geogrid" namelist section.
GEOGRID_DEFAULT_FIELDS = [
    'domains.parent_id',
    'domains.geometry.parent_grid_ratio',
    'domains.geometry.i_parent_start',
    'domains.geometry.j_parent_start',
    'domains.geometry.e_we',
    'domains.geometry.e_sn',
    'domains.geometry.geog_data_res',
    '@base.geometry.dx',
    '@base.geometry.dy',
    'geometry.geog_data_path',
    'geometry.opt_geogrid_tbl_path',
]
# Extra geometry fields required per map projection (keyed by projection name).
GEOMETRY_PROJECTION_FIELDS = {
    'lambert': [
        'geometry.ref_lat',
        'geometry.ref_lon',
        'geometry.truelat1',
        'geometry.truelat2',
        'geometry.stand_lon'
    ]
}
# Fields for the WPS "ungrib" namelist section.
UNGRIB_DEFAULT_FIELDS = [
    'ungrib.out_format',
    'ungrib.prefix',
]
# Fields for the WPS "metgrid" namelist section.
METGRID_DEFAULT_FIELDS = [
    'metgrid.fg_name',
    'metgrid.io_form_metgrid',
    'metgrid.opt_metgrid_tbl_path',
]
# No defaults defined for the "fdda" (nudging) section yet.
FDDA_DEFAULT_FIELDS = [
]
# Fields for the WRF "time_control" namelist section.
# NOTE(review): 2-tuples appear to map (config path, namelist variable name)
# — confirm against the namelist writer.
TIME_CONTROL_DEFAULT_FIELDS = [
    ('domains.timespan.start.year', 'start_year'),
    ('domains.timespan.start.month', 'start_month'),
    ('domains.timespan.start.day', 'start_day'),
    ('domains.timespan.start.hour', 'start_hour'),
    ('domains.timespan.start.minute', 'start_minute'),
    ('domains.timespan.start.second', 'start_second'),
    ('domains.timespan.end.year', 'end_year'),
    ('domains.timespan.end.month', 'end_month'),
    ('domains.timespan.end.day', 'end_day'),
    ('domains.timespan.end.hour', 'end_hour'),
    ('domains.timespan.end.minute', 'end_minute'),
    ('domains.timespan.end.second', 'end_second'),
    'running.input.interval_seconds',
    ('domains.running.history.interval', 'history_interval'),
    ('running.history.io_form', 'io_form_history'),
    'domains.running.output.frames_per_outfile',
    'domains.running.input.input_from_file',
    'running.input.restart',
    'running.input.io_form_restart',
    'running.input.io_form_input',
    'running.input.io_form_boundary',
    ('running.debug.level', 'debug_level'),
    ('running.history.outname', 'history_outname'),
]
# Fields for the WRF "domains" namelist section.
DOMAINS_DEFAULT_FIELDS = [
    ('running.time_step_seconds', 'time_step'),
    'running.time_step_fract_num',
    'running.time_step_fract_den',
    'running.feedback',
    'running.smooth_option',
    'domains.max_dom',
    'domains.parent_id',
    'domains.geometry.grid_id',
    'domains.geometry.e_vert',
    'domains.geometry.e_we',
    'domains.geometry.e_sn',
    'domains.geometry.dx',
    'domains.geometry.dy',
    'domains.geometry.i_parent_start',
    'domains.geometry.j_parent_start',
    'domains.geometry.parent_grid_ratio',
    'domains.running.parent_time_step_ratio',
    'real.num_metgrid_levels',
    'real.num_metgrid_soil_levels',
    'real.eta_levels',
    'real.auto_levels_opt',
    'running.parallel.numtiles',
]
# Fields for the WRF "physics" namelist section.
PHYSICS_DEFAULT_FIELDS = [
    ('domains.physics.mp', 'mp_physics'),
    ('domains.physics.ra_lw', 'ra_lw_physics'),
    ('domains.physics.ra_sw', 'ra_sw_physics'),
    'domains.physics.radt',
    ('domains.physics.sf_sfclay', 'sf_sfclay_physics'),
    ('domains.physics.sf_surface', 'sf_surface_physics'),
    ('domains.physics.bl_pbl', 'bl_pbl_physics'),
    ('domains.physics.cu', 'cu_physics'),
    'domains.physics.cudt',
    'physics.num_soil_layers',
    'physics.num_land_cat',
    'physics.surface_input_source',
]
# Fields for the WRF "dynamics" namelist section.
DYNAMICS_DEFAULT_FIELDS = [
    'dynamics.rk_ord',
    'domains.dynamics.diff_opt',
    'domains.dynamics.km_opt',
    'domains.dynamics.non_hydrostatic',
]
# Fields for the WRF "bdy_control" (lateral boundary) namelist section.
BOUNDARY_CONTROL_FIELDS = [
    'geometry.boundary.spec_bdy_width',
    'geometry.boundary.spec_zone',
    'geometry.boundary.relax_zone',
    'domains.geometry.boundary.specified',
    'domains.geometry.boundary.nested',
]
# No defaults defined for the "grib2" section yet.
GRIB2_DEFAULT_FIELDS = [
]
# Fields for the "namelist_quilt" (I/O quilting) section.
NAMELIST_QUILT_DEFAULT_FIELDS = [
    'running.parallel.nio_tasks_per_group',
    'running.parallel.nio_groups'
]
# Default configuration values: 'global' entries apply once to the whole run,
# 'domains' -> 'base' is the per-domain template.
DEFAULTS = {
    'global': {
        'geometry': {
            'map_proj': 'lambert',
            'opt_geogrid_tbl_path': '/wrf/WPS/geogrid',
            'boundary': {
                'spec_bdy_width': 5,
                'spec_zone': 1,
                'relax_zone': 4,
                'constant_bc': False,
                'spec_exp': 0.33,
            },
        },
        'real': {
            'auto_levels_opt': 2,
        },
        'dynamics': {
            'wrf_core': 'ARW',
            'rk_ord': 3,
        },
        'running': {
            'debug': {
                'level': 0,
            },
            'input': {
                'restart': False,
                'io_form_input': 2,
                'io_form_restart': 2,
                'io_form_boundary': 2,
            },
            'history': {
                'outname': "/run/wrfout_d<domain>_<date>.nc",
                'io_form': 2,
            },
            'feedback': 0,
            'smooth_option': 2,
            'parallel': {
                'nio_tasks_per_group': 0,
                'nio_groups': 1,
                'numtiles': 1,
            },
        },
        'geogrid': {
            'io_form': 2,
            'opt_output_from_geogrid_path': '.',
        },
        'ungrib': {
            'out_format': 'WPS',
            'prefix': 'FILE',
        },
        'metgrid': {
            'fg_name': 'FILE',
            'io_form_metgrid': 2,
            'opt_metgrid_tbl_path': '/wrf/WPS/metgrid',
        },
        'physics': {
            'swint_opt': 0,
            'convtrans_avglen_m': 20,
            'ishallow': 0,
            'surface_input_source': 1,
            'num_soil_layers': 5,
            'num_land_cat': 21,
            'maxiens': 1,
            'maxens': 3,
            'maxens2': 3,
            'maxens3': 16,
            'ensdim': 144,
            'mp_zero_out': 0,
            'usemonalb': False,
            'mosaic_lu': 0,
            'mosaic_soil': 0,
        },
    },
    'domains': {
        'base': {
            'timespan': {
                'start': {
                    'year': 0,
                    'month': 0,
                    'day': 0,
                    'hour': 0,
                    'minute': 0,
                    'second': 0,
                },
                'end': {
                    'year': 0,
                    'month': 0,
                    'day': 0,
                    'hour': 0,
                    'minute': 0,
                    'second': 0,
                },
            },
            'running': {
                'output': {
                    'frames_per_outfile': 1,
                },
                'input': {
                    'input_from_file': True,
                },
            },
            'physics': {
                'mp': 0,
                'ra_lw': 0,
                'ra_sw': 0,
                'sf_sfclay': 0,
                'sf_surface': 0,
                'bl_pbl': 0,
                'bldt': 0,
                'cu': 0,
                'cu_diag': 0,
                'cudt': 0,
                'radt': 0,
                'cu_rad_feedback': False,
            },
            'dynamics': {
                'diff_opt': 1,
                'km_opt': 1,
                'non_hydrostatic': True,
            },
        },
    },
}
| tdm-project/tdm-tools | tdm/wrf/constants.py | constants.py | py | 7,254 | python | en | code | 0 | github-code | 90 |
# FIX: removed dataset-extraction residue ("18269083719 | ") fused onto the
# first line, which made the file unparseable; also clearer variable names.
# Reads three integers from one space-separated line; prints "Yes" when at
# least two of them are equal but not all three, otherwise "No".
tokens = input("").split(" ")
a = int(tokens[0])
b = int(tokens[1])
c = int(tokens[2])
if a == b and a == c:
    print("No")
elif (a - b) * (b - c) * (c - a) == 0:
    # The product is zero iff some pair is equal (all-equal handled above).
    print("Yes")
else:
    print("No")
def is_753(n):
    """Return True when the decimal representation of n contains at least one
    '7', at least one '5' and at least one '3'.

    (FIX: removed dataset-extraction residue fused onto the def line.)"""
    digits = str(n)
    return all(digits.count(d) >= 1 for d in "753")
# Count "Shichi-Go-San" numbers up to n: integers built only from the digits
# 7, 5 and 3 that use each of the three digits at least once (via is_753).
n = int(input())
candidates = []
frontier = [7, 5, 3]
for _ in range(9):  # grow from 1-digit seeds up to 10-digit numbers
    frontier = [stem * 10 + digit for digit in (7, 5, 3) for stem in frontier]
    candidates.extend(frontier)
print(len([x for x in candidates if is_753(x) and x <= n]))
71248172137 | from collections import deque
from sys import maxsize
def citire(orientat=False,nume_fisier="b4.in"):
    """Read a graph from file ("citire" = "read"): first line holds "n m",
    then m edge lines "x y" (stored in both directions unless `orientat`
    /directed is True), and a final line with the source and destination
    vertices "s d".

    Returns (n, adjacency_lists, s, d); adjacency_lists is indexed 0..n."""
    n=0
    la=[]
    with open(nume_fisier) as f:
        linie=f.readline()
        n,m=(int(z) for z in linie.split())
        la=[[] for i in range(n+1)]
        for i in range(m):
            x,y=(int(z) for z in f.readline().split())
            la[x].append(y)
            if not orientat:
                la[y].append(x)
        s, d = (int(z) for z in f.readline().split())
    return n, la, s, d
def find_paths(paths, path, parent, u):
    """Walk the BFS parent lists backwards from u, appending every complete
    path to `paths` (each path is stored end-to-start)."""
    if u == -1:
        # Hit the sentinel stored for the source: one full path is complete.
        paths.append(list(path))
        return
    for predecessor in parent[u]:
        path.append(u)
        find_paths(paths, path, parent, predecessor)
        path.pop()
# Read the graph and (source, destination) at import time; `la` is used as a
# module-level global by BFS below.
n, la, s, d = citire()
def BFS(parent, n, start):
    """Breadth-first search over the module-level adjacency lists `la`.

    Fills `parent` so that parent[v] lists every predecessor of v lying on
    some shortest path from `start`; the source itself gets the sentinel
    parent -1 (used by find_paths to terminate)."""
    dist = [maxsize for _ in range(n + 1)]
    q = deque()
    q.append(start)
    parent[start] = [-1]
    dist[start] = 0
    while len(q) > 0:
        u = q.popleft()
        for v in la[u]:
            if dist[v] > dist[u] + 1:
                # Strictly shorter route found: v's parents become just u.
                dist[v] = dist[u] + 1
                q.append(v)
                parent[v].clear()
                parent[v].append(u)
            elif dist[v] == dist[u] + 1:
                # Equally short route: record u as an extra predecessor.
                parent[v].append(u)
def print_paths(n, start, end):
    """Run BFS from `start` and print every shortest path start -> end,
    one path per line with vertices separated by spaces."""
    paths = []
    path = []
    parent = [[] for _ in range(n+1)]
    BFS(parent, n, start)
    find_paths(paths, path, parent, end)
    for v in paths:
        # Paths are collected end-to-start, so reverse before printing.
        v = reversed(v)
        for u in v:
            print(u, end=" ")
        print()
print_paths(n, s, d) | DanNimara/FundamentalAlgorithms-Graphs | Lab1/B4.py | B4.py | py | 1,537 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.