seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38313678941 | import os
import shutil
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
    """Delete *filepath*, interpreted relative to the generated project root."""
    os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == '__main__':
    # Cookiecutter post-generation hook: prune optional files according to the
    # answers baked into the '{{ cookiecutter.* }}' template variables.
    if '{{ cookiecutter.create_author_file }}' != 'y':
        remove_file('AUTHORS.rst')
    if '{{ cookiecutter.use_oasis }}' != 'y':
        remove_file('{{cookiecutter.project_slug}}/oasis.py')
    if '{{ cookiecutter.create_PyPI_package}}' != 'y':
        remove_file('setup.py')
    if '{{ cookiecutter.include_notebooks}}' != 'y':
        # notebooks/ is a directory, so rmtree instead of remove_file
        shutil.rmtree('notebooks')
    if 'Not open source' == '{{ cookiecutter.open_source_license }}':
        remove_file('LICENSE')
| ferie24/DemoNotebook | ArangoDB_Template/hooks/post_gen_project.py | post_gen_project.py | py | 671 | python | en | code | 0 | github-code | 36 |
32473828762 | from config import bot, chat_id
from time import sleep
from telebot import types
from plugins.error import Error
from plugins.message import shout
import random
from plugins.error import in_chat, check_private
@in_chat()
def say(m):
    """Handle the /say command: relay a sticker or a text message to the
    configured target chat (chat_id), confirming to the user in private.

    Comments translated from Russian; runtime strings left untouched.
    """
    # Remove the triggering command message from the chat.
    bot.delete_message(m.chat.id, m.message_id)
    try:
        if m.chat.type != "private":
            # Reply if the command was issued from a group/super chat:
            # point the user to the bot's private chat, then clean up after 10s.
            markup = types.InlineKeyboardMarkup()
            link_bot= types.InlineKeyboardButton(text='Перейти в лс', url='t.me/cat_glav_bot')
            markup.add(link_bot)
            sent=bot.send_message(m.chat.id, "Команда /say работает только в лс бота", reply_markup = markup)
            sleep(10)
            bot.delete_message(m.chat.id,sent.message_id)
        # NOTE(review): there is no early return above, so the forwarding code
        # below also runs for non-private chats — confirm this is intended.
        if m.reply_to_message:
            # Command was a reply: forward the replied-to sticker.
            # (If the reply has no sticker this raises AttributeError, which is
            # swallowed by the except below.)
            sticker_id = bot.get_file(m.reply_to_message.sticker.file_id)
            bot.send_sticker(chat_id, sticker_id.file_id)
            sent=bot.send_message(m.chat.id, "Стикер успешно отправлен!")  # confirm to the sender
            sleep(10)
            bot.delete_message(m.chat.id,sent.message_id)
        else:
            # Forward the text after the "/say " prefix (from the 5th character on).
            bot.send_message(chat_id, f"_{random.choice(shout)}:_ *'{m.text[5:]}'* 😱 ", parse_mode="Markdown")
            sent=bot.send_message(m.chat.id, "Сообщение успешно отправлено!")  # confirm to the sender
            sleep(10)
            bot.delete_message(m.chat.id,sent.message_id)
    except Exception:
        # Delegate all failures to the project's error handler.
        Error(m, bot).error()
| evilcatsystem/telegram-bot | plugins/say.py | say.py | py | 2,015 | python | ru | code | 1 | github-code | 36 |
44209779593 | """OWM API parser for creating weather report of different cities"""
from app.input_file_handler import *
from app.logging_and_error_handler import *
import requests
class CityReport:
    """Get report from OWM for current city and process it.

    Fetches both the current-weather and the forecast reports on construction
    and validates that the city exists.
    """
    def __init__(self, city: City):
        """
        :param city: city class, for which we want to have report
        """
        self.city = city
        # NOTE(review): API key is hard-coded; consider loading it from
        # configuration or an environment variable.
        self.api_key = "81f4883e62f5ec5c7ec74e04ebb662ed"  # Unique API key for our client
        self.base_url = "http://api.openweathermap.org/data/2.5/"  # OpenWeatherAPI basic url for requests of data
        self.weather_report = {}
        self.forecast_report = {}
        self.create_city_report()
        self.create_city_report("forecast")
        self.validate_status_code()
    def create_city_report(self, service="weather", params="&units=metric"):
        """
        Send request to OWM API for weather report and save it as json.

        :param params: extra query parameters (units, forecast day limits, ...)
        :param service: which OWM API service to use — "weather" or "forecast"
        :raises AttributeError: if *service* is neither of the two known values
        :return: the parsed JSON response
        """
        complete_url = self.base_url + service + "?appid=" + self.api_key + "&q=" + self.city.city + params
        owm_report = requests.get(complete_url)
        owm_report_json = owm_report.json()
        if service == "weather":
            logging(3, f"Weather report for city {self.city} is successfully generated")
            self.weather_report = owm_report_json
        elif service == "forecast":
            logging(3, f"Forecast report for city {self.city} is successfully generated")
            self.forecast_report = owm_report_json
        else:
            logging(1, "Faulty report creation service parameter")
            raise AttributeError
        return owm_report_json
    def validate_status_code(self):
        """
        Check a report for status code of creation.

        HTTP Status Code 200 means the request succeeded;
        HTTP Status Code 404 means the city was not found.

        NOTE(review): 'cod' is compared as int 200 for the weather report but
        as string '200' for the forecast report — presumably the two OWM
        endpoints return different types; verify against the API.
        Also note: if neither branch matches, the method implicitly returns None.
        """
        if self.weather_report['cod'] == 200 and self.forecast_report['cod'] == '200':
            self.city.is_exist = True
            logging(3, f"City {self.city} existence is confirmed")
            return True
        elif self.weather_report['cod'] == '404' or self.forecast_report['cod'] == '404':
            self.city.is_exist = False
            logging(2, f"City {self.city} does not exist and reports are unusable")
            return False
class CityReportProcess:
    """Process created full report for filtering and formatting variables until required condition.

    NOTE(review): this class uses `datetime` and `re`, which are presumably
    brought in by the wildcard imports at the top of the file — confirm.
    """
    def __init__(self, report: CityReport):
        """
        :param report: CityReport class, where is created a full report
        :raises CityNotExistError: if the report's city does not exist
        """
        self.report = report
        self.validate_report_for_existing_city()
        self.main_details_ready_report = {}
        self.weather_ready_report = {}
        self.forecast_ready_report = {}
        self.process_main_details()
        self.process_current_weather()
        self.process_forecast()
        logging(3, f"Report for city {self.report.city} is successfully processed")
    def process_main_details(self):
        """
        Process full report for saving only required details about city and report itself.
        Required: city, coordinates, temperatureUnit
        :return: filtered main details report dictionary
        """
        full_report = self.report.weather_report
        city = full_report['name']
        temperature_unit = "Celsius"  # requests are made with &units=metric
        coordinates = str(full_report['coord']['lat']) + "," + str(full_report['coord']['lon'])
        report = {"city": city, "coordinates": coordinates, "temperatureUnit": temperature_unit}
        self.main_details_ready_report = report
        return report
    def process_current_weather(self):
        """
        Process full report for saving only required details about current weather information.
        Required: date, temperature, humidity, pressure
        :return: filtered current weather report dictionary
        """
        full_report = self.report.weather_report
        full_report_weather = self.process_main_weather_details(full_report)
        # Convert epoch timestamp to the date
        date = datetime.datetime.fromtimestamp(full_report['dt']).strftime("%d-%m-%Y")
        report = {"date": date}
        report.update(full_report_weather)
        self.weather_ready_report = report
        return report
    def process_forecast(self):
        """
        Process full report for saving only required details about 3 days forecast.
        Required for each day: date, temperature, humidity, pressure
        :return: filtered forecast report list (one entry per day)
        """
        full_report = self.report.forecast_report
        full_report_all_msg = full_report['list']
        today_date = datetime.date.today().strftime("%Y-%m-%d") + " 12:00:00"
        day_num = 0
        report = []
        for forecast_msg in full_report_all_msg:
            forecast_timestamp = forecast_msg['dt_txt']
            # Keep only the noon forecast for each day, skipping today.
            timestamp_check = re.search("....-..-.. 12:00:00", forecast_timestamp)  # Use only launch forecasts
            if day_num < 3 and timestamp_check is not None and forecast_timestamp != today_date:
                forecast_for_day = self.process_forecast_day(forecast_msg)
                report.append(forecast_for_day)
                day_num += 1
            if day_num == 3:
                break
        self.forecast_ready_report = report
        return report
    def process_forecast_day(self, forecast_msg):
        """
        Process forecast report for saving only required details about one day forecast.
        Required for each day: date, temperature, humidity, pressure
        :type forecast_msg: dict of full report for that day
        :return: filtered forecast day report dictionary
        """
        weather_report = self.process_main_weather_details(forecast_msg)
        # Convert epoch timestamp to the date
        date = datetime.datetime.fromtimestamp(forecast_msg['dt']).strftime("%d-%m-%Y")
        report = {"date": date, "weather": weather_report}
        return report
    @staticmethod
    def process_main_weather_details(report_to_process) -> dict:
        """
        Process weather report for saving only required details about weather.
        Required: temperature, humidity, pressure
        :type report_to_process: dict of full report for that day
        :return: filtered only weather day report dictionary
        """
        weather_report = report_to_process['main']
        temperature = weather_report['temp']
        humidity = weather_report['humidity']
        pressure = weather_report['pressure']
        report = {"temperature": temperature, "humidity": humidity, "pressure": pressure}
        return report
    def validate_report_for_existing_city(self):
        """City, which is not exist --> doesn't have report to process --> must not be processed"""
        if self.report.city.is_exist is not True:
            raise CityNotExistError
        return True
| vizamo/Python-Study-Coding | ICD0004/Final Project/app/owm_parser.py | owm_parser.py | py | 7,368 | python | en | code | 0 | github-code | 36 |
44648666543 | from rest_framework.documentation import include_docs_urls
from django.contrib import admin
from django.urls import path, re_path, include
from django.conf.urls import url
# URL routing for the kanban project.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
]
urlpatterns += [
    # Browsable DRF documentation; auth/permissions disabled so it is public.
    path(
        'drf-docs/',
        include_docs_urls(
            title='DRF Docs',
            authentication_classes=[],
            permission_classes=[],
        ),
    ),
]
urlpatterns += [
    # Application routes.
    path('board/', include('board.urls')),
    path('account/', include('account.urls')),
]
| Endlex-net/Easting_kanban | kanban/urls.py | urls.py | py | 539 | python | en | code | 3 | github-code | 36 |
39014063579 | # 55+20-50+40-30+20
# Baekjoon 1541 "Lost parentheses": minimize an expression of +/- by
# parenthesizing. Every group after the first '-' can be subtracted in full.
data = input().split('-')  # e.g. "55+20-50+40-30+20" -> ['55+20', '50+40', '30+20']
cnt = 0
temp = []
for i in range(1, len(data)):  # each group after the first '-' is subtracted entirely
    temp = data[i].split('+')
    for num in temp:
        cnt -= int(num)
if '+' in data[0]:  # the leading group keeps its '+' signs: sum its terms
    print(sum(map(int, data[0].split('+'))) + cnt)
else:  # leading group is a single number
    print(int(data[0]) + cnt)
| eomsteve/algo_study | nogusdn/1541 잃어버린 괄호.py | 1541 잃어버린 괄호.py | py | 521 | python | ko | code | 0 | github-code | 36 |
11497660843 | '''
github.com/razyar
'''
import sys
import os
from PyQt4 import QtCore, QtGui
class QDataViewer(QtGui.QWidget):
    """Minimal upload window: pick a file and copy it into the web root."""

    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.setGeometry(600, 400, 600, 400)
        self.setWindowTitle('iSpace Platform - Upload')
        self.quitButton = QtGui.QPushButton('Cancel', self)
        self.uploadButton = QtGui.QPushButton('Select', self)
        hBoxLayout = QtGui.QHBoxLayout()
        hBoxLayout.addWidget(self.quitButton)
        hBoxLayout.addWidget(self.uploadButton)
        self.setLayout(hBoxLayout)
        self.connect(self.quitButton, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.SLOT('quit()'))
        self.connect(self.uploadButton, QtCore.SIGNAL('clicked()'), self.open)

    def open(self):
        """Let the user pick a file and copy it to /var/www/html/.

        SECURITY FIX: the original built a shell command with string
        interpolation (``os.system('sudo cp %s ...' % filename)``), which
        allows command injection via file names containing shell
        metacharacters (spaces, ';', '$(...)', ...). Passing an argument
        list with shell=False removes that vector.
        """
        filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File', '.')
        if not filename:
            return  # dialog was cancelled
        import subprocess
        subprocess.call(['sudo', 'cp', str(filename), '/var/www/html/'])
def main():
    """Create the Qt application, show the upload window, and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    mw = QDataViewer()
    mw.show()
    sys.exit(app.exec_())

if __name__ == "__main__":
    main()
| razyar/iSpace | upload.py | upload.py | py | 1,062 | python | en | code | 0 | github-code | 36 |
16490047889 | #!/usr/bin/env python
# coding: utf-8
# 다운로드한 웹 페이지 또는 RSS에서의 스크레이핑을 시도해보자. 파이썬 표준 라이브러리로 저장한 파일에서 도서의 제목 및 URL 등의 데이터를 추출한다.
# * 정규표현식
#
# * XML 파서
# * 정규 표현식으로 Scraping할 때에는 HTML을 단순한 문자열로 취급하고, 필요한 부분을 추출한다. 제대로 마크업되지 않은 웹 페이지도 문자열의 특징을 파악하면 쉬운 스크레이핑 작업이 가능하다.
# * XML 파서로 하는 Scraping할 때는 HTML태그를 분석(파싱:: parsing)하고, 필요한 부분을 추출한다. 블로그 또는 뉴스 사이트 정보를 전달하는 RSS처럼 많은 정보가 XML에는 제공된다.
# 참고) XML과 HTML은 비슷해보이나, XML파서에 HTML을 곧바로 넣어 분석할 수는 없다. HTML은 종료 태그(tag)가 생략되는 등 XML에 비하면 더욱 유연하게 사용되기 때문이다.
#
# 뿐 만 아니라, 웹 브라우저는 문법에 문제가 있는 HTML이라도 어떻게든 출력해주는 경향이 있다.
#
# 하지만 parsing의 경우 문제가 있는 HTML은 제대로 파싱할 수 없으며, 이러한 문제가 있는 웹 페이지는 생각보다 많다.
# ***::: 따라서, HTML을 parsing할 때에는 HTML 전용 parser가 필요하다***
# In[1]:
### Scraping with regular expressions (comments translated to English)
import re
# re.search() checks whether the second argument matches the regex in the
# first argument. On a match it returns a Match object, otherwise None.
# Here a Match object is returned; match='abc' shows the matched text.
re.search(r'a.*c', 'abc123DEF')
# In[2]:
# This example does not match, so None is returned
# (the interactive shell simply prints nothing for None).
re.search(r'a.*d', 'abc123DEF')
# In[3]:
# The third parameter passes flags.
# re.IGNORECASE (or re.I) makes the match case-insensitive.
# Many other flags exist as well.
re.search(r'a.*d', 'abc123DEF', re.IGNORECASE)
# In[4]:
# Match.group() extracts the matched text.
# group(0) returns the entire match.
m = re.search(r'a(.*)c', 'abc123DEF')
m.group(0)
# In[5]:
# group(n) with n >= 1 returns the n-th parenthesized capture group.
m.group(1)
# In[6]:
# re.findall() extracts every non-overlapping match.
# Here: all words of two or more characters.
# \w matches word characters (Unicode); \s matches whitespace, etc.
re.findall(r'\w{2,}', 'This is a pen')
# In[7]:
# re.sub() replaces every part of the third argument that matches the
# first-argument regex with the second argument.
re.sub(r'\w{2,}','yes_good', 'This is a pen')
# In[8]:
import re
from html import unescape
# 이전 절에서 다운로드한 파일을 열고 html이라는 변수에 저장해보자.
# Load the page downloaded in the previous section.
with open('dp.html') as f:
    html = f.read()

# Extract the HTML fragment for each book entry.
# BUG FIX: the original pattern ended with '<\td>'. Inside a regex, '\t' is a
# TAB character, so the pattern could never match the closing '</td>' tag
# (the corrected pattern is the one used further below in this file).
for partial_html in re.findall(r'<td class="left"><a.*?</td>', html, re.DOTALL):
    # Pull the book URL out of the anchor tag.
    url = re.search(r'<a href="(.*?)">', partial_html).group(1)
    print(url)
# In[ ]:
# Second pass: extract both the URL and the title of each book.
for partial_html in re.findall(r'<td class="left"><a.*?</td>', html, re.DOTALL):
    # Extract the book URL.
    print(partial_html)
    url = re.search(r'<a href="(.*?)">', partial_html).group(1)
    url = 'https://hanbit.co.kr/' + url
    # print(url)
    # Strip the tags and unescape HTML entities to get the book title.
    title = re.sub(r'<.*?>','', partial_html)
    title = unescape(title)
    print('url:', url)
    print('title:', title)
    print('---'*35)
# In[ ]:
# end of the file
| polkmn222/Statistic-Python | 0627/02)웹페이지에서_기초_크롤링_2.py | 02)웹페이지에서_기초_크롤링_2.py | py | 4,338 | python | ko | code | 0 | github-code | 36 |
32432567007 | from django import template
from django.utils.translation import ugettext as _
from message.models import Message
from milestone.models import Milestone
from lib import utils
register = template.Library()
def displaystatistic(context, name, trans_name, number):
    """Build the context for one statistic entry: its icon path, display
    label and count. Unknown *name* values fall back to the bare icon folder.
    """
    icons_folder = '/media/basepage/images/icons/'
    icon_by_name = {
        'message': 'comment.png',
        'milestone': 'clock.png',
        'wikiboard': 'page.png',
        'file': 'page_white_put.png',
        'todo': 'note.png',
    }
    icon = icons_folder + icon_by_name.get(name, '')
    return {'icon': icon, 'name': trans_name, 'number': number}
register.inclusion_tag("lib/displaystatistic.html", takes_context=True)(displaystatistic)
| joaquimrocha/Rancho | rancho/lib/templatetags/displaystatistic.py | displaystatistic.py | py | 794 | python | en | code | 7 | github-code | 36 |
35802636506 | from tkinter import *
from PyQt4 import QtGui,QtCore
import cv2
import re
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
class Window(QtGui.QMainWindow):
    """Webcam capture window that snapshots a frame and reverse-image-searches
    it via Google Images (headless Firefox), then displays the most frequent
    word from the result titles as the recognized label.
    """
    def __init__(self, parent=None):
        super(Window,self).__init__(parent)
        self.setGeometry(150,150,680,565)
        self.setWindowTitle('Motion Scanner')
        # Video preview area.
        self.video = QtGui.QLabel('', self)
        self.video.setGeometry(20, 20, 640, 485)
        self.btn1 = QtGui.QPushButton('Start', self)
        self.btn1.setGeometry(50, 515, 100, 30)
        self.btn1.clicked.connect(self.Start)
        self.btn3 = QtGui.QPushButton('Scan', self)
        self.btn3.setGeometry(170, 515, 100, 30)
        self.btn3.clicked.connect(self.Stop)
        # Label that shows the recognition result.
        self.output = QtGui.QLabel('', self)
        self.output.setGeometry(290, 515, 150, 30)
        # Placeholder image until the camera feed starts.
        # NOTE(review): hard-coded absolute Windows path.
        myPixmap = QtGui.QPixmap("I:/projects/py/loadin/camera.jpg")
        myScaledPixmap = myPixmap.scaled(self.video.size())
        self.video.setPixmap(myScaledPixmap)
        # Camera index 1 (second camera) — confirm this matches the target machine.
        self.cap = cv2.VideoCapture(1)
        self.show()
    def Start(self):
        """Grab one frame, show it, and re-arm a timer so this runs ~30 fps."""
        self.fps=30
        self.timer = QtCore.QTimer()
        ret, frame = self.cap.read()
        # OpenCV delivers BGR; Qt expects RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(img)
        # Keep the latest frame for Stop() to save and search.
        self.a=frame
        self.video.setPixmap(pix)
        self.timer.timeout.connect(self.Start)
        self.timer.start(1000. / self.fps)
    def Stop(self):
        """Freeze the feed, save the last frame and reverse-image-search it.

        NOTE(review): this blocks the UI thread while Selenium runs; the whole
        flow depends on Google Images page structure, which changes frequently.
        """
        cv2.imwrite("Scan1.jpg", self.a)
        self.timer.stop()
        # Headless Firefox session.
        opts = Options()
        opts.set_headless()
        assert opts.headless
        driver = Firefox(options=opts)
        # navigate to the application home page
        driver.get("https://images.google.com/")
        # click on camera image
        search_field = driver.find_element_by_id("qbi")
        search_field.click()
        driver.find_element_by_class_name('qbtbha.qbtbtxt.qbclr').click()
        # clicking on upload image
        b = driver.find_element_by_css_selector("input[type=\"file\"]")
        b.clear()
        # uploading image — NOTE(review): hard-coded absolute Windows path.
        b.send_keys("I:\\\\projects\\\\py\\\\Scan1.jpg")
        search_form = driver.find_element_by_id('mKlEF')
        search_form.submit()
        driver.implicitly_wait(30)
        # getting results
        RESULTS_LOCATOR = "//div/h3/a"
        # WebDriverWait(driver, 10).until(
        # EC.visibility_of_element_located((By.XPATH, RESULTS_LOCATOR)))
        page1_results = driver.find_elements(By.XPATH, RESULTS_LOCATOR)
        a = " "
        # storing all the results in a
        for item in page1_results:
            a += item.text
        print()
        # finding the most repeated word and showing it
        frequency = {}
        document_text = a
        text_string = document_text.lower()
        match_pattern = re.findall(r'\b[a-z]{3,15}\b', text_string)
        for word in match_pattern:
            count = frequency.get(word, 0)
            frequency[word] = count + 1
        # frequency_list = frequency.keys()
        result=max(frequency.keys(), key=(lambda k: frequency[k]))
        print(max(frequency.keys(), key=(lambda k: frequency[k])))
        # Burn the label into the frozen frame and show it in the UI.
        cv2.putText(self.a, result, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 3, 4)
        self.output.setText(result)
        driver.close()
# Application entry point.
# FIX: `sys` is used below but was never imported anywhere in this module,
# which makes these lines raise NameError at startup.
import sys

app = QtGui.QApplication(sys.argv)
GUI = Window()
sys.exit(app.exec_())
69905549866 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import time
import numpy
from caffe2.python import workspace
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.utils.stylizeimage as style
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def infer_method(im,mymethod="back"):
    """Run DensePose inference on *im* and post-process with one of several
    visualization modes.

    :param im: input image (BGR ndarray, as loaded by cv2 — presumed; confirm)
    :param mymethod: "back" (background replacement), "iron", "style_b", or
        anything else for plain visualization.

    NOTE(review): the "iron", "style_b" and fallback branches reference
    ``args.output_dir`` / ``args.image_second``, but no ``args`` is defined in
    this module — those paths will raise NameError if taken. Only the default
    "back" branch appears runnable as-is.
    """
    logger = logging.getLogger(__name__)
    #styleimage = style.style_method()
    # Load model config and cached weights (hard-coded paths).
    merge_cfg_from_file("configs/DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.yaml")
    cfg.NUM_GPUS = 1
    myweights = cache_url("DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.pkl", cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(myweights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    # NOTE(review): out_name is computed but never used below.
    out_name = os.path.join(
        "tools/output", '{}'.format(os.path.basename("myresult") + '.jpg')
    )
    #logger.info('Processing {} -> {}'.format(im_name, out_name))
    im_name = "love.jpg"
    # Secondary image used for background replacement / ironman compositing.
    im2 = cv2.imread("tools/iron8.jpg")
    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
            model, im, None, timers=timers)
        if im2 is not None:
            cls_boxes2, cls_segms2, cls_keyps2, cls_bodys2 = infer_engine.im_detect_all(
                model, im2, None, timers=timers)
    logger.info('Inference time: {:.3f}s'.format(time.time() - t))
    for k, v in timers.items():
        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
    if mymethod == "back":
        # Replace the background of im with im2.
        vis_utils.change_background(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im2[:, :, ::-1],
            im_name,
            "static/img",
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    elif mymethod == "iron":
        # Composite im2's detected body onto im. NOTE(review): uses undefined `args`.
        vis_utils.ironman(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im2[:, :, ::-1],
            im_name,
            args.output_dir,
            cls_boxes,
            cls_boxes2,
            cls_segms,
            cls_keyps,
            cls_bodys,
            cls_segms2,
            cls_keyps2,
            cls_bodys2,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    elif mymethod == 'style_b':
        # Style-transfer the background. NOTE(review): uses undefined `args`
        # and module-level `numpy` import.
        styleimage = cv2.cvtColor(numpy.array(style.stylize_img(im_name,args.image_second)),cv2.COLOR_RGB2BGR)
        resized_im = style.tensor_to_image(style.load_to_mask(im_name))
        opencvImage = cv2.cvtColor(numpy.array(resized_im), cv2.COLOR_RGB2BGR)
        print(opencvImage)
        with c2_utils.NamedCudaScope(0):
            bo,se,ke,bod = infer_engine.im_detect_all(model, opencvImage, None, timers=timers)
        vis_utils.change_background(
            opencvImage[:, :, ::-1],  # BGR -> RGB for visualization
            styleimage[:, :, ::-1],
            "stylized_img.jpg",
            args.output_dir,
            bo,
            se,
            ke,
            bod,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    else:
        # Plain DensePose visualization. NOTE(review): uses undefined `args`.
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
| chhari/tiredwebsite | infer_website.py | infer_website.py | py | 5,203 | python | en | code | 0 | github-code | 36 |
15521725834 | '''
40. Combination Sum II
Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
'''
class Solution:
    """LeetCode 40 — Combination Sum II.

    Finds all unique combinations of ``candidates`` (each element usable at
    most once) summing to ``target``.

    Rewritten with the standard sort-and-backtrack scheme: the original
    rebuilt a frequency dict and re-scanned it for the minimum remaining
    value on every recursive call; sorting once and skipping duplicate
    values at the same depth achieves the same result more simply and
    prunes earlier (break as soon as a candidate exceeds the remainder).
    """

    def combinationSum2(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        if not candidates:
            return []
        candidates = sorted(candidates)
        results = []
        path = []

        def _backtrack(start, remaining):
            # One helper frame per chosen prefix; `path` holds the prefix.
            if remaining == 0:
                results.append(path[:])
                return
            for i in range(start, len(candidates)):
                c = candidates[i]
                if c > remaining:
                    break  # sorted: nothing later can fit either
                if i > start and candidates[i] == candidates[i - 1]:
                    continue  # skip duplicate value at the same depth
                path.append(c)
                _backtrack(i + 1, remaining - c)
                path.pop()

        _backtrack(0, target)
        return results
class Solution2:
    """Alternative solution to LeetCode 40 (Combination Sum II).

    Works on the sorted candidate list directly; for each run of equal
    values it tries taking 1..count copies and recurses on the remainder.
    """
    def combination(self,candidates,target):
        """Recursive helper.

        :param candidates: sorted list of remaining candidates
        :param target: remaining sum to reach
        :return: list of combinations (each a list) summing to *target*
        """
        #print(candidates,target)
        if target == 0:
            return [[]]  # exactly reached: one empty combination
        if not candidates or target < candidates[0]:
            return []  # sorted, so the smallest remaining already overshoots
        ans = []
        cnt = 0
        curr = candidates[0]
        for i,c in enumerate(candidates):
            if c == curr:
                cnt += 1  # extend the current run of equal values
            else:
                # Run of `cnt` copies of `curr` ended at index i:
                # try taking 1..cnt copies, recursing on the suffix.
                for j in range(cnt):
                    res = self.combination(candidates[i:],target-curr*(j+1))
                    for r in res:
                        r += [curr] * (j+1)
                    ans += res
                cnt = 1
                curr = c
        # Handle the final run (suffix is empty, so only exact hits survive).
        for j in range(cnt):
            res = self.combination([],target-curr*(j+1))
            for r in res:
                r += [curr] * (j+1)
            ans += res
        return ans
    def combinationSum2(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        if not candidates:
            return []
        candidates.sort()  # NOTE: sorts the caller's list in place
        return self.combination(candidates,target)
if __name__ == '__main__':
    # Quick smoke tests.
    print(Solution2().combinationSum2([1],2))
    print(Solution2().combinationSum2([1,2,8],9))
30233007647 | #https://github.com/wintun-devop
#https://www.youtube.com/channel/UCz9ebjc-_3t3p49gGpwyAKA/videos
#declare a string
# Demonstration of common str operations.
# declare a string
a_name="Win Tun Hlaing"
about="He is am IT Professional."
# title-case every word
a_name_capital=a_name.title()
# lower-case everything
a_name_lower=a_name.lower()
# upper-case everything
a_name_upper=a_name.upper()
# right-justify to width 20, padding with '.'
a_name_rjust=a_name.rjust(20,'.')
# replace a substring
a_name_replace_character=a_name.replace("Hlaing","Lin")
# split on whitespace -> list of words
a_name_split=a_name.split()
# NOTE: reversed() returns an iterator, not a string; for a reversed string
# use ''.join(reversed(a_name)) or a_name[::-1].
a_name_reverse=reversed(a_name)
# partition at the first space -> 3-tuple (head, sep, tail)
a_name_partition=a_name.partition(' ')
# count occurrences of a substring
x = a_name.count("n")
# str.format with named placeholders
a_format_string="My name is {name}.{About}".format(name=a_name,About=about)
# f-string equivalent
an_fstring=f"My name is {a_name}.{about}"
print(an_fstring)
| wintun-devop/python | data-type-operation/string-minipulation.py | string-minipulation.py | py | 874 | python | en | code | 0 | github-code | 36 |
2232093034 | import interfaceAi
class console():
    """Text-mode UI for a tic-tac-toe game: menus, board rendering,
    statistics and a progress bar for AI-vs-AI batches.
    """
    def __init__(self):
        # Accepted answers for yes/no prompts ('' counts as yes / default).
        self.yes = ("yes", "ja", "y", "j", "1", "")
        self.no = ("no", "nein", "n", "0")
        # Throttles how often printProgress redraws.
        self.printProgressCounter = 0
    def chooseAi(self, playerId):
        """Prompt until the user picks a valid AI index; return that index (0-based)."""
        validAnswer = False
        ifAi = interfaceAi.interfaceAi()
        while not validAnswer:
            aiList = ifAi.getAiList()
            print("choose AI for", self.getPlayerName(playerId))
            itemCount = 0
            for aiItem in ifAi.getAiList():
                itemCount += 1
                print(str(itemCount) + ") " + aiItem)
            choice = input("choice:")
            if choice.isdigit():
                choiceInt = int(choice) - 1
                # NOTE(review): `len(aiList) >= choiceInt` allows choiceInt ==
                # len(aiList), i.e. one past the last item — confirm intended.
                if len(aiList) >= choiceInt and choiceInt >= 0:
                    return choiceInt
    def chooseAiGameRounds(self):
        """Prompt until a numeric round count is entered; return it as int."""
        validAnswer = False
        while not validAnswer:
            choice = input("how many rounds should the AI play:")
            if choice.isdigit():
                return int(choice)
    def choosePlayer(self, playerId):
        """Ask whether the given player is a human or the CPU; return 'user' or 'ai'."""
        validAnswer = False
        # ifAi = interfaceAi.interfaceAi()
        while not validAnswer:
            print("Who should player", self.getPlayerName(playerId), "be?")
            print("1) user")
            print("2) CPU")
            choice = input("choice:")
            if choice == "1":
                return "user"
            elif choice == "2":
                return "ai"
    def getPlayerName(self, playerId):
        """Map a player id (0 empty, 1 X, 2 O) to its board symbol."""
        if playerId == 0:
            return "-"
        elif playerId == 1:
            return "X"
        elif playerId == 2:
            return "O"
    def printGameField(self, gameField):
        """Render the 3x3 board with coordinate labels along top and left."""
        finalTable = ""
        # header row / top cords
        finalTable += "  " + " ".join(str(i) for i in range(3)) + "\n"
        rowCount = 0
        for row in gameField:
            # left hand cords
            finalTable += str(rowCount) + " "
            for field in row:
                finalTable += self.getPlayerName(field) + " "
            finalTable += "\n"
            rowCount += 1
        print(finalTable)
    def printStats(self, gl):
        """Print the final board, the winner banner, and the running score."""
        self.printGameField(gl.getField())
        print(13 * "*")
        print("* WINNER:", self.getPlayerName(gl.getWinner()) + " *")
        print(13 * "*")
        print("total games:", gl.getPlayedRoundsCount())
        print("X wins", gl.score[1], "matches")
        print("O wins", gl.score[2], "matches")
        print("draws ", gl.score[0], "matches")
    def printProgress(self, currentGame, maxGame):
        """Clear the terminal and draw a 50-char progress bar.

        Redraws at most ~100 times per batch (every maxGame/100 calls) when
        maxGame >= 1000, and on the final game; otherwise on every call.
        """
        self.printProgressCounter += 1
        if maxGame < 1000 or self.printProgressCounter >= maxGame / 100 or currentGame == maxGame:
            self.printProgressCounter = 0
            import os
            os.system('clear')  # NOTE(review): POSIX-only; 'cls' on Windows
            # percent complete, then halved to fit the 50-char bar
            x = currentGame * 100
            y = x / maxGame
            z = int(y) / 2
            tmpStr = '[' + '{:<50}'.format(int(z) * "#") + ']'
            print(tmpStr)
            formatStr = '{:^' + str(len(tmpStr)) + '}'
            print(formatStr.format(str(currentGame) + ' / ' + str(maxGame)))
    def getUserInput(self):
        """Read board coordinates; keep every digit 0-2 until exactly two remain."""
        cords = []
        while len(cords) != 2:
            cords = []
            choose = input('type XY-coordinates: ')
            for c in choose:
                if c.isdigit():
                    cInt = int(c)
                    # if cInt >= 0 and cInt <= 2:
                    if 0 <= cInt <= 2:
                        cords.append(cInt)
        return cords
    def playAgain(self):
        """Ask 'play again?' until a recognized yes/no answer; return the boolean."""
        validAnswer = False
        answer = False
        while not validAnswer:
            yesNo = input('play again? Y/n')
            if yesNo.lower() in self.yes:
                validAnswer = True
                answer = True
            elif yesNo.lower() in self.no:
                validAnswer = True
                answer = False
        return answer
return answer | pythonfoo/TicTacToe | bison/interfaceConsole.py | interfaceConsole.py | py | 4,207 | python | en | code | 0 | github-code | 36 |
73360141224 | # django imports
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
# portlets imports
import portlets.utils
from portlets.models import PortletAssignment
from portlets.models import PortletBlocking
from portlets.models import PortletRegistration
from portlets.models import Slot
# lfs imports
import lfs.core.utils
from lfs.core.utils import LazyEncoder
@login_required
def portlets_inline(request, obj, template_name="manage/portlets/portlets_inline.html"):
    """Render (as a string) the portlet-management panel for *obj*, including
    its own slots and — if the object has a portlet parent — the parent's slots.
    """
    ct = ContentType.objects.get_for_model(obj)
    parent_for_portlets = obj.get_parent_for_portlets()
    if parent_for_portlets:
        parent_slots = portlets.utils.get_slots(parent_for_portlets)
    else:
        parent_slots = None
    return render_to_string(template_name, RequestContext(request, {
        "slots" : portlets.utils.get_slots(obj),
        "parent_slots" : parent_slots,
        "parent_for_portlets" : parent_for_portlets,
        "portlet_types" : PortletRegistration.objects.filter(active=True),
        "object" : obj,
        "object_type_id" : ct.id,
    }))
@login_required
def update_portlets(request, object_type_id, object_id):
    """Update portlet slot blocking for one object and return the refreshed
    panel as JSON ({"html", "message"}).
    """
    # Get content type to which the portlet should be added
    object_ct = ContentType.objects.get(pk=object_type_id)
    object = object_ct.get_object_for_this_type(pk=object_id)
    blocked_slots = request.POST.getlist("block_slot")
    for slot in Slot.objects.all():
        if str(slot.id) in blocked_slots:
            # Slot should be blocked; IntegrityError means it already is.
            try:
                PortletBlocking.objects.create(
                    slot_id=slot.id, content_type_id=object_type_id, content_id=object_id)
            except IntegrityError:
                pass
        else:
            # Slot should be unblocked; missing blocking row is fine.
            try:
                pb = PortletBlocking.objects.get(
                    slot=slot, content_type=object_type_id, content_id=object_id)
                pb.delete()
            except PortletBlocking.DoesNotExist:
                pass
    html = portlets_inline(request, object)
    result = simplejson.dumps({
        "html" : html,
        "message" : _(u"Portlet has been updated.")},
        cls = LazyEncoder
    )
    return HttpResponse(result)
@login_required
def add_portlet(request, object_type_id, object_id, template_name="manage/portlets/portlet_add.html"):
    """Form (GET) and creation logic (POST) for adding a portlet to the object
    with the given content type and id. POST returns JSON with the refreshed panel.

    NOTE(review): ``ContentType.objects.filter(...)[0]`` raises IndexError —
    not ContentType.DoesNotExist — when no row matches, so the except clauses
    below likely never fire as intended. Also note that the fall-through
    paths return None, which is not a valid Django response.
    """
    # Get content type to which the portlet should be added
    object_ct = ContentType.objects.get(pk=object_type_id)
    object = object_ct.get_object_for_this_type(pk=object_id)
    # Get the portlet type
    portlet_type = request.REQUEST.get("portlet_type", "")
    if request.method == "GET":
        try:
            portlet_ct = ContentType.objects.filter(model=portlet_type.lower())[0]
            mc = portlet_ct.model_class()
            # Empty form for the chosen portlet type.
            form = mc().form(prefix="portlet")
            return render_to_response(template_name, RequestContext(request, {
                "form" : form,
                "object_id" : object_id,
                "object_type_id" : object_ct.id,
                "portlet_type" : portlet_type,
                "slots" : Slot.objects.all(),
            }))
        except ContentType.DoesNotExist:
            pass
    else:
        try:
            ct = ContentType.objects.filter(model=portlet_type.lower())[0]
            mc = ct.model_class()
            form = mc().form(prefix="portlet", data=request.POST)
            portlet = form.save()
            # Assign the new portlet to the requested slot/position.
            slot_id = request.POST.get("slot")
            position = request.POST.get("position")
            PortletAssignment.objects.create(
                slot_id=slot_id, content=object, portlet=portlet, position=position)
            html = portlets_inline(request, object)
            result = simplejson.dumps({
                "html" : html,
                "message" : _(u"Portlet has been added.")},
                cls = LazyEncoder
            )
            return HttpResponse(result)
        except ContentType.DoesNotExist:
            pass
@login_required
def delete_portlet(request, portletassignment_id):
    """Delete the portlet behind the given portlet assignment.

    On success the user is redirected back to the referring page with a
    confirmation message; an unknown assignment id is silently ignored.
    """
    try:
        assignment = PortletAssignment.objects.get(pk=portletassignment_id)
    except PortletAssignment.DoesNotExist:
        return

    assignment.delete()
    return lfs.core.utils.set_message_cookie(
        request.META.get("HTTP_REFERER"),
        msg = _(u"Portlet has been deleted."))
@login_required
def edit_portlet(request, portletassignment_id, template_name="manage/portlets/portlet_edit.html"):
    """Form and logic to edit the portlet of the given portlet assignment.
    """
    try:
        pa = PortletAssignment.objects.get(pk=portletassignment_id)
    except PortletAssignment.DoesNotExist:
        return ""

    if request.method != "GET":
        # Save the portlet itself ...
        form = pa.portlet.form(prefix="portlet", data=request.POST)
        portlet = form.save()

        # ... then the assignment's slot and position.
        pa.slot_id = request.POST.get("slot")
        pa.position = request.POST.get("position")
        pa.save()

        html = portlets_inline(request, pa.content)
        result = simplejson.dumps({
            "html" : html,
            "message" : _(u"Portlet has been saved.")},
            cls = LazyEncoder
        )
        return HttpResponse(result)

    # GET: render the edit form, marking the currently assigned slot.
    slots = [{
        "id" : slot.id,
        "name" : slot.name,
        "selected" : slot.id == pa.slot.id,
    } for slot in Slot.objects.all()]

    form = pa.portlet.form(prefix="portlet")
    return render_to_response(template_name, RequestContext(request, {
        "form" : form,
        "portletassigment_id" : pa.id,
        "slots" : slots,
        "position" : pa.position,
    }))
import numpy as np
import cv2
import os.path as osp
from glob import glob
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import confusion_matrix
import random
"""
return all gif frames as a Python list
"""
def load_gif(path):
    """Return all frames of the GIF at *path* as a list of PIL Image objects."""
    im = Image.open(path)
    n_frames = im.n_frames
    count = 0
    ret = []
    while count < n_frames:
        # Seek to the frame and take an independent copy of it.
        im.seek(count)
        imframe = im.copy()
        if count == 0:
            # Remember the first frame's palette ...
            palette = imframe.getpalette()
        elif count <= n_frames // 2:
            # ... and re-apply it to frames in the first half only.
            # NOTE(review): presumably works around GIFs whose later frames
            # carry partial/missing palettes — confirm intent.
            imframe.putpalette(palette)
        # add the interesting frames
        ret.append(imframe)
        count = count+1
    return ret
def load_gif_gray(path):
    """Load the image at *path* and return its first frame as a grayscale numpy array."""
    return np.array(Image.open(path).convert('L'))
def load_gif_color(path):
    """Open the image at *path* and return the PIL Image object unchanged."""
    return Image.open(path)
def get_image_directories(data_path, categories):
    """Return the per-category image directory paths under *data_path*."""
    directories = []
    for category in categories:
        directories.append(osp.join(data_path, category))
    return directories
def load_images(limit, path):
    """Sample up to *limit* jpg files from *path*.

    Returns a tuple (paths, ids) where each id is the file's basename
    without extension. Selection order is randomized via random.shuffle.
    """
    candidates = glob(osp.join(path, '*.jpg'))
    random.shuffle(candidates)
    image_paths = candidates[:limit]

    image_ids = []
    for image_path in image_paths:
        image_ids.append(osp.split(image_path)[-1].split('.')[0])
    return image_paths, image_ids
def load_agument_image_paths(agument_path, image_paths, bases):
    """Collect existing augmented variants of *image_paths* under *agument_path*.

    For each source image <...>/<category>/<name> and each prefix in *bases*,
    the candidate <agument_path>/<category>/<base>_<name> is kept when it
    exists on disk. Returns parallel lists (paths, labels, ids).
    """
    found_paths, found_labels, found_ids = [], [], []
    for image_path in image_paths:
        parent, image_name = osp.split(image_path)
        category = osp.split(parent)[-1]
        image_id = image_name.split('.')[0]
        for base in bases:
            candidate = osp.join(agument_path, category, str(base) + '_' + image_name)
            if osp.exists(candidate):
                found_paths.append(candidate)
                found_labels.append(category)
                found_ids.append(image_id)
    return found_paths, found_labels, found_ids
def show_results(train_image_paths, test_image_paths, train_labels, test_labels,
                 categories, abbr_categories, predicted_categories):
    """Plot the row-normalized confusion matrix for the predictions.

    :param train_image_paths: unused, kept for interface compatibility
    :param test_image_paths: unused, kept for interface compatibility
    :param train_labels: unused, kept for interface compatibility
    :param test_labels: ground-truth category name per test sample
    :param categories: ordered list of all category names
    :param abbr_categories: abbreviated names used for the x-axis ticks
    :param predicted_categories: predicted category name per test sample
    """
    cat2idx = {cat: idx for idx, cat in enumerate(categories)}

    # confusion matrix
    y_true = [cat2idx[cat] for cat in test_labels]
    y_pred = [cat2idx[cat] for cat in predicted_categories]
    cm = confusion_matrix(y_true, y_pred)
    # np.float was removed in NumPy 1.20; the builtin float is equivalent.
    cm = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
    acc = np.mean(np.diag(cm))
    print(cm)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap('jet'))
    plt.title('Confusion matrix. Mean of diagonal = {:4.2f}%'.format(acc*100))
    tick_marks = np.arange(len(categories))
    plt.tight_layout()
    plt.xticks(tick_marks, abbr_categories, rotation=45)
    plt.yticks(tick_marks, categories)
import pygame
import config
pygame.init()
class Text:
    """Handles all text displays on a game window supplied by the game handle.

    Each instance manages its own family of text items (e.g. velocity
    output, position output), allowing a display type to be enabled or
    disabled as a unit. Surfaces, rects and anchor positions are kept in
    parallel lists, indexed in creation order.
    """

    white = config.WHITE
    black = config.BLACK
    # Valid rect-anchor attribute names accepted by message_display().
    possible_positions = ('topleft', 'topright', 'bottomright', 'bottomleft',
                          'center')

    def __init__(self, game_window, font_size=config.default_font_size):
        """Class constructor initializing with the
        pygame game_window/screen handle.
        """
        self.game_window = game_window
        self.text_surfaces = []
        self.text_rects = []
        self.text_positions = []
        self.font_size = int(font_size)
        self.normal_font = pygame.font.Font(config.font, self.font_size)

    def text_objects(self, text: str, font, color: tuple):
        """Render *text* with the given pygame font and return (surface, rect)."""
        text_surface = font.render(text, True, color)
        return text_surface, text_surface.get_rect()

    def message_display(self, text, x: int, y: int,
                        position: str = 'topleft',
                        color: tuple = white):
        """Draw *text* anchored at (x, y) and register it for later updates.

        *position* names the rect attribute used as the anchor, e.g.
        'bottomright' or 'center'; invalid names fall back to 'topleft'.
        Returns the rendered text surface.
        """
        text_surface, text_rect = self.text_objects(
            text=text, font=self.normal_font, color=color)

        # Set the coordinates of the rectangle depending on position
        if position not in Text.possible_positions:
            # BUG FIX: the warning was missing the f-string prefix, so the
            # literal "{position}" was printed instead of the actual value.
            print(f"WARNING: {position} does not exist! "
                  "Defaulting to 'topleft'.")
            position = "topleft"
        setattr(text_rect, position, (x, y))

        # Fills previous text with black rectangle.
        self.game_window.fill(Text.black, text_rect)
        # Blit the new text onto the surface.
        self.game_window.blit(text_surface, text_rect)

        # Append list of text surfaces, rectangles, and positions
        self.text_surfaces.append(text_surface)
        self.text_rects.append(text_rect)
        self.text_positions.append(position)

        pygame.display.update()
        return text_surface

    def change_text(self, index: int, new_text: str) -> None:
        """Replace the text at *index* with *new_text*, reusing its anchor.

        Automatically finds the coordinates of the previous text.
        """
        # Establish the previous text rect
        prev_rect = self.text_rects[index]

        # Set up the new message text and font
        color = Text.white
        text_surface, text_rect = self.text_objects(
            text=new_text, font=self.normal_font, color=color)

        # Set the proper coordinates for the new text rect (old coordinates)
        position = self.text_positions[index]  # e.g. 'topleft', 'center'
        prev_rect_position = getattr(prev_rect, position)
        setattr(text_rect, position, prev_rect_position)

        # Fill old text with black using the previous rect
        self.game_window.fill(Text.black, prev_rect)
        # Blit the new text surface
        self.game_window.blit(text_surface, text_rect)
        pygame.display.update([text_rect, prev_rect])

        # Update the list of text_rects and text_surfaces
        self.text_surfaces[index] = text_surface
        self.text_rects[index] = text_rect
#!/usr/bin/python3
"""This defines a class Student."""
class Student:
    """Represents a student."""

    def __init__(self, first_name, last_name, age):
        """Initialize a new Student.

        Args:
            first_name (str): The first name of the student.
            last_name (str): The last name of the student.
            age (int): The age of the student.
        """
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self, attrs=None):
        """Return a dictionary representation of the Student.

        When attrs is a list of strings, only the listed attributes that
        actually exist on the instance are included; otherwise the full
        __dict__ is returned.

        Args:
            attrs (list): (Optional) The attributes to represent.
        """
        if (type(attrs) == list and
                all(type(item) == str for item in attrs)):
            return {name: getattr(self, name)
                    for name in attrs if hasattr(self, name)}
        return self.__dict__
import pandas as pd
import numpy as np
import datetime as dt
import os
import matplotlib.pyplot as plt
from util import get_data, plot_data
# prepare the data
def normalize_stocks(prices):
    """Forward- then backward-fill missing prices and scale so the first row is 1.0.

    Note: the fills mutate *prices* in place; the returned object is the
    normalized ratio prices / prices.iloc[0].
    """
    # fillna(method=...) is deprecated since pandas 2.1; ffill()/bfill()
    # are the supported equivalents with identical semantics.
    prices.ffill(inplace=True)
    prices.bfill(inplace=True)
    return prices / prices.iloc[0]
# The function to return SMA
# price < sma, BUY
# price > sma, SELL
"""Calculate simple moving average indicator
Parameters:
price: Normalized adjusted close price
rolling_mean: Rolling mean of certain numbers of days
Returns: SMA
"""
def compute_sma(normalized_price, rolling_days):
    """Return a one-column DataFrame 'SMA' holding the simple moving average.

    The mean is taken over a *rolling_days* window; rows without a full
    window are NaN (min_periods == window).
    """
    rolling_mean = normalized_price.rolling(window=rolling_days,
                                            min_periods=rolling_days).mean()
    sma = pd.DataFrame(0, index=normalized_price.index, columns=['SMA'])
    sma['SMA'] = rolling_mean
    return sma
# the function to return momentum
# negative --> postive, Buy
# postive --> negative, Sell
"""Calculate momentum indicator:
momentum[t] = (price[t]/price[t-rolling_days]) - 1
Parameters:
price: Normalized adjusted close price
rolling_days: Number of days to look back
Returns: Momentum
"""
def compute_momentum(normalized_price, rolling_days):
    """Return momentum[t] = price[t] / price[t - rolling_days] - 1 as a 'Momentum' DataFrame."""
    momentum = pd.DataFrame(0, index=normalized_price.index, columns=['Momentum'])
    shifted = normalized_price.shift(rolling_days)
    momentum['Momentum'] = normalized_price / shifted - 1
    return momentum
# the function to return Exponential moving average (EMA)
# price < ema, BUY
# price > ema, SELL
"""Calculate EMA indicator:
EMA = Closing price x multiplier + EMA (previous day) x (1-multiplier)
Parameters:
price: Normalized adjusted close price
rolling_days: Number of days to look back
Returns: EMA
"""
def compute_ema(normalized_price, rolling_days):
    """Return a one-column DataFrame 'EMA' holding the exponential moving
    average with span *rolling_days* (recursive form, adjust=False)."""
    frame = pd.DataFrame(0, index=normalized_price.index, columns=['EMA'])
    frame['EMA'] = normalized_price.ewm(span=rolling_days, adjust=False).mean()
    return frame
# MACD: Moving Average Convergence Divergence
# Signal Line > MACD Line , SELL
# Signal Line < MACD Line, BUY
"""Calculate MACD indicator:
MACD Line: (12-day EMA - 26-day EMA)
Signal Line: 9-day EMA of MACD Line
Parameters:
price: Normalized adjusted close price
Returns: MACD line and Signal line
"""
def compute_macd(normalized_price):
    """Compute Moving Average Convergence Divergence columns.

    Returns a DataFrame with columns:
      ema_12, ema_26 : 12- and 26-period exponential moving averages
      MACD           : ema_12 - ema_26 (the MACD line)
      Signal         : 9-period EMA of the MACD line
      MACD_diff      : Signal - MACD

    NOTE: previously only the MACD_diff Series was returned, which broke
    callers (and the documented contract) that index ema_12/ema_26/MACD/
    Signal on the result — the full frame is returned instead. The unused
    all-zero 'macd_raw' column has been dropped.
    """
    macd = pd.DataFrame(index=normalized_price.index)
    macd['ema_12'] = normalized_price.ewm(span=12, adjust=False).mean()
    macd['ema_26'] = normalized_price.ewm(span=26, adjust=False).mean()
    macd['MACD'] = macd['ema_12'] - macd['ema_26']
    macd['Signal'] = macd['MACD'].ewm(span=9, adjust=False).mean()
    macd['MACD_diff'] = macd['Signal'] - macd['MACD']
    return macd
# Stochastic Oscillator
# signal line (%D) > indicator line (%K), Overbought, SELL
# signal line (%D) < indicator line (%K), Oversold, BUY
"""Calculate Stochastic Oscillator indicator:
Indicator line (%K): (C−L14/H14−L14)*100
C = The most recent closing price
L14 = The lowest price traded of the 14 previous trading sessions
H14 = The highest price traded during the same 14-day period
%K = The current value of the stochastic indicator
Signal line (%D): D=100*(H3/L3)
H3=Highest of the three previous trading sessions
L3=Lowest price traded during the same three-day period
%D = The current value of the stochastic signal
Parameters:
price: Normalized adjusted close price
Returns: %K and %D
"""
def compute_kd(normalized_price):
    """Compute the stochastic oscillator.

    Returns a DataFrame with columns:
      %K  : 100 * (price - 14-period low) / (14-period high - 14-period low)
      %D  : 3-period rolling mean of %K (the signal line)
      %KD : %D - %K

    NOTE: previously only the %KD Series was returned, which broke callers
    (and the documented contract) that index %K and %D on the result — the
    full frame is returned instead.
    """
    kd = pd.DataFrame(index=normalized_price.index)
    low_14 = normalized_price.rolling(14).min()
    high_14 = normalized_price.rolling(14).max()
    kd['%K'] = ((normalized_price - low_14) / (high_14 - low_14)) * 100
    kd['%D'] = kd['%K'].rolling(3).mean()
    kd['%KD'] = kd['%D'] - kd['%K']
    return kd
def compute_indicators(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,12,31), \
    syms=['JPM']):
    """Load prices for *syms* over [sd, ed], compute all indicators and save
    the diagnostic plots (sma.png, momentum.png, ema.png, macd.png, kd.png)."""
    # Read in date range, prices and symbols
    symbol = syms[0]
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)
    prices = prices_all[syms]  # portfolio symbols
    prices_SPY = prices_all['SPY']  # SPY, for comparison
    prices_SPY_normalized = normalize_stocks(prices_SPY)
    normalized_price = normalize_stocks(prices)
    rolling_days = 20
    sma = compute_sma(normalized_price, rolling_days)
    columns = ['Price/SMA']
    prices_sma_ratio = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    prices_sma_ratio['Price/SMA'] = normalized_price[symbol]/sma['SMA']
    momentum = compute_momentum(normalized_price, rolling_days)
    ema = compute_ema(normalized_price, rolling_days)
    columns = ['Price/EMA']
    prices_ema_ratio = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    prices_ema_ratio['Price/EMA'] = normalized_price[symbol]/ema['EMA']
    # NOTE(review): the column indexing below (macd['ema_12'], kd['%K'], ...)
    # requires compute_macd/compute_kd to return the full DataFrame, not a
    # single Series — confirm their return values.
    macd = compute_macd(normalized_price)
    kd = compute_kd(normalized_price)
    sma_plot = pd.concat([normalized_price, sma, prices_sma_ratio], axis=1)
    sma_plot.columns = [symbol, 'SMA', 'Price/SMA']
    sma_plot.plot(grid=True, title='Simple Moving Average', use_index=True)
    plt.savefig("sma.png")
    momentum_plot = pd.concat([normalized_price, momentum], axis=1)
    momentum_plot.plot(grid=True, title='Momentum', use_index=True)
    plt.savefig("momentum.png")
    ema_plot = pd.concat([normalized_price, ema, prices_ema_ratio], axis=1)
    ema_plot.columns = [symbol, 'EMA', 'Price/EMA']
    ema_plot.plot(grid=True, title='Exponential Moving Average', use_index=True)
    plt.savefig("ema.png")
    # NOTE(review): the next assignment is dead — it is immediately
    # overwritten by the concat on the following line.
    macd_plot = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    macd_plot = pd.concat([normalized_price, macd['ema_12'], macd['ema_26'],macd['MACD'],macd['Signal']], axis=1)
    macd_plot.columns = [symbol, '12 days EMA', '26 days EMA', 'MACD','Signal']
    fig, axes = plt.subplots(2, 1)
    fig.suptitle('Moving Average Convergence Divergence')
    # NOTE(review): "JPM" is hard-coded; for other syms this KeyErrors since
    # the column is renamed to symbol above — confirm intended usage.
    axes[0].plot(macd_plot["JPM"])
    axes[0].plot(macd_plot["12 days EMA"])
    axes[0].plot(macd_plot["26 days EMA"])
    axes[1].plot(macd_plot["MACD"])
    axes[1].plot(macd_plot["Signal"])
    #axes[0].legend(loc="lower left")
    #axes[1].legend(loc="lower left")
    axes[0].get_xaxis().set_visible(False)
    axes[0].get_yaxis().set_visible(True)
    axes[1].tick_params(labelrotation=45)
    plt.savefig("macd.png")
    kd_plot = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    kd_plot = pd.concat([normalized_price, kd['%K'], kd['%D']], axis=1)
    kd_plot.columns = [symbol, '%K', '%D']
    fig, axes = plt.subplots(2, 1)
    fig.suptitle('Stochastic Oscillator')
    axes[0].plot(kd_plot["JPM"])
    axes[1].plot(kd_plot["%K"])
    axes[1].plot(kd_plot["%D"])
    #axes[0].legend(loc="lower left")
    #axes[1].legend(loc="lower left")
    axes[0].get_xaxis().set_visible(False)
    axes[0].get_yaxis().set_visible(True)
    axes[1].tick_params(labelrotation=45)
    plt.savefig("kd.png")
def test_code():
    """Entry point: run the indicator computation with default arguments."""
    compute_indicators()


if __name__ == "__main__":
    test_code()
import struct
from pathlib import Path
from npkpy.common import NPKError, NPKIdError, NPKMagicBytesError
from npkpy.npk.npk_constants import CNT_HANDLER
from npkpy.npk.cnt_basic import BYTES_LEN_CNT_ID, BYTES_LEN_CNT_PAYLOAD_LEN
from npkpy.npk.npk_file_basic import FileBasic
MAGIC_BYTES = b"\x1e\xf1\xd0\xba"
BYTES_LEN_MAGIC_HEADER = 4
BYTES_LEN_PCK_SIZE_LEN = 4
"""
0____4____8____b____f
| | | | |
0_|AAAA|BBBB| C ..... |
1_|....|....|....|....|
A = MAGIC BYTES (4)
B = PCK SIZE (4)
C = Begin of Container area
"""
class Npk(FileBasic):
    """Parses a MikroTik .npk package file into its container (cnt) list.

    Layout: 4 magic bytes, 4-byte little-endian payload length, then a
    sequence of containers (2-byte id, 4-byte payload length, payload).
    Containers are parsed lazily on first access to pck_cnt_list.
    """
    # Lazily-built cache of parsed containers (see pck_cnt_list).
    __cnt_list = None

    def __init__(self, file_path: Path):
        super().__init__(file_path)
        self.cnt_offset = 8  # containers start right after magic + size
        self._data = self.read_data_from_file(offset=0, size=self.cnt_offset)
        self._check_magic_bytes(error_msg="Magic bytes not found in Npk file")
        # NOTE(review): assumes at least one container exists; an empty
        # package would raise IndexError here — confirm.
        self.pck_header = self.pck_cnt_list[0]

    @property
    def pck_magic_bytes(self):
        """The 4 magic bytes at the start of the package."""
        return struct.unpack_from("4s", self._data, 0)[0]

    @property
    def pck_payload_len(self):
        """Total payload length (refreshed first if any container changed)."""
        self.__pck_payload_size_update()
        payload_len = struct.unpack_from("I", self._data, 4)[0]
        return payload_len

    def __pck_payload_size_update(self):
        # Recompute the header size field when any container was modified,
        # clearing the modified flags along the way.
        # NOTE(review): pack_into needs self._data to be a writable buffer
        # (e.g. bytearray) — confirm read_data_from_file's return type.
        if any(cnt.modified for cnt in self.pck_cnt_list):
            current_size = 0
            for cnt in self.pck_cnt_list:
                current_size += cnt.cnt_full_length
                cnt.modified = False
            struct.pack_into("I", self._data, 4, current_size)

    @property
    def pck_full_size(self):
        """Size of the whole package: header fields plus payload."""
        return BYTES_LEN_MAGIC_HEADER + BYTES_LEN_PCK_SIZE_LEN + self.pck_payload_len

    @property
    def pck_full_binary(self):
        """Serialize the package back to bytes (header + all containers)."""
        binary = MAGIC_BYTES + struct.pack("I", self.pck_payload_len)
        for cnt in self.pck_cnt_list:
            binary += cnt.cnt_full_binary
        return binary

    @property
    def pck_enumerate_cnt(self):
        """Yield (position, container) pairs in file order."""
        for pos, cnt in enumerate(self.pck_cnt_list):
            yield pos, cnt

    @property
    def pck_cnt_list(self):
        """All containers of the package, parsed once and cached."""
        if not self.__cnt_list:
            self.__cnt_list = self.__parse_all_cnt()
        return self.__cnt_list

    def __parse_all_cnt(self):
        # Walk the file container by container until the end of file.
        lst = []
        offset = self.cnt_offset
        while offset < self.file.stat().st_size - 1:
            lst.append(self.__get_cnt(offset))
            offset += BYTES_LEN_CNT_ID + BYTES_LEN_CNT_PAYLOAD_LEN + lst[-1].cnt_payload_len
        return lst

    def __get_cnt(self, offset):
        """Parse the single container starting at *offset*.

        Raises NPKError on truncated data and NPKIdError for unknown ids.
        """
        cnt_id = struct.unpack_from("H", self.read_data_from_file(offset, 2))[0]
        payload_len = struct.unpack_from("I", self.read_data_from_file(offset + BYTES_LEN_CNT_ID, 4))[0]
        pkt_len = BYTES_LEN_CNT_ID + BYTES_LEN_CNT_PAYLOAD_LEN + payload_len
        data = self.read_data_from_file(offset, pkt_len)
        if len(data) != pkt_len:
            raise NPKError(f"File maybe corrupted. Please download again. File: {self.file.absolute()}")
        try:
            return CNT_HANDLER[cnt_id](data, offset)
        except KeyError as e:
            raise NPKIdError(f"Failed with cnt id: {cnt_id}\n"
                             f"New cnt id discovered in file: {self.file.absolute()}") from e

    def _check_magic_bytes(self, error_msg):
        # Reject files that do not start with the npk magic sequence.
        if not self.pck_magic_bytes == MAGIC_BYTES:
            raise NPKMagicBytesError(error_msg)
import codecs
import logging
import pandas as pd
from ekorpkit import eKonf
log = logging.getLogger(__name__)
class BaseInfo:
    """Schema/metadata holder for tabular data backed by an eKonf config.

    Wraps the column info (index, ids, dtypes) of a pandas DataFrame and
    keeps it in sync as the frame is re-indexed, merged or extended.
    Upper-cased properties proxy into the underlying config dict.
    """

    Keys = eKonf.Keys

    def __init__(self, **args):
        self.args = eKonf.to_config(args)
        self._initialized = False

    def __str__(self):
        classname = self.__class__.__name__
        s = f"{classname} :\n{self.INFO}"
        return s

    def init_info(self, data):
        """Reconcile the configured index/ids with *data* (first call only)."""
        if self._initialized:
            return data
        if isinstance(data, pd.DataFrame):
            log.info(
                f"index: {self.INDEX}, index of data: {data.index.name}, columns: {list(data.columns)}, id: {self.IDs}"
            )
            if data.index.name is None:
                data.index.name = self.INDEX
            elif self.INDEX is None:
                self.INDEX = data.index.name
            elif self.INDEX != data.index.name and self.INDEX in data.columns:
                data = self.set_index(data, self.INDEX)
            elif self.INDEX != data.index.name and self.INDEX not in data.columns:
                log.warning(f"{self.INDEX} not in dataframe")
        if not self.IDs or self.IDs[0] == self.Keys.INDEX.value:
            self.IDs = [self.INDEX]
        self.set_dtypes(data)
        self._initialized = True
        return data

    def set_index(self, data, name):
        """Make column *name* the index of *data* and record it as INDEX."""
        if isinstance(data, pd.DataFrame):
            if name in data.columns:
                data.set_index(name, inplace=True)
                self.INDEX = name
            else:
                log.warning(f"{name} not in dataframe")
        return data

    def reset_index(self, data, rename_old_index=None, drop=False):
        """Reset the index of *data*, optionally renaming the old index column."""
        if isinstance(data, pd.DataFrame):
            if self.INDEX in data.columns:
                data.drop(self.INDEX, axis=1, inplace=True)
            data = data.reset_index(drop=drop)
            if not drop and rename_old_index is not None and self.INDEX in data.columns:
                data = data.rename(columns={self.INDEX: rename_old_index})
            self.INDEX = self.Keys.INDEX.value
            self.set_dtypes(data)
        return data

    def reset_id(self, data):
        """Demote the current id column to _ID and rebuild a fresh index."""
        if isinstance(data, pd.DataFrame):
            data.rename(columns={self.ID: self._ID}, inplace=True)
            data = self.reset_index(data, rename_old_index=self.ID)
        return data

    def combine_ids(self, data):
        """Join multiple id columns into one ID column with ID_SEPARATOR."""
        if self.IDs is None:
            return data
        if isinstance(data, pd.DataFrame):
            # BUG FIX: was `self.IDS`, which is not defined anywhere
            # (the property is `IDs`) and raised AttributeError.
            if len(self.IDs) > 1:
                data[self.ID] = data[self.IDs].apply(
                    lambda row: self.ID_SEPARATOR.join(row.values.astype(str)),
                    axis=1,
                )
        return data

    def common_columns(self, dataframes):
        """
        Find common columns between dataframes
        """
        if not isinstance(dataframes, list):
            dataframes = [dataframes]
        common_columns = list(set.intersection(*(set(df.columns) for df in dataframes)))
        df = dataframes[0][common_columns].copy()
        dtypes = df.dtypes.apply(lambda x: x.name).to_dict()
        self.DATATYPEs = dtypes
        return common_columns

    def to_datetime(self, data):
        """Parse the configured datetime columns of *data* in place."""
        if self.DATETIME_INFO is None:
            return data
        _columns = eKonf.ensure_list(self.DATETIME_INFO.get(eKonf.Keys.COLUMNS))
        _format = self.DATETIME_INFO.get(eKonf.Keys.FORMAT, None)
        rcParams = self.DATETIME_INFO.get(eKonf.Keys.rcPARAMS) or {}
        if _columns is None:
            log.info("No datetime column found")
            return data
        if isinstance(data, pd.DataFrame):
            for _col in _columns:
                if _col in data.columns:
                    data[_col] = pd.to_datetime(data[_col], format=_format, **rcParams)
                    log.info(f"converted datetime column {_col}")
        return data

    def append_id(self, _id):
        """Append *_id* to the list of id columns (normalizing str to list)."""
        log.info(f"Adding id [{_id}] to {self.IDs}")
        if self.IDs is None:
            self.IDs = [_id]
        else:
            if isinstance(self.IDs, str):
                self.IDs = [self.IDs]
            self.IDs += [_id]
        log.info(f"Added id [{_id}], now {self.IDs}")

    def append_dataset(self, data, _dataset):
        """Add a constant 'dataset' column and register it as id/str dtype."""
        if _dataset is None:
            return data
        if isinstance(data, pd.DataFrame):
            data[self.DATASET] = _dataset
            if self.DATASET not in self.IDs:
                self.append_id(self.DATASET)
            if self.DATA and self.DATASET not in self.DATA:
                self.DATATYPEs[self.DATASET] = "str"
            log.info(f"Added a column [{self.DATASET}] with value [{_dataset}]")
        return data

    def append_split(self, data, _split):
        """Add a constant 'split' column and register it as id/str dtype."""
        if _split is None:
            return data
        if isinstance(data, pd.DataFrame):
            data[self.SPLIT] = _split
            if self.SPLIT not in self.IDs:
                self.append_id(self.SPLIT)
            if self.DATA and self.SPLIT not in self.DATA:
                self.DATATYPEs[self.SPLIT] = "str"
            log.info(f"Added a column [{self.SPLIT}] with value [{_split}]")
        return data

    def set_dtypes(self, data):
        """Record the dtype name of every column of *data* in DATATYPEs."""
        if isinstance(data, pd.DataFrame):
            dtypes = data.dtypes.apply(lambda x: x.name).to_dict()
            self.DATATYPEs = dtypes
        return data

    @property
    def _ID(self):
        return self.Keys._ID.value

    @property
    def ID_SEPARATOR(self):
        return eKonf.Defaults.ID_SEP.value

    @property
    def INFO(self):
        return self.args

    @property
    def DATETIME_INFO(self):
        return self.INFO.get(self.Keys.DATETIME)

    @DATETIME_INFO.setter
    def DATETIME_INFO(self, value):
        self.INFO[eKonf.Keys.DATETIME.value] = value

    @property
    def DATATYPEs(self):
        return self.INFO.get(eKonf.Keys.DATA)

    @DATATYPEs.setter
    def DATATYPEs(self, value):
        self.INFO[eKonf.Keys.DATA.value] = value

    @property
    def COLUMNs(self):
        return self.INFO.get(eKonf.Keys.COLUMNS) or {}

    @COLUMNs.setter
    def COLUMNs(self, value):
        self.INFO[eKonf.Keys.COLUMNS.value] = value

    @property
    def DATA(self):
        if self.DATATYPEs is None:
            return None
        return list(self.DATATYPEs.keys())

    @property
    def DATASET(self):
        return eKonf.Keys.DATASET.value

    @property
    def INDEX(self):
        return self.COLUMNs.get(eKonf.Keys.INDEX) or eKonf.Keys.INDEX.value

    @INDEX.setter
    def INDEX(self, value):
        self.COLUMNs[eKonf.Keys.INDEX.value] = value

    @property
    def ID(self):
        return eKonf.Keys.ID.value

    @property
    def IDs(self):
        return eKonf.ensure_list(self.COLUMNs.get(self.ID))

    @IDs.setter
    def IDs(self, value):
        self.COLUMNs[self.ID] = value

    @property
    def SPLIT(self):
        return eKonf.Keys.SPLIT.value
class CorpusInfo(BaseInfo):
    """BaseInfo specialized for text corpora.

    Adds timestamp parsing, text-column combination, metadata merging and
    corpus/split bookkeeping on top of the generic column handling.
    """

    def __init__(self, **args):
        super().__init__(**args)

    def to_timestamp(self, data, metadata=None):
        """Build the 'timestamp' column from the configured key.

        The key is looked up in *data* first, then in *metadata* (in which
        case the parsed column is merged into *data* on MERGE_META_ON).
        Returns the (data, metadata) pair.
        """
        if self.TIMESTAMP_INFO is None:
            return data, metadata
        _key = self.TIMESTAMP_INFO.get(eKonf.Keys.KEY)
        _format = self.TIMESTAMP_INFO.get(eKonf.Keys.FORMAT)
        rcParams = self.TIMESTAMP_INFO.get(eKonf.Keys.rcPARAMS) or {}
        if _key is None:
            log.info("No timestamp key found")
            return data, metadata
        if isinstance(data, pd.DataFrame):
            if _key in data.columns:
                data[self.TIMESTAMP] = pd.to_datetime(
                    data[_key], format=_format, **rcParams
                )
                log.info(f"Loaded timestamp column {self.TIMESTAMP}")
            elif metadata is not None and _key in metadata.columns:
                metadata[self.TIMESTAMP] = pd.to_datetime(
                    metadata[_key], format=_format, **rcParams
                )
                df_dt = metadata[self.MERGE_META_ON + [self.TIMESTAMP]].copy()
                data = data.merge(df_dt, on=self.MERGE_META_ON, how="left")
                # metadata.drop(self.TIMESTAMP, axis=1, inplace=True)
                log.info(f"Timestamp column {self.TIMESTAMP} added to data")
        return data, metadata

    def combine_texts(self, data):
        """Join multiple text columns into one 'text' column with SEGMENT_SEP,
        updating DATATYPEs accordingly."""
        if self.TEXTs is None:
            return data
        if isinstance(data, pd.DataFrame):
            data[self.TEXTs] = data[self.TEXTs].fillna("")
            if len(self.TEXTs) > 1:
                data[self.TEXT] = data[self.TEXTs].apply(
                    lambda row: self.SEGMENT_SEP.join(row.values.astype(str)),
                    axis=1,
                )
                # Replace the individual text columns with the combined one.
                self.DATATYPEs = {
                    k: v for k, v in self.DATATYPEs.items() if k not in self.TEXTs
                }
                self.DATATYPEs[self.TEXT] = "str"
        return data

    def merge_metadata(self, data, metadata):
        """Left-merge columns of *metadata* not yet present in *data*."""
        if metadata is None:
            return data
        meta_cols = [col for col in metadata.columns if col not in data.columns]
        meta_cols += self.MERGE_META_ON
        data = data.merge(metadata[meta_cols], on=self.MERGE_META_ON, how="left")
        return data

    def append_split_to_meta(self, metadata, _split):
        """Add a constant 'split' column to *metadata* (mirrors append_split)."""
        if _split is None:
            return metadata
        if isinstance(metadata, pd.DataFrame):
            metadata[self.SPLIT] = _split
            if self.METADATA and self.SPLIT not in self.METADATA:
                self.METATYPEs[self.SPLIT] = "str"
            log.info(f"Added a column [{self.SPLIT}] with value [{_split}]")
        return metadata

    def append_corpus(self, data, _corpus):
        """Add a constant 'corpus' column and register it as id/str dtype
        in both data and metadata schemas."""
        if _corpus is None:
            return data
        if isinstance(data, pd.DataFrame):
            data[self.CORPUS] = _corpus
            if self.CORPUS not in self.IDs:
                self.append_id(self.CORPUS)
            if self.DATA and self.CORPUS not in self.DATA:
                self.DATATYPEs[self.CORPUS] = "str"
            if self.METADATA and self.CORPUS not in self.METADATA:
                self.METATYPEs[self.CORPUS] = "str"
            log.info(f"Added a column [{self.CORPUS}] with value [{_corpus}]")
        return data

    @property
    def MERGE_META_ON(self):
        # Columns used to align data with metadata; falls back to the ids.
        return eKonf.ensure_list(self.COLUMNs.get(eKonf.Keys.META_MERGE_ON)) or self.IDs

    @MERGE_META_ON.setter
    def MERGE_META_ON(self, value):
        self.COLUMNs[eKonf.Keys.META_MERGE_ON.value] = value

    @property
    def TEXT(self):
        return eKonf.Keys.TEXT.value

    @property
    def TEXTs(self):
        # Configured text column(s), normalized to a list.
        return eKonf.ensure_list(self.COLUMNs.get(self.TEXT))

    @TEXTs.setter
    def TEXTs(self, value):
        self.COLUMNs[self.TEXT] = value

    @property
    def METADATA(self):
        # Names of metadata columns, or None when no metadata schema is set.
        if self.METATYPEs is None:
            return None
        return list(self.METATYPEs.keys())

    @property
    def TIMESTAMP(self):
        return eKonf.Keys.TIMESTAMP.value

    @property
    def CORPUS(self):
        return eKonf.Keys.CORPUS.value

    @property
    def METATYPEs(self):
        return self.INFO.get(eKonf.Keys.META)

    @METATYPEs.setter
    def METATYPEs(self, value):
        self.INFO[eKonf.Keys.META.value] = value

    @property
    def TIMESTAMP_INFO(self):
        return self.INFO.get(self.TIMESTAMP)

    @TIMESTAMP_INFO.setter
    def TIMESTAMP_INFO(self, value):
        self.INFO[self.TIMESTAMP] = value

    @property
    def SEGMENT_SEP(self):
        # Configured separator with escape sequences (e.g. "\\n\\n") decoded.
        return codecs.decode(
            self.INFO.get("segment_separator", "\n\n"), "unicode_escape"
        )

    @property
    def SENTENCE_SEP(self):
        return codecs.decode(
            self.INFO.get("sentence_separator", "\n"), "unicode_escape"
        )
class DatasetInfo(BaseInfo):
    """Schema holder for a plain dataset; inherits all behavior from BaseInfo."""

    def __init__(self, **args):
        super().__init__(**args)
class FeatureInfo(BaseInfo):
    """BaseInfo specialized for feature tables, exposing the configured
    target column (Y) and feature column(s) (X)."""

    def __init__(self, **args):
        super().__init__(**args)

    @property
    def Y(self):
        # Target column name from the column config.
        return self.COLUMNs.get(eKonf.Keys.Y)

    @Y.setter
    def Y(self, value):
        self.COLUMNs[eKonf.Keys.Y.value] = value

    @property
    def X(self):
        # Feature column(s), normalized to a list.
        return eKonf.ensure_list(self.COLUMNs.get(eKonf.Keys.X))

    @X.setter
    def X(self, value):
        self.COLUMNs[eKonf.Keys.X.value] = value
def fizz1():
    """Print the numbers 1..49, tagging multiples of 3 with 'Fizz' and
    multiples of 5 with 'Buzz' (both for multiples of 15)."""
    for number in range(1, 50):
        tags = ""
        if number % 3 == 0:
            tags += "Fizz"
        if number % 5 == 0:
            tags += "Buzz"
        print(f"{number} {tags}")


fizz1()
"""
Lists
Check your answer: https://judge.softuni.bg/Contests/Practice/Index/425#5
06. * Winecraft
Problem:
You will be given a sequence of integers, which will represent grapes. On the next line,
you will be given N - an integer, indicating the growth days. You must increment every integer in the list by 1 N times.
However, if one of the grapes’ value is greater than the grape to its left and is also greater than the one to his
right, it becomes Greater grape.
The Greater grape steals the values, which would have been incremented to its neighbors, and adds them to itself,
instead of being incremented by 1 like normal. On top of that the grapes, which are neighbors of the Greater grape
are decremented by 1 (note: if any of the greater grapes’ neighboring grapes have a value of 0,
DON’T decrement it and DON’T add its value to the greater grape).
Example: If we the list 1 12 4. The element at position 1 is greater grape, because it is bigger than the elements
on the left and on the right:
- First iteration: The Greater grape increases with 1 by default and takes 2 from its neighbors. The new list look like:
0 15 3
- Second iteration: The Greater grape increases with 1 by default and takes only 1 from its neighbors. This is because
the grape on left is 0 and the Greater grape takes only from the left one. The list now looks like this: 0 16 2
Lesser grapes don’t get incremented when they have as neighbor Greater grape , but instead they have their values
decremented by 1 by their neighboring Greater grapes (if there are such), therefore their values get added to the
Greater grapes.
After you're done with the growing (processed the grapes N times), every grape which has a value, lower than N
should be set to a value of 0 and you should not increment them or steal values from them.
The process should then repeat, again incrementing everything N times, where the Greater grapes steal from the
lesser grapes, until your list contains less than N grapes with value more than N.
After that, print the remaining grapes on the console (one line, space-separated).
Examples:
Input: Output:
4 4 15 4 7 24
3
Input: Output:
10 11 12 13 19 13 20 35 44
5
Input: Output:
6 7 6 2 16 5
3
"""
def growing(grapes, n):
    """Run *n* growth iterations on *grapes*, mutating the list in place.

    Each iteration: every "greater grape" (strictly bigger than both
    neighbours) gains 1 plus 1 for each non-zero neighbour it steals from
    (those neighbours each lose 1). Afterwards every grape whose value was
    left untouched this iteration — and is positive — grows by 1.
    """
    for _ in range(n):
        before = grapes.copy()
        # Greater-grape pass: works in place, so earlier steals are visible
        # to later comparisons (left neighbour processed before right).
        for i in range(1, len(grapes) - 1):
            if grapes[i - 1] < grapes[i] > grapes[i + 1]:
                grapes[i] += 1
                for j in (i - 1, i + 1):
                    if grapes[j] > 0:
                        grapes[j] -= 1
                        grapes[i] += 1
        # Normal growth for untouched, positive grapes.
        for idx, old in enumerate(before):
            if old == grapes[idx] and grapes[idx] > 0:
                grapes[idx] += 1
def solve(grapes, n):
    """Repeat n-day growth rounds until fewer than n grapes exceed n.

    Mutates *grapes* (values below n are zeroed at the end) and returns the
    surviving grapes (those > n after the last round) as a space-separated
    string.
    """
    survivors = grapes.copy()
    while len(survivors) >= n:
        growing(grapes, n)
        survivors = [value for value in grapes if value > n]
    for idx, value in enumerate(grapes):
        if value < n:
            grapes[idx] = 0
    return ' '.join(str(value) for value in survivors)
def main():
    """Read the grape list and N from stdin, then print the surviving grapes."""
    grapes = list(map(int, input().split()))
    n = int(input())
    print(solve(grapes, n))


if __name__ == '__main__':
    main()
#DEFAULT ARGUMENTS#
import torch
import numpy as np
from utilities.optirank.src.BCD.Create_Instructions_List import classical_architecture_separate_b_once
from utilities.small_functions import percentage_zero, percentage_ones
from utilities.optirank.src.relaxation.lambda_P_setting import delta_loss
from utilities.optirank.src.BCD.BCD_units.convergence_criterion import absolute_delta_args
# Default hyper-parameters for the block-coordinate-descent solver.
# (Note: "initializazionparameters__name" is the key spelled as consumed
# downstream — do not "fix" the typo without updating consumers.)
default_BCD_args = {"BCD_architecture": classical_architecture_separate_b_once, "max_iter": 10000, "search_method__L_min": 10**(-10),
                    "search_method__eta": [1.5, 1.5], "search_method__init_L_method": "hessian_proj", "initializazionparameters__name": "gamma_05_w_0",
                    "search_method__n_min": -1, "search_method__L_start": "previous",
                    "search_method__search_method_name": "first_best", "search_method__n_max": np.inf}
#chosen after inspection of setting lambda_P strategy
default_setting_lambda_P_strategy_args = {"class":delta_loss, "args":{"M": 100, "delta_lambda_min": 10**(-20), "with_interpolation": False}}
# Classifier defaults: rounding/relaxation settings layered over the BCD args.
default_bilinear_ranking_classifier_args = {"rounding_threshold": 0.0, "setting_lambda_P_strategy_args": default_setting_lambda_P_strategy_args, "convergence_criterion_args": absolute_delta_args, "high_tol": False, "max_relaxation_iter": 10000, "tol_dist_to_border": 10**(-10), **default_BCD_args}
default_optirank_args = {**default_bilinear_ranking_classifier_args, "R_normalization": "k"}
#default_bilinear_optirank_args_no_constraint_sum_gamma = {**default_bilinear_ranking_classifier_args, "R_normalization": "d"}
#functions for diagnostics
subgradients_funs_dict = {
"|dsurrogate_loss/dw|min": lambda p: torch.norm(p.subgradient_minimal_norm_surrogate_loss_on_w()).item(),
"|dsurrogate_loss/dgamma_dual|": lambda p: torch.norm(p.gradient_surrogate_loss_with_penalties_dgamma_dual()).item(),
"|dsurrogate_loss/db|":lambda p: torch.norm(p.dlogloss_db()).item(),
"|dsurrogate_loss/dgamma|proj": lambda p: torch.norm(p.gradient_surrogate_loss_on_gamma()).item()
}
percentages_funs_dict = {"perc_gamma_1": lambda p: percentage_ones(p.gamma.numpy()),
"perc_gamma_0": lambda p: percentage_zero(p.gamma.numpy())} | paolamalsot/optirank | utilities/optirank/classifiers/default_args.py | default_args.py | py | 2,224 | python | en | code | 0 | github-code | 36 |
3954925048 | # -*- codind: utf-8 -*-
import os, sys, random, argparse, time
import math
import json
import codecs
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
install_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(install_path)
root_dir = os.path.join(install_path, 'exps/')
if not os.path.exists(root_dir):
os.makedirs(root_dir)
from xslu.utils import make_logger, read_emb
from xslu.optim import Optim
import xslu.Constants as Constants
from text.text import build_class_vocab
from text.dstc2 import slot2dic, process_sent
from model import RNN2One
from dataloader import OneBestIter4STC
from trainer import OneBestTrainer4STC
def model_opts(parser):
    """Register model-architecture options on `parser`."""
    parser.add_argument(
        '-model_type', type=str, default='RNN2One',
        help="which model to use: RNN2One")
def train_opts(parser):
    """Register training options: paths, embeddings, device, optimisation, seed."""
    # Paths.
    parser.add_argument('-experiment', required=True,
            help="Root path for saving results, models and logs")
    parser.add_argument('-data_root', required=True,
            help="Path prefix to the train and valid and class")
    parser.add_argument('-save_model', default='best.pt',
            help="Saved model filename")
    # Pre-trained embedding handling.
    parser.add_argument('-load_emb', action='store_true',
            help='whether to load pre-trained word embeddings')
    parser.add_argument('-fix_emb', action='store_true',
            help='whether to fix pre-trained word embeddings')
    # Device and optimisation hyper-parameters.
    parser.add_argument('-deviceid', type=int, default=0,
            help="device id to run, -1 for cpus")
    parser.add_argument('-batch_size', type=int, default=10,
            help="batch size")
    parser.add_argument('-epochs', type=int, default=100,
            help="epochs")
    parser.add_argument('-optim', type=str, default='adam',
            help="optimizer")
    parser.add_argument('-lr', type=float, default=0.001,
            help="learning rate")
    parser.add_argument('-max_norm', type=float, default=5,
            help="threshold of gradient clipping (2 norm), < 0 for no clipping")
    # Reproducibility.
    parser.add_argument('-seed', default=3435,
            help='random seed')
def test_opts(parser):
# Data options
parser.add_argument('-test_json', default='test.json', type=str,
help="preprocessed test json file")
parser.add_argument('-save_decode', default='decode.json', type=str,
help="Path to the file of saving decoded results")
parser.add_argument('-load_chkpt', default=None, type=str,
help="Path to the checkpoint file to be loaded")
def parse_args():
    """Parse command-line options and attach derived fields to the namespace.

    Side effects: loads the preprocessed vocabulary/label memory from
    data_root, selects the CUDA device when deviceid >= 0, and seeds
    random/numpy/torch for reproducibility.
    """
    parser = argparse.ArgumentParser(
        description='Program Options',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
        )
    parser.add_argument('-mode', default='train', type=str,
            help="run mode: train, test, error")
    model_opts(parser)
    train_opts(parser)
    test_opts(parser)
    opt = parser.parse_args()
    print(opt)
    # Fixing embeddings only makes sense if they were loaded in the first place.
    if opt.fix_emb:
        assert opt.load_emb is True
    opt.memory = torch.load(opt.data_root + 'memory.pt')
    opt.class2idx = opt.memory['class2idx']
    if opt.load_emb:
        # Vocabulary variant aligned with pre-trained GloVe vectors (per key name).
        opt.word2idx = opt.memory['word2idx_w_glove']
    else:
        opt.word2idx = opt.memory['word2idx']
    if opt.deviceid >= 0:
        torch.cuda.set_device(opt.deviceid)
        opt.cuda = True
    else:
        opt.cuda = False
    # fix random seed
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    return opt
def make_model(opt):
    """Build the network selected by opt.model_type; move it to the GPU if requested."""
    if opt.model_type != 'RNN2One':
        raise Exception('Undefined model type!')
    model = RNN2One(len(opt.word2idx), len(opt.class2idx))
    if opt.cuda:
        model = model.cuda()
    return model
def train(opt):
    """Train the model on one-best ASR hypotheses and checkpoint the best epoch."""
    # basics definition: experiment directory, checkpoint path and logger.
    opt.experiment = os.path.join(root_dir, opt.experiment)
    if not os.path.exists(opt.experiment):
        os.makedirs(opt.experiment)
    opt.save_model = os.path.join(opt.experiment, opt.save_model)
    opt.log_path = os.path.join(opt.experiment, 'log.train')
    opt.logger = make_logger(opt.log_path)
    # dataIter definition (train set shuffled, valid set not).
    train_iter = OneBestIter4STC(opt.data_root+'train', opt.word2idx, opt.class2idx,
            opt.batch_size, opt.cuda, True)
    valid_iter = OneBestIter4STC(opt.data_root+'valid', opt.word2idx, opt.class2idx,
            opt.batch_size, opt.cuda, False)
    # model definition
    model = make_model(opt)
    if opt.load_emb:
        emb = read_emb(opt.word2idx)
        model.emb.init_weight_from_pre_emb(emb, opt.fix_emb)
    print(model)
    # criterion definition: summed binary cross-entropy (multi-label targets).
    criterion = nn.BCELoss(reduction='sum')
    if opt.cuda:
        criterion = criterion.cuda()
    # optimizer definition with gradient clipping at opt.max_norm.
    optimizer = Optim(opt.optim, opt.lr, max_grad_norm=opt.max_norm)
    optimizer.set_parameters(model.named_parameters())
    print('Trainable parameter number: {}'.format(len(optimizer.params)))
    # training procedure
    trainer = OneBestTrainer4STC(model, criterion, optimizer, opt.logger)
    trainer.train(opt.epochs, train_iter, valid_iter, opt.save_model)
def test(opt):
    """Decode the test set with a trained checkpoint and write DSTC2-style JSON.

    Only the top ASR hypothesis of each turn is decoded; classes whose sigmoid
    score exceeds 0.5 are emitted as slot-value hypotheses with score 1.0.
    """
    opt.experiment = os.path.join(root_dir, opt.experiment)
    opt.load_chkpt = os.path.join(opt.experiment, opt.load_chkpt)
    opt.save_decode = os.path.join(opt.experiment, opt.save_decode)
    opt.test_json = os.path.join(opt.data_root, opt.test_json)
    idx2class = {v:k for k,v in opt.class2idx.items()}
    model = make_model(opt)
    # Load weights on CPU regardless of where they were saved.
    chkpt = torch.load(opt.load_chkpt, map_location=lambda storage, log: storage)
    model.load_state_dict(chkpt)
    # =======================================
    model.eval()
    # =======================================
    sessions = json.loads(open(opt.test_json).read())['sessions']
    print('Decoding ...')
    decode_sessions = {'sessions': []}
    for session in sessions:
        n_session = {}
        n_session['session-id'] = session['session-id']
        n_session['turns'] = []
        for turn in session['turns']:
            asr_hyps = turn['asr-hyps']
            sent = asr_hyps[0]['asr-hyp']
            tokens = process_sent(sent)
            if len(tokens) == 0:
                # Empty utterance after preprocessing: no semantic hypothesis.
                slu_hyp = []
            else:
                # Map tokens to ids, with UNK fallback for out-of-vocabulary words.
                sent_ids = [opt.word2idx.get(w) if w in opt.word2idx else Constants.UNK for w in tokens]
                datas = torch.from_numpy(np.asarray(sent_ids, dtype='int64')).view(1, -1)
                if opt.cuda:
                    datas = datas.cuda()
                probs = model(datas, None)
                scores = probs.data.cpu().view(-1,).numpy()
                # Multi-label decision: keep every class above the 0.5 threshold.
                pred_classes = [i for i,p in enumerate(scores) if p > 0.5]
                classes = [idx2class[i] for i in pred_classes]
                slu_hyp = [slot2dic(string) for string in classes]
            n_session['turns'].append(
                    {
                        'asr-hyps': asr_hyps,
                        'slu-hyps': [{'slu-hyp': slu_hyp, 'score': 1.0}]
                    }
                )
        decode_sessions['sessions'].append(n_session)
    string = json.dumps(decode_sessions, sort_keys=True, indent=4, separators=(',', ':'))
    with open(opt.save_decode, 'w') as f:
        f.write(string)
    print('Decode results saved in {}'.format(opt.save_decode))
if __name__ == '__main__':
    # Dispatch on the requested run mode; note that 'error' is advertised by
    # -mode's help text but not implemented here.
    opt = parse_args()
    if opt.mode == 'train':
        train(opt)
    elif opt.mode == 'test':
        test(opt)
    else:
        raise ValueError("unsupported type of mode {}".format(opt.mode))
| ZiJianZhao/Unaligned-SLU | base/stc/main.py | main.py | py | 7,763 | python | en | code | 1 | github-code | 36 |
20522871772 | from django.shortcuts import render, get_object_or_404
from .models import Project
from django.db.models import Q
# Create your views here.
def render_producto(request):
    """List all products, optionally filtered by the 'buscar' query parameter.

    Fix: removed leftover debug prints (including an inappropriate one) that
    dumped request data and branch markers to stdout on every request.
    """
    queryset = request.GET.get("buscar")
    productos = Project.objects.all()
    if queryset:
        # Case-insensitive search over title or description.
        productos = Project.objects.filter(
            Q(title__icontains = queryset) |
            Q(description__icontains = queryset)
        ).distinct()
    return render(request , 'producto.html',{"productos":productos})
def producto_detail(request, producto_id):
    """Render the detail page for one product plus a short list of other products."""
    producto = get_object_or_404(Project, pk=producto_id)
    # NOTE(review): "related" products are simply the first four rows, not items
    # actually related to this product -- confirm whether that is intended.
    related_products = Project.objects.all()[:4]
return render(request, 'producto_detail.html', {"productos": producto, 'related_products':related_products}) | Eliothd2/Imprentala-Website | productos/views.py | views.py | py | 871 | python | en | code | 0 | github-code | 36 |
32925197471 | import time
import random
from copy import deepcopy
class cube():
    """A 3x3 Rubik's cube.

    Face indices: 0=Front, 1=Back, 2=Top, 3=Right, 4=Bottom, 5=Left.
    Each face is a 3x3 grid of colour numbers (the solved cube uses the
    face's own index as its colour).

    Bug fix: `spin(side, "C")` previously snapshotted the cube once and then
    replayed the same stale assignments three times (with the adjacent-strip
    cycling outside the repeat loop entirely), so clockwise turns corrupted
    the cube instead of performing three anticlockwise quarter turns.  Each
    quarter turn now re-snapshots the state via `_quarter_turn`.
    """

    def __init__ (self, model=None):
        if model is None:
            # Solved cube: face c is filled with colour c.
            self.face = [[[c] * 3 for _ in range(3)] for c in range(6)]
        else:
            self.face = model

    def get(self, value):
        # Identity helper, kept for backward compatibility.
        return value

    def _quarter_turn(self, side):
        """Apply one anticlockwise quarter turn to `side`."""
        held = deepcopy(self.face)
        # Rotate the stickers of the turned face itself.
        for i in range(0, 3):
            for e in range(0, 3):
                self.face[side][e][i] = int(held[side][i][2 - e])
        # Cycle the three-sticker strips of the four adjacent faces.
        if side == 0:
            for i in range(0, 3):
                self.face[2][2][i] = int(held[3][i][0])
                self.face[3][i][0] = int(held[4][0][2 - i])
                self.face[4][0][i] = int(held[5][i][2])
                self.face[5][i][2] = int(held[2][2][2 - i])
        elif side == 1:
            for i in range(0, 3):
                self.face[3][i][2] = int(held[2][0][i])
                self.face[2][0][i] = int(held[5][2 - i][0])
                self.face[5][i][0] = int(held[4][2][i])
                self.face[4][2][i] = int(held[3][2 - i][2])
        elif side == 2:
            for i in range(0, 3):
                self.face[0][0][i] = int(held[5][0][i])
                self.face[5][0][i] = int(held[1][0][i])
                self.face[1][0][i] = int(held[3][0][i])
                self.face[3][0][i] = int(held[0][0][i])
        elif side == 3:
            for i in range(0, 3):
                self.face[0][i][2] = int(held[2][i][2])
                self.face[2][i][2] = int(held[1][2 - i][0])
                self.face[1][i][0] = int(held[4][2 - i][2])
                self.face[4][i][2] = int(held[0][i][2])
        elif side == 4:
            for i in range(0, 3):
                self.face[0][2][i] = int(held[3][2][i])
                self.face[3][2][i] = int(held[1][2][i])
                self.face[1][2][i] = int(held[5][2][i])
                self.face[5][2][i] = int(held[0][2][i])
        elif side == 5:
            for i in range(0, 3):
                self.face[0][i][0] = int(held[4][i][0])
                self.face[4][i][0] = int(held[1][2 - i][2])
                self.face[1][i][2] = int(held[2][2 - i][0])
                self.face[2][i][0] = int(held[0][i][0])

    def spin(self, side, direction):
        """Turn `side`: 'A'/'a' = anticlockwise quarter turn, 'C'/'c' = clockwise
        (implemented as three anticlockwise quarter turns)."""
        if direction in ("C", "c"):
            turns = 3
        elif direction in ("A", "a"):
            turns = 1
        else:
            # Previously an unknown direction crashed with UnboundLocalError.
            raise ValueError("direction must be 'C'/'c' or 'A'/'a'")
        for _ in range(turns):
            self._quarter_turn(side)

    def print(self, face):
        """Print the cube as an unfolded net: top face above, the row
        left/front/right/back in the middle, bottom face below."""
        print()
        for r in range(3):
            print("  " + "".join(str(v) + " " for v in face[2][r]))
        print()
        for r in range(3):
            line = ""
            for f in (5, 0, 3, 1):
                line += "".join(str(v) + " " for v in face[f][r]) + " "
            print(line)
        print()
        for r in range(3):
            print("  " + "".join(str(v) + " " for v in face[4][r]))
        print()
print()
def game():
    """Interactive console loop: show the cube, then apply the requested turn.

    Runs forever; interrupt with Ctrl-C.
    """
    rubik = cube()
    while True:
        rubik.print(rubik.face)
        side = int(input("What side do u wanna turn? -> "))
        direction = input("Clockways (C) or Anticlockways (A)? -> ")
        rubik.spin(side, direction)


if __name__ == '__main__':
    # Guard the entry point so that importing this module no longer starts the
    # blocking, infinite interactive loop.
    game()
| jonoreilly/python | pyscripts/rubik/rubik.py | rubik.py | py | 4,719 | python | en | code | 0 | github-code | 36 |
22565281077 | class Solution:
    def twoCitySchedCost(self, costs: List[List[int]]) -> int:
        """Minimum total cost to send half the people to city A and half to B.

        Greedy: rank everyone by b - a (how much is saved by picking A);
        the top half flies to city A, the remainder to city B.
        """
        arr = []
        n = len(costs)
        for i in range(n):
            a, b = costs[i][0], costs[i][1]
            # Store (saving of choosing A, original index).
            arr.append((b-a,i))
        ans = 0
        # Largest b - a first: these people benefit most from city A.
        arr.sort(reverse = True)
        mid = (n-1)//2
        for i in range(mid + 1):
            index = arr[i][1]
            ans += costs[index][0]
        # Everyone else pays the city-B fare.
        for i in range(mid+1,n):
            index = arr[i][1]
            ans += costs[index][1]
return ans | miedan/competetive-programming | two-city-scheduling.py | two-city-scheduling.py | py | 521 | python | en | code | 0 | github-code | 36 |
32195231000 | def addBinary(a, b):
    # Left-pad the shorter operand with '0' so both strings align digit-for-digit.
    if len(a)<len(b):
        while len(a)<len(b):
            a='0'+a
    elif len(b)<len(a):
        while len(b)<len(a):
            b='0'+b
    # Schoolbook addition from the least-significant digit; `binary` collects
    # the result digits most-significant-last (i.e. reversed).
    binary=''
    carry=0
    for i in range(len(a)-1,-1,-1):
        if a[i]=='1' and b[i]=='1' and carry==0:
            binary+='0'
            carry=1
        elif a[i]=='1' and b[i]=='1' and carry==1:
            binary+='1'
            #carry=1
        elif a[i]=='0' and b[i]=='0' and carry==0:
            binary+='0'
        elif a[i]=='0' and b[i]=='0' and carry==1:
            binary+='1'
            carry=0
        elif ((a[i]=='0' and b[i]=='1') or (a[i]=='1' and b[i]=='0')) and carry==1:
            binary+='0'
            #carry=1
        elif ((a[i]=='0' and b[i]=='1') or (a[i]=='1' and b[i]=='0')) and carry==0:
            binary+='1'
    # A leftover carry becomes the new most-significant digit.
    if carry==1:
        binary+='1'
    # Manually reverse the collected digits by swapping from both ends.
    binary=list(binary)
    for i in range(len(binary)//2):
        temp=binary[i]
        binary[i]=binary[len(binary)-1-i]
        binary[len(binary)-1-i]=temp
    bitstring=''
    for x in binary:
        bitstring+=x
return bitstring | JimNtantakas/Leetcode-problems | 67.py | 67.py | py | 1,236 | python | en | code | 0 | github-code | 36 |
38799393419 | from discord import Interaction
import bdg
import enum
class FormatStyle(enum.Enum):
    """Text-formatting modes offered by the /formatar command."""
    UPPER = 0
    LOWER = 1
    REVERSED = 2
    SPACED = 3
    HACKER = 4
    IRONIC = 5
# Leetspeak substitutions applied (case-insensitively) by the HACKER style.
hacker_dict = {
    "a": "4",
    "s": "5",
    "o": "0",
    "e": "3"
}
class FormatCommand(bdg.BdgCommand):
    """Slash command that reformats a text according to the chosen style."""

    header = {
        'name': "formatar",
        'description': "Formate um texto de acordo com o estilo selecionado"
    }

    params = {
        'estilo': "O estilo do texto, podendo ser: UPPER, LOWER, REVERSED, SPACED, HACKER, IRONIC",
        'texto': "O texto a ser formatado no estilo indicado"
    }

    async def on_command(self, i: Interaction, estilo: FormatStyle, texto: str):
        text = ""
        if estilo == FormatStyle.UPPER:
            text = texto.upper()
        elif estilo == FormatStyle.LOWER:
            text = texto.lower()
        elif estilo == FormatStyle.REVERSED:
            # BUG FIX: the original slice texto[len(texto)-1:-1:-1] is always
            # empty (start index equals the stop index -1), so REVERSED
            # produced "".  A plain reversing slice does the job.
            text = texto[::-1]
        elif estilo == FormatStyle.SPACED:
            # One space between every pair of characters.
            chars = list(texto)
            text = " ".join(chars)
        elif estilo == FormatStyle.HACKER:
            # Map each character through hacker_dict; unknown characters pass
            # through unchanged.
            text = "".join([hacker_dict.get(char.lower(), char) for char in texto])
        elif estilo == FormatStyle.IRONIC:
            # Alternating case: characters at odd indices are uppercased.
            for c in range(len(texto)):
                char = texto[c]
                text += char.upper() if c % 2 else char.lower()
await i.response.send_message(":speech_balloon: | " + text) | DanielKMach/BotDusGuri | src/commands/fun/format.py | format.py | py | 1,487 | python | pt | code | 1 | github-code | 36 |
39964545331 | import openai
# SECURITY: never commit a real API key; load it from an environment variable
# (e.g. os.environ["OPENAI_API_KEY"]) instead of hard-coding it here.
openai.api_key = "[YOUR_OPENAI_API]"
# Conversation memory shared with start_conversation, seeded with the system prompt.
chat_history = [{"role": "system", "content": "You are a assistant."}]
def bot_message(input):
    """Append the user's message to the shared history and query gpt-3.5-turbo.

    Returns the raw ChatCompletion response; the assistant's reply is added to
    `chat_history` by the caller, not here.  (Note: the parameter name `input`
    shadows the builtin of the same name.)
    """
    chat_history.append({"role": "user", "content": f"{input}"})
    chat = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_history
    )
    return chat
def start_conversation():
    """Interactive chat REPL; prints the bot reply and token usage per turn.

    Runs forever (interrupt with Ctrl-C) and keeps a running token total.
    """
    print("Hi, I'm a chatbot. How can I help you today?")
    token_count = 0
    while True:
        user_input = input("> ")
        prompt = f"{user_input}"
        response = bot_message(prompt)
        role = response.choices[0].message.role
        answer = response.choices[0].message.content
        return_message = f"BOT : {answer}"
        # Record the assistant's reply so the next request has full context.
        history_message = {"role": f'{role}', "content": f"{answer}"}
        chat_history.append(history_message)
        # Token accounting for this exchange and for the whole session.
        completion_token = response.usage.completion_tokens
        prompt_token = response.usage.prompt_tokens
        used_tokens = completion_token + prompt_token
        token_count = token_count + used_tokens
        token_message = f"In this conversation, you use {used_tokens} tokens. Completion : {completion_token}, Prompt : {prompt_token}"
        total_token_message = f"You used {token_count} Tokens"
        print(return_message)
        print(token_message)
        print(total_token_message)
start_conversation() | ingyunson/gpt_chatbot | GPT_chatbot.py | GPT_chatbot.py | py | 1,411 | python | en | code | 0 | github-code | 36 |
18583370851 | from pygame.locals import *
import figures
import pygame
import random
import sys
types = [figures.Square, figures.Line]
class Game:
    """Minimal Tetris: one falling figure, a wall of settled blocks, line clearing.

    Grid bookkeeping:
      - `limit[x]`  : lowest free row in column x (rows grow downwards),
      - `wall[y]`   : {column: colour} of settled blocks on row y,
      - `lines[y]`  : number of still-empty cells on row y (0 == full line).
    """
    def __init__(self, window=(500, 500), speed=5, block=20, fps=20):
        pygame.init()
        pygame.display.set_caption("Tetrissss")
        self.WINDOW_SIZE = window
        self.SPEED = speed
        self.BLOCK = block
        self.FPS = fps
        # Compute the grid dimensions and the pixel address of every cell.
        self.nb_blocks = ((self.WINDOW_SIZE[0] - 1) // (self.BLOCK + 1), (self.WINDOW_SIZE[1] - 1) // (self.BLOCK + 1))
        self.padd = ((self.WINDOW_SIZE[0] - 1) % (self.BLOCK + 1), (self.WINDOW_SIZE[1] - 1) % (self.BLOCK + 1))
        self.grid_x = {i: (self.padd[0] // 2) + 2 + i * (self.BLOCK + 1) for i in range(self.nb_blocks[0])}
        self.grid_y = {i: (self.padd[1] // 2) + 2 + i * (self.BLOCK + 1) for i in range(self.nb_blocks[1])}
        self.clock = pygame.time.Clock()
        self.clock.tick(self.FPS)
        self.surface = pygame.display.set_mode(self.WINDOW_SIZE)
        # Instantiate the first figure and the wall-tracking variables.
        self.fig = random.choice(types)(self, self.BLOCK, 5, 0)
        self.fig.draw()
        self.limit = {i: self.nb_blocks[1] - 1 for i in range(self.nb_blocks[0])}
        self.wall = {y: {} for y in range(self.nb_blocks[1])}
        self.lines = {i: self.nb_blocks[0] for i in range(self.nb_blocks[1])}
        self.playing = True
    def draw_grid(self):
        """Clear the screen and draw the background grid lines."""
        self.surface.fill((0, 0, 0))
        curs = (self.padd[0] // 2) + 1
        for _ in range(self.nb_blocks[0] + 1):
            pygame.draw.line(self.surface, (20, 20, 20), (curs, self.padd[1] // 2),
                             (curs, self.WINDOW_SIZE[1] - (self.padd[1] // 2 + self.padd[1] % 2)))
            curs += self.BLOCK + 1
        curs = (self.padd[1] // 2) + 1
        for _ in range(self.nb_blocks[1] + 1):
            pygame.draw.line(self.surface, (20, 20, 20), (self.padd[0] // 2, curs),
                             (self.WINDOW_SIZE[0] - (self.padd[0] // 2 + self.padd[0] % 2), curs))
            curs += self.BLOCK + 1
    def block_to_wall(self):
        """Merge a figure that finished falling into the wall of blocks."""
        for block in self.fig.get_top():
            self.limit[block[0]] = block[1] - 1
            if block[1] <= 2:
                self.playing = False  # TODO : game-over handling
        full_lines = []
        for block in self.fig.get_blocks():
            self.wall[block[1]][block[0]] = self.fig.color
            self.lines[block[1]] -= 1
            if self.lines[block[1]] == 0:
                full_lines.append(block[1])
        if len(full_lines) > 0:
            # Delete from the bottom up; the +i offset compensates for the
            # shift introduced by each previous deletion.
            full_lines.sort(reverse=True)
            for i in range(len(full_lines)):
                self.del_line(full_lines[i] + i)
            # TODO : bonus when several lines are completed at once
        del self.fig
        # Randomly instantiate the next figure.
        self.fig = random.choice(types)(self, self.BLOCK, 5, 0)
    def del_line(self, y):
        """Remove a completed line, shifting every row above it one step down."""
        for x, val in self.limit.items():
            self.limit[x] += 1
        toDel = []
        iterate = True
        while iterate:
            self.lines[y] = self.lines[y - 1]
            for bl, col in self.wall[y].items():
                if bl in self.wall[y - 1].keys():
                    self.wall[y][bl] = self.wall[y - 1][bl]
                else:
                    toDel.append((y, bl))
            y -= 1
            # Stop once an entirely empty row has been propagated.
            if self.lines[y] == self.nb_blocks[0]:
                iterate = False
        for blY, blX in toDel:
            del self.wall[blY][blX]
    def game_loop(self):
        """Main loop: render, process input, apply gravity until the game ends."""
        count = 0
        while self.playing:
            pygame.display.update()
            self.draw_grid()
            # When the figure reaches the ground its blocks join the wall and
            # the column limits are recomputed.
            if not self.fig.falling:
                self.block_to_wall()
            # Draw the current figure and the wall of settled blocks.
            self.fig.draw()
            for y, bl in self.wall.items():
                for x, col in bl.items():
                    pygame.draw.rect(self.surface, col, (self.grid_x[x], self.grid_y[y], self.BLOCK, self.BLOCK))
            # User commands: arrows move/drop, space rotates, UP hard-drops.
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    if event.key == K_LEFT:
                        self.fig.move(-1)
                    elif event.key == K_RIGHT:
                        self.fig.move(1)
                    elif event.key == K_DOWN:
                        self.fig.fall()
                    elif event.key == K_UP:
                        while self.fig.falling:
                            self.fig.fall()
                    elif event.key == K_SPACE:
                        self.fig.turn()
            # Gravity: the figure falls one step every SPEED frames.
            if count < self.SPEED:
                count += 1
            elif count == self.SPEED:
                self.fig.fall()
                count = 0
            self.clock.tick(self.FPS)
if __name__ == "__main__":
    # Entry point: start a small-window game for quick testing.
    game = Game(window=(230, 300))
    game.game_loop()
| Clapsouille/Tetris | main.py | main.py | py | 5,526 | python | en | code | 0 | github-code | 36 |
694411018 | import datetime
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
# DAG defaults: the start date must lie in the past for runs to be schedulable.
default_dag_args = {
    # https://airflow.apache.org/faq.html#what-s-the-deal-with-start-date
    'start_date': datetime.datetime(2020, 4, 27)
}
# BigQuery dataset/table targeted by the pipeline.
dataset = 'airflow'
table = 'Country'
# Shell fragments for the bq CLI; table_cmd/insert_cmd are later wrapped in
# single quotes and appended to query_cmd.
query_cmd = 'bq query --use_legacy_sql=false '
table_cmd = 'create or replace table ' + dataset + '.' + table + '(id int64, name string)'
insert_cmd = 'insert into ' + dataset + '.' + table + '(id, name) values(1, "\'"USA"\'")'
# Manually triggered DAG (schedule_interval=None): create dataset -> create
# table -> insert one row, each step as a bq shell command.
with models.DAG(
        'country2',
        schedule_interval=None,
        default_args=default_dag_args) as dag:
    create_dataset = BashOperator(
        task_id='create_dataset',
        bash_command='bq --location=US mk --dataset ' + dataset)
    create_table = BashOperator(
        task_id='create_table',
        bash_command=query_cmd + "'" + table_cmd + "'",
        # Run even if dataset creation failed (e.g. the dataset already exists).
        trigger_rule='all_done')
    insert_row = BashOperator(
        task_id='insert_row',
        bash_command=query_cmd + "'" + insert_cmd + "'",
        trigger_rule='one_success')
create_dataset >> create_table >> insert_row | cs327e-spring2020/snippets | country2.py | country2.py | py | 1,245 | python | en | code | 2 | github-code | 36 |
43165322693 | # Python code to check if a file exist in a folder
# whose name is equal to the row data of csv file.
# Importing required libraries
import os
import csv
# Opening CSV file in a variable.
# Scan the sample-video folder once per CSV row and report any .mp4 file whose
# base name equals the row's hex colour value (without the leading '#').
with open("Python\\Colors.csv", newline='') as f:
    # Read the CSV rows as dictionaries keyed by the header row.
    ereader = csv.DictReader(f)
    for row in ereader:
        a = row['name']   # colour name (currently unused)
        b = row['hex']    # colour hex code, e.g. '#ff00aa'
        for filename in os.listdir("Python\\Sample_videos"):
            # Fixes: use short-circuit `and` instead of bitwise `&`, and drop
            # the redundant `else: continue` branch.
            if filename.endswith(".mp4") and filename[:-4] == b[1:]:
                print("File named",filename,"found in dir.")
| WaliKhan09/Programming | Python/File_CSV_loop.py | File_CSV_loop.py | py | 973 | python | en | code | 1 | github-code | 36 |
11378302691 | from functools import reduce
# Read the sample size and the space-separated observations from stdin.
n = int(input())
data = list(map(int, input().split()))
def mean(data):
    """Arithmetic mean of a non-empty numeric sequence.

    Uses the builtin sum instead of the original reduce/lambda chain.
    """
    return sum(data) / len(data)
def stdev(data):
    """Population standard deviation of a non-empty sequence, rounded to 1 dp.

    Replaces the original reduce/map/lambda pipeline with direct sum
    expressions (same result, clearer and self-contained).
    """
    mu = sum(data) / len(data)
    variance = sum((x - mu) ** 2 for x in data) / len(data)
    return round(variance ** 0.5, 1)
# Print the population standard deviation rounded to one decimal place.
print(stdev(data))
| scouvreur/hackerrank | 10-days-of-statistics/python/day_1_3.py | day_1_3.py | py | 427 | python | en | code | 1 | github-code | 36 |
10762352030 | import os
import cv2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from handy_msgs.msg import Float32Stamped
import readchar
import json
class MyNode(Node):
    """ROS2 node that replays images from disk on a camera topic and publishes
    the GPS distance stored in each image's companion .json file."""
    def __init__(self, name, published_image):
        super().__init__(name)
        self.image_publisher = self.create_publisher(Image, published_image, 10)
        self.dist_publisher = self.create_publisher(Float32Stamped, '/gps/distance', 10)
        self.bridge = CvBridge()
        # Parallel lists: image_paths[i] pairs with gps_paths[i].
        self.image_paths = []
        self.gps_paths = []
    def load_image_paths(self, folder_path):
        """Collect .jpg/.png files from folder_path (sorted case-insensitively)
        and derive the matching .json metadata path for each image."""
        for file_name in os.listdir(folder_path):
            if file_name.endswith('.jpg') or file_name.endswith('.png'):
                self.image_paths.append(os.path.join(folder_path, file_name))
        new_array = sorted(self.image_paths, key=lambda name: name.lower())
        for filename in new_array:
            name, ext = os.path.splitext(filename)
            new_filename = name + '.json'
            self.gps_paths.append(new_filename)
        self.image_paths = new_array
    def publish(self, index, distance):
        """Publish image number `index`; when `distance` is truthy, also publish
        the 'DIST' value from its .json file with the same header stamp."""
        file_name = self.image_paths[index]
        image = cv2.imread(file_name)
        ros_image = self.bridge.cv2_to_imgmsg(image, 'bgr8')
        self.image_publisher.publish(ros_image)
        with open(self.gps_paths[index]) as f:
            data = json.load(f)
            json_str = json.dumps(data)
            self.get_logger().info(f"JSON {json_str}")
            if distance:
                new_msg = Float32Stamped()
                # Share the image header so the two messages are time-aligned.
                new_msg.header = ros_image.header
                new_msg.data = float(data['DIST'])
                self.dist_publisher.publish(new_msg)
        self.get_logger().info(f"Published image {file_name}")
self.get_logger().info(f"Published image {file_name}")
def main(args=None):
    """Key-driven replay: SPACE publishes the next front/back image pair,
    'r' rewinds to the start, 'b' steps back one pair."""
    rclpy.init(args=args)
    folder_path = '/doc/DATA/R4C/data/Cam/20230223143203'
    # NOTE(review): both nodes are created with the same node name
    # 'image_player' -- confirm this is acceptable in this setup.
    front_node = MyNode('image_player', '/Pioneer3at/camera_front')
    back_node = MyNode('image_player', '/Pioneer3at/camera_back')
    front_node.load_image_paths(folder_path + '/RGB-18443010C1A2DF0F00')
    back_node.load_image_paths(folder_path + '/RGB-18443010B1F4DE0F00')
    counter = 0
    while rclpy.ok():
        key = readchar.readkey()
        if key == ' ':
            front_node.publish(counter, distance=True)
            back_node.publish(counter, distance=True)
            counter += 1
        if key == 'r':
            counter = 0
        if key == 'b':
            # NOTE(review): repeated 'b' presses can drive counter negative,
            # which silently indexes from the end of the lists -- verify.
            counter -= 2
    # NOTE(review): back_node is never destroyed here -- confirm intended.
    front_node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main() | bresilla/webo | webots_ros2_pioneer3at/webots_ros2_pioneer3at/utils/image_player.py | image_player.py | py | 2,641 | python | en | code | 0 | github-code | 36 |
950544652 | pkgname = "grilo"
pkgver = "0.3.16"
pkgrel = 0
build_style = "meson"
configure_args = [
"-Denable-introspection=true",
"-Denable-vala=true",
"-Denable-gtk-doc=false",
"-Dsoup3=true",
]
hostmakedepends = [
"meson",
"pkgconf",
"gobject-introspection",
"glib-devel",
"gettext",
"vala",
]
makedepends = [
"glib-devel",
"libxml2-devel",
"libsoup-devel",
"gtk+3-devel",
"liboauth-devel",
"totem-pl-parser-devel",
]
pkgdesc = "Framework for media discovery"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://wiki.gnome.org/Projects/Grilo"
source = f"$(GNOME_SITE)/{pkgname}/{pkgver[:-3]}/{pkgname}-{pkgver}.tar.xz"
sha256 = "884580e8c5ece280df23aa63ff5234b7d48988a404df7d6bfccd1e77b473bd96"
@subpackage("grilo-devel")
def _devel(self):
return self.default_devel()
| chimera-linux/cports | main/grilo/template.py | template.py | py | 865 | python | en | code | 119 | github-code | 36 |
73581418663 | from flask import Flask, render_template, jsonify
from database import engine, text, load_job_from_db
app = Flask(__name__)
def load_jobs_from_db():
    """Return every row of the `jobs` table as a list of dicts.

    Best-effort: any database error is printed and an empty list is returned
    instead of propagating the exception.  (Not to be confused with the
    imported single-row helper `load_job_from_db`.)
    """
    try:
        with engine.connect() as conn:
            result = conn.execute(text("select * from jobs"))
            column_names = result.keys()
            jobs = []
            for row in result.all():
                jobs.append(dict(zip(column_names, row)))
            return jobs
    except Exception as e:
        print(f"An error occurred while loading jobs from database: {e}")
        return []
@app.route("/")
def hello():
job_list = load_jobs_from_db()
return render_template('home.html', jobs=job_list)
@app.route("/api/<id>")
def show_job(id):
job_list = load_job_from_db(id)
if not job_list:
return "Not found", 404
return render_template('jobpage.html', job=job_list)
@app.route("/api/jobs")
def list_jobs():
job_list = load_jobs_from_db()
return jsonify(job_list)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| Maulikdavra/mdfinance-carrer-webiste-v2 | app.py | app.py | py | 974 | python | en | code | 0 | github-code | 36 |
11394835592 | '''
Script to export PASCAL VOC 2012 annotation data in VIA format
Author: Abhishek Dutta <adutta@robots.ox.ac.uk>
12 Apr. 2018
'''
import xmltodict
import os
import json
base_dir = '/data/datasets/voc2012/VOCdevkit/VOC2012/'
img_dir = os.path.join(base_dir, 'JPEGImages/')
ann_dir = os.path.join(base_dir, 'Annotations')
set_dir = os.path.join(base_dir, 'ImageSets', 'Main')
def get_via_fileid(filename, filesize):
  '''VIA file identifier: the filename concatenated with its size in bytes.'''
  return '{}{}'.format(filename, filesize)
def get_file_size(filename):
  '''Size of the file at `filename`, in bytes.'''
  return os.stat(filename).st_size
def get_region_attributes(d):
  '''Convert one PASCAL VOC <object> dict into a VIA region record.

  The bounding box (if present) becomes a VIA "rect" shape; the name, pose,
  truncated and difficult fields (if present) are copied into the region
  attributes.
  '''
  region = {'shape_attributes': {}, 'region_attributes': {}}
  bbox = d.get('bndbox')
  if bbox is not None:
    x0 = int(float(bbox['xmin']))
    y0 = int(float(bbox['ymin']))
    x1 = int(float(bbox['xmax']))
    y1 = int(float(bbox['ymax']))
    region['shape_attributes'] = {'name': 'rect', 'x': x0, 'y': y0,
                                  'width': x1 - x0, 'height': y1 - y0}
  for key in ('name', 'pose', 'truncated', 'difficult'):
    if key in d:
      region['region_attributes'][key] = d[key]
  return region
def voc_xml_to_json(fn):
  '''Parse one PASCAL VOC annotation XML file into a VIA image record.

  Returns {image_id: metadata} where image_id comes from get_via_fileid and
  the metadata carries file attributes plus one region per annotated object.
  '''
  print(fn)
  with open(fn) as f:
    d = xmltodict.parse(f.read())
    d = d['annotation']
    img_fn = d['filename']
    img_path = os.path.join(img_dir, img_fn)
    img_size = get_file_size(img_path)
    img_id = get_via_fileid(img_fn, img_size)
    js = {}
    js[img_id] = {}
    js[img_id]['fileref'] = img_path
    js[img_id]['size'] = img_size
    js[img_id]['filename'] = img_fn
    js[img_id]['base64_img_data'] = ''
    # File-level attributes copied over when present in the XML.
    fa = {}
    if 'source' in d:
      if 'database' in d['source']:
        fa['database'] = d['source']['database']
      if 'annotation' in d['source']:
        fa['annotation'] = d['source']['annotation']
      if 'image' in d['source']:
        fa['image'] = d['source']['image']
    if 'size' in d:
      if 'width' in d['size']:
        fa['width'] = d['size']['width']
      if 'height' in d['size']:
        fa['height'] = d['size']['height']
      if 'depth' in d['size']:
        fa['depth'] = d['size']['depth']
    if 'segmented' in d:
      fa['segmented'] = d['segmented']
    js[img_id]['file_attributes'] = fa
    js[img_id]['regions'] = []
    # xmltodict yields a list for multiple <object> elements but a plain dict
    # when there is only one -- handle both shapes.
    if isinstance(d['object'], list):
      region_count = len(d['object'])
      for i in range(0, region_count):
        ri = get_region_attributes( d['object'][i] )
        js[img_id]['regions'].append(ri)
    else:
      r = get_region_attributes( d['object'] )
      js[img_id]['regions'].append(r)
    return js
# Write all annotations as a single VIA project file (a JS string literal).
outjson_fn = '/data/datasets/via/import/pascal_voc/_via_project_pascal_voc2012_import.js'
outjson_f = open(outjson_fn, 'w')
outjson_f.write('var via_project_pascal_voc2012 = \'{"_via_settings":{}, "_via_attributes":{}, "_via_img_metadata":{')
first = True
for file in os.listdir(ann_dir):
  if file.endswith(".xml"):
    file_path = os.path.join(ann_dir, file)
    js = voc_xml_to_json(file_path)
    js_str = json.dumps(js)
    if not first:
      outjson_f.write( "," ) # separate consecutive image records with a comma
    else:
      first = False
    outjson_f.write( js_str[1:-1] ) # remove first and last curley braces
outjson_f.write("}}\';")
outjson_f.close()
print('\nWritten everything to {}'.format(outjson_fn))
| ox-vgg/via | via-2.x.y/scripts/import/pascal_voc/exp_annotations.py | exp_annotations.py | py | 3,496 | python | en | code | 184 | github-code | 36 |
73088171303 | # -*- coding: utf-8 -*-
from fabric.api import *
import os
# Remote host and SSH credentials used by Fabric for every task below.
#env.hosts = ['catchment.niva.no']
env.hosts=['35.242.200.24']
env.user='jose-luis'
env.key_filename='/home/jose-luis/.ssh/fimexKeys/jose-luis'
# Role definitions so tasks can target a specific host group via `.roles`.
env.roledefs={'ncquery':['35.242.200.24'],
              'basin': ['catchment.niva.no'], #
              }
# NOTE(review): a module-level `global` statement has no effect, and `path`
# and `file` are never defined in this module.
global path, file
#------------------------------------------------------------------------------------------------------------
#Setting up virtual machine with necessary dependencies to install fimex
def whoAmI():
    """Connectivity sanity check: print remote kernel info and user name."""
    run('uname -a')
    run ('whoami')
def updateMachine():
    """Refresh the apt package index on the remote host."""
    run('sudo apt-get update')
def installUtilities():
    """Install compilers and netCDF/command-line tools needed by the jobs."""
    run('yes | sudo apt-get install gcc g++ gfortran cmake make git libnetcdf-dev libnetcdff-dev netcdf-bin xmlstarlet tmux unzip python3-netcdf4 cdo parallel nco')
def installFimex():
    """Install fimex 0.66 from the met-norway PPA and symlink it as `fimex`."""
    run('echo | sudo add-apt-repository ppa:met-norway/fimex && sudo apt-get update && yes | sudo apt-get install fimex-0.66-bin libfimex-dev fimex-0.66-dbg && sudo ln -s /usr/bin/fimex-0.66 /usr/bin/fimex')
def installGcsfuse():
    """Add Google's apt repository and install gcsfuse on the remote host."""
    run('''export GCSFUSE_REPO=gcsfuse-`lsb_release -c -s` &&  
    echo "deb http://packages.cloud.google.com/apt $GCSFUSE_REPO main" | sudo tee /etc/apt/sources.list.d/gcsfuse.list && 
    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - && 
    sudo apt-get update && 
    sudo apt-get install gcsfuse 
    ''')
#------------------------------------------------------------------------------------------------------------
#Getting netcdf files from metno openDAP server and processing them
def getFromList(fileList,box,path,outputFile,var):
    """Fetch netCDF subsets on the remote host.

    Recreates `path` remotely (NOTE: rm -rf wipes it first), uploads the file
    list and getsubset.sh, then runs getsubset.sh to extract variable `var`
    over bounding box `box` into `outputFile`.
    """
    run('''rm -rf {}'''.format(path))
    run('''mkdir -p {}'''.format(path))
    put(fileList, path)
    put('getsubset.sh',path)
    run('''cd {} && chmod +x getsubset.sh && ./getsubset.sh '{}' '{}' '{}' '{}'  '''.format(path,fileList,box,outputFile,var))
@task
def installDependencies():
    """Fabric task: provision the 'ncquery' host with build/processing deps."""
    updateMachine.roles=('ncquery',)
    installUtilities.roles=('ncquery',)
    installFimex.roles=('ncquery',)
    # NOTE(review): roles are assigned here but execute(installGcsfuse) is never
    # called below, so gcsfuse is NOT actually installed — confirm intentional.
    installGcsfuse.roles=('ncquery',)
    execute(updateMachine)
    execute(installUtilities)
    execute(installFimex)
@task
def getDataForBasin(fileList,box,path,outputFile,var):
    """Fabric task: run getFromList on the 'ncquery' host for one subset request."""
    getFromList.roles=('ncquery',)
    execute(getFromList,fileList,box,path,outputFile,var)
| Lecheps/VansjoModelling | fabfile.py | fabfile.py | py | 2,424 | python | en | code | 0 | github-code | 36 |
4014322782 | import unittest
import skill_tree
class TestSkillTree(unittest.TestCase):
    """Unit tests for skill_tree.solution (Programmers level-2 'skill tree')."""

    def test_skill_tree1(self):
        # Exactly two of the four skill trees respect the 'CBD' prerequisite order.
        self.assertEqual(
            skill_tree.solution('CBD', ['BACDE', 'CBADF', 'AECB', 'BDA']), 2)


if __name__ == '__main__':
    unittest.main()
| ThreeFive85/Algorithm | Programmers/level2/skillTree/test_skill_tree.py | test_skill_tree.py | py | 273 | python | en | code | 1 | github-code | 36 |
9294254942 | ## This module
from StreamAnimations import utils, sprite
from StreamAnimations.sprite import hitbox
from StreamAnimations.canvases import SinglePageCanvas
from StreamAnimations.engine.renderers.gif import GifRenderer
from StreamAnimations.engine import utils as engineutils
from StreamAnimations.systems import twodimensional
## Builtin
import pathlib
import random
ROOT = pathlib.Path(__file__).resolve().parent
OUTDIR = ( ROOT / "output").resolve()
OUTDIR.mkdir(exist_ok = True)
SAMPLEDIR = (ROOT / "samples").resolve()
SPRITESIZE = 32
CANVASSIZE = 384, 216
BASEHEIGHT = 10
def load_walk():
    """Load the walk-cycle sprite sheets and return frame lists keyed by facing.

    "left" has no sheet on disk; it is produced by mirroring the "right" frames.
    """
    up_sheet = utils.import_spritesheet((SAMPLEDIR / "Walk Up.png").resolve())
    down_sheet = utils.import_spritesheet((SAMPLEDIR / "Walk Down.png").resolve())
    right_sheet = utils.import_spritesheet((SAMPLEDIR / "Walk Right.png"))
    directions = {
        "up": utils.split_spritesheet(up_sheet, SPRITESIZE, SPRITESIZE),
        "down": utils.split_spritesheet(down_sheet, SPRITESIZE, SPRITESIZE),
        "right": utils.split_spritesheet(right_sheet, SPRITESIZE, SPRITESIZE),
    }
    directions["left"] = utils.mirror_sprite(directions['right'])
    return directions
def load_printer():
    """Return the printer's "idle" animation: frames from the Prusa Z1..Z5 sheets."""
    frames = []
    for level in range(1, 6):
        sheet_path = (SAMPLEDIR / f"Prusa Z{level}.png").resolve()
        frames.extend(
            utils.split_spritesheet(utils.import_spritesheet(sheet_path),
                                    SPRITESIZE, SPRITESIZE))
    return {"idle": frames}
def load_desk():
    """Return the desk's single "idle" animation (32x45-pixel frames)."""
    sheet = utils.import_spritesheet((SAMPLEDIR / "Desk-1.png").resolve())
    return {"idle": utils.split_spritesheet(sheet, SPRITESIZE, 45)}
def load_desk_text():
    """Return the monitor-text overlay's "idle" animation (32x21-pixel frames)."""
    sheet = utils.import_spritesheet((SAMPLEDIR / "stream.png").resolve())
    return {"idle": utils.split_spritesheet(sheet, SPRITESIZE, 21)}
## SCENE SETUP
# Player sprite: 4-way walk cycle; "idle" is the first down-facing frame.
walk = load_walk()
me = twodimensional.Sprite2D(directionalsprites= walk, hitboxes = [], animations = {"idle":[walk['down'][0],]})
# Hitboxes are thin rectangles anchored at the sprite's bottom-left ("bl"),
# so collisions are judged at the characters' feet.
mehitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(me.get_image().width, BASEHEIGHT),anchor="bl")
me.add_hitbox(mehitbox)
printer = sprite.StationarySprite(animations=load_printer())
printerhitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(printer.get_image().width, BASEHEIGHT//2), anchor="bl")
printer.add_hitbox(printerhitbox)
desk = sprite.StationarySprite(animations= load_desk())
deskhitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(desk.get_image().width, BASEHEIGHT),anchor="bl")
desk.add_hitbox(deskhitbox)
# Screen overlay attached to the desk (cosmetic: no hitbox of its own)
monitortext = sprite.CosmeticSprite(animations= load_desk_text(), offset = (12, 12), parent = desk)
canvas = SinglePageCanvas(CANVASSIZE, SPRITESIZE // 4)
canvas.add_listener("movement", engineutils.collision_stop_rule)
canvas.add_sprite(me, (50, 70))
canvas.add_sprite(printer, (80,80))
canvas.add_sprite(monitortext, (0,0))
canvas.add_sprite(desk, (50, 50))
## ANIMATION
renderer = GifRenderer(canvas, sorter= twodimensional.twod_sprite_sorter)
with renderer.frame(): pass
# Scripted walk path: one direction is consumed per frame until exhausted.
path = ["right","right","right","right","right", "up", "up", "up", "up","left","left","left","left","left"]
for i in range(100):
    with renderer.frame() as frame:
        #frame.move_sprite(me, random.choice(twodimensional.TwoDimensional_4Way.directions()))
        if path: frame.move_sprite(me, path.pop(0))
        # Freeze the printer animation once it reaches its last frame
        if(printer.animations.is_last_frame()): printer.animations.pause()
renderer.save((OUTDIR / "map2.gif"), 10, scale = 5)
#coding=gbk
# Improved file-backup script: backups go into a sub-folder named after today's
# date; the zip file is named after the current time, with an optional
# user-supplied comment appended to the file name.
import os
import time
# Directories to back up
source=[r'C:\Users\zk\Pictures', r'"C:\Users\zk\Documents\WeChat Files\zhengknight\Files"']
# Backup destination root
target_dir=r'e:\backup'
# Create the destination root if it does not exist
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
# Today's sub-directory, e.g. e:\backup\20240101
today=target_dir + os.sep + time.strftime('%Y%m%d')
# Current time, used as the base of the backup file name
now = time.strftime('%H%M%S')
# Ask the user for an optional comment (prompt is intentionally in Chinese)
comment = input('请输入备注信息-->')
# Plain timestamp name when no comment was given; otherwise time_comment.zip
if len(comment)==0:
    target=today + os.sep + now + '.zip'
else:
    # (the editor inserted the backslash line continuation automatically)
    target = today + os.sep + now + '_'\
            +comment.replace(' ','_') + '.zip'
# Create today's folder if missing
if not os.path.exists(today):
    os.mkdir(today)
    print('当日文件夹目录创建成功',today)
# Build the zip command line: zip -r <target> <sources...>
zip_command = 'zip -r {0} {1}'.format(target,' '.join(source))
print('zip_command')
print(zip_command)
print('Running')
# os.system returns 0 on success
if os.system(zip_command)==0:
    print('已成功备份到',target)
else:
    print('备份失败!!!')
| zhengknight/pyExise | backup_ver3.py | backup_ver3.py | py | 1,146 | python | zh | code | 0 | github-code | 36 |
13285114730 | from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from pathlib import Path
# Column map for the CoNLL-style data files: column 0 = token, column 1 = label
columns={0: 'text', 1: 'label'}
dataPath='../data/'
test_file=input("Test data file name:")
# Build the corpus; train/dev files are fixed, the test file is user-supplied
corpus: Corpus = ColumnCorpus(dataPath, columns, train_file='train_data.txt', test_file=test_file, dev_file='test_data.txt')
tag_type='label'
tag_dict=corpus.make_tag_dictionary(tag_type=tag_type)
print(tag_dict)
# GloVe word embeddings only (stacked so more embeddings can be added easily)
embedding_types = [WordEmbeddings('glove')]
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
# BiLSTM-CRF sequence tagger over the corpus label dictionary
tagger: SequenceTagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dict,
                        tag_type=tag_type, use_crf=True)
str_path = input("path to saved model:")
path = Path(str_path)
# Evaluate the saved model on the test split
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.final_test(path, eval_mini_batch_size=32)
42898520329 | print("Will I be printed in main when called? -Yes!")
# print(__name__) # 'module' if a file is imported as a module, or __main__ if it's executed directly
"""
Personal/Private variable.
Unlike many other programming languages, Python has no means of allowing you to hide such variables
from the eyes of the module's users.
You can only inform your users that this is your variable, that they may read it,
but that they should not modify it under any circumstances.
This is done by preceding the variable's name with _ (one underscore) or __ (two underscores),
but remember, it's only a convention. Your module's users may obey it or they may not.
"""
# "Private" call counter (the leading __ is only a naming convention, as the
# module docstring explains); incremented once per call to suml()/prodl().
__counter = 0


def suml(the_list):
    """Return the sum of the numbers in *the_list* (0 for an empty list)."""
    global __counter
    __counter += 1
    # built-in sum() replaces the original manual accumulation loop
    return sum(the_list)


def prodl(the_list):
    """Return the product of the numbers in *the_list* (1 for an empty list)."""
    global __counter
    __counter += 1
    prod = 1
    for element in the_list:
        prod *= element
    return prod
"""
the functions defined inside the module (suml() and prodl()) are available for import;
we've used the __name__ variable to detect when the file is run stand-alone, and seized this
opportunity to perform some simple tests
"""
if __name__ == "__main__":
print("I prefer to be a module, but I can do some tests for you.")
my_list = [i+1 for i in range(5)]
print(suml(my_list) == 15)
print(prodl(my_list) == 120)
"""
#! /usr/bin/env python3
the line starting with { #! } has many names - it may be called shabang, shebang, hashbang, poundbang or
even hashpling (don't ask us why). The name itself means nothing here - its role is more important.
From Python's point of view, it's just a comment as it starts with #. For Unix and Unix-like OSs
(including MacOS) such a line instructs the OS how to execute the contents of the file (in other
words, what program needs to be launched to interpret the text).
In some environments (especially those connected with web servers) the absence of that line will cause trouble;
a string (maybe a multiline) placed before any module instructions (including imports) is called
the doc-string, and should briefly explain the purpose and contents of the module;
""" | bacholabadze/Python_Essentials | Modules and Packages/modules/module.py | module.py | py | 2,225 | python | en | code | 0 | github-code | 36 |
7795693438 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 地牢逃脱.py
# @Author: smx
# @Date : 2020/2/16
# @Desc :
#
# 最终的题意是:从给定起点(一定为'.'),按照给定的若干跳跃(可以跨过障碍,但不可以落在'x'上),到达任意一个'.'的最小步骤次数集合中,选择一个最大的!
# 如果存在一个点'.'从起点始终无法抵达,则认为起点到该点的最小距离为无穷∞,则返回-1.
# 因此,从起点开始广度优先遍历到所有可达点,记录每个可达点的最小距离,将其存入集合中。
# 然后遍历集合寻找最大的距离。如果存在一个点'.'无法抵达,直接返回-1.
# 所以是假设重点是任意点,找到一点,其达到的最短路径最大
def BFS(m, n, matrix, x, y, arr, k):
    """Breadth-first search over an m x n dungeon grid.

    Starting from the free cell (x, y), repeatedly apply the k jump offsets in
    `arr` (jumps may fly over obstacles but must land on a '.' cell).  Returns
    the number of BFS levels (steps) needed until every reachable '.' cell has
    been visited, or -1 if some '.' cell can never be reached.

    Args:
        m, n:   grid dimensions (rows, columns).
        matrix: grid rows of '.' (free) / 'x' (blocked); rows are indexable.
        x, y:   start coordinates (assumed to be a '.' cell).
        arr:    list of k (dx, dy) jump offsets.
        k:      number of jump offsets in arr.
    """
    from collections import deque  # O(1) popleft instead of list.pop(0)

    queue = deque([(x, y)])
    visited = [[False] * n for _ in range(m)]
    visited[x][y] = True
    reached = 1                      # '.' cells reached so far (start included)
    steps = 0
    total_free = sum(matrix[i].count('.') for i in range(m))
    while queue:
        steps += 1
        for _ in range(len(queue)):  # process exactly one BFS level per pass
            cx, cy = queue.popleft()
            for dx, dy in arr[:k]:
                nx, ny = cx + dx, cy + dy
                if 0 <= nx < m and 0 <= ny < n and matrix[nx][ny] == '.' and not visited[nx][ny]:
                    queue.append((nx, ny))
                    visited[nx][ny] = True
                    reached += 1
                    if reached == total_free:
                        return steps
    return -1
if __name__ == '__main__':
    # m = rows, n = columns of the dungeon
    m, n = map(int, input().strip().split(' '))
    matrix = []
    # NOTE(review): this reads n rows although m is the row count — correct
    # only when the input is square (m == n); confirm against the judge's spec.
    for _ in range(n):
        matrix.append(list(input().strip()))
    # start position, then the number of jump vectors
    x, y = map(int, input().strip().split(' '))
    k = int(input().strip())
    arr = []
    for _ in range(k):
        arr.append(list(map(int, input().strip().split(' '))))
    ans = BFS(m, n, matrix, x, y, arr, k)
    print(ans)
| 20130353/Leetcode | target_offer/dfs+bfs+动态规划/DFS+BFS/地牢逃脱.py | 地牢逃脱.py | py | 1,973 | python | zh | code | 2 | github-code | 36 |
24924994804 | import sys
# Fast line reader bound to its own name instead of shadowing the built-in
# input() (the original did `input = sys.stdin.readline`).
read_line = sys.stdin.readline

n = int(read_line())
# Each record: [korean, english, math, name]
students = []
for _ in range(n):
    name, kor, eng, mat = read_line().split()
    students.append([int(kor), int(eng), int(mat), name])
# Sort: korean desc, english asc, math desc, then name asc (lexicographic).
students.sort(key=lambda s: (-s[0], s[1], -s[2], s[3]))
for student in students:
    print(student[3])
def possible_scores(roundNumbers, totalScore, teams, team1, team2, c, d, n, amount, history, history2):
    """Enumerate every remaining-match outcome and count outright wins for `teams`.

    Depth-first over the 3^len(roundNumbers) outcome tree (win/lose/draw per
    match), mutating `totalScore` in place and undoing via history stacks on
    backtrack.  One element is appended to `amount` for each full outcome in
    which `teams` strictly out-scores all three other teams.

    roundNumbers -- the rounds that are left to play ([teamA, teamB] pairs)
    totalScore   -- scores of all four teams (index 0 is an unused sentinel)
    teams        -- my favourite team's index (1..4)
    team1/team2  -- the teams of the match just applied; c/d their points
    n            -- how many remaining rounds have been applied (base case key)
    amount       -- accumulator: one entry per outcome where my team comes out on top
    history/history2 -- applied (team, points) pairs for team1/team2, used to undo
    """
    # apply this match's outcome to both teams and record it for backtracking
    totalScore[team1] += c
    totalScore[team2] += d
    history.append([team1, c])
    history2.append([team2, d])
    # base case: all remaining rounds applied -> check if my team beats all others
    if n == len(roundNumbers):
        x = 0
        for abc in range(0, 5):
            if abc == teams:
                pass
            elif totalScore[teams] > totalScore[abc]:
                x += 1
        # x counts how many of the other teams my team strictly out-scored;
        # the range is 0..4 because score[0] is a sentinel, so 4 == beat everyone
        # (score[0] starts at -10000 and can never win)
        if x == 4:
            amount.append(1)
        # dead end of this branch: undo the last applied match before returning
        totalScore[history[-1][0]] -= history[-1][1]
        totalScore[history2[-1][0]] -= history2[-1][1]
        del history[-1]
        del history2[-1]
        return
    else:
        # fetch the next match, then recurse over its three possible outcomes
        # (team1 wins 3-0, team2 wins 0-3, draw 1-1)
        abc = roundNumbers[n]
        team1 = abc[0]
        team2 = abc[1]
        c = 0
        d = 0
        n += 1
        possible_scores(roundNumbers, totalScore, teams, team1, team2, c + 3, d, n, amount, history, history2)
        possible_scores(roundNumbers, totalScore, teams, team1, team2, c, d + 3, n, amount, history, history2)
        possible_scores(roundNumbers, totalScore, teams, team1, team2, c + 1, d + 1, n, amount, history, history2)
        # all three outcomes of this subtree explored: undo this match's points
        totalScore[history[-1][0]] -= history[-1][1]
        totalScore[history2[-1][0]] -= history2[-1][1]
        del history[-1]
        del history2[-1]
# Driver: read played matches, then count favourable outcomes of the rest.
team = int(input())
played = int(input())
# All six round-robin pairings among teams 1..4
rounds = [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
# score[0] is a -10000 sentinel so index 0 can never "win" a comparison
score = [-10000, 0, 0, 0, 0]
# Apply each already-played match: remove it from `rounds` and award points
# (3 for a win, 1 each for a draw)
for i in range(0, played):
    qq = input()
    x_split = qq.split()
    y = [int(x_split[0]), int(x_split[1])]
    z = rounds.index(y)
    del rounds[z]
    if int(x_split[2]) > int(x_split[3]):
        score[y[0]] += 3
    elif int(x_split[3]) > int(x_split[2]):
        score[y[1]] += 3
    else:
        score[y[0]] += 1
        score[y[1]] += 1
# Seed values for the recursion: the first "applied match" is a 0-0 no-op
team1 = 0
team2 = 0
c = 0
d = 0
n = 0
amount = []
history = []
history2 = []
# Enumerate all outcomes of the remaining rounds
possible_scores(rounds, score, team, team1, team2, c, d, n, amount, history, history2)
print(len(amount))
| AAZZAZRON/DMOJ-Solutions | ccc13s3.py | ccc13s3.py | py | 3,399 | python | en | code | 1 | github-code | 36 |
# Read the current clock time (hours minutes seconds) and a number of seconds
# to add; print the wrapped-around result.
h, m, s = map(int, input().split())
# Distinct name instead of shadowing the built-in input() with an int
# (the original did `input = int(input())`).
extra_seconds = int(input())

# Carry seconds -> minutes -> hours, then wrap each unit into its range.
s += extra_seconds
m += s // 60
h += m // 60
s %= 60
m %= 60
h %= 24
print(h, m, s)
| monegit/algorithm-study | Training-Site/Baekjoon/2/2000/2530/2530.py | 2530.py | py | 155 | python | en | code | 0 | github-code | 36 |
7037221042 | from flask import Flask,render_template,request
import tweepy
import re
import pandas as pd
from tweepy import OAuthHandler
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
#======================================================= ======================================================
# Load the labelled sentiment dataset and keep only the label + text columns.
# NOTE(review): absolute path is machine-specific — consider a config/env var.
df = pd.read_csv("/home/saurabh/Sentiment_Analysis_Dataset.csv")
t = pd.DataFrame()
t['Sentiment'] = df.Sentiment
t['Text'] = df.SentimentText
#======================================================= ======================================================
# TF-IDF features (ASCII-folded, lowercased, English stop words removed),
# then train a multinomial Naive Bayes classifier at import time.
stop_words = set(stopwords.words("english"))
vectorizer = TfidfVectorizer(use_idf = True, lowercase = True , strip_accents = 'ascii' , stop_words = stop_words )
X = vectorizer.fit_transform(t.Text)
y = t.Sentiment
X_train,X_test,y_train,y_test = train_test_split(X,y)
clf = naive_bayes.MultinomialNB()
clf.fit(X_train,y_train)
#======================================================= ======================================================
def classifier(queries):
    """Search Twitter for `queries`, clean the tweets, and classify sentiment.

    Returns a DataFrame with columns "tweet" (raw text) and "Sentiment"
    (prediction from the module-level Naive Bayes model `clf`).
    """
    #===================================================================
    # Twitter API setup.
    # NOTE(review): credentials are hardcoded placeholders — load real keys
    # from environment variables / config, never commit them.
    query = queries
    tknzr=TweetTokenizer(strip_handles=True,reduce_len=True)
    consumer_key="YOUR_KEY"
    consumer_secret="YOUR SECRET_TOKEN"
    access_token="YOUR TOKEN"
    access_token_secret="TOKEN_SECRET"
    try:
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
        tweets_caught = api.search(q=query,count=5)
    except:
        # NOTE(review): bare except swallows the failure and execution
        # continues, so the loop below raises NameError on tweets_caught —
        # should re-raise or return an empty result instead.
        print("Error")
    #====================================================================
    #===========================cleaning tweet===========================
    count = 0
    text = []
    raw_tweet = []
    for tweet in tweets_caught:
        # tokenize and drop stop words
        clean_text = []
        words = tknzr.tokenize(tweet.text)
        for w in words:
            if w not in stop_words:
                clean_text.append(w)
        # NOTE(review): `str` shadows the built-in str() inside this loop
        str = " "
        for w in clean_text:
            str = str+w+" "
        # strip URLs from the reassembled text
        URLless_str = re.sub(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', '', str)
        # retweeted tweets are de-duplicated; original tweets are always kept
        if tweet.retweet_count > 0:
            if URLless_str not in text:
                text.append(URLless_str)
                raw_tweet.append(tweet.text)
                count = count+1
        else:
            text.append(URLless_str)
            raw_tweet.append(tweet.text)
            count = count + 1
    #
    #======================================================================
    # vectorize with the fitted TF-IDF vectorizer and predict sentiment
    text_vec = vectorizer.transform(text)
    Resultant_Sentiment = clf.predict(text_vec)
    answer = pd.DataFrame()
    answer["tweet"] = raw_tweet
    answer["Sentiment"] = Resultant_Sentiment
    return answer
#======================================================= ======================================================
app = Flask(__name__)


@app.route('/')
def dir1():
    """Landing page with the query form."""
    return render_template("profile.html")


@app.route('/sentiment' , methods = ['POST'])
def sentiment():
    """Handle the form POST: classify tweets for the query and render results."""
    queries = request.form['query']
    answer = classifier(queries)
    return render_template("sentiment.html",sentiments=answer)


if __name__ == '__main__':
    app.run()
#======================================================= ======================================================
| saurabhc104/sentiment_analysis | analysis.py | analysis.py | py | 3,670 | python | en | code | 0 | github-code | 36 |
16574355106 | import collections
import heapq
from typing import List
class Solution:
    def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
        """Return the maximum success probability of any start->end path.

        Dijkstra on probabilities: a max-heap (heapq min-heap over negated
        values) always expands the currently most-probable node, so the first
        time `end` is popped its probability is optimal.

        Args:
            n:        number of nodes (0..n-1).
            edges:    undirected edge list.
            succProb: succProb[i] is the success probability of edges[i].
            start, end: endpoint node ids.

        Returns 0 if `end` is unreachable from `start`.
        """
        adj = collections.defaultdict(list)
        for i, (src, dst) in enumerate(edges):
            adj[src].append((dst, succProb[i]))
            adj[dst].append((src, succProb[i]))

        pq = [(-1.0, start)]  # (negated path probability, node)
        vis = set()
        while pq:
            prob, curr = heapq.heappop(pq)
            # Fix: skip stale heap entries for already-finalized nodes; the
            # original re-expanded them, pushing duplicate work onto the heap.
            if curr in vis:
                continue
            vis.add(curr)
            if curr == end:
                return prob * -1
            for neig, edgeProb in adj[curr]:
                if neig not in vis:
                    heapq.heappush(pq, (prob * edgeProb, neig))
        return 0
41145576381 | from calendar import c
from pyexpat import model
from re import L
import numpy as np
import pandas as pd
# Load the raw COMPAS scores and the two-year recidivism dataset.
compas_scores_raw= pd.read_csv("compas_score_raw.csv", lineterminator='\n')
compas_scores_two_year= pd.read_csv("compas_scores_two_years.csv", lineterminator='\n')
print('-----------------Compas Scores Raw-----------------')
print('type',type(compas_scores_raw))
print('-----------------Compas Scores Two Year-----------------')
print('type',type(compas_scores_two_year))
#number of rows and columns
print('-----------------Compas Scores Raw-----------------')
print('shape',compas_scores_raw.shape)
print('-----------------Compas Scores Two Year-----------------')
print('shape',compas_scores_two_year.shape)
# filtering the data, dropping rows where:
# 1. the charge date was not within 30 days of arrest
# 2. c_charge_degree is missing ('O')
# 3. score_text is missing ('N/A')
# 4. is_recid is missing (-1 means missing)
print('-----------------Compas Scores two year-----------------')
# NOTE(review): 'two_year_recid\r' carries a trailing carriage return from the
# CSV — consider stripping column names on load instead.
df= compas_scores_two_year[[ 'age', 'c_charge_degree','race', 'age_cat', 'score_text', 'sex', 'priors_count', 'days_b_screening_arrest', 'decile_score', 'is_recid', 'c_jail_in', 'c_jail_out', 'v_decile_score','two_year_recid\r']]
print(np.shape(df))
df = df.loc[(df['days_b_screening_arrest'] <= 30) & (df['days_b_screening_arrest'] >= -30) & (df['is_recid'] != -1) & (df['c_charge_degree'] != 'O') & (df['score_text'] != 'N/A')]
print('shape of filtered data',df.shape)
#length of stay in jail, in whole days
df['length_of_stay'] = pd.to_datetime(df['c_jail_out']) - pd.to_datetime(df['c_jail_in'])
df['length_of_stay'] = df['length_of_stay'].astype('timedelta64[D]')
df['length_of_stay'] = df['length_of_stay'].astype(int)
print(df['length_of_stay'])
print('shape of filtered data',df.shape)
print('length of stay',df['length_of_stay'].describe())
#correlation between length of stay and decile score
print('correlation between length of stay and decile score',df['length_of_stay'].corr(df['decile_score']))
# Descriptive statistics: age, race shares, score-text counts, sex-by-race.
print('-----------------describe age-----------------')
print(df['age'].describe())
print('-----------------describe race----------------')
print(df['race'].describe())
print('-----------------race split-----------------')
race = ['African-American', 'Caucasian', 'Hispanic', 'Asian', 'Native American', 'Other']
# proportion of each race in the filtered data
for i in race :
    print( i,len(df[df['race']== i])/len(df['race']))
print ('-----------------describe score text----------------')
print('low ', len(df[df['score_text'] == 'Low']))
print('medium ', len(df[df['score_text'] == 'Medium']))
print('high ', len(df[df['score_text'] == 'High']))
#race and sex split: counts of males/females per race group
female = []
male = []
for i in race :
    temp = len(df[(df['race']== i) & (df['sex'] == 'Male')] )
    print(temp)
    male.append(temp)
    temp = len(df[(df['race']== i) & (df['sex'] == 'Female')])
    female.append(temp)
print(race)
print ('female', female)
print('male', male)
f = pd.crosstab(df['sex'], df['race'])
print('f',f)
# decile score distribution for African-American defendants
print('-----------------decile score for african american-----------------')
print(df[(df['race']) == 'African-American']['decile_score'].describe())
decile = [1,2,3,4,5,6,7,8,9,10]
# plot decile score histograms
import matplotlib.pyplot as plt
# side-by-side bar plots of decile score counts for African-American vs Caucasian
df_race_decile_score = df[['race', 'decile_score']]
df_african = df_race_decile_score[ df_race_decile_score['race'] == 'African-American']
df_caucasian = df_race_decile_score[ df_race_decile_score['race'] == 'Caucasian']
counts_decile_AA = []
counts_decile_C = []
temp = []
for i in decile:
    temp = len(df_african[df_african['decile_score'] == i])
    counts_decile_AA.append(temp)
    temp = len(df_caucasian[df_caucasian['decile_score'] == i])
    counts_decile_C.append(temp)
fig = plt.figure()
ax = fig.subplots(1,2)
ax[0].bar(decile, counts_decile_AA)
ax[0].set_title('African American')
ax[1].bar(decile, counts_decile_C)
ax[1].set_title('Caucasian')
ax[0].set_ylabel('Count')
ax[0].set_xlabel('Decile score')
ax[0].set_ylim(0, 650)
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Decile score')
ax[1].set_ylim(0, 650)
plt.show()
# plot violent decile score for African-American and Caucasian defendants
df_race_V_decile_score = df[['race', 'v_decile_score']]
df_african = df_race_V_decile_score[ df_race_V_decile_score['race'] == 'African-American']
df_caucasian = df_race_V_decile_score[ df_race_V_decile_score['race'] == 'Caucasian']
counts_decile_AA = []
counts_decile_C = []
temp = []
for i in decile:
    temp = len(df_african[df_african['v_decile_score'] == i])
    counts_decile_AA.append(temp)
    temp = len(df_caucasian[df_caucasian['v_decile_score'] == i])
    counts_decile_C.append(temp)
fig = plt.figure()
ax = fig.subplots(1,2)
ax[0].bar(decile, counts_decile_AA)
ax[0].set_title('African American')
ax[1].bar(decile, counts_decile_C)
ax[1].set_title('Caucasian')
ax[0].set_ylabel('Count')
ax[0].set_xlabel('Violent Decile score')
ax[0].set_ylim(0, 850)
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Violent Decile score')
ax[1].set_ylim(0, 850)
plt.show()
# create categorical factors for the logistic regression
df_c_charge_degree = df[['c_charge_degree']]
df_age_cat = df[['age_cat']]
df_race = df[['race']]
df_sex = df[['sex']]
df_age_race = df[['race']]
df_score = df[['score_text']]
# df_c_charge_degree = pd.get_dummies(df_c_charge_degree)
# print('head', df_c_charge_degree.head())
#labels, uniques = pd.factorize(df_c_charge_degree)
#factorize charge degree (felony vs misdemeanor)
crime_factor, u_charge_degree = pd.factorize(df_c_charge_degree['c_charge_degree'])
f_age_cat, u_age_cat= pd.factorize(df_age_cat['age_cat'])
#relevel age cat with reference = 1
f_age_cat = f_age_cat - 1
#factorize race as two boolean indicators (African-American, Caucasian)
f_race_AA, u_race_AA= pd.factorize(df_age_race['race'] == 'African-American')
f_race_C, u_race = pd.factorize(df_age_race['race'] == 'Caucasian')
#relevel race with reference = 3
print('----------------race----------------')
print("Numeric Representation : \n", f_race_AA)
print("Unique Values : \n", u_race_AA)
#factorize gender with male and female labels
f_gender, uniques_gender = pd.factorize(df_sex['sex'])
print("Numeric Representation : \n", f_gender)
print("Unique Values : \n", uniques_gender)
#factorize score text as a binary outcome: Low vs Medium/High
f_score_text, u_score_text = pd.factorize(df_score['score_text'] != 'Low')
print("Numeric Representation : \n", f_score_text)
print("size of f_score_text", len(f_score_text))
print("Unique Values : \n", u_score_text)
# assemble the design matrix from the factors
priors_count = df[['priors_count']]
two_year_recid = df[['two_year_recid\r']]
X = np.column_stack(( f_age_cat, crime_factor, f_race_AA, f_race_C, f_gender, priors_count, two_year_recid ))
# fit a binomial logistic regression explaining score text from the factors
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(penalty='l2', C=1)
model.fit(X, f_score_text)
ypred = model.predict(X)
#print summary
print('intercept', model.intercept_)
#print coefficients with corresponding factors
print('coefficients', model.coef_)
print('score', model.score(X, f_score_text))
#model accuracy (in-sample; no held-out evaluation here)
from sklearn.metrics import accuracy_score
print('accuracy', accuracy_score(f_score_text, ypred))
# GLM fit with statsmodels for a full coefficient summary table
import statsmodels.api as sm
model = sm.GLM(f_score_text, X, family=sm.families.Binomial())
results = model.fit()
print(results.summary())
| dansmith5764/A-study-of-fairness-in-transfer-learning | Compas/Parse_1.py | Parse_1.py | py | 7,675 | python | en | code | 0 | github-code | 36 |
21688080146 | # 2013 GCE A Level P1
"""
Task 1.1
"""
def main():
# read data from file and store in wordList
# wordList format: [(term,occ)]
wordList = []
inF = open('WORDS1.TXT', 'r')
term = inF.readline().rstrip()
while term != '':
occ = int(inF.readline().rstrip())
wordList.append((term, occ))
term = inF.readline().rstrip()
inF.close()
# compute the highest number of occurrrences
maxTerm, maxOcc = wordList[0] # initialise maxTerm and maxOcc with
# the 1st word and its occurrencs
for term, occ in wordList:
if occ > maxOcc:
maxOcc = occ
maxTerm = term
# output the term with the highest number of occurrences
print('Highest occurring term:', maxTerm)
main()
| ayangler/h2-comp | PYPs/2013 GCE A Level P1/Task 1.1.py | Task 1.1.py | py | 769 | python | en | code | 0 | github-code | 36 |
36947640459 | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/FortranCommon.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import re
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
def isfortran(env, source):
    """Return 1 if any node in *source* is built from a Fortran file, else 0."""
    try:
        fortran_suffixes = env['FORTRANSUFFIXES']
    except KeyError:
        # No FORTRANSUFFIXES means no fortran tool is loaded, so there is
        # nothing to detect.
        return 0
    if not source:
        # Source might be None for unusual cases like SConf.
        return 0
    for node in source:
        if not node.sources:
            continue
        _, ext = os.path.splitext(str(node.sources[0]))
        if ext in fortran_suffixes:
            return 1
    return 0
def _fortranEmitter(target, source, env):
    """Scan the Fortran source for MODULE definitions and add the resulting
    .mod files (in $FORTRANMODDIR) to the target list.

    Returns the (target, source) pair expected by SCons emitters; returns
    ([], []) when the source node cannot be located.
    """
    node = source[0].rfile()
    if not node.exists() and not node.is_derived():
        print("Could not locate " + str(node.name))
        return ([], [])
    # This has to match the def_regex in the Fortran scanner
    mod_regex = r"""(?i)^\s*MODULE\s+(?!PROCEDURE|SUBROUTINE|FUNCTION|PURE|ELEMENTAL)(\w+)"""
    cre = re.compile(mod_regex,re.M)
    # Find all modules DEFINED in this file (not USE'd ones)
    modules = cre.findall(node.get_text_contents())
    # De-duplicate the module list
    modules = SCons.Util.unique(modules)
    # Convert each module name to a .mod filename under $FORTRANMODDIR
    suffix = env.subst('$FORTRANMODSUFFIX', target=target, source=source)
    moddir = env.subst('$FORTRANMODDIR', target=target, source=source)
    modules = [x.lower() + suffix for x in modules]
    for m in modules:
        target.append(env.fs.File(m, moddir))
    return (target, source)
def FortranEmitter(target, source, env):
    """Static-object emitter: add module targets, then apply the default one."""
    target, source = _fortranEmitter(target, source, env)
    return SCons.Defaults.StaticObjectEmitter(target, source, env)
def ShFortranEmitter(target, source, env):
    """Shared-object emitter: add module targets, then apply the default one."""
    target, source = _fortranEmitter(target, source, env)
    return SCons.Defaults.SharedObjectEmitter(target, source, env)
def ComputeFortranSuffixes(suffixes, ppsuffixes):
    """Extend *suffixes*/*ppsuffixes* in place with upper-case variants.

    *suffixes* holds plain Fortran source suffixes, *ppsuffixes* those to be
    run through the preprocessor.  Both must be sequences, not strings.  On
    case-sensitive platforms the upper-case forms conventionally imply
    preprocessing, so they go into *ppsuffixes*; otherwise they are mere
    aliases and are appended to *suffixes*.
    """
    assert suffixes
    sample = suffixes[0]
    uppercased = [suf.upper() for suf in suffixes]
    if SCons.Util.case_sensitive_suffixes(sample, sample.upper()):
        ppsuffixes.extend(uppercased)
    else:
        suffixes.extend(uppercased)
def CreateDialectActions(dialect):
    """Create dialect specific actions.

    Returns a 4-tuple of SCons Actions for *dialect* (e.g. "F77"):
    (compile, preprocessed compile, shared compile, shared preprocessed
    compile), each driven by the corresponding $<dialect>...COM variable.
    """
    CompAction = SCons.Action.Action('$%sCOM ' % dialect, '$%sCOMSTR' % dialect)
    CompPPAction = SCons.Action.Action('$%sPPCOM ' % dialect, '$%sPPCOMSTR' % dialect)
    ShCompAction = SCons.Action.Action('$SH%sCOM ' % dialect, '$SH%sCOMSTR' % dialect)
    ShCompPPAction = SCons.Action.Action('$SH%sPPCOM ' % dialect, '$SH%sPPCOMSTR' % dialect)
    return CompAction, CompPPAction, ShCompAction, ShCompPPAction
def DialectAddToEnv(env, dialect, suffixes, ppsuffixes, support_module = 0):
    """Add dialect specific construction variables.

    Registers scanners, builder actions and emitters for *dialect* (e.g.
    "F77", "F90") on *env*, and defines its FLAGS/INCPREFIX/COM variables.
    When support_module == 1 the command lines also pass $_FORTRANMODFLAG so
    the compiler knows where to place/find .mod files.
    """
    ComputeFortranSuffixes(suffixes, ppsuffixes)
    fscan = SCons.Scanner.Fortran.FortranScan("%sPATH" % dialect)
    # register the dependency scanner for every suffix of this dialect
    for suffix in suffixes + ppsuffixes:
        SCons.Tool.SourceFileScanner.add_scanner(suffix, fscan)
    env.AppendUnique(FORTRANSUFFIXES = suffixes + ppsuffixes)
    compaction, compppaction, shcompaction, shcompppaction = \
        CreateDialectActions(dialect)
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # plain sources: compile actions without the C preprocessor
    for suffix in suffixes:
        static_obj.add_action(suffix, compaction)
        shared_obj.add_action(suffix, shcompaction)
        static_obj.add_emitter(suffix, FortranEmitter)
        shared_obj.add_emitter(suffix, ShFortranEmitter)
    # preprocessed sources: actions that also pass CPP flags/defines
    for suffix in ppsuffixes:
        static_obj.add_action(suffix, compppaction)
        shared_obj.add_action(suffix, shcompppaction)
        static_obj.add_emitter(suffix, FortranEmitter)
        shared_obj.add_emitter(suffix, ShFortranEmitter)
    if '%sFLAGS' % dialect not in env:
        env['%sFLAGS' % dialect] = SCons.Util.CLVar('')
    if 'SH%sFLAGS' % dialect not in env:
        env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)
    # If a tool does not define fortran prefix/suffix for include path, use C ones
    if 'INC%sPREFIX' % dialect not in env:
        env['INC%sPREFIX' % dialect] = '$INCPREFIX'
    if 'INC%sSUFFIX' % dialect not in env:
        env['INC%sSUFFIX' % dialect] = '$INCSUFFIX'
    env['_%sINCFLAGS' % dialect] = '$( ${_concat(INC%sPREFIX, %sPATH, INC%sSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' % (dialect, dialect, dialect)
    if support_module == 1:
        env['%sCOM' % dialect]     = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['%sPPCOM' % dialect]   = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['SH%sCOM' % dialect]   = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
    else:
        env['%sCOM' % dialect]     = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['%sPPCOM' % dialect]   = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['SH%sCOM' % dialect]   = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
def add_fortran_to_env(env):
    """Add Builders and construction variables for Fortran to an Environment."""
    if 'FORTRANFILESUFFIXES' in env:
        suffixes = env['FORTRANFILESUFFIXES']
    else:
        suffixes = ['.f', '.for', '.ftn']
    if 'FORTRANPPFILESUFFIXES' in env:
        pp_suffixes = env['FORTRANPPFILESUFFIXES']
    else:
        pp_suffixes = ['.fpp', '.FPP']
    DialectAddToEnv(env, "FORTRAN", suffixes, pp_suffixes, support_module=1)
    # Module-file variables: FORTRANMODPREFIX/SUFFIX mirror $LIBPREFIX/$LIBSUFFIX,
    # FORTRANMODDIRPREFIX/SUFFIX mirror $INCPREFIX/$INCSUFFIX.
    env['FORTRANMODPREFIX'] = ''
    env['FORTRANMODSUFFIX'] = '.mod'
    env['FORTRANMODDIR'] = ''          # where the compiler should place .mod files
    env['FORTRANMODDIRPREFIX'] = ''
    env['FORTRANMODDIRSUFFIX'] = ''
    env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
def add_f77_to_env(env):
    """Add Builders and construction variables for f77 to an Environment."""
    if 'F77FILESUFFIXES' in env:
        suffixes = env['F77FILESUFFIXES']
    else:
        suffixes = ['.f77']
    if 'F77PPFILESUFFIXES' in env:
        pp_suffixes = env['F77PPFILESUFFIXES']
    else:
        pp_suffixes = []
    DialectAddToEnv(env, "F77", suffixes, pp_suffixes)
def add_f90_to_env(env):
    """Add Builders and construction variables for f90 to an Environment."""
    if 'F90FILESUFFIXES' in env:
        suffixes = env['F90FILESUFFIXES']
    else:
        suffixes = ['.f90']
    if 'F90PPFILESUFFIXES' in env:
        pp_suffixes = env['F90PPFILESUFFIXES']
    else:
        pp_suffixes = []
    # F90 and later dialects support modules, hence support_module=1.
    DialectAddToEnv(env, "F90", suffixes, pp_suffixes, support_module=1)
def add_f95_to_env(env):
    """Add Builders and construction variables for f95 to an Environment."""
    if 'F95FILESUFFIXES' in env:
        suffixes = env['F95FILESUFFIXES']
    else:
        suffixes = ['.f95']
    if 'F95PPFILESUFFIXES' in env:
        pp_suffixes = env['F95PPFILESUFFIXES']
    else:
        pp_suffixes = []
    DialectAddToEnv(env, "F95", suffixes, pp_suffixes, support_module=1)
def add_f03_to_env(env):
    """Add Builders and construction variables for f03 to an Environment."""
    if 'F03FILESUFFIXES' in env:
        suffixes = env['F03FILESUFFIXES']
    else:
        suffixes = ['.f03']
    if 'F03PPFILESUFFIXES' in env:
        pp_suffixes = env['F03PPFILESUFFIXES']
    else:
        pp_suffixes = []
    DialectAddToEnv(env, "F03", suffixes, pp_suffixes, support_module=1)
def add_f08_to_env(env):
    """Add Builders and construction variables for f08 to an Environment."""
    if 'F08FILESUFFIXES' in env:
        suffixes = env['F08FILESUFFIXES']
    else:
        suffixes = ['.f08']
    if 'F08PPFILESUFFIXES' in env:
        pp_suffixes = env['F08PPFILESUFFIXES']
    else:
        pp_suffixes = []
    DialectAddToEnv(env, "F08", suffixes, pp_suffixes, support_module=1)
def add_all_to_env(env):
    """Add builders and construction variables for all supported fortran
    dialects."""
    for installer in (add_fortran_to_env,
                      add_f77_to_env,
                      add_f90_to_env,
                      add_f95_to_env,
                      add_f03_to_env,
                      add_f08_to_env):
        installer(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/FortranCommon.py | FortranCommon.py | py | 9,651 | python | en | code | 24,670 | github-code | 36 |
418860871 | import sys
import json
import logging
import argparse
import select
import time
import logs.server_log_config
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from common.variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, RESPONSE, \
MESSAGE, MESSAGE_TEXT, ERROR, DEFAULT_PORT, MAX_CONNECTIONS, SENDER
from common.utils import get_message, send_message
from decorators import log
# Server-side logger; handlers/levels are set up by the
# `logs.server_log_config` import above.
SERVER_LOGGER = logging.getLogger('server')
@log
def process_client_message(message, messages_list, client):
    """
    Handle one message received from a client.

    Validates the message dict and either acknowledges a presence message
    (200), queues a chat message for broadcast, or replies with a 400 error.
    :param message: dict decoded from the client socket
    :param messages_list: shared queue of (account_name, text) pairs to broadcast
    :param client: client socket to reply on
    :return: None
    """
    SERVER_LOGGER.debug(f'Разбор сообщения от клиента: {message}.')
    # Presence message: acknowledge with a 200 response.
    # .get() avoids a KeyError when the USER dict lacks ACCOUNT_NAME.
    if ACTION in message and message[ACTION] == PRESENCE and TIME in message \
            and USER in message and message[USER].get(ACCOUNT_NAME) == 'Guest':
        send_message(client, {RESPONSE: 200})
        return
    # Chat message: queue it for broadcast; no reply required.
    # ACCOUNT_NAME is now part of the check -- the original code raised
    # KeyError when a MESSAGE arrived without it.
    elif ACTION in message and message[ACTION] == MESSAGE and TIME in message \
            and ACCOUNT_NAME in message and MESSAGE_TEXT in message:
        messages_list.append((message[ACCOUNT_NAME], message[MESSAGE_TEXT]))
        return
    # Anything else is malformed: reply with a 400 error.
    else:
        send_message(client, {
            RESPONSE: 400,
            ERROR: 'Bad request',
        })
        return
@log
def arg_parser():
    """Parse command-line arguments and return (listen_address, listen_port).

    -p: port to listen on (defaults to DEFAULT_PORT)
    -a: address to bind to (defaults to '' = all interfaces)
    Exits the process with code 1 when the port is out of range.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', default=DEFAULT_PORT, type=int, nargs='?')
    parser.add_argument('-a', default='', nargs='?')
    namespace = parser.parse_args(sys.argv[1:])
    listen_address = namespace.a
    listen_port = namespace.p
    # Valid non-privileged ports are 1024..65535 inclusive.
    # (The original `< 65535` wrongly rejected port 65535, contradicting
    # the error message below; the message also said "client" exits while
    # this is the server.)
    if not 1023 < listen_port < 65536:
        SERVER_LOGGER.critical(
            f'Попытка запуска сервера с неподходящим номером порта: {listen_port}.'
            f' Допустимые адреса с 1024 до 65535. Сервер завершается.'
        )
        sys.exit(1)
    return listen_address, listen_port
def main():
    """
    Server entry point.

    Reads the listen address/port from the command line, opens a listening
    socket and runs the accept/select/broadcast loop forever.
    :return: None (never returns normally)
    """
    listen_address, listen_port = arg_parser()
    SERVER_LOGGER.info(f'Запущен сервер. Порт для подключений: {listen_port}, '
                       f'адрес, с которого принимаются подключения: {listen_address}. '
                       f'Если адрес не указан, то принимаются соединения с любых адресов.')
    # Prepare the listening socket.
    transport = socket(AF_INET, SOCK_STREAM)
    transport.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    transport.bind((listen_address, listen_port))
    # Short timeout so accept() does not block the broadcast loop.
    transport.settimeout(1)
    clients = []   # connected client sockets
    messages = []  # queued (sender, text) pairs awaiting broadcast
    transport.listen(MAX_CONNECTIONS)
    while True:
        try:
            client, client_address = transport.accept()
        except OSError:
            # accept() timed out -- no new connection this cycle.
            # (The original printed err.errno here, spamming "None" once
            # a second on every timeout.)
            pass
        else:
            SERVER_LOGGER.info(f'Установлено соединение с ПК {client_address}.')
            clients.append(client)
        recv_data_list = []
        send_data_list = []
        err_list = []
        # Poll for clients that are ready to be read from / written to.
        try:
            if clients:
                recv_data_list, send_data_list, err_list = select.select(clients, clients, [], 0)
        except OSError:
            pass
        # Receive pending messages; drop clients that fail.
        # (Bare `except:` replaced -- it also swallowed KeyboardInterrupt.)
        if recv_data_list:
            for client_with_message in recv_data_list:
                try:
                    process_client_message(get_message(client_with_message), messages, client_with_message)
                except Exception:
                    SERVER_LOGGER.info(f'Клиент {client_with_message.getpeername()} отключился от сервера.')
                    clients.remove(client_with_message)
        # Broadcast the oldest queued message to every waiting client.
        if messages and send_data_list:
            message = {
                ACTION: MESSAGE,
                SENDER: messages[0][0],
                TIME: time.time(),
                MESSAGE_TEXT: messages[0][1]
            }
            del messages[0]
            for waiting_client in send_data_list:
                try:
                    send_message(waiting_client, message)
                except Exception:
                    SERVER_LOGGER.info(f'Клиент {waiting_client.getpeername()} отключился от сервера.')
                    waiting_client.close()
                    clients.remove(waiting_client)
# Start the server only when run as a script (not on import).
if __name__ == '__main__':
    main()
| Shorokhov-A/repo_client-server-apps_python | practical_task_7/server.py | server.py | py | 5,881 | python | ru | code | 0 | github-code | 36 |
40943554690 | import sys
import ruamel
from ruamel.yaml import YAML
from datetime import datetime, date, time
from common import *
import fileinput
class NonAliasingRTRepresenter(ruamel.yaml.RoundTripRepresenter):
    """Round-trip representer that never emits YAML anchors/aliases."""
    def ignore_aliases(self, data):
        # Returning True for every node disables alias generation entirely.
        return True
def main():
    """Generate next month's race schedule from data/common.yaml.

    Expands each weekly template into dated entries, alternates
    Simple/Complex descriptions, rewrites the last matching race of the
    month to "Mystery", and writes data/races-new.yaml.
    """
    out = {'races': []}
    yaml = YAML(typ='safe')
    with open('data/common.yaml', 'r') as fi:
        ydat = yaml.load(fi)
    # Target the month after the current one (wrapping December -> January).
    d = date.today()
    year = d.year
    month = d.month + 1
    if month == 13:
        # skip Smarch
        year = year + 1
        month = 1
    day = 1
    # Walk every day of the target month; date() raises ValueError on the
    # first out-of-range day, which terminates the loop.
    try:
        while True:
            d = date(year, month, day)
            for w in ydat['weekly']:
                if w['isoweekday'] == d.isoweekday():
                    t = time.fromisoformat(w['time'])
                    dt = datetime.combine(d, t, tzinfo=RACETZ)
                    r = {'datetime': dt.strftime('%Y-%m-%d %H:%M')}
                    # Copy every template field except the scheduling keys.
                    for k in w.keys():
                        if k not in ('isoweekday', 'time', 'msg'):
                            r[k] = w[k]
                    kw = dt.isocalendar().week
                    sunday = d.isoweekday() == 7
                    casual = r['skills_preset'] == 'casual'
                    # alternate simple and complex, and do the opposite on Sunday
                    simple = True
                    if (kw % 2 == 0 and casual) or (kw % 2 != 0 and not casual):
                        simple = False
                    if sunday:
                        simple = not simple
                    if simple:
                        r['desc'] += ' - Simple'
                    else:
                        r['desc'] += ' - Complex'
                    out['races'].append(r)
            day = day + 1
    except ValueError:
        pass
    # ugly hack: turn the month's last "... - Complex" race of the chosen
    # preset into a Mystery race (preset alternates with month parity).
    replace_last = 'Casual - Complex' if month % 2 == 0 else 'Hard - Complex'
    for i, obj in reversed(list(enumerate(out['races']))):
        if obj['desc'] == replace_last:
            out['races'][i]['desc'] = obj['desc'].replace('Complex', 'Mystery')
            break
    # Emit YAML 1.2 without anchors/aliases, block style, 2-space mappings.
    yout = YAML()
    yout.default_flow_style = False
    yout.version = (1, 2)
    yout.indent(mapping=2, sequence=4, offset=2)
    yout.Representer = NonAliasingRTRepresenter
    with open('data/races-new.yaml', 'w') as fout:
        yout.dump(out, fout)
    # add whitespace: blank line before each race entry (after the header).
    with fileinput.FileInput('data/races-new.yaml', inplace=True) as f:
        for line in f:
            if line.startswith('  -') and f.lineno() > 4:
                print()
            print(line, end='')
# Generate the schedule only when run as a script.
if __name__ == '__main__':
    main()
| pkdawson/workrobot | new_schedule.py | new_schedule.py | py | 2,605 | python | en | code | 0 | github-code | 36 |
30374326803 | import numpy as np
from scipy.special import expit
def piston_action(alfa, p_linea, p_servicio):
    """Smoothed pressure step over one revolution.

    Wraps the crank angle into [0, 2*pi) and applies a steep logistic
    transition (scale 20) centred at pi, scaling the pressure difference
    ``p_servicio - p_linea``.
    """
    wrapped = alfa % (2 * np.pi)
    step = expit((wrapped - np.pi) * 20)
    return (p_servicio - p_linea) * step
def build_indep(
    alfa: np.float32,
    beta: np.float32,
    omega: np.float32,
    mb: np.float32,
    mp: np.float32,
    R: np.float32,
    Ib: np.float32,
    Lg: np.float32,
    L: np.float32,
    D_plunger,
    p_linea,
    p_servicio,
) -> np.ndarray:
    """Build the right-hand-side vector b of the 5-unknown linear system.

    The terms combine crank angle (alfa), rod angle (beta), angular speed
    (omega), masses/inertia and geometry; b4 subtracts the piston pressure
    force over the plunger area.  Assumes a slider-crank style mechanism --
    TODO confirm against the accompanying derivation.
    """
    # Pressure load acting on the plunger for the current crank angle.
    p_alfa = piston_action(alfa, p_linea, p_servicio)
    # Rod angular velocity and acceleration derived from the crank motion.
    dbdt = (R / L) * (np.cos(alfa) / np.cos(beta)) * omega
    d2bdt2 = (dbdt**2 - omega**2) * np.tan(beta)
    b0 = mb * Lg * (R / L) * omega**2 * np.sin(alfa)
    b1 = (
        -mb
        * R
        * omega**2
        * (
            np.cos(alfa)
            + (1 - Lg / L)
            * (
                (R / L) * (np.cos(alfa) ** 2 / np.cos(beta) ** 3)
                - np.sin(alfa) * np.tan(beta)
            )
        )
    )
    # Rotational inertia term of the rod.
    b2 = Ib * d2bdt2
    b3 = 0
    # Plunger inertia minus the pressure force over the plunger cross-section.
    b4 = mp * R * omega**2 * (
        (np.cos(alfa + beta) / np.cos(beta))
        + (R / L) * (np.cos(alfa) ** 2 / np.cos(beta) ** 3)
    ) - p_alfa * (D_plunger**2 * np.pi / 4)
    return np.array([b0, b1, b2, b3, b4])
def build_matrix(L: np.float32, Lg: np.float32, beta: np.float32) -> np.ndarray:
    """Build the 5x5 coefficient matrix of the force/moment balance system.

    Rows 0-1 couple the paired unknowns, row 2 is the moment balance about
    the rod (geometry-dependent), rows 3-4 close the system.
    """
    sin_b = np.sin(beta)
    cos_b = np.cos(beta)
    moment_row = [
        -(L - Lg) * sin_b,
        (L - Lg) * sin_b,
        Lg * cos_b,
        -Lg * sin_b,
        0.0,
    ]
    return np.array(
        [
            [1.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 1.0, 0.0],
            moment_row,
            [0.0, 0.0, 1.0, 0.0, -1.0],
            [0.0, 0.0, 0.0, 1.0, 0.0],
        ]
    )
def solve_system(
    alfa,
    beta,
    omega,
    mb: np.float32,
    mp: np.float32,
    R: np.float32,
    Ib: np.float32,
    L,
    Lg,
    D_plunger,
    p_linea,
    p_servicio,
):
    """Solve the 5x5 force balance and return reaction magnitudes/phases.

    Returns a dict with module/phase for supports A and B, the combined
    magnitude/phase, and the raw solution vector.
    """
    A = build_matrix(L, Lg, beta)
    b = build_indep(
        alfa, beta, omega, mb, mp, R, Ib, Lg, L, D_plunger, p_linea, p_servicio
    )
    # Scale from N to kN -- presumably; TODO confirm units.
    sol = np.linalg.solve(A, b) / 1000
    # Fixed: the original reassigned the crank-radius parameter `R` here,
    # shadowing it with the resultant magnitude.
    total_mod = np.sqrt((sol[0] + sol[2]) ** 2 + (sol[1] + sol[3]) ** 2)
    # NOTE(review): arctan (not arctan2) loses the quadrant of the total
    # phase, unlike phase_A/phase_B below; kept as-is to preserve behaviour.
    theta_R = np.arctan((sol[1] + sol[3]) / (sol[0] + sol[2]))
    ret_dict = {
        "module_A": np.sqrt(sol[0] ** 2 + sol[1] ** 2),
        "phase_A": np.arctan2(sol[1], sol[0]),
        "module_B": np.sqrt(sol[2] ** 2 + sol[3] ** 2),
        "phase_B": np.arctan2(sol[3], sol[2]),
        "Total_mod": total_mod,
        "Total_phase": theta_R,
        "solution": sol,
    }
    return ret_dict
def get_max(res):
    """Print the A/B reaction components at their peak magnitudes.

    ``res`` is a sequence of dicts with module_A/phase_A/module_B/phase_B;
    the entries with the largest magnitudes are decomposed into x/y
    components and printed.  Always returns (1, 1).
    """
    mags_a = np.array([entry["module_A"] for entry in res])
    angs_a = np.array([entry["phase_A"] for entry in res])
    mags_b = np.array([entry["module_B"] for entry in res])
    angs_b = np.array([entry["phase_B"] for entry in res])
    xs_a = mags_a * np.cos(angs_a)
    ys_a = mags_a * np.sin(angs_a)
    xs_b = mags_b * np.cos(angs_b)
    ys_b = mags_b * np.sin(angs_b)
    peak_a = np.argmax(mags_a)
    peak_b = np.argmax(mags_b)
    print(f"XAmax = {xs_a[peak_a]}, YAmax = {ys_a[peak_a]}")
    print(f"XBmax = {xs_b[peak_b]}, YBmax = {ys_b[peak_b]}")
    return 1, 1
| hanzy1110/paper_fem_bombas | src/solve.py | solve.py | py | 3,065 | python | en | code | 0 | github-code | 36 |
14570657497 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.graphics import Color, Line, Rectangle
from image_to_text import get_text, train_model
import cv2
from string import ascii_uppercase as UC
class DrawWidget(RelativeLayout):
    """Free-hand drawing surface: white background, black 2.5px strokes."""
    def __init__(self, **kwargs):
        super(DrawWidget, self).__init__(**kwargs)
        # Paint a white background rectangle and keep it sized to the widget.
        with self.canvas:
            Color(*(1,1,1),mode="rgb")
            self.rect = Rectangle(size = self.size, pos = self.pos)
        self.bind(size=self.update_rect)
    def on_touch_down(self, touch):
        """Start a new black stroke at the touch point."""
        color = (0,0,0)
        with self.canvas:
            Color(*color,mode="rgb")
            width = 2.5
            # Convert window coordinates to this widget's local space.
            x,y = self.to_local(x=touch.x, y=touch.y)
            touch.ud["line"] = Line(points=(x, y),width=width)
    def on_touch_move(self,touch):
        """Extend the stroke started in on_touch_down as the finger moves."""
        x,y = self.to_local(x=touch.x, y=touch.y)
        touch.ud['line'].points += [x, y]
    def update_rect(self, instance, value):
        """Keep the background rectangle in sync with the widget geometry."""
        self.rect.size = self.size
        self.rect.pos = self.pos
class DrawApp(App):
    """Kivy app: draw a handwritten word, OCR it, optionally retrain the model."""
    def build(self):
        """Assemble the root layout: canvas, labels, buttons and input field."""
        self.title = 'Convert To Text'
        parent = RelativeLayout()
        self.draw = DrawWidget(size_hint=(0.5,0.8),pos_hint={'x':0,'y':0.2})
        clear_btn = Button(size_hint=(0.5,0.1),text="Clear",pos_hint={'x':0,'y':0.1})
        clear_btn.bind(on_release=self.clear_canvas)
        convert_btn = Button(size_hint=(0.5,0.1),text="Convert to text",pos_hint={'x':0.5,'y':0.1})
        convert_btn.bind(on_release=self.convert)
        # Label that displays the recognised text.
        self.label = Label(size_hint=(0.5,0.9),pos_hint={'x':0.5,'y':0.2})
        label1 = Label(size_hint=(0.3,0.1),pos_hint={"x":0,"y":0},text="Wrong conversion? Type in correct capital letters comma separated and train")
        # Wrap the instruction text to the label width.
        label1.bind(width=lambda *x: label1.setter('text_size')(label1, (label1.width, None)), texture_size=lambda *x: label1.setter('height')(label1, label1.texture_size[1]))
        self.inp_txt = TextInput(size_hint=(0.4,0.1),pos_hint={"x":0.3,"y":0})
        # Train button stays disabled until a conversion has been made.
        self.train_btn = Button(size_hint=(0.3,0.1),pos_hint={"x":0.7,"y":0},text="Train", disabled=True)
        self.train_btn.bind(on_release = self.train)
        parent.add_widget(self.draw)
        parent.add_widget(self.label)
        parent.add_widget(clear_btn)
        parent.add_widget(convert_btn)
        parent.add_widget(label1)
        parent.add_widget(self.inp_txt)
        parent.add_widget(self.train_btn)
        return parent
    def clear_canvas(self, obj):
        """Wipe all strokes and repaint the white background."""
        self.draw.canvas.clear()
        with self.draw.canvas:
            Color(*(1,1,1),mode="rgb")
            self.draw.rect = Rectangle(size = self.draw.size, pos = (0,0))
        self.draw.bind(size=self.draw.update_rect)
        self.train_btn.disabled = True
    def convert(self, obj):
        """Export the canvas to draw.png, run OCR and show the result."""
        self.train_btn.disabled = False
        self.draw.export_to_png("draw.png")
        img = cv2.imread("draw.png")
        # get_text returns the recognised letters and their cropped images.
        self.lets, self.imgs = get_text(img)
        txt = " ".join(self.lets)
        self.label.text = txt
    def train(self, obj):
        """Retrain the model using the user-corrected comma-separated letters."""
        let = self.inp_txt.text
        let = let.replace(" ","").split(",")
        lbls = []
        chars = list(UC)
        # Map each capital letter to its 0-based alphabet index.
        for l in let:
            lbls.append(chars.index(l))
        # Only train when the correction covers every segmented image.
        if len(lbls) == len(self.imgs):
            train_model(self.imgs, lbls)
# Launch the Kivy app only when run as a script.
if __name__ == "__main__":
    DrawApp().run()
| ShantanuShinde/Character-Recognition-with-CNN | Character Recognition App/paintapp.py | paintapp.py | py | 3,519 | python | en | code | 0 | github-code | 36 |
19449055889 | from django.db import models
import re
import statistics
from .direct_indicator import DirectIndicator
from .question_response import QuestionResponse
# Matches every "[key]" occurrence inside a formula string (non-greedy).
find_square_bracket_keys = re.compile(r"\[(.*?)\]")
class IndirectIndicator(models.Model):
    """An indicator whose value is computed from a formula over other indicators.

    The formula references other indicators by "[key]" placeholders; it may be
    a plain arithmetic expression, contain aggregate functions (sum/avg/...),
    or be a chain of IF/THEN/ELSE conditionals interpreted by
    `calculate_conditionals`.
    """
    topic = models.ForeignKey('Topic', related_name='indirect_indicators', on_delete=models.SET_NULL, null=True)
    method = models.ForeignKey("Method", related_name="indirect_indicators", on_delete=models.CASCADE, null=True)
    # Unique (per method) identifier used in other indicators' formulas.
    key = models.CharField(max_length=255, blank=False)
    formula = models.CharField(max_length=1000, unique=False, blank=False)
    name = models.CharField(max_length=255, unique=False, blank=False)
    description = models.TextField(blank=True, null=True)
    pre_unit = models.CharField(max_length=30, blank=True, default="") # Examples: $,€
    post_unit = models.CharField(max_length=30, blank=True, default="") # Examples: %, points, persons
    cut_off_lower_limit = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    cut_off_upper_limit = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    # Allowed values for `datatype`.
    TEXT = "text"
    INTEGER = "integer"
    DOUBLE = "double"
    DATE = "date"
    BOOLEAN = "boolean"
    SINGLECHOICE = "singlechoice"
    MULTIPLECHOICE = "multiplechoice"
    DATA_TYPES = (
        (TEXT, "text"),
        (INTEGER, "integer"),
        (DOUBLE, "double"),
        (DATE, "date"),
        (BOOLEAN, "boolean"),
        (SINGLECHOICE, "singlechoice"),
        (MULTIPLECHOICE, "multiplechoice")
    )
    datatype = models.CharField(max_length=50, blank=False, choices=DATA_TYPES, default="text")
    # Allowed values for `type`.
    PERFORMANCE = "performance"
    SCORING = "scoring"
    CERTIFICATION = "certification"
    INDICATOR_TYPES = (
        (PERFORMANCE, "performance"),
        (SCORING, "scoring"),
        (CERTIFICATION, "certification")
    )
    type = models.CharField(max_length=50, blank=False, choices=INDICATOR_TYPES, default="scoring")
    # Non-persisted working state for the calculation pipeline.
    # NOTE(review): these are class-level attributes; the mutable ones
    # (absolute_weights, critical_impact_by) are shared between instances
    # until an instance assigns its own -- confirm this is intended.
    calculation = ''
    absolute_weights = []
    indicator_impact = None
    critical_impact_by = {}
    value = None
    has_conditionals = False
    exception = None
    exception_detail = None
    responses = None
    # used to find absolute weights
    expression = ''
    class Meta:
        unique_together = ['key', 'method']
    def __init__(self, *args, **kwargs):
        super(IndirectIndicator, self).__init__(*args, **kwargs)
        # Working copy of the formula with newlines stripped; `calculate`
        # substitutes values into this string, never into `formula`.
        self.calculation = self.formula.replace("\n", "")
        # Formulas starting with "IF" are routed to the conditional interpreter.
        if self.calculation.strip().startswith("IF"):
            self.has_conditionals = True
    def __str__(self):
        return self.key
    # calculation_keys are all indicators that are used within the formula of this indirect indicator
    @property
    def calculation_keys(self):
        """Unique [key] references still present in the working calculation."""
        calculation_keys = re.findall(find_square_bracket_keys, self.calculation) #self.formula.replace("\n", ""))
        # print('*****************************', self.formula, self.calculation, calculation_keys)
        calculation_keys_uniques = list(set(calculation_keys))
        # Exclude self-references (used by conditional assignments).
        if self.key in calculation_keys_uniques:
            calculation_keys_uniques.remove(self.key)
        return calculation_keys_uniques
    # Used for calculation of absolute weights
    @property
    def formula_keys(self):
        """Unique [key] references in the original (unsubstituted) formula."""
        formula_keys = re.findall(find_square_bracket_keys, self.formula)
        calculation_keys_uniques = list(set(formula_keys))
        if self.key in calculation_keys_uniques:
            calculation_keys_uniques.remove(self.key)
        return calculation_keys_uniques
    def find_weights(self, weight_dict):
        """Store the given weight mapping as this indicator's absolute weights."""
        self.absolute_weights = [weight_dict]
        return self.absolute_weights
    # Replaces indicator keys with corresponding value to be able to calculate the indirect indicator (used in 'utils > calculate_indicators')
    def find_values(self, key_value_list):
        """Substitute [key] placeholders with values from `key_value_list` (a dict)."""
        calculation = self.calculation
        # Only substitute when every dependency has a value.
        if not None in key_value_list.values():
            for calculation_key in self.calculation_keys:
                if calculation_key in key_value_list:
                    value = key_value_list[calculation_key]
                    # For dict values (e.g. choice counts), use the key with
                    # the highest count.
                    if isinstance(value, dict):
                        value = max(value, key=value.get)
                    calculation = calculation.replace(f"[{calculation_key}]", f"{value}")
            self.calculation = calculation
        else:
            print('Missing values in key_value_list!')
    # Calculates indicator formula
    def calculate(self):
        """Evaluate the working calculation and store the result in `self.value`.

        Dispatches to the conditional interpreter, an aggregate-function
        branch, or plain eval().  NOTE(review): eval() runs formula text --
        formulas must come from trusted users only.
        """
        # Unresolved keys in a non-conditional formula cannot be evaluated.
        if len(self.calculation_keys) and not self.has_conditionals:
            self.exception = Exception("Not all keys are replaced with values")
            return
        self.exception = None
        self.error = None
        functionList = ['sum(', 'avg(', 'min(', 'max(', 'median(', 'mode(']
        # If there are conditionals
        if self.has_conditionals:
            self.value = None
            value = self.calculate_conditionals()
            self.value = value
        # if there's a function
        elif any(func in self.calculation for func in functionList):
            key = re.findall(find_square_bracket_keys, self.formula)
            if len(key):
                # NOTE(review): esea_account=4 is hard-coded here -- confirm.
                question_responses = QuestionResponse.objects.filter(survey_response__esea_account=4, survey_response__finished=True)
                directind = DirectIndicator.objects.filter(method=self.method, key=key[0]).first()
                indirectind = IndirectIndicator.objects.filter(method=self.method, key=key[0]).first()
                # Aggregates are only computed over direct-indicator responses.
                if directind is not None:
                    indicator = directind
                    indicator.filter_responses(question_responses)
                    responses = [float(r) for r in indicator.responses]
                    if 'avg(' in self.calculation:
                        self.value = sum(responses)/len(responses) # int(direct_indicator.value)
                    elif 'sum(' in self.calculation:
                        self.value = sum(responses)
                    elif 'min(' in self.calculation:
                        self.value = min(responses)
                    elif 'max(' in self.calculation:
                        self.value = max(responses)
                    elif 'median(' in self.calculation:
                        self.value = statistics.median(responses)
                    elif 'mode(' in self.calculation:
                        self.value = statistics.mode(responses)
            else:
                self.value = 1
                print('There are no responses to calculate the sum with.')
            return
        # If a regular calculation can be performed
        else:
            try:
                self.expression = self.formula
                self.value = eval(self.calculation)
                return self.value
            except Exception as e:
                print('error!', self.calculation, self.has_conditionals)
                self.value = None
    def calculate_conditionals(self, verbose=False):
        """Interpret an IF/THEN/ELSE formula and return the assigned value as str.

        The formula is tokenised by tagging the keywords and splitting, then
        scanned left to right: failed IFs skip ahead to their matching ELSE
        (tracked via ifs/elses counters for nesting), and the first reachable
        assignment to [self.key] terminates the scan.
        """
        # Tag keywords with unique markers, then split on them.
        formula = self.calculation.replace('IF', '@@IF').replace('ELSE', '##ELSE').replace('THEN', '%%THEN')
        formula = [x.strip() for x in re.split('@@|##|%%', formula)]
        formula = list(filter(lambda x: x != '', formula))
        if verbose:
            print(f'\n {self.key}:::::::::: Start Conditional Calculations... \nformula: {formula}')
        # Same tokenisation of the unsubstituted formula, used to record the
        # matching expression for weight extraction.
        full_formula = self.formula.replace('IF', '@@IF').replace('ELSE', '##ELSE').replace('THEN', '%%THEN')
        full_formula = [x.strip() for x in re.split('@@|##|%%', full_formula)]
        full_formula = list(filter(lambda x: x != '', full_formula))
        ifs = 1
        elses = 0
        last_if = False
        search_else = False
        val = None
        for i, cond in enumerate(formula):
            # Any remaining [key] other than self.key means a dependency was
            # not substituted.
            bracket_keys = list(set(re.findall(find_square_bracket_keys, cond)))
            if self.key in bracket_keys:
                bracket_keys.remove(self.key)
            if len(bracket_keys):
                print('Invalid Partial Condition: ', bracket_keys)
                # raise Exception("invalid partial condition")
            # Skips code till it finds the corresponding then/else statements corresponding to the IF statement that fails or succeeds.
            if search_else:
                if 'IF' in cond:
                    ifs += 1
                if 'ELSE' in cond:
                    elses += 1
                if ifs != elses:
                    continue
                else:
                    search_else = False
                    last_if = True
                    ifs = 1
                    elses = 0
            # Checks whether if statement equates to True
            if 'IF' in cond:
                cond = cond.replace('IF', '').replace('(', '').replace(')', '').replace('"', '').strip()
                last_if = False
                # AND: every sub-condition must evaluate truthy.
                if 'AND' in cond:
                    conds = cond.split("AND")
                    conds = self.process_expression(conds)
                    evaluatedconds = [eval(n) for n in conds]
                    if not False in evaluatedconds:
                        last_if = True
                    else:
                        search_else = True
                    continue
                # OR: at least one sub-condition must evaluate truthy.
                if 'OR' in cond:
                    conds = cond.split("OR")
                    conds = self.process_expression(conds)
                    evaluatedconds = [eval(n) for n in conds]
                    if True in evaluatedconds:
                        last_if = True
                    else:
                        search_else = True
                    continue
                cond = self.process_expression(cond)
                if eval(cond):
                    last_if = True
                else:
                    search_else = True
                continue
            # Serves conditional outcome
            if (last_if and '=' in cond) or (cond == formula[-1]):
                cond = cond.replace('(', '').replace(')', '')
                [var, val] = cond.split('=')
                var = var.replace('THEN', '').replace('ELSE', '')
                var = var.replace('[', '').replace(']', '').strip()
                if var != self.key:
                    raise Exception('Assignment variable does not match the key of this indirect indicator')
                val = val.replace('"', '')
                if verbose:
                    print('====', self.key, val)
                # Used for extracting weights
                self.expression = full_formula[i]
                # Evaluate numeric expressions; leave plain strings as-is.
                try:
                    val = eval(val)
                except:
                    pass
                return str(val)
    def process_expression(self, conds):
        """Quote the non-operator parts of comparison expression(s) for eval().

        Accepts a single expression or a list; each side of a comparison is
        wrapped in quotes, stripped and lower-cased so string equality works
        in eval().  Returns the same shape it was given (str in -> str out).
        """
        allowedOperators = ['<', '<=', '==', '>=', '>', '=']
        if not isinstance(conds, list):
            conds = [conds]
        for index, cond in enumerate(conds):
            # cond = cond.replace('=', '==')
            processed_cond = re.split('(<|<=|==|>=|>|=)', cond)
            for idx, value in enumerate(processed_cond):
                if value not in allowedOperators:
                    # Makes eval() of string equals string possible
                    processed_cond[idx] = f'"{value.strip().lower()}"'
            conds[index] = ''.join(processed_cond)
        if len(conds) == 1:
            conds = conds[0]
        return conds
31110031201 |
from lib_das.DAS220_240 import telnet_das220_240
import time
import socket
import numpy
def REMOTE_SAMPLING_EXAMPLE():
    """Sample every channel in CHANNEL_LIST NUMBER_OF_SAMPLE times
    (one sample every SAMP_TIME seconds) and write the results to a
    timestamped CSV file.

    Uses the module-level configuration globals (DAS_IP, SAMP_TIME,
    NUMBER_OF_SAMPLE, CHANNEL_LIST, CSV_SEPARATOR, CSV_DECIMAL_DELIMITER).
    """
    print("-------------------------------------------------------")
    print(" Remote Sammpling example : ")
    print(" - Getting NUMBER_OF_SAMPLE sample of CHANNEL_LIST channel each SAMP_TIME")
    print(" - Save the result into a CSV files")
    launch_time = time.strftime("%d_%b_%Y_%H_%M_%S")
    # Connect to the DAS220 or DAS240.
    mydas = telnet_das220_240(DAS_IP)
    # Set every channel in the list to thermocouple K +/-30degC range and enable it.
    mydas.all_chan_off()
    for chan in CHANNEL_LIST:
        mydas.set_chanmes(chan,'THErmo K,COMP,CEL',60,0,0)
    # Disable command printing.
    mydas.PR_OUT_CMD = 0
    mydas.PR_OUT = 1
    tab_res = [[] for _ in CHANNEL_LIST]  # one result list per channel
    time_res = []
    # Acquire the samples.
    for i in range(NUMBER_OF_SAMPLE):
        print("sample " + str(i))
        time.sleep(SAMP_TIME)
        time_res.append(time.strftime("%H:%M:%S"))
        for ichan in range(len(CHANNEL_LIST)):
            tab_res[ichan].append(mydas.get_chanmes(CHANNEL_LIST[ichan]))
    mydas.close()
    # Create the CSV file; `with` guarantees it is closed even on error.
    f_name = "Remote_sampling_test_" + launch_time + ".csv"
    with open(f_name, "a") as flog:
        # Header line.
        first_string = "Sample Time" + CSV_SEPARATOR
        for chan in CHANNEL_LIST:
            first_string += "CHAN_" + chan + CSV_SEPARATOR
        flog.write(first_string + "\n")
        # Write every sample row.  (The original loop started at index 1 and
        # silently dropped the first sample.)
        for ires in range(NUMBER_OF_SAMPLE):
            line = time_res[ires] + CSV_SEPARATOR
            for ichan in range(len(CHANNEL_LIST)):
                line += str(tab_res[ichan][ires]).replace('.', CSV_DECIMAL_DELIMITER) + CSV_SEPARATOR
            flog.write(line + "\n")
    print("Test file Name : " + f_name)
'''
main script
'''
# Configuration for the sampling run.
#Update the DAS IP adress here
DAS_IP = "192.168.0.72"
SAMP_TIME = 1 #time in seconds
NUMBER_OF_SAMPLE = 10
CHANNEL_LIST = ["A1","A2"]
CSV_SEPARATOR = ';'
CSV_DECIMAL_DELIMITER = ',' #update following your country
# Run the example end-to-end (connects to the instrument, writes the CSV).
print("********* Starting DAS220/240 Remote sampling Script **************")
REMOTE_SAMPLING_EXAMPLE()
print("**************** End of DAS220/240 Test Script *******************")
| sefram/DAS220-BAT-and-DAS240-BAT | Python/REMOTE_SAMPLING_EXAMPLE.py | REMOTE_SAMPLING_EXAMPLE.py | py | 2,474 | python | en | code | 1 | github-code | 36 |
15544612417 | from rally.common import logging
from rally.task import hook
LOG = logging.getLogger(__name__)
class Trigger(hook.HookTrigger):
    """DEPRECATED!!! USE `rally.task.hook.HookTrigger` instead."""
    def __init__(self, *args, **kwargs):
        super(Trigger, self).__init__(*args, **kwargs)
        # Emit a deprecation warning naming the offending plugin.
        LOG.warning("Please contact Rally plugin maintainer. The plugin '%s' "
                    "inherits the deprecated base class(Trigger), "
                    "`rally.task.hook.HookTrigger` should be used instead."
                    % self.get_name())
    @property
    def context(self):
        """Build the hook context dict from the raw hook configuration."""
        action_plugin, action_args = self.hook_cfg["action"]
        trigger_plugin, trigger_args = self.hook_cfg["trigger"]
        return {
            "description": self.hook_cfg["description"],
            "name": action_plugin,
            "args": action_args,
            "trigger": {"name": trigger_plugin,
                        "args": trigger_args},
        }
| DeanHwd/rally | rally/task/trigger.py | trigger.py | py | 948 | python | en | code | 0 | github-code | 36 |
13768273606 | from os import stat
from flask import Flask, Response
from flask_restx import Resource, Api, reqparse
import random
import json
import os
from werkzeug.exceptions import BadHost, BadRequest
# Flask application and its Flask-RESTX API wrapper.
app = Flask(__name__)
api = Api(app)
#Cette route va permettre de retourner la valeur +1
@api.route('/plus_one/<int:number>')
@api.doc(params={"x": "Must be an integer."})
class Add(Resource):
def get(self, number):
return {'value': number+1}
#Cette route permet de retourner la valeur au carré et, pour mon cas j'ai testé de transmettre deux paramètres
@api.route('/square')
@api.doc(params={"int": "Must be an integer", "email": "Must be a string"}, location="query")
class Square(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('int', type=int)
parser.add_argument('email', type=str)
args = parser.parse_args()
return {'value': args['int'] ** 2, 'email': args['email']}
#Cette route prend en paramètre le choix de l'utilisateur sous forme de int et renvoie le message final
@api.route('/game/<int:choice>')
@api.doc(params={"choice": "1: Pierre \n 2: Papier \n 3: Ciseaux \n 4: Lézard \n 5: Spock"})
class Game(Resource):
def get(self, choice):
liste_choices = ["Pierre", "Papier", "Ciseaux", "Lézard", "Spock"]
def choice_computer(index):
possibilities = ["Pierre", "Papier", "Ciseaux", "Lézard", "Spock"]
possibilities.remove(possibilities[index])
computer = possibilities[random.randint(0, len(possibilities)-1)]
return computer
if choice not in [1,2,3,4,5]:
return Response(
"Send a number between 1 and 5 !",
status=400,
)
else:
user_choice= liste_choices[choice-1]
computer_choice= choice_computer(choice-1)
index_computer = liste_choices.index(computer_choice)
result = {0: {1: True, 2: False, 3: True, 4: False },
1: {0: True, 2: False, 3: False, 4: True},
2: {0: False, 1: True, 3: True, 4: False},
3: {0: False, 1: True, 2: False, 4: True},
4: {0: True, 1: False, 2: True, 3: False} }
if result[choice-1][index_computer] == False:
with open('stats.json') as json_file:
data = json.load(json_file)
data[user_choice] = int(data[user_choice]) + 1
with open('stats.json', 'w') as json_file:
json.dump(data, json_file)
return {"ordinateur" : computer_choice, "user": user_choice, "message": "Vous avez perdu."}
else:
with open('stats.json') as json_file:
data = json.load(json_file)
data[user_choice] = int(data[user_choice]) + 1
with open('stats.json', 'w') as json_file:
json.dump(data, json_file)
return {"ordinateur" : computer_choice, "user": user_choice, "message": "Vous avez gagné !"}
@api.route('/stats')
class Stats(Resource):
    """Return the contents of stats.json (per-choice usage counters)."""
    def get(self):
        site_root = os.path.realpath(os.path.dirname(__file__))
        json_url = os.path.join(site_root, "stats.json")
        # `with` fixes the original's leaked file handle
        # (json.load(open(...)) never closed the file).
        with open(json_url) as json_file:
            return json.load(json_file)
# Run the development server when executed directly.
# NOTE: debug=True enables the interactive debugger -- development only.
if __name__ == '__main__':
    app.run(debug=True)
38875746376 |
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load
import os
import time
import random
import math
# JIT-compile the C++/CUDA extensions next to this file at import time.
cur_path = os.path.dirname(os.path.realpath(__file__))
cpu_unsorted_segsum = load('cpu_unsorted_segsum',
    [f'{cur_path}/cpu_extension.cc'],
    extra_cflags=['-fopenmp', '-O3', '-march=native'],
    extra_ldflags=['-lgomp', '-O3', '-march=native'],
    verbose=False)
import cpu_unsorted_segsum
# The CUDA backend is only built when a GPU is available; otherwise the
# module name is bound to None and the autograd Function asserts on use.
if torch.cuda.is_available():
    cuda_unsorted_segsum = load('cuda_unsorted_segsum',
        [f'{cur_path}/cuda_extension.cu'],
        extra_cflags=['-fopenmp', '-O3', '-march=native'],
        extra_ldflags=['-lgomp', '-O3', '-march=native'],
        verbose=False)
    import cuda_unsorted_segsum
else:
    cuda_unsorted_segsum = None
    print('CUDA not available, cuda_unsorted_segsum will not be available')
def unsorted_segment_sum_ref(
    data : torch.Tensor,
    indices : torch.Tensor,
    num_segments : int
) -> torch.Tensor:
    """Pure-PyTorch reference: sum rows of `data` grouped by `indices`.

    Output row i is the sum of all rows of `data` whose index equals i
    (zeros for empty segments).  Used as ground truth for the extensions.
    """
    segment_rows = [
        data[indices == segment].sum(dim=0)
        for segment in range(num_segments)
    ]
    return torch.stack(segment_rows, dim=0)
class UnsortedSegmentSum(torch.autograd.Function):
    """Autograd wrapper dispatching to the compiled CPU/CUDA extensions.

    Only 2-D `data` is supported; the backend is chosen from the tensor's
    device (cuda_unsorted_segsum is None when CUDA is unavailable).
    """
    @staticmethod
    def forward(ctx, data : torch.Tensor, indices : torch.Tensor, num_segments : int) -> torch.Tensor:
        # Indices are needed again in backward to scatter the gradient.
        ctx.save_for_backward(indices)
        M = cuda_unsorted_segsum if data.device.type == 'cuda' else cpu_unsorted_segsum
        assert M is not None, f'No backend for {data.device}'
        if len(data.shape) == 2:
            return M.unsorted_segment_sum_fwd(data, indices, num_segments)
        else:
            raise NotImplementedError()
    @staticmethod
    def backward(ctx, grad):
        indices, = ctx.saved_tensors
        M = cuda_unsorted_segsum if grad.device.type == 'cuda' else cpu_unsorted_segsum
        assert M is not None, f'No backend for {grad.device}'
        if len(grad.shape) == 2:
            # Gradients flow only to `data`; indices and num_segments get None.
            return M.unsorted_segment_sum_bwd(grad.contiguous(), indices), None, None
        else:
            raise NotImplementedError()
def unsorted_segment_sum(
    data : torch.Tensor,
    indices : torch.Tensor,
    num_segments : int
) -> torch.Tensor:
    """Functional entry point for the autograd-enabled segment sum."""
    return UnsortedSegmentSum.apply(data, indices, num_segments)
def unit_test_cpu():
    """Print forward/backward L2 error between the CPU extension and the reference."""
    print('==== Correctness Test CPU ====')
    num_segments = 100
    data = torch.randn(1000, 3, requires_grad=False)
    indices = torch.randint(0, num_segments, (1000,), requires_grad=False)
    ref_in = data.clone().requires_grad_()
    ext_in = data.clone().requires_grad_()
    ref = unsorted_segment_sum_ref(ref_in, indices, num_segments)
    out = UnsortedSegmentSum.apply(ext_in, indices, num_segments)
    print('(FWD) L2 = ', (ref - out).pow(2).sum().sqrt())
    ref.pow(2).sum().backward()
    out.pow(2).sum().backward()
    print('(BWD) L2 = ', (ref_in.grad - ext_in.grad).pow(2).sum().sqrt())
def unit_test_gpu():
    """Print forward/backward L2 error between the CUDA extension and the CPU reference."""
    print('==== Correctness Test GPU ====')
    num_segments = 100
    data = torch.randn(1000, 3, requires_grad=False)
    indices = torch.randint(0, num_segments, (1000,), requires_grad=False)
    ref_in = data.clone().requires_grad_()
    ext_in = data.clone().cuda().requires_grad_()
    ref = unsorted_segment_sum_ref(ref_in, indices, num_segments)
    out = UnsortedSegmentSum.apply(ext_in, indices.clone().cuda(), num_segments)
    print('(FWD) L2 = ', (ref - out.cpu()).pow(2).sum().sqrt())
    ref.pow(2).sum().backward()
    out.pow(2).sum().backward()
    print('(BWD) L2 = ', (ref_in.grad - ext_in.grad.cpu()).pow(2).sum().sqrt())
if __name__ == '__main__':
    unit_test_cpu()
    unit_test_gpu()

    # Benchmark fixtures.  Bug fix: the original benchmark below referenced
    # data/indices/num_segments/d1/d2 without ever defining them, so removing
    # exit(0) raised NameError.  Define them here (cheap) before the early exit.
    num_segments = 100
    data = torch.randn(1000, 3, requires_grad=False)
    indices = torch.randint(0, num_segments, (1000,), requires_grad=False)
    d1 = data.clone().requires_grad_()
    d2 = data.clone().requires_grad_()

    exit(0)  # Drop this line to run the benchmarks.

    # Benchmark: forward-only, reference vs extension.
    t0 = time.perf_counter()
    for _ in range(1000):
        _ = unsorted_segment_sum_ref(data, indices, num_segments)
    t1 = time.perf_counter()
    print(f'Reference (Fwd): {(t1 - t0) * 1000:.2f} ms')
    t0 = time.perf_counter()
    for _ in range(1000):
        _ = UnsortedSegmentSum.apply(data, indices, num_segments)
    t1 = time.perf_counter()
    print(f'Extension (Fwd): {(t1 - t0) * 1000:.2f} ms')
    # Benchmark: forward + backward.
    t0 = time.perf_counter()
    for _ in range(1000):
        out = unsorted_segment_sum_ref(d1, indices, num_segments)
        out.pow(2).sum().backward()
    t1 = time.perf_counter()
    print(f'Reference (Fwd + Bwd): {(t1 - t0) * 1000:.2f} ms')
    t0 = time.perf_counter()
    for _ in range(1000):
        out = UnsortedSegmentSum.apply(d2, indices, num_segments)
        out.pow(2).sum().backward()
    t1 = time.perf_counter()
    print(f'Extension (Fwd + Bwd): {(t1 - t0) * 1000:.2f} ms')
| medav/meshgraphnets-torch | kernels/unsorted_segsum/kernel.py | kernel.py | py | 4,483 | python | en | code | 6 | github-code | 36 |
10018427147 | class numbers(object):
def set(self):
self.a=10
self.b=12
self.a,self.b=self.b,self.a
print(self.a,self.b)
print(self.b,self.a)
class strings(numbers):
    """Demo subclass: run the parent's integer swap, then swap two strings."""
    def put(self):
        super().set()
        self.s1, self.s2 = "prasad", "naidu"
        # Same tuple-unpacking swap as the parent class.
        self.s1, self.s2 = self.s2, self.s1
        print(self.s1, self.s2)
        print(self.s2, self.s1)
s=strings()
s.put() | prasadnaidu1/django | Adv python practice/method override/KV RAO OVERRIDE.py | KV RAO OVERRIDE.py | py | 407 | python | en | code | 0 | github-code | 36 |
22216786293 | import bpy
import bmesh
import os
#function for changing the topology of the object by triangulating it
def triangulate_object(obj):
    """Triangulate every face of *obj*'s mesh in place via a temporary BMesh."""
    me = obj.data
    # Get a BMesh representation
    bm = bmesh.new()
    bm.from_mesh(me)
    # quad_method/ngon_method use the legacy integer enum (0) -- presumably the
    # default "beauty" split; verify against the bmesh.ops docs for this Blender version.
    bmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method=0, ngon_method=0)
    # Finish up, write the bmesh back to the mesh
    bm.to_mesh(me)
    bm.free()
#retrieves the radi for the Envelope based elip
def min_max_axis(object):
    """Append the object's world-space bounding-box half-extents to E_radi.

    Appends three values (x, y, z half-extent) to the module-level ``E_radi``
    list; returns nothing.  Improvements over the original: uses min()/max()
    instead of fully sorting each coordinate list (O(n) vs O(n log n)) and
    drops the unused ``boundaries`` accumulator.

    NOTE(review): the parameter name shadows the ``object`` builtin, and
    ``matrix_world * v.co`` is the pre-2.8 Blender operator (2.8+ uses ``@``)
    -- confirm the target Blender version.
    """
    coords = [(object.matrix_world * v.co) for v in object.data.vertices]
    xs = [v[0] for v in coords]
    ys = [v[1] for v in coords]
    zs = [v[2] for v in coords]
    # Half of the axis-aligned extent along each axis (max >= min, so no abs needed).
    E_radi.append((max(xs) - min(xs)) / 2)
    E_radi.append((max(ys) - min(ys)) / 2)
    E_radi.append((max(zs) - min(zs)) / 2)
#create folder and move blend file into it
file = bpy.path.basename(bpy.context.blend_data.filepath)
# Bug fix: the original used file.strip('.blend'), which strips any of the
# characters ".blend" from BOTH ends (e.g. "blend_demo.blend" -> "_demo").
# Remove the exact suffix instead.
if file.endswith('.blend'):
    filename = file[:-len('.blend')]
else:
    filename = file
mom_folder = '/Users/rileysterman/Desktop/blender objects/pre-pre-animation/'
object_folder = mom_folder + filename
os.makedirs(object_folder)
os.rename(bpy.context.blend_data.filepath, object_folder + '/' + file)
#identify object of interest,create lists of elip radi, and reset its location to the origin, and set the center to be based on mass volume
ob = bpy.data.objects[0]
E_radi = []
ob.location = (0,0,0)
#ob.origin_set(type='ORIGIN_CENTER_OF_VOLUME')
#triangulate the object
triangulate_object(ob)
#export stl file of triangulated object into the object folder
stl_path = object_folder + '/' + filename + '.stl'
bpy.ops.export_mesh.stl(filepath=stl_path)
#export fbx file
fbx_path = object_folder + '/' + filename + '.fbx'
bpy.ops.export_scene.fbx(filepath =fbx_path)
# Compute the ellipsoid radii (min_max_axis appends x, y, z into E_radi).
min_max_axis(ob)
# Normalize E_radi so the three radii sum to 1.
radi_sum = E_radi[0] + E_radi[1] + E_radi[2]
E_radi[0] = E_radi[0] /radi_sum
E_radi[1] = E_radi[1] /radi_sum
E_radi[2] = E_radi[2] /radi_sum
E = open(object_folder +'/'+ 'E' + filename, 'w+')
E.write(str(E_radi[0]) + '~' + str(E_radi[1]) + '~' + str(E_radi[2]))
E.close()
| whorticulterist/RotationSimulation | blender objects/python_scripts/Initial_Individual_Processing.py | Initial_Individual_Processing.py | py | 2,397 | python | en | code | 0 | github-code | 36 |
39099199233 | # Incomplete
import os
import random
def game():
    """One (incomplete) hangman round: pick a random word, then prompt for a letter.

    Bug fixes: the original called the chosen *string* as a function
    (``random_word = word(words)`` -> TypeError) and kept the trailing
    newline on every word read from the file.
    """
    with open("./files/data.txt", "r", encoding="UTF-8") as data:
        words = [word.strip() for word in data]
    print(words)
    random_word = random.choice(words)
    print(random_word)
    # NOTE(review): game.txt is opened for writing but never written, and
    # ``letter`` is unused -- the game loop is marked incomplete upstream.
    with open("./files/game.txt", "w", encoding="UTF-8") as file:
        print(words)
        letter = input("Put a letter: ")
def main():
    """Clear the console and start one game round."""
    # NOTE(review): "cls" is Windows-only; on POSIX this just prints an error.
    os.system("cls")
    #Print the game
    game()
if __name__ == "__main__":
main() | ArturoCBTyur/Prueba_Nueva | hangman.py | hangman.py | py | 523 | python | en | code | 0 | github-code | 36 |
25048960763 | from Crypto.Cipher import AES # AES (all modes)
from Crypto.Util import Counter # AES CTR
from os import urandom # AES CBC or CTR
#from Crypto import Random # AES CBC or CTR
#Random.get_random_bytes(16) # AES CBC or CTR
from Crypto.Cipher import PKCS1_OAEP # RSA
from Crypto.PublicKey import RSA # RSA
#module 'time' has no attribute 'clock?
def pad16(string):
    """Pad *string* with '#' up to the next multiple of 16 characters.

    Strings already a multiple of 16 long (including the empty string) are
    returned unchanged.
    """
    block_size = 16
    remainder = len(string) % block_size
    if remainder:
        return string + '#' * (block_size - remainder)
    return string
def unpad16(string):
    """Strip the '#' padding added by pad16 from both ends.

    NOTE(review): this also removes legitimate leading/trailing '#'
    characters from the payload -- a known limitation of this padding scheme.
    """
    return string.strip('#')
class RSA_cipher (object):
    """RSA key generation plus PKCS#1 OAEP encrypt/decrypt (PyCryptodome)."""
    def __init__(self, k):
        # Key length in bits (1024 is the practical minimum per the old comment).
        self.KEY_LENGTH = k
        # self.KEY_LENGTH = 1024 # Minimum value, better use method set_key_length()
    #def set_key_length (self, k):
    #    self.KEY_LENGTH = k
    def generate_keypair (self):
        """Return a (public, private) key pair, both DER-encoded bytes."""
        key = RSA.generate(self.KEY_LENGTH)
        pubkey = key.publickey().exportKey("DER")
        privkey = key.exportKey("DER")
        return (pubkey,privkey)
    def encrypt (self, pub, message):
        """Encrypt *message* with the DER-encoded public key *pub* (OAEP)."""
        key = RSA.importKey(pub)
        cipher = PKCS1_OAEP.new(key)
        ciphertext = cipher.encrypt(message)
        return ciphertext
    def decrypt (self, priv, ciphertext):
        """Decrypt *ciphertext* with the DER-encoded private key *priv* (OAEP)."""
        key = RSA.importKey(priv)
        cipher = PKCS1_OAEP.new(key)
        message = cipher.decrypt(ciphertext)
        return message
class AES_ECB (object): # USER FOR host_decrypt AND host_encrypt
    """AES in ECB mode with '#' padding.

    NOTE(review): ECB leaks plaintext block patterns -- acceptable for a
    demo only, never for real data.
    """
    def __init__(self, k):
        self.KEY = pad16(k)
        # ECB needs no IV, so one cipher object can be reused for all calls.
        self.cipher = AES.new(self.KEY, AES.MODE_ECB)
    def encrypt(self, s):
        s = pad16(s)
        return self.cipher.encrypt(s)
    def decrypt(self, s):
        t = self.cipher.decrypt(s)
        return unpad16(t)
class AES_CBC (object):
    """AES in CBC mode: a fresh random IV per message, prepended to the ciphertext."""
    def __init__(self, k):
        self.KEY = pad16(k)
    def encrypt(self, s):
        # New random IV for every encryption; it is not secret and is
        # shipped as the first 16 bytes of the output.
        iv = urandom(16)
        s = pad16(s)
        enc_cipher = AES.new(self.KEY, AES.MODE_CBC, iv)
        return iv + enc_cipher.encrypt(s)
    def decrypt(self, s):
        # First 16 bytes are the IV written by encrypt().
        iv = s[:16]
        dec_cipher = AES.new(self.KEY, AES.MODE_CBC, iv)
        t = dec_cipher.decrypt(s[16:])
        return unpad16(t)
class AES_CTR (object): # KEY = 128 or 256 bit IV = 128 bit # BLOCK SIZE = 128 bit
    """AES in CTR mode with a random per-message IV prepended to the ciphertext.

    Bug fix: the original seeded the counter with ``long(iv.encode('hex'), 16)``
    -- both ``long`` and ``str.encode('hex')`` are Python-2-only and raise under
    Python 3 (which this file otherwise targets; it uses print()).
    ``int.from_bytes(iv, 'big')`` is the exact Python-3 equivalent.
    """
    def __init__(self, k):
        self.KEY = pad16(k)  # KEY 128 or 256 bit (padded)
    def encrypt(self, s):
        iv = urandom(16)  # fresh random IV (128 bit) for every encryption
        ctr = Counter.new(128, initial_value=int.from_bytes(iv, 'big'))  # init counter
        enc_cipher = AES.new(self.KEY, AES.MODE_CTR, counter=ctr)  # init cipher
        s = pad16(s)  # message padding (multiple of 128 bit)
        return iv + enc_cipher.encrypt(s)  # minimum output 32 byte: IV + ENC_MESSAGE
    def decrypt(self, s):
        iv = s[:16]  # get IV (first 128 bit)
        ctr = Counter.new(128, initial_value=int.from_bytes(iv, 'big'))  # init counter
        dec_cipher = AES.new(self.KEY, AES.MODE_CTR, counter=ctr)  # init cipher
        t = dec_cipher.decrypt(s[16:])  # decrypt (IV is excluded)
        return unpad16(t)  # return unpadded message
# GOOD TEST OF RSA
####################################################################################
print ("-------------------\nRSA:\n\n")
r = RSA_cipher(4096)
pub, priv = r.generate_keypair()
#print pub
#print priv
enc = r.encrypt(pub, "test_string")
dec = r.decrypt(priv, enc)
print ("ENC: " + str(enc) ) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-ECB
####################################################################################
print ("\n\n-------------------\nAES ECB:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_ECB (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-CBC
####################################################################################
print ("\n\n-------------------\nAES CBC:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_CBC (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-CTR <-- Suggested
####################################################################################
print ("\n\n-------------------\nAES CTR:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_CTR (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
print ("DEC2: " + str( c.decrypt(enc) )) #.encode('hex') | chusanapunn/RSA_AY3T2 | A1/aeR.py | aeR.py | py | 4,658 | python | en | code | 0 | github-code | 36 |
1685939480 | import pandas as pd
import geopandas as gpd
from sqlalchemy import create_engine
from shapely.geometry import box
from SGIS.api import SGISRequest
from credentials.database import AP
# Script: geocode patient addresses via SGIS (GET=True) or load the already
# persisted key_locations table from PostGIS (GET=False).
CSV = '/Users/dongookson/Code/data-project/SGIS/key_locations/ad8.csv'
QUERY = 'select * from key_locations'
GET = False
# connect to database AP
engine = create_engine(AP)
if GET:
    # read dummy data -- only the first 10 rows for a smoke run
    df = pd.read_csv(CSV)[0:10]
    # Make API GET request
    patient_locs = [SGISRequest().geocode_addr(patient) for patient in df['Address']]
    # EPSG:5179 (Korea 2000 / Unified CS) coordinates returned by SGIS.
    df['x_5179'] = [p.get('x') for p in patient_locs]
    df['y_5179'] = [p.get('y') for p in patient_locs]
    # create geodataframe
    gdf = gpd.GeoDataFrame(
        df,
        crs='epsg:5179',
        geometry=gpd.points_from_xy(x=df.x_5179, y=df.y_5179)
    )
    # create well-known-text(wkt) column for WGS84
    gdf['wkt_4326'] = gdf.to_crs(4326)['geometry']
    # write as table
    gdf.to_postgis(
        'key_locations',
        engine
    )
else:
    # Read back the persisted table instead of re-geocoding.
    gdf = gpd.read_postgis(QUERY, engine, geom_col='geometry')
print(gdf.head(10))
# print(gdf.crs)
# print(gdf['geometry'].sindex.query(box(988969.330849867, 988969.33084999, 1818020.086700, 1818020.0860560)))
# print(type(gdf['geometry'])) | donny-son/airhealth-database | SGIS/test_geocode.py | test_geocode.py | py | 1,276 | python | en | code | 0 | github-code | 36 |
37220479023 | import json
with open('settings.json','r',encoding='utf8') as token:
data = json.load(token)
import requests
import subprocess
from flask import Flask, render_template, request, abort, make_response, jsonify
from datetime import datetime, timezone, timedelta
import firebase_admin
from firebase_admin import credentials, firestore
cred = credentials.Certificate("project-analytics-8acd9-firebase-adminsdk-6usuy-2415c74209.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
from bs4 import BeautifulSoup
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import (MessageEvent, TextMessage, TextSendMessage, ImageSendMessage, LocationSendMessage)
line_bot_api = LineBotApi(data["LineBotApi"])
handler = WebhookHandler(data["webhook"])
app = Flask(__name__)
@app.route('/')
def index():
    """Assemble the landing page: an HTML menu linking every demo route."""
    links = [
        "<h1>許哲睿Python測試網頁</h1>",
        "<a href=/mis>MIS</a><br>",
        "<a href=/current>開啟網頁及顯示日期時間</a><br>",
        "<a href=/welcome?nick=許哲睿>開啟網頁及傳送使用者暱稱</a><br>",
        "<a href=/login>透過表單輸入名字傳值</a><br>",
        "<a href=/hi>計算總拜訪次數</a><br>",
        "<a href=/aboutme>關於子青老師 (響應式網頁實例)</a><br>",
        "<br><a href=/read>讀取Firestore資料</a><br>",
        "<a href=/resource>MIS resource</a><br>",
        "<br><a href=/spider>讀取開眼電影即將上映影片,寫入Firestore</a><br>",
        "<br><a href=/search>輸入關鍵字進行資料查詢</a><br>",
    ]
    return "".join(links)
@app.route('/mis')
def course():
    """Static page: the MIS course title heading."""
    return "<h1>資訊管理導論</h1>"
@app.route('/current')
def current():
    """Render current.html with the current date/time in UTC+8 (Taipei)."""
    taipei_tz = timezone(timedelta(hours=+8))
    return render_template("current.html", datetime=str(datetime.now(taipei_tz)))
@app.route('/welcome', methods=["GET", "POST"])
def welcome():
    """Greet the visitor using the ``nick`` query/form parameter."""
    nickname = request.values.get("nick")
    return render_template("welcome.html", name=nickname)
@app.route('/hi')
def hi():
    """Increment and report the site-wide visit counter stored in count.txt.

    Improvement: the original used bare open()/close() pairs, leaking the
    file handle if int()/read()/write() raised; ``with`` guarantees closure.
    NOTE(review): read-increment-write is still racy under concurrent
    requests -- acceptable for this classroom demo.
    """
    with open('count.txt', "r") as f:
        count = int(f.read())
    count += 1  # 計數加1
    with open('count.txt', "w") as f:  # 覆寫檔案
        f.write(str(count))
    return "本網站總拜訪人次:" + str(count)
@app.route("/login", methods=["POST","GET"])
def login():
if request.method == "POST":
user = request.form["nm"]
return "您輸入的名字為:" + user
else:
return render_template("login.html")
@app.route("/resource")
def classweb():
return render_template("links.html")
@app.route("/aboutme")
def about():
tz = timezone(timedelta(hours=+8))
now = datetime.now(tz)
return render_template("aboutme.html",datetime = str(now))
@app.route("/read")
def read():
Result = ""
collection_ref = db.collection("靜宜資管")
docs = collection_ref.order_by(
"mail", direction=firestore.Query.DESCENDING).get()
for doc in docs:
Result += "文件內容:{}".format(doc.to_dict()) + "<br>"
return Result
@app.route('/spider')
def spider():
    """Scrape atmovies.com.tw "coming soon" listings into the 電影 collection."""
    url = "http://www.atmovies.com.tw/movie/next/"
    Data = requests.get(url)
    Data.encoding = "utf-8"
    sp = BeautifulSoup(Data.text, "html.parser")
    result = sp.select(".filmListAllX li")
    # [5:] drops a fixed-length label prefix -- fragile if the site changes.
    lastUpdate = sp.find("div", class_="smaller09").text[5:]
    for item in result:
        picture = item.find("img").get("src").replace(" ", "")
        title = item.find("div", class_="filmtitle").text
        # The movie id is whatever remains of the href after stripping slashes
        # and the literal "movie" segment.
        movie_id = item.find("div", class_="filmtitle").find(
            "a").get("href").replace("/", "").replace("movie", "")
        hyperlink = "http://www.atmovies.com.tw" + \
            item.find("div", class_="filmtitle").find("a").get("href")
        show = item.find("div", class_="runtime").text.replace("上映日期:", "")
        show = show.replace("片長:", "")
        show = show.replace("分", "")
        # Fixed-offset slicing assumes "YYYY/MM/DD...<3 chars>...length" layout
        # of the runtime div -- verify against the live page before relying on it.
        showDate = show[0:10]
        showLength = show[13:]
        doc = {
            "title": title,
            "picture": picture,
            "hyperlink": hyperlink,
            "showDate": showDate,
            "showLength": showLength,
            "lastUpdate": lastUpdate
        }
        # Upsert keyed by the scraped movie id.
        doc_ref = db.collection("電影").document(movie_id)
        doc_ref.set(doc)
    return "近期上映電影已爬蟲及存檔完畢,網站最近更新日期為:" + lastUpdate
@app.route("/search", methods=["POST", "GET"])
def search():
if request.method == "POST":
MovieTitle = request.form["MovieTitle"]
collection_ref = db.collection("電影")
docs = collection_ref.order_by("showDate").get()
info = ""
for doc in docs:
if MovieTitle in doc.to_dict()["title"]:
info += "片名:" + doc.to_dict()["title"] + "<br>"
info += "海報:" + doc.to_dict()["picture"] + "<br>"
info += "影片介紹:" + doc.to_dict()["hyperlink"] + "<br>"
info += "片長:" + doc.to_dict()["showLength"] + " 分鐘<br>"
info += "上映日期:" + doc.to_dict()["showDate"] + "<br><br>"
return info
else:
return render_template("input.html")
@app.route("/callback", methods=["POST"])
def callback():
# get X-Line-Signature header value
signature = request.headers["X-Line-Signature"]
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return "OK"
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Route incoming LINE text messages: MOVIE <kw> search, TCYANG image,
    PU location, anything else an echo reply."""
    message = event.message.text
    # "MOVIE xxx" -> text search; [6:] skips the keyword and its separator.
    if(message[:5].upper() == 'MOVIE'):
        res = searchMovie(message[6:])
        line_bot_api.reply_message(
            event.reply_token, TextSendMessage(text=res))
    elif(message.upper() == "TCYANG"):
        line_bot_api.reply_message(event.reply_token, ImageSendMessage(
            original_content_url = "https://www1.pu.edu.tw/~tcyang/aboutme/family.jpg",
            preview_image_url = "https://www1.pu.edu.tw/~tcyang/aboutme/family.jpg"
        ))
    elif(message.upper() == "PU"):
        # Providence University campus location card.
        line_bot_api.reply_message(event.reply_token, LocationSendMessage(
            title="靜宜大學地理位置",
            address="台中市沙鹿區臺灣大道七段200號",
            latitude=24.22649,
            longitude=120.5780923
        ))
    else:
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text="我是電影機器人,您輸入的是:" + message + "。祝福您有個美好的一天!"))
def searchMovie(keyword):
    """Build the LINE reply text listing movies whose title contains *keyword*."""
    parts = ["您要查詢電影,關鍵字為:" + keyword + "\n"]
    docs = db.collection("電影").order_by("showDate").get()
    matched = False
    for doc in docs:
        record = doc.to_dict()
        if keyword in record["title"]:
            matched = True
            parts.append("片名:" + record["title"] + "\n")
            parts.append("海報:" + record["picture"] + "\n")
            parts.append("影片介紹:" + record["hyperlink"] + "\n")
            parts.append("片長:" + record["showLength"] + " 分鐘\n")
            parts.append("上映日期:" + record["showDate"] + "\n\n")
    if not matched:
        parts.append("很抱歉,目前無符合這個關鍵字的相關電影喔")
    return "".join(parts)
@app.route("/webhook", methods=["POST"])
def webhook():
# build a request object
req = request.get_json(force=True)
# fetch queryResult from json
action = req.get("queryResult").get("action")
#msg = req.get("queryResult").get("queryText")
#info = "動作:" + action + "; 查詢內容:" + msg
if (action == "CityWeather"):
city = req.get("queryResult").get("parameters").get("city")
info = "查詢都市名稱:" + city + ",天氣:"
city = city.replace("台", "臺")
token = data["token"]
url = "https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-C0032-001?Authorization=" + \
token + "&format=JSON&locationName=" + str(city)
Data = requests.get(url)
Weather = json.loads(Data.text)[
"records"]["location"][0]["weatherElement"][0]["time"][0]["parameter"]["parameterName"]
Rain = json.loads(Data.text)[
"records"]["location"][0]["weatherElement"][1]["time"][0]["parameter"]["parameterName"]
info += Weather + ",降雨機率:" + Rain + "%"
elif (action == "searchMovie"):
cond = req.get("queryResult").get("parameters").get("FilmQ")
keyword = req.get("queryResult").get("parameters").get("any")
info = "您要查詢電影的" + cond + ",關鍵字是:" + keyword + "\n\n"
if (cond == "片名"):
collection_ref = db.collection("電影")
docs = collection_ref.order_by("showDate").get()
found = False
for doc in docs:
if keyword in doc.to_dict()["title"]:
found = True
info += "片名:" + doc.to_dict()["title"] + "\n"
info += "海報:" + doc.to_dict()["picture"] + "\n"
info += "影片介紹:" + doc.to_dict()["hyperlink"] + "\n"
info += "片長:" + doc.to_dict()["showLength"] + " 分鐘\n"
info += "上映日期:" + doc.to_dict()["showDate"] + "\n\n"
if not found:
info += "很抱歉,目前無符合這個關鍵字的相關電影喔"
return make_response(
jsonify({
"fulfillmentText": info,
"fulfillmentMessages": [
{"quickReplies": {
"title": info,
"quickReplies": ["台北天氣", "台中天氣", "高雄天氣"]
}}]
}))
if __name__ == "__main__":
app.run() | NTX8205/LineBot | flask_server.py | flask_server.py | py | 10,110 | python | en | code | 0 | github-code | 36 |
36955527539 | from suite_subprocess import suite_subprocess
from wtscenario import make_scenarios
import wiredtiger, wttest
# test_huffman01.py
# Huffman key and value configurations
# Basic smoke-test of huffman key and value settings.
class test_huffman01(wttest.WiredTigerTestCase, suite_subprocess):
    """
    Test basic operations
    """
    table_name = 'table:test_huff'
    # One scenario per huffman_value setting; vfile names the symbol-frequency
    # file that must exist for the UTF variants.
    huffval = [
        ('none', dict(huffval=',huffman_value=none',vfile=None)),
        ('english', dict(huffval=',huffman_value=english',vfile=None)),
        ('utf8', dict(huffval=',huffman_value=utf8t8file',vfile='t8file')),
        ('utf16', dict(huffval=',huffman_value=utf16t16file',vfile='t16file')),
    ]
    scenarios = make_scenarios(huffval)
    def test_huffman(self):
        # Smoke test: creating the table with each huffman_value must succeed.
        dir = self.conn.get_home()
        # if self.vfile != None and not os.path.exists(self.vfile):
        if self.vfile != None:
            f = open(dir + '/' + self.vfile, 'w')
            # For the UTF settings write some made-up frequency information.
            f.write('48 546233\n49 460946\n')
            f.write('0x4a 546233\n0x4b 460946\n')
            f.close()
        config= self.huffval
        self.session.create(self.table_name, config)
# Test Huffman encoding ranges.
class test_huffman_range(wttest.WiredTigerTestCase):
    # Each test writes an intentionally invalid frequency file and expects
    # table creation to fail with a specific error message.
    table_name = 'table:test_huff'
    # Test UTF8 out-of-range symbol information.
    def test_huffman_range_symbol_utf8(self):
        dir = self.conn.get_home()
        f = open(dir + '/t8file', 'w')
        # UTF-8 symbols must be < 256; 256/257 are out of range.
        f.write('256 546233\n257 460946\n')
        f.close()
        config="huffman_value=utf8t8file"
        msg = '/not in range/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.create(self.table_name, config), msg)
    # Test UTF16 out-of-range symbol information.
    def test_huffman_range_symbol_utf16(self):
        dir = self.conn.get_home()
        f = open(dir + '/t16file', 'w')
        # UTF-16 symbols must be < 65536.
        f.write('65536 546233\n65537 460946\n')
        f.close()
        config="huffman_value=utf16t16file"
        msg = '/not in range/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.create(self.table_name, config), msg)
    # Test out-of-range frequency information.
    def test_huffman_range_frequency(self):
        # Write out-of-range frequency information.
        dir = self.conn.get_home()
        f = open(dir + '/t8file', 'w')
        # Frequencies must fit in 32 bits (< 2^32).
        f.write('48 4294967296\n49 4294967297\n')
        f.close()
        config="huffman_value=utf8t8file"
        msg = '/not in range/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.create(self.table_name, config), msg)
    # Test duplicate symbol information.
    def test_huffman_range_symbol_dup(self):
        dir = self.conn.get_home()
        f = open(dir + '/t8file', 'w')
        # Symbol 100 appears twice -> duplicate-symbol error.
        f.write('100 546233\n101 460946\n')
        f.write('102 546233\n100 460946\n')
        f.close()
        config="huffman_value=utf8t8file"
        msg = '/duplicate symbol/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.create(self.table_name, config), msg)
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_huffman01.py | test_huffman01.py | py | 3,250 | python | en | code | 24,670 | github-code | 36 |
41585106305 | import json
# # Data to be written
# dictionary = {
# "hello":"lol"
# }
# # Serializing json
# json_object = json.dumps(dictionary, indent=4)
# Writing to sample.json
# with open("sample.json", "a") as outfile:
# outfile.write(json_object)
filename="sample.json"
entry={'hello','lol1'}
with open(filename, "r") as file:
data = json.load(file)
# 2. Update json object
data.append(entry)
# 3. Write json file
with open(filename, "w") as file:
json.dump(data, file) | AugeGottes/Yet-Another-Kafka | test.py | test.py | py | 484 | python | en | code | 0 | github-code | 36 |
36335146377 | import json, sys, time, os
import requests as rq
import soundcloud as sc
id = "ql3NWDyvuRgjpzwArS8lYmm2SrVGYLDz"
scurl = "https://api-v2.soundcloud.com/"
qcliid = "?client_id=ql3NWDyvuRgjpzwArS8lYmm2SrVGYLDz"
client = sc.Client(client_id=id)
class Track:
    """A SoundCloud track resolved from a public URL, with optional download."""
    def __init__(self, inp):
        # Resolve the page URL to the numeric track id, then fetch metadata.
        data = json.loads(resolve(inp).text)
        resp = query("/tracks/" + str(data['id']))
        self.resp = resp
        self.content = parse(resp)
        self.id = self.content['id']
        self.name = self.content['title']
        self.artistid = self.content['user_id']
        self.artist = self.content['user']['username']
        # These monetization models are treated as freely downloadable.
        if (self.content['monetization_model'] == 'AD_SUPPORTED') or (self.content['monetization_model'] == 'BLACKBOX') or (self.content['monetization_model'] == 'NOT_APPLICABLE'):
            self.downloadable = True
            try:
                # transcodings[1] is presumably the progressive MP3 stream -- verify against the API response.
                self.mpeg = self.content['media']['transcodings'][1]['url'] + qcliid
            except IndexError:
                print("WIP")
                self.downloadable = False
        else:
            self.downloadable = False
    def getMpeg(self):
        """Fetch the actual media bytes (the transcoding URL returns a JSON wrapper with the real URL)."""
        url = parse(rq.get(self.mpeg))['url']
        return rq.get(url)
    def download(self):
        """Stream the track to '<title> -- <artist>.mp3' in the current directory."""
        if self.downloadable:
            resp = self.getMpeg()
            name = self.name + " -- " + self.artist + ".mp3"
            # Path separators in titles would break the filename.
            name = name.replace('/', '|')
            name = name.replace('\\', '|')
            with open(name, "wb") as mpeg:
                for chunk in resp.iter_content(chunk_size=1024):
                    if chunk:
                        mpeg.write(chunk)
        else:
            print(self.name + " is not downloadable")
class Playlist:
    """A SoundCloud playlist resolved from a public URL; wraps its tracks as Track objects."""
    def __init__(self, inp):
        data = json.loads(resolve(inp).text)
        try:
            resp = query("/playlists/" + str(data['id']))
        except KeyError:
            # No 'id' in the resolved payload: not a (public) playlist.
            print("There was an error. Are you sure this is a playlist? If you are, is it public?")
            sys.exit()
        self.resp = resp
        self.content = parse(resp)
        self.name = self.content['title']
        self.id = self.content['id']
        self.artistid = self.content['user_id']
        self.artist = self.content['user']['username']
        tracks = self.content['tracks']
        # Re-resolve each track id to build full Track objects (one request each).
        objTracks = []
        for track in tracks:
            temp = Track(idToUrl(track['id']))
            objTracks.append(temp)
        self.tracks = objTracks
    def download(self):
        """Download every track into a new '<title> -- <artist>' subdirectory."""
        cwd = os.getcwd()
        title = self.name + " -- " + self.artist
        path = os.path.join(cwd, title)
        os.mkdir(path)
        os.chdir(path)
        for track in self.tracks:
            track.download()
        # Restore the original working directory.
        os.chdir(cwd)
class User:
    """A SoundCloud user profile with tracks, playlists, social graph and likes."""
    def __init__(self, inp):
        data = json.loads(resolve(inp).text)
        resp = query("/users/" + str(data['id']))
        self.resp = resp
        self.content = parse(resp)
        self.id = self.content['id']
        self.name = self.content['full_name']
        # Each of these is a separate API round-trip.
        self.tracks = parse(query("/users/" + str(data['id']) + "/tracks"))
        self.playlists = parse(query("/users/" + str(data['id']) + "/playlists"))
        self.followings = parse(query("/users/" + str(data['id']) + "/followings"))
        self.followers = parse(query("/users/" + str(data['id']) + "/followers"))
        self.comments = parse(query("/users/" + str(data['id']) + "/comments"))
        self.webProfiles = parse(query("/users/" + str(data['id']) + "/web-profiles"))
        # Liked tracks are materialised as full Track objects (one request each).
        likes = parse(query("/users/" + str(data['id']) + "/track_likes"))
        likes = likes['collection']
        objLikes = []
        for like in likes:
            temp = Track(idToUrl(like['track']['id']))
            objLikes.append(temp)
        self.likes = objLikes
    def downloadLikes(self):
        """Download all liked tracks into a new "<name>'s likes" subdirectory."""
        cwd = os.getcwd()
        title = self.name + "'s likes"
        path = os.path.join(cwd, title)
        os.mkdir(path)
        os.chdir(path)
        for like in self.likes:
            like.download()
        # Restore the original working directory.
        os.chdir(cwd)
def resolve(inp):
    """Resolve a public SoundCloud URL via the /resolve endpoint and fetch it
    from the api-v2 host.

    NOTE(review): this deliberately relies on the HTTPError path -- the v1
    client call is expected to fail with 403 and the target URL is scraped
    out of the exception text by convertApiv2().  If the call succeeds,
    ``out`` is a response object, not a string -- confirm that never happens.
    """
    out = ''
    try:
        out = client.get("/resolve", url=inp)
    except rq.exceptions.HTTPError as e:
        out = str(e)
    url = convertApiv2(out)
    resp = rq.get(url)
    return resp
def convertApiv2(resp):
    """Rewrite the first 'api' segment of *resp* to 'api-v2' and drop the
    requests HTTPError prefix if present.

    Bug fix: the original ended with ``url.strip("403 Client Error: Forbidden
    for url: ")``.  str.strip() treats its argument as a *character set*, so
    it also ate legitimate leading/trailing URL characters (e.g. the trailing
    '3' of a track id ending in 123).  Remove the exact prefix instead.
    """
    spliturl = resp.split('api', 1)
    url = spliturl[0] + "api-v2" + spliturl[1]
    prefix = "403 Client Error: Forbidden for url: "
    if url.startswith(prefix):
        url = url[len(prefix):]
    return url
def parse(resp): return json.loads(resp.text)
def query(inp):
    """GET an arbitrary API path, rewritten to the api-v2 host.

    NOTE(review): duplicates resolve() except for the client call, and relies
    on the same trick -- the v1 request is expected to raise a 403 HTTPError
    whose message text contains the target URL.
    """
    out = ''
    try:
        out = client.get(inp)
    except rq.exceptions.HTTPError as e:
        out = str(e)
    url = convertApiv2(out)
    resp = rq.get(url)
    return resp
def idToUrl(inp):
    """Map a numeric track id to its public permalink URL via the v2 API."""
    url = scurl + "tracks/" + str(inp) + qcliid
    resp = rq.get(url)
    return parse(resp)['permalink_url']
# ADD CODE HERE
| idosyncrasi/soundcloud-dl | main.py | main.py | py | 4,200 | python | en | code | 0 | github-code | 36 |
34376075705 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from django.views.generic.edit import CreateView
import views
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
# url('^', include('django.contrib.auth.urls')),
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^login/$', login,
{'template_name': 'login.html'}),
url(r'^logout/$', logout,
{'template_name': 'logout.html'}),
url(r'^register/$', views.RegisterView.as_view(), name='register'),
url(r'^wall/', include('wall.urls', namespace="wall")),
)
| jonathantumulak/facebookwall | facebookwall/src/facebookwall/facebookwall/urls.py | urls.py | py | 689 | python | en | code | 0 | github-code | 36 |
27745446285 | import numpy as np
class Data:
def __init__(self):
with open('goblet_book.txt', 'r') as file:
content = file.read()
self.book_data = np.array(list(content))
self.book_chars = np.unique(self.book_data)
self.K = len(self.book_chars)
self.dict_int_to_char = dict(zip( np.arange(self.K), self.book_chars))
self.dict_char_to_int = dict(zip(self.book_chars, np.arange(self.K)))
def seq_to_vec(self, y):
y_encoded = [self.dict_char_to_int[char] for char in y]
vec = np.zeros((len(y_encoded), self.K))
vec[np.arange(len(y_encoded)), y_encoded] = 1
return vec.T
def vec_to_seq(self, vec):
ints = np.argmax(vec.T, 1)
seq = [self.dict_int_to_char[int] for int in ints]
return seq
data = Data()
y = ["a", "l", "a", "n"]
a = 2 | alanroussel/ML-assignments | 4_RNN/dataset.py | dataset.py | py | 757 | python | en | code | 0 | github-code | 36 |
30590086381 | """
删除链表的倒数第N个节点
"""
# 1
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# 快慢指针
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
pre = ListNode(0)
pre.next = head
slow, fast = pre, pre
for _ in range(n):
fast = fast.next
while fast and fast.next:
slow, fast = slow.next, fast.next
slow.next = slow.next.next
return pre.next
# 递归迭代 -- 回溯时,进行节点计数
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
if not head:
self.count = 0
return head
head.next = self.removeNthFromEnd(head.next, n)
self.count += 1
return head.next if self.count == n else head
| leon1peng/alg_t | 刷题狂魔/Week06/删除链表的倒数第N个节点.py | 删除链表的倒数第N个节点.py | py | 875 | python | en | code | 1 | github-code | 36 |
32194079583 | import time
import os
import sqlite3
import mysql.connector
import json
import utils.normalize_data
import git
import importlib
def process_requests():
maria_conn = None
lite_conn = None
try:
maria_cnf_FD = open('db_conf.json')
maria_cnf = json.load(maria_cnf_FD)
maria_cnf_FD.close()
if not os.path.isdir('depot'):
print('ERROR: Depot dir not found')
return
lite_conn = sqlite3.connect('data/git_tasks.db')
maria_conn = mysql.connector.connect(host=maria_cnf['host'],user=maria_cnf['user'],
password=maria_cnf['password'],database='git_info')
lite_cur = lite_conn.cursor()
# maria_cur = maria_conn.cursor()
while True:
lite_cur.execute('SELECT * FROM tasks WHERE start_date IS NULL \
ORDER BY priority,req_date')
print(lite_cur)
row = lite_cur.fetchone()
if row:
print(row)
# time.sleep(60)
lite_cur.execute('UPDATE tasks \
SET start_date = DateTime("now","localtime") \
WHERE task_id = ?', (row[0],))
lite_conn.commit()
# Extract request type
try:
request = json.loads(row[3])
task_mod = importlib.import_module('depot_manager.' + request['_C'])
result = task_mod.process(request, maria_conn, lite_conn)
except json.decoder.JSONDecodeError:
print('BAD REQUEST FORMAT: ' + row[3])
result = (True, json.dumps({'status': 'BAD REQUEST FORMAT'}))
except ModuleNotFoundError:
print('INVALID REQUEST TYPE: ' + str(request))
result = (True, json.dumps({'status': 'INVALID REQUEST TYPE'}))
except KeyError:
print('BAD REQUEST FORMAT: ' + str(request))
result = (True, json.dumps({'status': 'BAD REQUEST FORMAT'}))
lite_cur.execute('UPDATE tasks \
SET answer = ?, end_date = DateTime("now","localtime") \
WHERE task_id = ?', (result[1],row[0]))
if result[0]:
lite_cur.execute('UPDATE tasks \
SET ack_date = DateTime("now","localtime") \
WHERE task_id = ?', (row[0],))
lite_conn.commit()
time.sleep(15)
lite_conn.close()
maria_conn.close()
except sqlite3.Error as e:
print('Database Error, Exiting server')
print(e)
lite_conn = None
except mysql.connector.Error as e:
print('Database Error, Exiting server')
print(e)
maria_conn = None
except json.JSONDecodeError:
print('ERROR Reading json config file')
except KeyboardInterrupt:
pass
finally:
if lite_conn:
lite_conn.close()
if maria_conn:
maria_conn.close()
print('STOP')
| mathieu-bergeron/aquiletour2021 | dockers/git/depot_manager/task_processor.py | task_processor.py | py | 3,103 | python | en | code | 0 | github-code | 36 |
70911809705 | import os
import sys
import re
import json
import math
import argparse
import time
import subprocess
import numpy as np
import networkx as nx
import tensorflow as tf
import datetime
from operator import itemgetter
import collections
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
FATHER_PATH = os.path.join(FILE_PATH, '..')
DATA_PATH = os.path.join(FATHER_PATH, 'data')
def main():
    """Split an edge-list network file into an initial subgraph plus dynamic updates.

    Reads ``<input_file>_nw.dat`` from DATA_PATH, keeps the first
    ``ratio * total_nodes`` node ids as the initial graph, and groups every
    remaining edge under its largest endpoint so nodes can be streamed in one
    at a time. Optionally adds a self-loop to every node. Writes
    ``*_nw_init`` and ``*_nw_dynamic`` files next to the input.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--input_file', type=str, required=True)
    parser.add_argument('--ratio', type=float, required=True)
    parser.add_argument('--self_loop', type=str, default="yes")
    args = parser.parse_args()

    # args.input_file becomes absolute here; joining DATA_PATH again below is a
    # no-op for absolute paths, so the resulting filenames are unchanged.
    args.input_file = os.path.join(DATA_PATH, args.input_file)
    nw_file = os.path.join(DATA_PATH, args.input_file + "_nw.dat")

    n = 0           # number of nodes kept in the initial graph
    m = 0           # largest node id seen in the file
    G_init = []     # edges whose endpoints are both in the initial graph
    G_dynamic = {}  # max endpoint id -> edges introduced when that node arrives
    with open(nw_file, "r") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            items = line.split()
            if len(items) == 1:
                # Single-number header line holds the total node count.
                n = int(args.ratio * float(items[0]))
            if len(items) != 2:
                continue
            m = max(int(items[1]), int(items[0]), m)
            if int(items[1]) < n and int(items[0]) < n:
                G_init.append(items)
            else:
                it = max(int(items[0]), int(items[1]))
                G_dynamic.setdefault(it, []).append(items)

    if args.self_loop == "yes":
        # BUG FIX: the original used the Python-2-only `xrange`, which raises
        # NameError under Python 3 (this file already uses Python-3 imports).
        for i in range(n):
            G_init.append((str(i), str(i)))
        for i in range(n, m + 1):
            G_dynamic.setdefault(i, []).append((str(i), str(i)))

    init_nw_file = os.path.join(DATA_PATH, args.input_file + "_" + str(n) + "_" + str(args.ratio) + "_nw_init")
    dynamic_nw_file = os.path.join(DATA_PATH, args.input_file + "_" + str(n) + "_" + str(args.ratio) + "_nw_dynamic")
    with open(init_nw_file, "w") as f:
        f.write(str(n) + "\n")
        for u, v in G_init:
            f.write(str(u) + "\t" + str(v) + "\n")

    # Dynamic nodes are emitted in increasing id order: one header line
    # "<node>\t<edge count>" followed by that node's edges and a blank line.
    tmp = [(k, G_dynamic[k]) for k in sorted(G_dynamic.keys())]
    with open(dynamic_nw_file, "w") as f:
        for u, s in tmp:
            f.write(str(u) + "\t" + str(len(s)) + "\n")
            for v, w in s:
                f.write(str(v) + "\t" + str(w) + "\n")
            f.write("\n")
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| luke28/DNE | tools/get_input.py | get_input.py | py | 2,670 | python | en | code | 8 | github-code | 36 |
42406250255 | import json
from flask import url_for
from flask_login import current_user
from flask_mail import Message
from bd_project import mail
from bd_project.models import OrderList, Product
from bd_project.classes import UserHelper
from bd_project.models import Order
def add_current_ordered_products(order_products_by_current_user, order_id):
    """Persist one OrderList row per ordered product for the given order.

    Each element of *order_products_by_current_user* maps a product id to a
    dict carrying at least an 'amount' entry.
    """
    for product_map in order_products_by_current_user:
        for product_id, details in product_map.items():
            row = OrderList(
                order_id=order_id,
                product_id=Product.get(Product.id == product_id),
                amount=details.get('amount'),
            )
            row.save()
def get_current_order_products(user_id):
    """Return the cart entries stored for *user_id* in ordered_products.json.

    Returns None when the user has no cart or the cart is empty.
    """
    with open('ordered_products.json', 'r') as cart_file:
        carts = json.load(cart_file)
    user_cart = carts.get(str(user_id))
    # An empty list is treated the same as a missing cart.
    return user_cart or None
def clear_current_user_ordered_products():
    """Empty the logged-in user's cart inside ordered_products.json."""
    with open('ordered_products.json', 'r') as cart_file:
        carts = json.load(cart_file)
    carts[f'{current_user.id}'] = []
    with open('ordered_products.json', 'w') as cart_file:
        json.dump(carts, cart_file, indent=2)
def send_reset_email(user):
    """Email *user* a password-reset link carrying a freshly issued token."""
    reset_token = user.get_reset_token()
    message = Message('Сброс пароля', sender='vladislavBlog@gmail.com', recipients=[user.email])
    message.body = f'''
    Что бы сбросить пароль перейдите по ссылке:
    {url_for('users.reset_token', token=reset_token, _external=True)}
    Если вы не делали этого запроса проигнорируйте сообщение.
    '''
    mail.send(message)
def send_sales_receipt(user, ordered_products):
    """Email *user* a plain-text receipt for the most recently created order.

    NOTE(review): assumes the newest ``Order`` row belongs to this user —
    racy if two checkouts overlap; confirm against the caller.
    """
    last_order = Order.select().order_by(Order.id.desc()).get()
    msg = Message(f'Чек для заказа из магазина зефира "Влад магазин"',
                  sender='vladislavBlog@gmail.com',
                  recipients=[user.email])
    order_sum = UserHelper.order_price(ordered_products)
    # Build the receipt body: header, one line per product, then the total.
    lines = [f'Ваш заказ номер:{last_order.id}\n']
    for products in ordered_products:
        for pr_id, product in products.items():
            lines.append(f'Продукт: {product.get("product")} - {product.get("amount")}\n')
    lines.append(f'Сумма заказа: {order_sum}.')
    # Leftover debug print of the receipt removed.
    msg.body = ''.join(lines)
    mail.send(msg)
| Braindead3/bd_project | bd_project/users/utils.py | utils.py | py | 2,407 | python | en | code | 0 | github-code | 36 |
2285190842 | from setuptools import setup
VERSION = "1.0.3"

setup(
    name="builder",
    version=VERSION,
    license="Apache License 2.0",
    author="The Open Peer Power Authors",
    author_email="hello@openpeerpower.io",
    url="https://openpeerpower.io/",
    description="Opp.io wheels builder form Open Peer Power.",
    long_description="",
    classifiers=[
        # BUG FIX: a missing comma used to concatenate "Topic :: Home Automation"
        # with the next string into one invalid classifier; the duplicate
        # "Intended Audience :: Developers" entry was also removed.
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Topic :: Home Automation",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Atmospheric Science",
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 3.7",
    ],
    keywords=["docker", "openpeerpower", "opp.io"],
    zip_safe=False,
    platforms="any",
    packages=["builder"],
    include_package_data=True,
)
| actions-marketplace-validations/OpenPeerPower_wheels | setup.py | setup.py | py | 989 | python | en | code | 0 | github-code | 36 |
73593036584 | from django.urls import path
from django.contrib.auth.views import LoginView, LogoutView
from .views import home_view, account_view, meal_view, order_view, report_view, sign_up_view, add_meal_view, edit_meal_view
# URL namespace used when reversing, e.g. reverse('restaurant:home').
app_name = 'restaurant'

# Route table for the restaurant app; auth views reuse Django's built-in
# LoginView/LogoutView with app-specific templates.
urlpatterns = [
    path('', home_view, name='home'),
    path('sign-up/', sign_up_view, name='sign-up'),
    path('sign-in/', LoginView.as_view(template_name='restaurant/sign_in.html'),
         name='sign-in'),
    path('sign-out/', LogoutView.as_view(template_name='restaurant/sign_out.html'),
         name='sign-out'),
    path('account/', account_view, name='account'),
    path('meal/', meal_view, name='meal'),
    path('meal/add/', add_meal_view, name='add-meal'),
    path('meal/<int:meal_id>/edit/', edit_meal_view, name='edit-meal'),
    path('order/', order_view, name='order'),
    path('report/', report_view, name='report'),
]
| AmarjotSingh21/food-delivery-django | restaurant/urls.py | urls.py | py | 882 | python | en | code | 0 | github-code | 36 |
2315870297 | # This is imp else block give u result if no errors occurs
# Demonstrates try/except/else: the else branch runs only when the try body
# raised no exception, so `quotient` is guaranteed to exist there.
try:
    number = int(input("Enter number: "))
    quotient = 100 / number
except ValueError:
    print("Please enter number only!")
except ZeroDivisionError:
    print("we cannot divide by zero! pls enter another number...")
else:
    print("Division:", quotient)
| sudeepsawant10/python-development | exceptionh/4_else_clause.py | 4_else_clause.py | py | 308 | python | en | code | 0 | github-code | 36 |
17323116767 | # Read outputs and write them in individual files
# for polyb in ["gemm", "3mm", "cholesky", "durbin", "ludcmp", "covariance", "seidel-2d", "fdtd-2d"]:
# Strip posit-format artifacts ("32.2x", "64.2x", stray "p") from each
# benchmark output file and write the cleaned copy with a "_C" name marker.
TOKENS_TO_DROP = ["32.2x", "64.2x", "p"]

for benchmark in ["4mm"]:
    for size in ["MINI", "SMALL", "MEDIUM", "LARGE"]:
        for fmt in ["_posit_mem_float_", "_posit_mem_double_"]:
            src = benchmark + '/' + benchmark + fmt + size + '.txt'
            dst = benchmark + '/' + benchmark + "_C" + fmt + size + '.txt'
            with open(src) as fin, open(dst, "w+") as fout:
                for raw_line in fin:
                    cleaned = raw_line
                    for token in TOKENS_TO_DROP:
                        cleaned = cleaned.replace(token, "")
                    fout.write(cleaned)
5467848201 | import pandas as pd
import geopandas
# Explicit dtypes for the BEAM skims CSV so pandas does not mis-infer columns.
beam_skims_types = {'timePeriod': str,'pathType': str,'origin': int,'destination': int,'TIME_minutes': float, 'TOTIVT_IVT_minutes': float, 'VTOLL_FAR': float, 'DIST_meters': float, 'WACC_minutes': float, 'WAUX_minutes': float, 'WEGR_minutes': float, 'DTIM_minutes': float, 'DDIST_meters': float, 'KEYIVT_minutes': float, 'FERRYIVT_minutes': float, 'BOARDS': float, 'DEBUG_TEXT': str}
# Stream the large gzip'd skims file in 1M-row chunks, then concatenate.
print('READING')
chunk = pd.read_csv('data/bay_area_skims.csv.gz', compression="gzip", dtype=beam_skims_types, chunksize=1000000)
print('CONCATENATING')
df = pd.concat(chunk)
# Keep only drive-alone (SOV) AM-peak rows.
print('FILTERING')
sub_df = df.loc[(df['pathType'] == 'SOV') & (df['timePeriod'] == 'AM')]
print('EXPORTING')
sub_df.to_csv('data/auto_skims.csv')
print('DONE')
# Re-read the filtered file (again chunked) and reshape it into the
# (from_zone_id, to_zone_id)-indexed travel_data table the model expects.
chunk = pd.read_csv('data/auto_skims.csv', dtype=beam_skims_types, chunksize=1000000)
df = pd.concat(chunk)
df = df[['origin', 'destination', 'TOTIVT_IVT_minutes', 'DIST_meters']].copy()
df = df.rename(columns={'origin': 'from_zone_id', 'destination': 'to_zone_id', 'TOTIVT_IVT_minutes': 'SOV_AM_IVT_mins'})
df['from_zone_id'] = df['from_zone_id'].astype('str')
df['to_zone_id'] = df['to_zone_id'].astype('str')
df = df.set_index(['from_zone_id', 'to_zone_id'])
# Persist into the model's HDF5 store.
store = pd.HDFStore('data/custom_mpo_06197001_model_data.h5')
store['travel_data'] = df
def correct_index(index_int):
    """Restore the leading zero that numeric parsing strips from block GEOIDs.

    String ids are already correct and pass through unchanged; integer ids
    get a single '0' prefix (ids here presumably start with one leading zero,
    e.g. Bay Area FIPS '06...' -- TODO confirm against the block table).
    """
    # Idiom fix: `isinstance` instead of `type(x) == type('string')`.
    if isinstance(index_int, str):
        return index_int
    return '0' + str(index_int)
# Attach a TAZ zone id to every census block, then drop blocks (and all
# records that reference them) that have no TAZ assignment.
blocks = store['blocks'].copy()
block_taz = pd.read_csv('data/block_w_taz.csv', dtype={'GEOID10': object, 'taz1454': object})
block_taz = block_taz.rename(columns={'GEOID10': 'block_id', 'taz1454':'TAZ'})
block_taz = block_taz.set_index('block_id')[['TAZ']]
# Re-pad block ids that lost their leading zero (see correct_index).
block_taz.index = block_taz.index.map(correct_index)
blocks = blocks.join(block_taz)
blocks['TAZ'] = blocks['TAZ'].fillna(0)
blocks = blocks[blocks['TAZ']!=0].copy()
blocks = blocks.rename(columns = {'TAZ': 'zone_id'})
households = store['households'].copy()
persons = store['persons'].copy()
jobs = store['jobs'].copy()
units = store['residential_units'].copy()
# Keep only records located in (or belonging to households in) kept blocks.
households = households[households['block_id'].isin(blocks.index)].copy()
persons = persons[persons['household_id'].isin(households.index)].copy()
jobs = jobs[jobs['block_id'].isin(blocks.index)].copy()
units = units[units['block_id'].isin(blocks.index)].copy()
# Write the filtered tables back and release the HDF5 file handle.
store['blocks'] = blocks
store['households'] = households
store['persons'] = persons
store['jobs'] = jobs
store['residential_units'] = units
store.close()
print("Done!")
| urbansim/DEMOS_URBANSIM | demos_urbansim/process_skims.py | process_skims.py | py | 2,507 | python | en | code | 1 | github-code | 36 |
31701293102 |
import os
import importlib
from collections import OrderedDict
from .utils import (SettingsLoader, ProjectSettings, ThemeSettings,
ShareData, PathResolver, SysPathContextManager)
from .protocol import PluginRegister
from .sequence_analyze import SequenceParser
class SettingsProcedure:
    """Loads project and theme settings, then imports the theme packages."""

    @classmethod
    def _load_share_data(cls, loaders):
        """Merge *loaders* into the global ShareData registry."""
        ShareData.load_data(loaders)

    @classmethod
    def _load_project_settings(cls, path):
        """Load project-level settings and mirror them into ShareData."""
        project_settings_loader = SettingsLoader(path)
        ProjectSettings.load_data(project_settings_loader)
        cls._load_share_data(project_settings_loader)

    @classmethod
    def _load_theme_settings(cls, path, name):
        """Load one theme's settings file and mirror it into ShareData."""
        theme_settings_loader = SettingsLoader(path, name)
        ThemeSettings.load_data(theme_settings_loader)
        cls._load_share_data(theme_settings_loader)

    @classmethod
    def _load_settings(cls):
        """Load project settings, then the settings of every registered theme."""
        pr = PathResolver
        # set up ProjectSettings
        project_settings_path = pr.project_settings()
        cls._load_project_settings(project_settings_path)
        # set up ThemeSettings (removed the unused `theme_settings_set` list
        # the original accumulated nothing into)
        for theme_name in ProjectSettings.get_registered_theme_name():
            theme_settings_path = pr.theme_settings(theme_name)
            cls._load_theme_settings(theme_settings_path, theme_name)

    @classmethod
    def _load_themes(cls):
        """Import each registered theme package with the themes dir on sys.path."""
        pr = PathResolver
        theme_dir = pr.themes()
        for theme_name in ProjectSettings.get_registered_theme_name():
            with SysPathContextManager(theme_name, theme_dir):
                importlib.import_module(theme_name)

    @classmethod
    def run(cls, project_path=None):
        """Entry point: optionally set the project path, then load everything.

        project_path is None means the path has already been set.
        """
        if project_path:
            PathResolver.set_project_path(project_path)
        cls._load_settings()
        cls._load_themes()
class PluginProcedure:
    """Resolves the ordered plugin execution sequence from theme settings."""

    # Default runtime components, executed in this order.
    runtime_components = ['pre_load', 'in_load', 'post_load',
                          'pre_process', 'in_process', 'post_process',
                          'pre_write', 'in_write', 'post_write']
    # Components that extend the CLI rather than the build pipeline.
    extended_procedure = ['cli_extend']

    @classmethod
    def _get_plain_text(cls, theme_name, field_name):
        """Return the raw '<theme>.<field>' settings text, or None if absent."""
        search_key = '{}.{}'.format(theme_name, field_name)
        plain_text = ThemeSettings.get(search_key)
        return plain_text

    @classmethod
    def _get_execution_orders(cls):
        """Parse every component's plugin sequence across all themes.

        Returns (error_happened, exec_orders); exec_orders maps a component
        name to its resolved plugin sequence. A component whose text failed
        to parse is reported and left out of exec_orders.
        """
        error_happened = False
        exec_orders = OrderedDict()
        # exec_orders contains both default and extended procedures here.
        for component in (cls.runtime_components + cls.extended_procedure):
            parser = SequenceParser()
            for theme_name in ProjectSettings.get_registered_theme_name():
                plain_text = cls._get_plain_text(theme_name, component)
                if plain_text is None:
                    continue
                # analyze
                parser.analyze(theme_name, plain_text)
            if parser.error:
                parser.report_error()
                error_happened = True
            else:
                exec_orders[component] = parser.generate_sequence()
        return error_happened, exec_orders

    @classmethod
    def _linearize_exec_orders(cls, exec_orders):
        """Split exec_orders into a flat runtime sequence plus CLI indices."""
        extract_field = cls.extended_procedure[0]
        # BUG FIX: 'cli_extend' is absent from exec_orders when its parse
        # failed; the old `exec_orders[extract_field]` raised KeyError before
        # run() could report the parse error. pop with a default is safe.
        cli_indices = exec_orders.pop(extract_field, [])
        # generate plugin calling sequence.
        flat_orders = []
        for container in exec_orders.values():
            flat_orders.extend(container)
        return flat_orders, cli_indices

    @classmethod
    def _verify_plugins(cls, flat_orders):
        """Return True when some referenced plugin is not registered."""
        for plugin_index in flat_orders:
            plugin = PluginRegister.get_plugin(plugin_index)
            if plugin is None:
                # can not find such plugin
                print('Can Not Find {}'.format(plugin_index))
                return True
        return False

    @classmethod
    def run(cls):
        """Resolve, flatten and verify all plugin orders; raise on any error."""
        parse_error, exec_orders = cls._get_execution_orders()
        flat_order, cli_indices = cls._linearize_exec_orders(exec_orders)
        match_error = cls._verify_plugins(flat_order + cli_indices)
        if parse_error or match_error:
            raise SyntaxError('Error happended, suspend program.')
        return flat_order, cli_indices
| huntzhan/GeekCMS | geekcms/loadup.py | loadup.py | py | 4,407 | python | en | code | 5 | github-code | 36 |
4108004877 | from collections import deque
from sys import stdin, exit
# BFS flood fill on an n x n height grid: from (0, 0) we may step to an
# orthogonal neighbour whose height differs by at most h; print "yes" iff
# cell (n-1, n-1) is reachable.
input = stdin.readline

n, h = [int(x) for x in input().split()]
grid = [[int(x) for x in input().split()] for _ in range(n)]
seen = [[False] * n for _ in range(n)]
seen[0][0] = True
frontier = deque([(0, 0)])
while frontier:
    r, c = frontier.popleft()
    for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        nr, nc = r + dr, c + dc
        if 0 <= nr < n and 0 <= nc < n and not seen[nr][nc] \
                and abs(grid[r][c] - grid[nr][nc]) <= h:
            seen[nr][nc] = True
            frontier.append((nr, nc))
if seen[n - 1][n - 1]:
    print("yes")
    exit()
print("no")
| AAZZAZRON/DMOJ-Solutions | dmopc13c3p3.py | dmopc13c3p3.py | py | 679 | python | en | code | 1 | github-code | 36 |
72569293224 | import numpy as np
import pylab as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib
def matshow(ax, data, x_name=None, y_name=None, x_n=None, y_n=None,
            x_tickslabels=None, y_tickslabels=None, inversed=False, cmap=plt.cm.gray_r,
            colorbar=False, colorbarticks=None, colorbarticklabels=None, c_name=None, **kwargs):
    """
    A simple matrix-showing plot, based on
    http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.matshow

    :param ax: a matplotlib.axes.Axes object
    :param data: the numpy array matrix to plot
    :param x_name: label of x axis
    :param y_name: label of y axis
    :param x_n: number of elements in x direction; defaults to data's shape
    :param y_n: number of elements in y direction; defaults to data's shape
    :param x_tickslabels: x axis tick labels
    :param y_tickslabels: y axis tick labels
    :param inversed: if True inverts the y-axis to get the classical
        top-left-origin representation of a matrix
    :param cmap: the colormap to use. Default: gray_r
    :param colorbar: if True adds a colorbar to the right of the axes
    :param colorbarticks: tick positions for the colorbar
    :param colorbarticklabels: tick labels for the colorbar
    :param c_name: label for the colorbar

    Any further kwargs are passed to ``ax.matshow()``.
    """
    stuff = ax.matshow(data, cmap=cmap, origin='lower', **kwargs)
    if x_name is not None:
        ax.set_xlabel(x_name)
    if y_name is not None:
        ax.set_ylabel(y_name)
    # Default tick counts to the matrix dimensions (cols = axis 1, rows = axis 0).
    if x_n is None:
        x_n = np.shape(data)[1]
    if y_n is None:
        y_n = np.shape(data)[0]
    ax.set_xticks(np.arange(x_n))
    ax.set_yticks(np.arange(y_n))
    if x_tickslabels is not None:
        ax.set_xticklabels(x_tickslabels)
    if y_tickslabels is not None:
        ax.set_yticklabels(y_tickslabels)
    if inversed:
        ax.invert_yaxis()
    else:
        ax.xaxis.tick_bottom()
    if colorbar:
        # Reserve space next to the axes and draw the colorbar there.
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", "5%", pad="3%")
        cax = plt.colorbar(stuff, cax, ticks=colorbarticks)
        # Idiom fix: `is not None` instead of `not ... is None`.
        if colorbarticklabels is not None:
            cax.set_ticklabels(colorbarticklabels)
        if c_name is not None:
            cax.set_label(c_name)
| kuntzer/sclas | plots/matshow.py | matshow.py | py | 2,325 | python | en | code | 3 | github-code | 36 |
def get_value(data_list, index, default=None):
    """Return ``data_list[index]``, or *default* when the index is out of range.

    Negative indices follow normal Python semantics. The new *default*
    parameter generalizes the original hard-coded None fallback and is
    backward compatible.
    """
    try:
        return data_list[index]
    except IndexError:
        return default
# Interactive driver: build a list of n copies of one element, then look up
# a user-chosen index via get_value.
n = int(input("Enter no. of elements in list- "))
el = input("Enter the element- ")
values = [el] * n  # renamed from `list`, which shadowed the builtin
index_number = int(input("Enter index no.- "))
value = get_value(values, index_number)
print(value, " is present at index", index_number)
34987281436 | import torch
import torch.nn as nn
from methods.ein_seld.data_augmentation import spec_augment_
from methods.ein_seld.data_augmentation import spec_augment, channel_rotation
from methods.utils.stft import (STFT, LogmelFilterBank, intensityvector,
spectrogram_STFTInput)
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
import librosa
class LogmelIntensity_Extractor(nn.Module):
    """Feature extractor for FOA audio: concatenates log-mel spectrograms
    with intensity vectors, optionally applying train-time augmentation
    (time inversion, channel rotation, SpecAugment)."""
    def __init__(self, cfg , data_type):
        """cfg: configuration dict with 'data' (feature params) and
        'data_augmentation' sections; data_type: dataset split tag."""
        super().__init__()
        data = cfg['data']
        sample_rate, n_fft, hop_length, window, n_mels, fmin, fmax = \
            data['sample_rate'], data['n_fft'], data['hop_length'], data['window'], data['n_mels'], \
                data['fmin'], data['fmax']
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10   # floor before log to avoid log(0)
        top_db = None
        # STFT extractor
        self.stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length, win_length=n_fft,
            window=window, center=center, pad_mode=pad_mode,
            freeze_parameters=data['feature_freeze'])
        # Spectrogram extractor
        self.spectrogram_extractor = spectrogram_STFTInput
        # Logmel extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
            n_mels=n_mels, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
            freeze_parameters=data['feature_freeze'])
        # Intensity vector extractor
        self.intensityVector_extractor = intensityvector
        self.data_type = data_type
        self.cfg = cfg
    def define_transformation(self,waveform):
        """Debug helper: build a torchaudio mel spectrogram of *waveform*
        on CPU (hard-coded 24 kHz parameters, independent of cfg)."""
        sample_rate = 24000
        n_fft = 1024
        win_length = None
        hop_length = 600
        n_mels = 256
        mel_spectrogram = T.MelSpectrogram(
            sample_rate=sample_rate,
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            power=2.0,
            n_mels=n_mels,
        )
        melspec = mel_spectrogram(waveform.cpu())
        return melspec
    def forward(self, x):
        """
        Two call modes:
        * inference: x is a tensor of shape (batch_size, channels=4, data_length);
          returns features of shape (batch_size, channels, time_steps, freq_bins).
        * training: x is a tuple (input, target, ind, data_type); augmentation is
          applied per-sample according to data_type tags, and (features, target)
          is returned.
        """
        # Inference path: a bare tensor was passed instead of a tuple.
        if type(x)!= tuple :
            if x.ndim == 3:
                x = self.stft_extractor(x)
                logmel = self.logmel_extractor(self.spectrogram_extractor(x))
                intensity_vector = self.intensityVector_extractor(x, self.logmel_extractor.melW)
                # Channel-wise concat: log-mel bands + intensity vector.
                out = torch.cat((logmel, intensity_vector), dim=1)
                return out
            else:
                raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
                    Now it is {}".format(x.shape))
        input, target, ind, data_type = x
        if input.ndim != 3:
            raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
                Now it is {}".format(input.shape))
        #self.plot_waveform(input[0])
        #melspec = self.define_transformation(input[0])
        #self.plot_spectrogram(melspec)
        # get the indices of augmented data
        aug_idx_inverse = [i for i, x in enumerate(data_type) if x == "train_invert_position_aug"]
        # Time-inversion augmentation: flip waveform and labels, negate DOA.
        # NOTE(review): this loop indexes input/target with the enumeration
        # position `i`, not the flagged batch index `dt` — inconsistent with
        # the SpecAugment loop below which uses `dt`; verify intent.
        if ind == 'train' and len(aug_idx_inverse) != 0:
            for i, dt in enumerate(aug_idx_inverse):
                input[i, :, :] = torch.flip(input[i, :, :], dims=[1]) # invert waveform time axis
                sed_label = torch.flip(target['sed'][i], dims=[0]) # invert sed label time axis
                doa_label = torch.flip(target['doa'][i], dims=[0]) # invert doa label time axis
                doa_label = 0.0 - doa_label # also invert sound source position
                target['sed'][i] = sed_label
                target['doa'][i] = doa_label
        aug_idx_rotate = [i for i, x in enumerate(data_type) if x == "train_rotate_channel"]
        # FOA channel-rotation augmentation: rotate audio channels and apply
        # the matching rotation to the DOA labels.
        # NOTE(review): same `i` vs `dt` indexing question as above.
        if ind == 'train' and len(aug_idx_rotate) != 0:
            for i , dt in enumerate(aug_idx_rotate):
                input[i, :, :], pattern = channel_rotation.apply_data_channel_rotation('foa', input[i, :, :])
                aug_rotate = channel_rotation.apply_label_channel_rotation('foa', target['doa'][i], pattern)
                # update the target
                target['doa'][i] = aug_rotate
        input = self.stft_extractor(input)
        logmel = self.logmel_extractor(self.spectrogram_extractor(input))
        aug_idx_spc = [i for i, x in enumerate(data_type) if x == "train_spec_aug"]
        # SpecAugment: mask frequency/time bands of the log-mel features.
        if ind == 'train' and len(aug_idx_spc) != 0:
            # get specAugment Parameters
            # (note: locals F and T shadow the torchaudio module aliases here)
            F = self.cfg['data_augmentation']['F']
            T = self.cfg['data_augmentation']['T']
            num_freq_masks = self.cfg['data_augmentation']['num_freq_masks']
            num_time_masks = self.cfg['data_augmentation']['num_time_masks']
            replace_with_zero = self.cfg['data_augmentation']['replace_with_zero']
            for i , dt in enumerate(aug_idx_spc):
                logmel_aug = spec_augment.specaug(torch.squeeze(logmel[dt,:,:,:]).permute(0, 2, 1),
                                                W=2, F=F, T=T,
                                                num_freq_masks=num_freq_masks,
                                                num_time_masks=num_time_masks,
                                                replace_with_zero=replace_with_zero)
                logmel[dt, :, :, :] = logmel_aug
        intensity_vector = self.intensityVector_extractor(input, self.logmel_extractor.melW)
        out = torch.cat((logmel, intensity_vector), dim=1)
        return out, target
    def plot_spectrogram(self, spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None):
        """Debug helper: render spec[0] in dB and save it as Spectrogram.png."""
        fig, axs = plt.subplots(1, 1)
        axs.set_title(title or 'Spectrogram (db)')
        axs.set_ylabel(ylabel)
        axs.set_xlabel('frame')
        im = axs.imshow(librosa.power_to_db(spec[0]), origin='lower', aspect=aspect)
        if xmax:
            axs.set_xlim((0, xmax))
        fig.colorbar(im, ax=axs)
        plt.show(block=False)
        plt.savefig('Spectrogram.png', format='png')
        plt.close(fig)
    def plot_waveform(self,waveform, title="Waveform", xlim=None, ylim=None):
        """Debug helper: plot each channel of a (channels, frames) waveform
        and save the figure as waveform.png."""
        num_channels, num_frames = waveform.shape
        time_axis = torch.arange(0, num_frames)
        # // sample_rate
        figure, axes = plt.subplots(num_channels, 1)
        if num_channels == 1:
            axes = [axes]
        for c in range(num_channels):
            axes[c].plot(time_axis, waveform[c].cpu(), linewidth=1)
            axes[c].grid(True)
            if num_channels > 1:
                axes[c].set_ylabel(f'Channel {c + 1}')
            if xlim:
                axes[c].set_xlim(xlim)
            if ylim:
                axes[c].set_ylim(ylim)
        figure.suptitle(title)
        plt.show(block=False)
        plt.savefig('waveform.png', format='png')
        plt.close(figure)
'''
# For spectrogram visualization
def plot_specgram(self,waveform, sample_rate, title="Spectrogram", xlim=None):
#waveform = waveform[0].numpy()
waveform = waveform[0].cpu().numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) // sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f'Channel {c + 1}')
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.savefig('Spec')
plt.show(block=False)
''' | saraalrawi/EIN-SELD | seld/methods/feature.py | feature.py | py | 8,022 | python | en | code | 1 | github-code | 36 |
12507629411 | # 정렬
# K번째수
def solution(array, commands):
    """For each command (i, j, k): sort array[i-1:j] and take its k-th element."""
    results = []
    for start, end, k in commands:
        window = sorted(array[start - 1:end])
        results.append(window[k - 1])
    print(results)
    return results
solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]])
# [5, 6, 3]
| Hong-Jinseo/Algorithm | programmers/42748.py | 42748.py | py | 300 | python | en | code | 0 | github-code | 36 |
18760515421 | import numpy as np
import cv2
import screeninfo
import oa_ls
def init_proj(window_name, screen_id):
    """Move the OpenCV window *window_name* onto monitor *screen_id* and make
    it fullscreen. Returns the monitor's (width, height) in pixels."""
    monitor = screeninfo.get_monitors()[screen_id]
    # Offset by one pixel from the monitor origin (presumably to force the OS
    # to place the window on that monitor -- TODO confirm).
    cv2.moveWindow(window_name, monitor.x - 1, monitor.y - 1)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    return monitor.width, monitor.height
def show_laser_line():
    """Render a red speckled laser-scan line fullscreen on display 1 and
    block until a key is pressed."""
    cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
    width, height = init_proj("window", 1)
    # BUG FIX: the helper was called unqualified, which is a NameError at
    # runtime; it comes from the imported oa_ls module — confirm the exact
    # attribute name there.
    img_ls = oa_ls.create_laser_scan_line_speckle((0, 0, 255), 1, width, height, 3)
    cv2.imshow("window", img_ls)
    cv2.waitKey(0)
# Allow running this module directly as a quick projector demo.
if __name__ == '__main__':
    show_laser_line()
| olaals/multivision-depr | multivision/oa_realapi.py | oa_realapi.py | py | 699 | python | en | code | 0 | github-code | 36 |
3685343845 |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
from collections import defaultdict
from io import StringIO
from PIL import Image
import requests
from lib.helpers import DatabaseConnector as dtb
from lib import label_map_util
from lib import visualization as vis_util
import uuid
import datetime
class ObjectDetectionRules(object):
    """Static co-occurrence rules: for each detected garment label, the list
    of complementary labels worth keeping alongside it."""
    LABEL={
        "shorts": ['outerwears', 'tshirt', 'tops'],
        "jeans": ['outerwears', 'tshirt', 'tops'],
        "tops": ["jeans", "shorts", "pants", "skirts"],
        "person": [],
        "skirts": ['outerwears', 'tshirt', 'tops']
    }
class ObjectDetector(object):
    """High-level clothing detector: runs the TF model on an image and
    post-processes the boxes into 'expected' items plus at most one
    complementary item (per ObjectDetectionRules.LABEL)."""

    def __init__(self, graph_path, label_path, num_class):
        super(ObjectDetector, self).__init__()
        self.model = ObjectDetectionModel(graph_path, label_path, num_class)

    def run(self, picture, expecting):
        """Detect garments in *picture* (a path/file PIL can open).

        *expecting* is an iterable of wanted label names; complementary
        labels come from ObjectDetectionRules.LABEL. Returns a list of
        formatted box dicts, or [] when no expected item was found.
        """
        image = Image.open(picture)
        boxes = self.model.run(image)
        expected = []
        others = []
        for i in range(len(boxes)):
            boxes[i]["width"] = image.size[0]
            boxes[i]["height"] = image.size[1]
            if boxes[i]["label"]["name"] in expecting:
                expected.append(boxes[i])
            if boxes[i]["label"]["name"] in [x for expected_label in expecting for x in ObjectDetectionRules.LABEL[expected_label]]:
                others.append(boxes[i])
        expected = self.format_objects(picture, expected, is_expected=True)
        others = self.format_objects(picture, others)
        expected, others = self.correct_detection(expected, others)
        if len(others) > 1:
            # Keep only the single most confident complementary item.
            max_confidence = -1
            tokeep = None
            for obj in others:
                if obj["confidence"] > max_confidence:
                    max_confidence = obj["confidence"]
                    tokeep = obj
            others = [tokeep]
        if len(expected) == 0:
            return []
        return expected + others

    def format_object(self, path, box, is_expected=False):
        """Flatten one raw detection box into a plain result dict."""
        return { "is_expected":is_expected, "path": path, "height": box["height"], "width": box["width"], "label": box["label"]["name"], "confidence": box["label"]["value"], "ymin": box["ymin"], "ymax": box["ymax"], "xmin": box["xmin"], "xmax":box["xmax"]}

    def format_objects(self, path, boxes, keep_main=True, is_expected=False):
        """Group boxes by label and flag the most confident one per label as
        'is_main'; return only the main boxes when *keep_main*, else all."""
        objects = {}
        for box in boxes:
            obj = self.format_object(path, box, is_expected=is_expected)
            if obj["label"] not in objects.keys():
                objects[obj["label"]] = []
            objects[obj["label"]].append(obj)
        for key in objects.keys():
            # Mark the highest-confidence box of each label.
            max_value = -1
            indice = None
            for i in range(len(objects[key])):
                if objects[key][i]["confidence"] > max_value:
                    max_value = objects[key][i]["confidence"]
                    indice = i
            objects[key][indice]["is_main"] = True
        if keep_main:
            total = []
            for k in objects.keys():
                for obj in objects[k]:
                    if "is_main" in obj.keys():
                        total.append(obj)
        else:
            total = []
            for k in objects.keys():
                for obj in objects[k]:
                    total.append(obj)
        return total

    def correct_detection(self, expected, others):
        """Resolve vertical overlaps between expected and complementary boxes.

        Coordinates are presumably normalized to [0, 1] (thresholds 0.1/0.4)
        -- TODO confirm against the model output. Heavily overlapped
        complementary boxes are dropped (set to None, filtered at the end);
        lightly overlapped ones are trimmed at the midpoint of the overlap.
        """
        for i in range(len(expected)):
            for j in range(len(others)):
                if others[j] is None:
                    # BUG FIX: a box dropped while matching a previous expected
                    # item used to be dereferenced again here (TypeError).
                    continue
                if others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"] and others[j]["ymax"] > expected[i]["ymin"] and others[j]["ymax"]-expected[i]["ymin"]>=0.4:
                    others[j] = None
                elif others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"] and others[j]["ymax"] > expected[i]["ymin"] and others[j]["ymax"]-expected[i]["ymin"]>=0.1:
                    expected[i]["ymin"] = expected[i]["ymin"] + (others[j]["ymax"]-expected[i]["ymin"]) / 2.0
                    others[j]["ymax"] = others[j]["ymax"] - (others[j]["ymax"]-expected[i]["ymin"]) / 2.0
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"]:
                    others[j] = None
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"] and expected[i]["ymax"] - others[j]["ymin"] >= 0.4:
                    others[j] = None
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"] and expected[i]["ymax"] - others[j]["ymin"] >= 0.1:
                    expected[i]["ymax"] = expected[i]["ymax"] - (expected[i]["ymax"]-others[j]["ymin"]) / 2.0
                    others[j]["ymin"] = others[j]["ymin"] + (expected[i]["ymax"]-others[j]["ymin"]) / 2.0
                elif others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"]:
                    others[j] = None
        expected = [x for x in expected if x is not None]
        others = [x for x in others if x is not None]
        return expected, others
class ObjectDetectionModel(object):
    """Wrapper around a frozen TensorFlow (v1) object-detection graph: loads
    the graph and label map once in __init__, then serves per-image
    inference through run()."""

    def __init__(self, graph_path, label_path, num_class):
        super(ObjectDetectionModel, self).__init__()
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.sess = tf.Session(graph=detection_graph)
        self.label_map = label_map_util.load_labelmap(label_path)
        categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=num_class, use_display_name=True)
        self.category_index = label_map_util.create_category_index(categories)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score is the model's confidence for one detected object.
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    def load_image_into_numpy_array(self, image):
        """Convert a PIL RGB image into an (H, W, 3) uint8 numpy array."""
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)

    def run(self, image):
        """Run detection on one PIL image and return the post-processed
        boxes (score threshold 0.2) from the visualization helper."""
        # The original wrapped this body in a bare `try: ... except: raise`,
        # a no-op that only obscured tracebacks; it has been removed.
        image_np = self.load_image_into_numpy_array(image)
        image_np_expanded = np.expand_dims(image_np, axis=0)
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        # Threshold and format the raw detections.
        boxes = vis_util.visualize_boxes_and_labels_on_image_array_2(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            self.category_index,
            min_score_thresh=0.2,
            use_normalized_coordinates=True,
            line_thickness=5)
        return boxes
19738758129 | from vigilo.models.session import DBSession, MigrationDDL
from vigilo.models import tables
def upgrade(migrate_engine, actions):
    """
    Migrate the model: add an index on ConfFile.name to speed up
    lookups by configuration file name.

    @param migrate_engine: Database connection that may be used
        during the migration.
    @type migrate_engine: C{Engine}
    @param actions: Container listing the actions to perform
        once this migration has been applied.
    @type actions: C{MigrationActions}
    """
    MigrationDDL(
        [
            "CREATE INDEX ix_vigilo_conffile_name "
                "ON vigilo_conffile (name)",
        ],
    ).execute(DBSession, tables.ConfFile.__table__)
| vigilo/models | src/vigilo/models/migration/012_ConfFile_name_index.py | 012_ConfFile_name_index.py | py | 661 | python | fr | code | 4 | github-code | 36 |
17237472456 | #키와 몸무게로 성별 classification
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors, datasets
from sklearn import svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# Load the tab-separated dataset: each line is "<gender>\t<height>\t<weight>".
file=open('gender_dataset.txt')
gender=[]
height=[]
weight=[]
print("▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆\nCROSS VALIDATION\n▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆")
for line in file.readlines():
    line = line.replace('\n', '')  # strip the trailing newline
    g,h,w = line.split('\t')
    gender.append(str(g))
    height.append(float(h))
    weight.append(float(w))
# print(gender)
# print(height)
# print(weight)
# Build the feature matrix X = [[height, weight], ...] and labels y.
X=[]
for i in range(len(gender)):
    X.append([height[i],weight[i]])
y=gender
# print(X)
# print(y)
# plt.scatter(X[:,0], X[:,1], c=y, s=30, cmap=plt.cm.Paired)
# Number of folds for cross validation, supplied interactively.
k_fold=int(input("cross validation할 k_fold값: "))
# Stratified split: distribute male and female samples evenly across the
# k folds (new_X[g] / new_y[g] hold fold g's features and labels).
new_X=[[] for i in range(k_fold)]
new_y=[[] for i in range(k_fold)]
# Male samples: fill each fold with len(gender)/2/k_fold males in order.
male_count=0
group=0
for i in range(len(gender)):
    if(y[i]=="Male"):
        male_count+=1
        new_X[group].append(X[i])
        new_y[group].append(y[i])
        if(male_count==int(len(gender)/2/k_fold)):
            male_count=0
            group+=1
# Female samples: same scheme, using a separate fold counter.
female_count=0
fgroup=0
for i in range(len(gender)):
    if(y[i]=="Female"):
        female_count+=1
        new_X[fgroup].append(X[i])
        new_y[fgroup].append(y[i])
        if(female_count==int(len(gender)/2/k_fold)):
            female_count=0
            fgroup+=1
# print(len(new_X[0]))
total_percentage=0
# Classifier choice is supplied interactively (SVM kernels, LDA, or kNN).
models = input("모델의 종류를 입력해주세요(linear,poly,rbf,sigmoid,precomputed,lda,knn): ")
if models=="knn":
    neigh = int(input("n_neighbors 값을 입력하세요: "))
# k-fold cross validation: fold ``test_group`` is held out, the rest train.
for test_group in range(k_fold):
    # if(test_group!=0):continue
    train_X=[]
    train_y=[]
    test_X=[]
    test_y=[]
    for target_group in range(k_fold):
        if(target_group==test_group):
            test_X=new_X[target_group]
            test_y=new_y[target_group]
        elif(target_group!=test_group):
            train_X=train_X+new_X[target_group]
            train_y=train_y+new_y[target_group]
    # print(str("test group: ")+str(test_group))
    # print(len(test_X))
    # print(len(train_X)
    # Instantiate the chosen classifier for this fold.
    if models=="linear":
        clf = svm.SVC(kernel="linear") #svm_linear
    elif models=="poly":
        clf = svm.SVC(kernel="poly") #svm_poly
    elif models=="rbf":
        clf = svm.SVC(kernel="rbf") #svm_rbf
    elif models=="sigmoid":
        clf = svm.SVC(kernel="sigmoid") #svm_sigmoid
    elif models=="precomputed":
        clf = svm.SVC(kernel="precomputed") #svm_precomputed
    elif models=="lda":
        clf = LinearDiscriminantAnalysis(n_components=1) #lda
    elif models=="knn":
        # NOTE(review): this branch builds two classifiers (only the
        # 'distance'-weighted one survives the loop) and fits on the FULL
        # X,y here before being refit on the training fold below — the
        # initial fit is redundant and looks like leftover example code.
        n_neighbors = neigh
        for weights in ['uniform', 'distance']:
            # we create an instance of Neighbours Classifier and fit the data.
            clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
            clf.fit(X, y)
        #knn end
    else:
        # Unknown model name: prompt once more, then abort the CV loop.
        models = input("오류 발생. 다시 시도해주세요.")
        break
    # Fit on the training folds only, then score the held-out fold.
    clf.fit(train_X, train_y)
    model_answer=clf.predict(test_X)
    total_count=0
    correct_count=0
    for i in range(len(model_answer)):
        total_count+=1
        if(model_answer[i]==test_y[i]):
            correct_count+=1
    percentage=correct_count/total_count*100
    total_percentage+=percentage
    print("테스트 그룹: "+str(test_group+1))
    print("정확도: "+str(percentage)+"% ("+str(correct_count)+"/"+str(total_count)+")\n")
# Average accuracy over all folds.
total_percentage/=k_fold
print("----------------------------------------")
print("모델 종류: "+str(models.upper()))
print("cross validation 전체 정확도: "+str(total_percentage)+"%")
print("----------------------------------------\n<키와 몸무게로 성별 예측>")
# ax = plt.gca()
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# xx = np.linspace(xlim[0], xlim[1], 30)
# yy = np.linspace(ylim[0], ylim[1], 30)
# YY, XX = np.meshgrid(yy, xx)
# xy = np.vstack([XX.ravel(), YY.ravel()]).T
# Z = clf.decision_function(xy).reshape(XX.shape)
# ax.contour(XX, YY, Z, colors='k', levels=[-1,0,1], alpha=0.5, linestyles=['--', '-', '--'])
# ax.scatter(clf.support_vectors_[:,0], clf.support_vectors_[:,1], s=60, facecolors='r')
# plt.show()
# Interactive prediction loop: read "<height> <weight>" and print the
# predicted gender using the classifier from the LAST cross-validation fold.
while True:
    a,b = map(float,input("키와 몸무게 값을 공백 한 칸을 두고 입력해주세요: ").split(" "))
    newdata = [[a,b]]
    print(clf.predict(newdata))
| hoony6134/kaist | gender.py | gender.py | py | 4,612 | python | en | code | 0 | github-code | 36 |
33540908783 | import logging
from typing import List
import funppy
def sum(*args):
    """Return the total of all positional arguments, starting from 0.

    NOTE: intentionally shadows the builtin ``sum`` — it is registered
    under this exact name as a funppy plugin function.
    """
    total = 0
    for value in args:
        total = total + value
    return total
def sum_ints(*args: int) -> int:
    """Return the sum of the given integers (0 when called with none).

    Fix: with ``*args`` the annotation describes EACH argument, so the
    previous ``List[int]`` wrongly annotated every argument as a list;
    the correct per-element type is ``int``.
    """
    result = 0
    for arg in args:
        result += arg
    return result
def sum_two_int(a: int, b: int) -> int:
    """Return the sum of two integers."""
    total = a + b
    return total
def sum_two_string(a: str, b: str) -> str:
    """Concatenate two strings and return the result."""
    combined = a
    combined += b
    return combined
def sum_strings(*args: str) -> str:
    """Concatenate all string arguments into one string.

    Fixes: the ``*args`` per-element annotation is ``str`` (not
    ``List[str]``), and ``str.join`` replaces the repeated ``+=`` loop,
    which is worst-case quadratic.
    """
    return "".join(args)
def concatenate(*args) -> str:
    """Concatenate the ``str()`` form of every argument.

    Fix: the previous ``*args: List[str]`` annotation was doubly wrong —
    ``*args`` annotates each element, and elements may be any type (they
    are converted with ``str()``); the annotation was dropped. ``join``
    over a generator replaces the quadratic ``+=`` loop.
    """
    return "".join(str(arg) for arg in args)
def setup_hook_example(name):
    """Demo setup hook: log a warning and echo the name back."""
    logging.warning("setup_hook_example")
    return "setup_hook_example: {}".format(name)
def teardown_hook_example(name):
    """Demo teardown hook: log a warning and echo the name back."""
    logging.warning("teardown_hook_example")
    return "teardown_hook_example: {}".format(name)
if __name__ == '__main__':
    # Register each helper as a funppy plugin function under the name
    # callers will use, then start the plugin server (blocks).
    funppy.register("sum", sum)
    funppy.register("sum_ints", sum_ints)
    funppy.register("concatenate", concatenate)
    funppy.register("sum_two_int", sum_two_int)
    funppy.register("sum_two_string", sum_two_string)
    funppy.register("sum_strings", sum_strings)
    funppy.register("setup_hook_example", setup_hook_example)
    funppy.register("teardown_hook_example", teardown_hook_example)
    funppy.serve()
| httprunner/hrp | examples/debugtalk.py | debugtalk.py | py | 1,315 | python | en | code | 83 | github-code | 36 |
5600126262 | import boto3
import json
import logging
import os
from base64 import b64decode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
# Target Slack channel for error notifications.
SLACK_CHANNEL = '#general'
# SECURITY NOTE(review): the webhook URL is a secret and is hard-coded
# here; it should come from an (encrypted) environment variable — the
# unused ``b64decode`` import above suggests KMS decryption was intended.
HOOK_URL = 'https://hooks.slack.com/services/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'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def main(event, context):
    """AWS Lambda handler: forward an SNS-delivered message to Slack.

    HTTP/URL errors are logged, never raised.
    """
    logger.info("Event: " + str(event))
    sns_message = json.loads(event['Records'][0]['Sns']['Message'])
    logger.info("Message: " + str(sns_message))
    payload = {
        'channel': SLACK_CHANNEL,
        'text': "New error message: %s" % (str(sns_message)),
    }
    req = Request(HOOK_URL, json.dumps(payload).encode('utf-8'))
    try:
        response = urlopen(req)
        response.read()
        logger.info("Message posted to %s", payload['channel'])
    except HTTPError as e:
        logger.error("Request failed: %d %s", e.code, e.reason)
    except URLError as e:
        logger.error("Server connection failed: %s", e.reason)
| ManuelGago/VodChallenge | vodchallenge/error_to_slack.py | error_to_slack.py | py | 1,195 | python | en | code | 1 | github-code | 36 |
6418206467 | import logging
from datetime import datetime
from flask import request
from flask_mail import Mail, Message
from service.schemas import MessageSchema, EmailSchema
from service.models import tr_messages, tm_emails
from service import db, app
from service.tasks.task_email import email_users
def saveMessage(data):
    """Persist a scheduled message and enqueue the delayed e-mail task.

    ``data`` must contain a 'timestamp' string ('%d %b %Y %H:%M'), an
    'email_subject' and an 'email_content'; the timestamp must be in the
    future. NOTE(review): on success this returns ``db.session.commit()``
    (i.e. ``None``), but returns a *string* on failure — an inconsistent
    contract callers must be aware of.
    """
    try:
        # Convert timestamp string to general datetime format
        data['timestamp'] = convertToDateTime(data['timestamp'])
        # Timestamp should be greater than now
        if data['timestamp'] > datetime.now():
            # Get delta time in seconds
            delaySeconds = getSecondsDifference(data['timestamp'])
            # Save message to database
            message = tr_messages.Messages(**data)
            db.session.add(message)
            # Collect every recipient address currently stored.
            arEmails = list()
            emails = getEmails()
            for email in emails:
                arEmails.append(email['email'])
            print(arEmails, data["email_subject"], data["email_content"])  # debug output
            # Call email task asynchronously
            # ARGS = Email addresses, subject, content
            # Countdown = delta time in seconds (task fires at 'timestamp')
            email_users.apply_async(args=[arEmails, data["email_subject"], data["email_content"]], countdown=delaySeconds)
            # Commit db transaction
            return db.session.commit()
        else:
            return 'Check your datetime'
    except Exception as e:
        # Broad catch: any parse/DB/broker failure is logged and reported
        # back as a generic string instead of propagating.
        logging.exception(e)
        return 'Please check your request'
def getEmails():
    """Return every stored e-mail row, serialized via EmailSchema."""
    rows = tm_emails.Emails.query.all()
    return EmailSchema.all_email_schema.dump(rows).data
def getMessageAtTimestamp():
    """Return all stored messages, serialized via MessageSchema.

    Fix: removed the ``timestamp`` local that was computed but never used.
    NOTE(review): despite its name this function does NOT filter by the
    current timestamp — it returns every message. TODO: add the intended
    timestamp filter or rename the function.
    """
    messages = tr_messages.Messages.query.all()
    return MessageSchema.all_message_schema.dump(messages).data
def getSecondsDifference(dt):
    """Return the whole number of seconds from now until ``dt``.

    Negative when ``dt`` is in the past; fractions truncate toward zero.
    """
    remaining = dt - datetime.now()
    return int(remaining.total_seconds())
# Send email function with SMTP (via Flask-Mail, configured on ``app``).
def sendEmail(email_addresses, subject, message):
    """Send ``message`` with ``subject`` to ``email_addresses`` over SMTP.

    Returns 'success' or 'failed'; failures are logged, never raised.
    """
    print(email_addresses, subject, message)  # debug output
    try:
        mail = Mail(app)
        msg = Message(subject, sender=(app.config['MAIL_SENDER'], app.config['MAIL_USERNAME']), recipients=email_addresses)
        msg.body = message
        # NOTE(review): ``response`` is presumably None for Flask-Mail's
        # send(); it is logged anyway — verify if the log line is useful.
        response = mail.send(msg)
        logging.info(response)
        return 'success'
    except Exception as e:
        logging.exception("Email error")
        return 'failed'
# return request.post(
# "https://api.mailgun.net/v3/{}/messages".format(app.config['MAIL_DOMAIN']),
# auth=("api", app.config['MAIL_API_KEY']),
# data={"from": "{}} <mailgun@{}}>".format(app.config['MAIL_SENDER'], app.config['MAIL_DOMAIN']),
# "to": [email_address],
# "subject": subject,
# "text": message}
# )
def convertToDateTime(str):
    """Parse a '<day> <Mon> <year> <HH:MM>' string into a datetime.

    NOTE: the parameter name shadows the builtin ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    parsed = datetime.strptime(str, '%d %b %Y %H:%M')
    return parsed
| robinraintama/flask_email | service/controllers/MessageController.py | MessageController.py | py | 3,148 | python | en | code | 0 | github-code | 36 |
944344202 | pkgname = "cairomm1.0"
# cports build template for the 1.14.x (cairomm-1.0 API) series of cairomm.
pkgver = "1.14.5"
pkgrel = 0
build_style = "meson"
configure_args = ["-Dboost-shared=true"]
hostmakedepends = ["meson", "pkgconf"]
makedepends = ["cairo-devel", "libsigc++2-devel"]
# The test suite additionally needs boost, fontconfig and a real font.
checkdepends = ["boost-devel", "fontconfig-devel", "fonts-dejavu-otf"]
pkgdesc = "C++ bindings to Cairo graphics library (1.14)"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.0-or-later"
url = "https://www.cairographics.org/cairomm"
source = f"http://cairographics.org/releases/cairomm-{pkgver}.tar.xz"
sha256 = "70136203540c884e89ce1c9edfb6369b9953937f6cd596d97c78c9758a5d48db"


@subpackage("cairomm1.0-devel")
def _devel(self):
    # Development subpackage: the default -devel split plus the private
    # cairomm-1.0 library directory.
    return self.default_devel(
        extra=[
            "usr/lib/cairomm-1.0",
        ]
    )
| chimera-linux/cports | contrib/cairomm1.0/template.py | template.py | py | 748 | python | en | code | 119 | github-code | 36 |
7424990082 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Harmonia's Kärcher
# Fixes surnäme items in ä mässive wäy.
import json
from SPARQLWrapper import SPARQLWrapper, JSON
import requests
from bs4 import BeautifulSoup
def getWDcontent(item):
    """Query Wikidata over SPARQL for all labels and descriptions of *item*.

    Returns a tuple ``(label_langs, descriptions)`` where ``label_langs``
    maps language codes to label text and ``descriptions`` lists the
    language codes that already have a description.

    Side effects: uses the module-level ``sparql`` wrapper and appends any
    newly seen label language to the global ``all_labels_languages``.
    """
    sparql.setQuery("""
    SELECT DISTINCT ?lang ?label ?description WHERE {{
    {{
    SELECT ?lang ?label WHERE {{
    wd:{0} rdfs:label ?label .
    BIND(LANG(?label) AS ?lang) .
    }}
    }} UNION {{
    SELECT ?lang ?description WHERE {{
    wd:{0} schema:description ?description .
    BIND(LANG(?description) AS ?lang) .
    }}
    }}
    }} ORDER BY ?lang
    """.format(item)) # Sample query: http://tinyurl.com/hj4z2hu
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    results = results["results"]["bindings"]
    label_langs = {}
    descriptions = []
    # Each binding row carries either a label or a description variable.
    for res in results:
        for k, v in res.items():
            if k == "label":
                lang = v['xml:lang']
                if lang not in label_langs:
                    label = v['value']
                    label_langs[lang] = label
                if lang not in all_labels_languages:
                    all_labels_languages.append(lang)
            elif k == "description":
                lang = v['xml:lang']
                descriptions.append(lang)
    print(' - Labels found in {} language(s)'.format(len(label_langs)))
    print(' - Descriptions found in {} language(s)'.format(len(descriptions)))
    return label_langs, descriptions
# Global variables
all_labels_languages = []
all_items = []
# Canned per-language "surname" descriptions to apply to every item.
with open("resources/surname.json") as file:
    surname_descriptions = json.load(file)
    file.close()
out = ""
# Languages that share the Latin-script surname label verbatim.
all_langs = ['af', 'an', 'ast', 'bar', 'bm', 'br', 'ca', 'co', 'cs', 'cy',
             'da', 'de', 'de-at', 'de-ch', 'en', 'en-ca', 'en-gb', 'eo', 'es',
             'et', 'eu', 'fi', 'fr', 'frc', 'frp', 'fur', 'ga', 'gd', 'gl',
             'gsw', 'hr', 'hu', 'ia', 'id', 'ie', 'io', 'it', 'jam', 'kab',
             'kg', 'lb', 'li', 'lij', 'lt', 'lv', 'mg', 'min', 'ms', 'nap',
             'nb', 'nds', 'nds-nl', 'nl', 'nn', 'nrm', 'oc', 'pap', 'pcd',
             'pl', 'pms', 'prg', 'pt', 'pt-br', 'rgn', 'rm', 'ro', 'sc', 'scn',
             'sco', 'sk', 'sr-el', 'sv', 'sw', 'tr', 'vec', 'vi', 'vls', 'vmf',
             'vo', 'wa', 'wo', 'zu', 'fo', 'is', 'kl']
# Wikidata SPARQL endpoint used by getWDcontent().
endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
sparql = SPARQLWrapper(endpoint)
# Main query: the item list is scraped from a wiki user page via the REST API.
rest_base = "https://www.wikidata.org/api/rest_v1/"
rest_request = "page/html/User%3AHarmonia_Amanda%2FNames"
response = requests.get(rest_base + rest_request)
soup = BeautifulSoup(response.text, "lxml")
all_items = soup.p.text.split()
# Build QuickStatements commands (tab-separated) for every item.
for item in all_items:
    print('\nParsing item {}'.format(item))
    labels, descriptions = getWDcontent(item)
    label = labels['en']
    out += "{}\tAen\t{} (surname)\n".format(item, label)
    # We fix descriptions first to avoid conflicts
    for lang, description in surname_descriptions.items():
        out += "{}\tD{}\t{}\n".format(item, lang, description)
    # Force empty descriptions for languages not in the previous list
    for lang in descriptions:
        if lang not in surname_descriptions.keys():
            out += "{}\tD{}\t\"\"\n".format(item, lang)
    print(labels, descriptions, label)
    # Copy the English label to every language in all_langs.
    for lang in all_langs:
        out += "{}\tL{}\t{}\n".format(item, lang, label)
    out += "\n"
# temp-qs.txt: QuickStatements input; temp-ps.txt: PetScan item list.
f = open('temp-qs.txt', 'w')
f.write(out)
f.close()
f = open('temp-ps.txt', 'w')
f.write(('\n').join(all_items))
f.close()
qs_url = "https://tools.wmflabs.org/wikidata-todo/quick_statements.php"
ps_url = "https://petscan.wmflabs.org/#tab_other_sources"
print("\n=============")
print("Operation complete! {} items parsed.".format(len(all_items)))
print("- Please paste the content of temp-qs.txt to {}".format(qs_url))
ps_txt = "- Please paste the content of temp-ps.txt to {} ".format(ps_url)
ps_txt += "and run the command '-P31:Q4167410'"
print(ps_txt)
print("Note: during the execution of the script,")
print(" labels were found in the following languages:")
print(', '.join(all_labels_languages))
| Ash-Crow/scrapers | harmonias-karcher.py | harmonias-karcher.py | py | 4,238 | python | en | code | 3 | github-code | 36 |
class Solution:
    def findLatestStep(self, arr: List[int], m: int) -> int:
        """LeetCode 1562: latest step at which a group of exactly m ones exists.

        ``run_len[p]`` stores, at the *boundaries* of each run of ones, the
        run's length; interior cells may be stale, but a new bit only ever
        touches its immediate neighbours, which are always boundaries.
        """
        n = len(arr)
        if m == n:
            return n
        run_len = [0] * (n + 2)
        latest = -1
        for step, pos in enumerate(arr):
            left = run_len[pos - 1]
            right = run_len[pos + 1]
            # A neighbouring run of exactly m is about to be destroyed by
            # this merge, so the previous step is the last time it existed.
            if left == m or right == m:
                latest = step
            merged = left + right + 1
            run_len[pos - left] = merged
            run_len[pos + right] = merged
        return latest
74169633702 | import logging
import os
import uuid
from io import BytesIO
from typing import Sequence
from zipfile import ZIP_DEFLATED, ZipFile
import anndata as ad
import dramatiq
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile
from fastapi.responses import ORJSONResponse
from matplotlib.colors import rgb2hex
from sqlalchemy.orm import Session
from starlette.responses import StreamingResponse
from starlette.status import HTTP_404_NOT_FOUND
from histocat.api.db import get_db
from histocat.api.security import get_active_member
from histocat.config import config
from histocat.core.dataset import service as dataset_service
from histocat.core.dataset.dto import DatasetDto, DatasetUpdateDto
from histocat.core.image import get_qualitative_colors
from histocat.core.member.models import MemberModel
from histocat.core.utils import stream_bytes
# Module-level logger and the FastAPI router exposing the dataset endpoints.
logger = logging.getLogger(__name__)

router = APIRouter()
@router.get("/groups/{group_id}/projects/{project_id}/datasets", response_model=Sequence[DatasetDto])
def get_project_datasets(
    group_id: int,
    project_id: int,
    db: Session = Depends(get_db),
    member: MemberModel = Depends(get_active_member),
):
    """List all datasets belonging to the given project."""
    return dataset_service.get_project_datasets(db, project_id=project_id)
@router.patch("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def update(
    group_id: int,
    dataset_id: int,
    params: DatasetUpdateDto,
    member: MemberModel = Depends(get_active_member),
    db: Session = Depends(get_db),
):
    """Apply ``params`` to an existing dataset; 404 when the id is unknown."""
    dataset = dataset_service.get(db, id=dataset_id)
    if not dataset:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
    return dataset_service.update(db, item=dataset, params=params)
@router.get("/groups/{group_id}/datasets/{dataset_id}/centroids")
def get_centroids(
    group_id: int,
    dataset_id: int,
    member: MemberModel = Depends(get_active_member),
    db: Session = Depends(get_db),
):
    """Get dataset cell centroids.

    Reads the dataset's AnnData cell file and returns per-cell centroid
    coordinates plus a per-cell hex color derived from AcquisitionId.
    """
    dataset = dataset_service.get(db, id=dataset_id)
    if not dataset:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
    adata = ad.read_h5ad(dataset.cell_file_location())
    # One qualitative color per acquisition, spanning the observed id range.
    mappable = get_qualitative_colors(vmin=adata.obs["AcquisitionId"].min(), vmax=adata.obs["AcquisitionId"].max())
    colors = [rgb2hex(c) for c in mappable.to_rgba(adata.obs["AcquisitionId"])]
    # Coordinates are rounded to 2 decimals to shrink the JSON payload.
    output = {
        "acquisitionIds": adata.obs["AcquisitionId"].tolist(),
        "cellIds": adata.obs["CellId"].tolist(),
        "objectNumbers": adata.obs["ObjectNumber"].tolist(),
        "x": adata.obs["CentroidX"].round(2).tolist(),
        "y": adata.obs["CentroidY"].round(2).tolist(),
        "colors": colors,
    }
    return ORJSONResponse(output)
@router.get("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def get_by_id(
    group_id: int,
    dataset_id: int,
    member: MemberModel = Depends(get_active_member),
    db: Session = Depends(get_db),
):
    """Fetch a single dataset, raising 404 when it does not exist."""
    dataset = dataset_service.get(db, id=dataset_id)
    if not dataset:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
    return dataset
@router.delete("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def delete_by_id(
    group_id: int,
    dataset_id: int,
    member: MemberModel = Depends(get_active_member),
    db: Session = Depends(get_db),
):
    """Delete a dataset by id, raising 404 when it does not exist."""
    removed = dataset_service.remove(db, id=dataset_id)
    if not removed:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
    return removed
@router.get("/datasets/{dataset_id}/download")
async def download_by_id(dataset_id: int, db: Session = Depends(get_db)):
    """Download a dataset's whole directory as an in-memory ZIP archive.

    NOTE(review): unlike the sibling endpoints this route has no group
    prefix and no ``get_active_member`` dependency — confirm whether the
    missing authentication is intentional.
    """
    item = dataset_service.get(db, id=dataset_id)
    if not item:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
    file_name = f"{item.name}.zip"
    abs_src = os.path.abspath(item.location)
    buffer = BytesIO()
    # Archive every file under the dataset directory, storing paths
    # relative to the dataset root. (Fix: the archive variable was named
    # ``zip``, shadowing the builtin; manual prefix slicing replaced with
    # os.path.relpath.)
    with ZipFile(buffer, "w", ZIP_DEFLATED) as archive:
        for folder_name, _, filenames in os.walk(item.location):
            for filename in filenames:
                absname = os.path.abspath(os.path.join(folder_name, filename))
                archive.write(absname, os.path.relpath(absname, abs_src))
    headers = {"Content-Disposition": f'attachment; filename="{file_name}"'}
    return StreamingResponse(stream_bytes(buffer.getvalue()), media_type="application/zip", headers=headers)
@router.post("/groups/{group_id}/projects/{project_id}/datasets/upload")
def upload_dataset(
    group_id: int,
    project_id: int,
    # NOTE: ``type`` shadows the builtin, but it is also the public form
    # field name, so it cannot be renamed without breaking clients.
    type: str = Form(None),
    masks_folder: str = Form(None),
    regionprops_folder: str = Form(None),
    intensities_folder: str = Form(None),
    file: UploadFile = File(None),
    member: MemberModel = Depends(get_active_member),
    db: Session = Depends(get_db),
):
    """Store an uploaded dataset archive and enqueue its async import.

    The file is written to a fresh UUID-named inbox directory, then an
    ``import_dataset`` dramatiq message is enqueued with the form options.
    """
    path = os.path.join(config.INBOX_DIRECTORY, str(uuid.uuid4()))
    if not os.path.exists(path):
        os.makedirs(path)
    uri = os.path.join(path, file.filename)
    # NOTE(review): file.file.read() loads the whole upload into memory —
    # consider chunked copying for very large archives.
    with open(uri, "wb") as f:
        f.write(file.file.read())
    broker = dramatiq.get_broker()
    message = dramatiq.Message(
        actor_name="import_dataset",
        queue_name="import",
        args=(),
        kwargs={
            "type": type,
            "masks_folder": masks_folder,
            "regionprops_folder": regionprops_folder,
            "intensities_folder": intensities_folder,
            "uri": uri,
            "project_id": project_id,
        },
        options={},
    )
    broker.enqueue(message)
    return {"uri": uri}
| BodenmillerGroup/histocat-web | backend/histocat/api/dataset/controller.py | controller.py | py | 5,914 | python | en | code | 5 | github-code | 36 |
23124927437 | from django.conf.urls import patterns, include, url
from rest_framework.routers import DefaultRouter
from blog import views
# DRF router generates the standard list/detail routes for both viewsets.
router = DefaultRouter()
router.register(r'post', views.PostViewSet)
router.register(r'category', views.CategoryViewSet)

# NOTE(review): ``patterns()`` (imported above) was deprecated in Django 1.8
# and removed in 1.10; on modern Django this would be a plain list of
# url()/path() entries.
urlpatterns = patterns('',
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| PolarIt/myblog | myblog/urls.py | urls.py | py | 398 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.