seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6566002246 | # -*- coding: utf-8 -*-
# IEC
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
#from iec import iec_input <---- HAS THIS BEEN DONE? (I JUST CHANGED THE NAME)
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import cgi
import cgitb
cgitb.enable()
import logging
import sys
sys.path.append("../utils")
import utils.json_utils
sys.path.append("../iec")
from iec import iec_model,iec_tables
from uber import uber_lib
from django.template import Context, Template
import rest_funcs
class IecOutputPage(webapp.RequestHandler):
def post(self):
form = cgi.FieldStorage()
LC50 = float(form.getvalue('LC50'))
threshold = float(form.getvalue('threshold'))
dose_response = float(form.getvalue('dose_response'))
iec_obj = iec_model.iec(True,True,'single',dose_response, LC50, threshold, None)
text_file = open('iec/iec_description.txt','r')
x1 = text_file.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
ChkCookie = self.request.cookies.get("ubercookie")
html = uber_lib.SkinChk(ChkCookie, "IEC Output")
html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'iec','page':'output'})
html = html + template.render (templatepath + '03ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberoutput_start.html', {
'model':'iec',
'model_attributes':'IEC Output'})
html = html + iec_tables.timestamp(iec_obj)
html = html + iec_tables.table_all(iec_obj)
html = html + template.render(templatepath + 'export.html', {})
html = html + template.render(templatepath + '04uberoutput_end.html', {})
html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
rest_funcs.save_dic(html, iec_obj.__dict__, "iec", "single")
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', IecOutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| hongtao510/u_tool | iec/iec_output.py | iec_output.py | py | 2,218 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cgitb.enable",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numb... |
4157944015 | import os
import re
import jwt
from django.http import JsonResponse
from django.conf import settings
from apis.models import User
def login_decorator(func):
"""로그인 데코레이터"""
def wrapper(self, request, *args, **kwargs):
try:
token = request.headers.get("Authorization", None)
token_payload = jwt.decode(
token,
os.environ.get('SECRET_KEY'),
os.environ.get('ALGORITHM'))
user = User.objects.get(id=token_payload['user_id'])
request.user = user
return func(self, request, *args, **kwargs)
except jwt.DecodeError:
return JsonResponse({'message' : 'INVALID_USER'}, status=401)
except User.DoesNotExist:
return JsonResponse({'message' : 'INVALID_USER'}, status=401)
return wrapper
def text_validation(text):
"""텍스트 유효성 검사"""
"""텍스트에 한글, 영어, 숫자, 물음표, 느낌표, 마침표, 따옴표, 공백 외 다른 문자열 제거"""
REGEX_TEXT = '[^a-zA-Z가-힣0-9.,?!\"\'\s]'
text = re.sub(REGEX_TEXT, '', text)
"""맨 앞과 뒤 공백 제거"""
text = text.strip()
""".과 ?과 !를 구분자로 텍스트 쪼개기"""
text = re.split('([.|?|!])', text)
for i in range(len(text)):
text[i] = text[i].strip()
for i in range(len(text)):
if text[i] in ('.', '?', '!'):
text[i] += '*'
text = ''.join(text)
text = re.split('[*]', text)
value = []
for i in range(len(text)):
if len(text[i]) > 1:
value.append(text[i])
return value
class AudioOperator:
def audio_maker(id):
"""음원 생성 함수"""
file = f'{id}.wav'
f = open(f'{settings.BASE_DIR}/media/project/{id}.wav', 'w')
f.close
return file
def audio_delete(id):
"""음원 삭제 함수"""
os.remove(f'{settings.BASE_DIR}/media/project/{id}.wav') | HyeonWooJo/tts-input-service | backend/core/utils.py | utils.py | py | 2,087 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "jwt.decode",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_... |
22652081377 | import boto3
import unittest
import lakelayer
from datetime import datetime
from datetime import timedelta
import json
import pytz
#
# Author: Tim Burns
# License: Apache 2.0
#
# A Testing Class to Validate Scraping the KEXP Playlist for the blog
# https://www.owlmountain.net/
# If you like this, donate to KEXP: https://www.kexp.org/donate/
class KexpPlaylistHistoricalDataLakeTest(unittest.TestCase):
session = boto3.Session(profile_name="owlmtn")
s3_bucket = "owlmtn-stage-data"
def test_get_historical(self):
kexp_lake = lakelayer.KexpDataLake(s3_client=self.session.client("s3"),
s3_bucket=self.s3_bucket,
s3_stage="stage/kexp")
kexp_reader = lakelayer.KexpApiReader()
utc = pytz.UTC
time_end_date = datetime.now() - timedelta(days=4*365)
oldest_playlist_record = kexp_lake.list_playlists()[0]
print(oldest_playlist_record)
obj = self.session.client("s3").get_object(Bucket=self.s3_bucket,
Key=oldest_playlist_record["Key"])
print(obj)
oldest_playlist_array = obj['Body'].read().decode("utf-8").split("\n")
print(json.loads(oldest_playlist_array[0])['airdate'] + "\n\t" +
json.loads(oldest_playlist_array[(len(oldest_playlist_array))-1])['airdate'])
airdate_before = datetime.strptime(json.loads(oldest_playlist_array[(len(oldest_playlist_array))-1])['airdate'],
lakelayer.datetime_format_api)
playlist_map = None
print(f"{utc.localize(time_end_date)} to {airdate_before}")
# When there is only one key, we have exhausted the range
while playlist_map is None or len(playlist_map.keys()) > 1:
print(f"{time_end_date} to {airdate_before}")
playlist_map = kexp_reader.get_playlist(read_rows=1000,
airdate_after_date=time_end_date,
airdate_before_date=airdate_before)
print(len(playlist_map.keys()))
if len(playlist_map.keys()) == 0:
break
else:
last_index = len(playlist_map.keys())-1
airdate_before = datetime.strptime(playlist_map[list(playlist_map.keys())[last_index]]['airdate'],
lakelayer.datetime_format_api)
runtime_key = datetime.strftime(airdate_before, lakelayer.datetime_format_lake)
print(runtime_key)
if len(playlist_map.keys()) > 1:
shows_map = kexp_reader.get_shows(playlist_map)
result = kexp_lake.put_data(runtime_key, playlist_map, shows_map, time_end_date, airdate_before)
print(result)
if __name__ == '__main__':
unittest.main()
| timowlmtn/bigdataplatforms | src/kexp/pytest/test_kexp_get_historical_1000.py | test_kexp_get_historical_1000.py | py | 2,953 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "boto3.Session",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "lakelayer.KexpDataLake",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lakelayer... |
3480526611 | x = [5, 6, 2, 1, 19, 5]
y = [200, 300, 180, 50, 1100, 580]
import matplotlib.pyplot as plt
from scipy import stats
s, inter, r, p, err = stats.linregress(x, y)
def fun(x):
return s * x + inter
model = list(map(fun, x))
plt.scatter(x, y)
plt.plot(x, model, color='r')
plt.show() | jj8000/kurs-cz.2 | zaj_6/regresja_prow.py | regresja_prow.py | py | 287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.stats.linregress",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
71874630115 | """Endpoints of the strava_ingestion_service for metriker."""
from database_utils.activity_handler import StravaActivityHandler, parse_activity
from database_utils.user_handler import StravaUserHandler
from fastapi import APIRouter
from .config import settings
from .strava_handler import StravaHandler
# initiate database wrappers
user_handler = StravaUserHandler(
secret_key=settings.SECRET_KEY.get_secret_value(),
user=settings.DB_USER,
password=settings.DB_PASS.get_secret_value(),
host=settings.DB_HOST,
port=settings.DB_PORT,
database=settings.DB_NAME,
)
activity_handler = StravaActivityHandler(
user=settings.DB_USER,
password=settings.DB_PASS.get_secret_value(),
host=settings.DB_HOST,
port=settings.DB_PORT,
database=settings.DB_NAME,
)
# initiate strava api wrapper
strava_handler = StravaHandler(
client_id=settings.STRAVA_CLIENT_ID,
client_secret=settings.STRAVA_CLIENT_SECRET.get_secret_value(),
user_handler=user_handler,
)
router = APIRouter()
@router.post("/updateUserById")
def update_user_by_id(user_id: str) -> None:
"""Request information about a user from the strava api.
Args:
user_id: id of the user on strava
Returns:
200, None
"""
old_user = user_handler.get(user_id)
new_user = strava_handler.get_logged_in_athlete(user_id=user_id)
old_user.name = new_user["firstname"]
user_handler.update(old_user)
@router.post("/updateUserActivityById")
def update_user_activity_by_id(activity_id: str, user_id: str) -> None:
"""Request a single activity from the strava api.
The activity is defined by activity_id and belongs to user_id.
Args:
activity_id: id of the activity on strava
user_id: id of the user the activity belongs to on strava
Returns:
200, None
"""
activity = strava_handler.get_activity_by_id(user_id=user_id, activity_id=activity_id)
activity_handler.add(parse_activity(activity))
@router.post("/updateUserActivities")
def update_user_activities(user_id: str) -> None:
"""Request all activities of a user from the strava api.
Args:
user_id: id of the user on strava
Returns:
200, None
"""
activities = strava_handler.get_logged_in_athlete_activities(user_id)
for activity in activities:
activity_handler.add(parse_activity(activity))
@router.delete("/deleteUserActivityById")
def delete_user_activity_by_id(activity_id: str) -> None:
"""Request all activities of a user from the strava api.
Args:
activity_id: id of the user on strava
Returns:
200, None
"""
activity_handler.delete(activity_id)
@router.delete("/deleteUserById")
def delete_user_by_id(user_id: str) -> None:
"""Request all activities of a user from the strava api.
Args:
user_id: id of the user on strava
Returns:
200, None
"""
activity_handler.delete_user_activities(user_id)
user_handler.delete(user_id)
| christophschaller/metriker | strava_ingestion_service/strava_ingestion_service/endpoints.py | endpoints.py | py | 3,012 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "database_utils.user_handler.StravaUserHandler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.settings.SECRET_KEY.get_secret_value",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.settings.SECRET_KEY",
"line_number": 12,
... |
17326080433 | import cv2
class Coordinates:
def __init__(self, video_path: str):
self.cap = cv2.VideoCapture(video_path)
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", self.print_coordinates)
self.video()
def print_coordinates(self, event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
print(f"[{x}, {y}],")
def video(self):
while True:
status, frame = self.cap.read()
if not status:
break
cv2.imshow("Frame", frame)
if cv2.waitKey(10) & 0xFF == ord('q'): break
def __del__(self):
self.cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
c = Coordinates("data/traffic.mp4")
| FernandoLpz/YouTube | CountCars/coordinates.py | coordinates.py | py | 775 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.setMouseCallback",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTOND... |
70113408995 | from django.db import models
# Create your models here.
class BaseModelTable(models.Model):
id = models.AutoField("id", primary_key=True)
create_time = models.DateTimeField(null=True, verbose_name='创建时间', auto_now_add=True)
modify_time = models.DateTimeField(null=True, verbose_name='修改时间', auto_now=True)
is_deleted = models.IntegerField('0:未删除 1:已删除', null=True, default=0)
class Meta:
abstract = True # 定义抽象models class
ordering = ['-create_time']
class Department(BaseModelTable):
name = models.CharField("部门名", max_length=50, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "部门表"
db_table = 'department'
class BusinessLine(BaseModelTable):
lineName = models.CharField('业务线名', max_length=50, null=True)
department = models.ForeignKey(Department, on_delete=models.CASCADE)
def __str__(self):
return self.lineName
class Meta:
verbose_name = "业务线表"
db_table = 'business_line'
class Group(BaseModelTable):
groupName = models.CharField('垂直小组名', max_length=50, null=True)
businessLine = models.ForeignKey(BusinessLine, on_delete=models.CASCADE)
def __str__(self):
return self.groupName
class Meta:
verbose_name = "垂直小组表"
db_table = 'business_group'
class Config(BaseModelTable):
group = models.ForeignKey(Group, on_delete=models.CASCADE)
moduleName = models.CharField('模块名称', max_length=20, null=True)
gitAddress = models.URLField('git地址', null=True)
caseType = models.IntegerField('用例类型')
creator = models.CharField('创建人', max_length=20, null=True)
modifier = models.CharField('修改人', max_length=20, null=True)
def __str__(self):
return self.moduleName
class Meta:
verbose_name = "配置信息"
db_table = 'git_config'
from django.db import connection
class BusinessManager(models.Manager):
"""
测试用例管理器
过滤状态为 0 启用的用例信息展示
"""
id = 1
status = 0
def get_queryset(self):
return super(BusinessManager, self).get_queryset().filter(status=0)
def ownCustomSql(self):
with connection.cursor() as cursor:
cursor.execute("UPDATE case_info SET status = 1 WHERE id = %s", [self.id, ])
cursor.execute("SELECT * FROM case_info WHERE status = %s", [self.status, ])
row = cursor.fetchone()
return row
class CaseInfo(BaseModelTable):
STATUS_CHOICES = [(0, '正常'), (1, '废弃'), ]
group = models.ForeignKey(Group, on_delete=models.CASCADE)
moduleName = models.CharField('模块名称', max_length=20, null=True)
caseName = models.CharField('用例名称', max_length=50, null=True)
caseType = models.IntegerField('用例类型')
status = models.IntegerField('状态', choices=STATUS_CHOICES, default=0)
casePath = models.CharField('用例路径', max_length=50, null=True)
modifier = models.CharField('修改人', max_length=20, null=True)
apiName = models.CharField('接口名称', max_length=50, null=True)
excelId = models.IntegerField(null=True)
serverName = models.CharField('服务名', max_length=50, null=True)
objects = models.Manager()
businessData = BusinessManager()
def __str__(self):
return self.caseName
class Meta:
verbose_name = "用例信息"
db_table = 'case_info'
| jiqialin/AutoCaseManagemantPlatform | AutoCaseInfoManagement/index/models.py | models.py | py | 3,588 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
4851242917 | # -*- coding: utf-8 -*-
from flask import abort, Blueprint, render_template, request
from .partial import send_file_partial
pileup_bp = Blueprint('pileup', __name__, template_folder='templates',
static_folder='static', static_url_path='/pileup/static')
@pileup_bp.route('/remote/static', methods=['OPTIONS', 'GET'])
def remote_static():
"""Stream *large* static files with special requirements."""
file_path = request.args.get('file')
range_header = request.headers.get('Range', None)
if not range_header and file_path.endswith('.bam'):
return abort(500)
new_resp = send_file_partial(file_path)
return new_resp
@pileup_bp.route('/pileup')
def viewer():
"""Visualize BAM alignments."""
vcf_file = request.args.get('vcf')
bam_files = request.args.getlist('bam')
bai_files = request.args.getlist('bai')
samples = request.args.getlist('sample')
alignments = [{'bam': bam, 'bai': bai, 'sample': sample}
for bam, bai, sample in zip(bam_files, bai_files, samples)]
position = {
'contig': request.args['contig'],
'start': request.args['start'],
'stop': request.args['stop']
}
return render_template('pileup/pileup.html', alignments=alignments,
position=position, vcf_file=vcf_file)
| gitter-badger/scout | scout/server/blueprints/pileup/views.py | views.py | py | 1,343 | python | en | code | null | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
988098948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 15:21:25 2018
@author: Sergio Balderrama
ULg-UMSS
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import enlopy as el
from matplotlib.dates import DateFormatter
#%%
############################# Load Method ###################################
Power_Data_4 = pd.read_csv('Data_Espino_Thesis.csv',index_col=0)
index = pd.date_range(start='2016-01-01 00:00:00', periods=166464,
freq=('5min'))
Power_Data_4.index = index
start = '2016-03-21 00:00:00'
end = '2017-03-20 23:55:00'
index2 = pd.date_range(start='2016-03-21 00:00:00', periods=365,
freq=('1D'))
#index3 = pd.DatetimeIndex(start='2016-01-01 00:05:00', periods=3456,
# freq=('5min'))
load = Power_Data_4['Demand'][start:end]*1000
Hour = []
for i in range(365):
for j in range(288):
Hour.append(j)
Month_Average = load.groupby([load.index.month,Hour]).mean()
#Month_Average_2 = Month_Average
#Month_Average_2.index = index3
mean = pd.Series()
n = 0
for i in range(365):
m = index2[i].month
for i in range(288):
mean.loc[n] = Month_Average[(m,i)]
n += 1
mean.index = load.index
error = load - mean
log_error = np.log(load) - np.log(mean)
z = 0.3 # 0.3
log_error = np.maximum(-z,log_error)
for i in log_error.index:
if log_error[i] > z:
log_error.loc[i] = z
##Sample new loads from load duration curve
curve = log_error
N = len(curve)
LDC_curve = el.get_LDC(curve)
Scenarios = pd.DataFrame()
Scenarios['Base Scenario'] = load
scenarios = 20 # number of scenarios cretated
for i in range(1, scenarios+1):
curve_ldc = el.gen_load_from_LDC(LDC_curve, N=len(LDC_curve[0]))
PSD = plt.psd(curve, Fs=1, NFFT=N, sides='twosided')
Sxx = PSD[0]
curve_psd = el.generate.gen_load_from_PSD(Sxx, curve_ldc, 1)
load_psd = mean*np.exp(curve_psd)
name= 'Scenario ' + str(i)
Scenarios[name] = load_psd
#%%
Scenarios['year'] = Scenarios.index.year
Scenarios['day'] = Scenarios.index.dayofyear
Scenarios['hour'] = Scenarios.index.hour
hourly_scenarios = Scenarios.groupby(['year','day', 'hour']).mean()
index_hourly = pd.date_range(start='2016-03-21 01:00:00', periods=8760,
freq=('1H'))
hourly_scenarios.index = index_hourly
#%%
size = [20,15]
fig=plt.figure(figsize=size)
ax=fig.add_subplot(111, label="1")
alpha= 0.1
start = 24*50
end = 24*60
ax.plot(hourly_scenarios.index[start:end],
hourly_scenarios['Base Scenario'][start:end]/1000,
c='k')
for i in range(1, scenarios+1):
name= 'Scenario ' + str(i)
ax.plot(hourly_scenarios.index[start:end],
hourly_scenarios[name][start:end]/1000, c='b',
alpha=alpha)
date_form = DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(date_form)
ax.set_xlabel("Time (hours)",size=30)
ax.set_ylabel("kW",size=30)
ax.set_xlim(hourly_scenarios.index[start], hourly_scenarios.index[end-1])
tick_size = 25
#mpl.rcParams['xtick.labelsize'] = tick_size
ax.tick_params(axis='x', which='major', labelsize = tick_size )
ax.tick_params(axis='y', which='major', labelsize = tick_size )
handle1 = mlines.Line2D([], [], color='b',
label='Stochastic Scenarios')
handle2 = mlines.Line2D([], [], color='k',
label='Base scenario')
plt.legend(handles=[handle2, handle1], bbox_to_anchor=(0.85, -0.05),
fontsize = 30, frameon=False, ncol=2)
plt.savefig('Top_Down_Demand_Modeling.png')
plt.show()
#%%
| Slbalderrama/Phd_Thesis_Repository | Demand_Modeling/Scenario_Creation_Demand.py | Scenario_Creation_Demand.py | py | 3,753 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
... |
20581591067 | """
多进程多线程抓取案例
"""
from multiprocessing import Process, Queue, Pool, Manager, Lock
import os, time, random, requests, traceback, json, threading
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import pymongo
client = pymongo.MongoClient(host='127.0.0.1', port=27017, username="root", password="123456", authSource="test", authMechanism='SCRAM-SHA-1')
# 获取数据库,初始化数据库
db = client['test']
site = 'http://www.txt8.net'
# 伪装请求头
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
# 列表函数
def list_page(q, num):
print('start mission')
try:
for index in range(1, num) :
url = '%s/sort/dushi/%s.html' %(site, index)
print('抓取 url = %s' % url)
res = requests.get(url, headers = headers).text
soup = BeautifulSoup(res, 'lxml')
page_data = []
for i in soup.find('ul', class_='librarylist').find_all('li'):
try:
left = i.find('div', class_='pt-ll-l')
right = i.find('div', class_='pt-ll-r')
title = left.find('a').get('title').strip()
url = site + left.find('a').get('href').strip()
pic = site + left.find('img')['src'].strip()
author = right.find_all('a')[1].get_text().strip()
content = right.find('p', class_='intro').get_text().strip()
item = {'title': title, 'url': url, 'pic': pic, 'author': author, 'content': content}
#print(json.dumps(item, ensure_ascii=False))
page_data.append(item)
except Exception as e:
traceback.print_exc()
print('抓取数量:%s' % len(page_data))
q.put(page_data)
except:
traceback.print_exc()
# 抓取完成,让子进程退出循环
time.sleep(10)
q.put('exit')
# 进程中的子线程尽心抓取保存工作
def download_thread(item):
try:
db.txt8.insert_one(item)
except:
traceback.print_exc()
# 保存函数
def download(q):
print('子进程开启==========')
# 开启无线循环
while True:
# 从队列获取名字和url
page_data = q.get()
if page_data == 'exit':
break
print(f'子进程 {os.getpid()} 开启线程池获取数据,本次待抓取数量:{len(page_data)}')
# 建立拥有2个线程的线程池
pool = ThreadPoolExecutor(2)
# 通过map映射列表调用hello函数
pool.map(download_thread, page_data)
# 等待任务完成,关闭线程池
pool.shutdown(wait = True)
if __name__ == '__main__':
print('初始化数据库')
# 清空数据
db.txt8.delete_many({})
print('数据库初始化完成')
print('开始抓取数据')
manager = Manager()
# 使用Manager主进程队列
q = manager.Queue()
# 定义进程池
p = Pool()
# 创建进程
p.apply_async(list_page, args=(q, 11, ))
p.apply_async(download, args=(q, ))
p.close()
p.join()
print('完成抓取工作')
# 关闭数据库
client.close()
| qugemingzizhemefeijin/python-study | ylspideraction/chapter09/_020mulitprocessingthreadtxt8.py | _020mulitprocessingthreadtxt8.py | py | 3,463 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "traceback.print_ex... |
39994274479 | from qiskit import QuantumCircuit, execute
from qiskit import IBMQ, Aer
from qiskit.visualization import plot_histogram
from qiskit.providers.aer.noise import NoiseModel
# Build noise model from backend properties
provider = IBMQ.load_account()
print(provider)
backend = provider.get_backend('ibmq_vigo')
noise_model = NoiseModel.from_backend(backend)
# Get coupling map from backend
coupling_map = backend.configuration().coupling_map
# Get basis gates from noise model
basis_gates = noise_model.basis_gates
# Make a circuit
circ = QuantumCircuit(3, 3)
circ.h(0)
circ.cx(0, 1)
circ.cx(1, 2)
circ.measure([0, 1, 2], [0, 1, 2])
# Perform a noise simulation
result = execute(circ, Aer.get_backend('qasm_simulator'),
coupling_map=coupling_map,
basis_gates=basis_gates,
noise_model=noise_model).result()
counts = result.get_counts(0)
plot_histogram(counts) | simonetome/QuantumGeneticAlgorithm | GQA/quantum/prova.py | prova.py | py | 908 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "qiskit.IBMQ.load_account",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "qiskit.IBMQ",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "qiskit.providers.aer.noise.NoiseModel.from_backend",
"line_number": 10,
"usage_type": "call"
},
{
... |
9539235134 | """
Handles retrieving policies from:
ACL/COLING 2014 Dataset Zip File
Policies Directory
Scraped from webpage
"""
from zipfile import ZipFile
import re
import pickle
policy_dict = {
'cbc': './policies/CBC.txt',
'bbc': './policies/BBC.txt',
'nytimes': './policies/NYT.txt',
'thestar': './policies/TO-star.txt',
'npr': './policies/NPR.txt',
'bloomberg': './policies/bloomberg.txt',
}
def get_policy(hostname):
"""Takes in a webpage's hostname and returns the policy as a string"""
if hostname in policy_dict:
policy = policy_dict[hostname]
with open(policy, 'rb') as file:
file_bytes = file.read()
return file_bytes.decode('utf-8')
with open('./policies/corpus.pkl', 'rb') as pkl:
# Check if webpage is in corpus zip file
corpus_list = pickle.load(pkl)
r = re.compile('corpus/.*{}.*\.xml'.format(hostname))
results = list(filter(r.match, corpus_list))
if results:
# Get first result for now
result = results[0]
with ZipFile('./policies/corpus.zip', 'r') as archive:
with archive.open(result, 'r') as file:
file_bytes = file.read()
# Remove tags and clean up whitespace issues
remove_tags = re.compile(b'<.*>')
file_bytes = re.sub(remove_tags, b'', file_bytes)
file_bytes = re.sub(b'\n\s*\n', b'\n', file_bytes)
file_bytes = re.sub(b' {2,}', b'', file_bytes)
return file_bytes.decode('utf-8')
| krishnr/Priv | server/src/get_policies.py | get_policies.py | py | 1,620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number"... |
35566203783 | import os
from os.path import splitext
from subprocess import check_call, Popen
import tempfile
import logging
import urllib.request
import urllib.parse
VOXYGEN_URL_FMT = 'https://www.voxygen.fr/sites/all/modules/voxygen_voices/assets/proxy/index.php?method=redirect&text={message}&voice=Marion'
VOICERSS_URL_FMT = 'http://api.voicerss.org/?key=125f646630aa40649a5ef922dea3e76c&hl=fr-fr&src={message}'
MYBLUEMIX_URL_FMT = 'https://text-to-speech-demo.ng.bluemix.net/api/v1/synthesize?text={message}&voice=fr-FR_ReneeVoice&download=true&accept=audio%2Fmp3'
def get_temp_filepath():
f = tempfile.NamedTemporaryFile()
temp_filepath = f.name
f.close()
return temp_filepath
def tts_normalize(filepath_in, filepath_out, rate=22050):
# sox -V0 %(export_name)s.mp3 %(filename)s.wav rate 22050 norm
if os.name is 'nt':
# relies on ffmpeg on windows
cmd = ['ffmpeg', '-i', filepath_in, '-ar', str(rate), '-y', filepath_out]
else:
# get audio format from file extension.
# if no extension its been downloaded from internet, and assume its mp3
type_of_input = splitext(filepath_in)[1][1:] or 'mp3'
type_of_output = splitext(filepath_out)[1][1:] or 'wav'
cmd = ['sox', '-t', type_of_input, filepath_in, '-t', type_of_output, filepath_out, 'rate', str(rate)]
try:
success = check_call(cmd)
if success is not 0:
return False
except:
return False
def tts_pico(message, filepath):
tmp_filepath = get_temp_filepath() + '.wav'
cmd = ['pico2wave', '-l', 'fr-FR', '-w', tmp_filepath, message]
check_call(cmd)
tts_normalize(tmp_filepath, filepath)
os.remove(tmp_filepath)
def tts_online(message, filepath):
message = urllib.parse.quote(message)
url = MYBLUEMIX_URL_FMT.format(message=message)
logging.debug('requesting: {}'.format(url))
tmp_filepath, headers = urllib.request.urlretrieve(url)
tts_normalize(tmp_filepath, filepath)
os.remove(tmp_filepath)
def tts(message, filepath):
# first try online, and fallback on pico if needed
try:
logging.debug('try to get voice online')
tts_online(message, filepath)
return True
except IOError as e:
logging.debug(e)
logging.debug('online failed')
try:
logging.debug('fallback on pico')
tts_pico(message, filepath)
return True
except IOError as e:
logging.debug(e)
logging.debug('pico failed')
return False
| jujumo/conteur | conteur/tts.py | tts.py | py | 2,519 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.name",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.spl... |
15362382953 | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from functools import partial
from sklearn.metrics import roc_auc_score, f1_score
from torch.utils.data import DataLoader
def collate(block, word_to_ix):
block_size = len(block)
max_words = np.max([len(i[0]) for i in block])
mat = np.zeros((block_size, max_words), dtype=int)
for i in range(block_size):
for j in range(max_words):
try:
if block[i][0][j] in word_to_ix:
mat[i,j] = word_to_ix[block[i][0][j]]
except IndexError:
pass
mat = torch.from_numpy(mat)
embeddings = torch.FloatTensor(np.array([x for _, x, _ in block]))
labels = torch.FloatTensor(np.array([y for _, _, y in block]))
return mat, embeddings, labels
def load_KSI_data(dir='data/original/',
batch_size=32,
train=True,
val=True,
test=True,
device='cpu'):
training_data=np.load(f'{dir}training_data.npy', allow_pickle=True)
test_data=np.load(f'{dir}test_data.npy', allow_pickle=True)
val_data=np.load(f'{dir}val_data.npy', allow_pickle=True)
word_to_ix=np.load(f'{dir}word_to_ix.npy', allow_pickle=True).item() # words (in notes) to index
wikivec=np.load(f'{dir}newwikivec.npy', allow_pickle=True) # wiki article embeddings (# codes with wiki articles, vocab size)
wikivec = torch.FloatTensor(wikivec).to(device)
collate_fn = partial(collate, word_to_ix=word_to_ix)
loaders = {}
if train:
loaders['train'] = DataLoader(training_data, collate_fn=collate_fn, batch_size=batch_size)
if val:
loaders['val'] = DataLoader(val_data, collate_fn=collate_fn, batch_size=batch_size)
if test:
loaders['test'] = DataLoader(test_data, collate_fn=collate_fn, batch_size=batch_size)
return loaders, wikivec, word_to_ix
def train(model,
dataloader,
loss_function,
wikivec=None,
optimizer=None,
profiler=None,
scheduler=None,
device='cpu',
init_hidden=False):
model.train()
for data in dataloader:
optimizer.zero_grad()
note, embeddings, labels = data
if init_hidden:
model.hidden = model.init_hidden(len(note), device=device)
note = note.to(device)
embeddings = embeddings.to(device)
labels = labels.to(device)
scores = model(note, embeddings, wikivec)
loss = loss_function(scores, labels)
loss.backward()
optimizer.step()
if scheduler:
scheduler.step()
if profiler:
profiler.step()
def test_model(model,
               dataloader,
               wikivec=None,
               threshold=0.5,
               k=10,
               label_bins=None,
               device='cpu',
               init_hidden=False):
    """Evaluate a multi-label model over `dataloader`.

    Args:
        model: torch module invoked as model(note, embeddings, wikivec).
        dataloader: iterable of (note, embeddings, labels) batches.
        wikivec: optional wiki-article embedding tensor forwarded to the model.
        threshold: probability cutoff turning scores into hard predictions.
        k: ranking depth for recall@k; widened to the number of true labels
            when a sample has more than k, so recall can reach 1.0.
        label_bins: optional .npy path with five precomputed label-index bins
            (<=10, 11-50, 51-100, 101-500, >500 occurrences); when None the
            bins are derived from this dataloader's own label counts.
        device: device batch tensors are moved to.
        init_hidden: when True, reset model.hidden before each batch.

    Returns:
        (recall_at_k, micro_f1, macro_f1, micro_auc, macro_auc,
        label_freq_aucs) where label_freq_aucs maps frequency-range names
        ('1-10', ..., '>500') to macro AUC.
    """
    y = []
    yhat = []
    recall = []
    model.eval()
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        for note, embeddings, labels in dataloader:
            if init_hidden:
                model.hidden = model.init_hidden(len(note), device=device)
            note = note.to(device)
            embeddings = embeddings.to(device)
            yhat.append(model(note, embeddings, wikivec).cpu().numpy())
            y.append(labels.cpu().numpy())
    y = np.concatenate(y)
    yhat = np.concatenate(yhat)
    preds = np.array(yhat > threshold, dtype=float)
    # recall@k per sample (nan when the sample has no positive labels)
    for i in range(yhat.shape[0]):
        n_labels = int(y[i, :].sum())
        topk = max(k, n_labels)
        ind_topk = np.argpartition(yhat[i, :], -topk)[-topk:]
        recall.append(y[i, ind_topk].sum() / n_labels if n_labels > 0 else np.nan)
    # Mask out classes lacking both positive and negative examples: AUC is
    # undefined for single-class columns and roc_auc_score raises on them.
    # (The previous mask only checked for positives, so an all-positive
    # class crashed the AUC computation.)
    n_pos = y.sum(axis=0)
    mask = (n_pos > 0) & (n_pos < y.shape[0])

    def generate_mask(bin_indices):
        """Boolean mask for one frequency bin, intersected with `mask`."""
        bin_mask = np.zeros(y.shape[1], dtype=bool)
        bin_mask[bin_indices] = True
        return mask & bin_mask

    # macro AUC by label-frequency group
    if label_bins:
        loaded_bin_data = np.load(label_bins, allow_pickle=True)
        bin_10 = generate_mask(loaded_bin_data[0])
        bin_50 = generate_mask(loaded_bin_data[1])
        bin_100 = generate_mask(loaded_bin_data[2])
        bin_500 = generate_mask(loaded_bin_data[3])
        bin_remaining = generate_mask(loaded_bin_data[4])
    else:
        code_frequencies = n_pos
        bin_10 = np.argwhere((code_frequencies <= 10) & (code_frequencies > 0)).squeeze()
        bin_50 = np.argwhere((code_frequencies <= 50) & (code_frequencies > 10)).squeeze()
        bin_100 = np.argwhere((code_frequencies <= 100) & (code_frequencies > 50)).squeeze()
        bin_500 = np.argwhere((code_frequencies <= 500) & (code_frequencies > 100)).squeeze()
        bin_remaining = np.argwhere(code_frequencies > 500).squeeze()
    label_freq_aucs = {}
    label_freq_aucs['1-10'] = roc_auc_score(y[:, bin_10], yhat[:, bin_10], average='macro')
    label_freq_aucs['11-50'] = roc_auc_score(y[:, bin_50], yhat[:, bin_50], average='macro')
    label_freq_aucs['51-100'] = roc_auc_score(y[:, bin_100], yhat[:, bin_100], average='macro')
    label_freq_aucs['101-500'] = roc_auc_score(y[:, bin_500], yhat[:, bin_500], average='macro')
    label_freq_aucs['>500'] = roc_auc_score(y[:, bin_remaining], yhat[:, bin_remaining], average='macro')
    # overall metrics on the masked label set
    recall = np.nanmean(recall)
    micro_f1 = f1_score(y[:, mask], preds[:, mask], average='micro')
    macro_f1 = f1_score(y[:, mask], preds[:, mask], average='macro')
    micro_auc = roc_auc_score(y[:, mask], yhat[:, mask], average='micro')
    macro_auc = roc_auc_score(y[:, mask], yhat[:, mask], average='macro')
    return recall, micro_f1, macro_f1, micro_auc, macro_auc, label_freq_aucs
def train_model(model,
                train_dataloader,
                val_dataloader,
                wikivec=None,
                optimizer=None,
                scheduler=None,
                n_epochs=10,
                profile=False,
                log_path='./log',
                device='cpu',
                init_hidden=False,
                early_stopping=None,
                early_stopping_metric='recall_at_k'):
    """Train for up to `n_epochs`, printing train/val metrics each epoch.

    Args:
        model: torch module trained in place.
        train_dataloader / val_dataloader: (note, embeddings, labels) batches.
        wikivec: optional wiki-article embedding tensor forwarded to the model.
        optimizer: defaults to Adam over model.parameters().
        scheduler: optional LR scheduler, stepped per batch by train().
        n_epochs: maximum number of epochs.
        profile: when True, run under torch.profiler and write a TensorBoard
            trace to `log_path`.
        device: device batches are moved to.
        init_hidden: forwarded to train()/test_model() for recurrent models.
        early_stopping: patience in epochs without improvement of
            `early_stopping_metric` on the validation set; None disables.
        early_stopping_metric: one of 'recall_at_k', 'micro_f1', 'macro_f1',
            'micro_auc', 'macro_auc'.

    Returns:
        The profiler object when `profile` is True, otherwise None.
    """
    loss_function = nn.BCELoss()
    if optimizer is None:
        optimizer = optim.Adam(model.parameters())
    # position of each metric in test_model()'s return tuple
    metric_pos = {'recall_at_k': 0, 'micro_f1': 1, 'macro_f1': 2,
                  'micro_auc': 3, 'macro_auc': 4}
    best_results = -1
    best_iter = -1
    performance = []
    models = []

    def _run_epoch(epoch, prof):
        """Train/evaluate one epoch; return True when early stopping fires."""
        nonlocal best_results, best_iter
        train(model,
              train_dataloader,
              loss_function,
              wikivec=wikivec,
              optimizer=optimizer,
              profiler=prof,
              scheduler=scheduler,
              device=device,
              init_hidden=init_hidden)
        t_metrics = test_model(model, train_dataloader, wikivec,
                               device=device, init_hidden=init_hidden)
        v_metrics = test_model(model, val_dataloader, wikivec,
                               device=device, init_hidden=init_hidden)
        print(f'Epoch: {epoch+1:03d}, Train Recall@10: {t_metrics[0]:.4f}, Val Recall@10: {v_metrics[0]:.4f}' +
              f', Train Micro F1: {t_metrics[1]:.4f}, Val Micro F1: {v_metrics[1]:.4f}' +
              f', Train Macro F1: {t_metrics[2]:.4f}, Val Macro F1: {v_metrics[2]:.4f}' +
              f', Train Micro AUC: {t_metrics[3]:.4f}, Val Micro AUC: {v_metrics[3]:.4f}' +
              f', Train Macro AUC: {t_metrics[4]:.4f}, Val Macro AUC: {v_metrics[4]:.4f}')
        if not early_stopping:
            return False
        metric = v_metrics[metric_pos[early_stopping_metric]]
        performance.append(metric)
        models.append(copy.deepcopy(model.state_dict()))
        if metric > best_results:
            best_results = metric
            best_iter = len(performance) - 1
        if (len(performance) - best_iter) > early_stopping:
            print(f'Early stopping at epoch {epoch+1}')
            # load_state_dict mutates the model in place and returns a
            # missing/unexpected-keys report — the original
            # `model = model.load_state_dict(...)` clobbered the model.
            model.load_state_dict(models[best_iter])
            return True
        return False

    if profile:
        # NOTE: the legacy `use_cuda` kwarg was dropped — torch.profiler.profile
        # does not accept it (it belongs to torch.autograd.profiler), and CUDA
        # activity is already requested via `activities`.
        with torch.profiler.profile(activities=[
            torch.profiler.ProfilerActivity.CPU,
            torch.profiler.ProfilerActivity.CUDA,
        ], profile_memory=True,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(log_path)) as prof:
            for epoch in range(n_epochs):
                if _run_epoch(epoch, prof):
                    break
    else:
        prof = None
        for epoch in range(n_epochs):
            if _run_epoch(epoch, prof):
                break
    return prof
| bllguo/KSI | KSI_utils.py | KSI_utils.py | py | 12,920 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.max",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_... |
24837006891 | from PIL import Image, ImageDraw
import io

# Target LCD bitmap geometry.  The arc is drawn on a canvas scaled up by
# N_RESIZE so the circle edges come out smooth after downscaling.
IMAGE_WIDTH = 38
IMAGE_HEIGHT = 32
N_RESIZE = 64
NUMBER_OF_POSITIONS = 51 # progress positions covering [0;100]
# NOTE(review): Python 2 code (xrange, integer division below); `file`
# shadows the Python 2 builtin of the same name.
file = open("progressArc.h", "w")
file.write("#ifndef LCD_PROGRESS_ARC_H_\n")
file.write("#define LCD_PROGRESS_ARC_H_\n\n")
file.write("namespace lcd\n{\n\n")
file.write("static const uint8_t NUMBER_OF_ARC_POSITIONS = {0};\n\n".format(NUMBER_OF_POSITIONS))
# one row of IMAGE_HEIGHT*IMAGE_WIDTH/8 packed bytes per arc position
file.write("static const uint8_t progressArcArray[NUMBER_OF_ARC_POSITIONS][{0}] = {{\n".format((IMAGE_HEIGHT * IMAGE_WIDTH / 8)))
for position in xrange(0, NUMBER_OF_POSITIONS):
    # Draw a black ring (outer ellipse minus inner ellipse) on white...
    image = Image.new('1', (32 * N_RESIZE, 32 * N_RESIZE), 'white')
    draw = ImageDraw.Draw(image)
    draw.ellipse((0, 0, 32 * N_RESIZE, 32 * N_RESIZE), fill ='black', outline ='black')
    draw.ellipse((5 * N_RESIZE, 5 * N_RESIZE, 27 * N_RESIZE, 27 * N_RESIZE), fill ='white', outline ='white')
    # ...then erase the not-yet-reached part of the 270-degree ring with a
    # white pie slice from the current progress angle to the arc's end.
    draw.pieslice((0, 0, 32 * N_RESIZE, 32 * N_RESIZE), (-225 + position*270/(NUMBER_OF_POSITIONS-1)), 135, fill ='white', outline ='white')
    image = image.resize((IMAGE_WIDTH,IMAGE_HEIGHT))
    pixelData = image.getdata()
    # Pack pixels 8 vertical pixels per byte, topmost pixel in the LSB;
    # a bit is set when the pixel is black (mode '1' value 0).
    outputArray = []
    for y in xrange(0, IMAGE_HEIGHT / 8):
        for x in xrange(0, IMAGE_WIDTH):
            currentByte = 0
            for bit in xrange(0, 8):
                currentByte |= (1 if (0 == pixelData[x + (y*8+bit)*IMAGE_WIDTH]) else 0) << bit
            outputArray.append(currentByte)
    #print outputArray
    file.write(" { ")
    byteCount = 0
    for byte in outputArray:
        file.write(("0x%02X" % byte) + ", ")
        byteCount += 1
        if (16 == byteCount):  # wrap the generated C array every 16 bytes
            byteCount = 0
            file.write("\n ")
    file.write(" },\n")
file.write("};\n")
file.write("} // namespace\n#endif\n")
file.close()
image.save('test.png') | zukaitis/midi-grid | Misc/image_generation/generateProgressArc.py | generateProgressArc.py | py | 1,781 | python | en | code | 74 | github-code | 1 | [
{
"api_name": "PIL.Image.new",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_... |
3757279275 | # -*- coding: utf-8 -*-
from django.conf import settings
from urlparse import parse_qs, urlparse
from datetime import datetime
from celery import task
from twython import Twython
from dashboard.models import SocialSearch, Item
@task(ignore_result=True)
def collect_all_social_searchs():
    """Queue an asynchronous collection job for every stored SocialSearch."""
    for search in SocialSearch.objects.all():
        collect_social_search.apply_async(args=[search.id])
@task(ignore_result=True)
def collect_social_search(social_search_id):
    """Fetch recent tweets matching one SocialSearch and store them as Items.

    Returns silently when the search no longer exists.  The search's stored
    `since_id` is passed to the Twitter API so only tweets newer than the
    previous run are requested, and it is advanced to `max_id` afterwards.
    """
    try:
        social_search = SocialSearch.objects.get(id=social_search_id)
    except SocialSearch.DoesNotExist:
        return
    social_account = social_search.social_account
    app_key = settings.TWITTER_APP_KEY
    app_secret = settings.TWITTER_APP_SECRET
    oauth_token = social_account.access_token
    oauth_token_secret = social_account.access_token_secret
    search_term = social_search.search_term
    since_id = social_search.since_id
    t = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
    items = t.search(q=search_term, result_type='recent', count=100, since_id=since_id)
    # NOTE(review): 'count' in search_metadata presumably echoes the requested
    # page size rather than the number of results — confirm against the
    # Twitter search API.  The loop below is a no-op on an empty 'statuses'
    # list either way, so behavior is unaffected.
    if items['search_metadata']['count'] > 0:
        for item in items['statuses']:
            item_object = Item()
            item_object.social_search = social_search
            item_object.social_item_id = item['id']
            item_object.social_item_text = item['text']
            item_object.social_user_id = item['user']['id']
            item_object.social_user_screen_name = item['user']['screen_name']
            item_object.social_user_name = item['user']['name']
            item_object.social_user_avatar = item['user']['profile_image_url']
            item_object.save()
    # Record bookkeeping for the next incremental run.
    social_search.last_collection_date = datetime.now()
    social_search.item_count = Item.objects.filter(social_search=social_search).count()
    social_search.since_id = items['search_metadata']['max_id']
    social_search.save()
    return
| allisson/django-social-monitor-example | dashboard/tasks.py | tasks.py | py | 1,986 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "dashboard.models.SocialSearch.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dashboard.models.SocialSearch.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "dashboard.models.SocialSearch",
"line_number": 13,
"... |
73614605793 | from unittest.mock import Mock
import pytest
import requests
from parking_permit.html_parser import HtmlParser
from parking_permit.queue_service import *
LICENSE_PLATE = "AB-123-C"
CLIENT_NUMBER = "1234567"
URL = (
"https://www.amsterdam.nl/parkeren-verkeer/parkeervergunning/"
+ "parkeervergunning-bewoners/wachtlijst/"
)
HTML_RESPONSE = "THIS IS AN EXAMPLE HTML RESPONSE"
class MockResponse(requests.Response):
    """A canned requests.Response whose body text is always HTML_RESPONSE."""

    @property
    def text(self):
        """Return the canned HTML instead of decoding real content."""
        return HTML_RESPONSE
@pytest.fixture
def mock_response():
    """Provide a response stub carrying the canned HTML body."""
    return MockResponse()
@pytest.fixture
def requests_session_send(monkeypatch: pytest.MonkeyPatch):
    """Replace requests.Session.send with a Mock and hand it to the test."""
    import requests

    send_mock = Mock()
    monkeypatch.setattr(requests.Session, "send", send_mock)
    return send_mock
@pytest.fixture
def requests_session_prepare_request(monkeypatch: pytest.MonkeyPatch):
    """Replace requests.Session.prepare_request with a Mock and return it."""
    import requests

    prepare_mock = Mock()
    monkeypatch.setattr(requests.Session, "prepare_request", prepare_mock)
    return prepare_mock
@pytest.fixture
def service():
    """A QueueService wired with a fresh HtmlParser."""
    return QueueService(HtmlParser())
@pytest.fixture
def service_request(service: QueueService):
    """The prepared GET request for the canned plate / client number."""
    return service._build_request(LICENSE_PLATE, CLIENT_NUMBER)
def test_build_request(service_request: Request):
    """The built request targets the waiting-list page with expected params."""
    expected_params = {
        "kenteken": LICENSE_PLATE,
        "klantnummer": CLIENT_NUMBER,
        "module": 16349201,
        "ajax": "true",
        "rich-ajax": "true",
    }
    assert service_request.method == "GET"
    assert service_request.url == URL
    assert service_request.params == expected_params
def test_send_request(
    service: QueueService,
    requests_session_send: Mock,
    requests_session_prepare_request: Mock,
    mock_response: MockResponse,
    service_request: Request,
):
    """_send_request prepares the request, sends it and returns the response."""
    requests_session_prepare_request.return_value = service_request
    requests_session_send.return_value = mock_response
    result = service._send_request(service_request)
    requests_session_send.assert_called_with(service_request)
    assert result == mock_response
| janheindejong/parking-permit | tests/test_queue_service.py | test_queue_service.py | py | 1,976 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Response",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pytest.MonkeyPatch",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "un... |
21840890275 | import sys, itertools
input = sys.stdin.readline
def dfs(start, ls):
    """Depth-first search from `start`, appending each reachable node that
    belongs to the current candidate district to `ls`.

    Relies on module-level state: `visited`, `graph`, and — notably — the
    combination-loop variable `j` plus the `unselect` list from the main
    loop below, which together define the nodes that may be traversed.
    """
    visited[start] = True
    for i in graph[start]:
        if not visited[i]:
            if i in j or i in unselect:
                ls.append(i)
                dfs(i, ls)
# BOJ 17471 (gerrymandering): split N regions into two internally connected
# districts minimizing the absolute population difference.
N =int(input())
population = [0] +list(map(int, input().split()))  # 1-indexed populations
total = sum(population)
graph = list([0] for _ in range(N + 1))
for i in range(1, N + 1):
    temp = list(map(int, input().split()))
    graph[i] = temp[1:]  # first number is the neighbour count; drop it
visited = [False] * (N + 1)
arr = list(range(1, N + 1))
ans = float('inf')
# Try every subset of size i <= N//2 as district one (larger halves are
# covered by symmetry); both districts must be connected.
for i in range(1, N // 2 + 1):
    for j in itertools.combinations(arr, i):
        cnt = 0
        select = [j[0]]
        unselect = []
        dfs(j[0], select)  # collect nodes of `j` reachable from its first member
        if len(select) == len(j):  # district one is connected
            unselect = []
            for k in arr:
                if not k in select:
                    unselect.append(k)
            check = [unselect[0]]
            dfs(unselect[0], check)
            if sorted(check) == unselect:  # district two is connected too
                one = 0
                for k in select:
                    one += population[k]
                two = total - one
                ans = min(ans, abs(one - two))
        visited = [False] * (N + 1)  # reset before the next candidate split
if ans == float('inf'):
    ans = -1  # no valid split into two connected districts exists
print(ans)
{
"api_name": "sys.stdin",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "itertools.combinations",
"line_number": 23,
"usage_type": "call"
}
] |
28399981836 | """Support for automation and script tracing and debugging."""
from homeassistant.core import callback
from .const import DATA_TRACE
@callback
def get_debug_trace(hass, automation_id, run_id):
    """Look up a single serializable debug trace by automation and run id."""
    automation_traces = hass.data[DATA_TRACE][automation_id]
    return automation_traces[run_id]
@callback
def get_debug_traces_for_automation(hass, automation_id, summary=False):
    """Return a serializable list of debug traces for one automation."""
    stored = hass.data[DATA_TRACE].get(automation_id, {})
    if summary:
        return [trace.as_short_dict() for trace in stored.values()]
    return [trace.as_dict() for trace in stored.values()]
@callback
def get_debug_traces(hass, summary=False):
    """Return a serializable list of debug traces across all automations."""
    all_traces = []
    for automation_id in hass.data[DATA_TRACE]:
        all_traces += get_debug_traces_for_automation(hass, automation_id, summary)
    return all_traces
| robertdelpeut/core | homeassistant/components/trace/trace.py | trace.py | py | 966 | python | en | code | null | github-code | 1 | [
{
"api_name": "const.DATA_TRACE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "homeassistant.core.callback",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "const.DATA_TRACE",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "homeassi... |
35968078948 | from population import *
from route import *
import config
import NSGAII
class GA:
    """Genetic-algorithm operators (selection, mutation, NSGA-II pruning)
    for evolving a population of routes for multi-vehicle job assignment."""

    mutationRate = 0.65   # probability a parent is mutated instead of replaced
    tournamentSize = 10   # contestants per tournament selection
    elitism = True        # currently unused (elitism block is commented out)
    runningAvg = 0

    @classmethod
    def evolvePopulation(cls, pop):
        """Produce the next generation: keep the old population, add as many
        offspring, then prune back to `pop.populationSize` by NSGA-II
        Pareto fronts over (total jobs, -total distance)."""
        newPopulation = Population(2*pop.populationSize, False)
        # copy over old population
        for i in range(0, pop.populationSize):
            newPopulation.saveRoute(i, pop.getRoute(i))
        # elitismOffset = 0
        # if cls.elitism:
        #     newPopulation.saveRoute(0, pop.getFittest())
        #     elitismOffset = 1
        # Fill the second half with offspring: a mutated tournament winner
        # with probability mutationRate, otherwise a fresh random route.
        for i in range(pop.populationSize, 2*pop.populationSize):
            parent1 = cls.tournamentSelection(pop)
            # parent2 = cls.tournamentSelection(pop)
            # child = cls.crossover(parent1, parent2)
            if random.random() < cls.mutationRate:
                child = cls.mutate(parent1)
            else:
                child = Route()
            # child = Route()
            newPopulation.saveRoute(i, child)
        # for i in range(elitismOffset, newPopulation.populationSize):
        #     cls.mutate(newPopulation.getRoute(i))
        # Pareto optimization — objectives: total jobs (maximize) and total
        # distance (negated so that NSGA-II can treat both as maximization).
        function1_list = []
        function2_list = []
        for i in range(newPopulation.populationSize):
            final_route = newPopulation.getRoute(i)
            function1_list.append(final_route.getTotalJobs())
            function2_list.append(-1*final_route.getDistance())
        pareto_sorted_order = NSGAII.NSGAII_main(function1_list, function2_list, False)
        # print(pareto_sorted_order)
        # Take routes front by front until the target population size is hit.
        i = 0
        pruned_pop = Population(pop.populationSize, False)
        for front in pareto_sorted_order:
            for route_index in front:
                temp_route = newPopulation.getRoute(route_index)
                pruned_pop.saveRoute(i, temp_route)
                # print("i = %d"%i)
                i += 1
                if i > config.population - 1:
                    break
            if i > config.population - 1:
                break
        # single objective optimization - total jobs
        # NOTE(review): these two calls operate on the local `newPopulation`,
        # which is discarded — `pruned_pop` is returned — so they look like
        # dead work; confirm before removing.
        newPopulation.sortByFitness()
        newPopulation.prune()
        return pruned_pop

    @classmethod
    def updateMutateProbability(cls, prob):
        """Nudge the class-wide mutation rate by `prob`, clamped to [0, 1)."""
        if cls.mutationRate + prob >= 0 and cls.mutationRate + prob < 1:
            cls.mutationRate += prob
        # print("mutation prob = %f" %cls.mutationRate)

    @classmethod
    def mutate (cls, route):
        """Return a new Route where each [start, end] subroute may be shifted
        by one position (incrementally or decrementally), keeping only
        shifts that the route reports as valid."""
        child_route = []
        flag = False
        for [start, end] in route.get_route():
            subroute = [start,end]
            if random.random() < cls.mutationRate:
                if random.random() < 0.5: # incremental mutation
                    if (route.isSubrouteValid([start+1, end+1])):
                        subroute = [start+1, end+1]
                        flag = True
                    elif (route.isSubrouteValid([start+1, end])): # will increase capacity wastage (hill climbing)
                        subroute = [start+1, end]
                    else:
                        subroute = [start, end] # mutation leads to invalid solution hence, skip mutation
                    # subroute = [start, end]
                else: # decremental mutation
                    # subroute = [start, end]
                    if (route.isSubrouteValid([start-1, end])):
                        subroute = [start-1, end]
                        flag = True
                    elif (route.isSubrouteValid([start-1, end-1])):
                        subroute = [start-1, end-1]
                        flag = True
                    elif (route.isSubrouteValid([start, end-1])): # will increase capacity wastage (hill climbing)
                        subroute = [start, end-1]
                    else:
                        subroute = [start, end] # mutation leads to invalid solution hence, skip mutation
            else: # no mutation
                subroute = [start,end]
            child_route.append(subroute)
        # print(route)
        # print(route)
        # if flag:
        #     print("Mutated")
        child = Route(child_route)
        return child

    @classmethod
    def tournamentSelection (cls, pop):
        """Pick `tournamentSize` random routes and return the fittest one."""
        tournament = Population(cls.tournamentSize, False)
        for i in range(cls.tournamentSize):
            randomInt = random.randint(0, pop.populationSize-1)
            tournament.saveRoute(i, pop.getRoute(randomInt))
        fittest = tournament.getFittest()
        return fittest
| Akshay-Kawlay/MTSP-throughput-max | approach1&2/galogic.py | galogic.py | py | 4,700 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "NSGAII.NSGAII_main",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "config.population",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "config.population",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "rout... |
14919840293 | import os
import sys
from datetime import timedelta
from dotenv.main import load_dotenv
# BASE_DIR = Path(__file__).resolve().parent.parent
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(BASE_DIR, 'apps'))
if os.getenv("GITHUB_ACTIONS") == "true":
load_dotenv('.env.copy')
else:
load_dotenv()
SECRET_KEY = os.getenv('SECRET_KEY')
DEBUG = True  # NOTE(review): must be False in production deployments
ALLOWED_HOSTS = ['*']  # NOTE(review): wildcard host list — restrict in production
INSTALLED_APPS = [
'jazzmin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My apps
'products.apps.ProductsConfig',
'shops.apps.ShopsConfig',
'users.apps.UsersConfig',
'orders.apps.OrdersConfig',
'ecommerce.apps.EcommerceConfig',
# 'telegrambots.apps.TelegrambotsConfig',
# Third party apps
'multiselectfield',
'mptt',
'rest_framework',
'drf_yasg',
'django_filters',
'django_hosts',
]
MIDDLEWARE = [
'django_hosts.middleware.HostsRequestMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_hosts.middleware.HostsResponseMiddleware',
]
ROOT_URLCONF = 'root.urls'
ROOT_HOSTCONF = 'root.hosts'
PARENT_HOST = os.getenv('DOMAIN')
DEFAULT_HOST = 'api'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'root.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
},
}
if os.environ.get('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
AUTH_USER_MODEL = 'users.User'
AUTHENTICATION_BACKENDS = ['apps.shared.django.CustomUserBackend']
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tashkent'
USE_I18N = True
USE_TZ = True
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR + 'static')
MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(BASE_DIR + 'media')
FIXTURE_DIRS = [os.path.join(BASE_DIR, 'apps/shops/fixtures/')]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S',
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
}
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
'description': 'Type in the *\'Value\'* input box below: **\'Bearer <JWT>\'**, where JWT is the '
'JSON web token you get back when logging in.'
}
},
'PERSIST_AUTH': True
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(days=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=2),
# 'UPDATE_LAST_LOGIN': True,
"TOKEN_OBTAIN_SERIALIZER": "apps.shared.restframework.CustomTokenObtainPairSerializer",
}
JAZZMIN_SETTINGS = {
"site_title": "digital-ecommerce Admin",
"site_header": "digital-ecommerce",
"site_brand": "digital-ecommerce",
"site_logo": "site/logo.square.svg",
"login_logo": "site/logo.svg",
"login_logo_dark": None,
"site_logo_classes": False,
"site_icon": None,
"welcome_sign": "digital-ecommerce API",
"copyright": "digital-ecommerce MCHJ",
"search_model": ["users.User", "auth.Group"],
"user_avatar": None,
"topmenu_links": [
{"name": "Home", "url": "admin:index", "permissions": ["auth.view_user"]},
{"name": "Support", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
{"model": "users.User"},
{"app": "users"},
],
"usermenu_links": [
{"name": "Support", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
{"model": "users.User"}
],
"show_sidebar": True,
"navigation_expanded": True,
"hide_apps": [],
"hide_models": [],
"order_with_respect_to": ["auth", "users", "books.author", "books.book"],
"custom_links": {
"books": [{
"name": "Make Messages",
"url": "make_messages",
"icon": "fas fa-comments",
"permissions": ["books.view_book"]
}]
},
"icons": {
"auth": "fas fa-users-cog",
"auth.Group": "fas fa-users",
"users.user": "fas fa-user",
"products.Product": "fas fa-boxes",
"products.Category": "fas fa-bars",
"shops.Category": "fas fa-bars",
"shops.Currency": "fas fa-coins",
"shops.Shop": "fas fa-shopping-cart",
"orders.Order": "fas fa-box",
},
"default_icon_parents": "fas fa-chevron-circle-right",
"default_icon_children": "fas fa-circle",
"related_modal_active": False,
"custom_css": None,
"custom_js": None,
"use_google_fonts_cdn": True,
"show_ui_builder": False,
"changeform_format": "horizontal_tabs",
"changeform_format_overrides": {"users.User": "collapsible", "auth.group": "vertical_tabs"},
"language_chooser": False,
}
# import sentry_sdk
#
# sentry_sdk.init(
# dsn=os.getenv('SENTRY_SDK_URL'),
# integrations=[
# DjangoIntegration(),
# ],
# traces_sample_rate=1.0,
# send_default_pii=True
# )
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_PORT = os.getenv('EMAIL_PORT')
EMAIL_HOST = os.getenv('EMAIL_HOST')
EMAIL_USE_TLS = True
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', default='redis://localhost:6380')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_TIMEZONE = os.getenv('TIME_ZONE')
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_REQUIRED = False
SILENCED_SYSTEM_CHECKS = ['auth.W004']
| GaniyevUz/Digital-Ecommerce | root/settings.py | settings.py | py | 7,697 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line... |
5747429962 | import sys
import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.io import loadmat
import cv2
from skimage.io import imshow
from keras.models import Sequential
from keras.layers import Conv2D,Conv2DTranspose, Cropping2D, Dense, Activation, Dropout, Flatten,MaxPooling2D, Merge, Average
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
# Number of classes (PASCAL VOC: 20 objects + background)
n_classes = 21
input_shape = (224, 224, 3)
# FCN-32s architecture: VGG16 convolutional blocks, fully-convolutional
# fc6/fc7, 1x1 class scoring, then a single 32x-stride deconvolution.
# block1
FCN32 = Sequential()
FCN32.add(Conv2D(64,(3, 3), activation='relu', input_shape=input_shape, padding='same',name = 'conv1_1'))
FCN32.add(Conv2D(64,(3, 3), activation='relu', name = 'conv1_2',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block1_pool'))
# block2
FCN32.add(Conv2D(128,(3, 3), activation='relu', name = 'conv2_1',padding='same'))
FCN32.add(Conv2D(128,(3, 3), activation='relu', name = 'conv2_2',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block2_pool'))
# block3
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_1',padding='same'))
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_2',padding='same'))
FCN32.add(Conv2D(256,(3, 3), activation='relu', name = 'conv3_3',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block3_pool'))
# block4
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv4_1',padding='same'))
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv4_2',padding='same'))
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv4_3',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block4_pool'))
# block5
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv5_1',padding='same'))
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv5_2',padding='same'))
FCN32.add(Conv2D(512,(3, 3), activation='relu', name = 'conv5_3',padding='same'))
FCN32.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), name = 'block5_pool'))
# block6: fully-convolutional replacements for VGG's fc layers
FCN32.add(Conv2D(4096,(7, 7), activation='relu', name = 'fc6',padding='same'))
FCN32.add(Dropout(0.5))
FCN32.add(Conv2D(4096,(1, 1), activation='relu', name = 'fc7',padding='same'))
FCN32.add(Dropout(0.5))
# Transformation: 1x1 conv producing per-pixel class scores
FCN32.add(Conv2D(n_classes,(1, 1), activation='linear', kernel_initializer='he_normal', padding='valid', strides=(1, 1), name= 'score_fr'))
# deconvolution: 32x upsample back to input resolution, cropped to 224x224
FCN32.add(Conv2DTranspose(n_classes,kernel_size = (64, 64),strides = (32,32), name = 'upsample'))
FCN32.add(Cropping2D(cropping = 16))
FCN32.add(Activation('softmax', name = 'ac1'))
# NOTE(review): a 1-channel relu conv AFTER the softmax collapses the class
# scores into a single map — presumably intended to match the 1-channel
# label images, but confirm this is the desired output head.
FCN32.add(Conv2D(1,(3, 3), activation='relu', name = 'f',padding='same'))
FCN32.summary()
# compile model
FCN32.compile(loss="kullback_leibler_divergence", optimizer='adam', metrics=['accuracy'])
# transfer learning - pretrained VGG/FCN32 weights from a MatConvNet .mat file
transfer_weights = loadmat('C://Users/jchin/Tensorflow-Segmentation-master/pascal-fcn32s-dag.mat', matlab_compatible=False, struct_as_record=False)
params = transfer_weights['params']
def transfer_learning(input_model):
    """Copy pretrained weights from the loaded .mat struct array `params`
    into same-named layers of `input_model`.

    `params` entries come in (weights, bias) pairs — entry i holds the
    weights and entry i+1 the matching bias — and each entry's name carries
    a trailing suffix (e.g. '_filter'/'_bias') that is stripped to recover
    the Keras layer name.  Layers with no matching entry are reported.
    """
    layer_names = [l.name for l in input_model.layers]
    for i in range(0, params.shape[1]-1, 2):
        # drop the last '_'-separated token to get the bare layer name
        t_name = '_'.join(params[0,i].name[0].split('_')[0:-1])
        if t_name in layer_names:
            kindex = layer_names.index(t_name)
            t_weights = params[0,i].value
            t_bias = params[0,i+1].value
            input_model.layers[kindex].set_weights([t_weights, t_bias[:,0]])
        else:
            print ('not found: ', str(t_name))
transfer_learning(FCN32)
# Image directory
image_directory = 'C://Users/jchin/Desktop/image_segmen/VOC2012/JPEGImages/'
segm_image_directory = 'C://Users/jchin/Desktop/image_segmen/VOC2012/SegmentationClass/'
train_set_list = 'C://Users/jchin/Desktop/image_segmen/VOC2012/ImageSets/Segmentation/train.txt'
validation_set_list = 'C://Users/jchin/Desktop/image_segmen/VOC2012/ImageSets/Segmentation/trainval.txt'
#data preprocessing
#Extract train and validation sets
# Train set
train_set = open(train_set_list, "r")
train_set_names = []
for l in train_set:
train_set_names.append(l.strip())
train_set.close()
#Prepare training images
train_images = []
for i in range(len(train_set_names)):
train_images.append(image_directory + train_set_names[i] + '.jpg')
train_images.sort()
#segmented images of training data
segm_set = []
for i in range(len(train_set_names)):
segm_set.append(segm_image_directory + train_set_names[i] + '.png')
segm_set.sort()
#validation set
valid_set = open(validation_set_list, "r")
valid_set_names = []
for l in valid_set:
valid_set_names.append(l.strip())
valid_set.close()
# validation set images
valid_set = []
for i in range(len(valid_set_names)):
valid_set.append(image_directory + valid_set_names[i] + '.jpg')
valid_set.sort()
#Load images and generate numpy arrays of images to feed into the model
height, width = (224, 224)
def extract_data(path, label=None):
    """Load the image at `path`, resize it to 224x224 and return it as a
    float64 numpy array with a leading batch axis of size 1.

    When `label` is truthy the image is treated as a 1-channel segmentation
    mask (224, 224, 1); otherwise as a 3-channel RGB image (224, 224, 3).
    """
    img = Image.open(path).resize((224, 224))
    target_shape = (224, 224, 1) if label else (224, 224, 3)
    arr = np.frombuffer(img.tobytes(), dtype=np.uint8).reshape(target_shape)
    arr = arr.astype('float64')
    return arr[None, :]
def generate_arrays_from_file(image_list, train_directory, test_directory,validate = None):
    """Endlessly cycle over `image_list`, yielding (X, y) pairs of image and
    segmentation-mask arrays — or X alone when `validate` is truthy (for
    prediction, where Keras expects inputs only)."""
    while True:
        for image_name in image_list:
            X = extract_data(train_directory + "{}.jpg".format(image_name), label=False)
            y = extract_data(test_directory + "{}.png".format(image_name), label=True)
            if validate:
                yield np.array(X)
            else:
                yield np.array(X) , np.array(y)
# Model training (tiny run: 1 epoch x 1 step — presumably a smoke test)
n_epoch = 1
steps_per_epoch = 1
# steps_per_epoch = len(filenamesV)/100
FCN32.fit_generator(generator=generate_arrays_from_file(train_set_names, image_directory, segm_image_directory),
                    steps_per_epoch=steps_per_epoch,
                    epochs=n_epoch)
# validate model: predict masks for a couple of validation batches
# n_steps = len(valid_set_names)
n_steps = 2
predicted_images = FCN32.predict_generator(generate_arrays_from_file(valid_set_names, image_directory,
                                           segm_image_directory,validate= 1), steps =n_steps)
# pixel accuracy: load one ground-truth mask for reference
inputImg1 = Image.open('C://Users/jchin/Desktop/image_segmen/VOC2012/SegmentationClass/2007_001397.png')
# NOTE(review): `image` here shadows the `image` module imported from
# keras.preprocessing at the top of the file.
image = inputImg1.resize((224,224))
inputImg = np.frombuffer(image.tobytes(), dtype=np.uint8).reshape((224,224))
# NOTE(review): newer numpy rejects np.nan as a threshold (must be an int,
# e.g. sys.maxsize) — confirm the numpy version this targets.
np.set_printoptions(threshold=np.nan)
accuracy = FCN32.evaluate_generator(generate_arrays_from_file(valid_set_names, image_directory,
                                    segm_image_directory), steps =n_steps)
print('Accuracy of FCN32 model: ',accuracy)
def root_mean_squared_error(y_true, y_pred):
    """Mean RMSE over the last axis: average of sqrt(mean((pred-true)^2, axis=-1))."""
    squared_diff = np.square(y_pred - y_true)
    per_sample_rmse = np.sqrt(np.mean(squared_diff, axis=-1))
    return np.average(per_sample_rmse)
# NOTE(review): this compares the predictions with themselves, so the result
# is always 0.0 -- presumably the second argument should be the ground-truth
# masks.  The printed label ('pixel accuracy') also does not match the metric
# actually computed (an RMSE).
rmse = root_mean_squared_error(predicted_images,predicted_images)
print('pixel accuracy:',rmse)
| suhaschowdaryj/semantic_segmentation | fcn32.py | fcn32.py | py | 7,416 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.warnoptions",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "warnings.simplefilter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ke... |
17291005352 | import boto3
import requests
import os
import smtplib
import paramiko
import time
import schedule
EMAIL_ADDRESS = os.environ.get('EMAIL_ADDRESS')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASSWORD')
instance_id = "i-0c90f5640105608c3"
host_ip = "13.234.116.18"
def send_notification(email_text):
    """Email *email_text* to EMAIL_ADDRESS (self-addressed) via Gmail SMTP.

    `email_text` must already contain the full message, including the
    'Subject: ...' header line.  Credentials come from the module-level
    EMAIL_ADDRESS / EMAIL_PASSWORD environment values.
    """
    # The `with` block issues QUIT and closes the socket on exit, so the
    # original explicit smtp.close() inside the block was redundant.
    with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
        smtp.ehlo()
        smtp.starttls()  # upgrade to TLS before sending credentials
        smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, email_text)
    print("Email sent successfully!")
def restart_application():
    """SSH into the EC2 host and start the (stopped) app container."""
    print("restart_application")
    # NOTE(review): hard-coded local key path and container id -- consider
    # moving these to environment variables like the email credentials.
    key = "C:/Users/Himanshu/Downloads/Documents/aws_stuff/vprofile_ap_s1.pem"
    client = paramiko.SSHClient()
    # Auto-accept unknown host keys so a first connection does not fail.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        client.connect(
            hostname=host_ip,
            username="ubuntu",
            key_filename=key
        )
        stdin, stdout, stderr = client.exec_command("docker start fdcaf261ffbb")
        print(stdout.readlines())
        client.close()
    except Exception as exc:
        # Best-effort: log the failure and let the monitor keep running.
        print(exc)
def restart_server_and_container():
    """Reboot the EC2 instance, wait for it to boot, then restart the container."""
    ec2 = boto3.client('ec2')
    print("Restarting the server...")
    # Reboot the single instance this monitor manages.
    ec2.reboot_instances(InstanceIds=[instance_id])
    print("Restarting the container...")
    # Give the instance a minute to come back before SSH-ing in.
    time.sleep(60)
    restart_application()
def monitor_website():
    """Poll the app's HTTP endpoint; notify and self-heal on failure."""
    try:
        response = requests.get('http://ec2-13-234-116-18.ap-south-1.compute.amazonaws.com:8080/')
        if response.status_code == 200:
            print("Application is UP and RUNNING!")
        else:
            # Server reachable but unhealthy: mail the status code, then try
            # the cheap fix (restart just the container).
            print("Application DOWN, need to be FIXED!")
            msg = f"""Subject: SITE DOWN\n
            Application Response Code: {response.status_code}."""
            send_notification(msg)
            # restart the application
            restart_application()
    except Exception as ex:
        # Server not reachable at all: mail an alert, then reboot the EC2
        # instance and restart the container.
        print(f"==========CONNECTION_ERROR==============\n{ex}")
        msg = "Subject: CONNECTION ERROR\nApplication is not Reachable!"
        send_notification(msg)
        restart_server_and_container()
# Check the site every 15 minutes.
schedule.every(15).minutes.do(monitor_website)

# Run pending jobs forever.  Sleep between polls so the loop does not
# busy-wait and peg a CPU core -- schedule.run_pending() only checks which
# jobs are due; it does not block.
while True:
    schedule.run_pending()
    time.sleep(1)
| himanshupant4899/boto3-project | Monitoring_Website/monitor-website.py | monitor-website.py | py | 2,321 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
import setuptools

# Use the repository README as the PyPI long description.  Decode it
# explicitly as UTF-8 so the build does not depend on the platform's
# locale default encoding (the original relied on the implicit default,
# which breaks on e.g. cp1252 systems if the README has non-ASCII text).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="agilent-format",
    version="0.4.4",
    author="Stuart Read",
    author_email="stuart.read@lightsource.ca",
    description="File reader for Agilent Resolutions Pro FT-IR images",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/stuart-cls/python-agilent-file-formats",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'numpy',
    ],
    test_suite="agilent_format.tests.suite"
)
| stuart-cls/python-agilent-file-formats | setup.py | setup.py | py | 710 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 15,
"usage_type": "call"
}
] |
9361751702 | # %%
from matplotlib import pyplot as plt
import numpy as np
file = "log.txt"
def preprocessing(line):
    """Strip the separator characters from a log line and split it on spaces."""
    # Delete every noise character in a single translate() pass instead of
    # chaining replace() calls.
    cleaned = line.translate(str.maketrans('', '', '\n,=:/()%'))
    return cleaned.strip().split(' ')
def line1(lineData):
    """Parse the run-settings line into a dict keyed by setting name."""
    # Each value sits at a fixed token position in the cleaned log line.
    fields = (('arch', 2), ('epochs', 4), ('workers', 7),
              ('batch', 14), ('order', 20), ('timeout', 23))
    return {name: lineData[idx] for name, idx in fields}
def line2(lineData):
    """Parse the training-statistics line into float-valued summary stats."""
    positions = (('sum', 3), ('average', 7), ('max', 11),
                 ('min', 15), ('median', 19))
    return {name: float(lineData[idx]) for name, idx in positions}
def line3(lineData, test):
    """Merge the update-statistics fields into *test* (mutated) and return it."""
    test.update(
        need_update=int(lineData[3]),
        updated=int(lineData[7]),
        update_rate=float(lineData[11]),
    )
    return test
def line4(lineData, test):
    """Merge the test-result fields into *test* (mutated) and return it."""
    test.update(
        loss=float(lineData[4]),
        right=int(lineData[6]),
        cases=int(lineData[8]),
        accuracy=float(lineData[9]),
    )
    return test
# Parse log.txt: each record is 5 lines -- settings, training stats,
# update stats, test results, then a separator line.
f = open(file)
line = f.readline()
setting=[]
test=[]
while line:
    # Line 1: run configuration.
    lineData=preprocessing(line)
    setting.append(line1(lineData))
    line = f.readline()
    # Line 2: training time statistics.
    lineData=preprocessing(line)
    training=line2(lineData)
    line = f.readline()
    # Line 3: parameter-update statistics, merged into the same dict.
    lineData=preprocessing(line)
    training=line3(lineData,training)
    line = f.readline()
    # Line 4: test loss/accuracy; the completed record is appended to `test`.
    lineData=preprocessing(line)
    test.append(line4(lineData,training))
    line = f.readline()
    # Skip the separator line between records.
    line = f.readline()
f.close()
x='epochs'
# 'epochs' 'processes' 'batch' 'reduce'
y='right'
# 'loss' 'right' 'accuracy'
way='average'
def find_same_setting(setting,idx):
i = idx
next =-1
cases=[]
key1='arch'
key2='order'
while(i<len(setting)):
flag = False
for key,value in setting[i].items():
if value != setting[idx][key]:
flag = True
if flag:
if (next==-1)&(setting[idx][key1]==setting[i][key1])&(setting[idx][key2]==setting[i][key2]):
next=i
i+=1
continue
cases.append(i)
i+=1
if next==-1:
next=len(setting)
return next,np.array(cases)
def calculate(all_data,key,setting,value1,value2):
results=[]
for idx in range(len(all_data)):
if (setting[idx]['arch']==value1)&(setting[idx]['order']==value2):
break
if (setting[idx]['arch']!=value1)|(setting[idx]['order']!=value2):
return results
next=idx
while next<len(all_data):
result={}
for key1,value in setting[next].items():
result[key1]=value
next,cases=find_same_setting(setting,next)
data=[]
for p in cases:
data.append(all_data[p][key])
result['max']=max(data)
result['min']=min(data)
data=np.array(data)
result['average']=np.mean(data)
result['median']=np.median(data)
results.append(result)
return results
def clasify1(test,setting):
lines=[]
lines.append(calculate(test,y,setting,'ff-net','y'))
lines.append(calculate(test,y,setting,'ff-net','n'))
#lines.append(calculate(test,y,setting,'conv-net','y'))
#lines.append(calculate(test,y,setting,'conv-net','n'))
all_casee=[]
for line in lines:
cases=[]
for case in line:
example={}
for key,value in case.items():
if (key=='arch')|(key=='order')|(key=='max')|(key=='min')|(key=='average')|(key=='median')|(key ==x):
continue
example[key]=value
if example not in cases:
cases.append(example)
all_casee.append(cases)
return lines,all_casee
def each_line1(line,case):
xs=[]
ys=[]
for data in line:
flag = False
for key,value in case.items():
if data[key]!=value:
flag=True
if flag:
continue
xs.append(data[x])
ys.append(data[way])
return np.array(xs),np.array(ys)
def draw1(test,setting):
lines,cases=clasify1(test,setting)
for case in cases[0]:
title=''
for key,value in case.items():
if title == '':
title+=(key+'='+value)
else:
title+=(';'+key+'='+value)
plt.title(title)
for i in range(2):
xs,ys=each_line1(lines[i],case)
if i == 0:
plt.plot(xs,ys,label="arch=ff,in order")
if i == 1:
plt.plot(xs,ys,label="arch=ff,random")
#if i == 2:
# plt.plot(xs,ys,label="arch=conv,in order")
#if i == 3:
# plt.plot(xs,ys,label="arch=conv,random")
plt.xlabel(x)
plt.ylabel(y)
plt.ylim(0,test[0]['cases'])
plt.legend()
plt.show()
draw1(test,setting)
# %%
way='average'
def find_same_setting2(setting,idx):
i = idx
next =-1
cases=[]
key1='arch'
key2='order'
while(i<len(setting)):
if (setting[idx][key1]!=setting[i][key1])|(setting[idx][key2]!=setting[i][key2]):
i+=1
continue
if (setting[i]['timeout'] != setting[idx]['timeout'])|(setting[i]['workers'] != setting[idx]['workers']):
if (next==-1):
next=i
i+=1
continue
cases.append(i)
i+=1
if next==-1:
next=len(setting)
return next,np.array(cases)
def clasify2(test,setting):
line=[]
for idx in range(len(test)):
if (setting[idx]['arch']=='ff-net')&(setting[idx]['order']=='y'):
break
next=idx
while next<len(test):
flag=False
for tag in line:
if (tag['timeout']==setting[next]['timeout'])&(tag['workers']==setting[next]['workers']):
flag=True
if flag:
next+=1
continue
result={}
result['timeout']=setting[next]['timeout']
result['workers']=setting[next]['workers']
next,cases=find_same_setting2(setting,next)
data=[]
for p in cases:
data.append(test[p]['update_rate'])
result['max']=max(data)
result['min']=min(data)
data=np.array(data)
result['average']=np.mean(data)
result['median']=np.median(data)
line.append(result)
#print(result)
cases=[]
for case in line:
if case['timeout'] not in cases:
cases.append(case['timeout'])
return line,cases
def each_line2(line,case):
xs=[]
ys=[]
for data in line:
if data['timeout']!=case:
continue
xs.append(data['workers'])
ys.append(data[way])
print(data['workers'],data[way])
return np.array(xs),np.array(ys)
def draw2(test,setting):
line,cases=clasify2(test,setting)
for case in cases:
print(case)
xs,ys=each_line2(line,case)
plt.plot(xs,ys,label='timeout='+case)
plt.xlabel('workers')
plt.ylabel('update_rate')
plt.ylim(0,110)
plt.legend()
plt.show()
draw2(test,setting)
# %%
| SunZekai-CN/obj3 | benchmark/readlog.py | readlog.py | py | 7,318 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number"... |
24398367329 | import logging
import os
import random
from urlparse import urlparse
from bs4 import BeautifulSoup
import pickle
from PIL import Image
import requests
import twitter_oauth
logging.basicConfig(level=logging.DEBUG, filename='cuties.log')
logging.debug('Cuties start')
class Dog(object):
    '''
    Scrapes the SF SPCA adoptions pages and picks a random adoptable dog.

    After construction the instance exposes: dog_id, image (local filename of
    the downloaded, composited photo), profile_url, name, age, gender, energy.
    '''

    def __init__(self, testing=False, refresh=True):
        # Build the candidate list, then choose a dog and fetch its details.
        self.dog_list = self.make_dog_list(refresh)
        self.name = 'barf'  # placeholder, overwritten by dog_info()
        self.dog_info(testing)

    def scrape(self, page=0):
        '''
        Recursively fetch every adoption listing page.
        Returns a list of dog photo URLs.
        '''
        logging.debug('Making a request.')
        dog_page = requests.get('https://www.sfspca.org/adoptions/dogs?'
                                'page={}'.format(page))
        dog_soup = BeautifulSoup(dog_page.text)
        dogs = dog_soup.findAll('div', class_='node-animal')
        if len(dogs) == 0:
            # Past the last page: stop recursing.
            return []
        else:
            urls = [dog.img['src'] for dog in dogs]
            return urls + self.scrape(page+1)

    def make_dog_list(self, refresh):
        '''
        Calls scrape (or loads a pickled list of dog image urls).
        Returns a list of one-entry dicts mapping dog_id -> image filename.
        '''
        if refresh:
            dogs = self.scrape()
            logging.debug('Currently available pups: {}'.format(len(dogs)))
        else:
            # Offline mode: reuse a previously pickled URL list.
            with open('dog_image_urls.txt', 'r') as f:
                dogs = pickle.load(f)
        dog_list = []
        for url in dogs:
            path = urlparse(url)[2]
            filename = path.split('/').pop()
            filename_components = filename.split('-')
            # Dogs whose photo is the generic '<id>-photo.jpg' stock image
            # are skipped on purpose.
            if filename_components[1] != 'photo.jpg':
                dog_id = filename_components[0]
                dog = {dog_id: filename}
                dog_list.append(dog)
            else:
                pass
        logging.debug('Number of dogs with photos: {}'.format(len(dog_list)))
        return dog_list

    def choose_dog(self, testing=False):
        '''
        Pick a random dog that has not been tweeted yet.
        Sets self.dog_id / self.image and returns the dog id.
        '''
        logging.debug('Choosing a random dog.')
        choice = random.randrange(len(self.dog_list))
        lucky_dog = self.dog_list[choice]
        self.dog_id = lucky_dog.keys()[0]
        self.image = lucky_dog.values()[0]
        with open('tweeted_dogs.csv', 'r') as f:
            tweeted_dogs = f.read()
        if self.dog_id in tweeted_dogs:
            logging.debug('Repeat dog, choosing another')
            # Bug fix: at this point the image has not necessarily been
            # downloaded yet, so only delete it if it exists (the original
            # unconditional os.remove() raised OSError here).
            if os.path.exists(self.image):
                os.remove(self.image)
            # Bug fix: propagate `testing` so repeats are not recorded in
            # test runs (the original recursed with the default False).
            return self.choose_dog(testing)
        elif not testing:
            with open('tweeted_dogs.csv', 'a') as f:
                logging.debug('Dog ID {} tweet recorded'.format(self.dog_id))
                f.write(self.dog_id + '\n')
        logging.debug('New dog id: {}'.format(self.dog_id))
        return self.dog_id

    def dog_image(self):
        '''Download the chosen dog's photo, composite it onto a 450x240 white
        canvas and save it locally; returns the local filename.'''
        # NOTE(review): 'animals' + self.image has no '/' separator --
        # confirm the scraped filename is meant to be appended like this.
        image_path = ('https://www.sfspca.org/sites/default/files/styles/'
                      '480_width/public/images/animals' +
                      self.image)
        image_file = requests.get(image_path)
        with open(self.image, 'wb') as f:
            for chunk in image_file.iter_content(chunk_size=1024):
                f.write(chunk)
        im = Image.open(self.image)
        im = im.convert('RGBA')
        out = Image.new(size=(450, 240), color='white', mode='RGBA')
        out.paste(im, (85, 0), im)
        out.save(self.image)
        return self.image

    def age_parse(self, age):
        '''Turn a raw age field like '3 Years' into a phrase like 'a 3 year'.'''
        quantity = ''
        for i in age:
            if i == 'Y':
                scale = 'year'
                break
            elif i == 'M':
                scale = 'month'
                break
            # Bug fix: the original tested `i.isdigit` (the bound method,
            # always truthy) and therefore collected non-digit characters too.
            if i.isdigit():
                quantity += i
        if quantity == '1':
            age_string = 'a {}'.format(scale)
        elif quantity in ('8', '11'):
            # 'an eight/eleven ...' reads better than 'a eight ...'.
            age_string = 'an {} {}'.format(quantity, scale)
        else:
            age_string = 'a {} {}'.format(quantity, scale)
        return age_string

    def dog_info(self, testing):
        '''Choose a dog, download its image and scrape its profile details.'''
        self.choose_dog(testing)
        self.dog_image()
        self.profile_url = ('https://www.sfspca.org/adoptions/pet-details/' +
                            self.dog_id)
        dog_profile = requests.get(self.profile_url)
        profile_soup = BeautifulSoup(dog_profile.text)
        stats = profile_soup.find_all('span', class_='field-label')
        try:
            self.name = profile_soup.find('h1').text
            age = stats[1].next_sibling.next_sibling.text.strip('\n ')
            self.age = self.age_parse(age)
            self.gender = stats[2].next_sibling \
                                  .next_sibling \
                                  .text.strip('\n') \
                                  .strip(' ') \
                                  .lower()
            # self.personality
            try:
                energy = stats[3].next_sibling.next_sibling.text
                self.energy = energy.strip('\n').strip(' ').lower()
            except:
                self.energy = None
        except:
            logging.debug('No info for dog, choosing another.')
            os.remove(self.image)
            # Bug fix: the original called self.dog_info() with no argument,
            # which raised TypeError because `testing` is required.
            self.dog_info(testing)
class Tweet(object):
'''
Image filename, name, age, gender, energy, personality
'''
def __init__(self, testing=False):
self.lucky_dog = Dog(testing)
self.text = self.from_dog()
self.image = self.lucky_dog.image
def from_dog(self):
name = self.lucky_dog.name
age = self.lucky_dog.age
gender = self.lucky_dog.gender
energy = self.lucky_dog.energy
url = self.lucky_dog.profile_url
if energy is not None:
if energy in ('low', 'medium', 'high'):
text = 'Hi! I\'m {}, {} old {} energy {}. {}'.format(name,
age,
energy,
gender,
url)
else:
text = 'Hi! I\'m {}, {} old {} {}. {}'.format(name,
age,
energy,
gender,
url)
return text
else:
text = 'Hi! I\'m {}, {} old {}. {}'.format(name,
age,
gender,
url)
return text
def post_to_Twitter(self):
twitter_api = twitter_oauth.TweetPoster()
tweet_id = twitter_api.post_tweet(self.text, self.image)
os.remove(self.image)
return tweet_id
cutepetssf_tweet = Tweet()
tweet_id = cutepetssf_tweet.post_to_Twitter()
logging.debug('Success! Tweet ID: {}'.format(tweet_id))
| ecalifornica/CutePetsSF | cuties.py | cuties.py | py | 7,219 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.debug",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.debug",
... |
import sys
# Bug fix: sys.path is a list, so the original `sys.path('../../../')`
# raised "TypeError: 'list' object is not callable" at import time.
sys.path.append('../../../')

import torch
import torch.nn as nn

import Datasets.medutils_torch as medutils_torch
from Datasets.medutils_torch import complex
from Datasets.medutils_torch.fft import fft2, ifft2
from Datasets.medutils_torch.mri import \
    adjointSoftSenseOpNoShift, \
    forwardSoftSenseOpNoShift
class DataIDLayer(nn.Module):
    """Identity placeholder for a data-consistency layer (no-op pass-through)."""

    def __init__(self, *args, **kwargs):
        # Accept (and ignore) any arguments so this is a drop-in replacement
        # for the real data layers.
        super(DataIDLayer, self).__init__()

    def forward(self, x, *args, **kwargs):
        # Bug fix: the original declared the instance parameter as the typo
        # `selfself`.  The layer simply returns its input unchanged.
        return x

    def __repr__(self):
        return 'DataIDLayer()'
class DCLayer(nn.Module):
    """
    DC layer from DC-CNN; intended mainly for single-coil data.

    Blends the measured k-space `y` into the current estimate's k-space on
    the sampled locations, weighted by a (optionally learnable) lambda.
    """
    def __init__(self, lambda_init=0., learnable=True):
        """
        :param lambda_init -> float: Init value of data consistency block
        :param learnable -> bool: whether lambda is trained
        """
        super(DCLayer, self).__init__()
        self.lambda_ = torch.nn.Parameter(torch.Tensor(1))
        self.lambda_.data = torch.tensor(lambda_init, dtype=self.lambda_.dtype)
        # set_learnable() is the single place that toggles requires_grad
        # (the original also assigned it directly, redundantly).
        self.set_learnable(learnable)

    def forward(self, x, y, mask):
        A_x = fft2(x)
        # Unsampled positions keep the estimate; sampled positions are a
        # lambda-weighted mix of estimate and measurement.
        k_dc = (1 - mask) * A_x + mask * (self.lambda_ * A_x + (1 - self.lambda_) * y)
        x_dc = ifft2(k_dc)
        return x_dc

    def extra_repr(self):
        # Bug fix: the original read `self.requires_grad`, which does not
        # exist on nn.Module and raised AttributeError when printing.
        return f"lambda={self.lambda_.item():.4g}, learnable={self.lambda_.requires_grad}"

    def set_learnable(self, flag):
        self.lambda_.requires_grad = flag
class DataGDLayer(nn.Module):
    """
    DataLayer computing one gradient-descent step on the L2 data term.
    """
    def __init__(self, lambda_init, learnable=True):
        """
        :param lambda_init (float): init value of data term weight lambda
        :param learnable (bool): whether the weight is trained
        """
        super(DataGDLayer, self).__init__()
        self.lambda_init = lambda_init
        self.data_weight = torch.nn.Parameter(torch.Tensor(1))
        self.data_weight.data = torch.tensor(
            lambda_init,
            dtype=self.data_weight.dtype
        )
        self.set_learnable(learnable)

    def forward(self, x, y, smaps, mask):
        # Residual in k-space: A x - y.
        A_x_y = forwardSoftSenseOpNoShift(x, smaps, mask) - y
        # Gradient of the data term, mapped back to image space: A^H (A x - y).
        gradD_x = adjointSoftSenseOpNoShift(A_x_y, smaps, mask)
        return x - self.data_weight * gradD_x

    def __repr__(self):
        # Bug fix: the original reported the wrong class name ('DataLayer')
        # and was missing the closing parenthesis in the text.
        return f'DataGDLayer(lambda_init={self.data_weight.item():.4g})'

    def set_learnable(self, flag):
        self.data_weight.requires_grad = flag
class DataProxCGLayer(nn.Module):
    """
    Solving the prox wrt. the data term using Conjugate Gradient,
    as proposed by Aggarwal et al. (MoDL).
    """
    def __init__(self, lambda_init, tol=1e-6, itera=10, learnable=True):
        """
        :param lambda_init: init value of the data-term weight lambda
        :param tol: CG convergence tolerance
        :param itera: maximum number of CG iterations
        :param learnable: whether lambda is trained
        """
        super(DataProxCGLayer, self).__init__()
        self.lambda_a = torch.nn.Parameter(torch.Tensor(1))
        self.lambda_a.data = torch.tensor(lambda_init)
        self.lambda_a_init = lambda_init
        self.lambda_a.requires_grad = learnable

        self.tol = tol
        # Bug fix: the iteration budget is stored under one consistent name.
        # The original saved `self.iter` but read `self.itera` in forward()
        # and `self.lambdaa`/`self.itera` in extra_repr() -> AttributeError.
        self.iter = itera

        self.op = MyCG

    def forward(self, x, y, samps, mask):
        # Solve the prox problem with CG; gradients flow through MyCG.
        return self.op.apply(
            x, self.lambda_a, y, samps, mask,
            self.tol, self.iter
        )

    def extra_repr(self) -> str:
        return (f"lambda_init = {self.lambda_a.item():.4g}, tol={self.tol}"
                f" iter={self.iter} learnable={self.lambda_a.requires_grad}")

    def set_learnable(self, flag):
        self.lambda_a.requires_grad = flag
class MyCG(torch.autograd.Function):
"""
performs CG algorithm
"""
@staticmethod
def complexDot(data1, data2):
nBatch = data1.shape[0]
mult = complex.complex_mult_conj(data1, data2)
re, im = torch.unbind(mult, dim=-1)
return torch.stack([torch.sum(re.view(nBatch, -1), dim=-1),
torch.sum(im.view(nBatch, -1), dim=-1)], -1)
@staticmethod
def solve(x0, M, tol, max_iter):
nBatch = x0.shape[0]
# x0 shape tensor
x = torch.zeros(x0.shape).to(x0.device)
r = x0.clone()
p = x0.clone()
x0x0 = (x0.pow(2)).view(nBatch, -1).sum(-1)
rr = torch.stack([
(r.pow(2)).view(nBatch, -1).sum(-1),
torch.zeros(nBatch).to(x0.device)
], dim=-1)
it = 0
while torch.min(rr[..., 0] / x0x0) > tol and it < max_iter:
it += 1
q = M(p)
alpha = complex.complex_div(rr, MyCG.complexDot(p, q))
x += complex.complex_mult(
alpha.reshape(nBatch, 1, 1, 1, -1),
p.clone()
)
r -= complex.complex_mult(
alpha.reshape(nBatch, 1, 1, 1, -1),
q.clone()
)
rr_new = torch.stack([
(r.pow(2)).view(nBatch, -1).sum(-1),
torch.zeros(nBatch).to(x0.device)
],
dim=-1)
beta = torch.stack([
rr_new[..., 0] / rr[..., 0],
torch.zeros(nBatch).to(x0.device)
],
dim=-1)
p = r.clone() + complex.complex_mult(
beta.reshape(nBatch, 1, 1, 1, -1),
p
)
rr = rr_new.clone()
return x
@staticmethod
def forward(ctx, z, lambda_a, y, smaps, mask, tol, max_iter):
ctx.tol = tol
ctx.max_iter = max_iter
def A(x):
return forwardSoftSenseOpNoShift(x, smaps, mask)
def AT(y):
return adjointSoftSenseOpNoShift(y, smaps, mask)
def M(p):
return lambda_a * AT(A(p)) + p
x0 = lambda_a * AT(y) + z
ctx.save_for_backward(AT(y), x0, smaps, mask, lambda_a)
return MyCG.solve(x0, M, ctx.tol, ctx.max_iter)
@staticmethod
def backward(ctx, grad_x):
ATy, rhs, smaps, mask, lambda_a = ctx.saved_tensors
def A(x):
return forwardSoftSenseOpNoShift(x, smaps, mask)
def AT(y):
return adjointSoftSenseOpNoShift(y, smaps, mask)
def M(p):
return lambda_a * AT(A(p)) + p
Qe = MyCG.solve(grad_x, M, ctx.tol, ctx.max_iter)
QQe = MyCG.solve(Qe, M, ctx.tol, ctx.max_iter)
grad_z = Qe
grad_lambda_a = complex.complex_dotp(Qe, ATy).sum() \
- complex.complex_dotp(QQe, rhs).sum()
return grad_z, grad_lambda_a, None, None, None, None, None
class DataVSLayer(nn.Module):
    """
    DataLayer using the variable-splitting formulation: a data-consistency
    block (weight alpha) followed by a weighted-averaging block (weight beta).
    """
    def __init__(self, alpha_init, beta_init, learnable=True):
        """
        :param alpha_init -> float: Init value of data consistency block (DCB)
        :param beta_init -> float: Init value of weighted averaging block (WAB)
        :param learnable -> bool: whether alpha/beta are trained
        """
        super(DataVSLayer, self).__init__()
        self.alpha = torch.nn.Parameter(torch.Tensor(1))
        self.alpha.data = torch.tensor(alpha_init, dtype=self.alpha.dtype)

        self.beta = torch.nn.Parameter(torch.Tensor(1))
        self.beta.data = torch.tensor(beta_init, dtype=self.beta.dtype)

        self.set_learnable(learnable)

    def forward(self, x, y, smaps, mask):
        A_x = forwardSoftSenseOpNoShift(x, smaps, 1.)
        # Mix the measured k-space into the sampled positions (DCB)...
        k_dc = (1 - mask) * A_x + mask * (self.alpha * A_x + (1 - self.alpha) * y)
        x_dc = adjointSoftSenseOpNoShift(k_dc, smaps, 1.)
        # ...then average with the input image (WAB).
        x_wab = self.beta * x + (1 - self.beta) * x_dc
        return x_wab

    def extra_repr(self) -> str:
        # Bug fix: the original read `self.learnable`, which was never stored
        # and raised AttributeError; report the parameters' requires_grad.
        return (
            f"alpha={self.alpha.item():.4g},"
            f"beta={self.beta.item():.4g},"
            f"learnable={self.alpha.requires_grad}"
        )

    def set_learnable(self, flag):
        self.alpha.requires_grad = flag
        self.beta.requires_grad = flag
| YuyangXueEd/MRI_Recon_Tutorial | Models/sigmanet/modules/datalayer.py | datalayer.py | py | 7,826 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_num... |
40239177456 | #!/usr/bin/env python
"""This script identifies the controller and plots the results."""
# builtin
import os
import argparse
# external
import matplotlib.pyplot as plt
# local
import utils
from gait_landmark_settings import settings
PATHS = utils.config_paths()
def main(event, structure, recompute, normalize):
trial_numbers = sorted(settings.keys())
plot_dir = utils.mkdir(os.path.join(PATHS['figures_dir'],
'identification-results',
'-'.join(event.lower().split(' ')),
'-'.join(structure.split(' '))))
for trial_number in trial_numbers:
msg = 'Identifying {} controller from {} for trial #{}'
msg = msg.format(structure, event, trial_number)
print('=' * len(msg))
print(msg)
print('=' * len(msg))
trial = utils.Trial(trial_number)
if recompute:
trial.remove_precomputed_data()
trial.identify_controller(event, structure)
fig, axes = trial.plot_joint_isolated_gains(event, structure,
normalize=normalize)
solver = trial.control_solvers[event][structure]
id_num_steps = solver.identification_data.shape[0]
title = """\
{} Scheduled Gains Identified from {} Gait Cycles in Trial {}
Nominal Speed: {} m/s, Gender: {}
"""
fig.suptitle(title.format(structure.capitalize(), id_num_steps,
trial_number,
trial.meta_data['trial']['nominal-speed'],
trial.meta_data['subject']['gender']))
fig.set_size_inches((14.0, 14.0))
plt.tight_layout()
plt.subplots_adjust(top=0.85)
fig_path = os.path.join(plot_dir, 'gains-' + trial_number + '.png')
fig.savefig(fig_path, dpi=300)
print('Gain plot saved to {}'.format(fig_path))
plt.close(fig)
fig, axes = trial.plot_validation(event, structure)
fig_path = os.path.join(plot_dir, 'validation-' + trial_number + '.png')
fig.savefig(fig_path, dpi=300)
print('Validation plot saved to {}'.format(fig_path))
plt.close(fig)
if __name__ == "__main__":
desc = "Identify Controller"
parser = argparse.ArgumentParser(description=desc)
msg = ("A valid event name in the data, likely: "
"'Longitudinal Perturbation', 'First Normal Walking', "
"or 'Second Normal Walking'.")
parser.add_argument('-e', '--event', type=str, help=msg,
default='Longitudinal Perturbation')
msg = ("The desired controller structure: 'join isolated' or 'full'.")
parser.add_argument('-s', '--structure', type=str, help=msg,
default='joint isolated')
msg = ("Force recomputation of all data.")
parser.add_argument('-r', '--recompute', action="store_true", help=msg)
msg = ("Normalize gains to subject mass in plots.")
parser.add_argument('-n', '--normalize', action="store_true", help=msg)
args = parser.parse_args()
main(args.event, args.structure, args.recompute, args.normalize)
| csu-hmc/gait-control-direct-id-paper | src/identify_controller.py | identify_controller.py | py | 3,224 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "utils.config_paths",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "gait_landmark_settings.settings.keys",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gait_landmark_settings.settings",
"line_number": 21,
"usage_type": "name"
},
... |
20111198660 | # coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
_package_data = dict(
full_package_name='nim_install',
version_info=(0, 6, 0),
__version__='0.6.0',
author='Anthon van der Neut',
author_email='a.van.der.neut@ruamel.eu',
description='install nim compiler in Linux virtualenv assumes gcc',
# keywords="",
entry_points='nim_install',
# entry_points=None,
license='MIT',
since=2016,
# data_files="",
universal=True,
print_allowed=True,
extras_require={':python_version<="3.3"': ['backports.lzma']},
tox=dict(
env='23',
),
)
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
######
def main():
import sys
import os
import tarfile
if sys.version_info < (3, ):
from backports import lzma
import urllib2
else:
import lzma
from urllib.request import FancyURLopener
class MyURLOpener(FancyURLopener):
version = 'Mozilla/5.0'
try:
nim_version_string = sys.argv[1]
except IndexError:
nim_version = (1, 2, 6)
nim_version_string = '.'.join([str(x) for x in nim_version])
nim_download = 'http://nim-lang.org/download/nim-{}.tar.xz'.format(
nim_version_string)
print('getting', nim_download)
inst_dir = os.path.dirname(os.path.dirname(sys.executable))
print('inst_dir', inst_dir)
os.chdir(inst_dir)
if True:
from io import BytesIO
if sys.version_info < (3, ):
# request = urllib2.Request(nim_download)
# request.add_header('User-Agent', "Mozilla/5.0")
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
else:
opener = MyURLOpener()
response = opener.open(nim_download)
data = BytesIO()
data.write(lzma.decompress(response.read()))
data.seek(0)
with tarfile.open(fileobj=data, mode='r') as tar:
for tarinfo in tar:
if '/' not in tarinfo.name:
continue
name = tarinfo.name.split('/', 1)[1]
if tarinfo.isdir():
if not os.path.exists(name):
os.mkdir(name)
continue
# print('tarinfo', tarinfo.name, name, tarinfo.isdir())
with open(name, 'wb') as fp:
fp.write(tar.extractfile(tarinfo).read())
# os.system('make -j8')
os.system('sh build.sh')
os.system('./bin/nim c koch')
os.system('./koch tools')
if __name__ == '__main__':
main()
| gitrootside/vokker | venv/Lib/site-packages/nim_install/__init__.py | __init__.py | py | 2,696 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.FancyURLopener",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.... |
13917637840 | from django.contrib import admin
from .models import Activity
@admin.register(Activity)
class ActivityAdmin(admin.ModelAdmin):
    """Django admin configuration for Activity records."""
    # Columns shown on the Activity changelist page.
    list_display = (
        'id',
        'created_at',
        'activity',
        'description',
    )
    # Sidebar filter and date drill-down navigation by creation time.
    list_filter = ('created_at',)
    date_hierarchy = 'created_at'
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 6,
"usage_type": "call"
},
{
... |
73221180195 | import datetime
import pytz
class Account:
    """ Simple account class with balance and a timestamped transaction log. """

    @staticmethod
    def _current_time():
        """Return the current UTC time as a timezone-aware datetime."""
        utc_time = datetime.datetime.utcnow()
        return pytz.utc.localize(utc_time)

    def __init__(self, name, balance):
        """Create an account for *name* with an opening *balance*."""
        self.name = name
        self.balance = balance
        # (timestamp, signed amount) pairs; withdrawals are stored negative.
        self.transaction_list = []
        print("Account created for " + self.name)

    def deposit(self, amount):
        """Add *amount* to the balance; non-positive amounts are ignored."""
        if amount > 0:
            self.balance += amount
            self.show_balance()
            self.transaction_list.append((Account._current_time(), amount))

    def withdraw(self, amount):
        """Remove *amount* if it is positive and covered by the balance."""
        if 0 < amount <= self.balance:
            self.balance -= amount
            self.transaction_list.append((Account._current_time(), -amount))
        else:
            # Typo fix in the user-facing message ("no more then" -> "than").
            print("The amount must be greater than zero and no more than your account balance")
        self.show_balance()

    def show_balance(self):
        print("Balance is {}".format(self.balance))

    def show_transactions(self):
        """Print each transaction rendered in the local timezone."""
        for date, amount in self.transaction_list:
            if amount > 0:
                tran_type = "deposited"
            else:
                tran_type = "withdrawn"
                # Display withdrawals as positive amounts.
                amount *= -1
            print("{:6} {} on {} (local time was {})".format(amount, tran_type, date, date.astimezone()))
if __name__ == '__main__':
    # Smoke-test the Account class from the command line.
    tim = Account("Tim", 0)
    tim.show_balance()
    tim.deposit(1000)
    # tim.show_balance()
    tim.withdraw(500)
    # tim.show_balance()
    # Invalid: exceeds the balance, so only the error message is printed.
    tim.withdraw(2000)
    tim.show_transactions()
| chuckwm/PycharmProjects | oop/teacher_accounts.py | teacher_accounts.py | py | 1,578 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytz.utc.localize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytz.... |
25441538667 | import pygame
from .funcs import *
from .restart_button import Restart_Button
from .resume_button import Resume_Button
from .sfx_disable_button import SFX_Disable_Button
from .music_disable_button import Music_Disable_Button
class Pause_Menu:
    """In-game pause menu: owns its buttons and runs a modal event loop."""

    def __init__(self, game):
        self.game = game
        self.image = load_image('data/graphics/images/pause_menu_background.png', 3)

    def load(self):
        """Enter the pause menu; blocks in run() until a button ends it."""
        self.running = True
        self.load_buttons()
        self.run()

    def load_buttons(self):
        # Recreated on every load so button state starts fresh.
        self.restart_button = Restart_Button(self.game)
        self.resume_button = Resume_Button(self.game)
        self.sfx_disable_button = SFX_Disable_Button(self.game)
        self.music_disable_button = Music_Disable_Button(self.game)
        self.buttons = [self.restart_button, self.resume_button, self.sfx_disable_button, self.music_disable_button]

    def render(self):
        """Draw the frozen game frame, the menu background, buttons and cursor."""
        self.game.render(update_screen=False)
        # Center the menu background on the screen.
        self.game.screen.blit(self.image, (self.game.screen.get_width()/2-self.image.get_width()/2, self.game.screen.get_height()/2-self.image.get_height()/2))
        for button in self.buttons:
            button.render()
        self.game.cursor.render()
        pygame.display.update()

    def update(self):
        # Pump events without allowing player movement while paused.
        self.game.event_manager.update(player_movement=False)
        for button in self.buttons:
            # A truthy button.update() means the menu should close.
            if button.update():
                self.refresh()

    def run(self):
        # Modal loop: runs until refresh() clears self.running.
        while self.running:
            self.update()
            self.render()

    def refresh(self):
        """Exit the menu loop and drop any queued events."""
        self.running = False
        pygame.event.clear()
| pratripat/Dungeon-Game | scripts/pause_menu.py | pause_menu.py | py | 1,597 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "restart_button.Restart_Button",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "resume_button.Resume_Button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sfx_disable_button.SFX_Disable_Button",
"line_number": 21,
"usage_type": "call"... |
11778563292 | from PyQt5.QtWidgets import QLabel, QSizePolicy, QRubberBand
from PyQt5.QtGui import QImage, QPixmap
from PyQt5 import QtCore
from PyQt5.Qt import QSize, QRect, QRectF, QPoint, QPointF
import numpy as np
class ImageWidget(QLabel):
    """QLabel that displays a numpy RGB image scaled to fit the widget.

    Keeps the image's aspect ratio and offers helpers that map between
    widget coordinates and normalized image coordinates in [0, 1].
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.image_array = None  # last (H, W, 3) array given to set_array, or None
        self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
        self.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)

    def hasHeightForWidth(self):
        # Aspect-ratio layout is only meaningful once an image is shown.
        return self.pixmap() is not None

    def heightForWidth(self, w):
        # Preserve the pixmap's aspect ratio. Qt only calls this when
        # hasHeightForWidth() is True, so the implicit None branch is unused.
        if self.pixmap():
            return int(w * (self.pixmap().height() / self.pixmap().width()))

    def sizeHint(self):
        pix = self.pixmap()
        size = QSize() if pix is None else pix.size()
        self.updateGeometry()
        return size

    def resizeEvent(self, event):
        super().resizeEvent(event)
        # Rescale the pixmap to the new widget size.
        self.update_image()

    def set_array(self, array):
        """Display a (H, W, 3) uint8 RGB array (or None to clear)."""
        self.image_array = array
        self.update_image()

    def update_image(self):
        """Rebuild the pixmap from image_array, scaled to the widget size."""
        if self.image_array is None:
            self.clear()
            return

        height, width, channels = self.image_array.shape
        assert channels == 3
        bytes_per_line = channels * width
        qt_image = QImage(self.image_array.data, width, height, bytes_per_line, QImage.Format_RGB888)
        pix = QPixmap(qt_image)
        # BUG FIX: aspectRatioMode and transformMode are separate positional
        # arguments of QPixmap.scaled(); OR-ing Qt.KeepAspectRatio with
        # Qt.SmoothTransformation (both == 1) silently left the default
        # FastTransformation in effect.
        self.setPixmap(pix.scaled(self.size(), QtCore.Qt.KeepAspectRatio,
                                  QtCore.Qt.SmoothTransformation))

    def map_to_image(self, widget_point):
        """Map a widget-space QPoint to normalized image coordinates (QPointF)."""
        pix = self.pixmap()
        if pix:
            # The pixmap is centered; remove the letterbox offsets first.
            w_off = 0.5 * (self.width() - pix.width())
            h_off = 0.5 * (self.height() - pix.height())
            pixmap_point = QPointF(widget_point.x() - w_off, widget_point.y() - h_off)
            image_point = QPointF(pixmap_point.x() / pix.width(), pixmap_point.y() / pix.height())
            return image_point
        else:
            return QPointF()

    def map_from_image(self, image_point):
        """Map normalized image coordinates back to a widget-space QPoint."""
        pix = self.pixmap()
        if pix:
            w_off = 0.5 * (self.width() - pix.width())
            h_off = 0.5 * (self.height() - pix.height())
            pixmap_point = QPointF(image_point.x() * pix.width(), image_point.y() * pix.height())
            widget_point = QPointF(pixmap_point.x() + w_off, pixmap_point.y() + h_off)
            return widget_point.toPoint()
        else:
            return QPoint()
class ImageWithROI(ImageWidget):
    """ImageWidget with a draggable rubber-band region of interest.

    The ROI endpoints (`origin`/`final`) are stored in normalized image
    coordinates (0..1), so the selection survives widget resizes.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.rubberband = None  # QRubberBand overlay, created on first press
        self.origin = None      # drag start point, normalized image coords
        self.final = None       # drag end point, normalized image coords

    def set_array(self, array, preserve_roi=False):
        """Show a new image; drop the current ROI unless preserve_roi is set."""
        super().set_array(array)
        if self.rubberband and not preserve_roi:
            self.rubberband.hide()
            self.rubberband = None

    def mousePressEvent(self, event):
        # Start a new ROI drag only if the press lands inside the pixmap.
        origin = event.pos()
        if self.pixmap() is not None:
            if self.pixmap().rect().contains(origin):
                self.origin = self.map_to_image(origin)
                if self.rubberband is None:
                    self.rubberband = QRubberBand(QRubberBand.Rectangle, self)
                else:
                    self.rubberband.hide()
                # Zero-size band at the press point; it grows on mouse move.
                self.rubberband.setGeometry(QRect(self.map_from_image(self.origin), QSize()))
                self.rubberband.show()
            else:
                print('Pressed outside image')

    def mouseMoveEvent(self, event):
        if self.rubberband is not None:
            self.final = self.map_to_image(event.pos())
            self.update_rubberband()

    def update_rubberband(self):
        # Recompute the widget-space rectangle from the normalized endpoints.
        self.rubberband.setGeometry(QRect(self.map_from_image(self.origin), self.map_from_image(self.final)).normalized())

    def mouseReleaseEvent(self, event):
        if self.rubberband is not None:
            self.update_rubberband()

    def resizeEvent(self, event):
        super().resizeEvent(event)
        # Keep the band glued to the re-laid-out image.
        if self.pixmap() and self.rubberband:
            self.update_rubberband()

    def get_rubberband_rect(self):
        """Return the ROI as a normalized QRectF, or None if never dragged."""
        if self.origin and self.final:
            return QRectF(self.origin, self.final).normalized()
        else:
            return None

    def get_image_and_roi(self):
        """Return (image_array, (x, y, w, h)) with the ROI in pixel units.

        Falls back to the full image when no ROI was drawn; raises
        ValueError when no image has been set.
        """
        roi_norm = self.get_rubberband_rect() or QRectF(0, 0, 1, 1)

        if self.image_array is not None:
            h, w, *_ = self.image_array.shape
            rx = int(np.round(roi_norm.x() * w))
            ry = int(np.round(roi_norm.y() * h))
            rw = int(np.round(roi_norm.width() * w))
            rh = int(np.round(roi_norm.height() * h))
            roi = (rx, ry, rw, rh)
            return self.image_array, roi
        else:
            raise ValueError("No image set")
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy.MinimumExpanding",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 13,
"usage_type": "n... |
1569004540 | """Provides interface for training the model."""
import os
import pathlib
import numpy as np
import torch
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import models
from .preprocessing import ImageDataPipeline
def save(model, path):
    """
    Saves the model's parameter states.

    Parameters:
        - model (torch.nn.Module)
            - The model whose state_dict will be written to disk.
        - path (str)
            - Path where the model will be saved to.
            - e.g. hello/world/saved_model_file.pth
    """
    # PyTorch requires the parent directory of the save path to exist.
    # mkdir(exist_ok=True) replaces the original exists()+makedirs()
    # sequence, which was racy between the check and the create.
    pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), path)
def plot(steps, errors):
    """
    Plots error over the steps.

    Parameters:
        - steps (int)
            - Number of parameter updates (minibatches trained on).
        - errors (list of floats)
            - Represents the errors for each corresponding step.
            - e.g. error[0] is error for step 1
    """
    step_axis = np.arange(1, steps + 1)
    plt.plot(step_axis, np.array(errors))
    plt.xlabel('Step')
    plt.ylabel('Error')
    plt.show()
def main(train_dir,
         label_dict,
         steps,
         savepath,
         resuming=True):
    """
    Trains the model and saves the result.

    Parameters:
        - train_dir (str)
            - Path to the directory of classes.
            - e.g. 'data/train', where 'train' holds subdirs with images in them.
        - label_dict (dict, str -> np.ndarray)
            - Maps the name of the subdirectory (class) to a label.
            - e.g. {'cats': np.array([[1, 0]]), 'dogs': np.array([[0, 1]])}
            - Each label must have the same shape!
                - In this case, the two labels are of shape [1, 2].
        - steps (int)
            - Number of parameter updates (minibatches trained on).
        - savepath (str)
            - Path where the model will be saved/loaded from.
            - e.g. hello/world/save_model.pth
        - resuming (bool)
            - Whether to resume training from a saved model or to start from scratch.
    """
    # Initialize model and send to device.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Using device: {}'.format(device))
    model = models.BabyResNet().to(device)
    if resuming:
        model.load_state_dict(torch.load(savepath))
    # Declare optimizer, preprocessor, and list to record errors.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    preproc = ImageDataPipeline()
    errors = []
    # Begin training.
    for step, path, image, label in tqdm(preproc.preprocess_classes(steps, train_dir, label_dict),
                                         desc='Progress', total=steps, ncols=99, unit='image'):
        optimizer.zero_grad()
        image, label = torch.tensor(image).to(device), torch.tensor(label).to(device)
        output = model(image)
        error = torch.sqrt(torch.nn.functional.mse_loss(output, label))
        # BUG FIX: store a plain float rather than the loss tensor. Appending
        # the tensor kept every step's autograd graph alive for the whole run
        # (a memory leak) and later passed tensors into plot(), which expects
        # a list of floats.
        errors.append(error.item())
        error.backward()
        optimizer.step()
    save(model, savepath)
    print('\a')
    plot(steps, errors)
| MarxSoul55/cats_vs_dogs | cats_vs_dogs/src/pytorch_impl/src/train.py | train.py | py | 3,279 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_numb... |
17217608702 | import sys
sys.path.append('../TS2VEC')
from pathlib import Path
import hydra
from omegaconf import OmegaConf, DictConfig
import numpy as np
from base.network import TS2Vec
from task.classification.model import TSC
PROJECT_PATH = Path('.').absolute()
DATA_PATH = Path(PROJECT_PATH, 'data', 'UCRArchive_2018', 'FordA')
def normalizer_3d(train_arr, target_arr):
    """Standardize target_arr with per-channel mean/std taken from train_arr.

    Statistics are computed over the first two axes of the 3-D training
    array, i.e. one mean/std per entry of the last axis.
    """
    flat = train_arr.reshape(-1, train_arr.shape[2])
    return (target_arr - flat.mean(axis=0)) / flat.std(axis=0)
def load_forda(DATA_DIR):
    """Load the FordA train/test splits from tab-separated UCR files.

    Returns (inputs_train, inputs_test, labels_train, labels_test): inputs
    have shape (N, T, 1), labels use {0, 1} instead of the files' {-1, 1},
    and the training split is shuffled with a random permutation.
    """
    train = np.loadtxt(Path(DATA_DIR, 'FordA_TRAIN.tsv'))
    test = np.loadtxt(Path(DATA_DIR, 'FordA_TEST.tsv'))

    # Column 0 is the label, the rest is the time series.
    labels_train, inputs_train = train[:, 0], train[:, 1:]
    labels_test, inputs_test = test[:, 0], test[:, 1:]

    # Add a trailing channel axis: (N, T) -> (N, T, 1).
    inputs_train = inputs_train[:, :, np.newaxis]
    inputs_test = inputs_test[:, :, np.newaxis]

    # Shuffle the training split.
    idx = np.random.permutation(len(inputs_train))
    inputs_train, labels_train = inputs_train[idx], labels_train[idx]

    # Remap labels from {-1, 1} to {0, 1}.
    labels_train[labels_train == -1] = 0
    labels_test[labels_test == -1] = 0

    # inputs_train = normalizer_3d(inputs_train, inputs_train)
    # inputs_test = normalizer_3d(inputs_train, inputs_test)
    return inputs_train, inputs_test, labels_train, labels_test
@hydra.main(config_path='.', config_name='cfg.yaml')
def main(cfg: DictConfig) -> None:
    """Hydra entry point: pre-train TS2Vec on FordA, then fit the classifier.

    NOTE(review): this function is currently bypassed — the __main__ block
    below drives training directly with OmegaConf-loaded configs instead.
    """
    # config = OmegaConf.load('./experiments/classification/fordA/cfg.yaml')
    # os.system('mlflow ui --backend-store-uri' +
    #           'file://' + hydra.utils.get_original_cwd() + '/mlruns')
    # http://127.0.0.1:5000/
    inputs_train, inputs_test, labels_train, labels_test = load_forda(DATA_PATH)

    # learn TS2Vec (self-supervised representation)
    ts2vec = TS2Vec(cfg)
    ts2vec.learn(inputs_train)

    # learn Classifier on top of the learned encoder
    tsc = TSC(cfg, ts2vec.model)
    tsc.learn(inputs_train, labels_train.reshape(-1, 1, 1),
              valid_data=(inputs_test, labels_test.reshape(-1, 1, 1)))
if __name__ == "__main__":
    # main()
    # Direct (non-hydra) training path: load data, pre-train the TS2Vec
    # encoder, then fit the classification head on top of it.
    inputs_train, inputs_test, labels_train, labels_test = load_forda(DATA_PATH)
    cfg = OmegaConf.load('./experiments/classification/fordA/cfg.yaml')
    # learn TS2Vec
    ts2vec = TS2Vec(cfg)
    ts2vec.learn(inputs_train, project_path=PROJECT_PATH)
    cfg = OmegaConf.load('./experiments/classification/fordA/cfg_.yaml')
    tsc = TSC(cfg, ts2vec.model)
    tsc.learn(inputs_train, labels_train.reshape(-1, 1, 1), valid_data=(inputs_test, labels_test.reshape(-1, 1, 1)),
              verbose=1,
              project_path=PROJECT_PATH, save_path=None)
    # # learn Classifier
    # tsc = TSC(cfg, ts2vec.model)
    # NOTE(review): this second learn() call re-trains the classifier and
    # looks like a leftover duplicate of the call above — confirm intent.
    tsc.learn(inputs_train, labels_train.reshape(-1, 1, 1),
              valid_data=(inputs_test, labels_test.reshape(-1, 1, 1)), project_path=PROJECT_PATH)
| tae73/TS2Vec-Tensorflow | experiments/classification/FordA/forda.py | forda.py | py | 2,840 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_num... |
12047340102 | import argparse
import os
import numpy as np
import tqdm.auto as tqdm
import math
import torch
import torch.optim as optim
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
import datasets
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
import minimal_llama.gist.llama_simple3 as llama_simple3
import minimal_llama.utils.io_utils as io_utils
import minimal_llama.utils.torch_utils as torch_utils
from accelerate import init_empty_weights
import minimal_llama.newfancy.fsdp_utils as fsdp_utils
FSDP_IS_AVAILABLE = enable_2d_with_fsdp()
def run():
    """CLI entry point: load a sharded FSDP checkpoint and re-save it whole.

    Rebuilds the LLaMA model under FSDP, restores the sharded model and
    optimizer state from --load_dir, then writes a consolidated model
    checkpoint to --save_path. Expects torchrun-style env vars
    (LOCAL_RANK / RANK / WORLD_SIZE) and a CUDA device per rank.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_size", type=str, default="7b")
    parser.add_argument("--load_dir", type=str)
    parser.add_argument("--save_path", type=str)
    parser.add_argument("--mixed_precision", action="store_true")
    parser.add_argument("--expand_embedding", type=int, default=256)
    parser.add_argument("--save_optimizer", action="store_true", default=False)
    args = parser.parse_args()
    # Distributed environment is supplied by the launcher (torchrun).
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    fsdp_utils.setup(rank=rank, world_size=world_size)
    mixed_precision_policy, auto_wrap_policy = fsdp_utils.get_policies(
        args, rank, layer_class=llama_simple3.LLaMALayer)
    # Build the model with the requested size and gist-token count.
    model_config = llama_simple3.LLAMA_CONFIG_DICT[args.model_size]
    model_config.num_gist_tokens = args.expand_embedding
    model_config.dtype = torch.bfloat16
    model = llama_simple3.LLaMAModel(config=model_config)
    model = FSDP(
        model,
        # process_group=None, # see: tp
        auto_wrap_policy=auto_wrap_policy,
        mixed_precision=mixed_precision_policy,
        sharding_strategy=ShardingStrategy.FULL_SHARD,
        device_id=torch.cuda.current_device(),
        limit_all_gathers=True,
    )
    # The optimizer exists only so the sharded loader can restore its state
    # alongside the model; lr=1 is a placeholder (no training happens here).
    optimizer = optim.AdamW(model.parameters(), lr=1)
    fsdp_utils.load_model_and_optimizer_sharded(
        model=model,
        rank=rank,
        load_dir=args.load_dir,
        optim=optimizer,
    )
    fsdp_utils.save_model_checkpoint(
        model=model,
        rank=rank,
        save_path=args.save_path,
    )


if __name__ == "__main__":
    run()
| zphang/minimal-llama | minimal_llama/gist/convert_fsdp_checkpoint.py | convert_fsdp_checkpoint.py | py | 2,450 | python | en | code | 447 | github-code | 1 | [
{
"api_name": "torch.distributed.tensor.parallel.fsdp.enable_2d_with_fsdp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 38,
"usage_type": "attribu... |
70813688993 | from cmd import PROMPT
from lib2to3.pgen2 import driver
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
def scraper(browser):
    """Scrape one call-for-proposals (bando) page into a dict.

    Returns None for the placeholder 'TEST' page, otherwise a dict with the
    title, current URL, description and related measure of the call.
    """
    bando = {}
    titolo = browser.find_element(By.XPATH, "//section[@id='post-content']//h1").text
    if titolo == 'TEST':
        return
    bando['Titolo'] = titolo
    bando['url'] = browser.current_url
    # Store the elements' text (not the WebElement objects), so the result
    # stays usable after navigating away from the page.
    bando['Descrizione'] = browser.find_element(By.XPATH, "//div[@class='field field-name-body field-type-text-with-summary field-label-hidden']").text
    # BUG FIX: the class predicate was missing its opening quote
    # (@class=field-item even'), which made the XPath invalid and raised
    # InvalidSelectorException at runtime.
    bando['Misura'] = browser.find_element(By.XPATH, "//div[@class='field field-name-field-relazione-misura field-type-taxonomy-term-reference field-label-above']//div[@class='field-item even']").text
    print(bando)
    return bando
def crowler(browser):
    """Walk the open-calls listing page and scrape every linked detail page.

    Returns a list of the dicts produced by scraper() (None results are
    skipped).
    """
    actions = ActionChains(browser)  # NOTE(review): unused — candidate for removal
    bandi = []
    # Collect the hrefs up front: the anchor elements go stale once we
    # navigate away from the listing page.
    lista = browser.find_elements(By.XPATH, "//div[@class='field-item even']//h3//a")
    links = []
    for ii in lista:
        #time.sleep(3)
        tmp = ii.get_attribute("href")
        if tmp != None:
            links.append(tmp)
    for ii in links:
        print(ii)
        # Re-load the listing before opening each call (keeps navigation
        # state consistent), then open the call with a trailing slash.
        browser.get('https://psr.regione.molise.it/aperti')
        time.sleep(3)
        browser.get(ii+'/')
        time.sleep(2)
        # A call page may link several "Leggi tutto" (read more) articles.
        # NOTE(review): navigating inside this loop stales the remaining
        # elements of tmp_list after the first iteration — confirm pages
        # only ever have one such link.
        tmp_list = browser.find_elements(By.PARTIAL_LINK_TEXT, 'Leggi tutto')
        for jj in tmp_list:
            browser.get(jj.get_attribute('href'))
            b_tmp = scraper(browser)
            time.sleep(2)
            if b_tmp != None:
                bandi.append(b_tmp)
    return bandi
def psrMolise():
    """Open the PSR Molise open-calls page and crawl every listed call."""
    driver = webdriver.Chrome()
    driver.get('https://psr.regione.molise.it/aperti')
    crowler(driver)


if __name__ == '__main__':
    psrMolise()
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 19,
"us... |
44847088453 | from setuptools import setup
import os
def _get_version():
filename = os.path.join('src', 'cyclone', '__init__.py')
glb = {}
with open(filename) as fp:
for line in fp:
if '__version__' in line:
exec(line, glb)
return glb['__version__']
raise RuntimeError('cannot find version')
# Build configuration for the `cyclone` scraper package (src layout).
setup(
    name='cyclone',
    version=_get_version(),
    description='Cyclone',
    namespace_packages=[],
    package_dir={'': 'src'},
    packages=[
        'cyclone',
        'cyclone.spiders'
    ],
    include_package_data=True,
    install_requires=[
        # NOTE(review): stray trailing space inside the SQLAlchemy pin below;
        # pip tolerates it, but it is worth tidying.
        'SQLAlchemy==1.3.1 ',
        'beautifulsoup4==4.7.1',
        'scrapy==1.6.0',
        'psycopg2==2.7.7'
    ]
)
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 15,
"usage_type": "call"
}
] |
6208729995 | import logging
import os
import sentry_sdk
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, RegexHandler, ConversationHandler
from handlers import conversation
from settings import PROXY
# Console logging at INFO level; uncomment `filename` to log to a file instead.
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO
                    # filename='bot.log'
                    )
logger = logging.getLogger(__name__)
logger.info('BOTBOT')
# Crash reporting.
# NOTE(review): the Sentry DSN is hard-coded here; consider loading it from
# the environment alongside API_KEY.
sentry_sdk.init("https://b67a03c0cb244e35b5a57c803abae167@sentry.io/1315096")
def main():
    """Build the Telegram bot, attach the conversation handler, start polling."""
    bot = Updater(os.getenv('API_KEY'), request_kwargs=PROXY)
    bot.dispatcher.add_handler(conversation)
    bot.start_polling()
    bot.idle()


if __name__ == '__main__':
    main()
| KaltakhchyanD/lp_project_orders_tg_bot | bot.py | bot.py | py | 749 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sentry_sdk.in... |
71845052193 | from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from django.db.models import Q, Count
from ..models import Question, Category
def index(request, category_name='qna'):
    """Render the question list for a category with search, sort and paging.

    Query parameters:
        page -- 1-based page number (default '1')
        kw   -- keyword matched against subject, content, answers and
                author/answerer usernames
        so   -- sort order: 'recommend', 'popular' or 'recent' (default)
    """
    page = request.GET.get('page', '1')
    kw = request.GET.get('kw', '')        # search keyword
    so = request.GET.get('so', 'recent')  # sort order

    category_list = Category.objects.all()
    category = get_object_or_404(Category, name=category_name)
    # FIX: the original fetched all questions ordered by date and then
    # immediately overwrote the queryset; that dead query is removed.
    question_list = Question.objects.filter(category=category)

    if so == 'recommend':
        # Annotations may follow reverse relations (e.g. Count('voter')).
        question_list = question_list.annotate(num_voter=Count('voter')).order_by('-num_voter', '-create_date')
    elif so == 'popular':
        question_list = question_list.annotate(num_answer=Count('answer')).order_by('-num_answer', '-create_date')
    else:
        question_list = question_list.order_by('-create_date')

    # Keyword search; distinct() removes duplicate rows introduced by the
    # join on answers.
    if kw:
        question_list = question_list.filter(
            Q(subject__icontains=kw) |                  # question subject
            Q(content__icontains=kw) |                  # question content
            Q(answer__content__icontains=kw) |          # answer content
            Q(author__username__icontains=kw) |         # question author
            Q(answer__author__username__icontains=kw)   # answer author
        ).distinct()

    # Pagination: 10 questions per page.
    paginator = Paginator(question_list, 10)
    page_obj = paginator.get_page(page)
    max_index = len(paginator.page_range)
    context = {'question_list': page_obj, 'max_index': max_index, 'page': page, 'kw': kw, 'so': so,
               'category_list': category_list, 'category': category}
    return render(request, 'pybo/question_list.html', context)
def detail(request, question_id):
    """Render the detail page for one question (404 if it does not exist)."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'pybo/question_detail.html', {'question': question})
| ShinHyeongcheol/mysite | pybo/views/base_views.py | base_views.py | py | 2,112 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.Question.objects.order_by",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 12,
"usage_type": "name"
},
{
"api... |
23972292939 | from django.shortcuts import render
from rest_framework import viewsets
from .models import ApartmentModel, Comment
from apps.apartment.forms import CommentForm
from apps.apartment.api.serializers import ApartmentSerializers
from apps.contact.models import ContactModel
from apps.hoteInfo.models import HotelInfo
def apartmentView(req):
    """Render the blog page: three newest apartments, sidebar data and a
    per-post comment count.

    FIX: the original opened with an unused `Comment.objects.all()` query;
    that dead query is removed.
    """
    all_model = ApartmentModel.objects.order_by('-id')[:3]
    contact = ContactModel.objects.order_by('-id')[0]
    info = HotelInfo.objects.order_by('-id')[0]
    recentNews = ApartmentModel.objects.order_by('id')[:2]

    # Count comments per displayed post.
    # NOTE(review): one query per post (N+1); an annotate(Count(...)) would
    # do this in a single query if the reverse relation allows it.
    comments_counts = {}
    for post in all_model:
        comments = Comment.objects.filter(post=post)
        comments_counts[post.id] = comments.count()

    context = {
        'all_model': all_model,
        'contact': contact,
        'info': info,
        'recentNews': recentNews,
        'comments_counts': comments_counts
    }
    return render(req, 'blog.html', context)
def aboutApartment(req):
    """Render the about page: newest apartment plus a three-item slider."""
    newest_first = ApartmentModel.objects.order_by('-id')
    context = {
        'aboutHotels': newest_first[0],
        'slideAbout': newest_first[1:4],
    }
    return render(req, 'about.html', context)
def blogDetalis(req):
    """Render the blog-detail page with a comment form and sidebar data.

    FIX: leftover debug prints of the contact object were removed.
    """
    commentForm = CommentForm()
    contact = ContactModel.objects.order_by('-id')[0]
    recentNews = ApartmentModel.objects.order_by('id')[:2]
    context = {
        'commentForm': commentForm,
        'contact': contact,
        'recentNews': recentNews
    }
    return render(req, 'blog-detalis.html', context)
# REST endpoint exposing full CRUD for ApartmentModel via DRF's ModelViewSet.
class ApartmentViewSets(viewsets.ModelViewSet):
    queryset = ApartmentModel.objects.all()
    serializer_class = ApartmentSerializers
| Strannik1424/Luxen-Hotel | apps/apartment/views.py | views.py | py | 1,778 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.Comment.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Comment.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.Comment",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": ... |
25204853716 | __author__ = "jhurley@gmail.com (James Hurley)"
import cgi
import datetime
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import mail
import os
from model import gModel
def getFormattedDate(gift):
    """Formats a gift's creation date into readable PDT time.

    Args:
        gift: A gift object whose `date` attribute is a UTC datetime.

    Returns:
        A string that's formatted like so: "12/2/09 (23:49 PDT)"
    """
    # Let timedelta arithmetic handle day/month/year rollover instead of the
    # previous hand-rolled calendar logic, which had a wrong month-length
    # table and appended (rather than prepended) the minute's zero padding,
    # turning e.g. ":05" into ":50".
    # NOTE(review): the original shifted UTC by -9 hours even though PDT is
    # UTC-7; the offset is preserved here to keep output unchanged.
    adjusted = gift.date - datetime.timedelta(hours=9)
    return "%d/%d/%s (%d:%02d PDT)" % (
        adjusted.month,
        adjusted.day,
        str(adjusted.year)[2:],
        adjusted.hour,
        adjusted.minute,
    )
| JamesHurley/gifty | controllers/helper.py | helper.py | py | 1,835 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.timedelta",
"line_number": 26,
"usage_type": "call"
}
] |
10100352992 | import pygame
from .base import BaseState
class CustomizeCharacter(BaseState):
    """Game state showing the character-customization screen title."""

    def __init__(self):
        super().__init__()
        self.title = self.font.render("Customize Character", False, 'White')
        self.title_rect = self.title.get_rect(center=self.screen_rect.center)

    def draw(self, surface):
        # Clear to black, then blit the pre-rendered, centered title.
        background = pygame.Color("black")
        surface.fill(background)
        surface.blit(self.title, self.title_rect)
| JeffMcCracken/run-escape | code/states/customize_character.py | customize_character.py | py | 411 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "base.BaseState",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.Color",
"line_number": 12,
"usage_type": "call"
}
] |
37716038335 | import streamlit as st
import time
import torch
import torchvision.transforms as transforms
import torch.nn as nn
from PIL import Image
# Shared preprocessing for both classifiers: resize to the CNN's 224x224
# input, convert to a tensor and normalize each channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
class CNN(nn.Module):
    """Two-block convolutional classifier for 3x224x224 images (2 classes)."""

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: 3 -> 16 channels, then halve spatial size (224 -> 112).
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Block 2: 16 -> 32 channels, then halve spatial size (112 -> 56).
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Classifier head: flatten the 32x56x56 feature map down to 2 logits.
        self.fc1 = nn.Linear(32 * 56 * 56, 64)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        """Return raw class logits of shape (batch, 2)."""
        x = self.pool1(self.relu1(self.conv1(x)))
        x = self.pool2(self.relu2(self.conv2(x)))
        x = x.view(x.size(0), -1)  # flatten per sample
        return self.fc2(self.relu3(self.fc1(x)))
def predict_brand(image):
    """Classify a PIL image as Adidas or Nike.

    Returns (brand_name, confidence), where confidence is the softmax
    probability of the predicted class.
    """
    # Load the saved weights for the brand classifier.
    model = CNN()
    model.load_state_dict(torch.load('./models/brand_detect_modelv1.pth'))
    model.eval()

    batch = transform(image).unsqueeze(0)  # add a batch dimension

    with torch.no_grad():
        logits = model(batch)
        probabilities = torch.softmax(logits, dim=1)
        confidence, label = torch.max(probabilities, 1)

    brand = "Adidas" if label.item() == 0 else "Nike"
    return brand, confidence.item()
def predict_auth(image):
    """Classify a PIL image as Authentic or Counterfeit.

    Returns (verdict, confidence), where confidence is the softmax
    probability of the predicted class.
    """
    # Load the saved weights for the authenticity classifier.
    model = CNN()
    model.load_state_dict(torch.load('./models/fake_detect_modelv1.pth'))
    model.eval()

    batch = transform(image).unsqueeze(0)  # add a batch dimension

    with torch.no_grad():
        logits = model(batch)
        probabilities = torch.softmax(logits, dim=1)
        confidence, label = torch.max(probabilities, 1)

    verdict = "Authentic" if label.item() == 0 else "Counterfeit"
    return verdict, confidence.item()
def main():
    """Streamlit app: upload a sneaker photo and display the predicted brand
    and authenticity, each with a confidence score."""
    st.title("Sneaker Authenticator")
    st.write("Upload an image of your sneakers to determine authenticity.")
    uploaded_image = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
    if uploaded_image is not None:
        # Cosmetic progress bar (~2.5s total) before showing results.
        progress_bar = st.progress(0)
        for i in range(10):
            time.sleep(0.25)  # Simulate a delay
            progress_bar.progress((i + 1) * 10)
        # Display the uploaded image
        # Create a container to center align the image
        container = st.container()
        # Add CSS to center align the image
        container.markdown(
            """
            <style>
            .center-image {
                display: flex;
                justify-content: center;
            }
            </style>
            """,
            unsafe_allow_html=True
        )
        # Display the uploaded image with a smaller size, centered
        container.image(uploaded_image, caption="Uploaded Image", width=300,
                        clamp=False)
        image = Image.open(uploaded_image)
        # Call the prediction function to determine the brand and authenticity
        # brand, authenticity = predict_sneaker(uploaded_image)
        brand = predict_brand(image)
        authenticity = predict_auth(image)
        # Display the (label, confidence) pairs returned by the predictors.
        st.write(f"Brand: {brand[0]} (Confidence Score: {brand[1]*100:.3f}%)")
        st.write(f"Authenticity: {authenticity[0]} (Confidence Score: {authenticity[1]*100:.3f}%)")


if __name__ == "__main__":
    main()
| waizwafiq/fakeshoe_detection | run.py | run.py | py | 4,940 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 11,
"usage_type": "call"
},
{
... |
40808949145 | from torch.nn import Module
import torch.nn as nn
class RNN(Module):
    """LSTM encoder followed by a small fully connected head.

    Encodes a (seq_len, batch, num_in) sequence and maps the final time
    step's output to `num_out` non-negative scores.
    """

    def __init__(self, num_in, num_layers, num_hidden, num_out):
        super(RNN, self).__init__()
        self.lstm = nn.LSTM(num_in, num_hidden, num_layers, dropout=0.3)
        self.body = nn.Sequential(
            nn.Linear(num_hidden, num_hidden),
            nn.Dropout(0.3),
            nn.Linear(num_hidden, num_out),
            nn.ReLU(),
        )

    def forward(self, X):
        outputs, _ = self.lstm(X)
        # Only the last time step feeds the head.
        return self.body(outputs[-1])
| Aryan187/Music-Recommendation-System | Model/rnn.py | rnn.py | py | 457 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "torch.nn.LSTM",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_n... |
75186846753 | import base64
import os
import glob
import tempfile
from time import sleep
import plaid
from dotenv import load_dotenv
from google.cloud import storage
from plaid.api import plaid_api
from plaid.model.institutions_get_request import InstitutionsGetRequest
from plaid.model.country_code import CountryCode
from plaid.model.institutions_get_request_options import InstitutionsGetRequestOptions
def main(data,context):
    """Cloud Function entry point: download every US institution logo from
    Plaid (sandbox) into /tmp and upload the images to a GCS bucket.

    `data`/`context` follow the GCF background-function signature; neither
    is used.
    """
    load_dotenv()
    PROJECT_ID = os.getenv('PROJECT_ID')
    BUCKET_NAME = os.getenv('BUCKET_NAME')
    TEMP_DIR = tempfile.gettempdir()
    FOLDER_NAME = os.getenv('FOLDER_NAME')
    CREATED_FOLDER = os.path.join(TEMP_DIR, FOLDER_NAME)
    PLAID_ID = os.getenv('PLAID_ID')
    PLAID_SECRET = os.getenv('PLAID_SECRET')
    # Credentials for cloud storage
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./cloud_storage.json"
    # Start from an empty scratch folder under /tmp.
    if os.path.exists(CREATED_FOLDER):
        os.system(str("rm -rf "+CREATED_FOLDER))
        path = os.path.join(TEMP_DIR, FOLDER_NAME)
        os.mkdir(path)
    else:
        path = os.path.join(TEMP_DIR, FOLDER_NAME)
        os.mkdir(path)
    configuration = plaid.Configuration(
        host=plaid.Environment.Sandbox,
        api_key={
            'clientId': PLAID_ID,
            'secret': PLAID_SECRET,
        }
    )
    # First request only fetches the total institution count (count=1).
    initial_request = InstitutionsGetRequest(
        country_codes=[CountryCode('US')],
        count=1,
        offset=1,
        options=InstitutionsGetRequestOptions(
            include_optional_metadata=True
        )
    )
    api_client = plaid.ApiClient(configuration)
    client = plaid_api.PlaidApi(api_client)
    initial_response = client.institutions_get(initial_request)
    initial_institutions = initial_response['total']
    # Loop through the institution Lists
    limit_value=500
    total_length=initial_institutions
    # NOTE(review): round() here can drop a partial last page; ceiling
    # division (-(-total // limit)) would page more predictably — confirm.
    if total_length >= limit_value:
        offset_value = round(total_length/limit_value)
    else:
        offset_value = round(total_length/limit_value)+1
    for j in range(offset_value):
        request = InstitutionsGetRequest(
            country_codes=[CountryCode('US')],
            count=limit_value,
            offset=j,
            options=InstitutionsGetRequestOptions(
                include_optional_metadata=True
            )
        )
        response = client.institutions_get(request)
        institutions = response['institutions']
        print(response)
        # Crude rate limiting: pause a minute every 7 pages, hard-stop
        # (process exit) after page 25.
        if j == 7:
            print("Limit 7 reached")
            sleep(60)
        if j == 14:
            print("Limit 14 reached")
            sleep(60)
        if j == 21:
            sleep(60)
        if j > 25:
            exit()
        for i in range(len(institutions)):
            get_inst_id = institutions[i].institution_id
            get_inst_logo = institutions[i].logo
            if get_inst_logo is not None:
                # Logos arrive base64-encoded; write each one as
                # <tmp>/<FOLDER_NAME>/<institution_id>.jpg.
                cwd = os.getcwd()
                change_dir = str(CREATED_FOLDER)
                os.chdir(change_dir)
                decoded_data = base64.b64decode((get_inst_logo))
                img_file = open(str(get_inst_id+".jpg"), 'wb')
                img_file.write(decoded_data)
                img_file.close()
                os.chdir(cwd)
            else:
                pass
    # Upload everything collected in the scratch folder to the bucket,
    # keeping FOLDER_NAME as the object prefix.
    STORAGE_CLIENT = storage.Client(project=PROJECT_ID)
    BUCKET = STORAGE_CLIENT.bucket(BUCKET_NAME)
    assert os.path.isdir(CREATED_FOLDER)
    for local_file in glob.glob(CREATED_FOLDER + '/**'):
        if not os.path.isfile(local_file):
            print("File "+str(local_file)+" ignored")
            continue
        else:
            remote_path = os.path.join(
                FOLDER_NAME, os.path.split(local_file)[-1])
            blob = BUCKET.blob(remote_path)
            blob.upload_from_filename(local_file)


if __name__ == "__main__":
    main('data','context')
    print("Success")
| shashwot-acme/wallet_function | plaid/main.py | main.py | py | 3,859 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempdir",
"lin... |
21589746404 | import torch.nn as nn
import torch
import numpy as np
def gen_sequential(
    channels,
    Norm_feature = 0,
    force_zero = False,
    force_zero_ceiling = 0.01,
    **kwargs
):
    """Build a Linear -> BatchNorm1d -> LeakyReLU(0.2) stage per channel pair.

    Args:
        channels: sequence of layer widths; consecutive pairs become
            ``nn.Linear(channels[i], channels[i+1])``.
        Norm_feature: if 0, each BatchNorm1d uses the layer's output width;
            otherwise every BatchNorm1d uses this fixed feature count.
        force_zero: when True, initialize each Linear with zero bias and
            weights drawn uniformly from [0, force_zero_ceiling).
        force_zero_ceiling: upper bound for the near-zero weight init.
        **kwargs: ignored; accepted for call-site compatibility.

    Returns:
        nn.ModuleList with 3 * (len(channels) - 1) modules.
    """
    assert len(channels) > 0
    modulist = []
    from_channel = channels[0]
    for to_channel in channels[1:]:
        linear_layer = nn.Linear(from_channel, to_channel)
        if force_zero:
            with torch.no_grad():
                linear_layer.bias.fill_(0)
                linear_layer.weight.uniform_(0, force_zero_ceiling)
            # In-place edits under no_grad() leave requires_grad untouched, so
            # the parameters stay trainable.  (The original code assigned
            # `linear_layer.requires_grad = True`, which only set an unused
            # attribute on the Module object, not on its parameters.)
            for param in linear_layer.parameters():
                param.requires_grad_(True)
        modulist.append(linear_layer)
        # Either track the layer's own width or a caller-fixed feature count.
        norm_width = to_channel if Norm_feature == 0 else Norm_feature
        modulist.append(nn.BatchNorm1d(norm_width))
        modulist.append(nn.LeakyReLU(0.2, inplace=True))
        from_channel = to_channel
    return nn.ModuleList(modulist)
class ResBlock(nn.Module):
    """Residual MLP block: a gen_sequential body wrapped with a skip path.

    With ``prob_ending`` the block combines input and body output in
    log-space and renormalises with a softmax over the last dimension;
    otherwise it is a plain additive residual connection.
    """

    def __init__(self, channels, force_zero = False, prob_ending = False,):
        super(ResBlock, self).__init__()
        self.channels = channels
        self.body = gen_sequential(channels, force_zero=force_zero)
        self.prob_ending = prob_ending

    def forward(self, X):
        residual = X
        out = X
        for stage in self.body:
            out = stage(out)
        if not self.prob_ending:
            return out + residual
        # Treat the input as probabilities: shift its log by the body output
        # and renormalise back into a distribution.
        return nn.Softmax(-1)(torch.log(residual) + out)
| TurquoiseKitty/BCGmeta_git | models/block_models.py | block_models.py | py | 1,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Linear",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm1d",
"li... |
33533072018 | import asyncio
import os
import requests
import time
from PIL import Image
from io import BytesIO
from datetime import datetime
import random
from telethon import events
from userbot.utils import admin_cmd
from userbot import ALIVE_NAME
from telethon.tl.types import ChannelParticipantsAdmins
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "ANONYMOUS"
# animation Idea by @NOOB_GUY_OP (Sipakisking)
# Made by @hellboi_atul ....and thanks to @Crackexy for the logos...
# Kang with credits else gay...
# alive.py for MASTERMIND USERBOT
# NOTE(review): `global` at module scope is a no-op -- module-level names are
# already global.  Kept for byte-compatibility with the original script.
global ghanti
# `borg` is the Telethon client injected by the userbot loader framework.
ghanti = borg.uid
# Seconds between successive edits of the alive animation.
edit_time = 3
""" =======================CONSTANTS====================== """
# Telegraph-hosted logo images cycled by the .alive animation.
file1 = "https://telegra.ph/file/19ea7a05f62940857d2df.jpg"
file2 = "https://telegra.ph/file/7d178738fb18a11eb6c7e.jpg"
file3 = "https://telegra.ph/file/dd324f29c0491b035eaea.jpg"
""" =======================CONSTANTS====================== """
# Markdown caption shown with the alive image (Telegram formatting).
pm_caption = "** Apun Zinda He Sarr ^.^** \n`BOT : ` **☣Hot(Bole toh ekdam garam) **\n\n"
pm_caption += "°•✮•° About My System °•✮•°\n\n"
pm_caption += "➾ **ᴛᴇʟᴇᴛʜᴏɴ ᴠᴇʀꜱɪᴏɴ** ☞ 1.15.0\n"
pm_caption += "➾ **ꜱᴜᴘᴘᴏʀᴛ ᴄʜᴀɴɴᴇʟ** ☞ [ᴊᴏɪɴ](https://t.me/XMASTER_MIND_BOT)\n"
pm_caption += "➾ **ʟɪᴄᴇɴꜱᴇ** ☞ [MASTERMIND™](https://github.com/RDX-ANONYMOUS/MASTER-MIND-BOT)\n"
pm_caption += "➾ **𝙲𝙾𝙿𝚈𝚁𝙸𝙶𝙷𝚃 𝙱𝚈** ☞ [𝕄𝔸𝕊𝕋𝔼ℝ 𝕄𝕀ℕ𝔻 𝕌𝕊𝔼ℝ𝔹𝕆𝕋](https://github.com/RDX-ANONYMOUS/MASTER-MIND-BOT)\n\n"
pm_caption += f"➾ **M̸R̸ N̸O̸O̸B̸** ☞ [{DEFAULTUSER}](tg://user?id={ghanti})\n"
@borg.on(admin_cmd(pattern=r"alive"))
async def hmm(yes):
    """Handle the `.alive` command: post the caption and animate the logo.

    Deletes the triggering message, sends `file1` with `pm_caption`, then
    edits the message through the same image sequence as before, pausing
    `edit_time` seconds between frames.  (The original also fetched the chat
    via `yes.get_chat()` without using the result -- that needless network
    round-trip is removed, and the seven copy-pasted sleep/edit pairs are
    collapsed into one loop.)
    """
    global ghanti
    ghanti = borg.uid
    await yes.delete()
    message = await borg.send_file(yes.chat_id, file=file1, caption=pm_caption)
    # Same frame order as the original hand-unrolled animation.
    for frame in (file2, file3, file1, file3, file2, file1, file2):
        await asyncio.sleep(edit_time)
        message = await borg.edit_message(yes.chat_id, message, file=frame)
| RDX-ANONYMOUS/MASTER-MIND-BOT | userbot/plugins/alive.py | alive.py | py | 2,629 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "userbot.ALIVE_NAME",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"l... |
9571528493 | from django.urls import path,include
# from watchlist_app.api.views import movie_list, movie_detail
from watchlist_app.api.views import GamePlatformVS,GamePlatform,ReviewCreate,ReviewDetail,ReviewList,WatchListAV,WatchDetailAV,GamePlatformAV,GamePlatformDetailAV
from rest_framework.routers import DefaultRouter
# DRF router: exposes the GamePlatform viewset under the 'Game' prefix.
router=DefaultRouter()
router.register('Game',GamePlatformVS,basename='gameplatform')
urlpatterns = [
    # Watch list: collection and single-item views.
    path('list/', WatchListAV.as_view(),name='game-list'),
    path('<int:pk>/',WatchDetailAV.as_view(),name='game-detail'),
    # Router-generated routes for the GamePlatform viewset.
    path('',include(router.urls)),
    # path('stream/',StreamPlatformAV.as_view(),name='stream-list'),
    # path('stream/<int:pk>',StreamPlatformDetailAV.as_view(),name='stream-detail'),
    # path('review/',ReviewList.as_view(),name='review-list'),
    # path('review/<int:pk>', ReviewDetail.as_view(),name='review-detail'),
    # Reviews are nested under the watch item they belong to.
    path('<int:pk>/review-create/',ReviewCreate.as_view(),name='review-create'),
    path('<int:pk>/reviews/',ReviewList.as_view(),name='review-list'),
    path('review/<int:pk>/',ReviewDetail.as_view(),name='review-detail')
]
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "watchlist_app.api.views.GamePlatformVS",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
... |
18801120419 | import numpy as np
import pandas as pd
import pyvista as pv
import sys
import logging
import tetgen
from dragen.utilities.InputInfo import RveInfo
class MeshingHelper:
    """Turn a voxelised RVE (DataFrame) into pyvista grids for meshing.

    Builds a rectilinear hex grid matching the RVE discretisation, tags cells
    with grain/phase IDs, and can tetrahedralise and Laplacian-smooth the
    per-grain surfaces.  `rve` is expected to carry x/y/z voxel coordinates,
    GrainID, phaseID and box_size columns.
    """

    def __init__(self, rve_shape: tuple = None, rve: pd.DataFrame = None, grains_df: pd.DataFrame = None):
        """Cache RVE extents, grain count and per-axis box sizes/resolutions."""
        self.rve = rve
        self.grains_df = grains_df
        self.x_max = int(max(rve.x))
        self.x_min = int(min(rve.x))
        self.y_max = int(max(rve.y))
        self.y_min = int(min(rve.y))
        self.z_max = int(max(rve.z))
        self.z_min = int(min(rve.z))
        self.n_grains = int(max(rve.GrainID))
        self.n_pts_x = rve_shape[0]
        # box_size may be stored per-row (Series) or as a scalar attribute.
        if isinstance(rve.box_size, pd.Series):
            self.bin_size = rve.box_size[0] / self.n_pts_x  # test
        else:
            self.bin_size = rve.box_size / self.n_pts_x  # test
        # Non-cubic RVEs: y/z fall back to the x box size and resolution
        # when RveInfo does not define them explicitly.
        if RveInfo.box_size_y is not None:
            self.box_size_y = RveInfo.box_size_y
            self.n_pts_y = rve_shape[1]
        else:
            self.box_size_y = RveInfo.box_size
            self.n_pts_y = self.n_pts_x
        if RveInfo.box_size_z is not None:
            self.box_size_z = RveInfo.box_size_z
            self.n_pts_z = rve_shape[2]
        else:
            self.box_size_z = RveInfo.box_size
            self.n_pts_z = self.n_pts_x

    def gen_blocks(self) -> pv.UnstructuredGrid:
        """this function generates a structured grid
        in py-vista according to the rve"""
        # NOTE(review): the /1000 looks like a unit conversion of box_size
        # (presumably µm -> mm) -- confirm against RveInfo's unit convention.
        xrng = np.linspace(0, RveInfo.box_size/1000, self.n_pts_x+1, endpoint=True)
        yrng = np.linspace(0, self.box_size_y/1000, self.n_pts_y+1, endpoint=True)
        zrng = np.linspace(0, self.box_size_z/1000, self.n_pts_z+1, endpoint=True)
        grid = pv.RectilinearGrid(xrng, yrng, zrng)
        grid = grid.cast_to_unstructured_grid()
        return grid

    def gen_grains(self, grid: pv.UnstructuredGrid) -> pv.UnstructuredGrid:
        """the grainIDs are written on the cell_array"""
        self.rve.sort_values(by=['z', 'y', 'x'], inplace=True)  # This sorting is important! Keep it that way
        # Add the data values to the cell data
        grid.cell_data["GrainID"] = self.rve['GrainID'].to_numpy()
        grid.cell_data["phaseID"] = self.rve['phaseID'].to_numpy()
        print(self.rve['phaseID'])
        # Now plot the grid!
        if RveInfo.anim_flag:
            # Off-screen renders: one screenshot coloured by phase, one by grain.
            plotter = pv.Plotter(off_screen=True)
            plotter.add_mesh(grid, scalars='phaseID',
                             show_edges=True, interpolate_before_map=True)
            plotter.add_axes()
            plotter.show(interactive=True, auto_close=True, window_size=[800, 600],
                         screenshot=RveInfo.store_path + '/Figs/pyvista_Hex_Mesh_phases.png')
            plotter.close()
            plotter = pv.Plotter(off_screen=True)
            plotter.add_mesh(grid, scalars='GrainID',
                             show_edges=True, interpolate_before_map=True)
            plotter.add_axes()
            plotter.show(interactive=True, auto_close=True, window_size=[800, 600],
                         screenshot=RveInfo.store_path + '/Figs/pyvista_Hex_Mesh_grains.png')
            plotter.close()
        return grid

    def smoothen_mesh(self, grid: pv.UnstructuredGrid, n_iter: int) -> pv.UnstructuredGrid:
        if not RveInfo.smoothing_flag:
            n_iter = 0
        # NOTE: the string below is a stray expression, not a real docstring,
        # because code precedes it; kept as in the original.
        """information about grainboundary elements of hex-mesh
        is extracted here and stored in pv.Polydata and
        in a pd.Dataframe
        n_iter: smoothing depending on framework that triggers the smoothing?
        """
        # Overall bounding box of the grid; used later to pin boundary nodes.
        x_max = max(grid.points[:, 0])
        x_min = min(grid.points[:, 0])
        y_max = max(grid.points[:, 1])
        y_min = min(grid.points[:, 1])
        z_max = max(grid.points[:, 2])
        z_min = min(grid.points[:, 2])
        numberOfGrains = self.n_grains
        gid_list = list()
        pid_list = list()
        ######################################
        assert RveInfo.element_type in ['C3D8', 'HEX8', 'C3D10', 'C3D4']
        if RveInfo.element_type != 'C3D8' and RveInfo.element_type != 'HEX8':
            # Tet element types: tetrahedralise each grain's surface separately
            # and merge the per-grain tet grids back into one grid.
            old_grid = grid.copy()
            grid_tet = pv.UnstructuredGrid()
            for i in range(1, numberOfGrains + 1):
                phase = self.rve.loc[self.rve['GrainID'] == i].phaseID.values[0]
                # NOTE(review): relies on the first cell_data array being
                # GrainID -- confirm the ordering is stable across pyvista versions.
                grain_grid_tet = old_grid.extract_cells(np.where(np.asarray(old_grid.cell_data.values())[0] == i))
                grain_surf_tet = grain_grid_tet.extract_surface(pass_pointid=True, pass_cellid=True)
                grain_surf_tet.triangulate(inplace=True)
                tet = tetgen.TetGen(grain_surf_tet)
                if RveInfo.element_type == 'C3D4':
                    tet.tetrahedralize(order=1, mindihedral=10, minratio=1.5, supsteiner_level=0, steinerleft=0)
                elif RveInfo.element_type == 'C3D10':
                    sys.exit('Element type Error! C3D10 currently not supported! Chose C3D4')
                    # NOTE(review): unreachable after sys.exit -- leftover from
                    # abandoned C3D10 support; confirm intent before removing.
                    node, elem = tet.tetrahedralize(order=2, mindihedral=10, minratio=1.5, supsteiner_level=0, steinerleft=0)
                tet_grain_grid = tet.grid
                ncells = tet_grain_grid.n_cells
                if RveInfo.gui_flag:
                    RveInfo.progress_obj.emit(75+(100*(i+1)/self.n_grains/4))
                # One GrainID / phaseID entry per generated tet cell.
                grainIDList = [i]
                grainID_array = grainIDList * ncells
                gid_list.extend(grainID_array)
                phaseIDList = [phase]
                phaseID_array = phaseIDList * ncells
                pid_list.extend(phaseID_array)
                if i == 1:
                    grid_tet = tet_grain_grid
                else:
                    grid_tet = tet_grain_grid.merge(grid_tet, merge_points=True)
            grid_tet.cell_data['GrainID'] = np.asarray(gid_list)
            grid_tet.cell_data['phaseID'] = np.asarray(pid_list)
            grid = grid_tet.copy()
        # Track every point with its original index so smoothed surface
        # coordinates can be written back in place.
        all_points_df = pd.DataFrame(grid.points, columns=['x', 'y', 'z'])
        all_points_df['ori_idx'] = all_points_df.index
        all_points_df_old = all_points_df.copy()
        # Flag points lying on each face of the bounding box.
        all_points_df_old['x_min'] = False
        all_points_df_old['y_min'] = False
        all_points_df_old['z_min'] = False
        all_points_df_old['x_max'] = False
        all_points_df_old['y_max'] = False
        all_points_df_old['z_max'] = False
        all_points_df_old.loc[(all_points_df_old.x == x_min), 'x_min'] = True
        all_points_df_old.loc[(all_points_df_old.y == y_min), 'y_min'] = True
        all_points_df_old.loc[(all_points_df_old.z == z_min), 'z_min'] = True
        all_points_df_old.loc[(all_points_df_old.x == x_max), 'x_max'] = True
        all_points_df_old.loc[(all_points_df_old.y == y_max), 'y_max'] = True
        all_points_df_old.loc[(all_points_df_old.z == z_max), 'z_max'] = True
        old_grid = grid.copy()  # copy doesn't copy the dynamically assigned new property...
        for i in range(1, numberOfGrains + 1):
            # Smooth each grain's surface and write the moved coordinates
            # back into the global point table via the original indices.
            grain_grid = old_grid.extract_cells(np.where(old_grid.cell_data['GrainID'] == i))
            grain_surf = grain_grid.extract_surface()
            grain_surf_df = pd.DataFrame(data=grain_surf.points, columns=['x', 'y', 'z'])
            merged_pts_df = grain_surf_df.join(all_points_df_old.set_index(['x', 'y', 'z']), on=['x', 'y', 'z'])
            grain_surf_smooth = grain_surf.smooth(n_iter=n_iter)
            smooth_pts_df = pd.DataFrame(data=grain_surf_smooth.points, columns=['x', 'y', 'z'])
            all_points_df.loc[merged_pts_df['ori_idx'], ['x', 'y', 'z']] = smooth_pts_df.values
            grain_vol = grain_grid.volume
            # NOTE(review): 10**9 looks like a volume unit conversion -- confirm.
            self.grains_df.loc[self.grains_df['GrainID'] == i, 'meshed_conti_volume'] = grain_vol * 10 ** 9
        self.grains_df[['GrainID', 'meshed_conti_volume', 'phaseID']].\
            to_csv(RveInfo.store_path + '/Generation_Data/grain_data_output_conti.csv', index=False)
        # Pin boundary points back onto the bounding-box faces so smoothing
        # does not shrink the RVE.
        all_points_df.loc[all_points_df_old['x_min'], 'x'] = x_min
        all_points_df.loc[all_points_df_old['y_min'], 'y'] = y_min
        all_points_df.loc[all_points_df_old['z_min'], 'z'] = z_min
        all_points_df.loc[all_points_df_old['x_max'], 'x'] = x_max
        all_points_df.loc[all_points_df_old['y_max'], 'y'] = y_max
        all_points_df.loc[all_points_df_old['z_max'], 'z'] = z_max
        grid.points = all_points_df[['x', 'y', 'z']].values
        return grid
| ibf-RWTH/DRAGen | dragen/utilities/PvGridGeneration.py | PvGridGeneration.py | py | 8,377 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "dragen.utilities.InputInfo.RveInfo.box_size_y",
"line_number": 30,
"usage_type": "attribute"
}... |
5357527996 | from watson_developer_cloud import ToneAnalyzerV3
import json
import os as os
import config
from watson_developer_cloud import WatsonException
class Tono():
    """Thin wrapper around the IBM Watson Tone Analyzer v3 service."""

    def __init__(self):
        # Credentials and API version come from the project config module.
        self.vTone_analyzer = ToneAnalyzerV3(
            username=config.WTAusername,
            password=config.WTApass,
            version=config.WTAversion)

    def invocarWToneAnalyzer(self, pRutaArhivo):
        """Analyze the tone of the text file at *pRutaArhivo*.

        The path is resolved relative to this module's directory.
        Returns ``[result, error_flag, error]`` where error_flag is 0 on
        success and 1 when the Watson API raised a WatsonException.
        """
        vError = 0
        vTonoResultado = ''
        err = ''
        try:
            with open(os.path.join(os.path.dirname(__file__), pRutaArhivo)) as vArchivo:
                # Bug fix: the original passed str(vArchivo) -- the repr of
                # the file OBJECT -- to the API instead of the file contents.
                vTonoResultado = self.vTone_analyzer.tone(
                    text=vArchivo.read()
                )
            return [vTonoResultado, vError, err]
        except WatsonException as err:
            vError = 1
            return [vTonoResultado, vError, err]
| joaquinpunales1992/Python-AIServices | AnalisisComportamiento_JPunales/WatsonToneAnalyzer.py | WatsonToneAnalyzer.py | py | 1,042 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "watson_developer_cloud.ToneAnalyzerV3",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.WTAusername",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "config.WTApass",
"line_number": 11,
"usage_type": "attribute"
},
{
"... |
15103968278 | from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from todolist.forms import CreateTask
from todolist.models import Task
import datetime
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.http import JsonResponse
from django.core import serializers
from django.urls import reverse
# Create your views here.
@login_required(login_url='/todolist/login/')
def show_todolist(request):
    """Render the logged-in user's task list page.

    Bug fix: the view previously queried ``Task.objects.all()`` and so
    displayed every user's tasks; tasks are created per-user elsewhere in
    this module, so the list is now filtered to ``request.user``.
    """
    user = request.user
    data_todolist = Task.objects.filter(user=user)
    context = {
        'todolist': data_todolist,
        'nama': 'Muhammad Ruzain',
        'npm': '2106750250',
        'user': user
    }
    return render(request, "todolist.html", context)
def register(request):
    """Show the sign-up form; create the account on a valid POST."""
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Akun telah berhasil dibuat!')
            return redirect('todolist:login')
    else:
        # GET (or any non-POST): present an empty form.
        form = UserCreationForm()
    return render(request, 'register.html', {'form': form})
def login_user(request):
    """Authenticate posted credentials and start a session.

    On success, redirects to the task list with a ``last_login`` cookie;
    on failure, flashes an error and re-renders the login page.
    """
    if request.method == 'POST':
        user = authenticate(
            request,
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is not None:
            login(request, user)  # melakukan login terlebih dahulu
            response = HttpResponseRedirect(reverse("todolist:show_todolist"))
            # Record when this session logged in.
            response.set_cookie('last_login', str(datetime.datetime.now()))
            return response
        messages.info(request, 'Username atau Password salah!')
    return render(request, 'login.html', {})
def logout_user(request):
    """End the current session and return to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse('todolist:login'))
# without ajax
def create_task(request):
    """Show the task form; persist a new Task for the user on a valid POST."""
    form = CreateTask()
    if request.method == "POST":
        form = CreateTask(request.POST)
        if form.is_valid():
            Task(
                user=request.user,
                title=request.POST.get("judul"),
                description=request.POST.get("deskripsi"),
            ).save()
            return redirect('todolist:show_todolist')
    return render(request, 'create_task.html', {'form': form})
# def change_status(request, id):
# task = Task.objects.get(id=id)
# task.is_finished = not task.is_finished
# task.save()
# return redirect('todolist:show_todolist')
# def delete(request, id):
# task = Task.objects.get(id=id)
# task.delete()
# return redirect('todolist:show_todolist')
def show_json(request):
    """Return every Task serialized as a JSON response."""
    payload = serializers.serialize("json", Task.objects.all())
    return HttpResponse(payload, content_type="application/json")
def add_task(request):
    """AJAX endpoint: create a Task for the current user (POST only).

    Bug fix: non-POST requests previously fell off the end and returned
    ``None``, which makes Django raise a ValueError; they now get an
    explicit 405 response.
    """
    if request.method == 'POST':
        Task.objects.create(
            user=request.user,
            title=request.POST.get('judul'),
            description=request.POST.get('deskripsi'),
        )
        return JsonResponse({}, status=200)
    return JsonResponse({'error': 'POST required'}, status=405)
def delete_task(request, id):
    """AJAX endpoint: delete the Task with the given id (POST only).

    Bug fix: non-POST requests previously returned ``None`` (a Django
    ValueError); they now get an explicit 405 response.
    """
    if request.method == 'POST':
        task = Task.objects.get(id=id)
        task.delete()
        return JsonResponse({}, status=200)
    return JsonResponse({'error': 'POST required'}, status=405)
def change_status(request, id):
    """AJAX endpoint: toggle a Task's is_finished flag (POST only).

    Bug fix: non-POST requests previously returned ``None`` (a Django
    ValueError); they now get an explicit 405 response.
    """
    if request.method == 'POST':
        task = Task.objects.get(id=id)
        task.is_finished = not task.is_finished
        task.save()
        return JsonResponse({}, status=200)
    return JsonResponse({'error': 'POST required'}, status=405)
| eruzetaien/PBPtugas2 | todolist/views.py | views.py | py | 3,925 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "todolist.models.Task.objects.all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "todolist.models.Task.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "todolist.models.Task",
"line_number": 26,
"usage_type": "name"
},
... |
import sys


def solve(k, items):
    """Classic 0/1 knapsack: max total value with total weight <= k.

    The original tracked one witness set of used item indices per capacity
    (with a deepcopy on every update).  That costs O(n*k) set copies and the
    single-witness heuristic is not guaranteed to find the optimum when
    equal-value states differ in composition.  This is the standard 1-D DP:
    iterate capacities downwards so each item is used at most once.
    """
    dp = [0] * (k + 1)
    for w, v in items:
        for cap in range(k, w - 1, -1):
            dp[cap] = max(dp[cap], dp[cap - w] + v)
    # dp[cap] is non-decreasing in cap, so dp[k] equals max(dp).
    return dp[k]


def main():
    input = sys.stdin.readline
    n, k = map(int, input().split())
    items = [tuple(map(int, input().split())) for _ in range(n)]
    print(solve(k, items))


if __name__ == "__main__":
    main()
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 14,
"usage_type": "call"
}
] |
43236512740 | from __future__ import print_function
import os
import rospkg
import rospy
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtCore import Signal
from python_qt_binding.QtGui import QIcon
from python_qt_binding.QtGui import QGraphicsScene
from python_qt_binding.QtGui import QWidget
from qt_dotgraph.dot_to_qt import DotToQtGenerator
from rqt_gui_py.plugin import Plugin
from rqt_capabilities.graphics_view import CapabilitiesInteractiveGraphicsView
from rqt_capabilities.dotcode import generate_dotcode_from_capability_info
from capabilities.service_discovery import spec_index_from_service
from capabilities.msg import CapabilityEvent
from capabilities.srv import GetRunningCapabilities
class CapabilityGraph(Plugin):
    """rqt plugin that renders the capability server's specs and running
    providers as an interactive dot graph."""

    # Queued signals so graph updates run on the Qt GUI thread.
    __deferred_fit_in_view = Signal()
    __redraw_graph = Signal()

    def __init__(self, context):
        """Load the UI, wire signals, and subscribe to capability events."""
        super(CapabilityGraph, self).__init__(context)
        self.setObjectName('CapabilityGraph')
        self.__current_dotcode = None
        self.__running_providers = []
        self.__spec_index = None
        self.__widget = QWidget()
        self.__dot_to_qt = DotToQtGenerator()
        rp = rospkg.RosPack()
        ui_file = os.path.join(rp.get_path('rqt_capabilities'), 'resources', 'CapabilityGraph.ui')
        loadUi(ui_file, self.__widget, {'CapabilitiesInteractiveGraphicsView': CapabilitiesInteractiveGraphicsView})
        self.__widget.setObjectName('CapabilityGraphUI')
        if context.serial_number() > 1:
            # Disambiguate window titles when several instances are open.
            self.__widget.setWindowTitle(self.__widget.windowTitle() + (' (%d)' % context.serial_number()))
        self.__scene = QGraphicsScene()
        self.__scene.setBackgroundBrush(Qt.white)
        self.__widget.graphics_view.setScene(self.__scene)
        self.__widget.refresh_graph_push_button.setIcon(QIcon.fromTheme('view-refresh'))
        self.__widget.refresh_graph_push_button.pressed.connect(self.__refresh_view)
        self.__refresh_view()
        self.__deferred_fit_in_view.connect(self.__fit_in_view, Qt.QueuedConnection)
        self.__deferred_fit_in_view.emit()
        self.__redraw_graph.connect(self.__update_capabilities_graph)
        # TODO: use user provided server node name
        rospy.Subscriber('/capability_server/events', CapabilityEvent, self.__handle_event)
        context.add_widget(self.__widget)

    def __handle_event(self, msg):
        """Track provider launch/termination events and request a redraw."""
        if msg.type == CapabilityEvent.STOPPED:
            return
        if msg.type == CapabilityEvent.LAUNCHED and msg.provider not in self.__running_providers:
            self.__running_providers.append(msg.provider)
        if msg.type == CapabilityEvent.TERMINATED and msg.provider in self.__running_providers:
            self.__running_providers.remove(msg.provider)
        # Emit the signal so the redraw happens on the GUI thread.
        self.__redraw_graph.emit()

    def __get_specs(self):
        """Fetch the capability spec index from the discovery service."""
        self.__spec_index, errors = spec_index_from_service()
        assert not errors

    def __get_running_providers(self):
        """Query the capability server for currently running providers."""
        # TODO: replace 'capability_server' with user provided server name
        service_name = '/{0}/get_running_capabilities'.format('capability_server')
        rospy.wait_for_service(service_name)
        get_running_capabilities = rospy.ServiceProxy(service_name, GetRunningCapabilities)
        response = get_running_capabilities()
        self.__running_providers = []
        for cap in response.running_capabilities:
            self.__running_providers.append(cap.capability.provider)

    def __refresh_view(self):
        """Re-fetch specs and running providers, then redraw the graph."""
        self.__get_specs()
        self.__get_running_providers()
        self.__update_capabilities_graph()

    def __update_capabilities_graph(self):
        """Regenerate dotcode from current state and push it to the view."""
        self.__update_graph_view(self.__generate_dotcode())

    def __generate_dotcode(self):
        """Build dotcode from the spec index and running providers."""
        return generate_dotcode_from_capability_info(self.__spec_index, self.__running_providers)

    def __update_graph_view(self, dotcode):
        """Redraw only when the dotcode actually changed."""
        if dotcode == self.__current_dotcode:
            return
        self.__current_dotcode = dotcode
        self.__redraw_graph_view()

    def __fit_in_view(self):
        """Scale the view so the whole graph is visible."""
        self.__widget.graphics_view.fitInView(self.__scene.itemsBoundingRect(), Qt.KeepAspectRatio)

    def __redraw_graph_view(self):
        """Lay out the current dotcode and rebuild the Qt scene items."""
        self.__widget.graphics_view._running_providers = self.__running_providers
        self.__widget.graphics_view._spec_index = self.__spec_index
        self.__scene.clear()
        highlight_level = 1
        # layout graph and create qt items
        (nodes, edges) = self.__dot_to_qt.dotcode_to_qt_items(self.__current_dotcode,
                                                              highlight_level=highlight_level,
                                                              same_label_siblings=True)
        # NOTE(review): dict.itervalues() is Python 2 only -- switch to
        # .values() if this plugin is ever ported to Python 3.
        for node_item in nodes.itervalues():
            self.__scene.addItem(node_item)
        for edge_items in edges.itervalues():
            for edge_item in edge_items:
                edge_item.add_to_scene(self.__scene)
        self.__scene.setSceneRect(self.__scene.itemsBoundingRect())
        self.__fit_in_view()
| graziegrazie/my_turtlebot | rocon/src/rqt_capabilities/src/rqt_capabilities/capability_graph.py | capability_graph.py | py | 5,052 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rqt_gui_py.plugin.Plugin",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "python_qt_binding.QtCore.Signal",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "python_qt_binding.QtCore.Signal",
"line_number": 33,
"usage_type": "call"
},
... |
4912088125 | from django.urls import path, include
from . import views
# URL routes for the voter-service application workflow.
urlpatterns = [
    path('',views.application,name='application'),
    path('upload',views.upload,name='upload'),
    path('preview', views.preview, name='preview'),
    path('modification', views.modification, name='modification'),
    path('status', views.status, name='status'),
    path('eEpic', views.eEpic, name='eEpic'),
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
74064928674 | import torch
def initialize_layer(layer, type = "normal", gain=0.02):
    """Initialize a single layer's weights/bias according to *type*.

    Conv/Linear layers get the chosen weight init and a zero bias (except
    for 'pretrained', which leaves everything untouched); BatchNorm2d gets
    N(1, gain) weights and a zero bias.  Raises NotImplementedError for an
    unknown *type*.
    """
    cls_name = layer.__class__.__name__
    has_trainable_weight = hasattr(layer, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name)
    if has_trainable_weight:
        initializers = {
            'normal': lambda w: torch.nn.init.normal_(w, 0.0, gain),
            'xavier': lambda w: torch.nn.init.xavier_normal_(w, gain=gain),
            'kaiming': lambda w: torch.nn.init.kaiming_normal_(w, a=0, mode='fan_in'),
            'orthogonal': lambda w: torch.nn.init.orthogonal_(w, gain=gain),
            'pretrained': lambda w: None,  # keep the loaded weights as-is
        }
        if type not in initializers:
            raise NotImplementedError('initialization method [%s] is not implemented' % type)
        initializers[type](layer.weight.data)
        if hasattr(layer, 'bias') and layer.bias is not None and type != 'pretrained':
            torch.nn.init.constant_(layer.bias.data, 0.0)
    elif 'BatchNorm2d' in cls_name:
        torch.nn.init.normal_(layer.weight.data, 1.0, gain)
        torch.nn.init.constant_(layer.bias.data, 0.0)
def initialize_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize *net* layer by layer, optionally wrapping in DataParallel.

    Each top-level child module must expose an ``initialization_layer``
    collection: grandchildren contained in it are initialized with
    *init_type*, everything else is treated as 'pretrained' (left as-is).
    Returns the DataParallel wrapper when GPUs were requested, otherwise
    the (initialized) net itself.
    """
    # Update on 11/18/2020, initialization looks good.
    wrapped = None
    if isinstance(gpu_ids, int):
        gpu_ids = [gpu_ids]
    if len(gpu_ids) > 0:
        assert (torch.cuda.is_available())
        net.to(int(gpu_ids[0]))
        wrapped = torch.nn.DataParallel(net, gpu_ids)
        # Initialize the underlying module, not the DataParallel shell.
        net = wrapped.module
    pretrain_count = 0
    initial_count = 0
    total = 0
    for root_child in net.children():
        for grandchild in root_child.children():
            total += 1
            if grandchild in root_child.initialization_layer:
                initialize_layer(grandchild, init_type, gain=init_gain)
                initial_count += 1
            else:
                initialize_layer(grandchild, "pretrained", gain=init_gain)
                pretrain_count += 1
    print("Initialization complete for total of {} layers, where {} initialized with pretrain and {} initialized with "
          "indicated type of initialization".format(total, pretrain_count, initial_count))
    return wrapped if wrapped is not None else net
{
"api_name": "torch.nn.init.normal_",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavier_normal_",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.... |
13042112688 | import pytest
import requests_mock
import datetime
from dateutil.tz import tzutc
from iotile.cloud.cloud import IOTileCloud
from iotile.cloud.config import link_cloud
from iotile.core.dev.config import ConfigManager
from iotile.core.dev.registry import ComponentRegistry
from iotile.core.exceptions import ArgumentError, ExternalError
import json
@pytest.fixture
def registry():
    """Yield a freshly-cleared ComponentRegistry; clear again on teardown.

    Bug fix: the teardown ``clear()`` used to sit after a bare ``yield``,
    so it was skipped whenever a test raised; try/finally guarantees the
    registry is wiped regardless of test outcome.
    """
    reg = ComponentRegistry()
    reg.clear()
    try:
        yield reg
    finally:
        reg.clear()
def test_login(registry):
    """Make sure successful login is properly handled."""
    login_response = {'jwt': 'big-token', 'username': 'user1'}
    manager = ConfigManager()
    with requests_mock.Mocker() as mock_adapter:
        mock_adapter.post('https://iotile.cloud/api/v1/auth/login/', json=login_response)
        link_cloud(manager, 'user1@random.com', 'password')
        # Credentials returned by the cloud must be persisted in the registry.
        assert registry.get_config('arch:cloud_user') == 'user1'
        assert registry.get_config('arch:cloud_token') == 'big-token'
def test_refresh(registry):
    """Make sure we can properly refresh our jwt token."""
    registry.set_config("arch:cloud_token", 'old-token')
    registry.set_config("arch:cloud_user", 'test_user')
    cloud = IOTileCloud()
    refresh_response = {'token': 'new-token'}
    with requests_mock.Mocker() as mock_adapter:
        mock_adapter.post('https://iotile.cloud/api/v1/auth/api-jwt-refresh/', json=refresh_response)
        cloud.refresh_token()
        # The refreshed token must replace the stored one.
        assert registry.get_config('arch:cloud_token') == 'new-token'
def test_alternative_domains(registry):
    """Make sure we can specify an alternative domain."""
    payload = {
        'jwt': 'big-token',
        'username': 'user1'
    }
    manager = ConfigManager()
    # Point the client at a non-default cloud server.
    manager.set('cloud:server', 'https://testcloud.com')
    with requests_mock.Mocker() as mocker:
        mocker.post('https://testcloud.com/api/v1/auth/login/', json=payload)
        link_cloud(manager, 'user1@random.com', 'password')
        assert registry.get_config('arch:cloud_user') == 'user1'
        assert registry.get_config('arch:cloud_token') == 'big-token'
    cloud = IOTileCloud()
    payload = {
        'token': 'new-token'
    }
    # Token refresh must hit the alternative domain as well.
    with requests_mock.Mocker() as mocker:
        mocker.post('https://testcloud.com/api/v1/auth/api-jwt-refresh/', json=payload)
        cloud.refresh_token()
        assert registry.get_config('arch:cloud_token') == 'new-token'
def test_check_time():
    """ Make sure we can check if the time is correct"""
    current_time_payload = {'now': datetime.datetime.now(tzutc()).strftime('%a, %d %b %Y %X %Z')}
    stale_time_payload = {'now': 'Wed, 01 Sep 2010 17:30:32 GMT'}
    login_payload = {'jwt': 'big-token', 'username': 'user1'}
    manager = ConfigManager()
    with requests_mock.Mocker() as mock_adapter:
        mock_adapter.post('https://iotile.cloud/api/v1/auth/login/', json=login_payload)
        link_cloud(manager, 'user1@random.com', 'password')
        cloud = IOTileCloud()
        # Server reporting the current time: check passes.
        mock_adapter.get('https://iotile.cloud/api/v1/server/', json=current_time_payload)
        assert cloud.check_time() == True
        # Server reporting a time far in the past: check fails.
        mock_adapter.get('https://iotile.cloud/api/v1/server/', json=stale_time_payload)
        assert cloud.check_time() == False
def test_get_fleet():
    """Make sure we can get fleets."""
    auth_payload = {
        'jwt': 'big-token',
        'username': 'user1'
    }
    # Mocked fleet listing for fleet id 1 with a single device.
    test_payload = {"count":1,
                    "next":"Null",
                    "previous":"Null",
                    "results":[{"device":"d--0000-0000-0000-0001","always_on":True,"is_access_point":False}]}
    # get_fleet() is expected to key device settings by device slug.
    expected = {
        "d--0000-0000-0000-0001":{
            "always_on":True,
            "is_access_point":False}
    }
    manager = ConfigManager()
    with requests_mock.Mocker() as mocker:
        mocker.post('https://iotile.cloud/api/v1/auth/login/', json=auth_payload)
        link_cloud(manager, 'user1@random.com', 'password')
        cloud = IOTileCloud()
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0001/devices/', json=test_payload)
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0002/devices/', status_code=404)
        assert cloud.get_fleet(1) == expected
        # A 404 from the cloud surfaces as an ArgumentError.
        with pytest.raises(ArgumentError):
            cloud.get_fleet(2)
        # Fleet ids above the 48-bit slug range are rejected.
        with pytest.raises(ArgumentError):
            cloud.get_fleet(pow(16,12) + 1)
def test_get_whitelist():
    """ Make sure we can retrieve the whitelist correctly """
    # Fixture file bundles all the mocked fleet/whitelist payloads.
    with open('test/large_mock_answer.json') as lma:
        j = json.load(lma)
    test_payload = j['whitelist_test']
    p1 = j['whitelist_g1']
    p2 = j['whitelist_g2']
    p3 = j['whitelist_g3']
    expected = j['expected']
    empty_whitelist_test = j['empty_whitelist_test']
    p4 = j['whitelist_g4']
    payload = {
        'jwt': 'big-token',
        'username': 'user1'
    }
    manager = ConfigManager()
    with requests_mock.Mocker() as mocker:
        mocker.post('https://iotile.cloud/api/v1/auth/login/', json=payload)
        link_cloud(manager, 'user1@random.com', 'password')
        cloud = IOTileCloud()
        # A 404 when looking up the device's fleets is an ExternalError.
        mocker.get('https://iotile.cloud/api/v1/fleet/?device=d--0000-0000-0000-0001', status_code=404)
        with pytest.raises(ExternalError):
            cloud.get_whitelist(1)
        # A device that belongs to no fleet also raises ExternalError.
        mocker.get('https://iotile.cloud/api/v1/fleet/?device=d--0000-0000-0000-0002', json={'results':[]})
        with pytest.raises(ExternalError):
            cloud.get_whitelist(2)
        # Device 0x1bd belongs to three fleets; the whitelist is the merge
        # of those fleets' device lists.
        mocker.get('https://iotile.cloud/api/v1/fleet/?device=d--0000-0000-0000-01bd', json=test_payload)
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0001/devices/', json=p1)
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0002/devices/', json=p2)
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0003/devices/', json=p3)
        assert cloud.get_whitelist(0x1bd) == expected
        # Fleets that yield an empty whitelist raise ExternalError.
        mocker.get('https://iotile.cloud/api/v1/fleet/?device=d--0000-0000-0000-01bd', json=empty_whitelist_test)
        mocker.get('https://iotile.cloud/api/v1/fleet/g--0000-0000-0004/devices/', json=p4)
        with pytest.raises(ExternalError):
            cloud.get_whitelist(0x1bd)
| iotile/coretools | iotile_ext_cloud/test/test_login.py | test_login.py | py | 6,110 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "iotile.core.dev.registry.ComponentRegistry",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "iotile.core.dev.config.ConfigManager",
"line_number": 30,
"usage_type": "c... |
10686813347 | from threading import Lock
import grpc
from google.protobuf.empty_pb2 import Empty as EmptyResponse
from kmol.core.logger import LOGGER as logging
from ...configs import ServerConfiguration
from ...exceptions import ClientAuthenticationError
from ...protocol_buffers import mila_pb2, mila_pb2_grpc
from .server_manager import ServerManager, Participant
class GrpcServicer(ServerManager, mila_pb2_grpc.MilaServicer):
    """gRPC servicer exposing the federated-learning server endpoints.

    Combines the session/round bookkeeping of ``ServerManager`` with the
    RPC methods generated from the ``mila`` protocol buffers.
    """

    def __init__(self, config: ServerConfiguration) -> None:
        super().__init__(config=config)
        # Serializes checkpoint saving and aggregation across concurrent RPCs.
        self.__lock = Lock()

    def _validate_token(self, token: str, context) -> bool:
        """Abort the RPC with PERMISSION_DENIED when *token* is invalid."""
        if not self.verify_token(token=token, ip_address=self._get_ip(context)):
            context.abort(grpc.StatusCode.PERMISSION_DENIED, "Access Denied... Token is invalid...")
        return True

    def _get_ip(self, context) -> str:
        # context.peer() is of the form "ipv4:<address>:<port>"; keep the address.
        return context.peer().split(":")[1]

    def Authenticate(self, request, context) -> "mila_pb2.Token":
        """Register a client by name/IP and return a session token.

        Fixed annotations: the original declared ``request: grpc`` (a module,
        not a type) and a ``str`` return although a ``mila_pb2.Token`` message
        is returned.
        """
        client_ip = self._get_ip(context)
        if not self.verify_ip(client_ip):
            context.abort(grpc.StatusCode.PERMISSION_DENIED, "Access Denied... IP Address is not whitelisted.")
        try:
            token = self.register_client(request.name, client_ip)
            return mila_pb2.Token(token=token)
        except ClientAuthenticationError as e:
            context.abort(grpc.StatusCode.PERMISSION_DENIED, str(e))

    def Heartbeat(self, request, context) -> EmptyResponse:
        """Record client liveness for the given token."""
        if self._validate_token(request.token, context):
            self.register_heartbeat(request.token)
            context.set_code(grpc.StatusCode.OK)
            return EmptyResponse()

    def Close(self, request, context) -> EmptyResponse:
        """Tear down the client session identified by the token."""
        if self._validate_token(request.token, context):
            self.close_connection(request.token)
            context.set_code(grpc.StatusCode.OK)
            return EmptyResponse()

    def RequestModel(self, request, context) -> mila_pb2.Model:
        """Hand the current model configuration/checkpoint to a client.

        Aborts with RESOURCE_EXHAUSTED while waiting for clients or for the
        next round, and with PERMISSION_DENIED once all rounds completed.
        """
        if self._validate_token(request.token, context):
            if self.should_wait_for_additional_clients():
                context.abort(grpc.StatusCode.RESOURCE_EXHAUSTED, "Waiting for more clients to join.")
            self.close_registration()
            if not self.are_more_rounds_required():
                context.abort(grpc.StatusCode.PERMISSION_DENIED, "All rounds have been completed. Closing session.")
            if not self.set_client_status_to_awaiting_response(request.token):
                context.abort(grpc.StatusCode.RESOURCE_EXHAUSTED, "Next round is not available yet.")
            client = self._registry[request.token]
            logging.info("[{}] Sending Model (round={})".format(client, client.round))
            return mila_pb2.Model(json_configuration=self.get_configuration(), latest_checkpoint=self.get_latest_checkpoint())

    def SendCheckpoint(self, request, context) -> EmptyResponse:
        """Store a client checkpoint; aggregate when the round is complete."""
        if self._validate_token(request.token, context):
            # The lock guards the shared round state while a checkpoint lands.
            with self.__lock:
                self.save_checkpoint(token=request.token, content=request.content)
                self.set_client_status_to_available(request.token)
                if self.are_all_updates_received():
                    self.aggregate()
                    self.enable_next_round()
            context.set_code(grpc.StatusCode.OK)
            return EmptyResponse()

    def get_client_filename_for_current_round(self, client: Participant):
        """Build the on-disk path for *client*'s checkpoint of the current round."""
        return "{}/{}.{}.{}.remote".format(
            self._config.save_path, client.name, client.ip_address.replace(".", "_"), self._current_round
        )
{
"api_name": "server_manager.ServerManager",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "protocol_buffers.mila_pb2_grpc.MilaServicer",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "protocol_buffers.mila_pb2_grpc",
"line_number": 13,
"usage_... |
72708313633 |
from functools import reduce
import discord
import linkbot.utils.queries.cmdban as cmdban
import neo4jdb as db
class Command:
    """A message parsed into prefix, command name and arguments.

    Attributes:
        message: The discord.Message that was sent.
        author: The user/member who sent the message.
        channel: The channel the command was sent in.
        guild: The guild the command was issued on; None for DMs.
        is_dm: Whether the message arrived through a direct message.
        has_prefix: Whether the message started with the bot prefix.
        command_arg: The command word that was sent.
        argstr: The raw string following the command word.
        args: Whitespace-stripped tokens following the command word.
        info: CommandInfo for the command; None if the command doesn't exist.
        is_valid: True when the command is recognised.
    """

    def __init__(self, bot, message: discord.Message):
        """Parse *message* into its command components."""
        self.bot_prefix = bot.prefix
        # Channel and (optional) guild.
        self.channel = message.channel
        if isinstance(self.channel, discord.TextChannel):
            self.guild = message.channel.guild
        else:
            self.guild = None
        # Message and author.
        self.author = message.author
        self.message = message
        self.is_dm = isinstance(self.channel, discord.DMChannel)
        # Strip the bot prefix, if present, from the working string.
        self.argstr = message.content
        self.has_prefix = message.content.startswith(bot.prefix)
        if self.has_prefix:
            self.argstr = self.argstr[len(bot.prefix):].lstrip()
        # The command word is everything up to the first space (or the whole string).
        self.command_arg, _, _ = self.argstr.partition(' ')
        # Everything after the command word becomes the arg string / token list.
        self.argstr = self.argstr[len(self.command_arg):].lstrip()
        self.args = [token for token in (raw.strip() for raw in self.argstr.split(' ')) if token]
        # Look up command metadata (None when unknown).
        self.info: CommandInfo = bot.commands.get(self.command_arg.lower())
        self.is_valid = self.info is not None

    async def is_banned(self):
        """Return True when the author is banned from this command on the guild."""
        if not self.guild:
            return False
        async with await db.Session.new() as sess:
            return await cmdban.get_member_is_banned_from_command(
                sess, self.guild.id, self.author.id, self.info.command_name)

    async def run(self):
        """Invoke the handler registered for this command."""
        await self.info.func(self)

    def shiftargs(self, count=1):
        """Drop up to *count* leading arguments from args/argstr."""
        for _ in range(count):
            if not self.args:
                break
            self.argstr = self.argstr[len(self.args[0]):].lstrip()
            self.args = self.args[1:]
class CmdExample:
    """One example invocation of a command and its described effect."""

    def __init__(self, cmd, effect):
        self.cmd = cmd        # example use of the command
        self.effect = effect  # descriptive effect of the example


class CommandInfo:
    """Metadata about a particular command: handler, syntax, examples, aliases."""

    def __init__(self, name, func, syntax, description, examples, aliases, show_in_help):
        self.command_name = name
        self.func = func
        self.syntax = syntax
        self.description = description
        self.examples = [CmdExample(cmd, effect) for (cmd, effect) in examples]
        self.aliases = aliases
        self.show_in_help = show_in_help

    def get_syntax_with_format(self, prefix, mk_down='`', sep=' || '):
        """Return all syntax cases joined by *sep*, each wrapped in *mk_down*.

        ``{c}`` inside a syntax case is replaced by prefix + command name.
        """
        rendered = ["{mk}{syn}{mk}".format(mk=mk_down, syn=case.format(c=prefix + self.command_name))
                    for case in self.syntax]
        return sep.join(rendered)

    def get_examples_with_format(self, prefix, cmd_as_code=True, cmd_ex_sep='\n\t', sep='\n'):
        """Return the examples joined by *sep*; command/effect split by *cmd_ex_sep*.

        The joined string is finally formatted with ``prefix=prefix``.
        """
        tick = '`' if cmd_as_code else ''
        rendered = ["{tick}{cmd}{tick}{ex_sep}{effect}".format(
            tick=tick, cmd=ex.cmd, ex_sep=cmd_ex_sep, effect=ex.effect) for ex in self.examples]
        return sep.join(rendered).format(prefix=prefix)

    def embed_syntax(self, embed, prefix, mk_down='`', title_mk_down='', sep=' || ', inline=False):
        """Add one field to *embed*: command name(s) as title, syntax as value."""
        title = "{mk}{cmd}{mk}".format(cmd=' | '.join([self.command_name] + self.aliases), mk=title_mk_down)
        embed.add_field(name=title,
                        value=self.get_syntax_with_format(prefix, mk_down, sep), inline=inline)

    def embed_examples(self, embed, prefix, cmd_as_code=True):
        """Add one field per example to *embed* (command as title, effect as value)."""
        tick = '`' if cmd_as_code else ''
        for ex in self.examples:
            embed.add_field(name="{mk}{cmd}{mk}".format(cmd=ex.cmd.format(c=prefix + self.command_name), mk=tick),
                            value=ex.effect, inline=False)
| tjbrockmeyer/LinkBot | linkbot/utils/command.py | command.py | py | 6,880 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "discord.Message",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "discord.TextChannel",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "... |
import codecs

import setuptools
import versioneer

# Files whose contents are concatenated into the PyPI long description.
DESCRIPTION_FILES = ["pypi-intro.rst"]

_description_parts = []
for filename in DESCRIPTION_FILES:
    with codecs.open(filename, 'r', 'utf-8') as f:
        _description_parts.append(f.read())
long_description = "\n".join(_description_parts)

setuptools.setup(
    name="root-solver",
    version=versioneer.get_version(),
    packages=setuptools.find_packages('src'),
    package_dir={'': 'src'},
    install_requires=[
        "numpy",
    ],
    python_requires='>=3.4',
    author="James Tocknell",
    author_email="aragilar@gmail.com",
    description="Root solver for polynomial equations",
    long_description=long_description,
    license="3-clause BSD",
    keywords="root solver",
    url="https://root-solver.readthedocs.io",
    project_urls={
        'Documentation': 'https://root-solver.readthedocs.io',
        'Source': 'https://github.com/aragilar/root-solver/',
        'Tracker': 'https://github.com/aragilar/root-solver/issues',
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Topic :: Scientific/Engineering',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
    ],
    cmdclass=versioneer.get_cmdclass(),
)
| aragilar/root-solver | setup.py | setup.py | py | 1,577 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "codecs.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "versioneer.get_version",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "setuptools.find_p... |
11626599818 | # -*- coding: utf-8 -*-
# @Time : 19-8-5 下午5:31
# @Author : huziying
# @File : half_circle.py
# 半圆形
import cv2
import numpy
import uuid
import base64
import os
from django.db import connection
from .utils_huziying import horizontal_measurements, vertical_measurements
numpy.set_printoptions(threshold=numpy.inf)
def half_circle_image_process(img):
    """Detect edges of a half-circle part and pick a reference point.

    Returns (coordinates, reference_coordinate) where *coordinates* is an
    array of (x, y) edge pixels and *reference_coordinate* is the edge pixel
    with the smallest x value (left-most edge point).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (15, 15), 0)
    # Otsu thresholding on top of a fixed 120 threshold, then Canny edges.
    binary = cv2.threshold(blurred, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    edges = cv2.Canny(binary, 200, 400, 3)
    indices = numpy.where(edges != [0])
    # numpy.where yields (rows, cols) == (y, x); store as (x, y) pairs.
    coordinates = numpy.array(list(zip(indices[1], indices[0])))
    reference_coordinate = coordinates[coordinates.argmin(axis=0)[0]]
    return coordinates, reference_coordinate
def main(image=None):
    """Measure a half-circle part and return the results plus an annotated image.

    (x, y) coordinate system: x grows to the right, y grows downward, so
    measurements run left-to-right and top-to-bottom.

    :param image: optional base64-encoded JPEG; when omitted a bundled
        template image is used instead.
    :return: dict with 'measurements_data' (list of measurements) and
        'image' (base64-encoded annotated JPEG).
    """
    img_name = uuid.uuid1()
    if not image:
        img = cv2.imread('measurement/template/half_circle_1.jpg')
    else:
        # Persist the upload so cv2 can read it back from disk.
        receive = base64.b64decode(image)
        with open('measurement/images/{}.jpg'.format(img_name), 'wb') as f:
            f.write(receive)
        img = cv2.imread('measurement/images/{}.jpg'.format(img_name))

    coordinates, reference_coordinate = half_circle_image_process(img)
    height, width, dimension = img.shape
    measurements_data, data = list(), dict()
    cursor = connection.cursor()

    # Vertical measurement windows (shape '1', direction '0'); the stored
    # corners are fractions of the image size, offset by the reference point.
    cursor.execute("select top_left, bottom_right, name from templates where shape = '1' and direction = '0' "
                   "order by name;")
    vertical = cursor.fetchall()
    for v in vertical:
        top_left = (int(v[0][0] * width + reference_coordinate[0]), int(v[0][1] * height + reference_coordinate[1]))
        bottom_right = (int(v[1][0] * width + reference_coordinate[0]), int(v[1][1] * height + reference_coordinate[1]))
        name = v[2]
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), thickness=1)
        # Keep only edge pixels inside the window, sorted by y for vertical runs.
        coordinates_limit = coordinates[numpy.where(
            (coordinates[:, 0] >= top_left[0]) & (coordinates[:, 0] <= bottom_right[0]) &
            (coordinates[:, 1] >= top_left[1]) & (coordinates[:, 1] <= bottom_right[1]))]
        coordinates_limit_sort = coordinates_limit[coordinates_limit[:, 1].argsort(), :]
        measurements_data.append(vertical_measurements(coordinates_limit_sort, img, name))

    # Horizontal measurement windows (shape '1', direction '1').
    cursor.execute("select top_left, bottom_right, name from templates where shape = '1' and direction = '1' "
                   "order by name;")
    horizontal = cursor.fetchall()
    for h in horizontal:
        top_left = (int(h[0][0] * width + reference_coordinate[0]), int(h[0][1] * height + reference_coordinate[1]))
        bottom_right = (int(h[1][0] * width + reference_coordinate[0]), int(h[1][1] * height + reference_coordinate[1]))
        name = h[2]
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), thickness=1)
        coordinates_limit = coordinates[numpy.where(
            (coordinates[:, 0] >= top_left[0]) & (coordinates[:, 0] <= bottom_right[0]) &
            (coordinates[:, 1] >= top_left[1]) & (coordinates[:, 1] <= bottom_right[1]))]
        coordinates_limit_sort = coordinates_limit[coordinates_limit[:, 0].argsort(), :]
        measurements_data.append(horizontal_measurements(coordinates_limit_sort, img, name))

    data['measurements_data'] = measurements_data
    # Write the annotated image, encode it for the response, then clean up.
    result_name = uuid.uuid1()
    cv2.imwrite('measurement/images/{}.jpg'.format(result_name), img)
    with open('measurement/images/{}.jpg'.format(result_name), 'rb') as f:
        base64_img = base64.b64encode(f.read())
    data.update({'image': base64_img})
    if os.path.exists('measurement/images/{}.jpg'.format(img_name)):
        os.remove('measurement/images/{}.jpg'.format(img_name))
    if os.path.exists('measurement/images/{}.jpg'.format(result_name)):
        os.remove('measurement/images/{}.jpg'.format(result_name))
    return data


if __name__ == '__main__':
    main()
| huzing2524/circuit_board | measurement/utils/half_circle.py | half_circle.py | py | 4,641 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY... |
40463001775 | from bs4 import BeautifulSoup
import requests
from datetime import datetime
# https://amzn.to/3E7V45Y -> Cetus Pro FPV
def cetus_price():
    """Fetch the current Amazon price of the Cetus Pro FPV drone.

    Returns a human-readable string such as
    "The current price of Cetus Pro FPV is: $79.99".

    Raises requests.RequestException on network/HTTP errors and
    ValueError when the page contains no price element.
    """
    url = 'https://amzn.to/3E7V45Y'
    headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    # Timeout so a stalled connection cannot hang the caller indefinitely,
    # and fail fast on non-2xx responses instead of parsing an error page.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')
    price_tag = soup.find('span', attrs={'class': 'a-offscreen'})
    if price_tag is None:
        raise ValueError('Price element not found on the Amazon page')
    price_text = "The current price of Cetus Pro FPV is: " + str(price_tag.text)
    return price_text
| enriquetecfan11/Telegram-Bot | amazonprice.py | amazonprice.py | py | 583 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
13576462360 | """
Laboratorio de Programación Básica en Python para Manejo de Datos
-----------------------------------------------------------------------------------------
Este archivo contiene las preguntas que se van a realizar en el laboratorio.
No puede utilizar pandas, numpy o scipy. Se debe utilizar solo las funciones de python
básicas.
Utilice el archivo `data.csv` para resolver las preguntas.
"""
import csv
from io import open

# Print the first record when the module loads, so the data layout is visible.
with open('data.csv', 'r') as _datafile:
    data = _datafile.readlines()
print(data[0])
def pregunta_01():
    """Return the sum of the second (tab-separated) column.

    Rta/ 214
    """
    with open('data.csv', mode='r') as fh:
        lines = fh.readlines()
    return sum(int(line.strip().split('\t')[1]) for line in lines)


def pregunta_02():
    """Return (letter, record count) tuples for column 1, alphabetically.

    Rta/ [("A", 8), ("B", 7), ("C", 5), ("D", 6), ("E", 14)]
    """
    with open('data.csv', mode='r') as fh:
        letters = [line.strip().split('\t')[0] for line in fh.readlines()]
    return [(letter, letters.count(letter)) for letter in sorted(set(letters))]


def pregunta_03():
    """Return (letter, sum of column 2) tuples, alphabetically ordered.

    Rta/ [("A", 53), ("B", 36), ("C", 27), ("D", 31), ("E", 67)]
    """
    with open('data.csv', mode='r') as fh:
        rows = [line.strip().split('\t')[:2] for line in fh.readlines()]
    totals = {}
    for letter, value in rows:
        totals[letter] = totals.get(letter, 0) + int(value)
    return sorted(totals.items())


def pregunta_04():
    """Return the record count per month ('MM') of the YYYY-MM-DD date column.

    Rta/ [("01", 3), ("02", 4), ..., ("12", 3)]
    """
    with open('data.csv', mode='r') as fh:
        # Characters 5:7 of a YYYY-MM-DD string are the month.
        months = [line.strip().split('\t')[2][5:7] for line in fh.readlines()]
    return [(month, months.count(month)) for month in sorted(set(months))]
def pregunta_05():
    """Return (letter, max, min) of column 2 for each letter of column 1.

    Rta/ [("A", 9, 2), ("B", 9, 1), ("C", 9, 0), ("D", 8, 3), ("E", 9, 1)]
    """
    extremes = {}  # letter -> [max, min]
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            value = int(row[1])
            if row[0] not in extremes:
                extremes[row[0]] = [value, value]
            else:
                pair = extremes[row[0]]
                pair[0] = max(pair[0], value)
                pair[1] = min(pair[1], value)
    return sorted((letter, hi, lo) for letter, (hi, lo) in extremes.items())


def pregunta_06():
    """Return (key, min, max) for each 'key:value' entry encoded in column 5.

    Rta/ [("aaa", 1, 9), ("bbb", 1, 9), ..., ("jjj", 5, 17)]
    """
    extremes = {}  # key -> [min, max]
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            for entry in row[4].split(','):
                key, _, raw = entry.partition(':')
                value = int(raw)
                if key not in extremes:
                    extremes[key] = [value, value]
                else:
                    extremes[key][0] = min(extremes[key][0], value)
                    extremes[key][1] = max(extremes[key][1], value)
    return sorted((key, lo, hi) for key, (lo, hi) in extremes.items())


def pregunta_07():
    """Return (column-2 value, list of column-1 letters in file order) pairs.

    Rta/ [(0, ["C"]), (1, ["E", "B", "E"]), ..., (9, ["A", "B", "E", "A", "A", "C"])]
    """
    groups = {}
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            groups.setdefault(int(row[1]), []).append(row[0])
    return sorted(groups.items())


def pregunta_08():
    """Return (column-2 value, sorted unique column-1 letters) pairs.

    Rta/ [(0, ["C"]), (1, ["B", "E"]), ..., (9, ["A", "B", "C", "E"])]
    """
    groups = {}
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            letters = groups.setdefault(int(row[1]), [])
            if row[0] not in letters:
                letters.append(row[0])
    for letters in groups.values():
        letters.sort()
    return sorted(groups.items())
def pregunta_09():
    """Return {key: number of appearances of that key in column 5}.

    Rta/ {"aaa": 13, "bbb": 16, ..., "jjj": 18}
    """
    counts = {}
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            for entry in row[4].split(','):
                key = entry.split(':')[0]
                counts[key] = counts.get(key, 0) + 1
    return dict(sorted(counts.items()))


def pregunta_10():
    """Return (letter, #items in column 4, #items in column 5) per record.

    Rta/ [("E", 3, 5), ("A", 3, 4), ("B", 4, 4), ...]
    """
    result = []
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            result.append((row[0], len(row[3].split(',')), len(row[4].split(','))))
    return result


def pregunta_11():
    """Return {column-4 letter: sum of column 2}, alphabetically ordered.

    Rta/ {"a": 122, "b": 49, ..., "g": 35}
    """
    totals = {}
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            for letter in row[3].split(','):
                totals[letter] = totals.get(letter, 0) + int(row[1])
    return dict(sorted(totals.items()))


def pregunta_12():
    """Return {column-1 letter: sum of the values encoded in column 5}.

    Rta/ {"A": 177, "B": 187, "C": 114, "D": 136, "E": 324}
    """
    totals = {}
    with open('data.csv') as csv_file:
        for row in csv.reader(csv_file, delimiter=' '):
            for entry in row[4].split(','):
                totals[row[0]] = totals.get(row[0], 0) + int(entry.split(':')[1])
    return dict(sorted(totals.items()))
print(pregunta_12())
| classroom-fundamentos-de-analitica/lab---python-basico-andresmonsalve19 | preguntas.py | preguntas.py | py | 10,852 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "io.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 73,
"usage_t... |
43482876476 | import asyncio
import logging
import threading
from typing import List
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.websockets import WebSocket, WebSocketDisconnect
from .saver import Saver
from .transport import Transfer, InputData
logging.basicConfig(level=logging.INFO)
app = FastAPI()
transfer = Transfer()
saver = Saver()
class WebSocketSender:
    """Broadcasts queued messages to all connected websocket clients.

    Fixed annotations: the original used list literals (``[WebSocket]``) as
    annotations instead of ``List[...]`` types.
    """

    # Class-level, shared state: every instance broadcasts to the same clients.
    clients: List[WebSocket] = []
    messages: List[InputData] = []
    is_on_thread = False

    async def send(self, data):
        """Send *data* to every client, dropping those that disconnected."""
        for client in self.clients.copy():
            try:
                await client.send_json({"data": data})
            except (WebSocketDisconnect, RuntimeError):
                try:
                    self.clients.remove(client)
                except ValueError:
                    # Another coroutine already removed this client.
                    pass

    async def thread_async(self):
        """Drain the message queue twice a second; only one drainer ever runs."""
        if self.is_on_thread:
            return
        self.is_on_thread = True
        while True:
            if self.messages:
                # pop() takes from the tail, so the batch is newest-first.
                batch = [self.messages.pop() for _ in range(len(self.messages))]
                await self.send(batch)
            await asyncio.sleep(0.5)


class Messages:
    # In-memory log of every message received from the transport.
    messages: List[InputData] = []


web_socket_sender = WebSocketSender()
# Allow any origin: this service is consumed by a browser frontend.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.on_event("startup")
async def startup():
    """Wire the transport callbacks and start the background reader thread."""
    transfer.callbacks.append(saver.save)
    transfer.callbacks.append(web_socket_sender.messages.append)
    transfer.callbacks.append(Messages.messages.append)
    threading.Thread(target=transfer.thread, daemon=True).start()


@app.on_event("shutdown")
def shutdown():
    """Stop the transport, detach callbacks and flush the saver."""
    transfer.is_on = False
    # NOTE(review): reconfiguring logging to DEBUG during shutdown looks
    # unintentional — confirm whether this line is still needed.
    logging.basicConfig(level=logging.DEBUG)
    transfer.callbacks.remove(saver.save)
    transfer.callbacks.remove(web_socket_sender.messages.append)
    transfer.callbacks.remove(Messages.messages.append)
    saver.close()


@app.websocket("/ws")
async def ws(websocket: WebSocket):
    """Accept a websocket client and (if needed) start the broadcast loop."""
    await websocket.accept()
    web_socket_sender.clients.append(websocket)
    await web_socket_sender.thread_async()


@app.get("/messages")
async def list_messages():
    """Return every message received so far."""
    return Messages.messages
| coma8765/gorocket-monitoring-backend | app/main.py | main.py | py | 2,255 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "transport.Trans... |
8283091296 | # https://github.com/SwarnadeepGhosh
# Six Days Weather Forecast using Python and MetaWeather API
import requests
import time
API_ROOT = 'https://www.metaweather.com'
API_LOCATION = '/api/location/search/?query=' # + city
API_WEATHER = '/api/location/' # + woeid
def print_pause(printable_data):
    """Pause one second, then print *printable_data* followed by a blank line."""
    time.sleep(1)
    print(printable_data + '\n')
def display_weather(weather_data):
    """Print one formatted forecast line (date, state, high/low in °C) per day.

    :param weather_data: list of dicts holding at least 'applicable_date',
        'weather_state_name', 'max_temp' and 'min_temp' (floats, °C) —
        the MetaWeather 'consolidated_weather' entries.
    """
    # Iterate the entries directly instead of indexing via range(len(...)).
    for day in weather_data:
        date = day['applicable_date']
        state = day['weather_state_name']
        max_temp = round(day['max_temp'], 2)
        min_temp = round(day['min_temp'], 2)
        print(f" {date} \t {state} \t High: {max_temp}°C \t Low: {min_temp}°C")
def check_again():
    """Ask whether to run another forecast; retry until the answer is y/n."""
    again = input('\nDo You want to check again ? (y/n) ').lower()
    if again == 'n':
        print_pause('Have a Good Day ..')
        time.sleep(1.5)
    elif again == 'y':
        weather_report()
    else:
        print('I cant understand, Please try again..')
        check_again()
def weather_report():
    """Interactively fetch and print a six-day forecast from MetaWeather.

    Asks for a city, resolves its WOEID via the location API, downloads the
    consolidated forecast, prints it and offers to run again. On an
    unrecognised city it recurses and asks once more.
    """
    try:
        print('Swarnadeep Welcomes you to the Weather Report Forecast.\n')
        time.sleep(1)
        city = input('Where in the World are you ? ')
        print_pause('\nGetting Location Data...')
        r1 = requests.get(API_ROOT + API_LOCATION + city)
        location_data = r1.json()
        # First match's "Where On Earth ID" identifies the city.
        woeid = location_data[0]['woeid']
        print_pause('Location Data Fetched successfully...')
        time.sleep(1)
        r2 = requests.get(API_ROOT + API_WEATHER + str(woeid))
        print_pause('Getting Weather Data, Please wait...')
        print_pause('Weather Data of ' + city.capitalize() + ':')
        # 'consolidated_weather' holds one dict per forecast day.
        weather_data = r2.json()['consolidated_weather']
        display_weather(weather_data)
        check_again()
    except requests.exceptions.ConnectionError:
        print('Sorry there is a network error')
        time.sleep(1)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of re-prompting forever.
        print("I don't know where that is. Please enter only famous cities.\n")
        time.sleep(1)
        weather_report()


if __name__ == '__main__':
    weather_report()
| SwarnadeepGhosh/Python-Small-Projects | weather_forecast_by_MetaWeather_API.py | weather_forecast_by_MetaWeather_API.py | py | 2,828 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 45... |
5965575419 | # -*- coding: iso-8859-1 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
import iris
import iris.quickplot as qplt
import iris.plot as iplt
from iris.util import unify_time_units
import cartopy.crs as ccrs
import cartopy.feature as cfeat
import glob
import copy
import datetime
import netCDF4
import cf_units
from plotFunctions import line, plot_figure, MedianPairwiseSlopes, open_file_with_cbar_extents, plot_map_of_time_average
# Climate-index name -> measurement unit (u'\u00B0' is the degree sign).
UNITS_DICT = {'CSDI': 'days', 'DTR': u'\u00B0C', 'FD': 'days', 'ID': 'days', 'SU': 'days',
'TN10p': '%', 'TN90p': '%', 'TNn': u'\u00B0C', 'TNx': u'\u00B0C', 'TR': 'days',
'TX10p': '%', 'TX90p': '%', 'TXn': u'\u00B0C', 'TXx': u'\u00B0C', 'WSDI': 'days' }
#'HW': 'days', 'TMGE10': 'days', 'TMGE5': 'days', 'TMLT10': 'days', 'TMLT10': 'days', 'TMm': u'\u00B0C','TMLT5': 'days', 'TX95T': u'\u00B0C', 'TXGE30': 'days', 'TXGE35': 'days', 'TXGT50P': '%',
#'TNLT2': 'days', 'TNLTM2': 'days', 'TNLTM20': 'days',
# gibt es, aber nicht bei climpact: 'GSL': 'days',
#UNITS_DICT = {'CSDI': 'days'}
#List of the indices which were calculated using python/climpact
python_indices = ['TXx', 'TNx', 'TXn', 'TNn', 'DTR', 'FD', 'TR']
climpact_indices = ['CSDI', 'ID', 'SU', 'TN10p', 'TN90p', 'TX10p', 'TX90p', 'WSDI']
REGIONS = {'SPAIN': [-7.5, 37.5, 0.0, 42.5], 'GERMANY': [5.0, 45.0, 15.0, 50.0], 'MOROCCO': [-5.0, 30.0, 5.0, 35.0]} #westerly longitude, southerly latitude, easterly longitude, northerly latitude
# 365 * 10: presumably converts per-day trend slopes into "per decade" — TODO confirm against its use sites.
TIME_FACTOR = 365*10.
# Per-region colorbar extents, filled in later during plotting (not visible in this chunk).
cbar_extent_GERMANY = {}
cbar_extent_MOROCCO = {}
cbar_extent_SPAIN = {}
# Main driver: for every climate index and every region, load the GHCNDEX
# gridded data, rebuild its time coordinate, compute area-weighted spatial
# averages, plot monthly/annual time series with median-pairwise trend lines,
# and plot per-gridpoint trend maps for the whole period and two sub-periods.
for INAME in UNITS_DICT.keys():
    print(INAME)
    ######################################################################################
    #load in data with constraints to correct region defined by longitudes and latitudes #
    ######################################################################################
    #INAME='TXx' #Decide which index should be used:
    filepath='/project/hadobs2/hadex3/ghcndex/GHCND_'+INAME+'_1951-2018_RegularGrid_global_2.5x2.5deg_LSmask.nc'
    # Time points are encoded as integer YYYYMMDD values; keep 1991-2015.
    time_constraint = iris.Constraint(time=lambda c: 20160101 > c.point > 19910100)
    #longitude_constraint = iris.Constraint(longitude=lambda c: 0<=c.point<=60 or 360.>=c.point>=342)
    # NOTE(review): this broad constraint is never applied; a per-region
    # lat_constraint is built inside the REGION loop instead.
    latitude_constraint = iris.Constraint(latitude=lambda c: 22< c.point<60)
    '''
    original_data = original_data.extract(latitude_constraint)
    data = iris.cube.CubeList()
    for i in range(len(original_data)):
        #cube.intersection changes longitudes from 0-360 degree to -180 - +180 degree.
        data.append(original_data[i].intersection(longitude=(-18, 60)))
    '''
    for REGION in REGIONS:
        print(REGION)
        OUTPATH = '/scratch/vportge/plots/GHCNDEX/'+REGION+'/'
        original_data=iris.load(filepath, time_constraint) #Data has name of the months.
        if len(original_data) == 0:
            print(INAME)
            continue
        left_lon = float(REGIONS[REGION][0])
        right_lon = float(REGIONS[REGION][2])
        lower_lat = float(REGIONS[REGION][1])
        upper_lat = float(REGIONS[REGION][3])
        lat_constraint = iris.Constraint(latitude=lambda c: lower_lat <= c.point <= upper_lat)
        #lon_constraint = iris.Constraint(longitude=lambda c: left_lon <= c.point <= right_lon)
        original_data = original_data.extract(lat_constraint)
        data = iris.cube.CubeList()
        for i in range(len(original_data)):
            #cube.intersection changes longitudes from 0-360 degree to -180 - +180 degree.
            data.append(original_data[i].intersection(longitude=(left_lon, right_lon)))
        ######################################################################################
        #Change time coordinate of data as it only contains the month via .name() of the cube#
        ######################################################################################
        spat_avg_month = iris.cube.CubeList()
        for i in range(len(data)):
            month_data = data[i]
            month_time = month_data.coord('time')
            month_datetime = []
            for j in range(len(month_time.points)):
                # The year comes from the YYYYMMDD time point; the month is
                # inferred from the cube's name ('Ann', 'Jan', ..., 'Dec').
                yyyy = datetime.datetime.strptime(str(int(month_time.points[j])), '%Y%m%d').year
                if month_data.name() == 'Ann':
                    mm = '01'
                elif month_data.name() == 'Jan':
                    mm = '01'
                elif month_data.name() == 'Feb':
                    mm = '02'
                elif month_data.name() == 'Mar':
                    mm = '03'
                elif month_data.name() == 'Apr':
                    mm = '04'
                elif month_data.name() == 'May':
                    mm = '05'
                elif month_data.name() == 'Jun':
                    mm = '06'
                elif month_data.name() == 'Jul':
                    mm = '07'
                elif month_data.name() == 'Aug':
                    mm = '08'
                elif month_data.name() == 'Sep':
                    mm = '09'
                elif month_data.name() == 'Oct':
                    mm = '10'
                elif month_data.name() == 'Nov':
                    mm = '11'
                elif month_data.name() == 'Dec':
                    mm = '12'
                month_datetime.append(datetime.datetime.strptime(str(yyyy)+str(mm)+'01', '%Y%m%d'))
            times_nums_units = netCDF4.date2num(month_datetime, units = 'days since 1970-01-01 00:00', calendar = 'standard')
            time_unit = cf_units.Unit( 'days since 1970-01-01 00:00', calendar='standard')
            new_timecoord = iris.coords.DimCoord(times_nums_units, standard_name = 'time', units = time_unit, var_name = "time")
            month_data.remove_coord('time')
            month_data.add_dim_coord(new_timecoord,0)
            #calculate spatial average#
            if month_data.name() == 'Ann':
                # Keep the annual cube separately; its index in `data` is
                # remembered so it can be removed from the monthly list below.
                ANN_data = copy.deepcopy(month_data)
                #calculate spatial average#
                ANN_data.coord('latitude').guess_bounds()
                ANN_data.coord('longitude').guess_bounds()
                ANN_data_areas = iris.analysis.cartography.area_weights(ANN_data)
                ANN_data_avg = ANN_data.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=ANN_data_areas)
                #ANN_data_avg = ANN_data_avg.collapsed('longitude', iris.analysis.MEAN)
                ANN_index = i*1.
            else:
                month_data.coord('latitude').guess_bounds()
                month_data.coord('longitude').guess_bounds()
                month_data_areas = iris.analysis.cartography.area_weights(month_data)
                month_avg = month_data.collapsed(['latitude', 'longitude'], iris.analysis.MEAN, weights=month_data_areas)
                #month_avg = month_avg.collapsed('longitude', iris.analysis.MEAN)
                spat_avg_month.append(month_avg)
        # NOTE(review): if the file contains no 'Ann' cube, ANN_index (and
        # ANN_data) are undefined here and this raises NameError — TODO confirm
        # that every input file always carries an annual cube.
        del(data[int(ANN_index)])
        #######################################################################################
        #cubelist.concatenate_cube() doesn't work so get the values and save them into a list.#
        #Leading to a list consisting of 12 lists (one for each month.) Sort the dates so that#
        #a time series can be plotted. times_spat_avg are the sorted dates (as numbers) and   #
        #values_spat_avg are the corresponding values of the index. (Spatially averaged!).    #
        #######################################################################################
        if len(original_data)>1:
            times_spat_avg = []
            values_spat_avg = []
            for i in spat_avg_month:
                time_month = i.coord('time')
                times_spat_avg.append(time_month.points)
                values_spat_avg.append(i.data)
            #flatten the lists
            times_spat_avg = [item for sublist in times_spat_avg for item in sublist]
            values_spat_avg = [item for sublist in values_spat_avg for item in sublist]
            #sort list by time coordinate
            times_spat_avg, values_spat_avg = (list(t) for t in zip(*sorted(zip(times_spat_avg, values_spat_avg))))
        ###################################################################
        #Plot map of averaged values over whole time period using ANN_data#
        ###################################################################
        '''
        plt.close()
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
        #ax.set_extent([ -30., 65., 30, 65. ], crs=ccrs.PlateCarree())
        ANN_data.coord('time').guess_bounds()
        ANN_data_time_avg = ANN_data.collapsed('time', iris.analysis.MEAN)
        ANN_data_lon = ANN_data_time_avg.coord('longitude').points
        ANN_data_lat = ANN_data_time_avg.coord('latitude').points
        cont = iplt.pcolormesh(ANN_data_time_avg, cmap = 'CMRmap')
        ax.set_extent((np.amin(ANN_data_lon)-2, np.amax(ANN_data_lon)+2, np.amin(ANN_data_lat)-2, np.amax(ANN_data_lat)+2), crs = ccrs.PlateCarree())
        political_bdrys = cfeat.NaturalEarthFeature(category='cultural',
                                                    name='admin_0_countries',
                                                    scale='50m')
        ax.add_feature(political_bdrys, edgecolor='b', facecolor='none', zorder=2)
        cb=fig.colorbar(cont, ax=ax, orientation='horizontal')
        cb.set_label(INAME+' Index Value')
        plt.title('Map of averaged '+INAME+' values (GHCNDEX)')
        plt.savefig(OUTPATH+INAME+'_GHCNDEX_map_averaged_'+REGION+'.png')
        '''
        ########################################################
        #Plot time series of years 1991 - 2015 with trend line #
        ########################################################
        if len(original_data)>1: #Then it's monthly data.
            plt.close()
            YDATA = values_spat_avg
            XDATA = times_spat_avg
            #convert to datetime object so that it can be plotted easily.
            times_datetime = [netCDF4.num2date(i, units = 'days since 1970-01-01 00:00', calendar = 'standard') for i in times_spat_avg]
            trendanalysis = MedianPairwiseSlopes(XDATA,YDATA)
            slope = trendanalysis[0]
            slope_lower_uncrty = trendanalysis[1]
            slope_upper_uncrty = trendanalysis[2]
            Y_INTERCEPTION = trendanalysis[3]
            trendline=line(np.array(XDATA), np.array(Y_INTERCEPTION), slope)
            fig = plt.figure(figsize = (10, 8))
            plt.plot(times_datetime, YDATA)
            plt.plot(times_datetime, trendline, label='trend: '+str(round(slope*TIME_FACTOR,2))+ ' ' + UNITS_DICT[INAME]+' per decade')
            plt.grid()
            plt.title('Time series of monthly GHCNDEX ' + INAME + ' in ' + REGION , size=22)
            plt.xlabel('years', size=20)
            plt.ylabel(UNITS_DICT[INAME], size=20)
            plt.legend(fontsize = 16)
            plt.tight_layout()
            plt.tick_params(axis='both', which='major', labelsize=16)
            plt.savefig(OUTPATH+INAME+'_time_series_GHCNDEX_with_trend_monthly_'+REGION+'.png')
        # Annual time series: full period plus two sub-periods split at index
        # 14 (i.e. 1991-2004 and 2005-2015 — TODO confirm the split year).
        YDATA = ANN_data_avg.data
        XDATA = ANN_data_avg.coord('time').points
        #convert to datetime object so that it can be plotted easily.
        times_datetime = [netCDF4.num2date(i, units = 'days since 1970-01-01 00:00', calendar = 'standard') for i in ANN_data_avg.coord('time').points]
        YDATA1 = YDATA[0:14]
        YDATA2 = YDATA[14:]
        XDATA1 = XDATA[0:14]
        XDATA2 = XDATA[14:]
        times_datetime1 = times_datetime[0:14]
        times_datetime2 = times_datetime[14:]
        trendanalysis = MedianPairwiseSlopes(XDATA,YDATA)
        slope = trendanalysis[0]
        Y_INTERCEPTION = trendanalysis[3]
        trendanalysis1 = MedianPairwiseSlopes(XDATA1,YDATA1)
        slope1 = trendanalysis1[0]
        Y_INTERCEPTION1 = trendanalysis1[3]
        trendanalysis2 = MedianPairwiseSlopes(XDATA2,YDATA2)
        slope2 = trendanalysis2[0]
        Y_INTERCEPTION2 = trendanalysis2[3]
        trendline=line(np.array(XDATA), np.array(Y_INTERCEPTION), slope)
        trendline1=line(np.array(XDATA1), np.array(Y_INTERCEPTION1), slope1)
        trendline2=line(np.array(XDATA2), np.array(Y_INTERCEPTION2), slope2)
        plt.close()
        fig=plt.figure(figsize = (10, 8))
        plt.plot(times_datetime, YDATA)
        plt.plot(times_datetime, trendline, label='trend: '+str(round(slope*TIME_FACTOR,2))+ ' ' + UNITS_DICT[INAME]+' per decade'+ ' (1991-2015)')
        plt.plot(times_datetime1, trendline1, label='trend: '+str(round(slope1*TIME_FACTOR,2))+ ' ' + UNITS_DICT[INAME]+' per decade'+ ' (1991-2004)')
        plt.plot(times_datetime2, trendline2, label='trend: '+str(round(slope2*TIME_FACTOR,2))+ ' ' + UNITS_DICT[INAME]+' per decade'+ ' (2005-2015)')
        plt.grid()
        plt.title('Time series of annually GHCNDEX ' + INAME + ' in ' + REGION , size=22)
        plt.xlabel('years', size=20)
        plt.ylabel(UNITS_DICT[INAME], size=20)
        plt.legend(fontsize = 16)
        plt.tight_layout()
        plt.tick_params(axis='both', which='major', labelsize=16)
        plt.savefig(OUTPATH+INAME+'_time_series_GHCNDEX_with_trend_annually_'+REGION+'.png')
        #####################################################
        #Plot map of trend for each gridpoint using ANN_data#
        #####################################################
        min_or_max = ['min', 'max']
        for cold_win in min_or_max:
            '''Read in the files which contain the lowest and highest values of CLIMPACT/PYTHON created indices.
            Those values can be used to set the extent of the GHCNDEX plots so that the colorbars show the same range.'''
            cbar_path_python = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_python_cbar_'+REGION+'.txt'
            cbar_path_climpact = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_climpact_cbar_'+REGION+'.txt'
            cbar_dict_python = open_file_with_cbar_extents(cbar_path_python)
            cbar_dict_climpact = open_file_with_cbar_extents(cbar_path_climpact)
            #Pcolormesh will be used for plotting which uses the boundarie points.
            GRIDLONS = np.append(ANN_data.coord('longitude').bounds[:,0], ANN_data.coord('longitude').bounds[-1,1])
            GRIDLATS = np.append(ANN_data.coord('latitude').bounds[:,0], ANN_data.coord('latitude').bounds[-1,1])
            TRENDS_ANN = np.ma.zeros(ANN_data.shape[1:3]) # lat, lon
            XDATA_ANN = ANN_data.coord('time').points
            for lat in range(ANN_data.shape[1]):
                for lon in range(ANN_data.shape[2]):
                    YDATA_GRIDPOINT = ANN_data[:, lat, lon]
                    TRENDS_ANN[lat,lon] = MedianPairwiseSlopes(XDATA_ANN,YDATA_GRIDPOINT.data)[0]*TIME_FACTOR
            TRENDS_ANN = np.ma.masked_where(np.isnan(TRENDS_ANN), TRENDS_ANN)
            OUTNAME = OUTPATH+INAME+'_map_of_trend_'+REGION+'_'+cold_win+'.png'
            if INAME in python_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME , UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_python[INAME]]
                if REGION == 'GERMANY':
                    cbar_extent_GERMANY[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
                elif REGION == 'SPAIN':
                    cbar_extent_SPAIN[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
                elif REGION == 'MOROCCO':
                    cbar_extent_MOROCCO[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            elif INAME in climpact_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME , UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_climpact[INAME.lower()]]
                if REGION == 'GERMANY':
                    cbar_extent_GERMANY[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
                elif REGION == 'SPAIN':
                    cbar_extent_SPAIN[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
                elif REGION == 'MOROCCO':
                    cbar_extent_MOROCCO[INAME] = plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            else:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME , UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, False]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            ################################################################
            #begin Calculation for trends for first time period: 1991 -2004#
            ################################################################
            cbar_path_python = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_python_cbar_'+REGION+'_period1.txt'
            cbar_path_climpact = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_climpact_cbar_'+REGION+'_period1.txt'
            cbar_dict_python = open_file_with_cbar_extents(cbar_path_python)
            cbar_dict_climpact = open_file_with_cbar_extents(cbar_path_climpact)
            TRENDS_ANN = np.ma.zeros(ANN_data.shape[1:3]) # lat, lon
            XDATA_ANN = ANN_data.coord('time').points[0:14]
            for lat in range(ANN_data.shape[1]):
                for lon in range(ANN_data.shape[2]):
                    YDATA_GRIDPOINT = ANN_data[0:14, lat, lon]
                    TRENDS_ANN[lat,lon] = MedianPairwiseSlopes(XDATA_ANN,YDATA_GRIDPOINT.data)[0]*TIME_FACTOR
            TRENDS_ANN = np.ma.masked_where(np.isnan(TRENDS_ANN), TRENDS_ANN)
            OUTNAME = OUTPATH+INAME+'_1991-2004_map_of_trend_'+REGION+'_'+cold_win+'.png'
            if INAME in python_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (1991-2004)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_python[INAME]]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            elif INAME in climpact_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (1991-2004)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_climpact[INAME.lower()]]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            else:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (1991-2004)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, False]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            ################################################################
            #begin Calculation for trends for second time period: 2005-2015#
            ################################################################
            cbar_path_python = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_python_cbar_'+REGION+'_period2.txt'
            cbar_path_climpact = '/scratch/vportge/plots/textfiles_with_cbar_extent/'+cold_win+'_LST_in_cold_window_CMSAF_climpact_cbar_'+REGION+'_period2.txt'
            cbar_dict_python = open_file_with_cbar_extents(cbar_path_python)
            cbar_dict_climpact = open_file_with_cbar_extents(cbar_path_climpact)
            TRENDS_ANN = np.ma.zeros(ANN_data.shape[1:3]) # lat, lon
            XDATA_ANN = ANN_data.coord('time').points[14:]
            for lat in range(ANN_data.shape[1]):
                for lon in range(ANN_data.shape[2]):
                    YDATA_GRIDPOINT = ANN_data[14:, lat, lon]
                    TRENDS_ANN[lat,lon] = MedianPairwiseSlopes(XDATA_ANN,YDATA_GRIDPOINT.data)[0]*TIME_FACTOR
            TRENDS_ANN = np.ma.masked_where(np.isnan(TRENDS_ANN), TRENDS_ANN)
            OUTNAME = OUTPATH+INAME+'_2005-2015_map_of_trend_'+REGION+'_'+cold_win+'.png'
            if INAME in python_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (2005-2015)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_python[INAME]]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            elif INAME in climpact_indices:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (2005-2015)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, cbar_dict_climpact[INAME.lower()]]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
            else:
                FIG_INFO = ['Trend of annually GHCNDEX ' + INAME +' (2005-2015)', UNITS_DICT[INAME], OUTPATH, REGION, OUTNAME, False]
                plot_figure(TRENDS_ANN, GRIDLONS, GRIDLATS, FIG_INFO)
        #Plot map of averaged values
        CUBEINFO = ['annually', INAME, REGION, 'ANN', OUTPATH, 'GHCNDEX', TIME_FACTOR, UNITS_DICT[INAME]]
        plot_map_of_time_average(ANN_data, CUBEINFO)
OUTPATH_trends = '/scratch/vportge/plots/GHCNDEX/'
# Persist the colour-bar extents collected per region, one "INDEX, extent"
# line per entry. The three verbatim copies of this write block were collapsed
# into a single loop (same files, same format, same content).
for _region_name, _extents in (('GERMANY', cbar_extent_GERMANY),
                               ('SPAIN', cbar_extent_SPAIN),
                               ('MOROCCO', cbar_extent_MOROCCO)):
    with open(OUTPATH_trends + 'cbar_' + _region_name + '.txt', 'w') as f:
        for key, value in _extents.items():
            f.write('%s, %s\n' % (key, value))
| vpoertge/SatEX | climate_index_calculation/plot_indices/plot_ghcndex_indices.py | plot_ghcndex_indices.py | py | 21,522 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "iris.Constraint",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "iris.Constraint",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "iris.load",
"line_n... |
28752304494 |
"""
Created on Fri Dec 3 09:10:44 2021
@author: adaskin
"""
import random
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from sim_tree import generate_tree_elements,sum_of_nonzeropaths
import sklearn.datasets as datasets
import scipy.sparse
def polar2xy(r, theta):
    """Convert polar coordinates (radius ``r``, angle ``theta`` in radians)
    to Cartesian ``(x, y)``."""
    return r * math.cos(theta), r * math.sin(theta)
def randPolarDot(r1, r2):
    """Draw one random polar point with radius uniform in [r1, r2] and
    angle uniform in [0, 2*pi).

    NOTE: sampling the radius uniformly does NOT give area-uniform points
    inside the annulus — the original author flagged this with the comment
    'hatali' (Turkish for 'faulty'). Behavior kept as-is.
    """
    radius = random.random() * (r2 - r1) + r1   # uniform in [r1, r2]
    angle = random.random() * math.pi * 2       # uniform angle
    return radius, angle
# Build a synthetic "rings" dataset (nrings concentric annuli of ndots random
# points each), form the Gram matrix, normalise its flattened form as a state
# vector, decompose it with the sim_tree Schmidt tree, and plot a histogram of
# the path probabilities together with the reconstruction error.
figure, ax1 = plt.subplots()
#ax2 = figure2.add_subplot(2,2,1,projection='3d')
CL = ['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown']
nrings = 4
ndots = 256
ringSize = 5;
X = np.zeros([nrings*ndots,2])
count_dot = 0
for iring in range(nrings):
    for jdot in range(ndots):
        # ring i covers radii [i*ringSize, (i+1)*ringSize]
        [r, theta] = randPolarDot(iring*ringSize, (iring+1)*ringSize)
        [x, y] = polar2xy(r, theta)
        X[count_dot,:] = [x, y]
        count_dot += 1
        ax1.scatter(x, y, color=CL[iring],marker='.')
        #ax2.scatter3D(x, y, x**2+y**2, c=CL[iring]);
#ax1.set_xlabel("x")
#ax1.set_ylabel("y")
figure.savefig('rings.eps', bbox_inches='tight')
figure.savefig('rings.png',bbox_inches='tight')
threshold = 0.020
# Gram matrix of the point cloud; flattened and normalised it plays the role
# of a quantum state vector for the tree decomposition.
G = X@X.transpose()
if scipy.sparse.issparse(G):
    G = G.toarray()
psi = G.flatten()
psi_norm = np.linalg.norm(psi)
psi = psi/psi_norm
(usv_tree, nonzeropath) = generate_tree_elements(psi)
# Collect the path coefficient (4th tuple element) of every leaf-level node;
# non-tuple entries are pruned paths and count as 0.
L = []
for i in range(int(psi.size/2)-1, psi.size-1):
    if isinstance(usv_tree[i], tuple):
        u, s, v, sprev = usv_tree[i]
        print("shape:", v.shape)
        L.append(usv_tree[i][3])
    else:
        L.append(0)
fig, ax = plt.subplots()
ax.axvline(threshold, linestyle='--')
values, bins, bars = ax.hist(L)
ax.set_xlabel("probability (coefficient) of the path")
ax.set_ylabel("Number of paths")
ax.set_title('n: {}-qubits, data: {} '.format(int(np.log2(psi.size)), 'rings'))
ax.bar_label(bars, fontsize=9, color='red')
# Reconstruct the state from paths above the threshold and report the error.
p, npaths = sum_of_nonzeropaths(usv_tree,threshold)
print(np.linalg.norm(p-psi))
print(np.linalg.norm(p-psi,1))
print(np.dot(psi,p))
#X_tensor = psi_norm*p.reshape(N,N)
norm_of_diff = np.linalg.norm(psi-p)
print("norm of diff:",norm_of_diff)
ax.text(0.75, 0.75, 'norm of diff={:5.2E}'.format(norm_of_diff), horizontalalignment='center',
        verticalalignment='center', transform=ax.transAxes)
#print("mean error matrix unnormalized:",np.mean((X_tensor-G)**2))
plt.savefig('rings20qubits.eps', bbox_inches='tight')
plt.savefig('rings20qubits.png',bbox_inches='tight')
| adaskin/app_with_schmidt | reduction_of_rings.py | reduction_of_rings.py | py | 2,608 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.cos",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 25... |
39285486469 | import cv2 as cv
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.cm as cm
from time import sleep
import numpy as np
# Haar-cascade frontal-face detector; the XML model file must be present in
# the working directory (shipped with OpenCV).
face_detector = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
def show_points(room_size, tracked_points, cam_position, pad=2):
    """Render one frame of the room-tracking view.

    Draws the room rectangle, the camera wall line and marker, and one dot
    per tracked person, then pauses briefly and clears the figure so the
    same plot can be reused for the next frame.

    room_size      -- (x_min, y_min, x_max, y_max) of the room
    tracked_points -- iterable of (x, y) positions, one per person
    cam_position   -- (x, y) of the webcam
    pad            -- axis margin around the room, in the same units
    """
    width = room_size[2] - room_size[0]
    height = room_size[3] - room_size[1]
    room = plt.Rectangle((0, 0), width, height)  # , fc='r')
    # vertical line marking the wall the camera sits on
    plt.plot((cam_position[0], cam_position[0]), (0, height), color='yellow', linewidth=5)
    plt.gca().add_patch(room)
    plt.xlim((room_size[0] - pad, room_size[2] + pad))
    plt.ylim((room_size[1] - pad, room_size[3] + pad))
    plt.plot(cam_position[0], cam_position[1], marker='D', color='red', markersize=25, label=' Web Camera')
    colormap = plt.cm.gist_ncar
    # one distinct colour per tracked person
    palette = [colormap(v) for v in np.linspace(0, 0.9, len(tracked_points))]
    for idx, point in enumerate(tracked_points):
        plt.plot(point[0], point[1], marker='o', color=palette[idx])
    plt.pause(0.5)
    plt.legend(loc='best')
    plt.pause(0.5)
    plt.clf()  # reuse the same figure for the next frame
| PrateekMunjal/Face-detection-webcam-opencv | utils.py | utils.py | py | 1,252 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.Rectangle",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "ma... |
42447191423 | import json
import os
from logic.payment_method import PaymentMethod
# Resolve the data directory relative to the current working directory
# (the process must therefore be started from the project root).
PATH = os.getcwd()
DIR_DATA = PATH + '{0}data{0}'.format(os.sep)
class PaymentMethodController(object):
    """Simple controller over the payment-method JSON store.

    The backing file must contain an object of the form
    ``{"payment_methods": [ {...}, ... ]}``.
    """

    def __init__(self):
        # Path of the JSON file holding all payment methods.
        self.file = '{0}{1}'.format(DIR_DATA, 'payment.json')

    def add(self, payment_method: "PaymentMethod" = None) -> str:
        """Append *payment_method* to the store and return its string form.

        Fix: the old signature used ``payment_method=PaymentMethod()``, which
        is evaluated once at class-definition time, so every default call
        appended the very same shared instance. A fresh instance is now
        created per call; passing an explicit PaymentMethod works as before.
        """
        if payment_method is None:
            payment_method = PaymentMethod()
        with open(self.file, 'r+') as f:
            data = json.load(f)
            data['payment_methods'].append(payment_method.__dict__)
            print(payment_method.__str__())
            f.seek(0)
            json.dump(data, f)
            # Guard against stale trailing bytes if the new JSON is ever
            # shorter than the old content (seek(0) alone does not truncate).
            f.truncate()
        return payment_method.__str__()

    def show(self):
        """Return the parsed content of the JSON store."""
        with open(self.file, 'r') as f:
            return json.load(f)

    def select(self, value):
        """Return the string form of the first stored payment method whose
        values contain *value*, or None if nothing matches."""
        with open(self.file, 'r') as f:  # read-only access: 'r', not 'r+'
            data = json.load(f)
        for payment_method in data['payment_methods']:
            if value in str(payment_method.values()):
                return payment_method.__str__()
        return None
| ISCODEVUTB/vms | controller/payment_method_controller.py | payment_method_controller.py | py | 1,038 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logic.payment_method.PaymentMethod",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "json.load",
... |
41146178388 | import pandas as pd
import torch
from src.common.tools import model_device
from src.callback.progressbar import ProgressBar
from configs.basic_config import config
class Generator(object):
    """Batch text generator around a knowledge-augmented encoder/decoder.

    Wraps a project model (``model.encoder`` / ``model.decoder`` /
    ``model.classification_head``), a HuggingFace-style tokenizer and a
    sampler object, and drives batched generation plus optional CSV/pickle
    dumps of the results. Runs everything under ``torch.no_grad()``.
    """

    def __init__(self, model, tok, logger, n_gpu, input_len=None):
        # model: project encoder/decoder model; tok: tokenizer with
        # bos/pad token ids and batch_decode; logger: standard logger.
        self.model = model
        self.tok = tok
        self.logger = logger
        self.input_len = input_len
        # model_device moves the model and picks the torch device.
        self.model, self.device = model_device(n_gpu=n_gpu, model=self.model)

    def generate_example(self,
                         data,
                         sampler,
                         max_length,
                         relation_index=None,
                         repetition_penalty=None,
                         no_repeat_ngram_size=None,
                         length_penalty=None,
                         num_return_sequences=1,
                         save_prefix=None,
                         add_cls=False):
        """Generate one output sequence batch-wise from ``data``.

        data: iterable of batches (input_ids, input_mask, segment_ids,
            label_ids, labels_masks, cls_label) — tensors.
        relation_index: if given, the encoder attention over the relation
            slots is masked so only that relation is attended to.
        add_cls: additionally run the classification head on the EOS hidden
            state of each generated sequence.
        save_prefix: if given, dump input/generated/reference sentences to
            this CSV path.
        Returns (label_sents, generated_sents, encoder_states, cls_ids).
        """
        pbar = ProgressBar(n_total=len(data), desc='Testing')
        print_interval = len(data) // 10
        all_generated_sents = []
        all_label_sents = []
        all_input_sents = []
        all_encoder_states = []
        all_cls_ids = []
        all_self_attention = []
        all_cross_attentions = []
        all_encoder_attentions = []
        all_input_ids = []
        all_output_ids = []
        all_input_mask = []
        relations = config['relations']
        with torch.no_grad():
            for step, batch in enumerate(data):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids, labels_masks, cls_label = batch
                # [bsz, ln]
                batch_size, ln = input_ids.shape
                # Tokenize the fixed relation phrases once per batch.
                char_relation = self.tok(relations, padding=True, return_length=True, return_tensors='pt')
                relation_ids = char_relation["input_ids"].to(input_ids.device)
                relation_mask = char_relation["attention_mask"].to(input_ids.device)
                dim = relation_ids.size(0)
                # Build, per example, `dim` copies of the input each with one
                # relation appended; truncate the input when it would overflow.
                kg_input_ids = []
                for i in range(batch_size):
                    cur_kg_input_ids = []
                    cur_input_ids = input_ids[i]
                    cur_ln = input_mask[i].sum()
                    for j in range(dim):
                        char_ln = relation_mask[j].sum()
                        cur_relation_ids = relation_ids[j]
                        if cur_ln + char_ln > ln:
                            it = torch.cat([cur_input_ids[:ln - char_ln - 1], cur_input_ids[cur_ln - 1:cur_ln],
                                            cur_relation_ids[:char_ln]],
                                           dim=-1).unsqueeze(0)
                            cur_kg_input_ids.append(it)
                        else:
                            it = torch.cat(
                                [cur_input_ids[:cur_ln], cur_relation_ids[:char_ln], cur_input_ids[cur_ln + char_ln:]],
                                dim=-1).unsqueeze(0)
                            cur_kg_input_ids.append(it)
                    kg_input_ids.append(torch.cat(cur_kg_input_ids, dim=0).unsqueeze(0))
                kg_input_ids = torch.cat(kg_input_ids, dim=0)
                kg_input_masks = kg_input_ids.ne(self.tok.pad_token_id).int()
                # Encode the story together with the relation-augmented inputs.
                encoder_outputs = self.model.encoder(input_ids=input_ids,
                                                     input_masks=input_mask,
                                                     segment_ids=segment_ids,
                                                     kg_input_ids=kg_input_ids,
                                                     kg_input_masks=kg_input_masks,
                                                     )
                encoder_hidden_states = encoder_outputs["encoder_hidden_states"]
                encoder_attentions = encoder_outputs["encoder_attention_mask"]
                if relation_index is not None:
                    # Attend only to the selected relation slot.
                    encoder_attentions[:, :dim] = 0
                    encoder_attentions[:, relation_index] = 1
                decoder_input_ids = torch.ones([batch_size, 1], dtype=torch.long) * self.tok.bos_token_id
                decoder_input_ids = decoder_input_ids.to(self.device)
                batch_data = {}
                batch_data["input_ids"] = decoder_input_ids
                batch_data["input_attention_mask"] = torch.ones_like(decoder_input_ids, dtype=torch.long)
                batch_data["encoder_hidden_states"] = encoder_hidden_states
                batch_data["encoder_padding_mask"] = encoder_attentions
                decoder = self.model.decoder
                sampling_result = sampler.generate_sequence(batch_data,
                                                            decoder,
                                                            start_idx=1,
                                                            end_len=max_length,
                                                            repetition_penalty=repetition_penalty,
                                                            no_repeat_ngram_size=no_repeat_ngram_size,
                                                            length_penalty=length_penalty,
                                                            num_return_sequences=num_return_sequences)
                seqs = sampling_result["beams"]
                # decoder_hidden_states = sampling_result["hidden_states"]
                all_generated_sents += seqs
                label_sent = self.tok.batch_decode(label_ids, skip_special_tokens=True)
                all_label_sents += label_sent
                input_sent = self.tok.batch_decode(input_ids, skip_special_tokens=True)
                all_input_sents += input_sent
                all_encoder_states.append(encoder_hidden_states)
                output_ids = sampling_result["output_ids"]
                assert output_ids.size(0) == batch_size
                output_mask = (1 - output_ids.eq(self.tok.pad_token_id).int()).to(self.device)
                # Re-run the decoder on the sampled ids to collect attentions.
                decoder_outputs = decoder(input_ids=output_ids,
                                          attention_mask=output_mask,
                                          encoder_hidden_states=encoder_hidden_states,
                                          encoder_attention_mask=encoder_attentions,
                                          past_key_values=None,
                                          inputs_embeds=None,
                                          use_cache=None,
                                          output_attentions=True,
                                          output_hidden_states=None,
                                          return_dict=True, )
                attentions = decoder_outputs["attentions"]  # list of [bsz, self.num_heads, tgt_len, src_len]
                cross_attentions = decoder_outputs[
                    "cross_attentions"]  # list of [bsz, self.num_heads, tgt_len, src_len]
                # print(f"attention from last decoder layer: {attentions[-1].size()}")
                # print(f"cross attention from last encoder layer: {cross_attentions[-1].size()}")
                # print(f"attention from last encoder layer: {encoder_attentions[-1].size()}")
                all_encoder_attentions.append(encoder_attentions[0].cpu().detach())
                all_self_attention.append(attentions[0].cpu().detach())
                all_cross_attentions.append(cross_attentions[0].cpu().detach())
                all_input_ids.append(input_ids.cpu().detach())
                all_output_ids.append(output_ids.cpu().detach())
                all_input_mask.append(input_mask.cpu().detach())
                if add_cls:
                    # Classify from the hidden state right before EOS.
                    decoder_hidden_states = decoder_outputs["hidden_states"]  # [bsz, ln, E]
                    eos_hidden_states = []
                    eos_index = torch.sum(output_mask, dim=1) - 2  # [bsz]
                    # NOTE(review): rebinds `batch_size` inside the loop body.
                    batch_size = eos_index.size(0)
                    for i in range(batch_size):
                        eos_hidden_states.append(decoder_hidden_states[i, eos_index[i], :])
                    eos_hidden_states = torch.cat([x.unsqueeze(0) for x in eos_hidden_states])
                    cls_logits = self.model.classification_head(eos_hidden_states)  # [bsz, num_labels]
                    cls_ids = torch.argmax(cls_logits.softmax(dim=-1), dim=-1)
                    all_cls_ids.append(cls_ids)
                if (step + 1) % max(1, print_interval) == 0:
                    show_info = pbar(step=step)
                    self.logger.info(show_info)
        # NOTE(review): shadows the `data` parameter; only used by the
        # commented-out pickle dump below.
        data = {"input_ids": all_input_ids,
                "input_mask": all_input_mask,
                "output_ids": all_output_ids,
                "encoder_attentions": all_encoder_attentions,
                "self_attention": all_self_attention,
                "cross_attentions": all_cross_attentions
                }
        # file_path = f"test_attentions_layer_0_{self.args.data_name}.pkl"
        # import pickle
        # with open(file_path, 'wb') as f:
        #     pickle.dump(data, f)
        if save_prefix is not None:
            columns = ["input", "generated", "reference"]
            # print(f"all_generated_sents: {len(all_generated_sents)} -- all_label_sents: {len(all_label_sents)}")
            df = pd.DataFrame(columns=columns)
            df["input"] = all_input_sents
            df["generated"] = all_generated_sents
            df["reference"] = all_label_sents
            if len(all_cls_ids) != 0:
                df["cls_id"] = torch.cat(all_cls_ids, dim=0).cpu().detach().tolist()
            df.to_csv(f"{save_prefix}")
            self.logger.info(f"save sentences in csv files: {save_prefix}. Done!")
        if 'cuda' in str(self.device):
            torch.cuda.empty_cache()
        return all_label_sents, all_generated_sents, all_encoder_states, all_cls_ids

    def generate_explanation(self,
                             data,
                             KG_model,
                             sampler,
                             max_length,
                             relations=None,
                             repetition_penalty=None,
                             no_repeat_ngram_size=None,
                             length_penalty=None,
                             save_prefix=None,
                             KG_only=False,
                             num_return_sequences=1):
        """Generate one explanation per (example, relation) pair.

        KG_model: a BART-style model used as the decoder (and, when
            ``KG_only`` is True, also as the encoder of the relation-augmented
            inputs instead of ``self.model.encoder``).
        save_prefix: '.csv' path → one column per relation; otherwise must be
            a '.pkl' path for a pickle of inputs and generations.
        Returns (label_sents, generated_sents, encoder_states, cls_ids) —
        NOTE(review): the first, third and fourth lists stay empty here.
        """
        pbar = ProgressBar(n_total=len(data), desc='Testing')
        print_interval = len(data) // 10
        all_generated_sents = []
        all_label_sents = []
        all_input_sents = []
        all_encoder_states = []
        all_cls_ids = []
        relations = relations if relations is not None else config['relations']
        with torch.no_grad():
            for step, batch in enumerate(data):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids, labels_masks, labels_segment_ids, cls_label = batch
                # [bsz, ln]
                batch_size, ln = input_ids.shape
                char_relation = self.tok(relations, padding=True, return_length=True, return_tensors='pt')
                relation_ids = char_relation["input_ids"].to(input_ids.device)
                relation_mask = char_relation["attention_mask"].to(input_ids.device)
                dim = relation_ids.size(0)
                # Same relation-augmented input construction as generate_example.
                kg_input_ids = []
                for i in range(batch_size):
                    cur_kg_input_ids = []
                    cur_input_ids = input_ids[i]
                    cur_ln = input_mask[i].sum()
                    for j in range(dim):
                        char_ln = relation_mask[j].sum()
                        cur_relation_ids = relation_ids[j]
                        if cur_ln + char_ln > ln:
                            it = torch.cat([cur_input_ids[:ln - char_ln - 1], cur_input_ids[cur_ln - 1:cur_ln],
                                            cur_relation_ids[:char_ln]],
                                           dim=-1).unsqueeze(0)
                            cur_kg_input_ids.append(it)
                        else:
                            it = torch.cat(
                                [cur_input_ids[:cur_ln], cur_relation_ids[:char_ln], cur_input_ids[cur_ln + char_ln:]],
                                dim=-1).unsqueeze(0)
                            cur_kg_input_ids.append(it)
                    kg_input_ids.append(torch.cat(cur_kg_input_ids, dim=0).unsqueeze(0))
                kg_input_ids = torch.cat(kg_input_ids, dim=0).to(self.device)
                kg_input_masks = kg_input_ids.ne(self.tok.pad_token_id).int().to(self.device)
                # Encode the story: either with the standalone KG model's
                # encoder or with the project model's knowledge encoder.
                if KG_only:
                    encoder = KG_model.get_encoder().to(self.device)
                    encoder_outputs = encoder(input_ids=kg_input_ids.reshape([batch_size * dim, ln]),
                                              attention_mask=kg_input_masks.reshape([batch_size * dim, ln]),
                                              inputs_embeds=None,
                                              output_attentions=None,
                                              output_hidden_states=None,
                                              return_dict=None, )
                    KG_hidden_states = encoder_outputs[0]
                else:
                    encoder_outputs = self.model.encoder(input_ids=input_ids,
                                                         input_masks=input_mask,
                                                         segment_ids=segment_ids,
                                                         kg_input_ids=kg_input_ids,
                                                         kg_input_masks=kg_input_masks,
                                                         )
                    KG_hidden_states = encoder_outputs["kg_hidden_states"]  # [bsz, dim, ln, E]
                bsz, dim, ln = kg_input_masks.size()
                # One decoder start token per (example, relation) pair.
                decoder_input_ids = torch.ones([batch_size * dim, 1], dtype=torch.long) * self.tok.bos_token_id
                decoder_input_ids = decoder_input_ids.to(self.device)
                batch_data = {}
                batch_data["input_ids"] = decoder_input_ids
                batch_data["input_attention_mask"] = torch.ones_like(decoder_input_ids, dtype=torch.long).to(
                    self.device)
                batch_data["encoder_hidden_states"] = KG_hidden_states.reshape([batch_size * dim, ln, -1]).to(
                    self.device)
                batch_data["encoder_padding_mask"] = kg_input_masks.reshape([batch_size * dim, ln]).to(self.device)
                decoder = KG_model.to(self.device)
                sampling_result = sampler.generate_sequence(batch_data,
                                                            decoder,
                                                            start_idx=1,
                                                            end_len=max_length,
                                                            repetition_penalty=repetition_penalty,
                                                            no_repeat_ngram_size=no_repeat_ngram_size,
                                                            length_penalty=length_penalty,
                                                            bart_model=True,
                                                            num_return_sequences=num_return_sequences)
                seqs = sampling_result["beams"]
                # decoder_hidden_states = sampling_result["hidden_states"]
                # Regroup the flat beam list back into, per example, one
                # '|'-joined string of return sequences per relation.
                dim = dim * num_return_sequences
                seqs_results = []
                for i in range(batch_size):
                    batch_seqs = seqs[i * dim:(i + 1) * dim]
                    batch_seqs_list = []
                    for j in range(int(dim / num_return_sequences)):
                        batch_seqs_list.append(
                            '|'.join(batch_seqs[j * num_return_sequences: (j + 1) * num_return_sequences]))
                    # print(f"batch_seqs_list: {len(batch_seqs_list)},\n {batch_seqs_list}")
                    seqs_results.append(batch_seqs_list)
                all_generated_sents += seqs_results
                input_sent = self.tok.batch_decode(input_ids, skip_special_tokens=True)
                all_input_sents += input_sent
                output_ids = sampling_result["output_ids"]
                assert output_ids.size(0) == batch_size * dim
                if (step + 1) % max(1, print_interval) == 0:
                    show_info = pbar(step=step)
                    self.logger.info(show_info)
        if save_prefix is not None:
            if "csv" in save_prefix:
                df = pd.DataFrame(all_generated_sents, columns=relations)
                df.to_csv(f"{save_prefix}")
            else:
                assert "pkl" in save_prefix
                import pickle
                with open(save_prefix, 'wb') as f:
                    pickle.dump({"input": all_input_sents,
                                 "generated": all_generated_sents}, f)
            self.logger.info(f"save sentences in csv files: {save_prefix}. Done!")
        if 'cuda' in str(self.device):
            torch.cuda.empty_cache()
        return all_label_sents, all_generated_sents, all_encoder_states, all_cls_ids
| THU-BPM/CoEP | src/test/generator_CoEP.py | generator_CoEP.py | py | 17,988 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "src.common.tools.model_device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "src.callback.progressbar.ProgressBar",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "configs.basic_config.config",
"line_number": 43,
"usage_type": "name"
... |
2513105859 | import ape
from ape import Contract, reverts
from utils.checks import check_strategy_totals
from utils.utils import days_to_secs
import pytest
def test__set_uni_fees(
    asset,
    strategy,
    management,
    aave,
    weth,
):
    """Management can set uni fees; updating either ordering mirrors both."""

    def assert_fees(expected):
        assert strategy.uniFees(aave, weth) == expected
        assert strategy.uniFees(weth, aave) == expected

    # Fees start unset.
    assert_fees(0)

    strategy.setUniFees(aave, weth, 500, sender=management)
    assert_fees(500)

    strategy.setUniFees(weth, aave, 5, sender=management)
    assert_fees(5)

    strategy.setUniFees(weth, aave, 0, sender=management)
    assert_fees(0)
def test__set_uni_fees__reverts(
    strategy,
    user,
    aave,
    weth,
):
    """A non-authorized account cannot change uni fees."""
    pairs = ((aave, weth), (weth, aave))

    # Fees start unset.
    for token_in, token_out in pairs:
        assert strategy.uniFees(token_in, token_out) == 0

    with reverts("!Authorized"):
        strategy.setUniFees(weth, aave, 500, sender=user)

    # Still unset after the reverted call.
    for token_in, token_out in pairs:
        assert strategy.uniFees(token_in, token_out) == 0
def test__set_min_amount_to_sell(
    strategy,
    management,
):
    """Management can set minAmountToSell to zero or a very large value."""
    # Deployment default.
    assert strategy.minAmountToSell() == 1e4

    for new_amount in (0, int(100e18)):
        strategy.setMinAmountToSell(new_amount, sender=management)
        assert strategy.minAmountToSell() == new_amount
def test__set_min_amount_to_sell__reverts(
    strategy,
    user,
):
    """A plain user cannot change minAmountToSell."""
    default_min = 1e4
    assert strategy.minAmountToSell() == default_min

    with reverts("!Authorized"):
        strategy.setMinAmountToSell(0, sender=user)

    # Unchanged after the reverted call.
    assert strategy.minAmountToSell() == default_min
def test__emergency_withdraw__reverts(strategy, user, deposit, amount):
    """emergencyWithdraw stays management-only before and after deposits."""

    def expect_unauthorized():
        with reverts("!Authorized"):
            strategy.emergencyWithdraw(100, sender=user)

    # Blocked while the strategy is empty.
    expect_unauthorized()

    deposit()

    check_strategy_totals(
        strategy,
        total_assets=amount,
        total_debt=amount,
        total_idle=0,
        total_supply=amount,
    )

    # Still blocked once funds are in the strategy.
    expect_unauthorized()
| mil0xeth/yearn-v3-AAVE-Delta-Neutral-st-yCRV | tests/test_access.py | test_access.py | py | 2,248 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ape.reverts",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ape.reverts",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "ape.reverts",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "utils.checks.check_strategy_total... |
5872705859 | # Вспомогательные функции для получения времени пешего похода до метро и коородинат объекта
import requests
import json
import time
from random import randint
from sklearn import metrics
import numpy as np
def regression_results(y_true, y_pred):
    """Print common regression quality metrics for predictions vs ground truth."""
    explained_variance = metrics.explained_variance_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    r2 = metrics.r2_score(y_true, y_pred)

    # Same labels/order as before; all values rounded to 4 decimal places.
    for label, value in (
        ('explained_variance: ', explained_variance),
        ('r2: ', r2),
        ('MAE: ', mae),
        ('MSE: ', mse),
        ('RMSE: ', np.sqrt(mse)),
    ):
        print(label, round(value, 4))
def get_coords(region, addr):
    """Geocode *addr* via the Sputnik API; return (lat, lon) or [0, 0] if unresolved."""
    # Region strings containing "г," denote a city; prepend them to the query.
    if "г," in region:
        addr = region + " " + addr
    # Small random delay to avoid hammering the public endpoint.
    time.sleep(randint(1, 2))
    raw = requests.get("http://search.maps.sputnik.ru/search/addr?q={0}".format(addr)).content
    result = json.loads(raw)["result"]
    if "address" not in result:
        return [0, 0]
    coords = result["address"][0]["features"][0]["geometry"]["geometries"][0]["coordinates"]
    # API returns (lon, lat); callers expect (lat, lon).
    return coords[1], coords[0]
def get_closest_metro(lng, lat):
    """Walking time (seconds) to the closest Moscow metro station, None on API error, -1 on failure."""
    try:
        raw = requests.get("https://json.smappi.org/milash/metro-station/getClosest?city=Москва&lat={0}&lon={1}&count=1".format(lat, lng)).content
        stations = json.loads(raw)
        # The API signals errors via a "status" field instead of an HTTP code.
        if "status" in stations:
            return None
        time.sleep(randint(1, 2))
        station = stations[0]["station"]
        raw = requests.get("http://footroutes.maps.sputnik.ru/?loc={0},{1}&loc={2},{3}".format(lat, lng, station["geo_lat"], station["geo_lon"])).content
        route = json.loads(raw)
        return route["route_summary"]["total_time"]
    except Exception:
        return -1
| levchCode/MoscowEstate | processing/metro_coords.py | metro_coords.py | py | 2,075 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sklearn.metrics.explained_variance_score",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 14,
"usage_type": "call"
... |
15571487589 | from urllib.parse import urlsplit
from django.contrib.sites.models import Site
from django.templatetags.static import static
from ..core.utils import build_absolute_uri
def get_email_context():
    """Return (send kwargs, template context) shared by all outgoing emails."""
    site: Site = Site.objects.get_current()
    logo_url = build_absolute_uri(static("images/logo-light.svg"))

    template_context = {
        "domain": site.domain,
        "logo_url": logo_url,
        "site_name": site.name,
    }
    kwargs = {"from_email": site.settings.default_from_email}
    return kwargs, template_context
def prepare_url(params: str, redirect_url: str) -> str:
    """Return *redirect_url* with its query string replaced by *params*."""
    parts = urlsplit(redirect_url)
    return parts._replace(query=params).geturl()
| croolicjah/saleor-platform | saleor/saleor/core/emails.py | emails.py | py | 779 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.contrib.sites.models.Site",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.sites.models.Site.objects.get_current",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.sites.models.Site.objects",
"line_number... |
411750400 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import pathlib
from typing import cast, Optional
import pytest
import pytest_mock
from dae.genotype_storage.genotype_storage_registry import \
get_genotype_storage_factory
from dae.duckdb_storage.duckdb_genotype_storage import \
DuckDbGenotypeStorage
from dae.testing import setup_pedigree, setup_vcf, vcf_study
from dae.testing.foobar_import import foobar_gpf
from dae.studies.study import GenotypeData
@pytest.fixture
def duckdb_storage_db(
        tmp_path_factory: pytest.TempPathFactory) -> DuckDbGenotypeStorage:
    """Started DuckDB genotype storage backed by a temporary .db file."""
    base_dir = tmp_path_factory.mktemp("duckdb_storage")
    config = {
        "id": "dev_duckdb_storage",
        "storage_type": "duckdb",
        "db": "duckdb_genotype_storage/dev_storage.db",
        "read_only": False,
        "base_dir": base_dir,
    }
    storage = cast(
        DuckDbGenotypeStorage,
        get_genotype_storage_factory("duckdb")(config))
    storage.start()
    return storage
@pytest.fixture
def duckdb_storage_parquet(
        tmp_path_factory: pytest.TempPathFactory) -> DuckDbGenotypeStorage:
    """Started DuckDB genotype storage that keeps studies as parquet files."""
    base_dir = tmp_path_factory.mktemp("duckdb_storage")
    config = {
        "id": "dev_duckdb_storage",
        "storage_type": "duckdb",
        "studies_dir": "duckdb_genotype_storage",
        "base_dir": str(base_dir),
    }
    storage = cast(
        DuckDbGenotypeStorage,
        get_genotype_storage_factory("duckdb")(config))
    storage.start()
    return storage
def imported_study(
        root_path: pathlib.Path,
        duckdb_storage: DuckDbGenotypeStorage) -> GenotypeData:
    """Build a GPF instance on *duckdb_storage* and import a tiny trio VCF study.

    The fixture data is one family (mom/dad/proband) with two SNVs on
    contig "foo"; the returned study should yield exactly those variants.
    """
    gpf_instance = foobar_gpf(root_path, duckdb_storage)
    ped_path = setup_pedigree(
        root_path / "vcf_data" / "in.ped",
        """
        familyId personId dadId momId sex status role
        f1 m1 0 0 2 1 mom
        f1 d1 0 0 1 1 dad
        f1 p1 d1 m1 2 2 prb
        """)
    vcf_path = setup_vcf(
        root_path / "vcf_data" / "in.vcf.gz",
        """
        ##fileformat=VCFv4.2
        ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
        ##contig=<ID=foo>
        #CHROM POS ID REF ALT QUAL FILTER INFO FORMAT m1 d1 p1
        foo 13 . G C . . . GT 0/1 0/0 0/1
        foo 14 . C T . . . GT 0/0 0/1 0/1
        """)
    study = vcf_study(
        root_path,
        "minimal_vcf", ped_path, [vcf_path],
        gpf_instance)
    return study
@pytest.mark.parametrize(
    "base_dir,parquet_scan,expected",
    [
        # Relative scan paths get the base dir prepended...
        ("/test/base/dir", "parquet_scan('aa/bb')",
         "parquet_scan('/test/base/dir/aa/bb')"),
        # ...but are left alone when no base dir is configured...
        (None, "parquet_scan('aa/bb')",
         "parquet_scan('aa/bb')"),
        # ...or when the scan path is already absolute.
        ("/test/base/dir", "parquet_scan('/aa/bb')",
         "parquet_scan('/aa/bb')"),
        # None and non-parquet_scan strings pass through unchanged.
        ("/test/base/dir", None,
         None),
        ("/test/base/dir", "ala_bala",
         "ala_bala"),
    ],
)
def test_base_dir_join_parquet_scan(
        base_dir: Optional[str], parquet_scan: str, expected: str,
        duckdb_storage_parquet: DuckDbGenotypeStorage,
        mocker: pytest_mock.MockerFixture) -> None:
    """_base_dir_join_parquet_scan_or_table prefixes relative scans with the base dir."""
    # Force the storage to report the parametrized base dir.
    mocker.patch.object(
        duckdb_storage_parquet,
        "get_base_dir",
        return_value=base_dir
    )
    res = duckdb_storage_parquet\
        ._base_dir_join_parquet_scan_or_table(parquet_scan)
    assert res == expected
def test_parquet_storage(
        tmp_path_factory: pytest.TempPathFactory,
        duckdb_storage_parquet: DuckDbGenotypeStorage) -> None:
    """Importing the tiny VCF study into parquet-backed storage yields both variants."""
    root_path = tmp_path_factory.mktemp("test_parquet_storage")

    study = imported_study(root_path, duckdb_storage_parquet)
    assert study is not None

    variants = list(study.query_variants())
    assert len(variants) == 2
| iossifovlab/gpf | dae/dae/duckdb_storage/tests/test_parquet_layout_scans.py | test_parquet_layout_scans.py | py | 3,959 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pytest.TempPathFactory",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dae.genotype_storage.genotype_storage_registry.get_genotype_storage_factory",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 30,
... |
27772819097 | import lbp_face_recognition
from Tkinter import *
import tkFileDialog
from Tkinter import Label,Tk
from PIL import Image, ImageTk
class home:
    """Main window for the low-light face recognition demo.

    Trains the LBP recognizer on startup, then lets the user pick a test
    image and find its closest match in the training set.
    """

    def __init__(self, master):
        # Train once up front so matching is fast when the user clicks.
        lbp_face_recognition.train()
        self.master = master
        self.f = Frame(master, width=1000, height=600)
        self.f.propagate(0)
        self.f.pack()
        self.f["bg"] = '#2874A6'
        Label(self.f, text="Low Light Facial Recognition", bg='#2874A6',
              font=('Times new Roman', -50, 'bold')).grid(row=1, column=2, padx=20, pady=50)
        Button(self.f, text="Choose Test Image", command=self.choose,
               font=('times new roman', -20), width=15, height=3).grid(row=6, column=1, padx=20, pady=50)
        Button(self.f, text="Find Match", command=self.test,
               font=('times new roman', -20), width=15, height=3).grid(row=7, column=1, padx=20, pady=50)

    def choose(self):
        """Ask the user for a .jpg test image and preview it in the window."""
        global path
        path = tkFileDialog.askopenfilename(filetypes=[("Image File", '.jpg')])
        im = Image.open(path)
        im2 = im.resize((250, 250), Image.ANTIALIAS)
        tkimage = ImageTk.PhotoImage(im2)
        # BUG FIX: `Label(...).grid(...)` returns None, so the old code crashed
        # on `myvar1.image = tkimage` (and also mixed grid with pack).
        # Create the widget first, keep the image reference, then grid it.
        myvar1 = Label(self.f, image=tkimage)
        myvar1.image = tkimage  # keep a reference so Tk doesn't GC the photo
        myvar1.grid(row=6, column=2)

    def test(self):
        """Run the matcher on the chosen image and show the best match."""
        match_path = lbp_face_recognition.hist_get(path)
        im = Image.open(match_path)
        im2 = im.resize((250, 250), Image.ANTIALIAS)
        tkimage = ImageTk.PhotoImage(im2)
        myvar2 = Label(self.f, image=tkimage)
        myvar2.image = tkimage
        myvar2.grid(row=7, column=2)
# Build the UI and enter the Tk event loop.
root=Tk()
home=home(root)  # NOTE(review): rebinding the class name to the instance shadows `home`
root.mainloop()
| sudo-sunil/GUI_LLFR | gui.py | gui.py | py | 1,687 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lbp_face_recognition.train",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Tkinter.Label",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkFileDialog.askopenfilename",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": ... |
14179302218 | from scripts.helpful_scripts import get_account
from scripts.get_weth import get_weth
from brownie import config, network, interface
from web3 import Web3
# 0.1 ETH expressed in wei — the collateral amount main() deposits.
amount = Web3.toWei(0.1, "ether")
def main():
    """Deposit WETH into Aave, then borrow DAI against ~95% of borrow capacity."""
    account = get_account()
    erc20_address = config["networks"][network.show_active()]["weth_token"]
    # call get_weth if no weth balance
    if network.show_active() in ["mainnet-fork"]:
        get_weth()
    lending_pool = get_lending_pool()
    # We need to approve sending out ERC20 tokens
    approve_erc20(amount, lending_pool.address, erc20_address, account)
    # Now deposit to aave
    print("Depositing...")
    tx = lending_pool.deposit(
        erc20_address, amount, account.address, 0, {"from": account}
    )
    tx.wait(1)
    print("Deposited!")
    # Now we can borrow, but how much??
    # Lets get some stats so we know where we stand and keep account healthy
    borrowable_eth, total_debt = get_borrowable_data(lending_pool, account)
    # to borrow DAI we need conversion rate in terms of ETH
    dai_eth_price = get_asset_price(
        config["networks"][network.show_active()]["dai_eth_price_feed"]
    )
    # converting borrowable eth to borrowable Dai and taking only 95% for safety (lower % = safer)
    amount_dai_to_borrow = (1 / dai_eth_price) * (borrowable_eth * 0.95)
    print(f"We are going to borrow {amount_dai_to_borrow} DAI")
    dai_address = config["networks"][network.show_active()]["dai_token"]
    # NOTE(review): the 3rd borrow() argument (1) is presumably the stable
    # interest-rate mode — confirm against the Aave LendingPool docs.
    borrow_tx = lending_pool.borrow(
        dai_address,
        Web3.toWei(amount_dai_to_borrow, "ether"),
        1,
        0,
        account.address,
        {"from": account},
    )
    borrow_tx.wait(1)
    print("We borrowed some DAI")
    get_borrowable_data(lending_pool, account)
    # repay_all(amount, lending_pool, account)
def repay_all(amount, lending_pool, account):
    """Approve the lending pool for DAI, then repay *amount* of debt."""
    # First we need to approve repay
    # NOTE(review): the approval converts `amount` with toWei but repay()
    # below passes the raw `amount`, and the spender here is the contract
    # object rather than lending_pool.address — confirm both are intended.
    approve_erc20(
        Web3.toWei(amount, "ether"),
        lending_pool,
        config["networks"][network.show_active()]["dai_token"],
        account,
    )
    repay_tx = lending_pool.repay(
        config["networks"][network.show_active()]["dai_token"],
        amount,
        1,
        account.address,
        {"from": account},
    )
    repay_tx.wait(1)
    print("Repaid!!")
def get_asset_price(price_feed_address):
    """Read the latest DAI/ETH price from a Chainlink aggregator as a float in ether units."""
    # ABI + Addr
    price_feed = interface.AggregatorV3Interface(price_feed_address)
    raw_price = price_feed.latestRoundData()[1]
    price = Web3.fromWei(raw_price, "ether")
    print(f"The Dai - ETH price is {price}")
    return float(price)
def get_borrowable_data(lending_pool, account):
    """Return (borrowable ETH, total debt ETH) for *account*, printing a summary.

    Values come from the pool's getUserAccountData and are converted from wei
    to ether before printing and returning.
    """
    (
        total_collateral_eth,
        total_debt_eth,
        available_borrow_eth,
        current_liquidation_threshold,
        ltv,
        health_factor,
    ) = lending_pool.getUserAccountData(account.address)
    available_borrow_eth = Web3.fromWei(available_borrow_eth, "ether")
    total_collateral_eth = Web3.fromWei(total_collateral_eth, "ether")
    total_debt_eth = Web3.fromWei(total_debt_eth, "ether")
    print(f"You have {total_collateral_eth} worth of ETH deposited.")
    # BUG FIX: this line previously said "deposited" for the debt figure.
    print(f"You have {total_debt_eth} worth of ETH borrowed.")
    print(f"You can borrow {available_borrow_eth} worth of ETH.")
    return (float(available_borrow_eth), float(total_debt_eth))
def approve_erc20(amount, spender, erc20_address, account):
    """Approve *spender* to pull *amount* of the ERC20 token at *erc20_address*."""
    # need abi and addr of erc20 contract
    print("Approving ERC20 Token")
    token = interface.IERC20(erc20_address)
    approval_tx = token.approve(spender, amount, {"from": account})
    approval_tx.wait(1)
    print("Approved!")
    return approval_tx
def get_lending_pool():
    """Resolve the current Aave LendingPool contract via the addresses provider."""
    provider_address = config["networks"][network.show_active()]["lending_pool_addresses_provider"]
    addresses_provider = interface.ILendingPoolAddressesProvider(provider_address)
    # The provider hands back only the address; wrap it with the interface ABI.
    return interface.ILendingPool(addresses_provider.getLendingPool())
| Brar-Paul/aave_brownie_py | scripts/aave_borrow.py | aave_borrow.py | py | 4,151 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "web3.Web3.toWei",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "scripts.helpful_scripts.get_account",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "brownie.c... |
13635080325 | import curses
import random
from typing import Tuple, Set
def get_draw_range(
    pointer_pos: Tuple[int, int],
    map_size: Tuple[int, int],
    window_size: Tuple[int, int],
    buffer: int
) -> Tuple[range, range]:
    """Compute the (y, x) ranges of map cells visible in the window.

    The view is centered on ``pointer_pos``, shrunk by ``buffer`` cells on
    every side of the window, and clamped so it never extends past the map.

    Refactor: the identical per-axis clamping logic previously duplicated
    for y and x now lives in ``_axis_draw_range``.
    """
    pointer_y, pointer_x = pointer_pos
    map_height, map_width = map_size
    win_height, win_width = window_size
    # Reserve `buffer` cells on each side of the window.
    win_height += -buffer * 2
    win_width += -buffer * 2
    return (
        _axis_draw_range(pointer_y, map_height, win_height),
        _axis_draw_range(pointer_x, map_width, win_width),
    )


def _axis_draw_range(center: int, map_extent: int, win_extent: int) -> range:
    """Range of map coordinates along one axis, centered on *center*, clamped to the map."""
    # A window larger than the map just shows the whole map.
    if win_extent > map_extent:
        win_extent = map_extent
    begin = center - win_extent // 2
    end = center + win_extent // 2
    # Odd sizes round the half-extent down, so pad the far side by one.
    if win_extent % 2 != 0:
        end += 1
    # Clamp to the map edges while keeping the span length constant.
    if begin < 0:
        begin = 0
        end = win_extent
    if end > map_extent:
        begin = map_extent - win_extent
        end = map_extent
    return range(begin, end)
def render(game,
           map_window,
           fov: bool,
           fov_entity: int|None = None,
           pointer_pos: Tuple[int, int] = (0, 0)) -> None:
    """Draw the visible slice of the game map into *map_window*.

    The view is centered on *pointer_pos* (clamped at map edges).  When
    *fov* is true, only cells in *fov_entity*'s field-of-view set are drawn
    (unless the debug flag disables FOV).
    """
    map_window.box()
    window_size_y, window_size_x = map_window.getmaxyx()

    game_map = game.game_map
    game_tile_map = game_map["map_array"]
    game_map_height = game_map["map_height"]
    game_map_width = game_map["map_width"]

    # Cells reserved on each side of the window for the border/margin.
    buffer = 3

    draw_y, draw_x = get_draw_range(
        pointer_pos=pointer_pos,
        map_size=(game_map_height, game_map_width),
        window_size=(window_size_y, window_size_x),
        buffer=buffer
    )
    # Debug overlay in the top border: draw ranges, round counter, pointer.
    debug_string = f"({draw_y}, {draw_x}, {game.round}, {game.pointer_pos})"
    map_window.addstr(0, 1, debug_string)

    fov_set: Set[Tuple[int, int]] = set()
    if fov and isinstance(game.pool.entities[fov_entity]["FOV"], set):
        fov_set = game.pool.entities[fov_entity]["FOV"]

    for window_y, map_y in enumerate(draw_y, buffer):
        for window_x, map_x in enumerate(draw_x, buffer):
            ent_id = None
            # Draw priority, lowest to highest: later categories win.
            prio_list = {
                "map_tile": [],
                "map_object": [],
                "item": [],
                "actor": [],
            }
            # Bucket every entity on this cell by category.
            for tile_id in game_tile_map[map_y][map_x]:
                for prio_item, ent_list in prio_list.items():
                    if game.pool.entities[tile_id].get(prio_item):
                        ent_list.append(tile_id)
            # Pick a random entity from the highest-priority non-empty
            # bucket (later iterations overwrite ent_id).
            for prio_item, ent_list in prio_list.items():
                if ent_list:
                    ent_id = random.choice(ent_list)
            if ent_id:
                ent = game.pool.entities[ent_id]
                # "char" holds (glyph, fg, bg) variants; pick one at random.
                char_list = random.choice(ent.get("char"))
                char, fg, bg = char_list
                color = game.colors[fg][bg]
            else:
                char = None
                color = None
            # Hide cells outside the FOV unless FOV is disabled for debugging.
            if (
                fov and
                (map_y, map_x) not in fov_set and not
                game.config["debug"]["disable_fov"]
            ):
                char = None
                color = None
            if char and color:
                map_window.addstr(
                    window_y,
                    window_x,
                    char,
                    curses.color_pair(color)
                )
            # Free-cursor mode: mark the pointer's cell with an "X".
            if not game.pointer_bound:
                py, px = pointer_pos
                if map_y == py and map_x == px:
                    map_window.addstr(window_y, window_x, "X")
    map_window.refresh()
| akibancha/pyaar | src/interface/render_map.py | render_map.py | py | 4,100 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Tuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number":... |
28911695063 | import numpy as np
# Render a point-cloud globe: sample an equirectangular Earth texture and
# scatter the sampled colors onto a unit sphere with plotly.
import plotly.graph_objects as go
import numpy as np  # NOTE(review): numpy is also imported above — duplicate, harmless
from PIL import Image
from scipy.interpolate import interpn

img = Image.open("Earth_Diffuse_6K.jpg")
imgdata = np.asarray(img)

# Keep every `factor`-th pixel in each direction to thin the point cloud.
factor = 30

lats = np.linspace(0, np.pi, int(imgdata.shape[0]/factor)) # this is actually latitude + 90
lons = np.linspace(0, 2*np.pi, int(imgdata.shape[1]/factor))
LATS, LONS = np.meshgrid(lats, lons)
# Interpolate the downsampled image colors at every (lat, lon) grid point.
values = interpn((lats, lons), imgdata[::factor, ::factor, :], (LATS.ravel(), LONS.ravel()))

# Spherical -> Cartesian coordinates on the unit sphere.
X = np.sin(LATS)*np.cos(LONS)
Y = np.sin(LATS)*np.sin(LONS)
Z = np.cos(LATS)

x = X.ravel()
y = Y.ravel()
z = Z.ravel()

fig = go.Figure(data=[go.Scatter3d(
    x=x,
    y=y,
    z=z,
    mode='markers',
    marker=dict(
        size=5,
        color=values
    )
)])

# tight layout
fig.update_layout(margin=dict(l=0, r=0, b=0, t=0))
# fig.show()
fig.write_html("file.html") | utat-ss/FINCH-Orbit | earth sphere plotly.py | earth sphere plotly.py | py | 877 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number... |
25484810000 | from typing import overload
from unicodedata import category
from django.core import exceptions
from datetime import datetime
from django.http.response import HttpResponseNotAllowed
from .models import (
FurnitureModels, OrderModels, ReviewModels, ChatTopicModels,
ChatContentModels, OrderModels,ShoppingCartModels, ProfileModels,ComplainModels, PreOrderModels, PaymentModels, KategoriModels
)
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render
from .forms import RegisterUserForm
from django.contrib.auth.decorators import login_required
from django.db.models import Count
import random
# Common redirect targets used by the views below.
home = "/"
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/login"
# from .models import ChairMode
def land_page(request):
    """Render the public landing page."""
    return render(request, "user/home.html")
def login_user(request):
    """Log a user in from POSTed credentials; re-render the form on failure."""
    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect(home)
        else :
            messages.error(request, 'Username atau password salah')
    return render(request, "user/login.html")
def register_user(request):
    """Register a new user (Jabodetabek-only service area) with an empty profile."""
    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        email = request.POST["email"]
        print(username)  # NOTE(review): leftover debug print
        # Delivery service is restricted to the Jabodetabek region.
        if request.POST.get('region',"-") != "jabodetabek":
            messages.error(request, 'Layanan hanya bisa untuk daerah Jabodetabek saja!')
            return redirect(request.META.get('HTTP_REFERER'))
        form = RegisterUserForm(request.POST)
        if form.is_valid():
            # NOTE(review): these three fields are re-read redundantly here.
            username = request.POST["username"]
            password = request.POST["password"]
            email = request.POST["email"]
            user = User.objects.create_user(username=username, email=email, password=password)
            user.save()
            # Profile starts with placeholder address; user edits it later.
            alamat = "-"
            gender = request.POST.get('gender',"-")
            phone = request.POST.get('phone',"-")
            full_name = request.POST.get('full_name',"-")
            profile = ProfileModels.objects.create(
                alamat = alamat,
                gender = gender,
                phone = phone,
                full_name = full_name,
                user = user
            )
            profile.save()
            return redirect("/login")
        else :
            messages.error(request, 'Ada yang salah dengan user mu')
    return render(request, "user/register.html")
@login_required(login_url="/login")
def logout_user(request):
    """Log the current user out and show the landing page."""
    logout(request)
    return render(request, "user/home.html")
def view_furniture(request, kategori):
    """List all furniture in *kategori*, each paired with its average rating."""
    listing = [
        {"furniture": item, "rating": calculate_rating(item)}
        for item in FurnitureModels.objects.filter(kategori=kategori)
    ]
    return render(request, "user/furniture_list.html", {"furnitures": listing})
def view_furniture_details(request, kategori, id):
    """Detail page for one furniture item: reviews, rating and random suggestions."""
    furniture = FurnitureModels.objects.get(id=id)
    gambar_furniture = []
    # Template expects image paths without the leading "static/" prefix.
    gambar_furniture.append((str(furniture.gambar)).replace("static/", ""))
    reviews = ReviewModels.objects.filter(futniture=furniture)
    rating = calculate_rating(furniture)
    all_furniture = list(FurnitureModels.objects.all())
    # Suggest up to four other items; fewer if the catalogue is small.
    count = 4 if len(all_furniture) >= 4 else len(all_furniture)
    random_furniture = random.sample(all_furniture, count)
    return render(
        request,
        "user/detail.html",
        {"furniture": furniture, "reviews": reviews, "rating": rating, "gambar_furniture": gambar_furniture, "other_furniture" : random_furniture },
    )
@login_required(login_url="/login")
def review_furniture(request, id):
    """Create a review: one per user per item, and only for verified buyers."""
    if request.method == "POST":
        furniture = FurnitureModels.objects.get(id=id)
        # One review per user per furniture item.
        user_has_review = ReviewModels.objects.filter(user = request.user ,futniture=furniture)
        if user_has_review.count():
            messages.error(request, 'Kamu sudah menulis review pada furniture ini!')
            return redirect(request.META.get('HTTP_REFERER'))
        curr_user = request.user
        user_has_order_complete = OrderModels.objects.filter(user=curr_user,furnitur = furniture)
        # The user must have a Completed payment covering one of their orders
        # for this item before reviewing.
        payment_complete = False
        for order in user_has_order_complete:
            payment = PaymentModels.objects.filter(user = curr_user, keranjang_deleted_id = order.keranjang_deleted_id, status = "Completed")
            if payment.count() > 0:
                payment_complete = True
                break
        if not payment_complete:
            messages.error(request, 'Kamu belum pernah membeli furniture ini , Kamu tidak bisa menreview furnitur ini!')
            return redirect(request.META.get('HTTP_REFERER'))
        rating = request.POST["star"]
        notes = request.POST["notes"]
        review = ReviewModels.objects.create(futniture=furniture, user=curr_user, notes=notes, rating=rating)
        review.save()
    return redirect(request.META.get('HTTP_REFERER'))
@login_required(login_url="/login")
def chat(request):
    """Customer-support chat page: one topic per user; POST appends a message."""
    curr_user = User.objects.get(id=request.user.id)
    # Lazily create the user's chat topic on first visit.
    try:
        topic = ChatTopicModels.objects.get(user=curr_user)
    except ObjectDoesNotExist:
        topic = ChatTopicModels.objects.create(user=curr_user)
        topic.save()
    if request.method == "POST":
        content = request.POST["content"]
        cahat_content = ChatContentModels.objects.create(user=curr_user, topic=topic, content=content)
        cahat_content.save()
    all_chat = ChatContentModels.objects.filter(topic=topic)
    return render(request, "user/chat.html", {"contents": all_chat})
@login_required(login_url="/login")
def chat_reload(request):
    """Return only the message-list partial, used for polling refreshes of the chat."""
    curr_user = User.objects.get(id=request.user.id)
    # Same lazy topic creation as chat() so polling works on a fresh account.
    try:
        topic = ChatTopicModels.objects.get(user=curr_user)
    except ObjectDoesNotExist:
        topic = ChatTopicModels.objects.create(user=curr_user)
        topic.save()
    all_chat = ChatContentModels.objects.filter(topic=topic)
    return render(request, "user/chat_content.html", {"contents": all_chat})
# @login_required(login_url="/login")
# def call(request):
# return render(request, "user/call.html")
@login_required(login_url="/login")
def complain(request):
    """Complaint form: only users with a Completed/Delivered payment may file one."""
    payment = PaymentModels.objects.filter(user=request.user, status__in = ["Completed","Delivered"])
    if payment.count() == 0 :
        messages.error(request, 'Tidak dapat melakukan komplain dikarenakan anda tidak memiliki pesanan')
        return redirect(request.META.get('HTTP_REFERER'))
    if request.method == "POST":
        try :
            alamat = request.POST['alamat']
            nama = request.POST['nama']
            email = request.POST['email']
            deskripsi = request.POST['deskripsi']
            pic =request.FILES.get('file')
            complain = ComplainModels.objects.create(
                user = request.user,
                alamat = alamat,
                nama = nama,
                email = email,
                deskripsi = deskripsi,
            )
            # Picture is optional; only attach it when a file was uploaded.
            if pic :
                complain.picture = pic
            complain.save()
            messages.success(request, 'Komplain berhasil dikirimkan !')
        # BUG FIX: the original had `except exceptions as e`, which tried to
        # catch the imported `django.core.exceptions` MODULE — catching a
        # non-exception class raises TypeError at runtime. Catch real
        # exceptions instead so missing form fields produce the error message.
        except Exception as e:
            print(e)
            messages.error(request, 'Ada data yang salah !')
    return render(request, "user/complain.html")
@login_required(login_url="/login")
def preorder(request):
    """Custom-furniture preorder form with an optional reference picture."""
    if request.method == "POST":
        try :
            alamat = request.POST['alamat']
            nama = request.POST['nama']
            email = request.POST['email']
            jenis_furniture = request.POST['jenis_furniture']
            pic =request.FILES.get('file')
            preorder = PreOrderModels.objects.create(
                user = request.user,
                alamat = alamat,
                nama = nama,
                email = email,
                jenis_furniture = jenis_furniture,
            )
            # Picture is optional; attach only when uploaded.
            if pic :
                preorder.picture = pic
            preorder.save()
            messages.success(request, 'Preorder berhasil dikirimkan !')
        except :
            # NOTE(review): bare except hides the real error; consider
            # narrowing to KeyError / Exception and logging.
            messages.error(request, 'Ada data yang salah atau kurang!')
    return render(request, "user/preorder.html")
@login_required(login_url="/login")
def checkout_fast(request,id_furniture):
    """'Buy now': add the POSTed quantity of one item to the cart, then show checkout."""
    user = User.objects.get(id=request.user.id)
    if request.method == "POST":
        jumlah = int(request.POST["jumlah"])
        furnitur = FurnitureModels.objects.get(id=id_furniture)
        # Reject non-positive quantities and quantities above stock.
        if(int(jumlah) > furnitur.stock or int(jumlah) <=0 ):
            messages.error(request, 'Form submission fail')
            return redirect(request.META.get('HTTP_REFERER'))
        # Get-or-create the user's shopping cart.
        try :
            keranjang = ShoppingCartModels.objects.get(user=user)
        except ObjectDoesNotExist:
            keranjang = ShoppingCartModels.objects.create(user=user, total = 0)
        current_total = furnitur.harga * jumlah
        order = OrderModels.objects.create(furnitur = furnitur,user = user, jumlah= jumlah, keranjang = keranjang, total = current_total)
        keranjang.total += current_total
        # NOTE(review): this OVERWRITES the cart's item count instead of
        # incrementing it — looks like it should be `+= jumlah`; confirm.
        keranjang.total_furnitur = jumlah
        order.save()
        keranjang.save()
        messages.success(request, (f"Berhasil menambahkan {jumlah} {furnitur.nama} kedalam keranjang"))
    keranjang = ShoppingCartModels.objects.get(user=user)
    all_order = OrderModels.objects.filter(user=user , keranjang = keranjang)
    return render(request, "user/checkout.html" ,{"orders": all_order , "keranjang" : keranjang})
@login_required(login_url="/login")
def checkout_all(request):
    """Show the checkout page for the user's cart; bounce back if there is no cart."""
    user = User.objects.get(id=request.user.id)
    try :
        keranjang = ShoppingCartModels.objects.get(user=user)
    except ObjectDoesNotExist:
        return redirect(request.META.get('HTTP_REFERER'))
    # NOTE(review): this second fetch is redundant — the cart was loaded above.
    keranjang = ShoppingCartModels.objects.get(user=user)
    all_order = OrderModels.objects.filter(user=user , keranjang = keranjang)
    return render(request, "user/checkout.html" ,{"orders": all_order , "keranjang" : keranjang})
@login_required(login_url="/login")
def payment(request):
    """Accept payment proof for the cart, decrement stock and archive the cart."""
    user = User.objects.get(id=request.user.id)
    try :
        keranjang = ShoppingCartModels.objects.get(user=user)
    except :
        messages.info(request, 'Keranjang anda masih kosong isi terlebih dahulu!')
        return redirect("/")
    if request.method == "POST":
        try:
            # Only one in-flight (non-Completed) payment at a time.
            payment = PaymentModels.objects.filter(user=request.user).exclude(status = "Completed")
            if payment.count() > 0 :
                messages.error(request, 'Tidak dapat melakukan pembayaran dikarenakan Karena pembayaran sebelumnya belum selesai')
                return redirect(request.META.get('HTTP_REFERER'))
            profile = ProfileModels.objects.get(user = request.user)
            picture =request.FILES['bukti_pembayaran']
            if not picture :
                raise
            # A real delivery address must be set before paying
            # ("alamat" is the placeholder written at registration).
            if profile.alamat == "alamat" or not bool(profile.alamat):
                messages.error(request, 'Ganti alamat terlebih dahulu di halaman profile sebelum melakukan pembayaran')
                return redirect(request.META.get('HTTP_REFERER'))
            payment = PaymentModels.objects.create(
                user = request.user,
                alamat = profile.alamat,
                total = keranjang.total,
                status = "Pending",
                bukti_pembayaran = picture,
                total_furnitur = keranjang.total_furnitur,
                keranjang_deleted_id = keranjang.id
            )
            payment.save()
            # Reserve stock and remember the soon-to-be-deleted cart id on
            # each order so it can still be linked to this payment.
            orders = OrderModels.objects.filter(keranjang = keranjang)
            for order in orders :
                furniture = order.furnitur
                furniture.stock -= order.jumlah
                furniture.save()
                order.keranjang_deleted_id = keranjang.id
                order.save()
            keranjang.delete()
            return redirect("/")
        except :
            # NOTE(review): bare except also swallows unexpected errors
            # (e.g. DB failures), not just a missing upload.
            messages.error(request, 'Foto bukti pembayaran harus ada!')
    return render(request, "user/payment.html",{"keranjang": keranjang})
def calculate_rating(furniture):
    """Average review rating for *furniture*; "-" when it has no reviews.

    Averages longer than three characters are formatted to two decimals.
    """
    reviews = ReviewModels.objects.filter(futniture=furniture)
    if not reviews:
        return "-"
    average = sum(review.rating for review in reviews) / reviews.count()
    if len(str(average)) > 3:
        average = "{:.2f}".format(average)
    return average
def profile(request):
    """Show the user's profile, creating a placeholder profile on first visit."""
    try:
        profile = ProfileModels.objects.get(user = request.user)
    except:
        # No profile yet — create one with placeholder values for editing.
        profile = ProfileModels.objects.create(
            alamat = "alamat",
            gender = "M",
            phone = "phone",
            full_name = "full_name",
            user = request.user
        )
    return render(request, "profile.html",{"profile": profile})
def edit_profile(request):
    """Display the profile-edit form and apply POSTed changes for the user."""
    profile = ProfileModels.objects.get(user=request.user)
    if request.method == "POST":
        # Copy the plain text fields straight off the form.
        for field in ("alamat", "gender", "full_name", "phone"):
            setattr(profile, field, request.POST[field])
        profile.birth_date = str(request.POST['birth_date'])
        uploaded = request.FILES.get('file')
        if uploaded:
            profile.profile_pic = uploaded
        profile.save()
    # The template expects the birth date as a plain string.
    profile.birth_date = str(profile.birth_date)
    return render(request, "profile-edit.html", {"profile": profile})
@login_required(login_url="/login")
def confirmation(request):
    """Let the user confirm receipt of a delivered order.

    GET renders the confirmation page for the single payment currently in
    "Delivered" state; POST marks it "Completed" and stamps the payment date.
    """
    try:
        # NOTE(review): .get() assumes at most one "Delivered" payment per
        # user; the bare except also hides MultipleObjectsReturned -- confirm
        # that invariant holds elsewhere.
        payment = PaymentModels.objects.get(user=request.user , status = "Delivered")
    except:
        messages.error(request, 'Tidak ada pesanan yang butuh konfirmasi !')
        return redirect("/")
    # Orders keep the id of their (since deleted) cart, which links them to
    # this payment.
    keranjang_deleted_id = payment.keranjang_deleted_id
    all_order = OrderModels.objects.filter(user=request.user , keranjang_deleted_id = keranjang_deleted_id)
    if request.method == "POST":
        payment.status = "Completed"
        payment.tanggal_bayar = datetime.now()
        payment.save()
        messages.success(request, 'Pesanan berhasil diselesaikan !')
        return redirect("/")
    #return render(request, "user/checkout.html" ,{"orders": all_order , "keranjang" : keranjang})
    return render(request, "user/confirmation.html",{"orders": all_order, "keranjang": payment})
@login_required(login_url="/login")
def detele_some_order(request, id):
    """Remove one order from the current user's cart and deduct its price.

    NOTE(review): the function name and flash messages contain typos
    ("detele", "Mendelte"); the name is likely referenced from urls.py, so
    it is left unchanged here.
    """
    try:
        order = OrderModels.objects.get(id = id , user= request.user)
        harga = order.total
        keranjang = ShoppingCartModels.objects.get(user= request.user)
        # Refund the order's price to the cart total before deleting it.
        keranjang.total -= harga
        keranjang.save()
        order.delete()
        messages.success(request , 'Sukses mendelete orderan !')
    except:
        # Any failure (missing order or cart) is reported as a generic error.
        messages.error(request, 'Gagal Mendelte order!')
    return redirect(request.META.get('HTTP_REFERER'))
# def get_category(request):
# category = FurnitureModels.objects.all().values('kategori').annotate(dcount=Count('kategori'))
# categories = []
# for a in category:
# dict = {}
# kategori = a.get("kategori")
# exclude = ["chair","wardobe","table","bedroom"]
# if kategori not in exclude:
# dict = {}
# dict["name"] = kategori
# dict["name2"] = kategori.capitalize()
# categories.append(dict)
# return render(request, "user/category.html",{"categories":categories,})
def get_category(request):
    """Render the category listing, hiding the four built-in categories.

    Fixes: the original shadowed the builtin ``dict``, rebound the loop's
    source variable (``kategori``) inside the loop, and initialised the dict
    twice.  Behavior is unchanged.
    """
    # These categories get dedicated pages elsewhere, so keep them out of
    # the generic listing.
    excluded = ("chair", "wardobe", "table", "bedroom")
    categories = [
        {"name": k.nama, "name2": k.nama.capitalize()}
        for k in KategoriModels.objects.all()
        if k.nama not in excluded
    ]
    return render(request, "user/category.html", {"categories": categories})
def get_notif(request):
    """Return the pre-order notification fragment, or an empty response.

    Counts the current user's pre-orders in 'deleted' or 'accepted' state.
    Fixes: removed leftover debug ``print()`` calls, replaced ``len(queryset)``
    (fetches every row) with ``.count()`` (single COUNT query), and narrowed
    the bare ``except:`` to ``except Exception``.
    """
    try:
        deleted = PreOrderModels.objects.filter(user=request.user, status='deleted').count()
        accepted = PreOrderModels.objects.filter(user=request.user, status='accepted').count()
        if accepted == 0 and deleted == 0:
            return HttpResponse('')
        return render(request, "user/notif.html", {"accepted": accepted, "deleted": deleted})
    except Exception:
        # e.g. anonymous users cannot be used in the filter -- show nothing.
        return HttpResponse('')
@login_required
def dismiss_notif(request):
    """Acknowledge the user's pre-order notifications, then bounce back.

    'deleted' pre-orders are removed for good; everything else is flagged
    as 'viewed'.  Always redirects to the referring page.
    """
    try:
        for preorder in PreOrderModels.objects.filter(user=request.user):
            if preorder.status == 'deleted':
                preorder.delete()
            else:
                preorder.status = 'viewed'
                preorder.save()
        return redirect(request.META.get('HTTP_REFERER'))
    except:
        # Best-effort: on any failure just send the user back.
        return redirect(request.META.get('HTTP_REFERER'))
{
"api_name": "django.shortcuts.render",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 36,
"usage_type": "call"
},
{
... |
11936950572 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 14:18:28 2018
@author: shuyun
"""
from sklearn.metrics import classification_report
import scikitplot as skplt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
import os
import re
import matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from scipy.stats import kde
import seaborn as sns
from matplotlib.ticker import NullFormatter
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family']='sans-serif'
def model_profit_report_classifier(y_true, y_probas):
    """Build a decile-style profit report for a binary classifier.

    For thresholds at the 90th, 80th, ..., 0th percentile of *y_probas*,
    everything above the threshold is predicted positive, and precision,
    recall and lift are tabulated for the resulting cut.

    :param y_true: true binary labels (0/1).
    :param y_probas: predicted positive-class probabilities/scores.
    :return: DataFrame with columns Decile, cul_total, cul_positive,
        cul_negative, Precision, Recall, Lift (all formatted as strings
        except the counts).
    """
    result_df = pd.DataFrame({'y_probas': y_probas, 'y_true': y_true})
    report_list = []
    # Percentile cut points: 90, 80, ..., 10, 0.
    threshold_list = ((np.linspace(9, 0, 10)) * 10).tolist()
    for i in threshold_list:
        thres_value = np.percentile(result_df['y_probas'], i)
        result_df['y_pred'] = 0
        result_df.loc[result_df['y_probas'] > thres_value, 'y_pred'] = 1
        tn, fp, fn, tp = confusion_matrix(result_df['y_true'], result_df['y_pred']).ravel()
        # Collect the metrics for this decile.
        true_precision = (tp + fn) / (tn + fp + fn + tp)  # base rate of positives
        Decile = (int(100 - i))
        cul_positive = tp
        cul_negative = fp
        cul_total = cul_positive + cul_negative
        Recall = tp / (tp + fn)
        Precision = tp / (tp + fp)
        Lift = Precision / true_precision
        Decile = str(Decile) + '%'
        Recall = format(Recall, '.2%')
        Precision = format(Precision, '.2%')
        # NOTE(review): Lift is formatted as a percentage here but as a plain
        # '.2f' number in model_profit_report_regression -- confirm intent.
        Lift = format(Lift, '.2%')
        report_list.append([Decile, cul_total, cul_positive, cul_negative, Precision, Recall, Lift])
    model_profit_report_classifier_df = pd.DataFrame(report_list,
                                                     columns=["Decile", "cul_total", "cul_positive", "cul_negative", "Precision",
                                                              "Recall", "Lift"])
    return model_profit_report_classifier_df
def plot_feature_importance(
        feature_importance,
        feature_top_num=10,
        type='PredictionValuesChange'
):
    """Horizontal-bar plot of feature importances.

    :param feature_importance: DataFrame with 'feature_name' and
        'feature_importance' columns.
    :param feature_top_num: number of rows kept for the plot.
    :param type: label used in the plot title (shadows the builtin ``type``).
    :return: the truncated, sorted DataFrame that was plotted.

    NOTE(review): sorting ascending and then taking ``head()`` selects the
    *least* important features -- for a "top N" plot the sort should likely
    be descending before ``head``.  Confirm intent before changing.
    """
    feature_importance = feature_importance.sort_values(by='feature_importance', ascending=True).head(feature_top_num)
    feature_importance.reset_index(inplace=True, drop=True)
    # NOTE(review): plt.show() before anything is plotted flushes prior figures.
    plt.show()
    rcParams.update({'figure.autolayout': True})
    ax = feature_importance.plot('feature_name', 'feature_importance', kind='barh', legend=False, color='c')
    ax.set_title("Feature Importance using {}".format(type), fontsize=14)
    ax.set_xlabel("Importance")
    ax.set_ylabel("Features")
    # plt.show()
    # plt.tight_layout()
    # plt.savefig('test.png', bbox_inches="tight")
    # plt.show()
    # plt.savefig(feature_importance_path)
    return feature_importance
def plot_abs_error_recall(y_true, y_pred, errors_show_list=None, plot=True):
    """Fraction of predictions within each absolute-error threshold.

    :param y_true: true values (numpy array).
    :param y_pred: predicted values (numpy array, same length).
    :param errors_show_list: error thresholds to evaluate (default [1000]).
    :param plot: when True, draw the error-vs-precision curve with matplotlib.
    :return: DataFrame with columns ['error', 'precision'].

    Fixes: mutable default argument replaced by a None sentinel; the
    absolute-error vector is computed once instead of per threshold.
    """
    if errors_show_list is None:
        errors_show_list = [1000]
    total_count = y_true.shape[0]
    abs_err = np.abs(y_true - y_pred)
    precisions = [np.sum(abs_err <= err) / total_count for err in errors_show_list]
    res = pd.DataFrame({'error': errors_show_list, 'precision': precisions},
                       columns=['error', 'precision'])
    if plot:
        plt.plot(res.error, res.precision, '-')
        plt.title('Error vs Precision')
        plt.xlabel('Error')
        plt.ylabel('Precision')
    return res
def plot_abs_percent_error_recall(y_true, y_pred, errors_show_list=None, plot=True):
    """Fraction of predictions whose relative error is within each threshold.

    Relative error is |y_true - y_pred| / y_true (y_true must be non-zero).

    :param errors_show_list: thresholds to evaluate (default 0.0..2.0 step 0.1).
    :param plot: when True, draw the curve with matplotlib.
    :return: DataFrame with columns ['error_rate', 'precision'].

    Fixes: mutable ndarray default argument replaced by a None sentinel; the
    stray ``plt.show()`` issued *before* plotting (which flushed an empty
    figure) was removed, matching plot_abs_error_recall.
    """
    if errors_show_list is None:
        errors_show_list = np.arange(0, 2.1, 0.1)
    error_rate = np.abs(y_true - y_pred) / y_true
    total_count = error_rate.shape[0]
    precisions = [np.sum(error_rate <= err) / total_count for err in errors_show_list]
    res = pd.DataFrame({'error_rate': errors_show_list, 'precision': precisions},
                       columns=['error_rate', 'precision'])
    if plot:
        plt.plot(res.error_rate, res.precision, '-')
        plt.title('Error_rate vs Precision')
        plt.xlabel('Error')
        plt.ylabel('Precision')
    return res
def plot_errorsCDF(error):
    """Plot the empirical CDF of *error* and return it as a DataFrame.

    The 60th and 70th percentiles are highlighted on the curve.

    :param error: array-like of error values.
    :return: DataFrame with columns ['percentile', 'error'] (101 rows, 0-100).
    """
    qs = np.linspace(0, 100, 101)
    es = np.percentile(error, q=qs)
    es_sp = np.percentile(error, q=[60, 70])
    # NOTE(review): plt.show() before plotting flushes prior figures; title
    # 'milein' looks like a typo (runtime string, left unchanged here).
    plt.show()
    plt.plot(es, qs, '-')
    plt.plot(es_sp, [60, 70], 'o', color='r')
    plt.text(es_sp[0], 60, '60% -> {:.2f}'.format(es_sp[0]))
    plt.text(es_sp[1], 70, '70% -> {:.2f}'.format(es_sp[1]))
    plt.title('CDF of milein error')
    return pd.DataFrame({'percentile': qs, 'error': es}, columns=['percentile', 'error'])
def plot_pred_vs_true(y_true, y_pred):
    """Scatter predicted vs. true values with a seaborn regression fit.

    :param y_true: true values.
    :param y_pred: predicted values.
    :return: (DataFrame with 'true'/'pred' columns, the seaborn axes object).
    """
    re_df = pd.DataFrame({'true': y_true, 'pred': y_pred})
    plt.subplots(1, 1)
    sns_plot = sns.regplot(y_true, y_pred)
    plt.xlabel("true")
    plt.ylabel("pred")
    plt.title("true vs pred")  # You can comment this line out if you don't need title
    # plt.show(sns_plot)
    return re_df, sns_plot
def plot_cumulative_gains_regression(y_true, y_pred, title='Cumulative Gains Curve',
                                     ax=None, figsize=None, title_fontsize="large",
                                     text_fontsize="medium"):
    """Plot the cumulative gains curve for a regression model.

    Samples are ranked by descending prediction; the curve shows the share
    of total true value captured vs. the share of the sample inspected.

    :param ax: existing matplotlib axes to draw on; a new figure is created
        when None.
    :return: DataFrame with 'percentages'/'gains' columns resampled at
        11 evenly spaced percentiles (the gains table).
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    percentages, gains = cumulative_gain_curve_regression(y_true, y_pred)
    # NOTE(review): plt.show() before plotting flushes prior figures.
    plt.show()
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.set_title(title, fontsize=title_fontsize)
    ax.plot(percentages, gains, lw=3, label='Class {}'.format(''))
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.0])
    # Diagonal baseline = random ordering.
    ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')
    ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)
    ax.set_ylabel('Gain', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.grid('on')
    ax.legend(loc='lower right', fontsize=text_fontsize)
    # Gains table, resampled at deciles.
    qs = np.linspace(0, 100, 11)
    percentages_resize = np.percentile(percentages, q=qs)
    gains_resize = np.percentile(gains, q=qs)
    gains_reports = pd.DataFrame({'percentages': percentages_resize,
                                  'gains': gains_resize})
    return gains_reports
def model_profit_report_regression(y_true, y_pred):
    """Decile-style profit report for a regression model.

    Samples are ranked by descending prediction; for each decile of the
    cumulative true-value curve the report shows the cumulative count, sum,
    average, recall (share of total captured) and lift over random ordering.

    :return: DataFrame with columns Decile, cul_num, cul_sum, cul_avg,
        Recall, Lift (values formatted as strings except cul_num).
    """
    frame = pd.DataFrame({'y_pred': y_pred, 'y_true': y_true})
    frame = frame.sort_values(by='y_pred', ascending=False)
    frame['y_true_cumsum'] = frame['y_true'].cumsum()
    deciles = np.linspace(1, 10, 10) * 0.1
    grand_total = frame['y_true'].sum()
    # Cumulative-sum value reached at each decile ('higher' snaps to a real row).
    cum_sum = frame['y_true_cumsum'].quantile(q=deciles, interpolation='higher')
    cum_num = cum_sum.apply(lambda v: (frame['y_true_cumsum'] <= v).sum())
    cum_avg = cum_sum / cum_num
    recall_num = cum_sum / grand_total
    lift_num = recall_num / deciles
    report = pd.DataFrame({
        "Decile": pd.Series(deciles, index=deciles).apply(lambda x: format(x, '.0%')),
        "cul_num": cum_num,
        "cul_sum": cum_sum.apply(lambda x: format(x, ',.2f')),
        "cul_avg": cum_avg.apply(lambda x: format(x, ',.2f')),
        "Recall": recall_num.apply(lambda x: format(x, '.2%')),
        "Lift": lift_num.apply(lambda x: format(x, '.2f'))})
    return report
def cumulative_gain_curve_regression(y_true, y_pred, pos_label=None):
    """Return (percentages, gains) points of the cumulative gain curve.

    Samples are ranked by descending prediction; ``gains`` is the cumulative
    share of the total true value captured and ``percentages`` the share of
    the sample inspected.  Both arrays start with a leading 0.
    ``pos_label`` is accepted for signature compatibility and unused.
    """
    truth = np.asarray(y_true)
    scores = np.asarray(y_pred)
    order = np.argsort(scores)[::-1]
    ranked_truth = truth[order]
    gains = np.cumsum(ranked_truth) / float(np.sum(ranked_truth))
    percentages = np.arange(1, len(ranked_truth) + 1) / float(len(ranked_truth))
    gains = np.insert(gains, 0, [0])
    percentages = np.insert(percentages, 0, [0])
    return percentages, gains
| stevenleejun/automl_pred_lily | utils/utils_model_ana.py | utils_model_ana.py | py | 7,746 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nu... |
74532598432 | import openai, sys
# SECURITY NOTE(review): never commit a real API key; load it from an
# environment variable instead of this placeholder string.
openai.api_key = "Your API key"
# Read the prompt text from the file named on the command line.
with open(str(sys.argv[1])) as f:
    content = f.read()
# Ask gpt-3.5-turbo to answer the prompt as "a computer programmer".
completion = openai.ChatCompletion.create(
    model = "gpt-3.5-turbo",
    temperature = 0.2,  # low temperature: mostly deterministic answers
    max_tokens = 3333,
    messages = [
        {"role": "system", "content": "You are a computer programmer"},
        {"role": "user", "content": content}
    ]
)
# Print only the assistant's reply text.
print(completion.choices[0].message["content"])
| yefeiw/chatgpt-jiuzhang | api/chatgpt.py | chatgpt.py | py | 419 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openai.api_key",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "openai.Ch... |
8316907207 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
class RF_plotter(object):
    """Plotting helpers for visualising a fitted regression model
    (e.g. a random forest) against its dataset."""

    def __init__(self,features,target,model):
        '''
        :param features: the feature for the samples: numpy vector
        :param target: target values for the samples: numpy vector
        :param model: the Decision tree model
        '''
        self.features = features
        self.target = target
        self.model = model

    def scatter_2d(self):
        '''
        plots the scattered data with the fitting curve in 2-D target function of feature
        :return: matplotlib figure
        '''
        fig = plt.figure()
        # Sort by feature value so the prediction curve is drawn left-to-right.
        indx_features = self.features.flatten().argsort()
        plt.scatter(self.features,self.target, c ='steelblue', edgecolors='white', s= 70)
        plt.plot(self.features[indx_features],self.model.predict(self.features[indx_features]), color='black', lw =2)
        r2 = r2_score(self.target, self.model.predict(self.features))
        return fig, r2

    def scatter_3d(self):
        '''
        plots the scattered data with the fitting surface in 3-D target function of two feature
        :return: matplotlib figure, ax ,and R2 value
        '''
        # Regular grid over both feature ranges to evaluate the model surface.
        xx, yy = np.meshgrid(np.arange(self.features[:, 0].min(), self.features[:, 0].max(), 0.01),
                             np.arange(self.features[:, 1].min(), self.features[:, 1].max(), 0.01))
        X_model = np.array([xx.ravel().tolist(), yy.ravel().tolist()]).T
        zz = self.model.predict(X_model).reshape(xx.shape)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(self.features[:, 0], self.features[:, 1], self.target, c='steelblue', marker='o',
                   label='Dataset')
        r2 = r2_score(self.target,self.model.predict(self.features))
        ax.plot_surface(xx, yy, zz, color='green', alpha=0.5)
        return fig, ax, r2

    def residual(self,X_train,X_test,y_train,y_test,feature, target):
        '''
        plot the residual as relative error
        :param X_train: training set of features
        :param X_test: testing set of features
        :param y_train: train set of target
        :param y_test: test set of target
        :param feature: feature name : string
        :param target: target name : string
        :return: matplotlib figure
        '''
        y_train_pred = self.model.predict(X_train)
        y_test_pred = self.model.predict(X_test)
        # residual plot where we simply subtract the true target variables from the predicted responses
        fig = plt.figure()
        # plt.scatter(y_train_pred, ((y_train_pred - y_train) / y_train) * 100, c='steelblue', marker='o',
        #            edgecolors='white', label=
        #            'training data')
        plt.scatter(y_train_pred, (y_train_pred - y_train), c='steelblue', marker='o',
                    edgecolors='white', label=
                    'training data')
        # plt.scatter(y_test_pred, ((y_test_pred - y_test) / y_test) * 100, c='limegreen', marker='s', edgecolors='white',
        #            label='test data')
        plt.scatter(y_test_pred,(y_test_pred - y_test), c='limegreen', marker='s', edgecolors='white',
                    label='test data')
        MSE_train = mean_squared_error(y_train,y_train_pred)
        MSE_test = mean_squared_error(y_test,y_test_pred)
        plt.xlabel('y_predict')
        plt.ylabel('Residual')
        plt.title('feature(' + feature + ') target(' + target + ')\n' + 'MSE_train: {:1.3E}'.format(MSE_train)
                  + ' MSE_test: {:1.3E}'.format(MSE_test))
        # Zero line: points on it are perfectly predicted.
        plt.axhline(y=0.0, color='black', linestyle='-')
        return fig

    def predict_vs_exact(self,X_train,X_test,y_train,y_test,feature, target):
        '''
        scatter predicted vs exact target values for train and test sets,
        with MSE and R2 summarised in the title
        :return: matplotlib figure
        '''
        y_train_pred = self.model.predict(X_train)
        y_test_pred = self.model.predict(X_test)
        r2 = r2_score(y_test, y_test_pred)
        fig = plt.figure()
        plt.scatter(y_train,y_train_pred, c='steelblue', marker='o',
                    edgecolors='white', label=
                    'training data')
        plt.scatter(y_test, y_test_pred, c='lightgreen', marker='s',
                    edgecolors='white', label=
                    'testing data')
        plt.xlabel('exact')
        plt.ylabel('predicted')
        MSE_train = mean_squared_error(y_train, y_train_pred)
        MSE_test = mean_squared_error(y_test, y_test_pred)
        plt.title('feature(' + feature + ') target(' + target + ')\n' + 'MSE_train: {:1.3E}'.format(MSE_train)
                  + ' MSE_test: {:1.3E}'.format(MSE_test)+'\n dataset size: training: '+str(y_train.shape[0])
                  + ' testing: '+str(y_test.shape[0])+'\n'+'R2: {:.3f}'.format(r2))
        return fig

    def feature_importance(self,feat_labels):
        '''
        bar chart of the model's feature importances, sorted descending
        :param feat_labels: feature names, index-aligned with the model inputs
        :return: matplotlib figure, axes, and {label: importance} dict
        '''
        # importance of features
        importances = self.model.feature_importances_
        indices = np.argsort(importances)[::-1]
        sorted_feature_labels = [feat_labels[i] for i in indices]
        feat_imp = {}
        for f in range(len(feat_labels)):
            string = "%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]])
            print(string)
            feat_imp[feat_labels[indices[f]]]= importances[indices[f]]
        fig, ax = plt.subplots()
        # Bars show importance as a percentage.
        bars = ax.bar(range(len(feat_labels)),importances[indices]*100,align = 'center')
        plt.xticks(range(len(feat_labels)),sorted_feature_labels,rotation = 90)
        plt.xlim([-1,len(feat_labels)])
        plt.ylim([0, 110])

        def autolabel(rects):
            """
            Attach a text label above each bar displaying its height
            """
            for rect in rects:
                height = rect.get_height()
                ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                        '{:.1f}'.format(height),
                        ha='center', va='bottom')
        autolabel(bars)
        return fig, ax, feat_imp

    def MSE (self,X_train,X_test,y_train,y_test):
        '''
        compute train/test mean squared error and the test R2 score
        :return: (MSE_train, MSE_test, r2)
        '''
        y_train_pred = self.model.predict(X_train)
        y_test_pred = self.model.predict(X_test)
        r2 = r2_score(y_test, y_test_pred)
        MSE_train = mean_squared_error(y_train, y_train_pred)
        MSE_test = mean_squared_error(y_test, y_test_pred)
        return MSE_train, MSE_test, r2
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "... |
8076964586 | import tkinter as tk
import matplotlib
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
class Application(tk.Frame):
    """Tk frame embedding an empty matplotlib figure plus a quit button."""

    def __init__(self, master=None):
        # Select the Tk backend before any figure object is created.
        matplotlib.use('TkAgg')
        super().__init__(master)
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        """Build the figure canvas (grid row 0) and the QUIT button (row 2)."""
        self.fig = Figure(figsize=(5, 4), dpi=100)
        self.canvas = FigureCanvasTkAgg(self.fig, self)
        self.canvas.draw()
        self.canvas.get_tk_widget().grid(row=0, columnspan=3)
        # NOTE: destroys the module-level root window, not just this frame.
        self.QUIT = tk.Button(self, text="QUIT", fg="red", command=root.destroy)
        self.QUIT.grid(row=2)
# Create the main window, mount the application frame, and enter the Tk
# event loop (blocks until the window is closed).
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| radovan-urban/MeC_python | tools/tkinter_matplot3.py | tkinter_matplot3.py | py | 809 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame.__init__",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame"... |
33010746822 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from . import views, viewsets
# DRF router: generates the standard list/detail CRUD routes for both viewsets.
router = DefaultRouter()
router.register('questions', viewsets.QuestionViewSet, basename='question')
router.register('answers', viewsets.AnswerViewSet, basename='answer')

# URL namespace for reverse() lookups, e.g. 'Questans_API_v1:question-answers'.
app_name = 'Questans_API_v1'

urlpatterns = [
    path('', include(router.urls)),
    # Nested / action routes not covered by the router:
    path('questions/<str:slug>/answers/', views.QuestionAnswersListAPIView.as_view(), name='question-answers'),
    path('questions/<str:slug>/upvote-toggle/', views.QuestionUpvoteToggleAPIView.as_view(), name='question-upvote-toggle'),
    path('questions/<str:slug>/downvote-toggle/', views.QuestionDownvoteToggleAPIView.as_view(), name='question-downvote-toggle'),
    path('answers/<int:pk>/upvote-toggle/', views.AnswerUpvoteToggleAPIView.as_view(), name='answer-upvote-toggle'),
    path('answers/<int:pk>/downvote-toggle/', views.AnswerDownvoteToggleAPIView.as_view(), name='answer-downvote-toggle'),
    path('answers/<int:pk>/accept-toggle/', views.AnswerAcceptToggleAPIView.as_view(), name='answer-accept-toggle')
]
| Seth250/qenea-backend | questans/api/v1/urls.py | urls.py | py | 1,100 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name... |
70271059555 | # -*- coding: utf-8 -*-
# This software and supporting documentation are distributed by
# Institut Federatif de Recherche 49
# CEA/NeuroSpin, Batiment 145,
# 91191 Gif-sur-Yvette cedex
# France
#
# This software is governed by the CeCILL license version 2 under
# French law and abiding by the rules of distribution of free software.
# You can use, modify and/or redistribute the software under the
# terms of the CeCILL license version 2 as circulated by CEA, CNRS
# and INRIA at the following URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license version 2 and that you accept its terms.
# TODO :
# Pourvoir selectionner par selection multiple dans le listview les actions a cocher
# bouton inverser la selection au lieu de checkbox select all
# interface pour check database : plusieurs colonnes : fichier, type, action(s) (conversion, referentiel)
# possibilite de visualiser la donnee
# filtres pour modifier la vue
from __future__ import absolute_import
from brainvisa.configuration import neuroConfig
# WITH NEW DATABASE SYSTEM ####
from brainvisa.data.qt4gui.diskItemBrowser import DiskItemBrowser
import brainvisa.processing.qtgui.backwardCompatibleQt as qt
from soma.qt_gui.qt_backend import uic
import os
from brainvisa.data import neuroDiskItems, neuroHierarchy
from brainvisa.data.actions import Move, Remove, FileProcess, ImportData
import sys
import six
ICON_SIZE = 16
class ActionsWidget(qt.QDialog):

    """
    A widget to present a list of file with suggested action associated.
    Each action is (de)selectable.
    The user can choose to run all actions now or later (= ok and cancel button of the dialog)
    """

    def __init__(self, processor, parent=None, uiFile='actions.ui'):
        """Load *uiFile* and populate the actions tree from *processor*.

        *processor* exposes ``components`` (name -> component with a
        ``fileProcesses`` list) and a top-level ``fileProcesses`` list.
        """
        qt.QDialog.__init__(self, parent)
        # by default the dialog is not modal (doesn't block
        # waiting user action)
        layout = qt.QVBoxLayout(self)
        self.setLayout(layout)
        p = os.path.join(os.path.dirname(__file__), uiFile)
        #os.path.join( neuroConfig.mainPath, 'actions.ui' )
        self.ui = qt.QWidget(self)
        uic.loadUi(p, self.ui)
        layout.addWidget(self.ui)
        # change the instruction bar title
        self.titleLabel = self.ui.titleLabel
        self.titleLabel.setText(_t_('Suggested actions on database files :'))
        # actions list
        self.actionsList = self.ui.actionsList
        # self.actionsList.setSorting(-1) # disable sort
        self.actionsList.setHeaderLabels([_t_("File"), _t_("Action")])
        self.actionsList.setIconSize(qt.QSize(ICON_SIZE, ICON_SIZE))
        self.actionsList.setContextMenuPolicy(qt.Qt.CustomContextMenu)
        # item=None
        directory = None
        # One grouping node per processor component that has work to do,
        # with its file actions nested underneath.
        for name, component in processor.components.items():
            if component.fileProcesses != []:
                directory = DirectoryWidget(
                    name, self.actionsList, directory, "toolbox.png")
                # item=None
                item = self.addActions(
                    directory, None, component.fileProcesses)
        # Top-level file processes go after the component groups.
        item = self.addActions(
            self.actionsList, directory, processor.fileProcesses)
        # buttons to run actions
        self.runNowButton = self.ui.runNowButton
        self.runNowButton.setText(_t_("Run now"))
        self.runNowButton.clicked.connect(self.runNow)
        self.runNowButton.setToolTip(
            _t_("Executes checked actions immediatly"))
        self.runLaterButton = self.ui.runLaterButton
        self.runLaterButton.setText(_t_("Run later"))
        self.runLaterButton.clicked.connect(self.runLater)
        self.runLaterButton.setToolTip(
            _t_("Executes checked actions at the end of the pipeline. Does nothing outside of the pipeline."))
        # button to invert the state of selected check box item
        self.selectButton = self.ui.selectButton
        self.selectButton.setText(_t_("Check/Uncheck selection"))
        self.selectButton.clicked.connect(self.invertSelection)
        self.selectButton.setToolTip(
            _t_("Inverts the state of selected items"))
        # print "item added"
        self.resize(850, 600)

    def runNow(self):
        # Dialog result 1 == execute the checked actions immediately.
        self.done(1)

    def runLater(self):
        # Dialog result 2 == defer execution to the end of the pipeline.
        self.done(2)

    def invertSelection(self):
        """Toggle the check state of every currently selected action item."""
        it = qt.QTreeWidgetItemIterator(
            self.actionsList, qt.QTreeWidgetItemIterator.Selected)
        while it.value():
            # Only action items carry a ``model`` (FileProcess); directory
            # grouping nodes are skipped.
            if getattr(it.value(), "model", None):
                if (it.value().checkState(0) == qt.Qt.Checked):
                    it.value().setCheckState(0, qt.Qt.Unchecked)
                    it.value().model.selected = False
                else:
                    it.value().setCheckState(0, qt.Qt.Checked)
                    it.value().model.selected = True
            it += 1

    def addActions(self, parent, after, actions):
        """Recursively insert *actions* under *parent*, after item *after*.

        *actions* is either a list (of FileProcess instances or (key, value)
        pairs) or a mapping attribute -> nested actions; keys become
        DirectoryWidget grouping nodes.  Returns the last item added.
        """
        item = after
        if type(actions) is list:
            for action in actions:
                if isinstance(action, FileProcess):
                    item = ActionWidget(action, parent, item)
                else:  # it can be a map
                    for key, value in action:
                        item = DirectoryWidget(key, parent, item)
                        self.addActions(item, None, value)
        else:  # it is a map attribute -> map or list of FileProcess
            for key, value in actions.items():
                item = DirectoryWidget(key, parent, item)
                self.addActions(item, None, value)
        return item
#
class DirectoryWidget(qt.QTreeWidgetItem):
    """Expanded tree item used as a directory-like grouping node."""

    defaultIcon = "folder.png"

    def __init__(self, name, parent, after=None, icon=defaultIcon):
        # ``after`` is accepted for signature compatibility with sibling
        # widgets but not used here.
        qt.QTreeWidgetItem.__init__(self, parent)
        self.setText(0, name)
        self.setExpanded(True)
        self.setIcon(0, qt.QIcon(os.path.join(neuroConfig.iconPath, icon)))
#
class ActionWidget(qt.QTreeWidgetItem):

    """
    Item in an ActionsList.
    Shows a file with associated action.
    """

    def __init__(self, fileProcess, parent, after=None):
        self.model = fileProcess
        qt.QTreeWidgetItem.__init__(self, parent)
        self.setText(0, fileProcess.filePattern())
        self.setToolTip(0, fileProcess.filePattern())
        if fileProcess.action is None:
            # Nothing to do: the file is already correct.
            icon = "ok.png"
        else:
            icon = fileProcess.action.icon
            self.setText(1, six.text_type(fileProcess.action))
            self.setToolTip(
                1, fileProcess.tooltip + " " + six.text_type(fileProcess.action))
        self.setIcon(1, qt.QIcon(os.path.join(neuroConfig.iconPath, icon)))
        self.setExpanded(True)
        self.setCheckState(0, qt.Qt.Checked)

    def stateChange(self, state):
        # Keep the underlying FileProcess in sync with the check box.
        self.model.selected = state

    def setAction(self, action):
        """Attach *action* to the file process and refresh icon and labels."""
        self.model.action = action
        self.setIcon(1, qt.QIcon(os.path.join(neuroConfig.iconPath, action.icon)))
        self.setText(1, six.text_type(self.model.action))
        self.setToolTip(
            1, self.model.tooltip + " " + six.text_type(self.model.action))
#
class UnknownFilesWidget(ActionsWidget):

    """
    Widget that presents a list of unknown files and proposes 2 actions on these files : remove and move.
    It is possible to choose the same action for each file with the buttons remove all and move all.
    It is possible to change the action for a partcular file with context menu.
    For move action, default destination is the database directory.
    """

    def __init__(self, processor, parent=None):
        """
        @type processor: DBCleaner
        @param processor: the database cleaner that find unknown files in the database.
        """
        ActionsWidget.__init__(self, processor, parent)
        self.defaultDest = processor.dbDir
        self.database = processor.db
        # change the instruction bar title
        self.titleLabel.setText(
            _t_('Choose actions for unknown files in the database :'))
        # add buttons to change all actions remove all and move all
        # there is two frames action1 and action2 to enable to add some buttons.
        # Else I cannot add a button to the widget at a right place, added
        # button are always on existing buttons...
        self.removeAllButton = qt.QPushButton(
            _t_("Remove all"), self.ui.action1)
        self.removeAllButton.clicked.connect(self.removeAll)
        self.moveAllButton = qt.QPushButton(_t_("Move all"), self.ui.action2)
        self.moveAllButton.clicked.connect(self.moveAll)
        # add a right click menu to change action for a particular file
        self.popupMenu = qt.QMenu()
        pix = qt.QIcon(os.path.join(neuroConfig.iconPath, Remove.icon))
        self.popupMenu.addAction(pix, "Remove", self.menuRemoveEvent)
        pix = qt.QIcon(os.path.join(neuroConfig.iconPath, Move.icon))
        self.popupMenu.addAction(pix, "Move", self.menuMoveEvent)
        pix = qt.QIcon(os.path.join(neuroConfig.iconPath, ImportData.icon))
        self.popupMenu.addAction(pix, "Import", self.menuImportEvent)
        self.actionsList.customContextMenuRequested.connect(
            self.openContextMenu)

    def openContextMenu(self, pos):
        """
        Called on contextMenuRequested signal. It opens the popup menu at cursor position.
        """
        self.popupMenu.exec(qt.QCursor.pos())

    def menuRemoveEvent(self):
        # Attach a Remove action to the item under the cursor.
        item = self.actionsList.currentItem()
        if item:
            action = Remove(os.path.dirname(item.model.file))
            item.setAction(action)

    def menuMoveEvent(self):
        # Attach a Move action to the current item; the user picks the
        # destination directory in a file dialog.
        item = self.actionsList.currentItem()
        if item:
            # open a dialog to choose where to move
            # getExistingDirectory ( QWidget * parent = 0, const QString &
            # caption = QString(), const QString & dir = QString(), Options
            # options = ShowDirsOnly )
            dest = six.text_type(
                qt.QFileDialog.getExistingDirectory(
                    self, _t_("Choose a directory for destination : "),
                    self.defaultDest, qt.QFileDialog.ShowDirsOnly | qt.QFileDialog.DontUseNativeDialog))
            action = Move(dest)
            item.setAction(action)

    def menuImportEvent(self):
        """
        Called when user choose to import unidentified file in the database.
        """
        item = self.actionsList.currentItem()
        # NOTE(review): the three locals below are never used -- leftovers.
        selectedType = None
        selectedFormat = None
        selectedAttributes = {}
        if item:
            # if the current action associated to this item is already
            # ImportData, get current parameter to initialize the
            # diskItemBrowser
            if isinstance(item.model.action, ImportData) and item.model.action.dest:
                action = item.model.action
                defaultValue = action.dest
            else:
                action = ImportData(item.model.diskItem, None)
                defaultValue = item.model.diskItem
            selection = defaultValue.hierarchyAttributes()
            if defaultValue.type is None:
                selection['_type'] = 'Any Type'
            else:
                selection['_type'] = defaultValue.type.name
            if defaultValue.format is None:
                selection['_format'] = None
            else:
                selection['_format'] = defaultValue.format.name
            self.importDialog = DiskItemBrowser(neuroHierarchy.databases, self, write=True, selection=selection, required={
                                                '_type': selection['_type'], '_format': selection['_format'], 'database': self.database.name})
            self.importDialog.setWindowTitle(_t_(selection['_type']))
            self.importDialog.accepted.connect(
                lambda item=item, action=action:
                self.importDialogAccepted(item, action))
            self.importDialog.show()

    def importDialogAccepted(self, item, action):
        # Bind the destination DiskItem picked in the browser to the pending
        # ImportData action.
        values = self.importDialog.getValues()
        if len(values) > 0:
            action.dest = values[0]
            item.setAction(action)

    def removeAll(self):
        """
        Called when the user click on remove all button. Set action Remove on all unknown file.
        """
        it = qt.QTreeWidgetItemIterator(self.actionsList)
        while it.value():
            action = Remove(os.path.dirname(it.value().model.file))
            it.value().setAction(action)
            it += 1

    def moveAll(self):
        """
        Called when the user click on move all button. Set action Move on all unknown file.
        """
        # open a dialog to choose where to move
        dest = six.text_type(
            qt.QFileDialog.getExistingDirectory(
                self, _t_("Choose a directory for destination : "),
                self.defaultDest, qt.QFileDialog.ShowDirsOnly | qt.QFileDialog.DontUseNativeDialog))
        it = qt.QTreeWidgetItemIterator(self.actionsList)
        while it.value():
            action = Move(
                os.path.join(dest, os.path.basename(it.value().model.file)))
            it.value().setAction(action)
            it += 1
#
class CheckFilesWidget(ActionsWidget):
    """
    Widget to present checked database files.
    There are several columns to provide information about database items : filename, format, type, suggested action.
    If a file is correct, there is no action associated : an icon "ok" is displayed.
    This widget is based on check.ui qt designer file.
    The checked files are grouped by filters attributes : these attributes are displayed as directories in the listview.
    """

    def __init__(self, processor, parent=None):
        """
        @type processor: DBChecker
        @param processor: the database checker that checks database files and can suggest actions if some files are incorrect.
        """
        super(CheckFilesWidget, self).__init__(processor, parent, "check.ui")
        # actions list
        self.actionsList.setHeaderLabels(
            [_t_("File"), _t_("Type"), _t_("Format"), _t_("Action")])

    def addActions(self, parent, after, actions):
        """
        This method is redefined because items are different from
        ActionsWidget items (more columns to fill).  *actions* is either a
        list of FileProcess/dicts or a dict attribute -> children.
        Returns the last item created, so callers can keep inserting after it.
        """
        item = after
        if type(actions) is list:
            for action in actions:
                if isinstance(action, FileProcess):
                    item = CheckFileWidget(action, parent, item)
                else:  # it can be a map
                    # BUG FIX: iterating a dict directly yields keys only, so
                    # unpacking (key, value) raised at runtime; use .items()
                    # like the dict branch below.
                    for key, value in action.items():
                        if key:
                            item = DirectoryWidget(key, parent, item)
                            self.addActions(item, None, value)
                        else:
                            item = self.addActions(parent, item, value)
        else:  # it is a map attribute -> map or list of FileProcess
            for key, value in actions.items():
                if key:
                    item = DirectoryWidget(key, parent, item)
                    self.addActions(item, None, value)
                else:
                    item = self.addActions(parent, item, value)
        return item
#
class CheckFileWidget(qt.QTreeWidgetItem):
    """
    Item in an CheckFilesWidget.
    For each checked file, show filename, type, format and associated action (or "ok" icon if there is no action).
    """

    def __init__(self, fileProcess, parent, after=None):
        # keep the model before initialising the Qt item so callbacks fired
        # during construction can already read it
        self.model = fileProcess
        qt.QTreeWidgetItem.__init__(self, parent)
        self.setText(0, os.path.basename(fileProcess.diskItem.name))
        self.setToolTip(0, self.text(0))
        self.setText(1, six.text_type(fileProcess.diskItem.type))
        self.setText(2, six.text_type(fileProcess.diskItem.format))
        if fileProcess.action is not None:
            icon = fileProcess.action.icon
            self.setText(3, six.text_type(fileProcess.action))
        else:  # there's nothing to do because the file is correct
            icon = "ok.png"
        pix = qt.QIcon(os.path.join(neuroConfig.iconPath, icon))
        self.setIcon(3, pix)
        if fileProcess.selected:
            self.setCheckState(0, qt.Qt.Checked)  # the item is selected

    def stateChange(self, state):
        # mirror the checkbox state into the underlying FileProcess
        self.model.selected = state
        # TODO

    def setAction(self, action):
        """Attach *action* to the file and refresh the action column."""
        self.model.action = action
        pix = qt.QIcon(os.path.join(neuroConfig.iconPath, action.icon))
        self.setIcon(3, pix)
        self.setText(3, six.text_type(self.model.action))
| brainvisa/axon | python/brainvisa/data/qt4gui/databaseCheckGUI.py | databaseCheckGUI.py | py | 17,800 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "brainvisa.processing.qtgui.backwardCompatibleQt.QDialog",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "brainvisa.processing.qtgui.backwardCompatibleQt",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "brainvisa.processing.qtgui.backwardC... |
73811257633 | import json
import math
import sys
import operator
# Naive Bayes text classifier: loads per-class word counts from a JSON model
# file and labels each line of the test file with the most probable class.
model_file = sys.argv[1]
test_file = sys.argv[2]
f_test = open(test_file, 'r')
f_model = open(model_file, 'r')
#sys.stdout = open("spam.out", 'w')
Dict = json.load(f_model)
d = {}    # total word count per class
P_c = {}  # log prior of each class
classes = []
N = 0 #total number of documents
correct = 0
# vocabulary size, used for add-one (Laplace) smoothing
k = int(Dict['~_VocabularySize'])
# keys starting with '*' encode per-class metadata ('*_<class>_Count')
for t, v in Dict.items():
    ClassName, WordName = t.split('_',1)
    if ClassName == '*':
        i, j = WordName.split('_', 1)
        if i not in classes:
            classes.append(i)
for CLASS1 in classes:
    N = N + Dict['*_' + CLASS1 + '_' + 'Count']
for CLASS in classes:
    d[CLASS] = 0
    P_c[CLASS] = math.log(Dict['*_' + CLASS + '_' + 'Count']/N)
for x, y in Dict.items(): #total number for each class
    ClassName1, WordName1 = x.split('_',1)
    if ClassName1 != '*' and ClassName1 != '~':
        d[ClassName1] = d[ClassName1] + y
for line in f_test:
    P = {}
    w1 = ''
    # replace punctuation with spaces and lowercase before tokenising
    for ch in '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~':
        line = line.replace(ch, ' ')
    line = line.lower()
    words = line.split()
    for s in classes:
        P_d_c =0
        for w in words:
            w1 = s + '_' + w
            if w1 in Dict:
                # add-one smoothed log likelihood of the word given the class
                P_w1_c = math.log((Dict[w1] + 1) / (d[s] + k))
                P_d_c = P_d_c + P_w1_c
            else:
                # unseen word: smoothed with an extended vocabulary of k + 1
                P_w1_c = math.log(1 / (d[s] + k + 1 ))
                P_d_c = P_d_c + P_w1_c
        P[s] = P_d_c + P_c[s]
    # class with the highest posterior log probability wins
    Result = max(P.items(), key=operator.itemgetter(1))[0]
    print(Result)#modify
#sys.stdout.close()
f_test.close()
f_model.close()
dingyi567
| dingyi567/CSCI-544 | hw1/nbclassify.py | nbclassify.py | py | 1,542 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 34,... |
32689227188 | from django.conf.urls import patterns, url
from users import views
urlpatterns = patterns('',
url(r'^$',views.index, name='index'),
url(r'^(?P<user_id>\d+)/$', views.detail, name='detail'),
url(r'^attendance/$', views.attendance, name='attendance'),
url(r'^notifications/$', views.notifications, name='notification'),
url(r'^dues/$', views.dues, name='dues'),
#url(r'^inbox/$', views.inbox, name='inbox'),
) | dissipator/campsia | users/urls.py | urls.py | py | 432 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "users.views.index",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "us... |
25646973960 | import arxiv
import urllib
import pdfx
import re
import spacy
from spacy.lang.fr.examples import sentences
from xml.sax.saxutils import escape
import sys
def after_references(mypdftext):
    """Return the text that follows the bibliography marker.

    Markers are checked in priority order and the first one present in the
    text splits it; everything after that marker is returned.  When no
    marker is found, the first 10000 characters of the text are returned.
    """
    markers = ('References', 'REFERENCES', 'R EFERENCES', 'Reference', '[1]')
    for marker in markers:
        if marker in mypdftext:
            _, _, tail = mypdftext.partition(marker)
            return tail
    return mypdftext[:10000]
def extract(text: str):
    """Return the de-duplicated PERSON entities found in *text*.

    Each name is lemmatised and lower-cased, then re-capitalised with
    underscores replacing spaces and newlines; first-appearance order is
    preserved.
    """
    pipeline = spacy.load('en_core_web_sm')
    parsed = pipeline(text.strip())
    people = []
    for ent in parsed.ents:
        lemma = str(ent.lemma_).lower()
        # NOTE(review): this rebinds only the local string and is never read
        # again afterwards — it has no effect on the returned value.
        text = text.replace(str(ent).lower(), "")
        if ent.label_ in ["PERSON"]:
            people.append(lemma.title().replace(" ", "_").replace("\n", "_"))
    return list(dict.fromkeys(people))
def pdf_arxiv():
    """Fetch the most recent arXiv result for the query, download its PDF
    to ``Test.pdf`` and return the text extracted from it.
    """
    # BUG FIX: the file only does `import urllib`, which does not guarantee
    # that the `urllib.request` submodule is loaded; import it explicitly.
    import urllib.request

    search = arxiv.Search(
        query="computer science & ai",
        # id_list=["1605.08386v1"]
        max_results=1,
        sort_by=arxiv.SortCriterion.SubmittedDate
    )
    for result in search.results():
        print("Titre :", result.title)
        print("Lien :", result.pdf_url)
    for result in search.results():
        author = result.authors
        # keep only alphanumerics in author names
        author_list = [re.sub("[^A-Za-z0-9]", "_", str(i)) for i in author]
        print("Autheurs :", author_list)
        response = urllib.request.urlopen(result.pdf_url)
        # `with` guarantees the PDF file is closed even if the write fails
        with open("Test" + ".pdf", 'wb') as file:
            file.write(response.read())
    # read the downloaded PDF
    pdf = pdfx.PDFx("Test.pdf")
    metadata = pdf.get_metadata()
    references_dict = pdf.get_references_as_dict()
    text = pdf.get_text()
    return text
text=pdf_arxiv()
references = after_references(text)
references_list = extract(references)
print("5 premiers noms cités dans les références :", references_list[:5]) | Caojerem/Projet_fil_rouge_SIO_2022 | Docker/fil_rouge.py | fil_rouge.py | py | 2,426 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spacy.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "arxiv.Search",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "arxiv.SortCriterion",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_n... |
1857536637 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 14:41:11 2019
@author: Kumail
"""
import pandas
import numpy as np
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression

# Train a multinomial logistic regression on the iris data set, report
# train/test accuracy, then classify a flower typed in by the user.
dataset = pandas.read_csv("../datasets/iris.csv")
array = dataset.values
X = array[:,0:4]  # sepal/petal measurements
Y = array[:,4]    # species label
validation_size = 0.29
seed = 7  # fixed seed so the split and the solver are reproducible
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
clf = LogisticRegression(max_iter=1000, random_state=seed, solver='lbfgs', multi_class='multinomial').fit(X_train, Y_train)
predict = clf.predict(X_test[:5, :])
actual = Y_test[:5]
scoreTest = round(clf.score(X_test, Y_test)*100, 1)
scoreTrain= round(clf.score(X_train, Y_train)*100, 1)
print(f"Prediction: {predict}")
print(f"Actual: {actual}")
print(f"Training Score: {scoreTrain}%")
print(f"Test Score: {scoreTest}%")
# BUG FIX: iris measurements are fractional (e.g. "5.1"), so int(input(...))
# raised ValueError on typical input; parse the values as float instead
# (whole-number input such as "5" still parses).
sepal_length = float(input("Enter sepal length: "))
sepal_width = float(input("Enter sepal width: "))
petal_length = float(input("Enter petal length: "))
petal_width = float(input("Enter petal width: "))
inputData = np.array([sepal_length, sepal_width, petal_length, petal_width]).reshape(1,-1)
newPredict = clf.predict(inputData)[0]
print(f"Prediction is: {newPredict}") | KumailP/machine-learning-dsu | linear-models/LogisticRegression.py | LogisticRegression.py | py | 1,257 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection",
"line_number": 21,
"usage_type": "name"
},
{
"... |
27195496579 | import mesa
from agents import TreeAgent, Seed
from model import DispersalModel
from mesa.visualization.modules import CanvasGrid, ChartModule
from pointpats import PointPattern
import pointpats.quadrat_statistics as qs
def get_mean_nnd(self):
    """Label showing the mean nearest-neighbour distance between seeds."""
    seed_positions = [
        [x, y]
        for (agents, x, y) in self.grid.coord_iter()
        if any(type(agent) is Seed for agent in agents)
    ]
    pattern = PointPattern(seed_positions)
    return f"Distância Média do Vizinho Mais Próximo: {round(pattern.mean_nnd,1)}"
def get_max_nnd(self):
    """Label showing the largest nearest-neighbour distance between seeds."""
    seed_positions = [
        [x, y]
        for (agents, x, y) in self.grid.coord_iter()
        if any(type(agent) is Seed for agent in agents)
    ]
    pattern = PointPattern(seed_positions)
    return f"Distância Máxima do Vizinho Mais Próximo: {round(pattern.max_nnd,1)}"
def get_min_nnd(self):
    """Label showing the smallest nearest-neighbour distance between seeds."""
    seed_positions = [
        [x, y]
        for (agents, x, y) in self.grid.coord_iter()
        if any(type(agent) is Seed for agent in agents)
    ]
    pattern = PointPattern(seed_positions)
    return f"Distância Mínima do Vizinho Mais Próximo: {round(pattern.min_nnd,1)}"
COLORS = {"Fine": "darkgreen", "Contaminated": "yellow"}
def seed_dispersal_portrayal(agent):
    """Return the Mesa drawing instructions for *agent* (None for no agent)."""
    if agent is None:
        return
    portrayal = {}
    if type(agent) is TreeAgent:
        portrayal = {
            "Shape": "resources/tree.png",
            "Filled": "true",
            "Layer": 0,
            "w": 1,
            "h": 1,
        }
    elif type(agent) is Seed:
        portrayal = {
            "Color": COLORS[agent.condition],
            "Shape": "circle",
            "r": 0.4,
            "Filled": "true",
            "Layer": 0,
            "w": 1,
            "h": 1,
        }
    return portrayal
# 20x20 grid rendered in a 500x500 pixel canvas
canvas_element = CanvasGrid(seed_dispersal_portrayal, 20, 20, 500, 500)
#chart_element = ChartModule([{"Label": "mdvp", "Color": "black"}])
# Launch the Mesa visualisation server with the three nearest-neighbour
# distance text elements shown alongside the grid.
server = mesa.visualization.ModularServer(
    DispersalModel, [get_min_nnd, get_mean_nnd, get_max_nnd, canvas_element],
    "Dispersão de Propágulos - Mortalidade Dependente de Densidade")
server.port = 8521
| higuchip/density-dependent-mortality | server.py | server.py | py | 2,262 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "agents.Seed",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pointpats.PointPattern",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "agents.Seed",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pointpats.PointPattern... |
10327528775 | from itertools import product
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from .tools import transform_ts
from .generator import node_name
class VAR():
    """Vector autoregressive model of order ``p`` fitted by least squares."""

    def __init__(self, p):
        # p: number of lagged time steps used as regressors
        self.p = p
        self.is_fitted = False

    def fit(self, ts_data):
        """Fit all VAR coefficients by OLS on the lag-expanded data."""
        _, data_matrix = transform_ts(ts_data, self.p)
        self.dim = ts_data.shape[1]
        self.length = data_matrix.shape[0]
        # first `dim` columns are the targets, the remaining ones the lags
        y = data_matrix[:, :self.dim]
        X = data_matrix[:, self.dim:]
        # prepend an all-ones column for the intercept term
        X_ = np.insert(X, 0, 1, axis=1)
        self.params = np.linalg.lstsq(X_, y, rcond=1e-15)[0]
        self.free_params = self.params.size
        self.residuals = y - np.dot(X_, self.params)
        self.sse = np.dot(self.residuals.T, self.residuals)
        # self.sigma_u = self.sse/(self.length - self.dim*self.p - 1)
        self.sigma_u = self.sse/self.length
        self.is_fitted = True

    def fit_from_graph(self, dim, data_matrix, graph, mapping=None):
        """Fit coefficients restricted to the edges of a causal *graph*.

        Only graph-predecessors of a node are used as regressors for that
        variable; all other coefficients stay zero.  *mapping* translates
        variable indices to graph node names (identity-by-enumeration when
        omitted).
        """
        self.dim = dim
        self.length = data_matrix.shape[0]
        self.inputs = []
        self.params = np.zeros((self.dim, self.dim * self.p + 1))
        self.free_params = 0
        if mapping is None:
            mapping = {i: n for i, n in enumerate(graph.nodes())}
        inverted_mapping = {v: k for k, v in mapping.items()}
        for x_t in range(self.dim):
            input_nodes = list(graph.predecessors(mapping[x_t]))
            inputs = np.array([inverted_mapping[x] for x in input_nodes])
            y = data_matrix[:, x_t]
            if input_nodes:
                X = data_matrix[:, inputs]
                # column 0 is the intercept, hence the +1 offset shift
                positions = np.insert(inputs - self.dim + 1, 0, 0)
            else:
                # no predecessors: intercept-only regression
                X = np.array([[]]*len(data_matrix))
                positions = np.array([0])
            X_ = np.insert(X, 0, 1, axis=1)
            params = np.linalg.lstsq(X_, y, rcond=1e-15)[0]
            self.params[x_t, positions] = params
            self.free_params += params.size
        self.params = self.params.T
        y = data_matrix[:, :self.dim]
        X = data_matrix[:, self.dim: self.dim*(self.p+1)]
        X_ = np.insert(X, 0, 1, axis=1)
        self.residuals = y - np.dot(X_, self.params)
        self.sse = np.dot(self.residuals.T, self.residuals)
        # not sure whether this is ok
        self.sigma_u = self.sse/self.length
        self.is_fitted = True

    def to_graph(self, threshold=0.1):
        """Build a DiGraph with one edge per coefficient above *threshold*."""
        end_nodes = ['X{}_t'.format(i) for i in range(self.dim)]
        start_nodes = ['X{}_t-{}'.format(j, i) for i, j in product(range(self.p, 0, -1), range(self.dim))]
        # drop the intercept row: only lag coefficients become edges
        A = self.params[1:]
        assert A.shape == (len(start_nodes), len(end_nodes))
        estimated_graph = nx.DiGraph()
        node_ids = np.array([[(d, l) for l in range(self.p, -1, -1)] for d in range(self.dim)])
        estimated_graph.add_nodes_from([node_name(d, l) for d, l in
                                        np.reshape(node_ids, (self.dim * (self.p+1), 2))])
        for i in range(len(start_nodes)):
            for j in range(len(end_nodes)):
                if np.abs(A[i][j]) > threshold:
                    estimated_graph.add_edge(start_nodes[i], end_nodes[j], weight=A[i][j])
        return estimated_graph

    def evaluate_test_set(self, start_values, test_data):
        """Return ``(mse, bic)`` of the fitted model on held-out *test_data*.

        *start_values* supplies the lagged observations needed to predict
        the first rows of *test_data*.
        """
        dim = test_data.shape[1]
        variables = pd.concat([start_values, test_data])
        _, data_matrix = transform_ts(variables, self.p)
        y = data_matrix[:, :dim]
        X_ = np.insert(data_matrix[:, dim:], 0, 1, axis=1)
        predictions = np.dot(X_, self.params)
        mse = mean_squared_error(y, predictions)
        residuals = y - predictions
        sse = np.dot(residuals.T, residuals)
        sigma_u = sse / len(test_data)
        _, ll = np.linalg.slogdet(sigma_u)
        bic = self._bic(ll, self.free_params, len(test_data))
        return mse, bic

    def information_criterion(self, ic, offset=0, free_params=None):
        """Return the requested criterion ('bic' | 'aic' | 'hqic') of the fit."""
        if not self.is_fitted:
            raise Exception('model is not fitted')
        ll = self._log_likelihood()
        if free_params is None:
            free_params = self.free_params
        nobs = self.length-offset
        if ic == 'bic':
            return self._bic(ll, free_params, nobs)
        elif ic == 'aic':
            return self._aic(ll, free_params, nobs)
        elif ic == 'hqic':
            return self._hqic(ll, free_params, nobs)
        else:
            raise Exception('unknown information criterion')

    def _bic(self, ll, free_params, nobs):
        # Bayesian information criterion (log-likelihood form)
        return ll + (np.log(nobs) / nobs) * free_params

    def _aic(self, ll, free_params, nobs):
        # Akaike information criterion
        return ll + (2. / nobs) * free_params

    def _hqic(self, ll, free_params, nobs):
        # Hannan-Quinn information criterion
        return ll + (2. * np.log(np.log(nobs)) / nobs) * free_params

    def _log_likelihood(self):
        # log-determinant of the residual covariance matrix
        _, logdet = np.linalg.slogdet(self.sigma_u)
        return logdet
| danthe96/CIoTS | CIoTS/simple_var.py | simple_var.py | py | 4,928 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "tools.transform_ts",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.insert",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.lstsq",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
... |
33277836844 | # -*-coding:utf-8-*-
__author__ = 'hank'
import os
import json
class ReadConfig(object):
    """Locate the project-level ``config`` directory and read JSON files from it."""

    def __init__(self):
        # directory the process was started from
        self.local = os.path.abspath('.')
        # its parent, treated as the project root
        self.father_path = os.path.dirname(self.local)
        # JSON configuration files live under <project root>/config/
        self.json_path = self.father_path + "/config/"

    def read_json(self, json_name):
        """Return the parsed content of ``<json_path>/<json_name>``."""
        full_path = self.json_path + json_name
        with open(full_path, "r", encoding='utf8', errors='ignore') as handle:
            return json.load(handle)
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
35360372876 | import cv2
import numpy as np
import dlib
import matplotlib.pyplot as plt
import sys
import face_recognition
import imutils.face_utils
from imutils import face_utils
#import imutils.face_utils

# Read the input image in greyscale (flag 0 == cv2.IMREAD_GRAYSCALE).
imq = cv2.imread('C:\\Users\\moham\\Pictures\\cv_DP2jpg.jpg', 0)
# BUG FIX: the original called cv2.imshow('image', img) but no name `img`
# exists anywhere in the script — the loaded image is `imq`.
cv2.imshow('image', imq)
# block until a key is pressed so the window stays visible
cv2.waitKey(0)
# normalise pixel values to [0, 1] as float32 for the gradient computation
im = np.float32(imq)/255.0
depth = cv2.CV_32F
# The Sobel operator is a discrete differentiation operator: it computes an
# approximation of the gradient of the image intensity function.
gx = cv2.Sobel(im, depth, 1, 0, ksize=1)
gy = cv2.Sobel(im, depth, 0, 1, ksize=1)
# magnitude and angle of the 2-D gradient vectors
mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
# plot the gradient magnitude
plt.figure(figsize=(12,8))
plt.imshow(mag)
plt.show()
# Face detection.  dlib.get_frontal_face_detector() returns the pre-trained
# HOG + linear-SVM detector: fast and efficient, but not invariant to
# rotation/viewing-angle changes.  For more robust detection the MMOD CNN
# detector (dlib.cnn_face_detection_model_v1(modelPath)) could be used
# instead, at the cost of loading a pre-trained .dat model file.
face_detect = dlib.get_frontal_face_detector()
rects = face_detect(imq, 1)  # upsample once so smaller faces are found
for (i, rect) in enumerate(rects):
    # convert dlib's rectangle to an (x, y, w, h) bounding box
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    cv2.rectangle(imq, (x, y), (x + w, y + h), (0, 255, 128), 3)
plt.figure(figsize=(12, 8))
plt.imshow(imq, cmap='gray')  # cmap is a colormap instance
plt.show()
# RdBu_r, RdGy RdGy_r RdYlBu_r RdYlBu | Ziaf007/Attendance-using-Facial_recognition | Detection.py | Detection.py | py | 2,226 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": ... |
6257445976 | from art import logo
def resources_sufficient(order_ingredients):
    """Return True when every required ingredient is in stock, else False.

    Prints a message naming the first ingredient that is short.
    """
    for name, amount in order_ingredients.items():
        if amount > resources[name]:
            print(f"Sorry there is not enough {name}. Please make another choice.")
            return False
    return True
def process_coins():
    """Prompt for each coin denomination and return the total in dollars."""
    print("Please insert coins.")
    quarters = int(input("how many quarters? "))
    dimes = int(input("how many dimes? "))
    nickels = int(input("how many nickels? "))
    pennies = int(input("how many pennies? "))
    return quarters * 0.25 + dimes * 0.10 + nickels * 0.05 + pennies * 0.01
def transaction_successful(money_received, drink_cost):
    """Return True if payment is accepted, False if funds are insufficient.

    On success the module-level ``profit`` grows by the drink cost and the
    change is printed; on failure the money is "refunded" (nothing kept).
    """
    global profit
    if money_received < drink_cost:
        print("Sorry that's not enough money. Money refunded.")
        return False
    change = round(money_received - drink_cost, 2)
    print(f"Here is ${change:.2f} in change.")
    profit += drink_cost
    return True
def make_coffee(drink_name, order_ingredients):
    """Deduct the recipe's ingredients from the machine stock and serve."""
    for ingredient, amount in order_ingredients.items():
        resources[ingredient] -= amount
    print(f"Here is your {drink_name} ☕️. Enjoy!")
# Recipe book: required ingredients (ml/g) and price (USD) per drink.
MENU = {
    "espresso": {
        "ingredients": {
            "water": 50,
            "coffee": 18,
        },
        "cost": 1.5,
    },
    "latte": {
        "ingredients": {
            "water": 200,
            "milk": 150,
            "coffee": 24,
        },
        "cost": 2.5,
    },
    "cappuccino": {
        "ingredients": {
            "water": 250,
            "milk": 100,
            "coffee": 24,
        },
        "cost": 3.0,
    }
}

# Current stock of the machine, mutated by make_coffee().
resources = {
    "water": 300,
    "milk": 200,
    "coffee": 100,
}

profit = 0  # money collected so far, mutated by transaction_successful()
machine_on = True
# Main loop: "off" powers down, "report" prints stock/profit, anything else
# is treated as a drink order.
while machine_on:
    print(logo)
    choice = input("What would you like? (espresso/latte/cappuccino): ")
    if choice == "off":
        machine_on = False
        print("Coffee machine powering down. Please remove any perishable resources.")
    elif choice == "report":
        print(f"Water: {resources['water']}ml")
        print(f"Milk: {resources['milk']}ml")
        print(f"Coffee: {resources['coffee']}g")
        print(f"Money: ${profit:.2f}")
    else:
        try:
            drink = MENU[choice]
            if resources_sufficient(drink['ingredients']):
                print(f"Price: ${drink['cost']:.2f}")
                payment = process_coins()
                if transaction_successful(payment, drink['cost']):
                    make_coffee(choice, drink['ingredients'])
        except KeyError:
            # the typed choice is not a key of MENU
            print("Not a valid choice. Please try again.")
| wintermute111/100DaysOfPython | Day015/coffee-machine/main.py | main.py | py | 2,888 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "art.logo",
"line_number": 79,
"usage_type": "argument"
}
] |
22056165678 | import json
import os
import click
from twarc.decorators2 import FileSizeProgressBar
from twarc.expansions import ensure_flattened
@click.command()
@click.option(
    '--granularity',
    '-g',
    type=click.Choice(
        ['year', 'month', 'day', 'hour', 'minute', 'second'],
        case_sensitive=False
    ),
    default='day',
    show_default=True,
    help='Granularity of temporal windows'
)
@click.argument('infile', type=click.File('r'), default='-')
@click.argument('outdir', type=click.Path(exists=False))
def divide(granularity, infile, outdir):
    '''
    Divides the input file in an output file for each temporal window.
    Each output file contains the tweets published in its temporal window.
    Output files are saved in the output directory,
    and their names are the dates of the temporal windows.
    If an output file already exists, new output is appended to the old one.
    '''
    # Number of leading characters of an ISO-8601 timestamp
    # ("YYYY-MM-DDTHH:MM:SS") that identifies each granularity.
    indices = {
        'year': 4,
        'month': 7,
        'day': 10,
        'hour': 13,
        'minute': 16,
        'second': 19
    }
    index = indices[granularity]
    with FileSizeProgressBar(infile, None) as progress:
        for line in infile:
            for t in ensure_flattened(json.loads(line)):
                date = t['created_at'][:index]
                # append mode: re-running adds to existing window files.
                # NOTE(review): the output file is re-opened for every tweet;
                # fine for moderate volumes, slow for very large inputs.
                with open(os.path.join(outdir, date + '.jsonl'), 'a') as out:
                    out.write(json.dumps(t) + '\n')
                progress.update(len(line))
| JoanMassachs/twarc-divide | twarc_divide.py | twarc_divide.py | py | 1,448 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "twarc.decorators2.FileSizeProgressBar",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "twarc.expansions.ensure_flattened",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
... |
72322916513 | # -*- coding:utf-8 -*-
from __future__ import print_function
import numpy
import theano.tensor as T
import cv2
import cPickle
numpy.random.seed(1337) # for reproducibility
from PIL import Image
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, LSTM
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
img_rows, img_cols = 143, 143
nb_classes = 2
nb_epoch = 60
batch_size = 10
def loadData(file):
    """Load the pickled (train, validation, test) splits stored in *file*."""
    with open(file, 'r') as f:
        train_x, train_y = cPickle.load(f)
        valid_x, valid_y = cPickle.load(f)
        test_x, test_y = cPickle.load(f)
    return [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
'''
def Net_model():
input = Input(shape=(img_rows, img_cols))
lstm = LSTM(64)
#lstm = LSTM(32, return_state=True)(lstm)
#lstm = LSTM(32, return_state=True)(lstm)
hidden = Dense(1000, activation='relu')
out = Dropout(0.5)
predictions = Dense(nb_classes, activation = 'softmax')
model = Model(inputs = input, output=predictions)
model.compile(optimizer=['rmsprop'],
loss = 'categorical_crossentropy',
metrix = ['accuracy'])
return model
'''
def Net_model():
    """Build and compile the stacked-LSTM classifier.

    Three LSTM layers feed a dense ReLU layer and a softmax output over
    ``nb_classes`` classes; dropout is applied twice for regularisation.
    """
    model = Sequential([
        LSTM(32, return_sequences=True, input_shape=(img_rows, img_cols)),
        LSTM(128, return_sequences=True),
        Dropout(0.5),
        LSTM(256),
        Dense(1000, activation='relu'),
        Dropout(0.5),
        Dense(nb_classes, activation='softmax', name='output_layer'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
def train_model(model, x_train, y_train, x_val, y_val):
    """Fit *model* and persist its weights to ./weights/model_weights.h5."""
    model.fit(x_train, y_train, batch_size = batch_size, epochs = nb_epoch,
              verbose=1, validation_data=(x_val, y_val))
    model.save_weights('./weights/model_weights.h5', overwrite = True)
def test_model(model, x_test, y_test):
    """Load the trained weights and report loss/accuracy on the test set."""
    model.load_weights('./weights/model_weights.h5')
    # BUG FIX: Model.evaluate takes (x, y, ...); the model itself was being
    # passed as the first positional (x) argument, which fails at runtime.
    score = model.evaluate(x_test, y_test)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    return score
if __name__ == '__main__':
    # load the pre-split data and reshape to (samples, rows, cols) as the
    # LSTM expects a 2-D sequence per sample
    (X_train, Y_train), (X_val, Y_val), (X_test, Y_test) = loadData('./data/Data2.pkl')
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols)
    X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_val.shape[0], 'validate samples')
    print(X_test.shape[0], 'test samples')
    # one-hot encode the integer class labels
    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_val = np_utils.to_categorical(Y_val, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)
    model = Net_model()
    train_model(model, X_train, Y_train, X_val, Y_val)
    print (' the code is ok!!!')
| jsxxj/RBF_DCNN | MCNN/LSTM.py | LSTM.py | py | 3,202 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cPickle.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cPickle.load",
"li... |
6661308948 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # default Django administration section
    path('admin/', admin.site.urls),
    # routes for the project's apps
    path('', include('Rest_app.urls')),
    # rest_framework authentication routes
    path('api-auth/', include('rest_framework.urls')),
    path('dj-rest-auth/', include('dj_rest_auth.urls')),
]
| Rip4568/veiculo_django_project | Veiculos_project/urls.py | urls.py | py | 405 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dja... |
26597490747 | from rest_framework import viewsets, decorators, status
from rest_framework.response import Response
from django.db import transaction
from geoplaces.models import Place
from geoplaces.serializers import PlaceSerializer, PlaceIncreaseRatingSerializer
from geoplaces.filters import GeoPlacesFilter
class PlaceViewSet(viewsets.ReadOnlyModelViewSet):
    """
    API endpoint that allows places to be viewed or searched.

    Include additional query parameters to perform filter on the objects:

     - text: str
     - location: geojson
     - distance: float, in meters
     - rating: int, in range 1..5
     - tags_titles: list of str
     - types_titles: list of str
     - priceranges_titles: list of str
    """

    # prefetch the many-to-many relations to avoid N+1 queries when
    # serialising the list
    queryset = (
        Place.objects.all()
        .prefetch_related("tags", "types", "priceranges")
        .order_by("-updated_at")
    )
    serializer_class = PlaceSerializer
    filter_backends = [GeoPlacesFilter]

    @decorators.action(
        detail=True,
        methods=["POST"],
        url_name="submit-rating",
        serializer_class=PlaceIncreaseRatingSerializer,
    )
    def submit_rating(self, request, pk=None):
        """Record a new rating for this place and refresh its aggregate."""
        serializer = PlaceIncreaseRatingSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        place = self.get_object()
        # update the place rating with moving average over last ratings;
        # the save and the aggregate refresh must succeed or fail together
        with transaction.atomic():
            serializer.save(place=place)
            place.update_rating().save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
| marqueewinq/klubok | klubok/geoplaces/views.py | views.py | py | 1,575 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "geoplaces.models.Place.objects.all",
"line_number": 26,
"usage_... |
40967112979 | from collections import deque
from lib.Intcode2 import Intcode
from lib.utils import print_array

# Advent of Code 2019, day 19 part 2: probe the tractor beam row by row and
# find the first MIN_SIZE x MIN_SIZE square that fits entirely inside it.
SIZE = 30
xy = [[-1] * SIZE for i in range(SIZE)]
with open("data/19.txt") as f:
    _program = list(map(int, f.readline().split(",")))
min_x = 0  # left edge of the beam on the current row
max_x = 0  # right edge of the beam on the current row
y = 0
count = 0
MIN_SIZE = 100  # required square side
size_flag = False
arr_y = []  # (min_x, max_x) of the beam for every row scanned so far
while True:
    # print(y)
    hash_flag = False  # True once the beam has been entered on this row
    x = min_x
    if max_x < min_x:
        max_x = min_x
    while x < max_x + 20:
        # each probe needs a fresh machine: the program halts after one output
        computer = Intcode(_program)
        computer.set_input([x, y])
        out = computer.run()
        # xy[y][x] = out
        if out == 1 and not hash_flag:
            # entered the beam: record the left edge, then skip ahead to just
            # past the previous row's right edge
            hash_flag = True
            min_x = x
            x = max_x
            x += 1
            continue
        # if max_x - min_x > MIN_SIZE:
        #     size_flag = True
        #     print(y)
        if out == 0 and hash_flag:
            # left the beam: record the right edge for this row
            max_x = x
            break
        x += 1
    arr_y.append((min_x, max_x))
    if y > MIN_SIZE:
        # compare this row's left edge with the right edge MIN_SIZE - 1 rows
        # earlier: if the square fits, its top-left corner is (x3, y-99)
        x1, x2 = arr_y[y - MIN_SIZE + 1]
        x3, x4 = min_x, max_x
        if x3 + MIN_SIZE <= x2:
            print("RESULT:", x3, y - MIN_SIZE + 1)
            print("RESULT calculated:", x3 * 10000 + (y - MIN_SIZE + 1))
            break
    y += 1
# print_array(xy) | szerlak/advent_of_code | 2019/19.py | 19.py | py | 1,242 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "lib.Intcode2.Intcode",
"line_number": 29,
"usage_type": "call"
}
] |
11510553212 | # Released under the MIT License. See LICENSE for details.
#
"""Defines ScoreBoard Actor and related functionality."""
from __future__ import annotations
import weakref
from typing import TYPE_CHECKING
import bascenev1 as bs
if TYPE_CHECKING:
from typing import Any, Sequence
class _Entry:
    """One team's row on the scoreboard.

    Owns the backing image, the animated score bar, the optional cover
    image (used in teams sessions), and the name/score text nodes.
    """

    def __init__(
        self,
        scoreboard: Scoreboard,
        team: bs.Team,
        do_cover: bool,
        scale: float,
        label: bs.Lstr | None,
        flash_length: float,
    ):
        # pylint: disable=too-many-statements
        # Weak-ref back to the owning scoreboard so we don't keep it alive.
        self._scoreboard = weakref.ref(scoreboard)
        self._do_cover = do_cover
        self._scale = scale
        self._flash_length = flash_length
        self._width = 140.0 * self._scale
        self._height = 32.0 * self._scale
        self._bar_width = 2.0 * self._scale
        self._bar_height = 32.0 * self._scale
        self._bar_tex = self._backing_tex = bs.gettexture('bar')
        self._cover_tex = bs.gettexture('uiAtlas')
        self._mesh = bs.getmesh('meterTransparent')
        # Filled in by set_position(); needed for bar animation in set_value().
        self._pos: Sequence[float] | None = None
        self._flash_timer: bs.Timer | None = None
        self._flash_counter: int | None = None
        self._flash_colors: bool | None = None
        # Last score shown; None until set_value() is first called.
        self._score: float | None = None
        safe_team_color = bs.safecolor(team.color, target_intensity=1.0)

        # FIXME: Should not do things conditionally for vr-mode, as there may
        # be non-vr clients connected which will also get these values.
        vrmode = bs.app.env.vr

        if self._do_cover:
            if vrmode:
                self._backing_color = [0.1 + c * 0.1 for c in safe_team_color]
            else:
                self._backing_color = [0.05 + c * 0.17 for c in safe_team_color]
        else:
            self._backing_color = [0.05 + c * 0.1 for c in safe_team_color]

        # NOTE(review): both branches of the vr check yield 0.8 here, so the
        # vrmode condition is currently a no-op - confirm intent.
        opacity = (0.8 if vrmode else 0.8) if self._do_cover else 0.5
        self._backing = bs.NodeActor(
            bs.newnode(
                'image',
                attrs={
                    'scale': (self._width, self._height),
                    'opacity': opacity,
                    'color': self._backing_color,
                    'vr_depth': -3,
                    'attach': 'topLeft',
                    'texture': self._backing_tex,
                },
            )
        )

        self._barcolor = safe_team_color
        self._bar = bs.NodeActor(
            bs.newnode(
                'image',
                attrs={
                    'opacity': 0.7,
                    'color': self._barcolor,
                    'attach': 'topLeft',
                    'texture': self._bar_tex,
                },
            )
        )

        # The bar's scale and position are driven through 'combine' nodes so
        # they can be animated smoothly from set_value().
        self._bar_scale = bs.newnode(
            'combine',
            owner=self._bar.node,
            attrs={
                'size': 2,
                'input0': self._bar_width,
                'input1': self._bar_height,
            },
        )
        assert self._bar.node
        self._bar_scale.connectattr('output', self._bar.node, 'scale')
        self._bar_position = bs.newnode(
            'combine',
            owner=self._bar.node,
            attrs={'size': 2, 'input0': 0, 'input1': 0},
        )
        self._bar_position.connectattr('output', self._bar.node, 'position')

        self._cover_color = safe_team_color
        if self._do_cover:
            self._cover = bs.NodeActor(
                bs.newnode(
                    'image',
                    attrs={
                        'scale': (self._width * 1.15, self._height * 1.6),
                        'opacity': 1.0,
                        'color': self._cover_color,
                        'vr_depth': 2,
                        'attach': 'topLeft',
                        'texture': self._cover_tex,
                        'mesh_transparent': self._mesh,
                    },
                )
            )

        clr = safe_team_color
        # The score text gets the right-hand share of the row width; the
        # name text (below) gets the left-hand share, per score_split.
        maxwidth = 130.0 * (1.0 - scoreboard.score_split)
        flatness = (1.0 if vrmode else 0.5) if self._do_cover else 1.0
        self._score_text = bs.NodeActor(
            bs.newnode(
                'text',
                attrs={
                    'h_attach': 'left',
                    'v_attach': 'top',
                    'h_align': 'right',
                    'v_align': 'center',
                    'maxwidth': maxwidth,
                    'vr_depth': 2,
                    'scale': self._scale * 0.9,
                    'text': '',
                    'shadow': 1.0 if vrmode else 0.5,
                    'flatness': flatness,
                    'color': clr,
                },
            )
        )

        clr = safe_team_color

        team_name_label: str | bs.Lstr
        if label is not None:
            team_name_label = label
        else:
            team_name_label = team.name

            # We do our own clipping here; should probably try to tap into some
            # existing functionality.
            if isinstance(team_name_label, bs.Lstr):
                # Hmmm; if the team-name is a non-translatable value lets go
                # ahead and clip it otherwise we leave it as-is so
                # translation can occur..
                if team_name_label.is_flat_value():
                    val = team_name_label.evaluate()
                    if len(val) > 10:
                        team_name_label = bs.Lstr(value=val[:10] + '...')
            else:
                if len(team_name_label) > 10:
                    team_name_label = team_name_label[:10] + '...'
                team_name_label = bs.Lstr(value=team_name_label)

        flatness = (1.0 if vrmode else 0.5) if self._do_cover else 1.0
        self._name_text = bs.NodeActor(
            bs.newnode(
                'text',
                attrs={
                    'h_attach': 'left',
                    'v_attach': 'top',
                    'h_align': 'left',
                    'v_align': 'center',
                    'vr_depth': 2,
                    'scale': self._scale * 0.9,
                    'shadow': 1.0 if vrmode else 0.5,
                    'flatness': flatness,
                    'maxwidth': 130 * scoreboard.score_split,
                    'text': team_name_label,
                    'color': clr + (1.0,),
                },
            )
        )

    def flash(self, countdown: bool, extra_flash: bool) -> None:
        """Flash momentarily."""
        # Toggle colors every 0.1s; _do_flash counts the toggles down.
        self._flash_timer = bs.Timer(
            0.1, bs.WeakCall(self._do_flash), repeat=True
        )
        if countdown:
            self._flash_counter = 10
        else:
            self._flash_counter = int(20.0 * self._flash_length)
        if extra_flash:
            self._flash_counter *= 4
        self._set_flash_colors(True)

    def set_position(self, position: Sequence[float]) -> None:
        """Set the entry's position."""

        # Abort if we've been killed
        if not self._backing.node:
            return

        self._pos = tuple(position)
        # Backing/cover images are center-anchored; offset by half extents.
        self._backing.node.position = (
            position[0] + self._width / 2,
            position[1] - self._height / 2,
        )
        if self._do_cover:
            assert self._cover.node
            self._cover.node.position = (
                position[0] + self._width / 2,
                position[1] - self._height / 2,
            )
        self._bar_position.input0 = self._pos[0] + self._bar_width / 2
        self._bar_position.input1 = self._pos[1] - self._bar_height / 2
        assert self._score_text.node
        self._score_text.node.position = (
            self._pos[0] + self._width - 7.0 * self._scale,
            self._pos[1] - self._bar_height + 16.0 * self._scale,
        )
        assert self._name_text.node
        self._name_text.node.position = (
            self._pos[0] + 7.0 * self._scale,
            self._pos[1] - self._bar_height + 16.0 * self._scale,
        )

    def _set_flash_colors(self, flash: bool) -> None:
        # Switch all our nodes between normal and brightened (2x) colors.
        self._flash_colors = flash

        def _safesetcolor(node: bs.Node | None, val: Any) -> None:
            # Nodes may already be dead; ignore them in that case.
            if node:
                node.color = val

        if flash:
            scale = 2.0
            _safesetcolor(
                self._backing.node,
                (
                    self._backing_color[0] * scale,
                    self._backing_color[1] * scale,
                    self._backing_color[2] * scale,
                ),
            )
            _safesetcolor(
                self._bar.node,
                (
                    self._barcolor[0] * scale,
                    self._barcolor[1] * scale,
                    self._barcolor[2] * scale,
                ),
            )
            if self._do_cover:
                _safesetcolor(
                    self._cover.node,
                    (
                        self._cover_color[0] * scale,
                        self._cover_color[1] * scale,
                        self._cover_color[2] * scale,
                    ),
                )
        else:
            _safesetcolor(self._backing.node, self._backing_color)
            _safesetcolor(self._bar.node, self._barcolor)
            if self._do_cover:
                _safesetcolor(self._cover.node, self._cover_color)

    def _do_flash(self) -> None:
        # Timer callback: alternate flash colors until the counter runs out.
        assert self._flash_counter is not None
        if self._flash_counter <= 0:
            self._set_flash_colors(False)
        else:
            self._flash_counter -= 1
            self._set_flash_colors(not self._flash_colors)

    def set_value(
        self,
        score: float,
        max_score: float | None = None,
        countdown: bool = False,
        flash: bool = True,
        show_value: bool = True,
    ) -> None:
        """Set the value for the scoreboard entry."""

        # If we have no score yet, just set it.. otherwise compare
        # and see if we should flash.
        if self._score is None:
            self._score = score
        else:
            if score > self._score or (countdown and score < self._score):
                # Extra-long flash when the game is effectively decided:
                # max score reached, or countdown hit zero.
                extra_flash = (
                    max_score is not None
                    and score >= max_score
                    and not countdown
                ) or (countdown and score == 0)
                if flash:
                    self.flash(countdown, extra_flash)
            self._score = score

        if max_score is None:
            # No known maximum: no meaningful fill fraction, so no bar.
            self._bar_width = 0.0
        else:
            if countdown:
                # Countdown bars drain: full at score==max, empty at zero.
                self._bar_width = max(
                    2.0 * self._scale,
                    self._width * (1.0 - (float(score) / max_score)),
                )
            else:
                self._bar_width = max(
                    2.0 * self._scale,
                    self._width * (min(1.0, float(score) / max_score)),
                )

        # Animate the bar toward its new width over 0.25s, keeping its
        # left edge pinned by moving the center along with the scale.
        cur_width = self._bar_scale.input0
        bs.animate(
            self._bar_scale, 'input0', {0.0: cur_width, 0.25: self._bar_width}
        )
        self._bar_scale.input1 = self._bar_height
        cur_x = self._bar_position.input0
        assert self._pos is not None
        bs.animate(
            self._bar_position,
            'input0',
            {0.0: cur_x, 0.25: self._pos[0] + self._bar_width / 2},
        )
        self._bar_position.input1 = self._pos[1] - self._bar_height / 2
        assert self._score_text.node
        if show_value:
            self._score_text.node.text = str(score)
        else:
            self._score_text.node.text = ''
class _EntryProxy:
    """Ties a scoreboard entry's lifetime to its team.

    Stored in the team's customdata; when the team object is garbage
    collected, this proxy schedules removal of the matching entry.
    """

    def __init__(self, scoreboard: Scoreboard, team: bs.Team):
        self._scoreboard = weakref.ref(scoreboard)
        # Keep the plain id rather than a weak-ref: by the time __del__
        # fires the team itself is already dead, but we still need the id
        # to drop its entry.
        self._team_id = team.id

    def __del__(self) -> None:
        board = self._scoreboard()
        if board is None:
            # Scoreboard itself is gone; nothing left to clean up.
            return
        # Defer the removal; we may be dying during a sim step or some
        # other moment where modifying nodes is illegal.
        try:
            bs.pushcall(bs.Call(board.remove_team, self._team_id))
        except bs.ContextError:
            # Fired after the activity expired; in that case there is
            # nothing we need to do.
            pass
class Scoreboard:
    """A display for player or team scores during a game.

    category: Gameplay Classes
    """

    _ENTRYSTORENAME = bs.storagename('entry')

    def __init__(self, label: bs.Lstr | None = None, score_split: float = 0.7):
        """Instantiate a scoreboard.

        Label can be something like 'points' and will
        show up on boards if provided.
        """
        self._flat_tex = bs.gettexture('null')
        self._entries: dict[int, _Entry] = {}
        self._label = label
        self.score_split = score_split

        self._pos: Sequence[float]
        # Free-for-all sessions get a smaller, tighter per-player layout;
        # team sessions use the larger covered style.
        if isinstance(bs.getsession(), bs.FreeForAllSession):
            layout = (False, 35.0, (17.0, -65.0), 0.8, 0.5)
        else:
            layout = (True, 50.0, (20.0, -70.0), 1.0, 1.0)
        (
            self._do_cover,
            self._spacing,
            self._pos,
            self._scale,
            self._flash_length,
        ) = layout

    def set_team_value(
        self,
        team: bs.Team,
        score: float,
        max_score: float | None = None,
        countdown: bool = False,
        flash: bool = True,
        show_value: bool = True,
    ) -> None:
        """Update the score-board display for the given bs.Team."""
        entry = self._entries.get(team.id)
        if entry is None:
            self._add_team(team)
            # Hang a proxy off the team so our entry is cleaned up
            # automatically when the team dies.
            assert self._ENTRYSTORENAME not in team.customdata
            team.customdata[self._ENTRYSTORENAME] = _EntryProxy(self, team)
            entry = self._entries[team.id]
        entry.set_value(
            score=score,
            max_score=max_score,
            countdown=countdown,
            flash=flash,
            show_value=show_value,
        )

    def _add_team(self, team: bs.Team) -> None:
        # Build a fresh entry for this team and re-stack the board.
        if team.id in self._entries:
            raise RuntimeError('Duplicate team add')
        self._entries[team.id] = _Entry(
            self,
            team,
            do_cover=self._do_cover,
            scale=self._scale,
            label=self._label,
            flash_length=self._flash_length,
        )
        self._update_teams()

    def remove_team(self, team_id: int) -> None:
        """Remove the team with the given id from the scoreboard."""
        del self._entries[team_id]
        self._update_teams()

    def _update_teams(self) -> None:
        # Re-stack entries vertically starting from our anchor position.
        xpos, ypos = self._pos[0], self._pos[1]
        step = self._spacing * self._scale
        for entry in list(self._entries.values()):
            entry.set_position((xpos, ypos))
            ypos -= step
| efroemling/ballistica | src/assets/ba_data/python/bascenev1lib/actor/scoreboard.py | scoreboard.py | py | 15,161 | python | en | code | 468 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "bascenev1.Team",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "bascenev1.Lstr",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "weakref.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.