seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19153093382 | from django.db import models
class AudioTourModel(models.Model):
title = models.CharField(max_length=200, blank=False, help_text='Title for audio tour area')
soundcloud_id = models.IntegerField(
blank=False,
help_text='Specific Id from SoundCloud service to embed')
index = models.DecimalField(
blank=False,
unique=True,
decimal_places=2,
max_digits=4,
help_text='Locations will be sorted by this index. \
You can use up to 2 decimal places up to 99.99')
| imagreenplant/beacon-food-forest | tours/models.py | models.py | py | 546 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "... |
42014569567 | import config
from datetime import timedelta, datetime
import covid_data
import plot
import spain_data
AGE_GROUPS = {
"0-9": ["0-9"],
"10-19": ["10-19"],
"20-39": ["20-29", "30-39"],
"40-59": ["40-49", "50-59"],
"60-69": ["60-69"],
"70+": ["70-79", "80+"],
}
AGE_GROUP_COLORS = {
"0-9": "#FF69B4",
"10-19": "#BA55D3",
"20-39": "#00BFFF",
"40-59": "#228B22",
"60-69": "#778899",
"70+": "#000000",
}
def _calculate_relative_to_cases(evolutions):
num_cases = evolutions[config.ORIG_CASES_COL].values
evolutions = evolutions.div(num_cases, axis="index")
del evolutions[config.ORIG_CASES_COL]
return evolutions
def plot_age_evolution(
plot_path,
sex=None,
date_range=None,
rate_by_100k=False,
by_week=False,
age_ranges=None,
community=None,
title=None,
):
params_to_plot = config.ORIG_COUNT_COLS
fig = plot.SharedXFigure(
num_axes=len(params_to_plot), fig_size=config.FOR_PANEL_EVOLUTION_FIG_SIZE
)
for idx, (age_group_name, ages) in enumerate(age_ranges.items()):
if community is None:
evolutions = covid_data.get_evolutions_for_spain(
sex=sex,
date_range=date_range,
rate_by_100k=rate_by_100k,
by_week=by_week,
age_ranges=ages,
)
else:
evolutions = covid_data.get_evolutions_per_param(
by=config.COMMUNITY,
sex=sex,
date_range=date_range,
rate_by_100k=rate_by_100k,
by_week=by_week,
age_ranges=ages,
)
evolutions = {
param: evolution.loc[:, community]
for param, evolution in evolutions.items()
}
res = _plot_evolution_for_age_group(
fig,
evolutions,
rate_by_100k,
age_group_name,
by_week,
date_range,
params_to_plot,
)
axes = fig.get_axes(0)
axes.legend()
if title:
axes.set_title(title)
fig.save_fig(plot_path)
def _plot_evolution_for_age_group(
fig, evolutions, rate_by_100k, age_group_name, by_week, date_range, params_to_plot
):
for idx, param in enumerate(params_to_plot):
axes = fig.get_axes(idx)
evolution = evolutions[param]
if date_range is not None:
evolution = evolution.loc[date_range[0] : date_range[1]]
axes.plot(
evolution.index,
evolution.values,
color=AGE_GROUP_COLORS[age_group_name],
label=age_group_name,
)
ylabel = config.PLOT_PARAM_DESCRIPTIONS[param]
if rate_by_100k:
ylabel = f"{ylabel} (por 1e5 hab.)"
if by_week:
ylabel = f"{ylabel}\n(semanal)"
plot.set_y_label(axes, ylabel)
plot.set_x_ticks_format(axes, 45, ha="right")
return {"axes": axes}
if __name__ == "__main__":
last_date = covid_data.get_last_date_in_dframe()
one_week = timedelta(days=7)
last_date = last_date - one_week
first_date = datetime(2022, 2, 1)
DATE_RANGE = (first_date, last_date)
date_rage_str = f"{first_date.date().day}-{first_date.date().month} al {last_date.date().day}-{last_date.date().month}"
out_dir = config.AGE_COMMUNITY_GROUP_PLOT_DIR
out_dir.mkdir(exist_ok=True)
region_names_per_iso_code = spain_data.CA_NAMES_PER_ISO_CODE
for region_iso, region_name in region_names_per_iso_code.items():
if region_iso == "nd":
continue
plot_path = (
out_dir / f"evolution_per_age_range.{region_name}.{date_rage_str}.png"
)
plot_age_evolution(
plot_path,
by_week=True,
date_range=DATE_RANGE,
age_ranges=AGE_GROUPS,
rate_by_100k=True,
community=region_iso,
title=region_name,
)
if False:
plot_path = out_dir / f"evolution_per_age_range.{region_name}.png"
plot_age_evolution(
plot_path,
by_week=True,
date_range=None,
age_ranges=AGE_GROUPS,
rate_by_100k=True,
community=region_iso,
title=region_name,
)
out_dir = config.AGE_GROUP_PLOT_DIR
out_dir.mkdir(exist_ok=True)
plot_path = out_dir / f"evolution_per_age_range.{date_rage_str}.png"
plot_age_evolution(
plot_path,
by_week=True,
date_range=DATE_RANGE,
age_ranges=AGE_GROUPS,
rate_by_100k=True,
)
plot_path = out_dir / "evolution_per_age_range.png"
plot_age_evolution(
plot_path,
by_week=True,
date_range=None,
age_ranges=AGE_GROUPS,
rate_by_100k=True,
)
| JoseBlanca/covid_situation_spain | src/plot_evolution_per_age_group.py | plot_evolution_per_age_group.py | py | 4,899 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.ORIG_CASES_COL",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "config.ORIG_CASES_COL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "config.ORIG_COUNT_COLS",
"line_number": 46,
"usage_type": "attribute"
},
{
... |
25959933939 | #-*- coding: utf-8 -*-
from datetime import date, timedelta
from django.db import connection, models
from django.utils.translation import ugettext_lazy as _
from twa.utils import DEFAULT_MAX_LENGTH, AbstractModel
class RequestManager( models.Manager ):
def get_query_set( self ):
return super( RequestManager, self ).get_query_set()
def get_user_agents_top_10( self ):
fmt = '%Y-%m-%d'
enddate = date.today()
startdate = enddate - timedelta( 90 )
SQL = 'SELECT COUNT(*) AS c, user_agent'
SQL += ' FROM requests_request'
SQL += " WHERE last_modified BETWEEN '%s' AND '%s'" % ( startdate.strftime( fmt ), enddate.strftime( fmt ) )
SQL += ' GROUP BY user_agent'
SQL += ' ORDER BY c DESC'
SQL += ' LIMIT 10'
cursor = connection.cursor()
cursor.execute( SQL )
agents = []
for i in range( cursor.rowcount ):
row = cursor.fetchone()
agents.append( { 'count': row[0], 'user_agent': row[1] } )
cursor.close()
return agents
# Django 1.1
# from django.db.models import Count
# return self.get_query_set().values( 'user_agent' ).annotate( agent_count = Count( 'user_agent' ) ).order_by( '-agent_count' )[:10]
class Request( AbstractModel ):
user = models.CharField( _( 'User' ), max_length = DEFAULT_MAX_LENGTH, blank = True )
user_agent = models.CharField( _( 'User Agent' ), max_length = 500, blank = True )
path = models.CharField( _( 'Path' ), max_length = DEFAULT_MAX_LENGTH, blank = True )
remote = models.CharField( _( 'Remote' ), max_length = DEFAULT_MAX_LENGTH, blank = True )
objects = RequestManager()
def __unicode__( self ):
return self.user_agent
| marcusti/mti-twa | requests/models.py | models.py | py | 1,769 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Manager",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "date... |
3689179156 | """feedback URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.contrib import admin
from django.urls import path
from django.urls import path
from webapp.views import IndexView, ProductView, ProductCreateView, \
ProductUpdateView, ProductDeleteView, ReviewCreateView, ReviewForProductCreateView, \
ReviewListView, ReviewUpdateView, ReviewDeleteView
urlpatterns = [
path('admin/', admin.site.urls),
path('', IndexView.as_view(), name='index'),
path('product/<int:pk>/', ProductView.as_view(), name='product_view'),
path('product/add/', ProductCreateView.as_view(), name='product_add'),
path('product/<int:pk>/edit/', ProductUpdateView.as_view(), name='product_update'),
path('product/<int:pk>/delete/', ProductDeleteView.as_view(), name='product_delete'),
path('comments/', ReviewListView.as_view(), name='comment_list'),
path('comment/add/', ReviewCreateView.as_view(), name='comment_add'),
path('comment/<int:pk>/edit/', ReviewUpdateView.as_view(), name='comment_update'),
path('comment/<int:pk>/delete/', ReviewDeleteView.as_view(), name='comment_delete'),
path('product/<int:pk>/add-comment/', ReviewForProductCreateView.as_view(), name='article_comment_create'),
]
| da5tan93/feedback | feedback/urls.py | urls.py | py | 1,866 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "... |
23934877636 | from django_cron import CronJobBase, Schedule
from django.db.models import Q
from .models import Device
class UpdateDeviceStatuses(CronJobBase):
devices = []
RUN_EVERY_MINS = 1
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'ARM.update_device_statuses'
def do(self):
devices = Device.objects.filter(
~(Q(status=Device.send) | Q(status=Device.in_progress)),
stock__isnull=True,
next_check_date__isnull=False,
)
for device in devices:
if device.next_check_date:
device.status = device.get_status()
Device.objects.bulk_update(devices, fields=["status"]) | tayotoki/arm-kip | ARM/cron.py | cron.py | py | 684 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django_cron.CronJobBase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django_cron.Schedule",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Device.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name... |
39899914837 | from UI.editEmployeeWin import Ui_Dialog
from PyQt5.QtWidgets import QDialog, QLineEdit
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtCore import QRegExp
class EditEmployeeWin(QDialog, Ui_Dialog):
def __init__(self, parent, roles, empData = None) -> None:
super().__init__(parent)
Ui_Dialog.__init__(self)
self.setupUi(self)
self.empData = empData
self.isEdit = empData != None
selectedIndex = None
self.editPassword.setEchoMode(QLineEdit.EchoMode.Password)
self.editPassword_2.setEchoMode(QLineEdit.EchoMode.Password)
self.prevLogin = None
#self.editPassword.setValidator(QRegExpValidator(QRegExp(r"^(?=.*[0-9])(?=.*[a-z])([a-z0-9_-]+)$"), self.editPassword))
#self.editPassword.keyPressed.connect(self.on_line_edit_key_pressed)
#validator = QRegExpValidator(QRegExp(r"[\.]+"))
self.pushButton.setEnabled(self.isEdit)
for lineEdit in self.findChildren(QLineEdit):
#lineEdit.setValidator(validator)
lineEdit.textChanged.connect(self.on_line_edit_key_pressed)
for index, role in enumerate(roles):
self.cmRoles.addItem(str(role[1]), role[0])
if selectedIndex == None and self.isEdit and empData[6] == role[0]:
selectedIndex = index
self.cmRoles.setCurrentIndex(selectedIndex)
self.empGuid = None
if self.isEdit:
self.prevLogin = empData[4]
self.empGuid = empData[0]
self.toggle_boxes(False)
self.cbChangeLoginPsw.setChecked(False)
self.cbChangeLoginPsw.stateChanged.connect(self.state_changed)
self.editName.setText(empData[1])
self.editSurname.setText(empData[2])
self.editMidname.setText(empData[3])
self.editLogin.setText(empData[4])
else:
self.cbChangeLoginPsw.hide()
def on_line_edit_key_pressed(self, event):
sender = self.sender()
#validator = sender.validator()
validator = QRegExpValidator(QRegExp(r'.+'))
state = validator.validate(event, 0)[0]
if state != 2:
sender.setStyleSheet('QLineEdit { border: 1px solid red }')
else:
sender.setStyleSheet('')
childsEmpData = self.frameEmpData.findChildren(QLineEdit)
childsUserData = self.frameUserData.findChildren(QLineEdit)
empCorrect = all([validator.validate(line.text(), 0)[0] == 2 for line in childsEmpData])
userCorrect = all([validator.validate(line.text(), 0)[0] == 2 for line in childsUserData])
self.pushButton.setEnabled(empCorrect and (userCorrect or self.isEdit and not self.cbChangeLoginPsw.isChecked()))
def state_changed(self, checked):
self.toggle_boxes(checked)
def toggle_boxes(self, isVisible):
if isVisible:
self.boxLogin_2.show()
self.boxPsw_2.show()
self.boxPswElse_2.show()
else:
self.boxLogin_2.hide()
self.boxPsw_2.hide()
self.boxPswElse_2.hide()
| Vittallya/MlApp | Views/editEmpWin.py | editEmpWin.py | py | 3,278 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "UI.editEmployeeWin.Ui_Dialog",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "UI.editEmployeeWin.Ui_Dialog.__init__",
"line_number": 11,
"usage_type": "call"
},
... |
11040292875 | from common.FrontendTexts import FrontendTexts
view_texts = FrontendTexts('quotes')
labels = view_texts.getComponent()['selector']['choices']
ACTION_CHOICES = (
(1, labels['edit']),
(2, labels['edit_materials'])
)
PROVIDER_CHOICES = (
(1, "MP100001 - Conpancol Ingenieros"),
(2, "MP100002 - Maasteel UK")
)
| Conpancol/PyHeroku | CPFrontend/quotes/choices.py | choices.py | py | 328 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "common.FrontendTexts.FrontendTexts",
"line_number": 2,
"usage_type": "call"
}
] |
33493058365 | import logging
import numpy as np
from sklearn.metrics import pairwise_distances
class PointCorresponder():
def point_correspond_3d_to_2d(self, projected_pts, visible_indces, keypoints, top_closest=1):
"""
Args:
projected_pts:
visible_indces:
keypoints:
top_closest:
Returns:
Dictionary with idx keypoint : top closest indeces of mean_shape points
"""
visible_idxs = list(visible_indces.keys())
assert projected_pts.shape[1] == keypoints.shape[1], 'Points should be 2d.'
# 68 x N_vis_points filtered only visible pts
projected_pts_visible = [projected_pts[idx] for idx in visible_idxs]
logging.info(f'We have {len(projected_pts_visible)} visible pts on image!')
distances = pairwise_distances(keypoints, projected_pts_visible, metric='euclidean')
# 68 x top
top_idx_vis_pts = np.argsort(distances, axis=1)[:, :top_closest]
# indeces of mean_shape points
top_idx_mean_shape = np.empty((keypoints.shape[0], top_closest))
for idx, top_idxs in enumerate(top_idx_vis_pts):
top_idx_mean_shape[idx] = [visible_idxs[i] for i in top_idxs]
correspondence_dict = {idx: top_idxs.astype(np.int).tolist() for idx, top_idxs in
zip(range(len(keypoints)), top_idx_mean_shape)}
return correspondence_dict
| Mikhail-Klochkov/face3dmorphablemodel | morphable_model/point_correspond.py | point_correspond.py | py | 1,436 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise_distances",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.e... |
11559234229 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: js2py_demo.py
@time: 2020-07-22 17:49
@description:js2py
"""
# 依赖
# pip3 install js2py
import js2py
from js_code import *
def test_simple():
"""
简单
:return:
"""
# 将js代码转为python
add = js2py.eval_js(js_simple())
# 当做python函数调用
result = add(1, 2)
print(result)
def test_js_from_file():
"""
从文件中读取js进行执行
:return:
"""
# 从文件中读取js代码
js_content = js_from_file('./norm.js')
# 使用获取上下js2py生成一个上下文环境
context = js2py.EvalJs()
# 执行整段JS代码
context.execute(js_content)
# 使用context调用具体的函数
result = context.add(1, 2)
print(result)
if __name__ == '__main__':
# test_simple()
test_js_from_file()
| xingag/tools_python | Python执行JS总结/js2py_demo.py | js2py_demo.py | py | 1,012 | python | en | code | 148 | github-code | 1 | [
{
"api_name": "js2py.eval_js",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "js2py.EvalJs",
"line_number": 48,
"usage_type": "call"
}
] |
28949575640 | import json
import os
from bs4 import BeautifulSoup
import requests
import time
from typing import List,Dict
import socket
def extract_urls(path_to_urls_json):
with open(path_to_urls_json) as f :
json_list = json.load(f)
return json_list
def download_url(url:str):
try:
x = requests.get(url,timeout=2)
data = x.text
except :
data = None
return data
def extract_titles_from_html(html) -> List:
soup = BeautifulSoup(html, 'html.parser')
titles = []
for t in soup.find_all('title'):
title = t.get_text()
titles.append(title)
return titles
def flatten(l):
return [item for sublist in l for item in sublist]
def save_index_to_json(index:Dict, path_directory, file_name,is_pos=False):
pos_str = ".pos_index" if is_pos else ".non_pos_index"
file_path = os.path.join(path_directory,file_name+pos_str+".json")
with open(file_path,'w') as f:
f.write(json.dumps(index,ensure_ascii=False))
def save_metada_to_json(metadadata:Dict,path_directory : str):
file_path = os.path.join(path_directory,"metadata.json")
with open(file_path,'w') as f:
f.write(json.dumps(metadadata,ensure_ascii=False))
| fwallyn1/SimpleIndex | index/utils.py | utils.py | py | 1,263 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_numb... |
30005077930 | import torch
from torch.nn import Module, Linear, LayerNorm
class LSTMCellLayerNorm(Module):
"""
A lstm cell that layer norms the cell state
https://github.com/seba-1511/lstms.pth/blob/master/lstms/lstm.py for reference.
Original License Apache 2.0
"""
def __init__(self, input_size, hidden_size, bias=True, forget_bias=0):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.ih = Linear(input_size, 4 * hidden_size, bias=bias)
self.hh = Linear(hidden_size, 4 * hidden_size, bias=bias)
if bias:
self.ih.bias.data.fill_(0)
self.hh.bias.data.fill_(0)
# forget bias init
self.ih.bias.data[hidden_size : hidden_size * 2].fill_(forget_bias)
self.hh.bias.data[hidden_size : hidden_size * 2].fill_(forget_bias)
self.ln_cell = LayerNorm(hidden_size)
def forward(self, x, hidden):
"""
LSTM Cell that layer normalizes the cell state.
:param x: Tensor{B, C}
:param hidden: A Tuple[Tensor{B, C}, Tensor{B, C}] of (previous output, cell state)
:return:
"""
h, c = hidden
# Linear mappings
i2h = self.ih(x)
h2h = self.hh(h)
preact = i2h + h2h
# activations
gates = preact[:, : 3 * self.hidden_size].sigmoid()
g_t = preact[:, 3 * self.hidden_size :].tanh()
i_t = gates[:, : self.hidden_size]
f_t = gates[:, self.hidden_size : 2 * self.hidden_size]
o_t = gates[:, -self.hidden_size :]
# cell computations
c_t = torch.mul(c, f_t) + torch.mul(i_t, g_t)
c_t = self.ln_cell(c_t)
h_t = torch.mul(o_t, c_t.tanh())
return h_t, c_t
| heronsystems/adeptRL | adept/modules/sequence.py | sequence.py | py | 1,770 | python | en | code | 202 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn.LayerNorm",
... |
31226951252 | # Flask utils
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
import os
from model_file import *
from whale_title import *
model.load_state_dict(torch.load('./model/VGG-whaleFin_ImageClassification_model.pt',map_location ='cpu'))
model.eval()
# Define a flask app
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index2.html", predictions=" ")
@app.route('/uploader', methods = ['POST'])
def upload_file():
predictions=""
explain=""
if request.method == 'POST':
f = request.files['file']
# Save the file to ./uploads
basepath = os.path.dirname(__file__)
file_path = os.path.join(basepath, 'static','uploads', secure_filename(f.filename))
f.save(file_path)
x = pil_loader(file_path)
x = transform(x)
x = x.unsqueeze(0)
x = x.to(device)
y_pred, _ = model(x)
y_prob = F.softmax(y_pred, dim = -1)
top_pred = y_prob.argmax(1, keepdim = True)
top_pred = top_pred.cpu()
classes = ['blue_whale',
'brydes_whale',
'cuviers_beaked_whale',
'false_killer_whale',
'fin_whale', 'gray_whale',
'humpback_whale', 'killer_whale',
'long_finned_pilot_whale',
'melon_headed_whale',
'minke_whale',
'pygmy_killer_whale',
'sei_whale',
'short_finned_pilot_whale',
'southern_right_whale']
name = name_title(classes[top_pred])
explain = switch(classes[top_pred])
return render_template("index1.html", predictions = name, explain = explain, image_name = classes[top_pred])
if __name__ == "__main__":
app.run(host="0.0.0.0",debug=True,port="4100")
| sclee0724/Whale_Fin_Image_Classification_Project | app.py | app.py | py | 1,852 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.req... |
18082080648 | import torch
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import zipfile
import math
import geopandas
from shapely import geometry
import os
import torch
import sys
from sklearn.preprocessing import MinMaxScaler
# load some default Python modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import pandas as pd
#geo data
data = geopandas.read_file("taxi_zones.json")
data['centroid_column'] = data.centroid
coord_list = [(x,y) for x,y in zip(data['centroid_column'].x , data['centroid_column'].y)]
coord_list = np.array(coord_list)
from sklearn.preprocessing import MinMaxScaler
#coord_list_min_max = MinMaxScaler().fit_transform(coord_list)
scaler = MinMaxScaler()
scaler.fit(coord_list)
coord_list_min_max = scaler.transform(coord_list)
#scaler.inverse_transform()
#coord_list_min_max
print("—————————js file loaded———————————")
df_train_6 = pd.read_csv('06.csv', parse_dates=["tpep_pickup_datetime"])#nrows = 2_000_000,
df_train_7 = pd.read_csv('07.csv', parse_dates=["tpep_pickup_datetime"])
df_train_8 = pd.read_csv('08.csv', parse_dates=["tpep_pickup_datetime"])
df_train = pd.concat([df_train_6, df_train_7, df_train_8],axis=0,ignore_index=True)
df_train["tpep_dropoff_datetime"] =pd.to_datetime(df_train["tpep_dropoff_datetime"])
x = df_train.copy()
train_X = pd.DataFrame()
#train_X["year"] = x['tpep_dropoff_datetime'].map(lambda x:x.year)
train_X["month"] = x['tpep_dropoff_datetime'].map(lambda x:x.month)
train_X["day"] = x['tpep_dropoff_datetime'].map(lambda x:x.day)
train_X["hour"] = x['tpep_dropoff_datetime'].map(lambda x:x.hour)
train_X["minute"] = x['tpep_dropoff_datetime'].map(lambda x:x.minute)
train_X["second"] = x['tpep_dropoff_datetime'].map(lambda x:x.second)
#train_X = train_X.values.reshape(train_X.shape[0],train_X.shape[1])
#train_X.to_csv("total_data.csv")
#y.to_csv("y.csv")
##########################################################
#train_X = pd.read_csv('total_data.csv')
#print(train_X.head())
#y = pd.read_csv('y.csv')
#print(y.head())
########################################################
print("—————————csv loading finished———————————")
#y = pd.DataFrame()
#y['DOLocationID'] = df_train['DOLocationID']
coord_list = np.r_[coord_list,[[np.nan,np.nan]]]
coord_list = np.r_[coord_list,[[np.nan,np.nan]]]
train_y = coord_list[y['DOLocationID'].values-1]
a = np.concatenate((train_X,train_y),axis=1)
train = a[~np.isnan(a).any(axis=1)]
#train = a[:n_train_samples, :]
#test = a[n_train_samples:, :]
train_X,train_y = train[:,1:6],train[:,6 :8]
train_X = train_X.reshape(train_X.shape[0],1,train_X.shape[1])
train_y[:,0] = -train_y[:,0]
print("train_X.shape,train_y.shape:", train_X.shape,train_y.shape)
"""
A Mixture Density Layer for Keras
cpmpercussion: Charles Martin (University of Oslo) 2018
https://github.com/cpmpercussion/keras-mdn-layer
Hat tip to [Omimo's Keras MDN layer](https://github.com/omimo/Keras-MDN)
for a starting point for this code.
Provided under MIT License
"""
#from .version import __version__
from tensorflow.compat.v1 import keras
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras import layers
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_probability import distributions as tfd
def elu_plus_one_plus_epsilon(x):
"""ELU activation with a very small addition to help prevent
NaN in loss."""
return keras.backend.elu(x) + 1 + keras.backend.epsilon()
class MDN(layers.Layer):
"""A Mixture Density Network Layer for Keras.
This layer has a few tricks to avoid NaNs in the loss function when training:
- Activation for variances is ELU + 1 + 1e-8 (to avoid very small values)
- Mixture weights (pi) are trained in as logits, not in the softmax space.
A loss function needs to be constructed with the same output dimension and number of mixtures.
A sampling function is also provided to sample from distribution parametrised by the MDN outputs.
"""
def __init__(self, output_dimension, num_mixtures, **kwargs):
self.output_dim = output_dimension
self.num_mix = num_mixtures
with tf.name_scope('MDN'):
self.mdn_mus = layers.Dense(self.num_mix * self.output_dim, name='mdn_mus') # mix*output vals, no activation
self.mdn_sigmas = layers.Dense(self.num_mix * self.output_dim, activation=elu_plus_one_plus_epsilon, name='mdn_sigmas') # mix*output vals exp activation
self.mdn_pi = layers.Dense(self.num_mix, name='mdn_pi') # mix vals, logits
super(MDN, self).__init__(**kwargs)
def build(self, input_shape):
with tf.name_scope('mus'):
self.mdn_mus.build(input_shape)
with tf.name_scope('sigmas'):
self.mdn_sigmas.build(input_shape)
with tf.name_scope('pis'):
self.mdn_pi.build(input_shape)
super(MDN, self).build(input_shape)
@property
def trainable_weights(self):
return self.mdn_mus.trainable_weights + self.mdn_sigmas.trainable_weights + self.mdn_pi.trainable_weights
@property
def non_trainable_weights(self):
return self.mdn_mus.non_trainable_weights + self.mdn_sigmas.non_trainable_weights + self.mdn_pi.non_trainable_weights
def call(self, x, mask=None):
with tf.name_scope('MDN'):
mdn_out = layers.concatenate([self.mdn_mus(x),
self.mdn_sigmas(x),
self.mdn_pi(x)],
name='mdn_outputs')
return mdn_out
def compute_output_shape(self, input_shape):
"""Returns output shape, showing the number of mixture parameters."""
return (input_shape[0], (2 * self.output_dim * self.num_mix) + self.num_mix)
def get_config(self):
config = {
"output_dimension": self.output_dim,
"num_mixtures": self.num_mix
}
base_config = super(MDN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# classmethod
# def from_config(cls, config):
# return cls(**config)
def get_mixture_loss_func(output_dim, num_mixes):
"""Construct a loss functions for the MDN layer parametrised by number of mixtures."""
# Construct a loss function with the right number of mixtures and outputs
def mdn_loss_func(y_true, y_pred):
# Reshape inputs in case this is used in a TimeDistribued layer
y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')
# Split the inputs into paramaters
out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
num_mixes * output_dim,
num_mixes],
axis=-1, name='mdn_coef_split')
# Construct the mixture models
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
in zip(mus, sigs)]
mixture = tfd.Mixture(cat=cat, components=coll)
loss = mixture.log_prob(y_true)
loss = tf.negative(loss)
loss = tf.reduce_mean(loss)
return loss
# Actually return the loss function
with tf.name_scope('MDN'):
return mdn_loss_func
def get_mixture_sampling_fun(output_dim, num_mixes):
"""Construct a TensorFlor sampling operation for the MDN layer parametrised
by mixtures and output dimension. This can be used in a Keras model to
generate samples directly."""
def sampling_func(y_pred):
# Reshape inputs in case this is used in a TimeDistribued layer
y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
num_mixes * output_dim,
num_mixes],
axis=1, name='mdn_coef_split')
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
in zip(mus, sigs)]
mixture = tfd.Mixture(cat=cat, components=coll)
samp = mixture.sample()
# Todo: temperature adjustment for sampling function.
return samp
# Actually return the loss_func
with tf.name_scope('MDNLayer'):
return sampling_func
def get_mixture_mse_accuracy(output_dim, num_mixes):
"""Construct an MSE accuracy function for the MDN layer
that takes one sample and compares to the true value."""
# Construct a loss function with the right number of mixtures and outputs
def mse_func(y_true, y_pred):
# Reshape inputs in case this is used in a TimeDistribued layer
y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')
out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
num_mixes * output_dim,
num_mixes],
axis=1, name='mdn_coef_split')
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
in zip(mus, sigs)]
mixture = tfd.Mixture(cat=cat, components=coll)
samp = mixture.sample()
mse = tf.reduce_mean(tf.square(samp - y_true), axis=-1)
# Todo: temperature adjustment for sampling functon.
return mse
# Actually return the loss_func
with tf.name_scope('MDNLayer'):
return mse_func
def split_mixture_params(params, output_dim, num_mixes):
"""Splits up an array of mixture parameters into mus, sigmas, and pis
depending on the number of mixtures and output dimension.
Arguments:
params -- the parameters of the mixture model
output_dim -- the dimension of the normal models in the mixture model
num_mixes -- the number of mixtures represented
"""
mus = params[:num_mixes * output_dim]
sigs = params[num_mixes * output_dim:2 * num_mixes * output_dim]
pi_logits = params[-num_mixes:]
return mus, sigs, pi_logits
def softmax(w, t=1.0):
"""Softmax function for a list or numpy array of logits. Also adjusts temperature.
Arguments:
w -- a list or numpy array of logits
Keyword arguments:
t -- the temperature for to adjust the distribution (default 1.0)
"""
e = np.array(w) / t # adjust temperature
e -= e.max() # subtract max to protect from exploding exp values.
e = np.exp(e)
dist = e / np.sum(e)
return dist
def sample_from_categorical(dist):
    """Samples from a categorical model PDF.

    Arguments:
    dist -- the parameters (probabilities) of the categorical model

    Returns:
    One sample index from the categorical model, or -1 if sampling fails
    (e.g. the probabilities do not accumulate past the uniform draw).
    """
    r = np.random.rand(1)  # uniform random number in [0,1]
    accumulate = 0
    for i in range(0, dist.size):
        accumulate += dist[i]
        if accumulate >= r:
            return i
    # Bug fix: `tf.logging` was removed in TensorFlow 2.x, so the original
    # failure path raised AttributeError instead of logging.  Report via the
    # standard library instead (imported locally: only needed on this rare
    # path, and this module-level import block is outside this function).
    import logging
    logging.info('Error sampling categorical model.')
    return -1
def sample_from_output(params, output_dim, num_mixes, temp=1.0, sigma_temp=1.0):
    """Draw one sample from an MDN output vector, with temperature control.

    This is done with NumPy outside of the Keras model: pick one mixture
    component from the (temperature-adjusted) weights, then sample from the
    chosen multivariate normal with its covariance scaled by *sigma_temp*.

    Arguments:
    params -- the flat mixture parameters, laid out [mus | sigmas | pi_logits]
    output_dim -- the dimension of the normal models in the mixture model
    num_mixes -- the number of mixtures represented

    Keyword arguments:
    temp -- temperature for sampling between mixture components (default 1.0)
    sigma_temp -- temperature for sampling from the normal (default 1.0)

    Returns:
    A (1, output_dim) sample from the mixture model.
    """
    mus, sigs, pi_logits = split_mixture_params(params, output_dim, num_mixes)
    weights = softmax(pi_logits, t=temp)
    chosen = sample_from_categorical(weights)
    # Alternative way to sample from categorical:
    # m = np.random.choice(range(len(pis)), p=pis)
    start, stop = chosen * output_dim, (chosen + 1) * output_dim
    component_mus = mus[start:stop]
    component_sigs = sigs[start:stop]
    # Diagonal scale matrix from the sigma vector; cov is scale squared,
    # then adjusted for the sigma temperature.
    scale_matrix = np.identity(output_dim) * component_sigs
    cov_matrix = np.matmul(scale_matrix, scale_matrix.T) * sigma_temp
    return np.random.multivariate_normal(component_mus, cov_matrix, 1)
# NOTE(review): mixing `from tensorflow import keras` with bare `keras.*`
# imports below can bind two different Keras implementations -- verify the
# installed versions actually interoperate.
from tensorflow import keras
from keras.utils import plot_model
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from matplotlib import pyplot
from keras.utils import plot_model
# Network hyper-parameters.
OUTPUT_DIMS = 2
N_MIXES =40 # number of mixture components used to fit the distribution
N_HIDDEN_1 = 100
N_HIDDEN_2 = 100
# Two sigmoid hidden layers feeding an MDN head.  `MDN` and
# `get_mixture_loss_func` are defined earlier in this file.
model = Sequential()
model.add(keras.layers.Dense(N_HIDDEN_1, batch_input_shape=(None, 5), activation='sigmoid'))
model.add(keras.layers.Dense(N_HIDDEN_2, activation='sigmoid'))
#model.add(LSTM(200, input_shape=(train_X.shape[1], train_X.shape[2])))
#model.add(Dense(200))
model.add(MDN(OUTPUT_DIMS, N_MIXES))
model.compile(loss=get_mixture_loss_func(OUTPUT_DIMS,N_MIXES), optimizer=keras.optimizers.Adam())
model.summary()
plot_model(model, to_file='model.png')
print("—————————模型搭建完成———————————")
from keras.callbacks import EarlyStopping, ModelCheckpoint
filepath = "Model_M=40_n1=100_n2=100sigmoid.h5" #'my_model_3.h5' # filename the trained model is saved under
#callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=0),ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=0),]
# NOTE(review): the checkpoint monitors 'val_acc', but the model is compiled
# with only the MDN loss (no accuracy metric) -- the checkpoint will likely
# never save; 'val_loss' with mode='min' looks like the intended monitor.
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max', period=1)
callbacks_list = [checkpoint]
history = model.fit(x=train_X, y=train_y, epochs=100,batch_size=100000, validation_split=0.30, callbacks= callbacks_list)#batch_size=72,
################################### save the model ###############################################
#model.save(filepath)
################################### save the training history ###############################################
import pickle
with open('Model_M=40_n1=100_n2=100sigmoid.pickle', 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
# Plot the training/validation loss curves.
from matplotlib import pyplot
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
# NOTE(review): savefig is called before legend(), so the saved image has no
# legend -- confirm whether that is intended.
pyplot.savefig("history.png")
pyplot.legend()
pyplot.show()
# NOTE(review): `plt` is only imported further down this block -- these first
# lines rely on an earlier `import matplotlib.pyplot as plt` elsewhere in the
# file; verify it exists.
plt.figure(0)
pyplot.figure(0)
#model.load_weights("vi_128_lrdefault.hdf5")
# Flatten the (samples, 1, features) training input to (samples, features).
train_X = train_X.reshape(train_X.shape[0],train_X.shape[2])
y_input = train_X[200:500]
################
print("y_input:",y_input)
# Predict MDN parameters and draw one sample per prediction row.
x_test = model.predict(y_input)
print("x_test:",x_test)
y_samples = np.apply_along_axis(sample_from_output, 1, x_test, OUTPUT_DIMS, N_MIXES, temp=1.0, sigma_temp=1.0)
#print("y_samples:",y_samples)
import matplotlib.pyplot as plt
y_samples = y_samples.reshape(y_samples.shape[0],y_samples.shape[2])
image = plt.scatter(y_samples[:,0],y_samples[:,1])
#image.show()
plt.savefig("Model_M=40_n1=100_n2=100sigmoid.png")
# Bug fix: the original had `plt.show` without parentheses -- a no-op
# attribute access that never displayed the figure.
plt.show()
plt.close(0)
plt.figure(0)
#y_samples = train_y
#import matplotlib.pyplot as plt
#image = plt.scatter(y_samples[:,0],y_samples[:,1])
#plt.savefig("scatter_plot_true.png")
#plt.show
#plt.close(0)
#plt.figure(0)
import matplotlib.pyplot as pl
import scipy.stats as st
# Kernel-density contour plot of (up to) the first 100k true targets.
data = train_y[1:100000]
x = data[:, 0]
y = data[:, 1]
# Bug fix: the original assigned max() to xmin and min() to xmax (and the
# same for y), producing a reversed grid and inverted axis limits.
xmin, xmax = data[:, 0].min(), data[:, 0].max()
ymin, ymax = data[:, 1].min(), data[:, 1].max()
# Perform the kernel density estimate
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = pl.figure()
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# Contourf plot
cfset = ax.contourf(xx, yy, f, cmap='Blues')
## Or kernel density estimate plot instead of the contourf plot
#ax.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
# Contour plot
cset = ax.contour(xx, yy, f, colors='k')
# Label plot
ax.clabel(cset, inline=1, fontsize=10)
ax.set_xlabel('Y1')
ax.set_ylabel('Y0')
pl.show() | ipmLessing/Taxi-Demand-and-Fare-Prediction | demand_prediction/Model_M=40_n1=100_n2=100sigmoid.py | Model_M=40_n1=100_n2=100sigmoid.py | py | 17,686 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_na... |
39164509106 | from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QLabel
import sys
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QRect, QSize
class Window(QMainWindow):
    """Demo main window: a push button that updates a label when clicked."""

    def __init__(self):
        super().__init__()
        self.create_ui()

    def create_ui(self):
        """Configure the window frame, build the child widgets, and show."""
        self.setWindowTitle("PyQt5 Window")
        self.setGeometry(100, 100, 600, 300)
        self.setWindowIcon(QIcon("icons/pyicon.png"))
        self.create_widget()
        self.show()

    def create_widget(self):
        """Create the push button and the label."""
        button = QPushButton("Click Me", self)
        button.setGeometry(QRect(50, 50, 100, 50))
        button.setIcon(QIcon("icons/pyicon.png"))
        button.setIconSize(QSize(40, 50))
        button.setToolTip("<h2>I am a button</h2>")
        button.clicked.connect(self.btn_clicked)
        self.btn = button

        label = QLabel("I am a label", self)
        label.setFont(QFont("Times New Roman", 15))
        label.setStyleSheet('color:green')
        label.setGeometry(QRect(120, 120, 200, 200))
        self.label = label

    def btn_clicked(self):
        """Slot: reflect the button click in the label."""
        self.label.setText("Button Is clicked")
        self.label.setStyleSheet('color:red')
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec()) | BjornChrisnach/Pinterest_course_GUI | Mywindow.py/Mywindow.py | Mywindow.py | py | 1,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 24,
"usage_type": "call"
},
{
"api_name... |
20831591596 | from flask import Flask, render_template, url_for, jsonify, request
from http import HTTPStatus
from jinja2 import Template
import pymysql.cursors
import json
import os
# Flask application object; the routes below attach to it.
app = Flask(__name__)
def get_connection():
    """Open a new pymysql connection to the local `laravel` database.

    Credentials come from the `dbuser`/`dbpassword` environment variables;
    rows are returned as dicts via DictCursor.  The caller owns the
    connection and is responsible for closing it.
    """
    return pymysql.connect(
        host='localhost',
        user=os.environ['dbuser'],
        password=os.environ['dbpassword'],
        database='laravel',
        charset='utf8mb4',
        cursorclass=pymysql.cursors.DictCursor,
    )
def create_statement(obj):
    """Render a SQL SELECT statement from a report-definition dict.

    *obj* is expected to carry 'selectors' (column/alias pairs), 'table',
    and 'filters' (filterTarget/operation/filterValue/logicalOperator).

    WARNING(review): column names, the table name and the filter values are
    interpolated directly into the SQL text -- any caller passing
    user-supplied data through here is exposed to SQL injection.
    """
    template = Template('''SELECT{% for selector in obj['selectors'] %}{% if loop.last %}{% if selector['alias'] != '' %} {{ selector['column'] }} AS '{{ selector['alias'] }}'{% else %} {{ selector['column'] }}{% endif %}{% else %}{% if selector['alias'] != '' %} {{ selector['column'] }} AS '{{ selector['alias'] }}',{% else %} {{ selector['column'] }},{% endif %}{% endif %}{% endfor %} FROM {{ obj['table'] }} {% if obj['filters'] == [] %}{% else %}WHERE{% for filter in obj['filters'] %}{% if loop.first %}{% else %} {{ filter['logicalOperator'] }}{% endif %} {{ filter['filterTarget'] }} {{ operators[filter['operation']] }} {{ filter['filterValue'] }}{% endfor %};{% endif %}''')
    # Maps the UI's operation names onto SQL comparison operators.
    operators = {
        "EQUAL": '=',
        "GREATER THAN": '>',
        "LESS THAN": '<',
        "GREATER THAN OR EQUAL TO": '>='
    }
    sql_statement = template.render(obj=obj, operators=operators)
    return sql_statement
@app.route('/')
def home():
    """Serve the report-builder front-end page."""
    return render_template('index.html')
@app.route("/tables")
def tables():
with get_connection().cursor() as cursor:
# Read a single record
sql = "show tables;"
cursor.execute(sql)
response = cursor.fetchall()
return jsonify([res['Tables_in_laravel'] for res in response]), HTTPStatus.OK
@app.route("/columns", methods=['POST', 'GET'])
def columns():
table = request.form['table_name']
with get_connection().cursor() as cursor:
# Read a single record
sql = f"describe {table};"
cursor.execute(sql)
response = cursor.fetchall()
cols = [res['Field'] for res in response]
cols.pop(0)
return jsonify(cols), HTTPStatus.OK
@app.route("/build_report", methods=['POST'])
def build_report():
try:
report =""
data = json.loads(list(request.form)[0])
template = Template('''
<table class="table">
<thead>
<tr>
{% for head in headers %}
<th class="col">{{ head | upper }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in rows %}
<tr>
{% for head in headers %}
<td>{{ row[head] }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
''')
for obj in data:
sql = create_statement(obj)
with get_connection().cursor() as cursor:
cursor.execute(sql)
rows = list(cursor.fetchall())
if rows != []:
headers = rows[0].keys()
report = report + template.render(rows=rows, headers=headers)
except Exception as e:
print(e)
return jsonify({'html': report}), HTTPStatus.OK | pdmarc7/analytica | app.py | app.py | py | 3,312 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.environ",
"... |
39745820789 | import nose
import subprocess
import os
# Repository root: this script lives one directory below it.
_epochdir = os.path.dirname(os.path.abspath(__file__))
_epochdir = os.path.join(_epochdir, '..')
# Code-version directory ('epoch1d'/'epoch2d'/'epoch3d'); set in main().
_subdir = None
def setcwd(relative=None):
    """Change the CWD to the selected EPOCH version directory, optionally
    descending into *relative*."""
    targets = [_epochdir, _subdir]
    if relative:
        targets.append(relative)
    for target in targets:
        os.chdir(target)
def compileepoch():
    """Build EPOCH with `make`; return make's exit status (0 on success)."""
    setcwd()
    status = subprocess.call('make', shell=True)
    if status != 0:
        print('compiling EPOCH errored (exitcode {})'.format(status))
    return status
def run_tests(args):
    """Collect and run the test suite with nose; return True if all pass."""
    nose_args = ['']
    if args.test:
        # Restrict the run to a single named test module, e.g. 'laser'.
        nose_args.append('tests.test_' + args.test)
    setcwd()
    return nose.run(argv=nose_args)
def clean():
    '''
    clean the tests directory and all its subdirectoryies
    by calling 'make clean' in each of them.
    '''
    # (The docstring above is shown verbatim as the --clean help text via
    # clean.__doc__, so it is kept unchanged.)
    setcwd()
    subprocess.call('rm -rf tests/__pycache__', shell=True)  # python3
    subprocess.call('rm -rf tests/*.pyc', shell=True)  # python2
    setcwd()
    # Materialize the directory list up front: setcwd(subdir) below changes
    # the CWD, so lazy evaluation would resolve relative paths wrongly.
    entries = [os.path.join('tests', name) for name in os.listdir('tests')]
    subdirs = [path for path in entries if os.path.isdir(path)]
    for subdir in subdirs:
        # call 'make clean' in every subdir
        setcwd(subdir)
        subprocess.call('make clean', shell=True)
def main():
    """Command-line entry point: parse arguments, then clean or build and
    optionally run the test suite, exiting with a meaningful status."""
    import argparse
    parser = argparse.ArgumentParser(description='''
    This runs the testsuite for EPOCH1D.
    It compiles EPOCH and runs the tests.
    It does NOT: install the python SDF reader or any other dependencies,
    which might be needed!
    ''')
    parser.add_argument('codeversion', help='''
    specify the code version to run the tests on.
    ''', choices=['epoch1d', '1d', 'epoch2d', '2d', 'epoch3d', '3d'])
    parser.add_argument('test', nargs='?', help='''
    run only a single test specified by its name, i.e. 'laser'
    ''')
    parser.add_argument('--clean', '-c', action='store_true',
                        help=clean.__doc__)
    parser.add_argument('--build', '-b', action='store_true', help='''
    build only. Do not run the code.
    ''')
    args = parser.parse_args()
    # Map the short aliases ('1d', ...) onto full directory names.
    subdirdict = {
        '1d': 'epoch1d',
        '2d': 'epoch2d',
        '3d': 'epoch3d'}
    global _subdir
    _subdir = args.codeversion
    if args.codeversion in subdirdict:
        _subdir = subdirdict[_subdir]
    if args.clean:
        clean()
        exit()
    # Build first; propagate make's exit code on failure.
    epochexitcode = compileepoch()
    if epochexitcode != 0:
        exit(epochexitcode)
    if args.build:
        exit()
    # Exit status 0 iff the whole test suite passed.
    testsok = run_tests(args)
    exit(int(not testsok))
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| Warwick-Plasma/epoch | scripts/run-tests.py | run-tests.py | py | 2,754 | python | en | code | 143 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
9528154926 | from PyQt5.QtWidgets import *
from PyQt5 import uic
from crawler.web_crawler import WebCrawler
from crawler.popup_window import PopupWindowClass
from crawler.qthread_worker import QThreadWorker
from bs4 import BeautifulSoup
from PyQt5.QtCore import QCoreApplication, QMutex, QThread, QWaitCondition, pyqtSignal
import sys
import time
import json
class MainWindow(QMainWindow):
    """Login window that switches to the main board form and shows posts
    delivered by a polling QThread worker."""
    # Last seen total number of board posts (class-level; 0 = not yet read).
    gi = 0
    def __init__(self):
        super().__init__()
        uic.loadUi("login_form.ui", self)
        self.show()
        self.lineEdit_PW.setEchoMode(QLineEdit.Password)
        self.pushButton_LOGIN.clicked.connect(self.btn_event_login)
        self.pushButton_NOTI.clicked.connect(self.btn_event_noti)
        self.lineEdit_ID.setText('')
        self.lineEdit_PW.setText('')
        # Contents Update QThread Worker
        self.worker_contents = QThreadWorker(id='', password='', duration=4)
        self.worker_contents.signal.connect(self.worker_update_contents)
    def btn_event_login(self):
        """Validate the credential fields, then log in via the web crawler
        and switch to the main form."""
        self.line_id = self.lineEdit_ID.text()
        self.line_pw = self.lineEdit_PW.text()
        if self.line_id == '' or self.line_pw == '':
            QMessageBox.information(self, "알림", "ID 또는 PW를 입력하세요!")
        else:
            # Initialize the crawler and log in.
            self.browser = WebCrawler()
            self.browser.login('', self.line_id, self.line_pw)
            self.event_form_change()
    def btn_event_noti(self, user_id, user_pw, url):
        """Open the notification popup window for the given post id/url."""
        main_window = PopupWindowClass(user_id, user_pw, url)
        main_window.show()
    def event_form_change(self):
        """Swap the UI to the main form and start the polling worker."""
        uic.loadUi("main_form.ui", self)
        self.show()
        self.lineEdit_TIME.setText(time.ctime())
        self.worker_contents.start()
    # Contents Update QThread Worker
    def worker_update_contents(self, item):
        """Slot for the worker's signal: refresh the post list from JSON.

        NOTE(review): the `== MainWindow.gi` comparison below fires the
        popup when the post count is UNCHANGED; detecting new posts would
        normally use `!=` -- confirm the intended trigger condition.
        """
        dict_from_json = json.loads(item)
        print(dict_from_json['totalElements'])
        if MainWindow.gi == 0:
            # First poll: record the count and render all posts.
            MainWindow.gi = dict_from_json['totalElements']
            for line in dict_from_json['content']:
                self.listWidget_SQUARE.addItem(str(line['contents']))
        else:
            if dict_from_json['totalElements'] == MainWindow.gi:
                MainWindow.gi = dict_from_json['totalElements']
                self.btn_event_noti(self.line_id, self.line_pw, 408183)
                self.listWidget_SQUARE.clear()
                for line in dict_from_json['content']:
                    self.listWidget_SQUARE.addItem(str(line['contents']))
    # for line in html.select('div.cardTheme-item'):
    #     user_info = line.find('div', attrs={'class': 'user-info'})
    #     user_name = user_info. find('img')['alt']
    #     user_img = user_info.find('img')['src']
    #     user_date = user_info.find('span', attrs={'class': 'date'}).text
    #
    #     contents = line.find('div', attrs={'class': 'feed-desc'}).text
    #     self.listWidget_SQUARE.addItem(str(contents))
    #     print(contents)
if __name__ == "__main__":
app = QApplication(sys.argv)
myWindow = MainWindow()
myWindow.show()
sys.exit(app.exec_())
| chunppo/WebCrawler | main.py | main.py | py | 3,166 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "crawler.qthread_worker.QThreadWorker",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "crawl... |
185739374 | import pyarrow as pa
from petastorm.local_disk_cache import LocalDiskCache
class LocalDiskArrowTableCache(LocalDiskCache):
    """A disk cache that stores pyarrow Tables as serialized pandas frames.

    NOTE(review): ``pa.serialize``/``pa.deserialize_components`` were
    deprecated and later removed from pyarrow -- this class only works
    against older pyarrow releases; confirm the pinned version.
    """
    def __init__(self, *args, **kwargs):
        super(LocalDiskArrowTableCache, self).__init__(*args, **kwargs)
        # Workaround for https://issues.apache.org/jira/browse/ARROW-5260
        # unless we try to serialize something before deserialize_components is called, we would crash with a sigsegv
        pa.serialize(0)
    def get(self, key, fill_cache_func):
        """Return the cached Table for *key*, computing and caching on miss.

        On a miss, ``fill_cache_func()`` must produce a pyarrow Table; it is
        converted to pandas, serialized, and stored as components.  On a
        hit, the components are deserialized back into a Table (with
        ``preserve_index=False``).
        """
        value = self._cache.get(key, default=None)
        if value is None:
            # Miss: materialize, serialize via pandas, and store.
            value = fill_cache_func()
            table_pandas = value.to_pandas()
            serialized_df = pa.serialize(table_pandas)
            components = serialized_df.to_components()
            self._cache.set(key, components)
        else:
            # Hit: rebuild the Table from the stored components.
            original_df = pa.deserialize_components(value)
            value = pa.Table.from_pandas(original_df, preserve_index=False)
        return value
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITLAB_REPOS/chaitanya_kaul@petastorm/petastorm/local_disk_arrow_table_cache.py | local_disk_arrow_table_cache.py | py | 1,022 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "petastorm.local_disk_cache.LocalDiskCache",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pyarrow.serialize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyarrow.serialize",
"line_number": 19,
"usage_type": "call"
},
{
"api_... |
74825614112 | """
HikerDataToCSV.py
Aggregates the data contained in the 'ValidatedHikers' directory. This script reads in every hiker in the directory and modifies each data structure in memory by
adding and calculating the fields required for hiker distance prediciton. The updated data structures are then written in CSV form and fed into the
Hiker Distance Prediction Model.
:author: Chris Campell
:version: 1/28/2017
"""
import os
import json
from datetime import datetime
from collections import OrderedDict
import copy
__author__ = "Chris Campell"
__version__ = "1/28/2017"
def get_validated_hikers(validated_hikers_path):
    """
    get_validated_hikers -Reads all validated hikers in the specified directory into memory. Returns an
     OrderedDictionary of validated_hikers sorted by hiker identifier. Additionally, this method will
     sort every hiker's trail journal by Journal Entry Number; this somewhat resembles a chronological sorting.
    :param validated_hikers_path: The directory containing one '<hiker_id>.json' file per hiker.
    :return sorted_validated_hikers: A dictionary containing the sorted validated hikers.
    """
    validated_hikers = {}
    for filename in os.listdir(validated_hikers_path):
        base, ext = os.path.splitext(filename)
        # Robustness fix: skip stray non-journal files (e.g. '.DS_Store');
        # the original used filename.strip(".json") and crashed with a
        # ValueError on anything that was not '<digits>.json'.
        if ext != '.json':
            continue
        with open(validated_hikers_path + "/" + filename, 'r') as fp:
            validated_hikers[int(base)] = json.load(fp=fp)
    # Sort the validated_hikers by hiker id:
    sorted_validated_hikers = OrderedDict(sorted(validated_hikers.items()))
    for hid, hiker in sorted_validated_hikers.items():
        # Sort the hiker's trail journal by integer entry number
        # (chronologically....kinda):
        entries = ((int(enum), entry) for enum, entry in hiker['journal'].items())
        sorted_validated_hikers[hid]['journal'] = OrderedDict(sorted(entries))
    return sorted_validated_hikers
def get_validated_shelters(validated_shelters_path):
    """
    get_validated_shelters -Returns a dictionary of the shelters validated using the combined TNL and ATC data sets.
    :param validated_shelters_path: The directory containing ATSheltersCombinedNoDuplicates.csv.
    :return validated_shelters: An OrderedDict keyed by shelter id with
        'name', 'dataset', 'type', 'lat' and 'lon' for each shelter.
    """
    validated_shelters = OrderedDict()
    with open(validated_shelters_path + "/ATSheltersCombinedNoDuplicates.csv", 'r') as fp:
        for line_num, line in enumerate(fp):
            if line_num == 0:
                continue  # skip the CSV header row
            # Bug fix: the original split the raw line, leaving the trailing
            # newline attached to the last column ('type'); strip it first.
            split_string = line.strip().split(",")
            shelter_id = split_string[0]
            shelter_name = split_string[1]
            data_set = split_string[2]
            lat = float(split_string[3])
            lon = float(split_string[4])
            shelter_type = split_string[5]  # renamed: 'type' shadowed the builtin
            validated_shelters[shelter_id] = {
                'name': shelter_name, 'dataset': data_set,
                'type': shelter_type, 'lat': lat, 'lon': lon
            }
    return validated_shelters
def get_csv_header_list():
    """Return the CSV column headers for the distance-prediction data set.

    Column meanings: hiker id, entry number, location+direction of travel,
    total trip mileage, entry date, elapsed trip miles between entries,
    elapsed days between entries, miles/day, the hiker's average miles/day,
    the hiker's bias relative to the overall average, and a constant
    regularization term.
    """
    return [
        "HID", "ENUM", "LOCDIR", "TM", "Date",
        "DTM", "DD", "MPD", "AVG_MPD", "UB", "bias",
    ]
def write_csv_header(csv_header, storage_location_path):
    """Write *csv_header* as the first line of DistancePrediction.csv.

    :param csv_header: List of column-header strings.
    :param storage_location_path: Directory in which to (over)write
        'DistancePrediction.csv'.
    :return: None; the file is created/truncated and the header written.
    """
    with open(storage_location_path + "/DistancePrediction.csv", 'w') as fp:
        if csv_header:
            fp.write(",".join(csv_header) + "\n")
def get_delta_trip_miles(journal_entry_one, journal_entry_two):
    """Return the trip mileage gained between two consecutive entries.

    :param journal_entry_one: The first journal entry.
    :param journal_entry_two: The next chronological journal entry.
    :return: journal_entry_two's total trip mileage minus journal_entry_one's.
    """
    miles_before = journal_entry_one['trip_mileage']
    miles_after = journal_entry_two['trip_mileage']
    return miles_after - miles_before
def get_delta_days(journal_entry_one, journal_entry_two):
    """Return the number of days elapsed between two journal entries.

    Entry dates are expected in the form 'Monday January 1, 2018' (commas
    are removed before parsing with '%A %B %d %Y').

    :param journal_entry_one: The first journal entry.
    :param journal_entry_two: The next chronological journal entry.
    :return: The elapsed days as an int, or None when a date fails to parse.
    """
    entry_one_date_string = journal_entry_one['date'].replace(",", "")
    entry_two_date_string = journal_entry_two['date'].replace(",", "")
    try:
        entry_one_date = datetime.strptime(entry_one_date_string, "%A %B %d %Y")
        entry_two_date = datetime.strptime(entry_two_date_string, "%A %B %d %Y")
    except ValueError:
        # Bug fix: the original applied two %s placeholders to a single
        # string (raising TypeError inside the except block) and printed
        # entry two's date twice instead of entry one's.
        print("Error: entry_one_date=%s\tentry_two_date=%s"
              % (entry_one_date_string, entry_two_date_string))
        return None
    return (entry_two_date - entry_one_date).days
def get_miles_per_day(delta_trip_miles, delta_days):
    """Return the average miles hiked per day between two journal entries.

    :param delta_trip_miles: Elapsed total trip miles between the entries.
    :param delta_days: Elapsed number of days between the entries.
    :return: delta_trip_miles / delta_days, or 0 when no days elapsed
        (several entries can share one day).
    """
    return delta_trip_miles / delta_days if delta_days != 0 else 0
def compute_hiker_avg_miles_per_day(validated_hiker):
    """Return a hiker's average miles/day over entries that carry the metric.

    Only journal entries that received a 'miles_per_day' value from
    add_hiker_attributes are counted (the final entry never has one).

    :param validated_hiker: A hiker dict updated by add_hiker_attributes.
    :return: The average miles per day, or 0.0 when no entry carries a
        'miles_per_day' value (the original raised ZeroDivisionError here).
    """
    total_miles_per_day = 0
    total_num_entries = 0
    for enum, entry in validated_hiker['journal'].items():
        if 'miles_per_day' in entry:
            total_num_entries += 1
            total_miles_per_day += entry['miles_per_day']
    if total_num_entries == 0:
        return 0.0
    return total_miles_per_day / total_num_entries
def add_hiker_attributes(validated_hiker):
    """Return a deep copy of *validated_hiker* with pairwise delta attributes.

    For every journal entry except the last (chronological order assumed),
    the following keys are added by comparing it with the next entry:

    - 'loc-dir': start location SID concatenated with direction (N/S).
    - 'delta_mileage': elapsed total mileage to the next entry.
    - 'delta_days': elapsed days to the next entry (datetime-based).
    - 'miles_per_day': delta_mileage / delta_days.
    - 'entry_string': placeholder, filled in later by main().

    The entry's 'date' also has its commas removed.  The final journal
    entry is left untouched.
    """
    hiker_copy = copy.deepcopy(validated_hiker)
    journal = hiker_copy['journal']
    entry_numbers = list(journal.keys())
    for first_enum, second_enum in zip(entry_numbers, entry_numbers[1:]):
        first_entry = journal[first_enum]
        second_entry = journal[second_enum]
        first_entry['date'] = first_entry['date'].replace(",", "")
        first_entry['loc-dir'] = first_entry['start_loc']['SID'] + first_entry['start_loc']['dir']
        first_entry['delta_mileage'] = get_delta_trip_miles(first_entry, second_entry)
        first_entry['delta_days'] = get_delta_days(first_entry, second_entry)
        first_entry['miles_per_day'] = get_miles_per_day(first_entry['delta_mileage'],
                                                         first_entry['delta_days'])
        first_entry['entry_string'] = ""
    return hiker_copy
def write_to_csv(valid_hikers, storage_location_path):
    """Write the CSV header plus every prepared journal-entry row.

    Entries without an 'entry_string' (each hiker's final entry, which has
    no delta attributes) are skipped.

    :param valid_hikers: Hikers updated by add_hiker_attributes and main().
    :param storage_location_path: Directory for DistancePrediction.csv.
    :return: None; DistancePrediction.csv is (re)written as a side effect.
    """
    write_csv_header(get_csv_header_list(), storage_location_path)
    with open(storage_location_path + "/DistancePrediction.csv", 'a') as fp:
        for hid, hiker in valid_hikers.items():
            fp.writelines(
                entry['entry_string']
                for enum, entry in hiker['journal'].items()
                if 'entry_string' in entry
            )
def main(valid_shelters_path, valid_hikers_path, storage_location_path):
    """
    Load the validated hikers, derive the per-entry distance features, and
    write the aggregated data set to DistancePrediction.csv.

    :param valid_shelters_path: Directory holding ATSheltersCombinedNoDuplicates.csv.
    :param valid_hikers_path: Directory of per-hiker JSON journal files.
    :param storage_location_path: Directory to write DistancePrediction.csv into.
    :return: None; DistancePrediction.csv is written as a side effect.
    """
    # NOTE(review): valid_shelters is loaded but never used below.
    valid_shelters = get_validated_shelters(validated_shelters_path=valid_shelters_path)
    valid_hikers = get_validated_hikers(validated_hikers_path=valid_hikers_path)
    # Update every hiker dict with new attributes:
    for hid, hiker in valid_hikers.items():
        valid_hikers[hid] = add_hiker_attributes(hiker)
    # Give every hiker an attribute corresponding to the hiker's average miles per day.
    for hid, hiker in valid_hikers.items():
        hiker_avg_miles_per_day = compute_hiker_avg_miles_per_day(hiker)
        valid_hikers[hid]['avg_miles_per_day'] = hiker_avg_miles_per_day
    # Get the average hiker's average miles-per-day:
    num_entries = 0
    total_avg_miles_per_day = 0
    for hid, hiker in valid_hikers.items():
        total_avg_miles_per_day += hiker['avg_miles_per_day']
        num_entries += 1
    avg_hikers_miles_per_day = total_avg_miles_per_day / num_entries
    # Calculate the user bias for every hiker in relation to the average:
    for hid, hiker in valid_hikers.items():
        valid_hikers[hid]['user_bias'] = hiker['avg_miles_per_day'] - avg_hikers_miles_per_day
    # Update journal entry strings (one CSV row per entry that has derived
    # features; the final entry of each journal has none):
    for hid, hiker in valid_hikers.items():
        for enum, entry in hiker['journal'].items():
            if 'miles_per_day' in entry:
                journal_entry_string = \
                    str(hiker['identifier']) + "," + str(enum) + "," \
                    + entry['loc-dir'] + "," + str(entry['trip_mileage']) + "," \
                    + entry['date'] + "," + str(entry['delta_mileage']) + "," \
                    + str(entry['delta_days']) + "," + str(entry['miles_per_day']) + "," \
                    + str(hiker['avg_miles_per_day']) + "," + str(hiker['user_bias']) + ",1\n"
                valid_hikers[hid]['journal'][enum]['entry_string'] = journal_entry_string
    write_to_csv(valid_hikers, storage_location_path)
"""
--------------------------------Required Directories---------------------------------------
"""
if __name__ == '__main__':
validated_shelter_data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../..', 'Data/TrailShelters/'))
validated_hikers_data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../..', 'Data/HikerData/VHDistancePrediction/'))
csv_write_location_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../..', 'Data/HikerData/'))
main(valid_shelters_path=validated_shelter_data_path, valid_hikers_path=validated_hikers_data_path,
storage_location_path=csv_write_location_path)
| campellcl/APMAT | Program/DataManipulation/HikerDataToCSV.py | HikerDataToCSV.py | py | 14,187 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict"... |
17928255590 | """empty message
Revision ID: b2a4b30fdece
Revises: 0a6d3d1508a5
Create Date: 2021-03-04 18:52:11.795274
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b2a4b30fdece'
down_revision = '0a6d3d1508a5'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `complaint` and `complaintLocation` tables.

    NOTE(review): `lng`/`lat` are stored as strings, not numerics --
    confirm that is intentional before relying on them for queries.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('complaint',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('userID', sa.Integer(), nullable=True),
    sa.Column('subject', sa.String(length=60), nullable=True),
    sa.Column('complaint', sa.String(length=500), nullable=True),
    sa.Column('status', sa.Boolean(), nullable=True),
    sa.Column('registeredAdmin', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['registeredAdmin'], ['admin.id'], ),
    sa.ForeignKeyConstraint(['userID'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # One-to-many child table: each location row references a complaint.
    op.create_table('complaintLocation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('complaintID', sa.Integer(), nullable=True),
    sa.Column('lng', sa.String(length=60), nullable=True),
    sa.Column('lat', sa.String(length=60), nullable=True),
    sa.ForeignKeyConstraint(['complaintID'], ['complaint.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('complaintLocation')
op.drop_table('complaint')
# ### end Alembic commands ###
| amankumar4real/ticketing | backend/migrations/versions/b2a4b30fdece_.py | b2a4b30fdece_.py | py | 1,526 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
26426713407 | #어른상어 2시
from collections import defaultdict,deque
global n,m,k,g,shark,sdir,smell
# INITIALIZE
shark = defaultdict(list)
dir = [(-1,0), (1,0), (0,-1), (0,1)] # 위, 아래, 왼쪽, 오른쪽
n,m,k = map(int ,input().split())
gg = [[*map(int,input().split())] for _ in range(n)]
g = [[[0,0] for _ in range(n)] for _ in range(n)]
initial = [*map(int, input().split())]
sdir = defaultdict(list)
smell = deque()
for mm in range(m):
for i in range(4):
l = [*map(int, input().split())]
sdir[mm+1].append( (l[0]-1, l[1]-1 , l[2]-1, l[3]-1) )
for y in range(n):
for x in range(n):
if gg[y][x] != 0 :
shark[ gg[y][x] ] = [y,x,initial[ gg[y][x]-1 ] -1 ]
g[y][x] = [k, gg[y][x]]
smell.append((y,x))
# 상어 이동
def sharkmove():
global n, m, k, g, shark, sdir, smell
sg = [[0] * n for _ in range(n)]
keylist = list( shark.keys() )
neww = deque()
for snum in keylist:
y,x,sd = shark[snum]
yxd = [[-1,-1,-1] for _ in range(2) ]
for d in sdir[snum][sd]:
ny,nx = y+dir[d][0], x+dir[d][1]
if ny<0 or nx<0 or ny>= n or nx>=n: continue
if g[ny][nx][0] == 0 or (ny, nx) in neww : yxd[0] = [ny, nx, d]; break
elif g[ny][nx][0] >0 and g[ny][nx][1] == snum:
if yxd[1][0]==-1: yxd[1] = [ny,nx,d]
for i in range(2):
yy,xx,dd = yxd[i]
if yy > -1:
if sg[yy][xx]>0 and sg[yy][xx] < snum: del shark[snum]
else:
if sg[yy][xx] > 0 and sg[yy][xx] > snum : del shark[sg[yy][xx]] # 다른 상어 잡아먹기
g[yy][xx] = [k+1, snum]
if not (yy,xx) in smell : smell.append( (yy,xx) )
shark[snum] = [yy,xx,dd]
sg[yy][xx] = snum
if i == 0: neww.append((yy,xx))
break
for time in range(1000) :
sharkmove()
# 냄새 1씩 줄이기
if smell :
pre = smell.copy()
for l in pre:
y,x = l
if g[y][x][0] >0 : g[y][x] = [g[y][x][0]-1, g[y][x][1] ]
if g[y][x][0] == 0: smell.remove((y,x)); g[y][x] = [0 , 0]
if len(shark) == 1: print(time+1);exit(0)
print(-1) | dohui-son/Python-Algorithms | simulation_samsung/b19237_어른상어.py | b19237_어른상어.py | py | 2,283 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "coll... |
28418502484 | #link - https://www.hackerrank.com/challenges/iterables-and-iterators/problem?isFullScreen=true
from itertools import combinations
n = int(input())
arr = [x for x in input().split()]
k = int(input())
combi = list(combinations(arr, k))
total = len(combi)
count = [1 for each in combi if 'a' in each]
print(len(count)/total) | prince001996/Hackerrank | Python/iterables_and_iterators.py | iterables_and_iterators.py | py | 340 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.combinations",
"line_number": 9,
"usage_type": "call"
}
] |
34577004526 | #!/usr/bin/python
import sys, os, argparse
import asyncio
import traceback
import rulebook
from .util import *
from .libnetconf import NetworkState
import rulebook.runtime
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
try:
from IPython.kernel.zmq.kernelapp import IPKernelApp
import zmq
except ImportError:
logger.warn("IPython and/or pyzmq not available. Interactive console will not work.")
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
if HAVE_IPYTHON:
# XXX IPython infests this application with threads!! I'm not sure why.
# I'm not even sure in which one the interactive commands are executed.
# TODO Try to get rid of that.
class IPythonEmbed:
def __init__(self, ns):
self.ns = ns
self.app = IPKernelApp(transport='ipc')
NOP = lambda *a,**kw: None
# Don't exit upon parent process exit
self.app.init_poller = NOP
# Don't redirect stdio
self.app.init_io = NOP
self.app.init_blackhole = NOP
# Don't catch SIGINT
self.app.init_signal = NOP
self.app.init_connection_file = NOP
self.app.log_connection_info = NOP
self.app.connection_file = str(RUNDIR / 'ipython.json')
def start(self):
# Make sure only root can access the sockets and the connection file
with umask_ctx(0o077):
try: os.unlink(self.app.connection_file)
except FileNotFoundError: pass
self.app.initialize()
self.app.kernel.user_module = sys.modules[__name__]
self.app.kernel.user_ns = self.ns
self.app.shell.set_completer_frame()
self.app.kernel.start()
for stream in self.app.kernel.shell_streams:
fd = stream.socket.getsockopt(zmq.FD)
def callback(stream):
stream.flush(zmq.POLLIN, 1)
stream.flush(zmq.POLLOUT)
asyncio.get_event_loop().add_reader(fd, callback, stream)
# That wasn't so hard, right? Why do the IPython developers keep on recommending
# regular polling as the way of integrating an IPython kernel with a mainloop then?
class Daemon:
def __init__(self):
self.rulebooks = {}
self.loop = asyncio.get_event_loop()
if HAVE_IPYTHON:
self.ipython = IPythonEmbed({'daemon': self})
# arg_parser = argparse.ArgumentParser()
# arg_parser.add_argument('-c', nargs=1, dest='config_path', help="Specify alternative configuration directory.")
# arg_parser.add_argument('-C', action='store_false', dest='want_builtin', default=True,
# help="Do not load builtin rules.")
# def parse_cmdline(self, argv):
# args = self.arg_parser.parse_args(argv)
def _load_rules(self, dirs):
for dir in dirs:
for file in Path(dir).glob('*.rbk'):
self._load_rbk(file)
def _load_rbk(self, file):
file = Path(file).resolve()
logger.info("Loading rulebook %s", file)
self.rulebooks[file] = rulebook.load(file, self.ctx)[0]
self.rulebooks[file].set_active(True)
def _exception_handler(self, loop, context):
exc = context.get('exception')
if isinstance(exc, asyncio.InvalidStateError):
# XXX Sometimes an exception like this appers as a result of a cancelled coroutine:
# Traceback (most recent call last):
# File "/usr/lib/python3.4/asyncio/events.py", line 39, in _run
# self._callback(*self._args)
# File "/usr/lib/python3.4/asyncio/subprocess.py", line 44, in connection_made
# self.waiter.set_result(None)
# File "/usr/lib/python3.4/asyncio/futures.py", line 298, in set_result
# raise InvalidStateError('{}: {!r}'.format(self._state, self))
# asyncio.futures.InvalidStateError: CANCELLED: Future<CANCELLED>
# Maybe I'm doing something wrong, but it looks as a bug in `asyncio`.
return
if isinstance(exc, asyncio.CancelledError):
return
traceback.print_exception(type(exc), exc, exc.__traceback__)
traceback.print_stack()
sys.exit(1)
@asyncio.coroutine
def initialize(self):
# Make all exceptions fatal for easier debugging
self.loop.set_exception_handler(self._exception_handler)
tasks = []
logger.info("Loading network state")
self.ns = NetworkState()
self.ctx = rulebook.runtime.Context()
self.ctx.ns.ns = self.ns # Make the NetworkState available under the name 'ns'
# in the namespace of the rulebooks (a bit unfortunate
# clash of acronyms, TODO better naming).
self.ctx.ns.logger = logging.getLogger('ns_rbk')
yield from self.ns.start()
logger.info("Loading configuration")
self._load_rules([RULES_BUILTIN, RULES_USER])
ctl_path = str(RUNDIR / 'ctl.sock')
try: os.unlink(ctl_path)
except FileNotFoundError: pass
yield from asyncio.start_unix_server(self._unix_conn, ctl_path)
if HAVE_IPYTHON:
self.ipython.ns['ns'] = self.ns
self.ipython.ns['ctx'] = self.ctx
self.ipython.start()
logger.info("IPython ready. Connect with: ``nsctl console`` or ``ipython console --existing %s``",
self.ipython.app.connection_file)
def _unix_conn(self, reader, writer):
logger.info('New unix connection')
@asyncio.coroutine
def conn_coro():
while True:
line = yield from reader.readline()
if not line: break
line = line.decode('utf-8').strip()
logger.info('Got ctl command: %s', line)
run_task(conn_coro())
def main(self):
self.loop.run_until_complete(self.initialize())
logger.info("Entering mainloop")
self.loop.run_forever()
def main():
daemon = Daemon()
daemon.main()
if __name__ == '__main__':
main()
| regnarg/networksecretary | networksecretary/daemon.py | daemon.py | py | 6,292 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "IPython.kern... |
7790807669 | import csv
import string
import chess
import chess.pgn
import random
import json
from random import randint
import pandas as pd
# Flask and WSGI
import flask
from flask import Flask, Blueprint, jsonify
from flask import request
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
# Reading database
db = pd.read_csv('datadifficulty.csv')
# divided by difficulty
db_easy = db.loc[db['Level'] == 'Easy']
db_easy.reset_index(inplace=True, drop=True)
db_medium = db.loc[db['Level'] == 'Medium']
db_medium.reset_index(inplace=True, drop=True)
db_hard = db.loc[db['Level'] == 'Hard']
db_hard.reset_index(inplace=True, drop=True)
# Globals that track current answers
side_move = ''
curr_fen = ''
white_elo = 0; black_elo= 0
white_cp = 0; black_cp = 0
white_wdl = 0.00; black_wdl = 0.00
ans_cpadv = ''; ans_wdladv = ''
def player_move(stm):
if stm == 'W':
return 'White'
else:
return 'Black'
@app.route('/',methods=['GET', 'POST'])
@cross_origin()
def blank_text():
return "Blank"
# Extracts a random fen from database and sets globals accordingly
@app.route('/boardupdate', methods=['GET','POST'])
@cross_origin()
def board_update():
global curr_fen, white_elo, black_elo, white_cp, black_cp, white_wdl, black_wdl, ans_cpadv, ans_wdladv, side_move
#for testing
diff_v = request.args.get('difficulty')
dbr = pd.DataFrame()
# matching database
if diff_v == 'E':
dbr = db_easy
elif diff_v == 'M':
dbr = db_medium
elif diff_v == 'H':
dbr = db_hard
else:
dbr = db
fen_row = random.randint(0,len(dbr))
curr_fen = dbr.loc[fen_row,'FEN']
white_elo = dbr.loc[fen_row,'White ELO']; black_elo = dbr.loc[fen_row,'Black ELO']
white_cp = dbr.loc[fen_row,'White cp']; black_cp = dbr.loc[fen_row,'Black cp']
white_wdl = dbr.loc[fen_row,'White wdl']; black_wdl = dbr.loc[fen_row, 'Black wdl']
ans_cpadv = dbr.loc[fen_row,'CpAdv']; ans_wdladv = dbr.loc[fen_row,'WdlAdv']
side_move = player_move(str(dbr.loc[fen_row,'STM']))
print(ans_cpadv, ans_wdladv)
dict_send = {'fen': curr_fen,
'WhiteELO': str(white_elo),
'BlackELO': str(black_elo),
'blackWDL': str(black_wdl),
'whiteWDL': str(white_wdl),
'blackCP': str(black_cp),
'whiteCP': str(white_cp),
'side': str(side_move)}
return jsonify(dict_send)
@app.route('/updatechoice', methods=['GET','POST'])
@cross_origin()
def update_choice():
choice = request.args.get('UserChoice')
print('this is the choice', choice)
comments = ''
print(ans_cpadv,ans_wdladv)
if (choice == ans_cpadv) or (choice == ans_wdladv):
comments = 'User has guessed correctly!'
else:
comments = 'User has guessed wrong.' + ' Correct Answer is ' + ans_wdladv
print(comments)
return comments
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
| kapilpownikar/QCRI-Chess-Project | backend.py | backend.py | py | 3,095 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask_cors.cross_origin",... |
13103595331 | import numpy as np
import pathlib, os, time
class Neuron:
def __init__(self, numofinputs, weights=[-999], layerName="undefined"):
""" initialises neuron class type. weights MUST be list, not integer. If only 1 weighting of n then pass [n] """
self.activation = tanh
self.layerName = layerName
self.output = 0
if weights[0] == -999:
seed = gen_random_seed()
np.random.seed()
self.synaptic_weights = 2 * np.random.random((numofinputs, 1)) - 1
else:
self.synaptic_weights = weights
# self.output = []
true = True
def mutate(self): # this function randomly changes the weightings on the neuron
for weight in self.synaptic_weights:
weight = weight + 0.05 * (2 * np.random.random() - 1)
# alternative way of thinking that stores output as object property:
def think(self, input):
self.output = self.activation(np.dot(input, self.synaptic_weights))
# activation function being used ---------------------------------------------------------------------------------------------------------------------------------------------
def tanh(input):
return (np.tanh(input))
def tanh_derivative(x):
return (1 - np.square(tanh(x)))
def gen_random_seed():
return int(round(time.time() * 100) / 200) - np.random.rand(0, 1000)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
class NeuralNet:
""" This class constitutes multi layer neural network, with variable layer sizes. This translates into one input layer, one output layer and n hidden layers
Requires inputs of matrix of layer sizes, form [n, m, o etc] where sum m,n,o = num of layers and m, n and o are respective layer sizes.
Then, weights for each if known , the name of the net and the generation number.
Passing -999 * layersize will generate weights randomly."""
def __init__(self, layers=[], weights=[[], [], []], name=None, generation=0):
self.name = name or str(int(round(time.time() * 1000)))
self.layers = []
self.generation = generation
self.score = 0
self.path = ""
count = -1
for n in range(0, len(layers)):
count += 1
self.layers.append([""] * layers[n])
if n == 0:
numOfInputs = layers[0]
else:
numOfInputs = layers[n - 1]
self.init_layer(self.layers[n], weights[n], numOfInputs)
def init_layer(self, layer, weights, numOfInputs): # numOfInputs is num PER NEURON not total for layer
if weights == []:
weights.append(0) * len(layer) # <-----------------------------------------
for i in range(0, len(layer)): # <----------------------------------------- HERE NEEDS WORK
layer[i] = Neuron(numOfInputs, weights[0][i]) # <-----------------------------------------
def run_first_layer(self, board):
"""the first layer takes inputs in a different way to the other layers and thus requires a seperate subroutine
takes board as input. players squares should be represented as a one """
count = 0
for row in board:
for square in row:
self.layers[0][count].think(square)
count += 1
def feed_forward(self, layerRef=1):
""" takes reference of the layer to pass info from [reference being with indexing starting at 1,
so to pass info from first layer to second, layerRef = 1 etc]"""
inputs = []
for source in self.layers[layerRef - 1]:
inputs.append(source.output)
for target in self.layers[layerRef]:
target.think(inputs)
def mutate_network(self):
""" layer1 should not be mutated as weightings must always remain zero
This method mutates the other two layers randomly"""
for layer in self.layers[1:]:
for i in layer:
i.mutate()
def rtn_rating(self):
return self.layers[-1][0].output
def save_network(self):
""" saves network into directory set """
fLayers = open(self.path + "/layers.txt", "w")
fLayers.write(str(len(self.layers)))
fLayers.close()
for i in range(0, len(self.layers)):
fLayer = open(self.path + "/layer_%s.txt" % i, "w")
for n in self.layers[i]:
fLayer.write(str(n.synaptic_weights))
fLayer.write("\n\n")
fLayer.close()
return 0
def read_weights_from_file(self, projectName):
""" reads weights from file, needs projectName parameter. rest of file path is derived from self data."""
fLayers = open(self.path + "/layers.txt", "r")
numOfLayers = int(fLayers.read())
fLayers.close()
weights = []
tempNeuron = []
tempLayer = []
for i in range(0, numOfLayers):
fLayer = open(self.path + "/layer_%s.txt" % i, "r")
for line in fLayer:
if line != "\n":
tempNeuron.append(float(strip_brackets_and_whitespace(line)))
else:
tempLayer.append(tempNeuron)
tempNeuron = []
weights.append(tempLayer)
tempLayer = []
return weights
def make_net_dir(self, projectName):
""" Makes new directory for Network.
Takes path of file to save relative to, name of project, generation reference string, """
self.path = os.path.join(os.getcwd(), projectName, "gen_" + str(self.generation), self.name)
pathlib.Path(self.path).mkdir(parents=True, exist_ok=True)
#########################################################################################################################################
def strip_brackets_and_whitespace(input):
return input.rstrip().replace('[', '').replace(']', '')
def load_network(projectName, generation, name):
""" loads network from a file. """
networkPath = os.path.join(os.cwd(), projectName, generation, name)
############### to this point, return
return 1 | lloydarnold/a-level-coursework | old_neural_network.py | old_neural_network.py | py | 6,296 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
12921162394 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 20:23:59 2019
@author: de-sinnett
"""
# Import random, operator and plotting modules
import random
import operator
import matplotlib.pyplot
# Create a new empty list
agents = []
# Set up variables to start at random coordinates in a 100x100 grid (agent 1)
agents.append([random.randint(0,99),random.randint(0,99)])
# Random walk one step
if random.random() < 0.5:
agents[0][0] = agents[0][0] + 1
else:
agents[0][0] = agents[0][0] - 1
if random.random() < 0.5:
agents[0][1] = agents[0][1] + 1
else:
agents[0][1] = agents[0][1] - 1
# Set up variables to start at random coordinates in a 100x100 grid (agent 2)
agents.append([random.randint(0,99),random.randint(0,99)])
# Random walk one step
if random.random() < 0.5:
agents[1][0] = agents[1][0] + 1
else:
agents[1][0] = agents[1][0] - 1
if random.random() < 0.5:
agents[1][1] = agents[1][1] + 1
else:
agents[1][1] = agents[1][1] - 1
print (agents)
# Print agent with maximum value in first co-ordinate
print (max(agents))
# Print agent with maximum value in second co-ordinate
print (max(agents, key=operator.itemgetter(1)))
# Plot the agents
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
matplotlib.pyplot.scatter(agents[0][1],agents[0][0])
matplotlib.pyplot.scatter(agents[1][1],agents[1][0])
matplotlib.pyplot.scatter(m[1], m[0], color='red')
matplotlib.pyplot.show()
| DanielleSinnett/AgentBasedModellingTest | model2.py | model2.py | py | 1,432 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line... |
33608410850 | import shutil
import string
import torch
import torch.nn.functional as F
import torch.utils.data as data
import tqdm
import numpy as np
import json
from collections import Counter
class SQuAD():
def __init__(self):
self.contexts = []
self.questions = []
def loadSquad(self, data_path):
self.contexts = []
self.questions = []
with open(data_path,'r') as f:
dataset = json.load(f)
id_counter = 0
for theme in dataset["data"]:
for paragraph in theme["paragraphs"]:
if not (paragraph["context"] in self.contexts):
self.contexts.append(paragraph["context"])
for question in paragraph["qas"]:
self.questions.append((question["question"],id_counter))
id_counter += 1
else:
id_cont = self.contexts.index(paragraph["context"])
for question in paragraph["qas"]:
self.questions.append((question["question"],id_cont))
| Onepierre/Context-detection | dataloader.py | dataloader.py | py | 1,113 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 26,
"usage_type": "call"
}
] |
22025109541 | import boto3
import json
import logging
import os
import cv2
import ast
from botocore.exceptions import ClientError
s3 = boto3.client('s3')
s3inputbucket = 'inputvideobucket2022'
sourcefile = 'source.mp4'
sourceoutputfile = 'source.mp4'
labelidentifier = 'Human'
labelconfidence = 80
jsonsource = '0data.json'
def S3Exist(InputVideoBucket, InputVideoKey):
s3 = boto3.client('s3')
try:
response = s3.head_object(Bucket=InputVideoBucket, Key=InputVideoKey)
return response
except ClientError:
#print('key not found ...')
return ('False')
def CvFrameProcessor(SourceVideoFile, FrameTimeStamp, FrameRate, OutputFrameName):
VidCapture = cv2.VideoCapture(SourceVideoFile)
length = int(VidCapture.get(cv2.CAP_PROP_FRAME_COUNT))
print("length of the input video is:", length, "seconds")
framenumber = FrameTimeStamp*0.0001*FrameRate
VidCapture.set(1, framenumber) #Based on total length calculated above in seconds and Framerate, calculate which frame to extract
ret, frame = VidCapture.read()
cv2.imwrite(OutputFrameName, frame)
def RekognitionOutputParser (JsonInput, ConfidenceScore, LabelIdentifier):
NewLabelData = []
with open(jsonsource) as f:
data = json.load(f)
#print(type(data))
VideoMetadata = ast.literal_eval(data)
#print(type(VideoMetadata))
#print(VideoMetadata['VideoMetadata'])
#VideoMetadata = data['VideoMetadata']
#print(type(VideoMetadata))
for LabelData in VideoMetadata['Labels']:
if (LabelData['Label']['Name'] == LabelIdentifier) and (LabelData['Label']['Confidence'] > ConfidenceScore):
NewLabelData.append(LabelData)
#print(NewLabelData)
return NewLabelData, VideoMetadata
response = S3Exist(s3inputbucket, sourcefile)
if response != 'False':
print('Key exists, continue ...\n')
try:
s3.download_file(s3inputbucket, sourcefile, sourceoutputfile) #Download Video From S3
if not os.path.exists('OutputFrames'):
os.makedirs('OutputFrames')
#Get specific LabelData which is data of interest basd on constraints provided
LabelData = RekognitionOutputParser(jsonsource, labelconfidence, labelidentifier)
#Set critical variables to run video frame extracter
#print(LabelData[1]['VideoMetadata'])
framerate = LabelData[1]['VideoMetadata']['FrameRate']
for frame in LabelData[0]:
outputframename = ( "OutputFrames/" + str(frame["Timestamp"]) + ".jpeg")
CvFrameProcessor(sourceoutputfile, frame["Timestamp"], framerate, outputframename)
print (frame["Timestamp"])
#Run Frame Processor using OpenCV to pull the specific Frames based on Label criteria
#CvFrameProcessor(sourceoutputfile, timestamp, framerate, outputframename)
except OSError:
print('File already exists ... Removing exisitng file \n')
os.remove('~/environment/RekognitionSid/HumanVideoDetect/source.mp4')
else:
print('Failed to locate input key in S3 ... ') | sidraj2002/RekognitionSid | HumanVideoDetect/FrameExtracter.py | FrameExtracter.py | py | 3,090 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCap... |
26067512419 | import importlib.resources
from pathlib import Path
import numpy as np
import scipy.linalg as sla
from hivpy.sex_behaviour_data import SexualBehaviourData
def test_probability_loading():
# Load example file
data_path = Path(__file__).parent / "test_data" / "sbd_testing.yaml"
SBD = SexualBehaviourData(data_path)
dists = SBD._get_discrete_dist_list("Test_Example_List")
# explicit distribution
d0 = dists[0]
N = 10000
pop0 = d0.sample(size=N)
for i in range(5):
v = i + 1
p = 0.1*i
count = sum(pop0 == v)
E_count = p * N
var = N * p * (1-p)
assert E_count - 2*var <= count <= E_count + 2*var
# uniform over range
d1 = dists[1]
pop1 = d1.sample(size=N)
for i in range(1, 11):
count = sum(pop1 == i)
E_count = 0.1 * N
var = N * 0.09
assert E_count - 2*var <= count <= E_count + 2*var
# uniform over specific values
d2 = dists[2]
pop2 = d2.sample(size=N)
for i in range(5):
v = i + 1
p = 0.2
count = sum(pop2 == v)
E_count = N * p
var = N * p * (1-p)
assert E_count - 2*var <= count <= E_count + 2*var
def test_sex_behaviour_matrices_diagonalisable():
"""Checks that all of the sexual behaviour transition matrices are diagonalisable
and therefore can be used to calculate transition matrices for variable time-steps.
This only needs to be run if the matrices change, and doesn't need to be run every time."""
with importlib.resources.path("hivpy.data", "sex_behaviour.yaml") as data_path:
SBD = SexualBehaviourData(data_path)
count_bad = 0
bad_matrices = []
all_matrices = [SBD.sex_behaviour_transition_options["Male"],
SBD.sex_behaviour_transition_options["Female"]]
for matList in all_matrices:
for m in matList:
print("m = ", m)
T = np.array(m).transpose()
evals, evecs = sla.eig(T)
print(evals)
print(evecs)
# Check that eigenvectors are linearly independent
# i.e. that no eigenvector is a multiple of any other
for i in range(0, evals.size):
for j in range(i + 1, evals.size):
ratio = evecs[i] / evecs[j]
print((i, j), ratio)
if np.allclose(ratio, ratio[0]):
count_bad += 1
bad_matrices.append(T)
print("***************************************************")
print(count_bad)
print(bad_matrices)
assert (count_bad == 0)
| UCL/hivpy | src/tests/test_sex_behaviour_data.py | test_sex_behaviour_data.py | py | 2,640 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hivpy.sex_behaviour_data.SexualBehaviourData",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "importlib.resources.resources.path",
"line_number": 53,
"usage_type": "call"
... |
26827467096 | import os
import pandas as pd
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
# root mean square error metric between predictions and the ground truth
def rmse(pred, true):
assert pred.shape == true.shape
n, = true.shape
return np.sqrt(np.sum((pred - true) ** 2) / n)
def prepo(X):
"""
For easier updatig, preposessing
Create a full rating matrix M: N1*N2
Create two dictionaries mapping users to movies and the reverse
"""
N1=max(X[0]) # N1 users
N2=max(X[1]) # N2 movies
M=np.zeros((N1, N2))
uv={} # the user i to all movies rated
vu={} # the movie j to all users rating
for i in range(len(X[0])):
# rating matrix M
M[X[0][i]-1][X[1][i]-1]=X[2][i]
# a dictionary mapping users ui to movies
if X[0][i]-1 in uv:
uv[X[0][i]-1].append(X[1][i]-1)
else:
uv[X[0][i]-1]=[X[1][i]-1]
# another dictionary mapping movies vj to users
if X[1][i]-1 in vu:
vu[X[1][i]-1].append(X[0][i]-1)
else:
vu[X[1][i]-1]=[X[0][i]-1]
return M,uv,vu
# update the user i location ui or the object j location vj: d*1 dimension
def update(a,r,list,const,d):
"""
update a vector b using some vectors a[i] in matrix a and a index list containing {i}.
r: a rating array
d: dimension
const: constant
:return: updated b
"""
s1=np.zeros((d,d))
s2=np.zeros(d)
for i in list:
s1=s1+(a[i][np.newaxis, :].T)*a[i]
s2=s2+r[i]*a[i]
return sum((inv(const+s1)*s2).T)
# the objective function
def objfunc(X,u,v,M_pred,var,numbda):
s1=0
for i in range(len(X[0])):
s1=s1+(X[2][i]-M_pred[X[0][i]-1,X[1][i]-1])**2
return -(s1/2/var+sum(sum(v**2))*numbda/2+sum(sum(u**2))*numbda/2)
def MatrixfFactorization(X,M,uv,vu,var,d,numbda,T):
"""
To recomender user ui for object vj given Gaussian Model Assumption by MatrixfFactorization
T: Iteration
:return: predicted missing rates
"""
# Constants
N1=max(X[0]) # N1 users
N2=max(X[1]) # N2 movies
# Initialze vj from a Guassian distribution
mean=np.zeros((d,), dtype=np.int)
var0=1/numbda*np.eye(d)
v = np.random.multivariate_normal(mean, var0, N2)
u = np.random.multivariate_normal(mean, var0, N1)
# Updating
L=[]
const=numbda*var*np.eye(d)
for t in range(T):
# update the user i location ui
for i in range(N1):
if i in vu:
u[i]=update(v,M[i],uv[i],const,d)
# update the object j location vj
for j in range(N2):
if j in vu:
v[j]=update(u,M.T[j],vu[j],const,d)
# predict ratings
u1=np.asmatrix(u)
v1=np.asmatrix(v)
M_pred=u1*(v1.T)
# the objective function
Lt=objfunc(X,u,v,var,numbda)
# make a list for every value of the objective function
L.append(Lt)
return L, M_pred, v
#############################################################
# for question 2
# loading the set indexes an incomplete ratings matrix M
X = pd.read_csv(os.path.join("/Users/cengjianhuan/Documents/ML/HW4", 'ratings.csv'), header=None)
# loading the testing set
M_ture = pd.read_csv(os.path.join("/Users/cengjianhuan/Documents/ML/HW4", 'ratings_test.csv'), header=None)
# constants
var = 0.25
d = 10
numbda = 1.0
T=100
# create a full rating matrix M: N1*N2 and two dictionaries mapping users to movies and the reverse
M,uv,vu=prepo(X)
# create lists to store results
L=[]
rmse_list=[]
Lmax=-5000
# run 10 times
for k in range(10):
# get the objective value and prediction at the k time
Lk, M_pred, vk=MatrixfFactorization(X,M,uv,vu,var,d,numbda,100)
L.append(Lk[-1])
# pick the relative predicted value M_test for rmse
M_test=np.zeros(len(M_ture[2]))
for i in range(len(M_ture[2])):
M_test[i]=M_pred[M_ture[0][i]-1,M_ture[1][i]-1]
# find the rmse at k time
rmse_list.append(rmse(M_test, M_ture[2]))
# find the optimal L
if L[-1]>Lmax:
Lmax=L
vmax=vk
# for question a
for i in range(10):
plt.plot(L[i][1:],label="L"+str(i))
plt.legend(loc='best')
plt.show()
# for question b
N2,_=vmax.shape
xt=[vmax[49],vmax[484],vmax[181]]
d=[]
minv=[]
idxv=[]
for k in range(len(xt)):
minv.append([])
idxv.append([])
for i in range(1682):
d.append(np.linalg.norm(xt[k]-vmax[i]))
M_test=np.zeros(len(M_ture[2]))
for i in range(len(M_ture[2])):
M_test[i]=M_pred[M_ture[0][i]-1,M_ture[1][i]-1]
for t in range(11):
mn,idx = min((d[i],i) for i in range(len(d)))
minv[k].append(mn)
idxv[k].append(idx)
d[idx]=10
| JianhuanZeng/Machine-Learning-NLP | NLP-Lab2-MovieRecomendation/FM.py | FM.py | py | 4,720 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 48,... |
1443274025 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Author : alex
Created : 2020-09-11 15:18:05
Comments :
"""
# %% IMPORTS
# -- global
import logging
import time
from PyQt5 import QtWidgets
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtWidgets import QShortcut, QMessageBox, QAction, QMenu
from pathlib import Path
from collections import OrderedDict
from functools import wraps
# -- local
from .. import loader
from . import (
filebrowser,
display,
dataexplorer,
quickplot,
correlations,
fitting,
testing,
misc,
menubar,
advancedplot,
commandpalette,
)
from .MainUI import Ui_mainWindow
from ..classes.settings import Settings
from ..gui import local_folder
# %% TOOLS
def _isnumber(x):
try:
float(x)
return True
except (TypeError, ValueError):
return False
# %% DECORATOR FOR DEBUGGING
def logCallback(f):
"""a wrapper for callback, for debug purposes"""
@wraps(f)
def wrapper(*args, **kwds):
# get log callback setting
log_callbacks = args[0].settings.config["dev"]["log callbacks"]
if eval(log_callbacks):
name = f.__name__
args[0].logger.debug(f"called {name}")
return f(*args, **kwds)
return wrapper
# cf. https://stackoverflow.com/a/6307868
def forAllCallbacks(decorator):
"""should decorate all methods with names starting with '_'"""
def decorate(cls):
for attr in cls.__dict__:
if attr == "__init__":
continue
if attr.startswith("_") and callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
# %% CALLBACK DEFINITIONS
# for the sake of readability, we define all the
# callbacks in a list of tuples, with the following form :
# ("widget", "signal", "callback")
# where "widget" is the name of the widget object, "signal"
# is a string containing the signal name, and "callback" the name
# of the callback function. All the callbacks are then automatically
# set in a loop.
#
# Example :
# --------
# adding ("todayButton", "clicked", "_todayButtonClicked") to
# the callback list will result in a callback definition leading to
# the same result as the following :
#
# self.todayButton.clicked.connect(self._todayButtonClicked)
# We disable black formatting : some lines will to too long, but
# this is better for readability IMHO
# fmt: off
CALLBACK_LIST = [
# -- FILE BROWSER --
# year / month / day lists
("yearList", "itemSelectionChanged", "_yearListSelectionChanged"),
("monthList", "itemSelectionChanged", "_monthListSelectionChanged"),
("dayList", "itemSelectionChanged", "_dayListSelectionChanged"),
# seq / run / sets lists
("seqList", "itemSelectionChanged", "_seqListSelectionChanged"),
("runList", "itemSelectionChanged", "_runListSelectionChanged"),
("setList", "itemSelectionChanged", "_setListSelectionChanged"),
# custom context menu (when right click on list)
("runList", "customContextMenuRequested", "_runListShowContextMenu"),
("setList", "customContextMenuRequested", "_setListShowContextMenu"),
# buttons
("refreshRunListButton", "clicked", "_refreshRunListButtonClicked"),
("todayButton", "clicked", "_todayButtonClicked"),
("dateEdit", "dateChanged", "_dateEditClicked"),
# -- DATA DISPLAY --
# select data type
("dataTypeComboBox", "currentIndexChanged", "_dataTypeComboBoxSelectionChanged"),
# select colormap
("colorMapComboBox", "currentIndexChanged", "_colorMapComboBoxSelectionChanged"),
# colormap scale
("scaleMinEdit", "editingFinished", "_scaleMinEditChanged"),
("scaleMaxEdit", "editingFinished", "_scaleMaxEditChanged"),
("autoScaleCheckBox", "stateChanged", "_autoScaleCheckBoxChanged"),
# display type selector
("displaySelectionGroup", "triggered", "_displaySelectionChanged"),
# -- DATA EXPLORER --
# meta data management
("metaDataList", "itemSelectionChanged", "_metaDataListSelectionChanged"),
("refreshMetadataCachebutton", "clicked", "_refreshMetadataCachebuttonClicked"),
# sets management
("setList", "doubleClicked", "_renameDataSet"),
("dataSetCreateAction", "triggered", "_createNewDataSet"),
("dataSetDeleteAction", "triggered", "_deleteDataSet"),
("dataSetFavAction", "triggered", "_favDataSet"),
("dataSetAddAction", "triggered", "_addToDataSet"),
("dataSetRenameAction", "triggered", "_renameDataSet"),
# quickplot
("quickPlotButton", "clicked", "_quickPlotButtonClicked"),
("quickStatsButton", "clicked", "_quickStatsButtonClicked"),
("quickPlot2DButton", "clicked", "_quickPlot2DButtonClicked"),
("quickPlotYToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlotXToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlotPlotbyToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlot2DYToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlot2DXToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlot2DZToolButtonActionGroup", "triggered", "_quickPlotSelectionChanged"),
("quickPlotFitToolButtonActionGroup", "triggered", "_quickPlotFitSelectionChanged"),
("plottingOptionsButton", "clicked", "_quickplotShowOptions"),
# -- ADVANCED DATA ANALYSIS / PLOT
("variableDeclarationTable", "itemChanged", "_variableDeclarationChanged"),
("exportToMatplotlibButton", "clicked", "_exportToMatplotlibButtonClicked"),
("updateSubplotLayoutButton", "clicked", "_updateSubplotLayoutButtonClicked"),
("resetSubplotLayoutButton", "clicked", "_resetSubplotLayoutButtonClicked"),
("subplotContentTable", "itemChanged", "_subplotContentTableChanged"),
("advancedPlotSaveButton", "clicked", "_advancedPlotSaveButtonClicked"),
("advancedPlotSaveAsButton", "clicked", "_advancedPlotSaveAsButtonClicked"),
("advancedPlotDeleteButton", "clicked", "_advancedPlotDeleteButtonClicked"),
(
"advancedPlotSelectionBox",
"currentIndexChanged",
"_advancedPlotSelectionBoxSelectionChanged"
),
("exportDataButton", "clicked", "_exportDataButtonClicked"),
("advancedStatButton", "clicked", "_advancedStatButtonClicked"),
("advancedPlotResetButton", "clicked", "_advancedPlotResetButtonClicked"),
# correlations plots
("correlationsPlotButton", "clicked", "_plotCorrelations"),
("correlationsHueToolButtonActionGroup", "triggered", "_correlationsSelectionChanged"),
# -- FITTING --
# ROI
("addRoiButton", "clicked", "_addRoiButtonClicked"),
("renameRoiButton", "clicked", "_renameRoiButtonClicked"),
("deleteRoiButton", "clicked", "_deleteRoiButtonClicked"),
("resetRoiButton", "clicked", "_resetRoiButtonClicked"),
("selectRoiComboBox", "currentIndexChanged", "_selectRoiComboBoxSelectionChanged"),
# background
("backgroundCheckBox", "stateChanged", "_backgroundCheckBoxChanged"),
# fit buttons
("fitButton", "clicked", "_fitButtonClicked"),
("deleteFitButton", "clicked", "_deleteFitButtonClicked"),
# -- MENU BAR --
("menuAboutGotoGithubAction", "triggered", "_gotoGithub"),
("menuAboutOnlineHelpAction", "triggered", "_getOnlineHelp"),
("menuPreferencesEditSettingsAction", "triggered", "_editSettings"),
("menuScriptsActionGroup", "triggered", "_playScript"),
("openScriptFolderMenuAction", "triggered", "_openUserScriptFolder"),
("openModuleFolderAction", "triggered", "_openUserModuleFolder"),
("menuDataOpenDataFolderAction", "triggered", "_openDataFolder"),
("menuAboutdisplayShortcutsAction", "triggered", "_displayShortcuts"),
]
# fmt: on
# Format for keyboard shorcut = ("shortcut", "callback", "description for help")
# is description is empty ==> do not appear in help
KEYBOARD_SHORTCUTS = [
("F5", "_refreshRunListButtonClicked", "refresh run list"),
("Shift+F5", "_refreshMetadataCachebuttonClicked", "refresh metadata cache"),
("Ctrl+B", "_ctrlB", "add background"),
("Ctrl+D", "_DEBUG", ""),
("Ctrl+F", "_fitButtonClicked", "fit current selection"),
("Ctrl+P", "_ctrlP", "show command palette"),
("Ctrl+R", "_addRoiButtonClicked", "add ROI"),
("Ctrl+Shift+R", "_resetRoiButtonClicked", "reset all ROIs"),
("Ctrl+-", "_ctrlMinus", ""),
]
# %% DEFINE GUI CLASS
@forAllCallbacks(logCallback)
class MainWindow(QtWidgets.QMainWindow, Ui_mainWindow):
# == INITIALIZATIONS
def __init__(self, parent=None, debug=False):
super(MainWindow, self).__init__(parent)
# -- SETUP LOGGER
# setup log level
self.__DEBUG__ = debug # debug mode
self.logger = logging.getLogger(__name__)
# print first log
self.logger.debug("HAL started")
# -- Hidden
self._version = "0.1-beta"
self._name = "HAL"
self._url = "https://github.com/adareau/HAL"
self._settings_folder = Path().home() / ".HAL"
self._user_modules_folder = self._settings_folder / "user_modules"
self._user_scripts_folder = self._settings_folder / "user_scripts"
self._kl = []
self._t0 = 0
# -- FIRST
# create HAL settings folder
self._settings_folder.mkdir(exist_ok=True)
self._user_modules_folder.mkdir(exist_ok=True)
self._user_scripts_folder.mkdir(exist_ok=True)
# load settings
global_config_path = self._settings_folder / "global.conf"
self.settings = Settings(path=global_config_path)
# -- configure window
# icon
icon_file = Path(local_folder) / "icon.png"
if icon_file.is_file():
icon = QIcon(str(icon_file))
self.setWindowIcon(icon)
else:
self.logger.warning(f"icon file '{icon_file}' not found")
# -- USER MODULES AND SCRIPTS
loader.modules.load(self)
loader.scripts.load(self)
# -- Set font size and Family
font_family = self.settings.config["gui"]["font family"]
font_size = self.settings.config["gui"]["font size"]
font = QFont(font_family, int(font_size))
self.setFont(font)
if parent is not None:
parent.setFont(font)
# -- GUI related initializations
# setup UI (as defined in HAL.gui.MainUI)
self.setupUi(self)
# creates the plotting options window, wrt the quickplot tab
self.quickplotOptionsWindow = quickplot.PlottingOptionsWindow()
# setup UI (define here)
self.setupElements()
# connect callbacks
self.connectActions()
# setup palette
commandpalette.setupPaletteList(self)
# setup keyboard shortcuts
self.setupKeyboardShortcuts()
# -- Metadata cache
# cache
self.metadata_cache = {}
# init "lists" of available meta data
# those are in fact "sets", so that the fields are only
# counted once
meta_names = [m().name for m in self.metadata_classes]
ordered_dic_init = [(m, set()) for m in meta_names]
self.available_metadata = OrderedDict(ordered_dic_init)
self.available_numeric_metadata = OrderedDict(ordered_dic_init)
# live display subplots
self.live_display_subplots = []
# -- Other initializations
self.current_folder = None
self.current_export_folder = None
self.current_fig = None
self.dark_theme = False
self.default_palette = self.palette()
# -- Keyboard shortcuts
def setupElements(self):
submodule_list = [
filebrowser,
display,
dataexplorer,
quickplot,
correlations,
advancedplot,
fitting,
menubar,
]
for submodule in submodule_list:
submodule.setupUi(self)
def connectActions(self):
# automatic definition of callbacks
# from the CALLBACK_LIST, defined at the top of this file !
global CALLBACK_LIST
for callback in CALLBACK_LIST:
widget_name, signal_name, callback_name = callback
widget = getattr(self, widget_name)
signal = getattr(widget, signal_name)
callback = getattr(self, callback_name)
signal.connect(callback)
def setupKeyboardShortcuts(self):
# automatic definition of keyboard shortcuts
# from the KEYBOARD_SHORTCUTS list, defined at the top of this file !
global KEYBOARD_SHORTCUTS
# save for later acces
self.keyboard_shortcuts_lists = KEYBOARD_SHORTCUTS
# assign shortcuts
for shortcut in KEYBOARD_SHORTCUTS:
sequence, callback_name, tooltip = shortcut
qshortcut = QShortcut(sequence, self)
callback = getattr(self, callback_name)
qshortcut.activated.connect(callback)
# == CALLBACKS
# -- FILE BROWSER (defined in gui.filebrowser)
def _yearListSelectionChanged(self, *args, **kwargs):
filebrowser.yearListSelectionChanged(self)
def _monthListSelectionChanged(self, *args, **kwargs):
filebrowser.monthListSelectionChanged(self)
def _dayListSelectionChanged(self, *args, **kwargs):
filebrowser.dayListSelectionChanged(self)
dataexplorer.refreshDataSetList(self)
def _runListSelectionChanged(self, *args, **kwargs):
# handle special selection rules
# (for instance, if a sequence is selected)
filebrowser.runListSelectionChanged(self)
# display
display.plotSelectedData(self)
# metadata
dataexplorer.displayMetaData(self)
if eval(self.settings.config["metadata"]["autorefresh cache"]):
dataexplorer.updateMetadataCache(self)
def _runListShowContextMenu(self, position, *args, **kwargs):
# -- return if no item
if not self.runList.itemAt(position):
return
# -- create context menu
contextMenu = QMenu()
# -- Fit related actions
# do fit
fitAction = QAction("fit", self)
fitAction.triggered.connect(self._fitButtonClicked)
contextMenu.addAction(fitAction)
# delete fit
delFitAction = QAction("delete fit", self)
delFitAction.triggered.connect(self._deleteFitButtonClicked)
contextMenu.addAction(delFitAction)
# -- Dataset related
contextMenu.addSeparator()
# crate dataset
createSetAction = QAction("create set", self)
createSetAction.triggered.connect(self._createNewDataSet)
contextMenu.addAction(createSetAction)
# add to selected set
addToSetAction = QAction("add to selected set", self)
addToSetAction.triggered.connect(self._addToDataSet)
contextMenu.addAction(addToSetAction)
# -- show
contextMenu.exec_(self.runList.mapToGlobal(position))
def _setListShowContextMenu(self, position, *args, **kwargs):
# -- return if no item
if not self.setList.itemAt(position):
return
# -- create context menu
contextMenu = QMenu()
# -- Dataset related
# rename
renameSetAction = QAction("rename", self)
renameSetAction.triggered.connect(self._renameDataSet)
contextMenu.addAction(renameSetAction)
# delete
deleteSetAction = QAction("delete", self)
deleteSetAction.triggered.connect(self._deleteDataSet)
contextMenu.addAction(deleteSetAction)
# add to favorite
favDataSetAction = QAction("add to favorite", self)
favDataSetAction.triggered.connect(self._favDataSet)
contextMenu.addAction(favDataSetAction)
# add to selected set
addToSetAction = QAction("add selected runs", self)
addToSetAction.triggered.connect(self._addToDataSet)
contextMenu.addAction(addToSetAction)
# -- show
contextMenu.exec_(self.setList.mapToGlobal(position))
def _seqListSelectionChanged(self, *args, **kwargs):
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
if eval(self.settings.config["metadata"]["autorefresh cache"]):
dataexplorer.updateMetadataCache(self)
def _setListSelectionChanged(self, *args, **kwargs):
if eval(self.settings.config["metadata"]["autorefresh cache"]):
dataexplorer.updateMetadataCache(self)
def _dateEditClicked(self, date):
filebrowser.dateEditClicked(self)
dataexplorer.refreshDataSetList(self)
def _refreshRunListButtonClicked(self, *args, **kwargs):
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
def _todayButtonClicked(self, checked=False):
filebrowser.todayButtonClicked(self)
dataexplorer.refreshDataSetList(self)
# -- DATA VISUALIZATION
def _dataTypeComboBoxSelectionChanged(self, *args, **kwargs):
# update scale
data_class = self.dataTypeComboBox.currentData()
sc_min, sc_max = data_class().default_display_scale
self.scaleMinEdit.setText(str(sc_min))
self.scaleMaxEdit.setText(str(sc_max))
# refresh
filebrowser.refreshCurrentFolder(self)
display.plotSelectedData(self)
def _colorMapComboBoxSelectionChanged(self, *args, **kwargs):
display.updateColormap(self)
def _scaleMaxEditChanged(self, *args, **kwargs):
new_max = self.scaleMaxEdit.text()
if not _isnumber(new_max):
data_class = self.dataTypeComboBox.currentData()
_, sc_max = data_class().default_display_scale
self.scaleMaxEdit.setText(str(sc_max))
display.plotSelectedData(self, update_fit=False)
def _scaleMinEditChanged(self, *args, **kwargs):
new_min = self.scaleMinEdit.text()
if not _isnumber(new_min):
data_class = self.dataTypeComboBox.currentData()
sc_min, _ = data_class().default_display_scale
self.scaleMinEdit.setText(str(sc_min))
display.plotSelectedData(self, update_fit=False)
def _displaySelectionChanged(self, action):
display.displaySelectionChanged(self, action)
def _autoScaleCheckBoxChanged(self, *args, **kwargs):
display.plotSelectedData(self, update_fit=False)
# -- DATA EXPLORER
def _metaDataListSelectionChanged(self, *args, **kwargs):
dataexplorer.displayMetaData(self)
dataexplorer.updateMetadataCache(self, reset_cache=True)
quickplot.refreshMetaDataList(self)
def _refreshMetadataCachebuttonClicked(self, *args, **kwargs):
dataexplorer.updateMetadataCache(self, reset_cache=True)
def _createNewDataSet(self, *args, **kwargs):
dataexplorer.createNewDataSet(self)
def _addToDataSet(self, *args, **kwargs):
dataexplorer.addToDataSet(self)
def _deleteDataSet(self, *args, **kwargs):
dataexplorer.deleteDataSet(self)
def _favDataSet(self, *args, **kwargs):
dataexplorer.favDataSet(self)
def _renameDataSet(self, *args, **kwargs):
dataexplorer.renameDataSet(self)
def _quickPlotButtonClicked(self, *args, **kwargs):
quickplot.plotData(self)
def _quickPlot2DButtonClicked(self, *args, **kwargs):
quickplot.plotData2D(self)
def _quickPlotSelectionChanged(self, *args, **kwargs):
quickplot.quickPlotSelectionChanged(self)
def _quickPlotFitSelectionChanged(self, *args, **kwargs):
quickplot.quickPlotFitSelectionChanged(self)
def _quickplotShowOptions(self, *args, **kwargs):
self.quickplotOptionsWindow.show()
def _quickStatsButtonClicked(self, *args, **kwargs):
advancedplot.quickStatsButtonClicked(self)
# -- ADVANCED DATA ANALYSIS / PLOT
def _variableDeclarationChanged(self, item):
advancedplot.variableDeclarationChanged(self, item)
def _exportToMatplotlibButtonClicked(self, *args, **kwargs):
advancedplot.exportToMatplotlib(self)
def _updateSubplotLayoutButtonClicked(self, *args, **kwargs):
advancedplot.updateSubplotLayout(self)
def _resetSubplotLayoutButtonClicked(self, *args, **kwargs):
advancedplot.resetSubplotLayout(self)
def _subplotContentTableChanged(self, item):
advancedplot.subplotContentChanged(self, item)
def _advancedPlotSaveButtonClicked(self, *args, **kwargs):
advancedplot.advancedPlotSaveButtonClicked(self)
def _advancedPlotSaveAsButtonClicked(self, *args, **kwargs):
advancedplot.advancedPlotSaveAsButtonClicked(self)
def _advancedPlotDeleteButtonClicked(self, *args, **kwargs):
advancedplot.advancedPlotDeleteButtonClicked(self)
def _advancedPlotSelectionBoxSelectionChanged(self, *args, **kwargs):
advancedplot.advancedPlotSelectionBoxSelectionChanged(self)
def _exportDataButtonClicked(self, *args, **kwargs):
advancedplot.exportDataButtonClicked(self)
def _advancedStatButtonClicked(self, *args, **kwargs):
advancedplot.advancedStatButtonClicked(self)
def _advancedPlotResetButtonClicked(self, *args, **kwargs):
advancedplot.advancedPlotResetButtonClicked(self)
# correlations plots
def _plotCorrelations(self, *args, **kwargs):
correlations.plotCorrelations(self)
def _correlationsSelectionChanged(self, *args, **kwargs):
correlations.correlationsSelectionChanged(self)
# -- FITTING
def _addRoiButtonClicked(self, *args, **kwargs):
fitting.addROI(self)
def _renameRoiButtonClicked(self, *args, **kwargs):
fitting.renameROI(self)
# refresh
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
def _deleteRoiButtonClicked(self, *args, **kwargs):
fitting.removeROI(self)
# refresh
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
def _resetRoiButtonClicked(self, *args, **kwargs):
fitting.clearROIs(self)
# refresh
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
def _selectRoiComboBoxSelectionChanged(self, *args, **kwargs):
display.updateFitForSelectedData(self)
def _fitButtonClicked(self, *args, **kwargs):
# fit
fitting.batchFitData(self)
# refresh
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
display.updateFitForSelectedData(self)
dataexplorer.displayMetaData(self)
def _deleteFitButtonClicked(self, *args, **kwargs):
# fit
fitting.deleteSavedFits(self)
# refresh
filebrowser.refreshCurrentFolder(self)
dataexplorer.refreshDataSetList(self)
def _backgroundCheckBoxChanged(self, *args, **kwargs):
if self.backgroundCheckBox.isChecked():
fitting.addBackground(self)
else:
fitting.removeBackground(self)
display.plotSelectedData(self, update_fit=False)
# -- MENUBAR
def _gotoGithub(self, *args, **kwargs):
menubar.gotoGithub(self)
def _getOnlineHelp(self, *args, **kwargs):
menubar.getOnlineHelp(self)
def _editSettings(self, *args, **kwargs):
if self.settings.openGuiEditor(parent=self):
msg = "New user settings loaded. You might have to restart HAL now."
QMessageBox.warning(self, "I am afraid Dave", msg)
def _playScript(self, action, *args, **kwargs):
"""runs the selected script"""
# get script info from action data
cat, name, func = action.data()
# play
sname = cat + ":" + name if cat else name
self.logger.debug(f"running script {sname}")
func(self)
def _openUserScriptFolder(self, *args, **kwargs):
menubar.openUserScriptFolder(self)
def _openUserModuleFolder(self, *args, **kwargs):
menubar.openUserModuleFolder(self)
def _openDataFolder(self, *args, **kwargs):
menubar.openDataFolder(self)
def _displayShortcuts(self, *args, **kwargs):
menubar.displayShortcuts(self)
# -- DEBUG
def _DEBUG(self, *args, **kwargs):
# self.autoScaleCheckBox.setChecked(True)
testing.open_image(self)
# testing.open_image_and_fit(self)
# testing.declare_variables(self)
# testing.select_livemetadata_display(self)
# self._editSettings()
def _tic(self, msg=None, name=""):
if msg is not None:
logger = logging.getLogger(name)
logger.debug(msg)
self._t0 = time.time()
def _toc(self, name=""):
tf = time.time()
logger = logging.getLogger(name)
logger.debug("DONE in %.2f seconds" % (tf - self._t0))
# == KEYBOARD SHORTCUTS
def _ctrlB(self, *args, **kwargs):
"""called when 'Ctrl+B' is pressed"""
self.backgroundCheckBox.toggle()
def _ctrlP(self, *args, **kwargs):
"""called when 'Ctrl+P' is pressed"""
commandpalette.showPalette(self)
def _ctrlMinus(self, *args, **kwargs):
"""called when 'Ctrl+-' is pressed"""
self.logger.debug("-" * 50)
def keyPressEvent(self, event):
"""key pressed"""
self._kl.append(event.key())
misc.analyse_keylog(self)
# == MAIN
def main(self):
self.show()
| adareau/HAL | HAL/gui/main.py | main.py | py | 25,565 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "functools.wraps",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "Ma... |
72143444195 | import datetime
from typing import Any, Dict, List, Optional, Type, TypeVar, Union, cast
import attr
from dateutil.parser import isoparse
from ..types import UNSET, Unset
T = TypeVar("T", bound="PatchedApplication")
@attr.s(auto_attribs=True)
class PatchedApplication:
"""Dynamically removes fields from serializer.
https://stackoverflow.com/questions/27935558/dynamically-exclude-or-include-a-field-in-django-rest-framework-serializer"""
url: Union[Unset, str] = UNSET
id: Union[Unset, int] = UNSET
ecosystem: Union[Unset, str] = UNSET
primary_software: Union[Unset, str] = UNSET
dependent_software: Union[Unset, List[str]] = UNSET
machine_name: Union[Unset, str] = UNSET
name: Union[Unset, str] = UNSET
description: Union[Unset, str] = UNSET
trello_board_url: Union[Unset, Optional[str]] = UNSET
documentation_url: Union[Unset, Optional[str]] = UNSET
pipelines: Union[Unset, List[str]] = UNSET
owning_team: Union[Unset, str] = UNSET
created: Union[Unset, datetime.datetime] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
url = self.url
id = self.id
ecosystem = self.ecosystem
primary_software = self.primary_software
dependent_software: Union[Unset, List[Any]] = UNSET
if not isinstance(self.dependent_software, Unset):
dependent_software = self.dependent_software
machine_name = self.machine_name
name = self.name
description = self.description
trello_board_url = self.trello_board_url
documentation_url = self.documentation_url
pipelines: Union[Unset, List[Any]] = UNSET
if not isinstance(self.pipelines, Unset):
pipelines = self.pipelines
owning_team = self.owning_team
created: Union[Unset, str] = UNSET
if not isinstance(self.created, Unset):
created = self.created.isoformat()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if url is not UNSET:
field_dict["url"] = url
if id is not UNSET:
field_dict["id"] = id
if ecosystem is not UNSET:
field_dict["ecosystem"] = ecosystem
if primary_software is not UNSET:
field_dict["primary_software"] = primary_software
if dependent_software is not UNSET:
field_dict["dependent_software"] = dependent_software
if machine_name is not UNSET:
field_dict["machine_name"] = machine_name
if name is not UNSET:
field_dict["name"] = name
if description is not UNSET:
field_dict["description"] = description
if trello_board_url is not UNSET:
field_dict["trello_board_url"] = trello_board_url
if documentation_url is not UNSET:
field_dict["documentation_url"] = documentation_url
if pipelines is not UNSET:
field_dict["pipelines"] = pipelines
if owning_team is not UNSET:
field_dict["owning_team"] = owning_team
if created is not UNSET:
field_dict["created"] = created
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
url = d.pop("url", UNSET)
id = d.pop("id", UNSET)
ecosystem = d.pop("ecosystem", UNSET)
primary_software = d.pop("primary_software", UNSET)
dependent_software = cast(List[str], d.pop("dependent_software", UNSET))
machine_name = d.pop("machine_name", UNSET)
name = d.pop("name", UNSET)
description = d.pop("description", UNSET)
trello_board_url = d.pop("trello_board_url", UNSET)
documentation_url = d.pop("documentation_url", UNSET)
pipelines = cast(List[str], d.pop("pipelines", UNSET))
owning_team = d.pop("owning_team", UNSET)
created: Union[Unset, datetime.datetime] = UNSET
_created = d.pop("created", UNSET)
if not isinstance(_created, Unset):
created = isoparse(_created)
patched_application = cls(
url=url,
id=id,
ecosystem=ecosystem,
primary_software=primary_software,
dependent_software=dependent_software,
machine_name=machine_name,
name=name,
description=description,
trello_board_url=trello_board_url,
documentation_url=documentation_url,
pipelines=pipelines,
owning_team=owning_team,
created=created,
)
patched_application.additional_properties = d
return patched_application
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| caltechads/brigid-api-client | brigid_api_client/models/patched_application.py | patched_application.py | py | 5,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number... |
31637375221 | # coding=utf-8
import copy
import os
import glob
import shutil
import unittest
from mock import patch
from testfixtures import TempDirectory
from ddt import ddt, data
from provider import cleaner
import activity.activity_AcceptedSubmissionStrikingImages as activity_module
from activity.activity_AcceptedSubmissionStrikingImages import (
activity_AcceptedSubmissionStrikingImages as activity_object,
)
import tests.test_data as test_case_data
from tests.activity.classes_mock import (
FakeLogger,
FakeSession,
FakeStorageContext,
)
from tests.activity import helpers, settings_mock, test_activity_data
def input_data(file_name_to_change=""):
activity_data = test_case_data.ingest_accepted_submission_data
activity_data["file_name"] = file_name_to_change
return activity_data
@ddt
class TestAcceptedSubmissionStrikingImages(unittest.TestCase):
def setUp(self):
fake_logger = FakeLogger()
self.activity = activity_object(settings_mock, fake_logger, None, None, None)
# instantiate the session here so it can be wiped clean between test runs
self.session = FakeSession(
copy.copy(test_activity_data.accepted_session_example)
)
def tearDown(self):
TempDirectory.cleanup_all()
# clean the temporary directory completely
shutil.rmtree(self.activity.get_tmp_dir())
# reset the session value
self.session.store_value("cleaner_log", None)
@patch.object(activity_module, "storage_context")
@patch.object(activity_module, "get_session")
@patch.object(cleaner, "storage_context")
@patch.object(activity_object, "clean_tmp_dir")
@data(
{
"comment": "example with no cover art",
"filename": "28-09-2020-RA-eLife-63532.zip",
"expected_result": True,
"expected_images_status": None,
"expected_upload_xml_status": None,
"expected_rename_files_status": None,
"expected_upload_files_status": None,
},
{
"comment": "example with cover art",
"filename": "30-01-2019-RA-eLife-45644.zip",
"expected_result": True,
"expected_images_status": True,
"expected_upload_xml_status": True,
"expected_rename_files_status": True,
"expected_upload_files_status": True,
"expected_xml_contains": [
("<upload_file_nm>45644-a_striking_image.tif</upload_file_nm>"),
],
"expected_bucket_upload_folder_contents": [
"30-01-2019-RA-eLife-45644.xml",
"45644-a_striking_image.tif",
],
"expected_striking_images_bucket_folder_contents": [
"45644-a_striking_image.tif"
],
},
)
def test_do_activity(
self,
test_data,
fake_clean_tmp_dir,
fake_cleaner_storage_context,
fake_session,
fake_storage_context,
):
directory = TempDirectory()
fake_clean_tmp_dir.return_value = None
zip_sub_folder = test_data.get("filename").replace(".zip", "")
zip_xml_file = "%s.xml" % zip_sub_folder
article_id = zip_sub_folder.rsplit("-", 1)[1]
zip_file_path = os.path.join("tests", "files_source", test_data.get("filename"))
resources = helpers.expanded_folder_bucket_resources(
directory,
test_activity_data.accepted_session_example.get("expanded_folder"),
zip_file_path,
)
fake_storage_context.return_value = FakeStorageContext(
directory.path, resources, dest_folder=directory.path
)
fake_cleaner_storage_context.return_value = FakeStorageContext(
directory.path, resources, dest_folder=directory.path
)
fake_session.return_value = self.session
# do the activity
result = self.activity.do_activity(input_data(test_data.get("filename")))
self.assertEqual(result, test_data.get("expected_result"))
temp_dir_files = glob.glob(self.activity.directories.get("TEMP_DIR") + "/*/*")
xml_file_path = os.path.join(
self.activity.directories.get("TEMP_DIR"),
zip_sub_folder,
zip_xml_file,
)
self.assertTrue(xml_file_path in temp_dir_files)
# assertion on XML contents
if test_data.get("expected_xml_contains"):
with open(xml_file_path, "r", encoding="utf-8") as open_file:
xml_content = open_file.read()
for fragment in test_data.get("expected_xml_contains"):
self.assertTrue(
fragment in xml_content,
"failed in {comment}".format(comment=test_data.get("comment")),
)
for status_type in ["images", "upload_xml", "rename_files", "upload_files"]:
self.assertEqual(
self.activity.statuses.get(status_type),
test_data.get("expected_%s_status" % status_type),
"status_type {status_type} failed in {comment}".format(
status_type=status_type, comment=test_data.get("comment")
),
)
# assertion on activity log contents
if test_data.get("expected_activity_log_contains"):
for fragment in test_data.get("expected_activity_log_contains"):
self.assertTrue(
fragment in str(self.activity.logger.loginfo),
"failed in {comment}".format(comment=test_data.get("comment")),
)
# assertion on cleaner.log contents
if test_data.get("expected_cleaner_log_contains"):
log_file_path = os.path.join(
self.activity.get_tmp_dir(), self.activity.activity_log_file
)
with open(log_file_path, "r", encoding="utf8") as open_file:
log_contents = open_file.read()
for fragment in test_data.get("expected_cleaner_log_contains"):
self.assertTrue(
fragment in log_contents,
"failed in {comment}".format(comment=test_data.get("comment")),
)
# assertion on the session cleaner log content
if test_data.get("expected_upload_xml_status"):
session_log = self.session.get_value("cleaner_log")
self.assertIsNotNone(
session_log,
"failed in {comment}".format(comment=test_data.get("comment")),
)
# check output bucket folder contents
if "expected_bucket_upload_folder_contents" in test_data:
bucket_folder_path = os.path.join(
directory.path,
test_activity_data.accepted_session_example.get("expanded_folder"),
zip_sub_folder,
)
try:
output_bucket_list = os.listdir(bucket_folder_path)
except FileNotFoundError:
# no objects were uploaded so the folder path does not exist
output_bucket_list = []
for bucket_file in test_data.get("expected_bucket_upload_folder_contents"):
self.assertTrue(
bucket_file in output_bucket_list,
"%s not found in bucket upload folder" % bucket_file,
)
# check striking images bucket folder contents
if "expected_striking_images_bucket_folder_contents" in test_data:
bucket_folder_path = os.path.join(directory.path, article_id, "vor")
try:
output_bucket_list = os.listdir(bucket_folder_path)
except FileNotFoundError:
# no objects were uploaded so the folder path does not exist
output_bucket_list = []
for bucket_file in test_data.get(
"expected_striking_images_bucket_folder_contents"
):
self.assertTrue(
bucket_file in output_bucket_list,
"%s not found in striking images bucket folder" % bucket_file,
)
| elifesciences/elife-bot | tests/activity/test_activity_accepted_submission_striking_images.py | test_activity_accepted_submission_striking_images.py | py | 8,187 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "tests.test_data.ingest_accepted_submission_data",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "tests.test_data",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 32,
"usage_type": "attribute"
}... |
18021165672 | import Pyro4
import Pyro4.naming
from xbmcjson import XBMC
import _thread as thread
from subprocess import run, PIPE
from evdev import UInput, InputEvent, ecodes as e
from time import sleep
import logging as log
from pulse_mute import Pulse
@Pyro4.expose
class RemoteServer(object):
    """Pyro4-exposed remote-control service for an HTPC.

    Provides command execution, virtual keyboard/mouse input via uinput,
    media-player pausing, per-application audio muting and user-service
    suspend/resume to remote Pyro4 clients.
    """

    def __init__(self):
        # Virtual keyboard device for injecting key events.
        self.ui = UInput()
        # Virtual mouse device: relative motion plus the primary button.
        # (A first capability dict without EV_KEY was dead code; removed.)
        cap = {
            e.EV_REL : (e.REL_X, e.REL_Y),
            e.EV_KEY : (e.BTN_MOUSE,),
        }
        self.mouse = UInput(cap)

    def run(self, args):
        """Run *args* as a subprocess; return (args, rc, stderr, stdout)."""
        # NB: ``run`` here resolves to subprocess.run (module global),
        # not this method.
        proc = run(args, check=False, stdout=PIPE, stderr=PIPE)
        return (args, proc.returncode, proc.stderr, proc.stdout)

    def handshake(self):
        """No-op used by clients to verify the connection is alive."""
        return

    @Pyro4.oneway
    def sleep(self):
        """Suspend the system via systemd (fire-and-forget)."""
        log.debug("Putting system to sleep")
        # Result intentionally ignored: oneway call, nobody can read it.
        run(["sudo", "systemctl", "suspend"], check=False)

    def keycombo(self, keys):
        """Press all *keys* down, then release them, as one combination."""
        for key in keys:
            self.ui.write(e.EV_KEY, key, 1)
        for key in keys:
            self.ui.write(e.EV_KEY, key, 0)
        self.ui.syn()

    def mouseaction(self, events):
        """Inject a batch of raw mouse events (dicts with type/code/value)."""
        for ev in events:
            self.mouse.write(ev['type'], ev['code'], ev['value'])
        self.mouse.syn()

    def pause(self, appname):
        """Pause playback in the named application ('spotify' or 'kodi')."""
        if appname == "spotify":
            return self.pause_spotify()
        elif appname == "kodi":
            return self.pause_kodi()

    def pause_spotify(self):
        """Pause Spotify via its MPRIS D-Bus interface."""
        log.debug("Pausing spotify")
        args = ["dbus-send", "--print-reply",
                "--dest=org.mpris.MediaPlayer2.spotify",
                "/org/mpris/MediaPlayer2",
                "org.mpris.MediaPlayer2.Player.Pause"]
        return self.run(args)

    def pause_kodi(self):
        """Pause Kodi via its JSON-RPC API, but only if it is playing."""
        log.debug("Pausing Kodi")
        kodi = XBMC("http://192.168.1.69:8080/jsonrpc")
        playerid_result = kodi.Player.GetActivePlayers()['result']
        if playerid_result:
            playerid = playerid_result[0]['playerid']
        else:
            return  # nothing is playing
        speed = kodi.Player.GetProperties(
            {'playerid': playerid, 'properties': ['speed']}
        )['result']['speed']
        if speed != 0:  # only toggle when actually playing, never unpause
            kodi.Player.PlayPause({'playerid': playerid})

    def mute_app(self, appname):
        """Mute the PulseAudio input stream of *appname*."""
        Pulse.mute_input(appname)

    def unmute_app(self, appname):
        """Unmute the PulseAudio input stream of *appname*."""
        Pulse.unmute_input(appname)

    def pause_service(self, appname):
        """Suspend the user systemd service *appname* with SIGSTOP."""
        log.debug("Suspending service: %s" % appname)
        args = ["systemctl", "--user", "kill", "-s", "STOP", "%s.service" % appname]
        result = self.run(args)
        return result

    def continue_service(self, appname):
        """Resume the user systemd service *appname* with SIGCONT."""
        log.debug("Resuming service: %s" % appname)  # fixed "Resuming_" typo
        args = ["systemctl", "--user", "kill", "-s", "CONT", "%s.service" % appname]
        return self.run(args)
def init_log():
    """Configure the root logger to append debug output to the server log file."""
    log.basicConfig(
        format='%(levelname)s:%(message)s',
        level=log.DEBUG,
        filename='/home/zteifel/remote/server.log',
    )
def start_nameserver():
    """Run the Pyro4 name server loop on the configured host/port (blocking)."""
    Pyro4.naming.startNSloop(host=host, port=port)
# Entry point: configure logging, launch the Pyro4 name server in a
# background thread, then register RemoteServer with a daemon and serve.
init_log()
log.info('Starting remote server')
host = "192.168.1.69"
port = 9093
# Name server runs in its own thread; the daemon loop below blocks.
thread.start_new_thread(start_nameserver,())
rs = RemoteServer()
daemon = Pyro4.Daemon(host)
rs_uri = daemon.register(rs)
# Register the object URI under a well-known name so clients can look it up.
ns = Pyro4.locateNS(host,port)
ns.register("zteifel.remoteserver", rs_uri)
daemon.requestLoop()
| zteifel/raspberry_remote | htpc/server_pyro.py | server_pyro.py | py | 3,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "evdev.UInput",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "evdev.ecodes.EV_REL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "evdev.ecodes",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "evdev.ecodes.REL_X... |
18805300498 | ##################################################### Import system libraries ######################################################
import matplotlib as mpl
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx) # Mittelwert und syst. Fehler
# Rx_mean_err = MeanError(noms(Rx)) # Fehler des Mittelwertes
#
## Relative Fehler zum späteren Vergleich in der Diskussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 bezeichnet die Startwerte der zu fittenden Parameter
# params = ucurve_fit(reg_linear, x, y) # linearer Fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratischer Fit
# params = ucurve_fit(reg_cubic, x, y) # kubischer Fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # type in Anz. signifikanter Stellen
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # type in Anz. signifikanter Stellen
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # mit Fehlerbalken
## plt.xscale('log') # logarithmische x-Achse
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# # die Multicolumns sein sollen
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## Aufsplitten von Tabellen, falls sie zu lang sind
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # type in Nachkommastellen
#
## Verschmelzen von Tabellen (nur Rohdaten, Anzahl der Zeilen muss gleich sein)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # Erzeugt aufwärts zählendes Array von 2 bis 10
# np.zeros(15) # Erzeugt Array mit 15 Nullen
# np.ones(15) # Erzeugt Array mit 15 Einsen
#
# np.amin(array) # Liefert den kleinsten Wert innerhalb eines Arrays
# np.argmin(array) # Gibt mir den Index des Minimums eines Arrays zurück
# np.amax(array) # Liefert den größten Wert innerhalb eines Arrays
# np.argmax(array) # Gibt mir den Index des Maximums eines Arrays zurück
#
# a1,a2 = np.array_split(array, 2) # Array in zwei Hälften teilen
# np.size(array) # Anzahl der Elemente eines Arrays ermitteln
########## ARRAY INDEXING ##########
# y[n - 1::n] # liefert aus einem Array jeden n-ten Wert als Array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
# E-field, part a): deflection D versus deflection voltage, one linear fit
# per accelerating voltage U_b (long plates: U_1..U_3, short plates: U_4, U_5).
D_lang, U_1, U_2, U_3 = np.genfromtxt('messdaten/messung_E_lang.txt', unpack=True)
D_kurz, U_4, U_5 = np.genfromtxt('messdaten/messung_E_kurz.txt', unpack=True)
D_lang = D_lang * 0.0254
D_kurz = D_kurz * 0.0254 # convert inches to metres
print("Die folgenden Plots werden Ihnen präsentiert von: Micra Tours!")
# U_b,1 = 200 V
params1 = ucurve_fit(reg_linear, U_1, D_lang) # linear fit
m1, b1 = params1
write('build/parameter_m1.tex', make_SI(m1, r'\metre\per\volt', figures=1)) # number of significant figures
write('build/parameter_b1.tex', make_SI(b1, r'\volt', figures=2)) # number of significant figures
t_plot1 = np.linspace(np.amin(U_1)-0.5, np.amax(U_1)+0.5, 100)
plt.plot(t_plot1, (m1.n*t_plot1+b1.n)*100, 'b-', label='Linearer Fit')
plt.plot(U_1, D_lang*100, 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$D \:/\: \si{\centi\metre}$')
plt.xlabel(r'$U_1 \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a1.pdf')
print("In Kooperation mit: Sassi Vacation!")
print("Starring Amba-Lamb-Guy (https://www.youtube.com/watch?v=fLQWKOB5se0)")
# U_b,2 = 250 V
plt.clf()
params2 = ucurve_fit(reg_linear, U_2, D_lang) # linear fit
m2, b2 = params2
write('build/parameter_m2.tex', make_SI(m2, r'\metre\per\volt', figures=1)) # number of significant figures
write('build/parameter_b2.tex', make_SI(b2, r'\volt', figures=2)) # number of significant figures
t_plot2 = np.linspace(np.amin(U_2)-0.5, np.amax(U_2)+0.5, 100)
plt.plot(t_plot2, (m2.n*t_plot2+b2.n)*100, 'b-', label='Linearer Fit')
plt.plot(U_2, D_lang*100, 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$D \:/\: \si{\centi\metre}$')
plt.xlabel(r'$U_2 \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a2.pdf')
print("... das maken dauert heute aber echt lang...")
# U_b,3 = 300 V
plt.clf()
params3 = ucurve_fit(reg_linear, U_3, D_lang) # linear fit
m3, b3 = params3
write('build/parameter_m3.tex', make_SI(m3, r'\metre\per\volt', figures=1)) # number of significant figures
write('build/parameter_b3.tex', make_SI(b3, r'\volt', figures=2)) # number of significant figures
t_plot3 = np.linspace(np.amin(U_3)-0.5, np.amax(U_3)+0.5, 100)
plt.plot(t_plot3, (m3.n*t_plot3+b3.n)*100, 'b-', label='Linearer Fit')
plt.plot(U_3, D_lang*100, 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$D \:/\: \si{\centi\metre}$')
plt.xlabel(r'$U_3 \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a3.pdf')
print("Wer lässt auch gefühlt 34 Funktionen in einem Protokoll fitten?!")
# U_b,4 = 350 V
plt.clf()
params4 = ucurve_fit(reg_linear, U_4, D_kurz) # linear fit
m4, b4 = params4
write('build/parameter_m4.tex', make_SI(m4, r'\metre\per\volt', figures=1)) # number of significant figures
write('build/parameter_b4.tex', make_SI(b4, r'\volt', figures=2)) # number of significant figures
t_plot4 = np.linspace(np.amin(U_4)-0.5, np.amax(U_4)+0.5, 100)
plt.plot(t_plot4, (m4.n*t_plot4+b4.n)*100, 'b-', label='Linearer Fit')
plt.plot(U_4, D_kurz*100, 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$D \:/\: \si{\centi\metre}$')
plt.xlabel(r'$U_4 \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a4.pdf')
print("Der isst auch kleine Kinder...")
# U_b,5 = 400 V
plt.clf()
params5 = ucurve_fit(reg_linear, U_5, D_kurz) # linear fit
m5, b5 = params5
write('build/parameter_m5.tex', make_SI(m5, r'\metre\per\volt', figures=1)) # number of significant figures
write('build/parameter_b5.tex', make_SI(b5, r'\volt', figures=2)) # number of significant figures
t_plot5 = np.linspace(np.amin(U_5)-0.5, np.amax(U_5)+0.5, 100)
plt.plot(t_plot5, (m5.n*t_plot5+b5.n)*100, 'b-', label='Linearer Fit')
plt.plot(U_5, D_kurz*100, 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$D \:/\: \si{\centi\metre}$')
plt.xlabel(r'$U_5 \:/\: \si{\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a5.pdf')
# E-field a), part 2: collect the five fit results and fit the
# sensitivity D/U_d against 1/U_B.
die_bs = np.array([b1.n, b2.n, b3.n, b4.n, b5.n])
die_bs_err = np.array([b1.s, b2.s, b3.s, b4.s, b5.s])
die_ms = np.array([m1.n, m2.n, m3.n, m4.n, m5.n])
die_ms_err = np.array([m1.s, m2.s, m3.s, m4.s, m5.s])
print("Übrigens: Das ist unser letztes Protkoll aus dem AP!")
U_b = np.array([200, 250, 300, 350, 400])
empf = unp.uarray([m1.n, m2.n, m3.n, m4.n, m5.n], [m1.s, m2.s, m3.s, m4.s, m5.s])
plt.clf()
write('Tabelle_c.tex', make_table([U_b, die_ms*10**4, die_ms_err*10**4, die_bs*10**3, die_bs_err*10**3],[0, 2, 2, 2, 2])) # every value with an uncertainty gets two columns
write('Tabelle_c_texformat.tex', make_full_table(
    'Fitparameter: Steigung $m$ und y-Achsenabschnitt $b$.',
    'tab:c',
    'Tabelle_c.tex',
    [], # careful: these numbers denote the resulting column indices
    # that should be rendered as multicolumns
    [
    r'$U_b \:/\: \si{\volt}$',
    r'$m \:/\: 10^{-4}\si{\metre\per\volt}$',
    r'$\increment{m} \:/\: 10^{-4}\si{\metre\per\volt}$',
    r'$b \:/\: 10^{-3}\si{\volt}$',
    r'$\increment{b} \:/\: 10^{-3}\si{\volt}$']))
params6 = ucurve_fit(reg_linear, 1/U_b, noms(empf)) # linear fit
m6, b6 = params6
write('build/parameter_m6.tex', make_SI(m6, r'\metre', figures=1)) # number of significant figures
write('build/parameter_b6.tex', make_SI(b6, r'\metre\per\volt', figures=2)) # number of significant figures
t_plot6 = np.linspace(np.amin(1/U_b*100)-0.001*20, np.amax(1/U_b*100)+0.001*20, 10)
plt.plot(t_plot6, (m6.n*t_plot6+b6.n*100), 'b-', label='Linearer Fit')
plt.errorbar(1/U_b*100, noms(empf)*100, fmt='rx', yerr=stds(empf)*100, label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{U_d} \:/\: 10^{-2}\si{\metre\per\volt}$')
plt.xlabel(r'$\frac{1}{U_\text{B}} \:/\: 10^{-2}\si{\volt\tothe{-1}}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a6.pdf')
# Deflection-system geometry in metres: plate separation d, plate length p,
# plate-to-screen distance L; theoretical slope is p*L/(2d).
d = 0.38 * 0.01
p = 1.9 * 0.01
L = 14.3*0.01
m6_lit = (p*L)/(2*d)
write('build/parameter_m6_lit.tex', make_SI(m6_lit, r'\metre', figures=3)) # number of significant figures
err_m6 = (m6 - m6_lit) / m6_lit  # relative deviation from theory
write('build/parameter_m6_rel.tex', make_SI(err_m6.n*100, r'\percent', figures=2))
# E-field b): frequency measurement with a sawtooth on the x-axis
v, A = np.genfromtxt('messdaten/frequenzen.txt', unpack=True)
U_b = 400 # (???)
D_amp = A[1] * 0.0254 # NOTE(review): unclear which amplitude value to use -- confirm
U_amp = 1/m6 * U_b * D_amp # fit relation rearranged and evaluated
write('build/U_amp.tex', make_SI(U_amp, r'\volt', figures=2))
write('build/v0.tex', make_SI(v[0], r'\kilo\hertz', figures=1))
write('build/v1.tex', make_SI(v[1], r'\kilo\hertz', figures=1))
write('build/v2.tex', make_SI(v[2], r'\kilo\hertz', figures=1))
write('build/v3.tex', make_SI(v[3], r'\kilo\hertz', figures=1))
write('build/v0_mal_2.tex', make_SI(v[0]*2, r'\kilo\hertz', figures=1))
write('build/v3_durch_2.tex', make_SI(v[3]/2, r'\kilo\hertz', figures=1))
######################################
# B-field a): deflection versus coil current, converted to flux density B
# via the Helmholtz-coil formula; one linear fit per accelerating voltage.
D_lang, I_1, I_2, I_3 = np.genfromtxt('messdaten/messung_B_lang.txt', unpack=True)
D_kurz, I_4, I_5 = np.genfromtxt('messdaten/messung_B_kurz.txt', unpack=True)
D_lang = D_lang * 0.0254
D_kurz = D_kurz * 0.0254 # convert inches to metres
U_b_B = np.array([250, 300, 350, 400, 450])
mu_0 = 4*np.pi*10**(-7)
N = 20 # NOTE(review): guessed -- confirm coil turn count
R = 0.282 # NOTE(review): guessed -- confirm coil radius
L = 17.5*0.01
# Helmholtz pair on-axis field: B = mu_0 * 8/sqrt(125) * N/R * I
B_1 = mu_0 * 8/np.sqrt(125) * N/R * I_1
B_2 = mu_0 * 8/np.sqrt(125) * N/R * I_2
B_3 = mu_0 * 8/np.sqrt(125) * N/R * I_3
B_4 = mu_0 * 8/np.sqrt(125) * N/R * I_4
B_5 = mu_0 * 8/np.sqrt(125) * N/R * I_5
# U_b,1 = 250 V
plt.clf()
params7 = ucurve_fit(reg_linear, B_1, D_lang/(L**2+D_lang**2)) # linear fit
m7, b7 = params7
write('build/parameter_m7.tex', make_SI(m7, r'\per\metre\per\tesla', figures=1)) # number of significant figures
write('build/parameter_b7.tex', make_SI(b7, r'\per\metre', figures=2)) # number of significant figures
t_plot7 = np.linspace(np.amin(B_1), np.amax(B_1), 100)
plt.plot(t_plot7*10**6, (m7.n*t_plot7+b7.n), 'b-', label='Linearer Fit')
plt.plot(B_1*10**(6), D_lang/(L**2+D_lang**2), 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{L^2 + D^2} \:/\: \si{\per\metre} $')
plt.xlabel(r'$B_1 \:/\: \si{\micro\tesla}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a7.pdf')
# U_b,2 = 300 V
plt.clf()
params8 = ucurve_fit(reg_linear, B_2, D_lang/(L**2+D_lang**2)) # linear fit
m8, b8 = params8
print("https://www.youtube.com/watch?v=Mdi534Q1Zsg")
write('build/parameter_m8.tex', make_SI(m8, r'\per\metre\per\tesla', figures=1)) # number of significant figures
write('build/parameter_b8.tex', make_SI(b8, r'\per\metre', figures=2)) # number of significant figures
t_plot8 = np.linspace(np.amin(B_2), np.amax(B_2), 100)
plt.plot(t_plot8*10**(6), (m8.n*t_plot8+b8.n), 'b-', label='Linearer Fit')
plt.plot(B_2*10**(6), D_lang/(L**2+D_lang**2), 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{L^2 + D^2} \:/\: \si{\per\metre} $')
plt.xlabel(r'$B_2 \:/\: \si{\micro\tesla}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a8.pdf')
# U_b,2 = 350 V
plt.clf()
params9 = ucurve_fit(reg_linear, B_3, D_lang/(L**2+D_lang**2)) # linear fit
m9, b9 = params9
write('build/parameter_m9.tex', make_SI(m9, r'\per\metre\per\tesla', figures=1)) # number of significant figures
write('build/parameter_b9.tex', make_SI(b9, r'\per\metre', figures=2)) # number of significant figures
t_plot9 = np.linspace(np.amin(B_3), np.amax(B_3), 100)
plt.plot(t_plot9*10**(6), (m9.n*t_plot9+b9.n), 'b-', label='Linearer Fit')
plt.plot(B_3*10**(6), D_lang/(L**2+D_lang**2), 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{L^2 + D^2} \:/\: \si{\per\metre} $')
plt.xlabel(r'$B_3 \:/\: \si{\micro\tesla}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a9.pdf')
# U_b,2 = 400 V
plt.clf()
params10 = ucurve_fit(reg_linear, B_4, D_kurz/(L**2+D_kurz**2)) # linear fit
m10, b10 = params10
write('build/parameter_m10.tex', make_SI(m10, r'\per\metre\per\tesla', figures=1)) # number of significant figures
write('build/parameter_b10.tex', make_SI(b10, r'\per\metre', figures=2)) # number of significant figures
t_plot10 = np.linspace(np.amin(B_4), np.amax(B_4), 100)
plt.plot(t_plot10*10**(6), (m10.n*t_plot10+b10.n), 'b-', label='Linearer Fit')
plt.plot(B_4*10**(6), D_kurz/(L**2+D_kurz**2), 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{L^2 + D^2} \:/\: \si{\per\metre} $')
plt.xlabel(r'$B_4 \:/\: \si{\micro\tesla}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a10.pdf')
# U_b,2 = 450 V
plt.clf()
params11 = ucurve_fit(reg_linear, B_5, D_kurz/(L**2+D_kurz**2)) # linear fit
m11, b11 = params11
write('build/parameter_m11.tex', make_SI(m11, r'\per\metre\per\tesla', figures=1)) # number of significant figures
write('build/parameter_b11.tex', make_SI(b11, r'\per\metre', figures=2)) # number of significant figures
t_plot11 = np.linspace(np.amin(B_5), np.amax(B_5), 100)
plt.plot(t_plot11*10**(6), (m11.n*t_plot11+b11.n), 'b-', label='Linearer Fit')
plt.plot(B_5*10**(6), D_kurz/(L**2+D_kurz**2), 'rx', label='Messdaten')
#plt.xlim(t_plot1[0], t_plot1[-1])
plt.ylabel(r'$\frac{D}{L^2 + D^2} \:/\: \si{\per\metre} $')
plt.xlabel(r'$B_5 \:/\: \si{\micro\tesla}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_a11.pdf')
# Collect B-field fit parameters, tabulate them, and derive the specific
# charge e/m0 of the electron from the slopes.
die_bs_b = np.array([b7.n, b8.n, b9.n, b10.n, b11.n])
die_bs_err_b = np.array([b7.s, b8.s, b9.s, b10.s, b11.s])
die_ms_b = np.array([m7.n, m8.n, m9.n, m10.n, m11.n])
die_ms_err_b = np.array([m7.s, m8.s, m9.s, m10.s, m11.s])
write('Tabelle_e.tex', make_table([U_b_B, die_ms_b, die_ms_err_b, die_bs_b, die_bs_err_b],[0, 2, 2, 2, 2])) # every value with an uncertainty gets two columns
write('Tabelle_e_texformat.tex', make_full_table(
    'Fitparameter: Steigung $m$ und y-Achsenabschnitt $b$.',
    'tab:e',
    'Tabelle_e.tex',
    [], # careful: these numbers denote the resulting column indices
    # that should be rendered as multicolumns
    [
    r'$U_b \:/\: \si{\volt}$',
    r'$m \:/\:\si{\per\metre\per\tesla}$',
    r'$\increment{m} \:/\:\si{\per\metre\per\tesla}$',
    r'$b \:/\: \si{\per\metre}$',
    r'$\increment{b} \:/\:\si{\per\metre}$']))
steigungen = unp.uarray(die_ms_b, die_ms_err_b)
# Specific charge e/m0 = 8 * U_b * m^2 per accelerating voltage.
konstante = 8*U_b_B*steigungen**2
write('build/konstante_0.tex', make_SI(konstante[0]*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
write('build/konstante_1.tex', make_SI(konstante[1]*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
write('build/konstante_2.tex', make_SI(konstante[2]*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
write('build/konstante_3.tex', make_SI(konstante[3]*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
write('build/konstante_4.tex', make_SI(konstante[4]*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
mean_k = np.mean(noms(konstante))
std_k = np.std(noms(konstante))
k = ufloat(mean_k, std_k)
write('build/konstante_mean.tex', make_SI(k*10**(-11), r'\coulomb\per\kilogram','e11', figures=2)) # number of significant figures
k_lit = 1.758820024*10**11  # literature value of e/m0 in C/kg
write('build/lit.tex', make_SI(k_lit*10**(-11), r'\coulomb\per\kilogram','e11', figures=4))
k_rel = (mean_k - k_lit) / k_lit  # relative deviation from literature
write('build/rel.tex', make_SI(k_rel*100, r'\percent', figures=1))
#
## Earth's magnetic field
I_erd = 0.26 # compensation current in ampere
U_erd = 200 # accelerating voltage
phi = 70/360 * 2*np.pi # inclination angle in degrees, converted to radians
B_erd = mu_0 * 8/np.sqrt(125) * N/R * I_erd
write('build/erdmagnetfeld.tex', make_SI(B_erd*10**6, r'\micro\tesla', figures=2))
# Only the horizontal component is compensated; correct by the inclination.
B_erd_korrigiert = B_erd / np.sin(phi)
write('build/erdmagnetfeld_korrigiert.tex', make_SI(B_erd_korrigiert*10**6, r'\micro\tesla', figures=2))
B_erd_lit = 19.3221
write('build/erdmagnetfeld_korrigiert_lit.tex', make_SI(B_erd_lit, r'\micro\tesla', figures=2))
| Jean1995/Praktikum | V501/PythonSkript.py | PythonSkript.py | py | 21,644 | python | de | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.rcdefaults",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams.update",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name":... |
7972926046 | # Author: Leandro Cruz Hermida <hermidal@cs.umd.edu>
"""
sksurv_extensions is a library of custom extensions and core improvements to
scikit-survival
"""
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn_extensions.pipeline import ExtendedPipeline
@if_delegate_has_method(delegate='_final_estimator')
def predict_cumulative_hazard_function(self, X, **predict_params):
    """Predict the cumulative hazard function for each sample.

    For a feature vector :math:`x` the cumulative hazard is

    .. math::

        H(t \\mid x) = \\exp(x^\\top \\beta) H_0(t) ,

    where the baseline hazard :math:`H_0(t)` is estimated by
    Breslow's estimator.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Data matrix.

    Returns
    -------
    cum_hazard : ndarray, shape = (n_samples,)
        Predicted cumulative hazard functions.
    """
    # Push the data through all transform steps first, then delegate
    # to the fitted final estimator.
    Xt, predict_params = self._transform_pipeline('predict',
                                                  X, predict_params)
    final_estimator = self.steps[-1][-1]
    return final_estimator.predict_cumulative_hazard_function(
        Xt, **predict_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_survival_function(self, X, **predict_params):
    """Predict the survival function for each sample.

    For a feature vector :math:`x` the survival function is

    .. math::

        S(t \\mid x) = S_0(t)^{\\exp(x^\\top \\beta)} ,

    where the baseline survival function :math:`S_0(t)` is estimated
    by Breslow's estimator.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Data matrix.

    Returns
    -------
    survival : ndarray, shape = (n_samples,)
        Predicted survival functions.
    """
    # Push the data through all transform steps first, then delegate
    # to the fitted final estimator.
    Xt, predict_params = self._transform_pipeline('predict',
                                                  X, predict_params)
    final_estimator = self.steps[-1][-1]
    return final_estimator.predict_survival_function(Xt, **predict_params)
# Monkey-patch the survival-analysis prediction methods onto
# ExtendedPipeline so every pipeline exposes its final estimator's
# hazard/survival functions.
ExtendedPipeline.predict_cumulative_hazard_function = (
    predict_cumulative_hazard_function)
ExtendedPipeline.predict_survival_function = (
    predict_survival_function)
| ruppinlab/tcga-microbiome-prediction | sksurv_extensions/__init__.py | __init__.py | py | 2,183 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "sklearn.utils.metaestimators.if_delegate_has_method",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.metaestimators.if_delegate_has_method",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn_extensions.pipeline.ExtendedPipel... |
70662958755 | from collections import deque
# Five independent deques; exercise appendleft/append on the fourth one.
d = [deque() for _ in range(0, 5)]
d[3].appendleft(1)
d[3].appendleft(2)
d[3].appendleft(3)
d[3].append(1)
print(d[1].maxlen)  # unbounded deque, so maxlen is None
print(d)
# Second list of deques; seed one with two words (appendleft reverses order).
e = [deque() for _ in range(0, 5)]
e[2].appendleft('world')
e[2].appendleft('hello')
def write_chain(chain):
    """Print the elements of *chain* on one line, separated by single spaces."""
    line = ' '.join(chain)
    print(line)
# Prints "hello world": appendleft above reversed the insertion order.
write_chain(each for each in e[2])
# Deque of numeric strings, filled like a simple FIFO queue.
e[1].append("10")
e[1].append("5")
e[1].append("2")
e[1].append("99")
diff = 10 - 3
# Count down from diff-1 to 0 inclusive.
for x in range(diff - 1, -1, -1):
    print(x)
{
"api_name": "collections.deque",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 12,
"usage_type": "call"
}
] |
28138890438 | import json
import inspect
import urllib.parse
from longitude.core.data_sources.base import DataSource
class DisabledCache:
    """Context manager that temporarily disables caching on a data source.

    On entering the ``with`` block the wrapped :class:`DataSource`'s cache
    is disabled; it is re-enabled on exit, even if the body raises.
    """

    # The wrapped data source (None until a real one is supplied).
    data_source = None

    def __init__(self, ds):
        # Fail fast on anything that is not a DataSource. The original
        # truthiness check let falsy values (0, "") slip through silently;
        # None alone remains tolerated for backward compatibility, although
        # entering the context then fails.
        if ds is not None and not isinstance(ds, DataSource):
            raise TypeError('DisabledCache can only be applied to DataSource subclasses.')
        self.data_source = ds

    def __enter__(self):
        self.data_source.disable_cache()
        # Return the manager so ``with DisabledCache(ds) as dc:`` works.
        return self

    def __exit__(self, *args):
        # Always re-enable the cache, regardless of exceptions in the body.
        self.data_source.enable_cache()
def method_not_supported(o):
    """Log an error on *o*'s logger naming the unsupported method.

    Must be called directly from within the unsupported method: the frame
    one level up the stack is taken to be that method, and its name is
    reported in the error message.
    """
    caller_name = inspect.stack()[1].function
    o.logger.error("%s does not support %s" % (o.__class__.__name__, caller_name))
def add_url_params(url, params):
    """Return *url* with *params* merged into its query string.

    Query arguments already present in *url* are preserved; arguments that
    also appear in *params* are overridden. Bool and dict values are
    serialized with ``json.dumps`` so they come out JSON-friendly
    (e.g. ``False`` becomes ``false``).

    :param url: target URL, possibly already carrying query arguments
    :param params: dict of query arguments to add or override
    :return: string with the updated URL

    >>> add_url_params('http://stackoverflow.com/test?answers=true',
    ...                {'answers': False, 'data': ['some', 'values']})
    'http://stackoverflow.com/test?answers=false&data=some&data=values'
    """
    # Unquote first so existing (possibly encoded) arguments are not lost.
    parsed = urllib.parse.urlparse(urllib.parse.unquote(url))
    # Merge the existing query arguments with the requested ones.
    merged = dict(urllib.parse.parse_qsl(parsed.query))
    merged.update(params)
    # Re-encode bools and dicts as JSON-friendly values.
    for key, value in merged.items():
        if isinstance(value, (bool, dict)):
            merged[key] = json.dumps(value)
    query = urllib.parse.urlencode(merged, doseq=True)
    # Rebuild the URL with the new query string; everything else unchanged.
    return urllib.parse.ParseResult(
        parsed.scheme, parsed.netloc, parsed.path,
        parsed.params, query, parsed.fragment,
    ).geturl()
| GeographicaGS/Longitude | longitude/core/common/helpers.py | helpers.py | py | 2,596 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "longitude.core.data_sources.base.DataSource",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "inspect.stack",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.unquote",
"line_number": 45,
"usage_type": "call"
},
... |
6235823673 | """Client to interface with Rito API."""
import os
from typing import Any, Dict, Optional
from absl import logging
import requests
def call(endpoint: str,
         api_key: str,
         params: Optional[Dict[str, Any]] = None,
         data: Optional[Any] = None,
         platform_id: str = 'americas'):
  """Helper function to call the Riot API.

  Note: Riot's API is a bit inconsistent on where to provide your request
  data. It can either be in the endpoint path, as URL params, or in the get
  body.

  Args:
    endpoint: Relative path to endpoint within Riot API.
      E.g., `/lol/match/v4/matches/{matchId}`
    api_key: Your secret API key to authenticate with Riot.
    params: Additional params to pass to the web request.
    data: Arbitrary data sent in the GET body. Used to specify constraints
      for tournament api.
    platform_id: Regional routing value used to build the API host name
      (e.g. 'americas', 'europe', 'asia').

  Returns:
    JSON response from Riot.

  Raises:
    RuntimeError: If request fails.
  """
  # NOTE(review): os.path.join assumes '/' separators; fine on POSIX but
  # would break on Windows -- consider plain string formatting.
  url = os.path.join(
      'https://%s.api.riotgames.com' % platform_id,
      endpoint)
  headers = {'X-Riot-Token': api_key}
  # An empty/absent body means a plain GET; otherwise POST the body.
  if not data:
    response = requests.get(url, params=params, headers=headers)
  else:
    response = requests.post(url, params=params, data=data, headers=headers)
  if response.status_code != requests.codes.ok:
    # Bug fix: the placeholder was a bare '%', which broke this log line.
    logging.info('Code: %s', response.status_code)
    logging.info('Response: %s', response.content)
    raise RuntimeError('Failed request for: %s' % url)
  return response.json()
| vilhelm/icl-bot | server/riot_client.py | riot_client.py | py | 1,458 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_nu... |
34564509479 | # Path generator by Team Griffin
from __future__ import print_function
import pyvisgraph as vg
import itertools
import time
import sys
# Compute and dump all pairwise robot shortest paths for one problem instance.
# NOTE(review): Python 2 only — uses execfile() and time.clock(); the exec'd
# instance_<n>.py is expected to define `robots` (objects carrying
# extradata_1 / extradata_2 attributes) — confirm against an instance file.
if (len(sys.argv) > 1):
    instance_number = int(sys.argv[1])
    # Import instance information
    execfile("instance_%d.py" % instance_number)
    # Calculate shortest paths
    g = vg.VisGraph()
    # NOTE(review): extension 'pk1' (digit one) — presumably intentional, but
    # looks like a typo for the usual '.pkl'; verify against the graph writer.
    g.load('graph_%d.pk1' % instance_number)
    # Compute paths
    f = open('C:\\dev\\path_%d.txt' % instance_number, 'w')
    t1 = time.clock()
    loopCounter = 0
    totalCount = len(robots)
    # Every unordered robot pair gets one "from,to#waypoints" line; pairs whose
    # shortest path is a straight line (no intermediate waypoints) are skipped.
    for pair in itertools.combinations(robots, 2):
        #print("From: %d to %d:" % (pair[0].extradata_1, pair[1].extradata_1))
        shortest = g.shortest_path(pair[0], pair[1])
        numWaypoints = len(shortest) - 2
        #print("Waypoint length: %d" % numWaypoints)
        if numWaypoints > 0:
            #print(shortest)
            builtstr = "%d,%d#" % (pair[0].extradata_1, pair[1].extradata_1)
            # First element
            builtstr += str(shortest[1].extradata_1)
            builtstr += ":"
            builtstr += str(shortest[1].extradata_2)
            # All the rest
            for waypoint in shortest[2:-1]:
                builtstr += "|"
                builtstr += str(waypoint.extradata_1)
                builtstr += ":"
                builtstr += str(waypoint.extradata_2)
            print(builtstr, file=f)
        loopCounter += 1
        # Progress report every 20 pairs with a rough linear ETA.
        if (loopCounter % 20 == 0):
            elapsed = time.clock() - t1
            completed = pair[0].extradata_1
            left = totalCount - pair[0].extradata_1
            if completed > 0:
                time_for_one = elapsed / completed
            else:
                time_for_one = 0
            time_remaining = time_for_one * left
            print("Status: %d:%d / %d. Elapsed: %f seconds. Estimated time remaining: %f s." % (completed, pair[1].extradata_1, totalCount, elapsed, time_remaining))
    print("Computing paths took %f seconds." % (time.clock()-t1))
else:
print("Invalid instance number") | tsuiwwwayne/move-and-tag-competition | py/loadgraph.py | loadgraph.py | py | 2,120 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyvisgraph.VisGraph",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.clock",
"lin... |
71317076194 | import os
import numpy as np
from scipy import signal
from scipy.io import loadmat
import torch
from torch import nn
import torch.nn.functional as F
import conv
from layers.modules import *
from layers.functions import ConvMotionFunction
class Pooling(nn.Module):
    """Fuse several single-channel residual maps into one.

    The inputs are concatenated along the channel axis and passed through
    five ELU-activated 3x3 convolutions followed by a final 3x3 convolution
    that reduces the features back to a single channel.
    """
    def __init__(self, n_in):
        super(Pooling, self).__init__()
        # Layers are created in this exact order so that seeded weight
        # initialization stays reproducible.
        self.conv1 = nn.Conv2d(n_in, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, padding=1)
        self.conv5 = nn.Conv2d(32, 32, 3, padding=1)
        self.conv6 = nn.Conv2d(32, 1, 3, padding=1)
        self.elu1 = nn.ELU()
        self.elu2 = nn.ELU()
        self.elu3 = nn.ELU()
        self.elu4 = nn.ELU()
        self.elu5 = nn.ELU()
        self.n_in = n_in
    def forward(self, inputs):
        """Concatenate `inputs` (a sequence of tensors) and pool to 1 channel."""
        fused = torch.cat(inputs, 1)
        assert(fused.size(1) == self.n_in)
        stages = zip(
            (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5),
            (self.elu1, self.elu2, self.elu3, self.elu4, self.elu5),
        )
        for convolve, activate in stages:
            fused = activate(convolve(fused))
        return self.conv6(fused)
class DenoiserGradient(nn.Module):
    """K-layer convolutional denoiser applied to a gradient image.

    Architecture: 1->64 (5x5), then K-2 64->64 (3x3) ReLU layers, then a
    64->1 (3x3) output layer, all with reflect padding so spatial size is
    preserved.
    """
    def __init__(self, K=6):
        super(DenoiserGradient, self).__init__()
        self.K = K  # total number of conv layers
        self.conv = [nn.Conv2d(1,64,5)]
        for k in range(K-2):
            self.conv.append(nn.Conv2d(64,64,3))
        self.conv.append(nn.Conv2d(64,1,3))
        self.conv = nn.ModuleList(self.conv)
    def forward(self, x):
        for k in range(self.K-1):
            # Half kernel size of the current layer, used as reflect padding.
            hker = self.conv[k].weight.shape[-1]//2
            x = F.pad(x, (hker, hker, hker, hker), 'reflect')
            x = F.relu(self.conv[k](x))
        # NOTE: `hker` here is left over from the last loop iteration (a 3x3
        # layer, so hker == 1); this matches the final 3x3 conv only because
        # the kernel sizes happen to agree.
        x = self.conv[-1](F.pad(x, (hker, hker, hker, hker), 'reflect'))
        return x
class HQS(nn.Module):
    """Base class for half-quadratic-splitting deblurring networks.

    Holds the iteration count, the inner-loop count, a fixed schedule of
    penalty weights `beta`, and the fixed finite-difference gradient filters.
    Subclasses implement `forward`.
    """
    def __init__(self, n_iter=5, n_in=2):
        super(HQS, self).__init__()
        self.n_iter = n_iter  # number of outer HQS iterations
        self.n_in = n_in      # number of inner x-update steps per iteration
        # Geometric beta schedule (powers of 4) scaled by a fixed constant.
        self.beta = np.array([0,4**0,4,4**2,4**3,4**4,4**5,4**6,4**7,4**8])*1e-3 / 10 * 81
        self.beta = torch.from_numpy(self.beta).float()
    def forward(self, y, kmag, kori, labels):
        raise NotImplementedError()
    def init_grad(self):
        # Two 5x5 filters: channel 0 is a horizontal forward difference,
        # channel 1 a vertical one; frozen (requires_grad=False).
        filters = torch.zeros(2,1,5,5)
        filters[1,0,2,2] = 1
        filters[1,0,1,2] = -1
        filters[0,0,2,2] = 1
        filters[0,0,2,1] = -1
        return nn.Parameter(filters, requires_grad=False)
class CHQS(HQS):
    """Classical (non-learned) HQS deblurring with soft-thresholded gradients.

    Each channel of `input` is deblurred independently: the z-update applies
    soft shrinkage to the image gradients, the x-update accumulates filtered
    residuals using the per-iteration correction kernels `d`.
    """
    def __init__(self, n_out=5, n_in=2, lambd=0.005):
        super(CHQS, self).__init__(n_out, n_in)
        self.weight = self.init_grad()  # fixed gradient filters (2 channels)
        self.lambd = lambd              # sparsity weight for soft shrinkage
    def forward(self, input, k, d):
        # k: blur kernel; d: per-iteration correction kernels, indexed [i, l].
        hks = k.shape[-1]//2
        hds = d.shape[-1]//2
        x_padding = (hks, hks, hks, hks)
        r_padding = (hds, hds, hds, hds)
        output = []
        for c in range(input.size(1)):
            # Process one channel at a time as a 1-channel image.
            y = input[:, c].unsqueeze(1)
            x = y.clone()
            for i in range(self.n_iter):
                # z update
                z = F.conv2d(F.pad(x, (2, 2, 2, 2), 'replicate'), self.weight)
                # Threshold scales with 1/beta[i]; max() guards division by ~0.
                z = F.softshrink(z, self.lambd / max(1e-4, self.beta[i].item()))
                # x update
                for j in range(self.n_in):
                    r0 = y - F.conv2d(F.pad(x, x_padding, 'replicate'), k)
                    r1 = z - F.conv2d(F.pad(x, (2, 2, 2, 2), 'replicate'), self.weight)
                    # r has 3 channels: data residual + 2 gradient residuals.
                    r = torch.cat([r0, r1], dim=1)
                    r_pad = F.pad(r, r_padding, 'replicate')
                    for l in range(3):
                        x = x + F.conv2d(r_pad[:, l].unsqueeze(0), d[i, l].unsqueeze(0).unsqueeze(0))
                    x = x.clamp(0, 1)
            output.append(x.clone())
        output = torch.cat(output, 1)
        return output
#### Main network for uniform deblurring ####
class LCHQS(HQS):
    """Main network for uniform deblurring (learned HQS).

    The z-update runs learned gradient denoisers on horizontal/vertical
    gradient images; the x-update fuses three filtered residuals through a
    learned Pooling block per (iteration, inner-step) pair. Returns the list
    of intermediate estimates, one per outer iteration.
    """
    def __init__(self, n_out=5, n_in=2, K=6):
        super(LCHQS, self).__init__(n_out, n_in)
        self.weight = self.init_grad()
        # One denoiser per outer iteration; one pooling per inner step.
        self.denoiser = nn.ModuleList([DenoiserGradient(K) for i in range(n_out)])
        self.pooling = nn.ModuleList([Pooling(3) for i in range(n_out*n_in)])
    def forward(self, input, k, d, k1, k2, d1, d2):
        # k/d: blur and correction kernels; k1/k2: gradient filters;
        # d1/d2: correction kernels for the gradient residuals.
        hks = k.shape[-1]//2
        hds = d.shape[-1]//2
        hdds = d1.shape[-1]//2
        # NOTE: these padding tuples are computed but unused here — the
        # project-local conv.conv2d handles padding internally.
        x_padding = (hks, hks, hks, hks)
        r_padding = (hds, hds, hds, hds)
        d_padding = (hdds, hdds, hdds, hdds)
        k_padding = (2, 2, 2, 2)
        output = []
        y = input
        x = y.clone()
        for i in range(self.n_iter):
            # z update
            x1 = conv.conv2d(x, k1, 'replicate')
            z1 = self.denoiser[i](x1)
            x2 = conv.conv2d(x, k2, 'replicate')
            # Transpose trick: reuse the same denoiser for the other direction.
            z2 = self.denoiser[i](x2.transpose(-1, -2)).transpose(-1, -2)
            # x update
            for j in range(self.n_in):
                r0 = y - conv.conv2d(x, k, 'replicate') # deblurred residual
                r0 = conv.conv2d(r0, d, 'replicate')
                r1 = z1 - conv.conv2d(x, k1, 'replicate') # denoised residual
                r1 = conv.conv2d(r1, d1, 'replicate')
                r2 = z2 - conv.conv2d(x, k2, 'replicate') # denoised residual
                r2 = conv.conv2d(r2, d2, 'replicate')
                x = x + self.pooling[i*self.n_in+j]([r0, r1, r2])
            output.append(x.clone())
        return output
class NUCHQS(HQS):
    """Classical HQS variant for non-uniform (spatially varying) blur.

    Uses a motion-field convolution for the forward blur and pre-stored
    inverse filters selected per pixel label for the correction step.
    """
    def __init__(self, n_iter=5, n_in=2, N_c=2, lambd=0.005):
        super(NUCHQS, self).__init__(n_iter, n_in)
        self.weight = self.init_grad()
        # NOTE(review): kerpath1 is never used; kerpath2.format(i) is a no-op
        # because the path contains no '{}' placeholder — every ConvCls loads
        # the same file. Confirm whether per-iteration files were intended.
        kerpath1 = "./data/kers_grad.pt"
        kerpath2 = "./data/inverse_filter_nonuniform.pt"
        self.nu_conv1 = ConvMotion(self.weight)
        self.nu_conv2 = nn.ModuleList([ConvCls(kerpath2.format(i)) for j in range(n_in) for i in range(n_iter)])
        self.lambd = lambd
        N_l = self.nu_conv2[0].weight.shape[0]
        self.line = nn.ModuleList([Line(self.lambd, self.beta[i].item(), N_l, N_c) for i in range(n_iter)])
    def forward(self, y, kmag, kori, labels):
        # kmag/kori: per-pixel motion magnitude/orientation; labels: per-pixel
        # kernel class selection for the inverse filters.
        x = y.clone()
        for i in range(self.n_iter):
            z = F.conv2d(F.pad(x, (2,2,2,2), 'reflect'), self.weight)
            z = self.line[i](z, labels)
            z = torch.cat([y, z], 1)
            for j in range(self.n_in):
                r = z - self.nu_conv1(x, kmag, kori)
                x = x + self.nu_conv2[i*self.n_in + j](r, labels)
                x = x.clamp(0, 1)
        return x
#### Main network for non-uniform deblurring ####
class NULCHQS(HQS):
    """Main network for non-uniform deblurring (learned HQS).

    Same learned denoiser/pooling structure as LCHQS, but the data-term
    residual uses a non-uniform motion convolution (NUConv2d) and its
    label-selected inverse (InvNUConv2d). Returns one estimate per outer
    iteration.
    """
    def __init__(self, weights, n_out=5, n_in=2, K=6):
        super(NULCHQS, self).__init__(n_out, n_in)
        self.weight = self.init_grad()
        self.denoiser = nn.ModuleList([DenoiserGradient(K) for i in range(n_out)])
        self.pooling = nn.ModuleList([Pooling(3) for i in range(n_out*n_in)])
        self.nu_conv = NUConv2d()
        self.inv_nu_conv = InvNUConv2d(weights)
    def forward(self, input, mag, ori, labels, k1, k2, d1, d2):
        hdds = d1.shape[-1]//2
        d_padding = (hdds, hdds, hdds, hdds)
        k_padding = (2, 2, 2, 2)
        output = []
        y = input
        x = y.clone()
        for i in range(self.n_iter):
            # z update
            x1 = F.conv2d(F.pad(x, k_padding, 'replicate'), k1)
            z1 = self.denoiser[i](x1)
            x2 = F.conv2d(F.pad(x, k_padding, 'replicate'), k2)
            # Same denoiser handles the transposed direction.
            z2 = self.denoiser[i](x2.transpose(-1, -2)).transpose(-1, -2)
            # x update
            for j in range(self.n_in):
                r0 = y - self.nu_conv(x, mag, ori)
                r0 = self.inv_nu_conv(r0, labels)
                r1 = z1 - F.conv2d(F.pad(x, k_padding, 'replicate'), k1) # denoised residual
                r1 = F.conv2d(F.pad(r1, d_padding, 'replicate'), d1)
                r2 = z2 - F.conv2d(F.pad(x, k_padding, 'replicate'), k2) # denoised residual
                r2 = F.conv2d(F.pad(r2, d_padding, 'replicate'), d2)
                x = x + self.pooling[i*self.n_in+j]([r0, r1, r2])
            output.append(x.clone())
return output | teboli/CPCR | networks.py | networks.py | py | 8,025 | python | en | code | 25 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
73111178914 | # -*- coding: utf-8 -*-
import scrapy
import json
import time
from scrapy.http import FormRequest
from loguru import logger
from SafetyInformation.items import SafeInfoItem
from SafetyInformation.settings import SLEEP_TIME, TOTAL_PAGES
class NosecSpider(scrapy.Spider):
    """Scrapy spider for the nosec.org AJAX news index.

    Posts page-by-page form requests to the index endpoint and yields one
    SafeInfoItem per article, sleeping SLEEP_TIME between pages up to
    TOTAL_PAGES.
    """
    name = 'nosec'
    allowed_domains = ['nosec.org']
    start_urls = ['https://nosec.org/home/ajaxindexdata']
    # Current page number; mutated in parse() as pagination advances.
    page = 1
    # HACK / security note: the CSRF token and session cookie are hard-coded
    # and will expire; they should come from configuration, not source.
    headers = {
        'Referer': 'https://nosec.org/home/index',
        'Host': 'nosec.org',
        'X-Requested-With': 'XMLHttpRequest',
        'X-CSRF-TOKEN': 'xx7Ivf9c0KCZOptafzkAvZNXMcOQ4ptZ6SbDgWkM',
        'Cookie': 'Hm_lvt_176023972b5e615deb22d97a52557032=1589855362,1589878474; XSRF-TOKEN=eyJpdiI6IkVUYUpcL0s2eTVhU0p2UlBDenhabElRPT0iLCJ2YWx1ZSI6IkZ5b3pJR1R4K3ZYNllaMmNVTmpFa1dqTDNGdmpUMFNVNk5XckNJVDFRU0diSFRxWEhaYXBqRTVLVGlrWXJmYUxmbmIwdHpIcm1sb0h1Z3p6dWlxS0tRPT0iLCJtYWMiOiIwMzUzYjEwMmY1NWQwNzBmNTIzZmI0ZDE3ZjJlZjI0N2E3NDNhNGFiNTNkZWQ5YzVlNGViNDA3ODA0M2RjYTJlIn0%3D; laravel_session=eyJpdiI6InMwZXBuU1o5cFJ5SWZDaTl3dENzZkE9PSIsInZhbHVlIjoiRmcwd29Ra1J0Z1RONlJ4cWFXeFcxR0FVRUhRXC80YkNkU21vNEVyM2JhcXlcL3BoYk4zbVRHU0VueUFCM0xTS1wvTzZ6dVhRQ0xJbUdVeWZcL0poOGJ2d0JnPT0iLCJtYWMiOiIyNWQ2MTliMWQzM2NkNzI4MDQ1ODcyYzNiN2ZiYjgwZTJlZGU1MjE5ODY1Yzc5NDA2NDI5MWMwZDBmNGNhNmM3In0%3D; Hm_lpvt_176023972b5e615deb22d97a52557032=1589878542'
    }
    # Form payload; 'page' is kept in sync with self.page.
    param = {
        'keykind': '',
        'page': str(page),
    }
    source = 'https://nosec.org'
    def start_requests(self):
        # Kick off pagination with a POST (the index endpoint expects form data).
        yield FormRequest(url=self.start_urls[0], headers=self.headers, formdata=self.param, callback=self.parse)
    def parse(self, response):
        # Log message is a runtime string ("currently scraping page N").
        logger.info("==========当前正在抓取第{}页==========".format(self.page))
        item = SafeInfoItem()
        result_list = json.loads(response.text)['data']['threatData']['data']
        for result in result_list:
            id = result['id']
            title = result['title']
            link = 'https://nosec.org/home/detail/{}.html'.format(id)
            intro = result['summary']
            # Keep only the date part of "YYYY-MM-DD HH:MM:SS".
            date = result['publiced_at'].split(' ')[0]
            author = result['username']
            # NOTE: tags is computed but never stored on the item.
            tags = json.dumps([result['kind_name']])
            source = self.source
            info_type = 'news'
            item['title'] = title
            item['link'] = link
            item['intro'] = intro
            item['date'] = date
            item['author'] = author
            item['source'] = source
            item['type'] = info_type
            logger.info(item)
            yield item
        # Throttle, then request the next page until TOTAL_PAGES is reached.
        time.sleep(SLEEP_TIME)
        self.page += 1
        self.param['page'] = str(self.page)
        if self.page <= TOTAL_PAGES:
            yield FormRequest(url=self.start_urls[0], headers=self.headers, formdata=self.param, callback=self.parse)
| Silentsoul04/SafetyInformation | SafetyInformation/spiders/nosec.py | nosec.py | py | 2,793 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "scrapy.http.FormRequest",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "loguru.logger.info",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "loguru.... |
70342931234 | import json
import logging
import os
import shutil
import sys
# pylint: disable=no-name-in-module
# No name 'tar' in module 'sh'
from sh import tar
try:
from tempfile import TemporaryDirectory
except ImportError:
from s2e_env.utils.tempdir import TemporaryDirectory
from s2e_env import CONSTANTS
from s2e_env.command import ProjectCommand, CommandError
from s2e_env.commands.import_export import S2E_ENV_PLACEHOLDER, rewrite_files
logger = logging.getLogger('export')
class Command(ProjectCommand):
    """
    Export a project so that it can be shared with other S2E environments.
    All files listed under ``exported_files`` in ``config.yaml`` will be
    exported. Each of these files will be checked for references to the S2E
    environment path, and all occurances of this path will be replaced by a
    placeholder marker. When importing the project into a new S2E environment,
    this placeholder will be rewritten with the path of the new S2E
    environment.
    The user can also export previous analysis results (i.e. all of the
    ``s2e-out-*`` directories) if required.
    """
    help = 'Export an S2E project as an archive'
    def add_arguments(self, parser):
        """Register the CLI options for this command on ``parser``."""
        super().add_arguments(parser)
        parser.add_argument('output_path', nargs='?',
                            help='The path to the exported project archive. '
                                 'Defaults to <project_name>.zip in the '
                                 'current working directory')
        parser.add_argument('-r', '--export-results', action='store_true',
                            help='Export the results in the s2e-out-* directories')
    def handle(self, *args, **options):
        """Stage the project into a temp dir, rewrite paths, and archive it."""
        # Name the project archive if it doesn't already have a name
        output_path = options['output_path']
        if not output_path:
            output_path = self.env_path(f'{self.project_name}.zip')
        with TemporaryDirectory() as temp_dir:
            # Store all of the exported files in a temporary directory so that
            # we can just execute tar on the entire directory
            export_dir = os.path.join(temp_dir, self.project_name)
            # Do **not** export these files
            blacklist = CONSTANTS['import_export']['blacklist']
            if not options['export_results']:
                blacklist.extend(['s2e-last', 's2e-out-*'])
            # Copy the project directory
            logger.info('Copying project %s', self.project_name)
            shutil.copytree(self.project_path(), export_dir,
                            ignore=shutil.ignore_patterns(*blacklist))
            # Rewrite certain project files (e.g., launch-s2e.sh, etc.) to
            # remove the absolute path to the current S2E environment. This
            # path is replaced with a placeholder token which is then rewritten
            # with the absolute path of the new S2E environment when the
            # project is imported
            logger.info('Rewriting project files')
            rewrite_files(export_dir,
                          CONSTANTS['import_export']['project_files'],
                          self.env_path(), S2E_ENV_PLACEHOLDER)
            # Update project.json
            #
            # project.json has already had its S2E environment path
            # overwritten. However, there are still other paths that need
            # rewriting to ensure that the project can be correctly imported.
            logger.info('Updating project.json')
            with open(os.path.join(export_dir, 'project.json'), 'r+', encoding='utf-8') as f:
                proj_desc = json.load(f)
                # The target files in a project are normally symbolic links.
                # However, when exported they are no longer symbolic links and
                # so we must update their paths
                proj_path = proj_desc['project_dir']
                # Rebase a path into the project directory, keeping its basename.
                def update_path(p):
                    return os.path.join(proj_path, os.path.basename(p))
                target_path = proj_desc.get('target_path')
                if target_path:
                    proj_desc['target_path'] = update_path(target_path)
                target_files = proj_desc.get('target_files')
                if target_files:
                    proj_desc['target_files'] = [update_path(tf) for tf in target_files]
                # Update the project.json in the temporary directory
                proj_desc_json = json.dumps(proj_desc, sort_keys=True, indent=4)
                # Rewrite the file in place (seek + truncate handles shrinkage).
                f.seek(0)
                f.write(proj_desc_json)
                f.truncate()
            # Create the archive of the temporary directory's contents
            self._create_archive(output_path, temp_dir)
        logger.success('Project successfully exported to %s', output_path)
    def _create_archive(self, archive_path, export_dir):
        """
        Create the final archive of all the exported project files.
        Args:
            archive_path: Path to the ``tar.xz`` archive.
            export_dir: Path to the directory containing the files to export.
        """
        try:
            logger.info('Creating archive %s', archive_path)
            if archive_path.endswith('.tar.xz'):
                create_archive = tar.bake(create=True, xz=True, verbose=True,
                                          file=archive_path, directory=export_dir,
                                          _out=sys.stdout,
                                          _err=sys.stderr)
                # NOTE(review): uses self._project_name while the rest of the
                # class uses self.project_name — confirm both exist on
                # ProjectCommand (likely property over the private attribute).
                create_archive(self._project_name)
            elif archive_path.endswith('zip'):
                # removesuffix requires Python 3.9+.
                shutil.make_archive(archive_path.removesuffix('.zip'), 'zip', export_dir)
            else:
                raise Exception('Unsupported archive format extension')
        except Exception as e:
            raise CommandError(f'Failed to archive project - {e}') from e
| S2E/s2e-env | s2e_env/commands/export_project.py | export_project.py | py | 5,889 | python | en | code | 89 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "s2e_env.command.ProjectCommand",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "s2e_env.utils.tempdir.TemporaryDirectory",
"line_number": 57,
"usage_type": "call"
},
... |
31014597352 | #!usr/bin/env python
# -*- coding:utf-8 _*-
"""
@Created by sublime_text at home on 2018/2/28—20:12!
@Gnome: Live and learn!
@Author: 葛绪涛
@Nickname: wordGe
@QQ: 690815818
@Filename: web.py
@Blog: http://higexutao.blog.163.com
"""
import os.path
from tornado import httpserver, web, ioloop
# 功能模块 实现具体的功能
class MainPageHandler(web.RequestHandler):
    """Serves the form page on GET and renders the submitted values on POST."""
    def get(self, *args, **kwargs):
        # self.write("Hello world!")
        self.render("formSubmit.html")
    def post(self, *args, **kwargs):
        # pass
        # file_metas = self.request.files.get('file')
        # print(type(file_metas))
        # print("Hi")
        # Read the submitted form fields from the POST body.
        user_name = self.get_argument("username")
        user_email = self.get_argument("email")
        user_website = self.get_argument("website")
        user_language = self.get_argument("language")
        # Echo the submission back through the user.html template.
        self.render("user.html", username=user_name, email=user_email, website=user_website, language=user_language)
# NOTE(review): this absolute path is computed and printed but never used —
# the settings dict below passes the relative literal "template" instead.
template_path = os.path.join(os.path.dirname(__file__), "template")
print(template_path)
# Application settings (template/static paths).
settings = {
    'template_path': "template",
    # Directory holding the HTML template files.
    'static_path': "static",
}
# Routing table: maps URL patterns to handler classes.
# The version below (commented out) is without settings; ** unpacks
# dynamic settings as keyword arguments.
# application = web.Application([
#     (r"/", MainPageHandler),
# ])
# With the settings applied — note the **settings unpacking.
application = web.Application([
    (r"/", MainPageHandler),
], **settings)
# handlers = [
#     (r"/", IndexHandler),
#     (r"/user", UserHandler)
# ]
# Server entry point: bind the application to a port and start the IO loop.
if __name__ == '__main__':
    http_server = httpserver.HTTPServer(application)
    http_server.listen(5407)
    ioloop.IOLoop.current().start()
| tagxt/WenZiShiBie_tornado | web.py | web.py | py | 1,860 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tornado.web.RequestHandler",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path... |
34724927724 | from fastapi import HTTPException
from pydantic import BaseModel, EmailStr, constr
from datetime import date, timedelta
from db_mysql.Connection_mysql import conecction_mysql
class UserRegister(BaseModel):
    """Registration payload schema (field names are in Spanish)."""
    # Gender, free text up to 10 characters.
    sexo: constr(max_length=10)
    # Date of birth; must belong to an adult (enforced by register()).
    fecha_de_nacimiento: date
    # First name.
    nombre: constr(max_length=100)
    # Last name.
    apellido: constr(max_length=100)
    # Validated e-mail address.
    email: EmailStr
    # Street address.
    direccion: constr(max_length=200)
    # House / apartment identifier.
    casa_apartamento: constr(max_length=50)
    # Country.
    pais: constr(max_length=100)
    # State / department.
    departamento: constr(max_length=100)
    # City; register() allows at most 3 users per city.
    ciudad: constr(max_length=100)
def adult(birth_date: date) -> bool:
    """Return True if a person born on *birth_date* is at least 18 years old.

    Fixed: the previous ``today - 365*18 days`` cutoff ignored leap years,
    so it classified people as adults up to ~5 days before their actual
    18th birthday. Calendar-year arithmetic keeps the cutoff exact.
    """
    today = date.today()
    try:
        # A person counts as an adult from their 18th birthday onwards.
        cutoff = today.replace(year=today.year - 18)
    except ValueError:
        # Today is Feb 29 and the target year is not a leap year.
        cutoff = today.replace(year=today.year - 18, day=28)
    return birth_date <= cutoff
def register(user_register: UserRegister):
    """Register a new user.

    Validation rules:
      * the user must be at least 18 years old, and
      * at most 3 users may be registered per city.

    Args:
        user_register: validated registration payload.

    Returns:
        dict with a success message and the new row id.

    Raises:
        HTTPException: 401 when the user is underage or the city quota
            is already reached.
    """
    # Guard clause: reject minors before touching the database.
    if not adult(birth_date=user_register.fecha_de_nacimiento):
        raise HTTPException(status_code=401, detail="the user is not of legal age")
    conecction = conecction_mysql()
    cursor = conecction.cursor()
    try:
        # Fixed: use parameterized queries — the previous f-string
        # interpolation of user-supplied fields was an SQL injection hole.
        cursor.execute("SELECT COUNT(*) FROM users WHERE ciudad = %s",
                       (user_register.ciudad,))
        count_city = cursor.fetchone()[0]
        if count_city >= 3:
            raise HTTPException(status_code=401, detail="already on the limit of allowed cities")
        cursor.execute(
            """INSERT INTO users
               (sexo, fecha_de_nacimiento,
                nombre, apellido, email,
                direccion, casa_apartamento,
                pais, departamento, ciudad)
               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
            (user_register.sexo, user_register.fecha_de_nacimiento,
             user_register.nombre, user_register.apellido, user_register.email,
             user_register.direccion, user_register.casa_apartamento,
             user_register.pais, user_register.departamento, user_register.ciudad))
        conecction.commit()
        user_id = cursor.lastrowid
        return {"detail": f"User register with success ",
                "id": user_id}
    finally:
        # Always release the cursor, even when validation raises.
        cursor.close()
| Warriors2021/FormUserCRUD | Dependencies/Register.py | Register.py | py | 2,323 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pydantic.constr",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pydantic.constr",
"... |
2715999025 | # start
# find highly selected genes in one species across populations and donors
import os
import glob
import copy
from Bio import SeqIO
from Bio.Seq import Seq
import argparse
############################################ Arguments and declarations ##############################################
# Command-line interface: input/output locations plus paths to external tools.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-i",
                      help="path of folders of WGS of each species",
                      type=str, default='.',
                      metavar='input/')
required.add_argument("-fq",
                      help="file extension of WGS fastq #1 files",
                      type=str, default='_1.fastq',
                      metavar='_1.fastq')
# optional output setup
optional.add_argument("-s",
                      help="a folder to store all scripts",
                      type=str, default='scripts/',
                      metavar='scripts/')
optional.add_argument("-o",
                      help="a folder to store all output",
                      type=str, default='snp_output/',
                      metavar='snp_output/')
# optional search parameters
optional.add_argument('-t',
                      help="Optional: set the thread number assigned for running XXX (default 1)",
                      metavar="1 or more", action='store', default=1, type=int)
optional.add_argument('-job',
                      help="Optional: command to submit jobs",
                      metavar="nohup or customized",
                      action='store', default='jobmit', type=str)
# requirement for software calling
optional.add_argument('-bw', '--bowtie',
                          help="Optional: complete path to bowtie if not in PATH",
                          metavar="/usr/local/bin/bowtie",
                          action='store', default='bowtie', type=str)
optional.add_argument('-sp', '--spades',
                          help="Optional: complete path to spades if not in PATH",
                          metavar="/usr/local/bin/spades",
                          action='store', default='spades', type=str)
optional.add_argument('-pro', '--prodigal',
                      help="Optional: complete path to prodigal if not in PATH, None for no prodigal (default)",
                      metavar="/usr/local/bin/prodigal",
                      action='store', default='None', type=str)
optional.add_argument('-bcf', '--bcftools',
                      help="Optional: complete path to bcftools if not in PATH",
                      metavar="/usr/local/bin/bcftools",
                      action='store', default='bcftools', type=str)
optional.add_argument('-sam', '--samtools',
                      help="Optional: complete path to bwa if not in PATH",
                      metavar="/usr/local/bin/samtools",
                      action='store', default='samtools', type=str)
optional.add_argument('-mini', '--minimap2',
                      help="Optional: complete path to minimap2 if not in PATH",
                      metavar="/usr/local/bin/minimap2",
                      action='store', default='minimap2', type=str)
# NOTE(review): '--u' with two dashes makes both aliases optional flags
# (dest becomes 'u'); a single-dash '-u' was likely intended.
optional.add_argument('--u','--usearch',
                      help="Optional: cluster genes with SNPs",
                      metavar="usearch",
                      action='store', default='usearch', type=str)
################################################## Definition ########################################################
args = parser.parse_args()
input_script_sub = args.s +'/annotate'
input_script = args.s
genome_root = args.i + '/round*'
# NOTE(review): `Round` is never defined before this line in the file as
# shown, so evaluating it raises NameError at import time — confirm whether
# Round was meant to be a CLI argument or module constant.
output_dir_merge = args.o +'/vcf_round%s/merge_genome/'%(Round)
all_fasta = os.path.join(output_dir_merge + '/summary', 'all.denovo.gene.faa')
all_fasta_HS = os.path.join(output_dir_merge + '/summary', 'all.selected.gene.faa')
input_summary = output_dir_merge + '/summary/all.donor.species.dnds.txt'
# functions
# clustering
def cluster_uc(cluster_input):
    """Parse a usearch ``.uc`` clustering file.

    Returns ``[Clusters, High_select2_output, High_select2]`` where Clusters
    maps record name -> cluster id, High_select2 is the set of records deemed
    highly selected, and High_select2_output is kept for interface
    compatibility (it is never filled here).

    NOTE(review): relies on a module-level SNP_cutoff collection that is not
    defined in this file as shown — confirm where it is set.
    """
    Clusters = dict()
    High_select2 = set()
    High_select2_3SNP = dict()
    High_select2_output = []
    for lines in open(cluster_input, 'r'):
        line_set = lines.split('\n')[0].split('\t')
        cluster = line_set[1]
        record_name = line_set[8]
        record_name2 = line_set[9]
        Clusters.setdefault(record_name, cluster)
        # '*' means the record is a centroid with no hit target.
        if record_name2 != '*':
            donor = record_name.split('_')[0]
            donor_species = record_name.split('__')[0]
            species = donor_species.replace(donor + '_', '')
            donor2 = record_name2.split('_')[0]
            donor_species2 = record_name2.split('__')[0]
            species2 = donor_species2.replace(donor2 + '_', '')
            if species == species2:
                if species not in SNP_cutoff:
                    # Any same-species cross-donor hit qualifies directly.
                    High_select2.add(record_name2)
                    High_select2.add(record_name)
                else:
                    # Species with an SNP cutoff: count distinct partners.
                    High_select2_3SNP.setdefault(record_name, set())
                    High_select2_3SNP[record_name].add(record_name2)
                    High_select2_3SNP.setdefault(record_name2, set())
                    High_select2_3SNP[record_name2].add(record_name)
    if High_select2_3SNP:
        for record in High_select2_3SNP:
            if len(High_select2_3SNP[record]) >= 3:
                # Fixed: High_select2 is a set, so the original
                # High_select2.append(record) raised AttributeError here.
                High_select2.add(record)
    return [Clusters, High_select2_output, High_select2]
# correct highly selected genes dNdS
def format_freq(freq):
    """Parse an ``'N:S'`` count string into a two-element ``[N, S]`` int list."""
    parts = freq.split(':')
    return [int(parts[0]), int(parts[1])]
def init_highselect_notHS(line_set):
    """Coerce the numeric columns of a summary row in place and return it.

    Count columns are parsed as ints; 'N:S' frequency columns are expanded
    to [N, S] lists via format_freq. The input list itself is mutated.
    """
    row = line_set
    for col in (3, 5, 6, 7, 8, -13, -11, -9, -7, -5, -3):
        row[col] = int(row[col])
    for col in (-12, -10, -8, -6, -4, -2):
        row[col] = format_freq(row[col])
    return row
def init_highselect_empty(line_set):
    """Reset a summary row to an empty aggregate in place and return it.

    Column 3 is parsed as an int; all count/ratio columns are zeroed and the
    'N:S' frequency columns become fresh [0, 0] pairs.
    """
    row = line_set
    row[3] = int(row[3])
    for col in (5, 6, 7, 8, 9, 10, 11, 12, -13, -11, -9, -7, -5, -3):
        row[col] = 0
    for col in (-12, -10, -8, -6, -4, -2):
        row[col] = format_freq('0:0')
    return row
def add_freq(freq_new, freq_old):
    """Accumulate an 'N:S' string into the running [N, S] pair in place."""
    parsed_n, parsed_s = format_freq(freq_new)
    freq_old[0] += parsed_n
    freq_old[1] += parsed_s
def add_highselect(line_set, allspecies_highselect):
    """Fold one summary row into a running aggregate row (mutated in place)."""
    for col in (3, 5, 6, 7, 8, -13, -11, -9, -7, -5, -3):
        allspecies_highselect[col] += int(line_set[col])
    for col in (-12, -10, -8, -6, -4, -2):
        add_freq(line_set[col], allspecies_highselect[col])
    return allspecies_highselect
def calculate_NS(allspecies,allspecies_highselect):
    """Fill expected/observed N:S ratios and dN/dS into a summary row in place.

    allspecies: six gene-frequency weights paired with the row's six [N, S]
    codon-frequency columns (at -12, -10, ..., -2). Writes column 11
    (expected ratio), 10 (observed ratio) and 12 (dN/dS); string sentinels
    mark ratios that would divide by zero.
    """
    # expect NS
    # expected NS ratio
    tempNS = [0, 0]
    i = -12
    for freq in allspecies:
        tempNS[0] += freq * allspecies_highselect[i][0]
        tempNS[1] += freq * allspecies_highselect[i][1]
        i += 2
    if tempNS[1] > 0:
        allspecies_highselect[11] = tempNS[0] / tempNS[1] # expect
    else:
        allspecies_highselect[11] = 'expect_N_only'
    # NS ratio
    if allspecies_highselect[8] > 0:
        allspecies_highselect[10] = allspecies_highselect[7]/allspecies_highselect[8] # observe
        if allspecies_highselect[11] != 'expect_N_only':
            # dNdS
            allspecies_highselect[12] = allspecies_highselect[10] / allspecies_highselect[11]# dnds
    elif allspecies_highselect[7] > 0:
        allspecies_highselect[10] = 'observe_N_only'
        allspecies_highselect[12] = 'observe_N_only'
    return allspecies_highselect
def output_highlight(allspecies_highselect,allspecies,newoutput):
    """Finalize one aggregate row and append it to `newoutput` as a TSV line.

    Computes its N:S ratios via calculate_NS, then stringifies every column;
    the positive indices 14..24 re-encode the [N, S] pairs back to 'N:S'.
    NOTE(review): those fixed positive indices match the -12..-2 frequency
    columns only for the summary row length used here — confirm row width.
    """
    allspecies_highselect = calculate_NS(allspecies, allspecies_highselect)
    for i in range(0, len(allspecies_highselect)):
        if i in [14, 16, 18, 20, 22, 24]:
            allspecies_highselect[i] = '%s:%s' % (allspecies_highselect[i][0],
                                                       allspecies_highselect[i][1])
        else:
            allspecies_highselect[i] = str(allspecies_highselect[i])
    newoutput.append('\t'.join(allspecies_highselect) + '\n')
    return newoutput
def genus_species(donor_species):
    """Expand an abbreviated donor_species label into [genus, species] names.

    The second '_'-separated token carries the genus code (BA/BL/PB or a
    full genus name); note the BA/BL/PB test scans the whole label, not just
    that token, mirroring the original matching behavior.
    """
    tag = donor_species.split('_')[1]
    genus = (tag.replace('BA', 'Bifidobacterium')
                .replace('BL', 'Bifidobacterium')
                .replace('PB', 'Parabacteroides'))
    if any(code in donor_species for code in ('BA', 'BL', 'PB')):
        species = (tag.replace('BA', 'Bifidobacterium_adolescentis')
                      .replace('BL', 'Bifidobacterium_longum')
                      .replace('PB', 'Parabacteroides_butyrate'))
    else:
        pieces = donor_species.split('_')
        species = pieces[1] + '_' + pieces[2]
    return [genus, species]
def add_new_selection(input_summary,High_select2):
    """Re-flag newly selected genes and rebuild all aggregate summary rows.

    First pass over the global `input_summary_nosingleevent` file collects
    the all-species codon frequencies and initializes per-donor-species,
    per-genus and per-species aggregates (both highselect and not-HS).
    Second pass over `input_summary` promotes genes in High_select2 that
    clear the N:S ratio cutoff, accumulates them into every aggregate, and
    writes `<input_summary>.High_select2.txt`.

    NOTE(review): depends on module-level `input_summary_nosingleevent` and
    `NSratioobserve_cutoff`, neither defined in the file as shown. Also,
    `genus`/`species`/`allspecies_highselect` are only bound inside earlier
    branches; the 'True' branch would raise NameError if its row precedes
    the corresponding donor-species/allspecies rows — confirm file ordering
    guarantees.
    """
    Donor_species = dict()
    Donor_species_notHS = dict()
    Genus = dict()
    Genus_notHS = dict()
    Species = dict()
    Species_notHS = dict()
    newoutput = []
    # use selected codon NS ratio (SNP pair) * all genes freq of no single event
    for lines in open(input_summary_nosingleevent, 'r'):
        line_set = lines.split('\n')[0].split('\t')
        Selected = line_set[-1]
        if Selected == 'False':
            donor_species = line_set[0]
            record_id = line_set[1]
            # all species
            if record_id == 'allspecies':
                # all genes freq
                allspecies = [int(line_set[-13]), int(line_set[-11]), int(line_set[-9]),
                              int(line_set[-7]), int(line_set[-5]), int(line_set[-3])]
                allspecies_highselect = init_highselect_empty(line_set)
                allspecies_highselect[1] = 'allspecies_highselect'
                newoutput.append(lines)
            elif record_id in ['allspecies_flexible', 'allspecies_core']:
                newoutput.append(lines)
            elif donor_species == record_id:
                # donor_species not high select set up
                line_set = lines.split('\n')[0].split('\t')
                donor_species_highselect = init_highselect_notHS(line_set)
                Donor_species_notHS.setdefault(donor_species,
                                               donor_species_highselect)
                # Re-split: init_highselect_* mutate line_set in place.
                line_set = lines.split('\n')[0].split('\t')
                donor_species_highselect = init_highselect_empty(line_set)
                donor_species_highselect[1] = donor_species + '_highselect'
                Donor_species.setdefault(donor_species,
                                         donor_species_highselect)
                genus, species = genus_species(donor_species)
                line_set = lines.split('\n')[0].split('\t')
                if genus not in Genus_notHS:
                    genus_highselect = init_highselect_notHS(line_set)
                    genus_highselect[0] = genus
                    genus_highselect[1] = genus
                    Genus_notHS.setdefault(genus, genus_highselect)
                    line_set = lines.split('\n')[0].split('\t')
                    genus_highselect = init_highselect_empty(line_set)
                    genus_highselect[0] = genus
                    genus_highselect[1] = genus + '_highselect'
                    Genus.setdefault(genus, genus_highselect)
                else:
                    genus_highselect = Genus_notHS[genus]
                    Genus_notHS[genus] = add_highselect(line_set, genus_highselect)
                line_set = lines.split('\n')[0].split('\t')
                if species not in Species_notHS:
                    species_highselect = init_highselect_notHS(line_set)
                    species_highselect[0] = species
                    species_highselect[1] = species
                    Species_notHS.setdefault(species, species_highselect)
                    line_set = lines.split('\n')[0].split('\t')
                    species_highselect = init_highselect_empty(line_set)
                    species_highselect[0] = species
                    species_highselect[1] = species + '_highselect'
                    Species.setdefault(species, species_highselect)
                else:
                    species_highselect = Species_notHS[species]
                    Species_notHS[species] = add_highselect(line_set, species_highselect)
    # calculate NS for new HS genes
    for lines in open(input_summary, 'r'):
        line_set = lines.split('\n')[0].split('\t')
        Selected = line_set[-1]
        donor_species = line_set[0]
        record_id = line_set[1]
        # species or genes
        if Selected == 'False':
            if record_id not in ['0', 'gene', 'allspecies', 'allspecies_highselect',
                                 'allspecies_core', 'allspecies_flexible',
                                 'allspecies_highselect_noNS']:
                genus, species = genus_species(donor_species)
                donor_species_set = donor_species.split('_')
                # Shorten the label: donor + first 6 chars of each name part.
                try:
                    donor_species = '%s_%s_%s' % (donor_species_set[0],
                                                  donor_species_set[1][0:min(6, len(donor_species_set[1]))],
                                                  donor_species_set[2][0:min(6, len(donor_species_set[2]))])
                except IndexError:
                    donor_species = '%s_%s_%s' % (donor_species_set[0],
                                                  donor_species_set[1],
                                                  donor_species_set[1])
                if 'cluster' in donor_species_set[-1]:
                    try:
                        donor_species += '_CL' + donor_species_set[-2].split('cluster')[1]
                    except IndexError:
                        donor_species += '_CL' + donor_species_set[-1].split('cluster')[1]
                record_id = '%s__C_%s_G_%s' % (donor_species, record_id.split('_')[1], record_id.split('_')[-1])
                # highly selected genes
                if record_id in High_select2:
                    if (line_set[10] == 'observe_N_only' or float(line_set[10]) > NSratioobserve_cutoff):
                        newoutput.append('\t'.join(line_set[:-1]) + '\tTrue\n')
                        allspecies_highselect = add_highselect(line_set, allspecies_highselect)
                        Donor_species[line_set[0]] = add_highselect(line_set, Donor_species[line_set[0]])
                        Genus[genus] = add_highselect(line_set, Genus[genus])
                        Species[species] = add_highselect(line_set, Species[species])
                    else:
                        # Below cutoff: keep the gene but drop the flag.
                        newoutput.append('\t'.join(line_set[:-1]) + '\tFalse\n')
                        High_select2.remove(record_id)
                # other genes and species
                elif line_set[0] not in line_set[1]:
                    newoutput.append(lines)
                else:
                    print('not output %s' % (line_set[1]))
        # original highly selected
        elif Selected == 'True':
            newoutput.append(lines)
            allspecies_highselect = add_highselect(line_set, allspecies_highselect)
            Donor_species[line_set[0]] = add_highselect(line_set, Donor_species[line_set[0]])
            Genus[genus] = add_highselect(line_set, Genus[genus])
            Species[species] = add_highselect(line_set, Species[species])
        else:
            newoutput.append(lines)
    # output new highselect
    newoutput = output_highlight(allspecies_highselect, allspecies, newoutput)
    for donor_species in Donor_species:
        line_set = Donor_species_notHS[donor_species]
        line_set_sub = [int(line_set[-13]), int(line_set[-11]), int(line_set[-9]),
                        int(line_set[-7]), int(line_set[-5]), int(line_set[-3])]
        newoutput = output_highlight(Donor_species[donor_species],
                                     line_set_sub, newoutput)
        newoutput = output_highlight(Donor_species_notHS[donor_species],
                                     line_set_sub, newoutput)
    for genus in Genus:
        line_set = Genus_notHS[genus]
        line_set_sub = [int(line_set[-13]), int(line_set[-11]), int(line_set[-9]),
                        int(line_set[-7]), int(line_set[-5]), int(line_set[-3])]
        newoutput = output_highlight(Genus[genus],
                                     line_set_sub, newoutput)
        newoutput = output_highlight(Genus_notHS[genus],
                                     line_set_sub, newoutput)
    for species in Species:
        line_set = Species_notHS[species]
        line_set_sub = [int(line_set[-13]), int(line_set[-11]), int(line_set[-9]),
                        int(line_set[-7]), int(line_set[-5]), int(line_set[-3])]
        newoutput = output_highlight(Species[species],
                                     line_set_sub, newoutput)
        newoutput = output_highlight(Species_notHS[species],
                                     line_set_sub, newoutput)
    # output new summary
    f1 = open(input_summary + '.High_select2.txt', 'w')
    f1.write(''.join(newoutput))
    f1.close()
    return High_select2
def sum_gene(input_summary,High_select2, selected = 1):
    """Collect gene names of interest and write their sequences to a fasta.

    Gene names come from two sources: the ``High_select2`` list and rows of
    ``input_summary`` (all rows when ``selected == 0``, otherwise only rows
    whose last column is the string 'True').  Sequences are taken first from
    the module-level ``all_fasta_HS`` fasta (always kept) and then from
    ``all_fasta`` (kept only when selected), and written to
    ``all_fasta_HS + '.High_select2.faa'``.

    :param input_summary: path to the tab-separated summary table
    :param High_select2: iterable of gene names to always include
    :param selected: 0 keeps every summary row; any other value keeps only
        highly-selected rows (last column == 'True')
    """
    genelist = list(High_select2)
    # `with` guarantees the summary handle is closed (the original loop
    # leaked an open file object).
    with open(input_summary, 'r') as summary:
        for lines in summary:
            if not lines.startswith('#'):
                lines_set = lines.replace('\n', '').split('\t')
                if selected == 0 or lines_set[-1] == 'True':
                    # highly selected genes or all genes
                    genelist.append(lines_set[1])
    print(len(genelist))
    genelist = list(set(genelist))
    print(len(genelist))
    # Set gives O(1) membership checks inside the SeqIO loop below
    # (the original scanned a list for every record).
    wanted = set(genelist)
    Output = []
    output_set = set()
    # All highly-selected sequences are written unconditionally.
    for record in SeqIO.parse(all_fasta_HS, 'fasta'):
        Output.append('>%s\n%s\n' % (str(record.id), str(record.seq)))
        output_set.add(str(record.id))
    # Then add the remaining wanted genes that were not already written.
    for record in SeqIO.parse(all_fasta, 'fasta'):
        record_id = str(record.id)
        if record_id in wanted and record_id not in output_set:
            Output.append('>%s\n%s\n' % (record_id, str(record.seq)))
            output_set.add(record_id)
    with open(all_fasta_HS + '.High_select2.faa', 'w') as f1:
        f1.write(''.join(Output))
def annotation(all_filter_gene_fasta_file,pre_cluster = ''):
    """Cluster the gene fasta and write annotation job scripts.

    Runs usearch clustering immediately (70% identity), then writes shell
    scripts for prokka, metacyc, eggnog, kegg and several custom databases
    into ``input_script_sub`` (module-level path), plus one
    ``allannotate.sh`` driver in ``input_script`` that submits them all via
    ``jobmit``.  Only the clustering itself executes here; everything else
    is deferred to the generated scripts.

    :param all_filter_gene_fasta_file: amino-acid fasta of genes to annotate
    :param pre_cluster: optional reference fasta to ublast the cluster
        centroids against ('' disables this step)
    """
    # run cluster (executed right away, unlike the steps below)
    cutoff = 0.7
    cmd_cluster = ('%s -sort length -cluster_fast %s -id %s -centroids %s.cluster.aa -uc %s.uc -threads %s\n'
                   % ('usearch', all_filter_gene_fasta_file, cutoff, all_filter_gene_fasta_file,
                      all_filter_gene_fasta_file, 40))
    os.system(cmd_cluster)
    # all downstream steps operate on the cluster centroids
    all_filter_gene_fasta_file = all_filter_gene_fasta_file + '.cluster.aa'
    if pre_cluster!= '':
        # NOTE(review): this command string starts with '#', so the shell
        # treats it as a comment and the udb is never (re)built here --
        # presumably the .udb already exists on disk; confirm.
        os.system('#%s -makeudb_usearch %s -output %s.udb' %
                  ('usearch', pre_cluster, pre_cluster))
        os.system('%s -ublast %s -db %s.udb -evalue 1e-2 -accel 0.5 -blast6out %s -threads 2'%
                  ('usearch', all_filter_gene_fasta_file,pre_cluster, all_filter_gene_fasta_file + '.ref.out.txt'))
    # run prokka ('py37' line activates the conda env inside the job script)
    cmdsprokka = 'py37\nprokka --kingdom Bacteria --outdir %s/prokka_%s --protein %s --locustag Bacter %s/%s\n' % \
                 (output_dir_merge + '/summary', os.path.split(all_filter_gene_fasta_file)[-1],
                  all_filter_gene_fasta_file,
                  output_dir_merge + '/summary',
                  os.path.split(all_filter_gene_fasta_file)[-1].replace('.faa', '.fna'))
    f1 = open(os.path.join(input_script_sub, 'prokka.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s' % (cmdsprokka))
    f1.close()
    # run metacyc (diamond blastp, 50% identity / 80% query coverage)
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/mit_alm/database/metacyc/protseq.fsa'
    cmds = ("diamond blastp --query %s --db %s.dmnd --out %s.metacyc.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    f1 = open(os.path.join(input_script_sub, 'metacyc.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    # run eggnog (hmmsearch against the three eggnog hmm chunks, E <= 0.01)
    cutoff = 0.01
    database = '/scratch/users/mit_alm/database/eggnog/xaa.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.1.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, 'eggnog.1.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    database = '/scratch/users/mit_alm/database/eggnog/xab.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.2.txt --cpu 40 -E %s %s %s\n') % (
        all_filter_gene_fasta_file, cutoff, database, all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, 'eggnog.2.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s' % (cmds))
    f1.close()
    database = '/scratch/users/mit_alm/database/eggnog/xac.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.3.txt --cpu 40 -E %s %s %s\n') % (
        all_filter_gene_fasta_file, cutoff, database, all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, 'eggnog.3.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s' % (cmds))
    f1.close()
    # run kegg (kofam prokaryote profiles)
    cutoff = 0.01
    database = '/scratch/users/mit_alm/database/kegg/kofam/profiles/prokaryote/prokaryote.hmm'
    cmds = ('hmmsearch --tblout %s.kegg.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, 'kegg.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    # run customed database (SARG/AHR/butyrate/integron/SRB/NR in one script,
    # each with its own identity/coverage cutoffs)
    cutoff = 80
    cutoff2 = 80
    cmds = ''
    database = '/scratch/users/anniz44/scripts/database/SARG.db.fasta'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.SARG.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
             %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 50
    database = '/scratch/users/anniz44/scripts/database/AHR.aa.db'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.AHR.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
             %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 60
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/Butyrate.pro.aa'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.buty.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
             %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/IntI1_database.fasta'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.int.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
             %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/SRB.AA'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.SRB.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
             %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 0.01
    database = '/scratch/users/anniz44/scripts/database/NR.hmm'
    cmds += ('hmmsearch --tblout %s.NR.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, 'customed.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    # all scripts: one driver that submits every generated .sh via jobmit
    f1 = open(os.path.join(input_script, 'allannotate.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n')
    for sub_scripts in glob.glob(os.path.join(input_script_sub, '*.sh')):
        f1.write('jobmit %s %s\n' % (sub_scripts, os.path.split(sub_scripts)[-1]))
    f1.close()
# clustering: parse the usearch .uc output, flagging genes that cluster
# with known highly-selected genes
Clusters_gene, High_select2_output,High_select2 = cluster_uc(all_fasta + '.uc')
f1 = open(all_fasta + '.High_select2.txt', 'w')
f1.write(''.join(High_select2_output))
f1.close()
# correcting: propagate the newly flagged genes into the summary table
High_select2 = add_new_selection(input_summary,High_select2)
# collect sequences for all (original + newly flagged) highly selected genes
sum_gene(input_summary,High_select2,1)
# generate the annotation job scripts (prokka/metacyc/eggnog/kegg/custom DBs)
annotation(all_fasta_HS + '.High_select2.faa',pre_cluster)
################################################### END ########################################################
| caozhichongchong/snp_finder | snp_finder/scripts/oldscripts/parallel_evolution.py | parallel_evolution.py | py | 24,817 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 75,
"usage_type": "call"
},
{
"ap... |
42292075170 | import logging
from typing import Any, Dict, Hashable, TypeVar, Union
logger = logging.getLogger(__name__)
D = TypeVar("D")
def get_by_path(d: Dict, *path: Hashable, default: D = None) -> Union[Any, D]:
    """
    Given a nested dict of dicts, traverse a given path and return the result or the default if it is not found.

    This is used as a replacement for the pattern

    >>> d.get("a", {}).get("b", {}).get("c", {}).get("d", 1)

    with

    >>> get_by_path(d, "a", "b", "c", "d", default=1)

    Does not traverse lists. Should be dict all the way down; if an
    intermediate value is not a mapping, ``default`` is returned instead of
    raising (the previous implementation leaked TypeError/AttributeError
    in that case).

    :raises ValueError: if no path components are given.
    """
    if not path:
        raise ValueError("No path given")
    *parents, leaf = path
    node: Any = d
    # Walk down to the parent of the leaf; any missing key or non-indexable
    # intermediate short-circuits to the default.
    for key in parents:
        try:
            node = node[key]
        except (KeyError, TypeError):
            return default
    logger.debug("resolved parents for path %s", path)
    try:
        return node.get(leaf, default)
    except AttributeError:
        # Leaf container is not a mapping.
        return default
| abn/cafeteria | cafeteria/patterns/dict.py | dict.py | py | 878 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Hashable",
"line... |
12657959262 | """Dataset Handler."""
import os
import torch
from datasets import cifar_handler
from datasets import tinyimagenet_handler
from datasets import imagenet2012_handler
from datasets import stl10_handler
import numpy as np
class DataHandler:
    """Handler for datasets.

    Builds train/val(/test) dataset objects for a named dataset (CIFAR*,
    TinyImageNet, STL10, or ImageNet2012 variants) via the per-dataset
    handler modules, caches train/val split indices on disk, and constructs
    DataLoaders (with a weighted sampler for the rebalanced 16-class
    ImageNet variant).
    """
    def __init__(
        self,
        dataset_name,
        data_root,
        test_dataset_root,
        experiment_root,
        grayscale,
        gauss_noise,
        gauss_noise_std,
        blur,
        blur_std,
        val_split=0.1,
        test_split=0.1,
        split_idxs_root="split_idxs",
        noise_type=None,
        load_previous_splits=True,
        verbose=True,
        **kwargs
    ):
        """Initialize dataset handler.

        Args:
            dataset_name: dataset selector, e.g. "CIFAR10", "CIFAR100",
                "TinyImageNet", "STL10", or an "ImageNet2012*" variant.
            data_root: root directory of the raw dataset.
            test_dataset_root: root of the test set (ImageNet2012 path only).
            experiment_root: experiment output directory.
            grayscale, gauss_noise, gauss_noise_std, blur, blur_std:
                corruption switches/strengths forwarded to the handlers.
            val_split: fraction of train data held out for validation.
            test_split: test fraction (ImageNet2012 path only).
            split_idxs_root: directory where split indices are cached.
            noise_type: optional label-noise specifier for some handlers.
            load_previous_splits: reuse cached split indices when True.
            verbose: verbosity flag forwarded to handlers.
            **kwargs: extra options; "imagenet_params" enables class
                subsetting for ImageNet2012.
        """
        self.dataset_name = dataset_name
        self.data_root = data_root
        self.test_dataset_root = test_dataset_root
        self.experiment_root = experiment_root
        self.test_split = test_split
        self.val_split = val_split
        self.noise_type = noise_type
        self._verbose = verbose
        self.load_previous_splits = load_previous_splits
        self.grayscale = grayscale #pg_grayscale
        self.gauss_noise = gauss_noise
        self.gauss_noise_std = gauss_noise_std
        self.blur = blur
        self.blur_std = blur_std
        self._kwargs = kwargs
        # May be overwritten later for ImageNet2012 (see _build_datasets).
        self._set_num_classes(dataset_name)
        # Set idx with dataset_name: each dataset caches its own split idxs.
        split_idxs_root = os.path.join(split_idxs_root, dataset_name)
        if not os.path.exists(split_idxs_root):
            os.makedirs(split_idxs_root)
        if split_idxs_root and val_split:
            self.split_idxs_root = self._build_split_idx_root(
                split_idxs_root,
                dataset_name
            )
        else:
            self.split_idxs_root = None
        # Create datasets
        self.datasets = self._build_datasets()
    def _set_num_classes(self, dataset_name):
        """Set number of classes in dataset.

        NOTE(review): unknown dataset names leave ``num_classes`` unset here;
        the ImageNet2012 branch of ``_build_datasets`` assigns it later.
        """
        if dataset_name == "CIFAR10":
            self.num_classes = 10
        elif dataset_name == "CIFAR100":
            self.num_classes = 100
        elif dataset_name == "TinyImageNet":
            self.num_classes = 200
        elif dataset_name == "STL10":
            self.num_classes = 10
    def get_transform(self, dataset_key=None):
        """Return the normalization transform of a dataset, or None.

        Args:
            dataset_key: key into ``self.datasets``; defaults to the first.
        """
        if dataset_key is None:
            dataset_key = list(self.datasets.keys())[0]
        normalize_transform = None
        # Grab transforms - location varies depending on base dataset
        # (plain dataset vs. a Subset-style wrapper with a .dataset attr).
        try:
            transforms = self.datasets[dataset_key].transform.transforms
            found = True
        except AttributeError:
            found = False
        if not found:
            try:
                transforms = self.datasets[dataset_key].dataset.transform.transforms
                found = True
            except AttributeError:
                found = False
        if not found:
            print("Transform list not found!")
        else:
            found = False
            # Identify the normalization step by its repr.
            for xform in transforms:
                if "normalize" in str(xform).lower():
                    normalize_transform = xform
                    found = True
                    break
            if not found:
                print("Normalization transform not found!")
        return normalize_transform
    def _build_split_idx_root(self, split_idxs_root, dataset_name):
        """Build (and create if missing) the directory for split idxs."""
        if ".json" in split_idxs_root and not os.path.exists(split_idxs_root):
            split_idxs_root = os.path.join(split_idxs_root, dataset_name)
            print(f"Setting split idxs root to {split_idxs_root}")
        if not os.path.exists(split_idxs_root):
            print(f"{split_idxs_root} does not exist!")
            os.makedirs(split_idxs_root)
            print("Complete.")
        return split_idxs_root
    def _build_datasets(self):
        """Build the dict of datasets by dispatching on ``dataset_name``."""
        print("_build_datasets")
        print("self.grayscale", self.grayscale)
        print("self.gauss_noise", self.gauss_noise)
        print("self.gauss_noise_std", self.gauss_noise_std)
        print("self.blur", self.blur)
        print("self.blur_std", self.blur_std)
        if "cifar" in self.dataset_name.lower():
            dataset_dict = cifar_handler.create_datasets(
                self.data_root,
                dataset_name=self.dataset_name,
                val_split=self.val_split,
                grayscale=self.grayscale,
                gauss_noise=self.gauss_noise,
                gauss_noise_std=self.gauss_noise_std,
                blur=self.blur,
                blur_std=self.blur_std,
                split_idxs_root=self.split_idxs_root,
                noise_type=self.noise_type,
                load_previous_splits=self.load_previous_splits,
                verbose=self._verbose
            )
        elif self.dataset_name.lower() == "tinyimagenet":
            dataset_dict = tinyimagenet_handler.create_datasets(
                self.data_root,
                self.val_split,
                self.split_idxs_root
            )
        elif "stl" in self.dataset_name.lower():
            dataset_dict = stl10_handler.create_datasets(
                self.data_root,
                dataset_name=self.dataset_name,
                val_split=self.val_split,
                grayscale=self.grayscale,
                gauss_noise=self.gauss_noise,
                gauss_noise_std=self.gauss_noise_std,
                blur=self.blur,
                blur_std=self.blur_std,
                split_idxs_root=self.split_idxs_root,
                noise_type=self.noise_type,
                load_previous_splits=self.load_previous_splits,
                verbose=self._verbose
            )
        elif str.find(self.dataset_name.lower(), "imagenet2012")>-1:
            # Build path dataframe of image files for train and test.
            path_df = imagenet2012_handler.build_path_df(self.data_root, self.experiment_root)
            test_path_df = imagenet2012_handler.build_test_path_df(self.test_dataset_root, self.grayscale, self.gauss_noise, self.blur, self.gauss_noise_std, self.blur_std)
            assert len(path_df), "Failed to load path df"
            # Subset data to the configured class subset, if requested.
            if "imagenet_params" in self._kwargs:
                path_df = imagenet2012_handler.subset_path_df(
                    path_df,
                    self._kwargs["imagenet_params"]
                )
                test_path_df = imagenet2012_handler.subset_path_df(
                    test_path_df,
                    self._kwargs["imagenet_params"]
                )
            # Set number of classes! (overrides _set_num_classes)
            self.num_classes = path_df.class_lbl.unique().shape[0]
            # Build dataset dict
            dataset_dict = imagenet2012_handler.create_datasets(
                path_df,
                self.val_split,
                self.test_split,
                self.split_idxs_root,
                self.experiment_root,
                self.grayscale,
                self.gauss_noise,
                self.gauss_noise_std,
                self.blur,
                self.blur_std,
                test_path_df
            )
        return dataset_dict
    def build_loader(
        self,
        dataset_key,
        flags,
        dont_shuffle_train=False
    ):
        """Build a DataLoader for one dataset split.

        Args:
            dataset_key: which split to load ("train", "val", ...).
            flags: object with batch_size, num_workers, drop_last attrs.
            dont_shuffle_train: force shuffle=False even for "train".
        """
        # Get dataset source
        dataset_src = self.datasets[dataset_key]
        # Specify shuffling: only the train split is shuffled.
        if dont_shuffle_train:
            shuffle = False
        else:
            shuffle = dataset_key == "train"
        #create weighted sampler for 16 class dataset so each class is drawn
        #with equal probability despite imbalanced counts
        if self.dataset_name == 'ImageNet2012_16classes_rebalanced' and dataset_key == 'train':
            class_sample_count = np.array([len(dataset_src.path_df[dataset_src.path_df.y == t]) for t in dataset_src.path_df.y.unique()])
            weights = 1./class_sample_count
            samples_weight = torch.from_numpy(np.array([weights[t] for t in dataset_src.path_df.y])).double()
            weighted_sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(dataset_src.path_df), replacement=True)
            # Creates dataloaders, which load data in batches
            # (sampler and shuffle are mutually exclusive in torch).
            loader = torch.utils.data.DataLoader(
                dataset=dataset_src,
                batch_size=flags.batch_size,
                sampler = weighted_sampler,
                num_workers=flags.num_workers,
                drop_last=flags.drop_last,
                pin_memory=True)
        else:
            # Creates dataloaders, which load data in batches
            loader = torch.utils.data.DataLoader(
                dataset=dataset_src,
                batch_size=flags.batch_size,
                shuffle=shuffle,
                num_workers=flags.num_workers,
                drop_last=flags.drop_last,
                pin_memory=True)
        return loader
| ajaysub110/satbench | cnet/datasets/dataset_handler.py | dataset_handler.py | py | 7,854 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
31415576748 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Drop PromotionManager and attach promotions directly to a user account.

    Also retypes ``desc`` as a TextField and pins the image upload path.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('promotions_app', '0002_promotionmanager'),
    ]

    operations = [
        migrations.DeleteModel(
            name='PromotionManager',
        ),
        migrations.AddField(
            model_name='promotion',
            name='account',
            # default='' only backfills existing rows during this migration;
            # preserve_default=False removes the default from the model state.
            field=models.ForeignKey(default='', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='promotion',
            name='desc',
            field=models.TextField(),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='promotion',
            name='image',
            field=models.ImageField(upload_to=b'promotions'),
            preserve_default=True,
        ),
    ]
| mvpgomes/shopit-app | promotions_app/migrations/0003_auto_20150223_2016.py | 0003_auto_20150223_2016.py | py | 1,029 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 11,
"usage_type": "call... |
27406796251 | from typing import Optional, List
from fastapi import Body, Depends, HTTPException, status, Query
from pydantic import EmailStr
from .app_helper import (
create_student,
get_student,
update_student,
delete_student,
)
from .models import (
Student,
StudentDelete,
StudentUpdate,
StudentReturn,
Message,
TokenData,
)
from fastapi import APIRouter
from ..accessmanager.app_helper import get_current_user
student_manager_app = APIRouter(tags=["Student Manager"])
@student_manager_app.post(
    "/student",
    summary="Create a new Student",
    description="Add a new Student",
    response_model=StudentReturn,
    response_description="Creates Student",
    status_code=status.HTTP_201_CREATED,
    responses={400: {"model": Message}},
    tags=["Student Manager"],
)
async def create_user(user: Student = Body(...)):
    """Create a new student record.

    Returns the created student (201).  Raises HTTP 400 when the helper
    raises, or when it returns an empty result -- the original silently
    fell through and returned None in that case, which fails the
    ``response_model`` validation with a 500.
    """
    try:
        response_usr = create_student(user.dict())
    except Exception as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
    if response_usr:
        return response_usr
    # Explicit client error instead of an implicit None return.
    raise HTTPException(
        status_code=status.HTTP_400_BAD_REQUEST,
        detail="Student could not be created",
    )
@student_manager_app.get(
    "/student",
    summary="Get all Students",
    description="Get all Students",
    response_model=List[StudentReturn],
    response_description="Get all Students",
    status_code=status.HTTP_200_OK,
    responses={400: {"model": Message}},
    tags=["Student Manager"],
)
async def get_user(
    current_user: TokenData = Depends(get_current_user),
    instituteId: int = Query(
        None, title="Institute Id", description="Id of The Institute", example=12345
    ),
    u_id: Optional[int] = Query(
        None, title="Student Id", description="Id of The Student", example=12345
    ),
    userName: Optional[str] = Query(
        None,
        title="Student Name",
        description="UserName of The Student",
        example="adam_sit",
    ),
    emailAddress: Optional[EmailStr] = Query(
        None,
        title="Email of Student",
        description="Email of Student",
        example="dd@testemail.com",
    ),
    mobileNumber: Optional[str] = Query(
        None,
        title="Mobile number of Student",
        description="Mobile of The Student",
        example="+91-9028077584",
    ),
):
    """List students filtered by institute/id/username/email/mobile.

    Requires an authenticated user (``current_user`` dependency).  Any
    helper failure is surfaced as HTTP 400 with the exception text.
    """
    try:
        response_usr = get_student(
            institute_id=instituteId,
            u_id=u_id,
            username=userName,
            email=emailAddress,
            mobile=mobileNumber,
        )
        return response_usr
    except Exception as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
@student_manager_app.put(
    "/student",
    summary="Update a Student",
    description="Update a Student",
    response_model=StudentReturn,
    status_code=status.HTTP_202_ACCEPTED,
    responses={400: {"model": Message}},
    tags=["Student Manager"],
)
async def update_user(
    current_user: TokenData = Depends(get_current_user),
    u_id: int = Query(..., title="User Id", description="Id of The user", example=123),
    user: StudentUpdate = Body(...),
):
    """Update the student identified by ``u_id`` with the given fields.

    Requires an authenticated user.  Returns the updated student (202);
    any helper failure is surfaced as HTTP 400 with the exception text.
    """
    try:
        response_inst = update_student(u_id, user.dict())
        return response_inst
    except Exception as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
@student_manager_app.delete(
    "/student",
    summary="Delete a Student",
    description="Delete a Student",
    response_description="Delete Student",
    response_model=StudentDelete,
    status_code=status.HTTP_200_OK,
    responses={400: {"model": Message}},
    tags=["Student Manager"],
)
async def delete_user(
    current_user: TokenData = Depends(get_current_user),
    instituteId: int = Query(
        None, title="Institute Id", description="Id of The Institute", example=1234
    ),
    u_id: Optional[int] = Query(
        None, title="Student Id", description="Id of The Student", example=1234
    ),
    userName: Optional[str] = Query(
        None,
        title="Student Name",
        description="UserName of The Student",
        example="adam_sit",
    ),
    emailAddress: Optional[EmailStr] = Query(
        None,
        title="Email of Student",
        description="Email of The Student",
        example="dd@testemail.com",
    ),
    mobileNumber: Optional[str] = Query(
        None,
        title="Mobile number of Student",
        description="Mobile of The Student",
        example="+91-9028077584",
    ),
):
    """Delete students matching the given institute/id/username/email/mobile.

    Requires an authenticated user.  Returns the helper's status under
    ``status_delete``; any failure is surfaced as HTTP 400.
    """
    try:
        response_usr = delete_student(
            institute_id=instituteId,
            u_id=u_id,
            username=userName,
            email=emailAddress,
            mobile=mobileNumber,
        )
        return {"status_delete": response_usr}
    except Exception as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
| Ajinkya7poppyi/DeepManager | app/modules/studentmanager/app.py | app.py | py | 4,792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Student",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "fastapi.Body",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "app_helper.create_stude... |
34953688010 | # -*- coding: utf-8 -*-
"""Common test case for all message based resources.
"""
import ctypes
import gc
import logging
import time
from types import ModuleType
from typing import Optional
import pytest
from pyvisa import constants, errors
from pyvisa.constants import EventType, ResourceAttribute
from pyvisa.resources import Resource
from .resource_utils import (
EventAwareResourceTestCaseMixin,
LockableResourceTestCaseMixin,
ResourceTestCase,
)
np: Optional[ModuleType]
try:
import numpy
np = numpy
except ImportError:
np = None
class EventHandler:
    """Records which VISA events were observed, for use in event tests."""

    def __init__(self) -> None:
        # Flags flipped by the handlers below.
        self.event_success = False
        self.srq_success = False
        self.io_completed = False
        self.handle = None
        self.session = None

    def handle_event(self, session, event_type, event, handle=None):
        """Raw event handler.

        Ctypes handlers are expected to return an integer.
        """
        self.session = session
        self.handle = handle
        # Any event counts as a success; specific types set extra flags.
        self.event_success = True
        if event_type == EventType.service_request:
            self.srq_success = True
        elif event_type == EventType.io_completion:
            self.io_completed = True
        return 0

    def simplified_handler(self, resource, event, handle=None):
        """Simplified handler that can be wrapped."""
        self.session = resource.session
        self.handle = handle
        kind = event.event_type
        self.event_success = True
        if kind == EventType.service_request:
            self.srq_success = True
        elif kind == EventType.io_completion:
            self.io_completed = True
        return None
class MessagebasedResourceTestCase(ResourceTestCase):
"""Base test case for all message based resources."""
#: Type of resource being tested in this test case.
#: See RESOURCE_ADDRESSES in the __init__.py file of this package for
#: acceptable values
RESOURCE_TYPE = ""
# Any test involving communication involve to first write to glider the
# data then request it to send it back
    def setup_method(self):
        """Create a resource using the address matching the type.

        Uses short, explicit termchars and a 100 ms timeout so that
        failing reads abort quickly.
        """
        super().setup_method()
        self.instr.write_termination = "\n"
        self.instr.read_termination = "\n"
        self.instr.timeout = 100
def compare_user_handle(self, h1, h2):
"""Function comparing to user handle as passed to a callback.
We need such an indirection because we cannot safely always return
a Python object and most ctypes object do not compare equal.
"""
if isinstance(h1, ctypes.Structure):
return h1 == h2
elif hasattr(h1, "value"):
return h1.value == h2.value
else: # assume an array
return all((i == j for i, j in zip(h1, h2)))
    def test_encoding(self):
        """Test setting the string encoding."""
        assert self.instr.encoding == "ascii"
        self.instr.encoding = "utf-8"
        # An unknown codec name must be rejected.
        with pytest.raises(LookupError):
            self.instr.encoding = "test"
    def test_termchars(self):
        """Test modifying the termchars."""
        # Write termination
        self.instr.write_termination = "\r\n"
        assert self.instr.write_termination == "\r\n"
        # Read termination: the last char becomes the VISA termchar
        self.instr.read_termination = "\r\0"
        assert self.instr.read_termination == "\r\0"
        assert self.instr.get_visa_attribute(ResourceAttribute.termchar) == ord("\0")
        assert self.instr.get_visa_attribute(ResourceAttribute.termchar_enabled)
        # Disable read termination: termchar falls back to "\n", disabled
        self.instr.read_termination = None
        assert self.instr.get_visa_attribute(ResourceAttribute.termchar) == ord("\n")
        assert not self.instr.get_visa_attribute(ResourceAttribute.termchar_enabled)
        # Ban repeated term chars
        with pytest.raises(ValueError):
            self.instr.read_termination = "\n\n"
    def test_write_raw_read_bytes(self):
        """Test writing raw data and reading a specific number of bytes.

        Protocol: write RECEIVE, send a payload, then SEND makes the test
        instrument echo the payload back.
        """
        # Reading all bytes at once
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        count = self.instr.write_raw(b"SEND\n")
        assert count == 5
        self.instr.flush(constants.VI_READ_BUF)
        msg = self.instr.read_bytes(5, chunk_size=2)
        assert msg == b"test\n"
        # Reading one byte at a time
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        self.instr.write_raw(b"SEND\n")
        for ch in b"test\n":
            assert self.instr.read_bytes(1) == ch.to_bytes(1, "little")
        # Breaking on termchar
        self.instr.read_termination = "\r"
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"te\rst\r\n")
        self.instr.write_raw(b"SEND\n")
        assert self.instr.read_bytes(100, break_on_termchar=True) == b"te\r"
        assert self.instr.read_bytes(100, break_on_termchar=True) == b"st\r"
        assert self.instr.read_bytes(1) == b"\n"
        # Breaking on end of message
        self.instr.read_termination = "\n"
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        self.instr.write_raw(b"SEND\n")
        assert self.instr.read_bytes(100, break_on_termchar=True) == b"test\n"
    def test_handling_exception_in_read_bytes(self, caplog):
        """Test handling exception in read_bytes (monkeypatching).

        The low-level visalib read is replaced with one that always raises,
        and we check that read_bytes logs the failure.
        """

        def false_read(session, size):
            raise errors.VisaIOError(constants.VI_ERROR_ABORT)

        read = self.instr.visalib.read
        self.instr.visalib.read = false_read
        with caplog.at_level(logging.DEBUG):
            try:
                self.instr.read_bytes(1)
            except errors.VisaIOError:
                pass
            finally:
                # Always restore the real low-level read.
                self.instr.visalib.read = read
        assert "- exception while reading:" in caplog.records[1].message
    def test_write_raw_read_raw(self):
        """Test writing raw data and reading back the echoed answer."""
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        self.instr.write_raw(b"SEND\n")
        assert self.instr.read_raw(size=2) == b"test\n"
    def test_clear(self):
        """Test clearing the incoming buffer.

        After clear(), the echoed payload must be gone, so a read with a
        short timeout raises.
        """
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        self.instr.write_raw(b"SEND\n")
        self.instr.clear()
        self.instr.timeout = 10
        with pytest.raises(errors.VisaIOError):
            self.instr.read_raw()
    def test_write_read(self):
        """Test writing and reading with automatic termination handling."""
        self.instr.write_termination = "\n"
        self.instr.read_termination = "\r\n"
        self.instr.write("RECEIVE")
        # Message already ends with the write termination -> warning
        with pytest.warns(UserWarning):
            self.instr.write("test\r\n")
        count = self.instr.write("SEND")
        assert count == 5
        assert self.instr.read() == "test"
        # Missing termination chars
        self.instr.read_termination = "\r\n"
        self.instr.write("RECEIVE")
        self.instr.write("test")
        self.instr.write("SEND")
        with pytest.warns(Warning):
            assert self.instr.read() == "test\n"
        # Dynamic termination (per-call overrides)
        self.instr.write_termination = "\r"
        self.instr.write("RECEIVE\n", termination=False)
        self.instr.write("test\r", termination="\n")
        self.instr.write("SEND", termination="\n")
        assert self.instr.read(termination="\r") == "test"
        # Test query with delay
        self.instr.write_termination = "\n"
        self.instr.write("RECEIVE")
        self.instr.write("test\r")
        tic = time.time()
        assert self.instr.query("SEND", delay=0.5) == "test"
        assert time.time() - tic > 0.49
        # Test handling repeated term char
        self.instr.read_termination = "\n"
        for char in ("\r", None):
            self.instr.write_termination = "\n" if char else "\r"
            self.instr.write("RECEIVE", termination="\n")
            with pytest.warns(Warning):
                self.instr.write("test\r", termination=char)
            self.instr.write("", termination="\n")
            self.instr.write("SEND", termination="\n")
            assert self.instr.read() == "test\r\r"
        # TODO not sure how to test encoding
    def test_handling_exception_in_read_raw(self, caplog):
        """Test handling exception in read (monkeypatching the visalib read)."""

        def false_read(session, size):
            raise errors.VisaIOError(constants.VI_ERROR_ABORT)

        read = self.instr.visalib.read
        self.instr.visalib.read = false_read
        with caplog.at_level(logging.DEBUG):
            try:
                self.instr.read()
            except errors.VisaIOError:
                pass
            finally:
                # Always restore the real low-level read.
                self.instr.visalib.read = read
        assert caplog.records
    def test_write_ascii_values(self):
        """Test writing ascii values with default and custom separators."""
        # Standard separator
        values = [1, 2, 3, 4, 5]
        self.instr.write("RECEIVE")
        count = self.instr.write_ascii_values("", values, "d")
        assert count == 10
        self.instr.write("SEND")
        assert self.instr.read() == "1,2,3,4,5"
        # Non standard separator and termination
        self.instr.write_termination = "\r"
        self.instr.write("RECEIVE", termination="\n")
        self.instr.write_ascii_values("", values, "d", separator=";", termination=False)
        self.instr.write("", termination="\n")
        self.instr.write("SEND", termination="\n")
        assert self.instr.read() == "1;2;3;4;5"
        # Test handling repeated term char
        for char in ("\r", None):
            self.instr.write_termination = "\n" if char else "\r"
            self.instr.write("RECEIVE", termination="\n")
            with pytest.warns(Warning):
                values = [1, 2, 3, 4, 5]
                self.instr.write_ascii_values(
                    "\r", values, "s", separator=";", termination=char
                )
            self.instr.write("", termination="\n")
            self.instr.write("SEND", termination="\n")
            assert self.instr.read() == "\r1;2;3;4;5\r"
    @pytest.mark.parametrize(
        "hfmt, prefix",
        list(zip(("ieee", "hp", "empty"), (b"#212", b"#A\x0c\x00", b""))),
    )
    def test_write_binary_values(self, hfmt, prefix):
        """Test writing binary data for each supported header format."""
        values = [1, 2, 3, 4, 5, 6]
        self.instr.write_termination = "\n"
        self.instr.write("RECEIVE")
        count = self.instr.write_binary_values("", values, "h", header_fmt=hfmt)
        # Each integer encoded as h uses 2 bytes
        assert count == len(prefix) + 12 + 1
        self.instr.write("SEND")
        msg = self.instr.read_bytes(13 + len(prefix))
        assert msg == prefix + b"\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\n"
        # The hp header encodes the byte count in the (endianness-dependent)
        # last two bytes, so flip them for the big-endian check below.
        if hfmt == "hp":
            fl_prefix = prefix[0:2] + prefix[-2::][::-1]
        else:
            fl_prefix = prefix
        self.instr.write_termination = "\r"
        self.instr.write("RECEIVE", termination="\n")
        self.instr.write_binary_values(
            "", values, "h", is_big_endian=True, termination=False, header_fmt=hfmt
        )
        self.instr.write("", termination="\n")
        self.instr.write("SEND", termination="\n")
        assert (
            self.instr.read_bytes(13 + len(prefix))
            == fl_prefix + b"\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\n"
        )
        # Test handling repeated term char
        for char in ("\r", None):
            self.instr.write_termination = "\n" if char else "\r"
            self.instr.write("RECEIVE", termination="\n")
            with pytest.warns(Warning):
                self.instr.write_binary_values(
                    "\r", values, "h", header_fmt=hfmt, termination=char
                )
            self.instr.write("", termination="\n")
            self.instr.write("SEND", termination="\n")
            msg = self.instr.read()
            assert (
                msg
                == "\r"
                + prefix.decode("ascii")
                + "\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\r"
            )
        # Wrong header format
        with pytest.raises(ValueError):
            self.instr.write_binary_values("", values, "h", header_fmt="zxz")
    # Without and with trailing comma
    @pytest.mark.parametrize("msg", ["1,2,3,4,5", "1,2,3,4,5,"])
    def test_read_ascii_values(self, msg):
        """Test reading ascii values with several converters/containers."""
        # Standard separator
        self.instr.write("RECEIVE")
        self.instr.write(msg)
        self.instr.write("SEND")
        values = self.instr.read_ascii_values()
        assert type(values[0]) is float
        assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
        # Non standard separator and termination, plus query delay
        self.instr.write("RECEIVE")
        self.instr.write(msg.replace(",", ";"))
        tic = time.time()
        values = self.instr.query_ascii_values(
            "SEND", converter="d", separator=";", delay=0.5
        )
        assert time.time() - tic > 0.5
        assert type(values[0]) is int
        assert values == [1, 2, 3, 4, 5]
        # Numpy container (only when numpy is installed)
        if np:
            self.instr.write("RECEIVE")
            self.instr.write(msg)
            self.instr.write("SEND")
            values = self.instr.read_ascii_values(container=np.array)
            expected = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
            assert values.dtype is expected.dtype
            np.testing.assert_array_equal(values, expected)
    @pytest.mark.parametrize("hfmt", ("ieee", "hp"))
    def test_read_binary_values(self, hfmt):
        """Test reading binary data with and without expected termination."""
        # TODO test handling binary decoding issue (troublesome)
        self.instr.read_termination = "\r"
        # 3328 in binary short is \x00\r this way we can interrupt the
        # transmission midway to test some corner cases
        data = [1, 2, 3328, 3, 4, 5, 6, 7]
        self.instr.write("RECEIVE")
        self.instr.write_binary_values(
            "", data, "h", header_fmt=hfmt, termination="\r\n"
        )
        self.instr.write("SEND")
        new = self.instr.read_binary_values(
            datatype="h",
            is_big_endian=False,
            header_fmt=hfmt,
            expect_termination=True,
            chunk_size=8,
        )
        # Consume the trailing byte left after the termination.
        self.instr.read_bytes(1)
        assert data == new
        self.instr.write("RECEIVE")
        self.instr.write_binary_values(
            "", data, "h", header_fmt=hfmt, is_big_endian=True
        )
        new = self.instr.query_binary_values(
            "SEND",
            datatype="h",
            header_fmt=hfmt,
            is_big_endian=True,
            expect_termination=False,
            chunk_size=8,
            container=np.array if np else list,
        )
        self.instr.read_bytes(1)
        if np:
            np.testing.assert_array_equal(new, np.array(data, dtype=np.int16))
        else:
            assert data == new
    def test_read_query_binary_values_invalid_header(self):
        """Test we properly handle an invalid header format name.

        Both read_binary_values and query_binary_values must raise
        ValueError when header_fmt is not a recognized format.
        """
        data = [1, 2, 3328, 3, 4, 5, 6, 7]
        self.instr.write("RECEIVE")
        self.instr.write_binary_values(
            "", data, "h", header_fmt="ieee", is_big_endian=True
        )
        self.instr.write("SEND")
        with pytest.raises(ValueError):
            self.instr.read_binary_values(
                datatype="h",
                is_big_endian=False,
                header_fmt="invalid",
                expect_termination=True,
                chunk_size=8,
            )
        # Re-arm the loopback buffer and check the query variant too.
        self.instr.write("RECEIVE")
        self.instr.write_binary_values(
            "", data, "h", header_fmt="ieee", is_big_endian=True
        )
        with pytest.raises(ValueError):
            self.instr.query_binary_values(
                "*IDN",
                datatype="h",
                is_big_endian=False,
                header_fmt="invalid",
                expect_termination=True,
                chunk_size=8,
            )
    # Not sure how to test this
    @pytest.mark.skip
    def test_handling_malformed_binary(self):
        """Placeholder for exercising a malformed binary block (not implemented)."""
        pass
    @pytest.mark.parametrize("hfmt, header", list(zip(("ieee", "empty"), ("#0", ""))))
    def test_read_binary_values_unreported_length(self, hfmt, header):
        """Test reading binary data whose header does not state a length.

        In that case the caller must pass data_points explicitly; omitting
        it must raise ValueError (checked at the end).
        """
        self.instr.read_termination = "\r"
        # 3328 in binary short is \x00\r this way we can interrupt the
        # transmission midway to test some corner cases
        data = [1, 2, 3328, 3, 4, 5]
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x01\x00\x02\x00\x00\r\x03\x00\x04\x00\x05\x00",
            termination="\r\n",
        )
        self.instr.write("SEND")
        new = self.instr.read_binary_values(
            datatype="h",
            is_big_endian=False,
            header_fmt=hfmt,
            expect_termination=True,
            chunk_size=6,
            data_points=6,
        )
        # Drain the leftover termination byte.
        self.instr.read_bytes(1)
        assert data == new
        # Same payload big-endian through the query variant.
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
            termination="\r\n",
        )
        new = self.instr.query_binary_values(
            "SEND",
            datatype="h",
            header_fmt=hfmt,
            is_big_endian=True,
            expect_termination=False,
            chunk_size=6,
            container=np.array if np else list,
            data_points=6,
        )
        self.instr.read_bytes(1)
        if np:
            np.testing.assert_array_equal(new, np.array(data, dtype=np.int16))
        else:
            assert data == new
        # Check we do error on unreported/unspecified length
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x01\x00\x02\x00\x00\r\x03\x00\x04\x00\x05\x00",
            termination="\r\n",
        )
        self.instr.write("SEND")
        with pytest.raises(ValueError):
            self.instr.read_binary_values(
                datatype="h",
                is_big_endian=False,
                header_fmt=hfmt,
                expect_termination=True,
                chunk_size=6,
            )
    def test_read_binary_values_empty(self):
        """Test reading an empty binary block ("#10" header, zero bytes)."""
        self.instr.write("RECEIVE")
        self.instr.write("#10")
        self.instr.write("SEND")
        new = self.instr.read_binary_values(
            datatype="h",
            is_big_endian=False,
            header_fmt="ieee",
            expect_termination=True,
            chunk_size=6,
        )
        # Empty list expected.
        assert not new
        if np:
            self.instr.write("RECEIVE")
            self.instr.write(
                "#10",
                termination="\n",
            )
            new = self.instr.query_binary_values(
                "SEND",
                datatype="h",
                header_fmt="ieee",
                is_big_endian=True,
                expect_termination=False,
                chunk_size=6,
                container=np.array if np else list,
            )
            # numpy arrays are truthiness-ambiguous, so check .size.
            assert not new.size if np else not new
    def test_delay_in_query_ascii(self):
        """Test handling of the delay argument in query_ascii_values.

        An explicit delay= argument must override the instrument-wide
        query_delay in both directions (longer and zero).
        """
        # Test using the instrument wide delay
        self.instr.query_delay = 1.0
        self.instr.write("RECEIVE")
        self.instr.write("1,2,3,4,5")
        tic = time.perf_counter()
        values = self.instr.query_ascii_values("SEND")
        assert time.perf_counter() - tic > 0.99
        assert type(values[0]) is float
        assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
        # Test specifying the delay
        self.instr.query_delay = 0.0
        self.instr.write("RECEIVE")
        self.instr.write("1,2,3,4,5")
        tic = time.perf_counter()
        values = self.instr.query_ascii_values("SEND", delay=1.0)
        assert time.perf_counter() - tic > 0.99
        assert type(values[0]) is float
        assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
        # Test specifying a 0 delay
        self.instr.query_delay = 1.0
        self.instr.write("RECEIVE")
        self.instr.write("1,2,3,4,5")
        tic = time.perf_counter()
        values = self.instr.query_ascii_values("SEND", delay=0.0)
        assert time.perf_counter() - tic < 0.99
        assert type(values[0]) is float
        assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
    def test_instrument_wide_delay_in_query_binary(self):
        """Test handling the instrument-wide delay in query_binary_values."""
        header = "#0"
        data = [1, 2, 3328, 3, 4, 5]
        # Test using the instrument wide delay
        self.instr.query_delay = 1.0
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
            termination="\r\n",
        )
        tic = time.perf_counter()
        new = self.instr.query_binary_values(
            "SEND",
            datatype="h",
            header_fmt="ieee",
            is_big_endian=True,
            expect_termination=False,
            chunk_size=6,
            data_points=6,
        )
        # query_delay must be applied between write and read.
        assert time.perf_counter() - tic > 0.99
        assert data == new
    def test_delay_args_in_query_binary(self):
        """Test that an explicit delay argument is honored in query_binary_values."""
        header = "#0"
        data = [1, 2, 3328, 3, 4, 5]
        # Instrument-wide delay is zero; only delay=1.0 should apply.
        self.instr.query_delay = 0.0
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
            termination="\r\n",
        )
        tic = time.perf_counter()
        new = self.instr.query_binary_values(
            "SEND",
            datatype="h",
            header_fmt="ieee",
            is_big_endian=True,
            expect_termination=False,
            chunk_size=6,
            data_points=6,
            delay=1.0,
        )
        assert time.perf_counter() - tic > 0.99
        assert data == new
    def test_no_delay_args_in_query_binary(self):
        """Test that delay=0.0 overrides a non-zero instrument-wide query_delay."""
        header = "#0"
        data = [1, 2, 3328, 3, 4, 5]
        # Instrument-wide delay of 1 s must be suppressed by delay=0.0.
        self.instr.query_delay = 1.0
        self.instr.write("RECEIVE")
        self.instr.write(
            header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
            termination="\r\n",
        )
        tic = time.perf_counter()
        new = self.instr.query_binary_values(
            "SEND",
            datatype="h",
            header_fmt="ieee",
            is_big_endian=True,
            expect_termination=False,
            chunk_size=6,
            data_points=6,
            delay=0.0,
        )
        assert time.perf_counter() - tic < 1.0
        assert data == new
def test_stb(self):
"""Test reading the status byte."""
assert 0 <= self.instr.stb <= 256
assert 0 <= self.instr.read_stb() <= 256
class EventAwareMessagebasedResourceTestCaseMixin(EventAwareResourceTestCaseMixin):
    """Mixin for message based resources supporting events.

    Expects the host test case to provide ``self.instr`` (an open
    message-based resource), ``self.rm`` (the resource manager) and
    ``self.compare_user_handle``.
    """
    def test_manually_called_handlers(self):
        """Test calling manually even handler."""
        # Minimal stand-in resource: session None means "not open".
        class FalseResource(Resource):
            session = None
            visalib = None
            _session = None
            def __init__(self):
                pass
        fres = FalseResource()
        fres2 = FalseResource()
        fres2.session = 1
        handler = EventHandler()
        false_wrapped_handler = fres.wrap_handler(handler.simplified_handler)
        # Session None matches the resource's session, so the call succeeds.
        false_wrapped_handler(None, EventType.clear, 1, 1)
        assert handler.event_success
        # A mismatched session must be rejected.
        with pytest.raises(RuntimeError):
            false_wrapped_handler(1, EventType.clear, 1, 1)
    def test_handling_invalid_handler(self):
        """Test handling an error related to a wrong handler type."""
        with pytest.raises(errors.VisaTypeError):
            event_type = EventType.exception
            # 1 is not callable, so installation must fail.
            self.instr.install_handler(event_type, 1, object())
    def test_uninstalling_missing_visa_handler(self):
        """Test uninstalling a visa handler that was not registered."""
        handler1 = EventHandler()
        handler2 = EventHandler()
        event_type = EventType.exception
        self.instr.install_handler(event_type, handler1.handle_event)
        with pytest.raises(errors.UnknownHandler):
            self.instr.uninstall_handler(event_type, handler2.handle_event)
        self.instr.uninstall_handler(event_type, handler1.handle_event)
        # Still unknown after the registered one is gone.
        with pytest.raises(errors.UnknownHandler):
            self.instr.uninstall_handler(event_type, handler2.handle_event)
    def test_handler_clean_up_on_resource_del(self):
        """Test that handlers are properly cleaned when a resource is deleted."""
        handler = EventHandler()
        event_type = EventType.exception
        self.instr.install_handler(event_type, handler.handle_event)
        # Drop the only reference and force collection.
        self.instr = None
        gc.collect()
        assert not self.rm.visalib.handlers
    def test_uninstall_all_handlers(self):
        """Test uninstall all handlers from all sessions."""
        handler = EventHandler()
        event_type = EventType.exception
        self.instr.install_handler(event_type, handler.handle_event)
        # None means "all sessions".
        self.rm.visalib.uninstall_all_visa_handlers(None)
        assert not self.rm.visalib.handlers
    def test_manual_async_read(self):
        """Test handling IOCompletion event which has extra attributes."""
        # Prepare message
        self.instr.write_raw(b"RECEIVE\n")
        self.instr.write_raw(b"test\n")
        self.instr.write_raw(b"SEND\n")
        # Enable event handling
        event_type = EventType.io_completion
        event_mech = constants.EventMechanism.queue
        wait_time = 2000  # set time that program waits to receive event
        self.instr.enable_event(event_type, event_mech, None)
        try:
            visalib = self.instr.visalib
            buffer, job_id, status_code = visalib.read_asynchronously(
                self.instr.session, 10
            )
            assert buffer is visalib.get_buffer_from_id(job_id)
            response = self.instr.wait_on_event(event_type, wait_time)
        finally:
            # Always disable the event even if the wait failed.
            self.instr.disable_event(event_type, event_mech)
        assert response.event.status == constants.StatusCode.success
        assert bytes(buffer) == bytes(response.event.buffer)
        assert bytes(response.event.data) == b"test\n"
        assert response.event.return_count == 5
        assert response.event.operation_name == "viReadAsync"
    def test_getting_unknown_buffer(self):
        """Test getting a buffer with a wrong ID."""
        assert self.instr.visalib.get_buffer_from_id(1) is None
    def test_wait_on_event_timeout(self):
        """Test waiting on a VISA event."""
        event_type = EventType.service_request
        event_mech = constants.EventMechanism.queue
        # Emit a clear to avoid dealing with previous requests
        self.instr.clear()
        self.instr.enable_event(event_type, event_mech, None)
        try:
            # capture_timeout=True turns the timeout into a response flag.
            response = self.instr.wait_on_event(event_type, 10, capture_timeout=True)
        finally:
            self.instr.disable_event(event_type, event_mech)
        assert response.timed_out
        assert response.event.event_type == event_type
        # Without capture_timeout the timeout surfaces as VisaIOError.
        with pytest.raises(errors.VisaIOError):
            self.instr.enable_event(event_type, event_mech, None)
            try:
                response = self.instr.wait_on_event(event_type, 10)
            finally:
                self.instr.disable_event(event_type, event_mech)
    def test_wait_on_event(self):
        """Test waiting on a VISA event."""
        event_type = EventType.service_request
        event_mech = constants.EventMechanism.queue
        wait_time = 2000  # set time that program waits to receive event
        self.instr.enable_event(event_type, event_mech, None)
        # SENDSLOWSRQ makes the simulated instrument raise an SRQ later.
        self.instr.write("RCVSLOWSRQ")
        self.instr.write("1")
        self.instr.write("SENDSLOWSRQ")
        try:
            response = self.instr.wait_on_event(event_type, wait_time)
        finally:
            self.instr.disable_event(event_type, event_mech)
        assert not response.timed_out
        assert response.event.event_type == EventType.service_request
        assert self.instr.read() == "1"
    def test_managing_visa_handler(self):
        """Test using visa handlers."""
        def _test(handle):
            # One full install/fire/uninstall cycle for a given user handle.
            handler = EventHandler()
            event_type = EventType.service_request
            event_mech = constants.EventMechanism.handler
            user_handle = self.instr.install_handler(
                event_type, handler.handle_event, user_handle=handle
            )
            self.instr.enable_event(event_type, event_mech, None)
            self.instr.write("RCVSLOWSRQ")
            self.instr.write("1")
            self.instr.write("SENDSLOWSRQ")
            try:
                # Poll for up to ~2 s for the handler to fire.
                t1 = time.time()
                while not handler.event_success:
                    if (time.time() - t1) > 2:
                        break
                    time.sleep(0.1)
            finally:
                self.instr.disable_event(event_type, event_mech)
            self.instr.uninstall_handler(
                event_type, handler.handle_event, user_handle
            )
            assert handler.session == self.instr.session
            assert self.compare_user_handle(handler.handle, user_handle)
            assert handler.srq_success
            assert self.instr.read() == "1"
            self.instr.clear()
        # ctypes structure used to exercise a non-trivial user handle type.
        class Point(ctypes.Structure):
            _fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)]
            def __eq__(self, other):
                if type(self) is not type(other):
                    return False
                return self.x == other.x and self.y == other.y
        for handle in (1, 1.0, "1", [1], [1.0], Point(1, 2)):
            print(handle)
            _test(handle)
    def test_wrapping_handler(self):
        """Test wrapping a handler using a Resource."""
        handler = EventHandler()
        event_type = EventType.service_request
        event_mech = constants.EventMechanism.handler
        wrapped_handler = self.instr.wrap_handler(handler.simplified_handler)
        user_handle = self.instr.install_handler(event_type, wrapped_handler, 1)
        self.instr.enable_event(event_type, event_mech, None)
        self.instr.write("RCVSLOWSRQ")
        self.instr.write("1")
        self.instr.write("SENDSLOWSRQ")
        try:
            # Poll for up to ~2 s for the handler to fire.
            t1 = time.time()
            while not handler.event_success:
                if (time.time() - t1) > 2:
                    break
                time.sleep(0.1)
        finally:
            self.instr.disable_event(event_type, event_mech)
        self.instr.uninstall_handler(event_type, wrapped_handler, user_handle)
        assert self.instr.session == handler.session
        assert self.compare_user_handle(handler.handle, user_handle)
        assert handler.srq_success
        assert self.instr.read() == "1"
    def test_bare_handler(self):
        """Test using a bare handler passing raw backend values."""
        from pyvisa import ctwrapper
        # Only meaningful against the ctypes-based IVI backend.
        if not isinstance(self.instr.visalib, ctwrapper.IVIVisaLibrary):
            return
        ctwrapper.WRAP_HANDLER = False
        try:
            handler = EventHandler()
            event_type = EventType.service_request
            event_mech = constants.EventMechanism.handler
            user_handle = self.instr.install_handler(
                event_type, handler.handle_event, 1
            )
            self.instr.enable_event(event_type, event_mech, None)
            self.instr.write("RCVSLOWSRQ")
            self.instr.write("1")
            self.instr.write("SENDSLOWSRQ")
            try:
                t1 = time.time()
                while not handler.event_success:
                    if (time.time() - t1) > 2:
                        break
                    time.sleep(0.1)
            finally:
                self.instr.disable_event(event_type, event_mech)
            self.instr.uninstall_handler(
                event_type, handler.handle_event, user_handle
            )
            # Raw mode: session/handle come back as ctypes values.
            assert self.instr.session == handler.session.value
            assert self.compare_user_handle(handler.handle.contents, user_handle)
            assert handler.srq_success
            assert self.instr.read() == "1"
        finally:
            # Restore the global flag whatever happened.
            ctwrapper.WRAP_HANDLER = True
class LockableMessagedBasedResourceTestCaseMixin(LockableResourceTestCaseMixin):
    """Mixin for message based resources supporting locking."""
    def test_shared_locking(self):
        """Test locking/unlocking a resource."""
        # Two extra sessions on the same resource.
        instr2 = self.rm.open_resource(str(self.rname))
        instr3 = self.rm.open_resource(str(self.rname))
        key = self.instr.lock()
        instr2.lock(requested_key=key)
        # Both holders of the shared key can talk; instr3 cannot.
        assert self.instr.query("*IDN?")
        assert instr2.query("*IDN?")
        with pytest.raises(errors.VisaIOError):
            instr3.query("*IDN?")
        # Share the lock for a limited time
        with instr3.lock_context(requested_key=key) as key2:
            assert instr3.query("*IDN?")
            assert key == key2
        # Stop sharing the lock
        instr2.unlock()
        with pytest.raises(errors.VisaIOError):
            instr2.query("*IDN?")
        with pytest.raises(errors.VisaIOError):
            instr3.query("*IDN?")
        self.instr.unlock()
        # Fully unlocked: anyone can talk again.
        assert instr3.query("*IDN?")
    def test_exclusive_locking(self):
        """Test locking/unlocking a resource."""
        instr2 = self.rm.open_resource(str(self.rname))
        self.instr.lock_excl()
        with pytest.raises(errors.VisaIOError):
            instr2.query("*IDN?")
        self.instr.unlock()
        assert instr2.query("*IDN?")
        # Share the lock for a limited time
        with self.instr.lock_context(requested_key="exclusive") as key:
            # Exclusive locks have no shareable key.
            assert key is None
            with pytest.raises(errors.VisaIOError):
                instr2.query("*IDN?")
| pyvisa/pyvisa | pyvisa/testsuite/keysight_assisted_tests/messagebased_resource_utils.py | messagebased_resource_utils.py | py | 34,742 | python | en | code | 721 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "types.ModuleType",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pyvisa.constants.EventType.service_request",
"line_number": 51,
"usage_type": "attribute"
},
{
"... |
38639297269 | #!/usr/bin/env python
from setuptools import find_packages, setup
# Long description comes straight from the README.
with open('README.rst') as readme_file:
    readme = readme_file.read()
# Runtime dependencies; setup/test requirement lists are intentionally empty.
requirements = ['scitools-iris', 'numpy', 'scipy', 'matplotlib', 'metpy']
setup_requirements = []
test_requirements = []
setup(
    author="Leo Saffin",
    author_email='string_buster@hotmail.com',
    classifiers=[
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python",
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Atmospheric Science'],
    description="Scripts for work.",
    install_requires=requirements,
    license="MIT license",
    long_description=readme,
    include_package_data=True,
    keywords='',
    name='myscripts',
    packages=find_packages(include=['myscripts']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/LSaffin/myscripts',
    version='0.2',
)
| leosaffin/scripts | setup.py | setup.py | py | 1,150 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 34,
"usage_type": "call"
}
] |
9893919789 | from typing import Any, List, Dict
import xlwt
import xlrd2
import openpyxl
from ..basic.exClass import CommonException
from ..exFunc import *
from .fileFunc import readLines, isExist, createFile, deleteFile, writeAppend
class XlsWriter():
    """Write tabular data to an Excel workbook.

    The backend is chosen from the file extension given at construction
    time: ``.xlsx`` uses openpyxl (write-only streaming mode), ``.xls``
    uses xlwt.  ``save()`` must be called to flush the workbook to disk.
    """
    def __init__(self, path: str) -> None:
        # path: destination file; its extension selects the backend.
        self.path = path
        if path.endswith('.xlsx'):
            self.isXlsx = True
            # write_only streams rows and keeps memory usage low.
            self.workbook = openpyxl.Workbook(write_only=True)
        elif path.endswith('.xls'):
            self.isXlsx = False
            self.workbook = xlwt.Workbook(encoding="utf-8")
        else:
            raise CommonException('文件类型不正确')
    def save(self):
        """Flush the workbook to ``self.path``."""
        self.workbook.save(self.path)
    def createSheet(self, headers: List[str], datas: List[List[Any]],
                    sheetName: str='sheet1'):
        """Create a sheet with a header row followed by one row per entry of ``datas``."""
        if self.isXlsx:
            sheet = self.workbook.create_sheet(sheetName)
            # Header row first, then each data row.
            sheet.append(headers)
            for data in datas:
                sheet.append(data)
        else:
            sheet = self.workbook.add_sheet(sheetName)
            # Row 0 holds the headers.
            for col, column in enumerate(headers):
                sheet.write(0, col, column)
            # Data rows start at row 1.
            for row, data in enumerate(datas):
                for col, col_data in enumerate(data):
                    sheet.write(row + 1, col, col_data)
    def createSheetWithDict(self, dataList: List[Dict[str, Any]],
                            sheetName: str='sheet1'):
        """Create a sheet from a list of dicts, using their keys as columns.

        Column order is arbitrary (keys are deduplicated through a set),
        matching the original behavior.  Missing or falsy values are
        written as empty strings.
        """
        # Sample at most the first 10 rows when collecting column names to
        # keep this cheap on large inputs (assumes rows share a schema).
        sample = dataList[0:10] if len(dataList) > 10 else dataList
        # Plain set comprehension instead of the monkey-patched
        # list.flatMap/dict.ks helpers from exFunc: same key union,
        # no dependency on patched builtins.
        headers = list({key for row in sample for key in row.keys()})
        datas = []
        for data in dataList:
            tmp = []
            for t in headers:
                # NOTE: falsy values (0, '', None) are also emitted as ''.
                if t in data and data[t]:
                    tmp.append(data[t])
                else:
                    tmp.append('')
            datas.append(tmp)
        self.createSheet(headers, datas, sheetName)
class XlsReader():
    """Read Excel workbooks through xlrd2."""
    def __init__(self, path) -> None:
        self.workbook = xlrd2.open_workbook(path)
    def sheetNames(self):
        """Return the list of sheet names in the workbook."""
        return self.workbook.sheet_names()
    def readSheetByName(self, sheetName: str) -> List[Dict[str, Any]]:
        """Return the sheet's rows as dicts keyed by the first (header) row."""
        sh = self.workbook.sheet_by_name(sheetName)
        headers = sh.row_values(0)
        return [dict(zip(headers, sh.row_values(i))) for i in range(1, sh.nrows)]
    def readSheetByIndex(self, sheetIndex: int=0) -> List[Dict[str, Any]]:
        """Return the ``sheetIndex``-th sheet's rows as dicts keyed by the header row."""
        sh = self.workbook.sheet_by_index(sheetIndex)
        headers = sh.row_values(0)
        return [dict(zip(headers, sh.row_values(i))) for i in range(1, sh.nrows)]
    def readSheetAsMatrixByName(self, sheetName: str) -> List[List[Any]]:
        """Return every row (header included) as a raw list of cell values.

        The annotation previously claimed ``List[Dict[str, Any]]`` but the
        method has always returned a list of row lists; fixed to match.
        """
        sh = self.workbook.sheet_by_name(sheetName)
        return [sh.row_values(i) for i in range(0, sh.nrows)]
class CsvRW():
    """Minimal CSV-style reader/writer built on the project's file helpers.

    NOTE: ``read()`` splits naively on ``splitFlag`` and does not undo the
    quoting applied by ``write()``; for full CSV semantics the stdlib
    ``csv`` module would be the right tool.
    """
    def __init__(self, path, splitFlag: str = ',', encoding='utf-8') -> None:
        self.path = path
        self.splitFlag = splitFlag
        self.encoding = encoding
        # Ensure the backing file exists so later reads do not fail.
        if not isExist(self.path):
            createFile(self.path)
    def headers(self):
        """Return the column names from the first line ([] if the file is empty)."""
        lines = readLines(self.path, 1, encoding=self.encoding)
        if len(lines) >= 1:
            # .filter is the project's patched list helper from exFunc.
            return lines[0].split(self.splitFlag).filter(lambda s: s != '')
        return []
    def read(self):
        """Return every line as a dict keyed by the header row."""
        lines = readLines(self.path, encoding=self.encoding)
        re = []
        headers = self.headers()
        for line in lines:
            d = dict(zip(headers, line.split(self.splitFlag)))
            re.append(d)
        return re
    def clear(self):
        """Truncate the file by deleting and recreating it."""
        deleteFile(self.path)
        createFile(self.path)
    def write(self, datas: List[dict]):
        """Append ``datas`` as quoted rows, writing a header row first if missing."""
        headers = self.headers()
        strs = []
        if len(headers) == 0:
            headers = list(datas[0].keys())
            strs.append(self.splitFlag.join(headers)+'\n')
        for data in datas:
            tmp = []
            for t in headers:
                if t in data and data[t]:
                    # Escape backslashes first, then quotes.  The original
                    # used .replace('"', '\"'), which is a no-op because
                    # '\"' == '"'; emit a real backslash-escaped quote.
                    tmp.append(
                        '"'+str(data[t]).replace('\\', '\\\\').replace('"', '\\"')+'"')
                else:
                    tmp.append('""')
            strs.append(self.splitFlag.join(tmp)+'\n')
        writeAppend(self.path, lines=strs, encoding=self.encoding)
| Logic-Orz/logicFun | logicFun/common/excelFunc.py | excelFunc.py | py | 4,595 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.Workbook",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "basic.exClass.CommonException",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "typing.... |
1588805635 | import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
from mask_shape_border import mask_shape_border
DS_y=xr.open_dataset("yield_soy_1979-2012.nc",decode_times=False).sel(lon=slice(-61,-44),lat=slice(-5,-33))
DS_y['time'] = pd.to_datetime(list(range(1979, 2013)), format='%Y').year
DS_y=DS_y.sel(time=slice('1980','2010'))
adm1_shapes = list(shpreader.Reader('gadm36_BRA_1.shp').geometries())
clipped= mask_shape_border(DS_y,'gadm36_BRA_0.shp' )
DS_y=clipped
# DS_y2 = xr.open_dataset("yield_soy_1979-2012.nc",decode_times=False).sel(lon=slice(-100.25,-80.25),lat=slice(50.25,30.25), time=slice(1,31))
# DS_y2['time'] = pd.to_datetime(list(range(1980, 2011)), format='%Y').year
# clipped= mask_shape_border(DS_y2,'gadm36_USA_0.shp' )
# DS_y2=clipped
# Iizumi data yield
ds_iizumi = xr.open_mfdataset('soybean/*.nc4', concat_dim="time", combine='nested')
ds_iizumi = ds_iizumi.assign_coords({"time" : ds_iizumi.time})
ds_iizumi['lon'] = (ds_iizumi.coords['lon'] + 180) % 360 - 180
ds_iizumi = ds_iizumi.sortby(ds_iizumi.lon)
ds_iizumi['time'] = pd.to_datetime(list(range(1981, 2017)), format='%Y').year
ds_iizumi = ds_iizumi.rename({'lon': 'longitude','lat': 'latitude','var' : 'yield'})
ds_iizumi['yield'].attrs = {'units': 'ton/ha', 'long_name': 'Yield in tons per hectare'}
# ds_iizumi.to_netcdf('soybean_iizumi.nc')
# test = xr.open_dataset("soybean_iizumi.nc")
da_iizumi = ds_iizumi['yield']
clipped= mask_shape_border(ds_iizumi,'gadm36_BRA_0.shp' )
ds_iizumi=clipped
#%% Average and standard deviation of soybean yield
yield_mean=DS_y['yield'].mean(dim = 'time', keep_attrs=True)
yield_std=DS_y['yield'].std(dim = 'time', keep_attrs=True)
plt.figure(figsize=(20,10))
ax=plt.axes(projection=ccrs.PlateCarree())
fig=yield_mean.plot(x='lon', y='lat', transform=ccrs.PlateCarree(),robust=True,cbar_kwargs={'label': yield_mean.attrs['units']}, cmap='Reds')
ax.set_title('Mean soy yield along time')
ax.set_xticks(ax.get_xticks()[::2]); ax.set_yticks(ax.get_yticks()[::1])
ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),edgecolor='black', facecolor=(0,1,0,0.0))
ax.set_extent([-61,-44,-33,-8], ccrs.PlateCarree())
plt.show()
plt.figure(figsize=(20,10))
ax=plt.axes(projection=ccrs.PlateCarree())
fig=yield_std.plot(x='lon', y='lat', transform=ccrs.PlateCarree(),robust=True,cbar_kwargs={'label': yield_mean.attrs['units']}, cmap='Reds')
ax.set_title('Standard deviation of soybean yield along time')
ax.set_xticks(ax.get_xticks()[::2]); ax.set_yticks(ax.get_yticks()[::1])
ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),edgecolor='black', facecolor=(0,1,0,0.0))
ax.set_extent([-61,-44,-33,-8], ccrs.PlateCarree())
plt.show()
#Establish temporal behaviour of the yield function (model based wofosst) wrt to the mean, +-SD.
df_t=ds_iizumi['yield'].to_series().groupby(['time']).mean()
df_t_mean=df_t.mean()
df_t_std=df_t.std()
df_t_low=df_t_mean - df_t_std
df_t_high=df_t_mean + df_t_std
plt.figure(figsize=(20,10))
plt.axhline(y=df_t_mean, color='r', linestyle='-')
plt.axhline(y=df_t_low, color='r', linestyle='--')
plt.axhline(y=df_t_high, color='r', linestyle='--')
plt.plot(df_t)
# plt.show()
# df_t2=DS_y2['yield'].to_series().groupby(['time']).mean()
# df_t2_mean=df_t2.mean()
# df_t2_std=df_t2.std()
# df_t2_low=df_t2_mean - df_t2_std
# df_t2_high=df_t2_mean + df_t2_std
# # plt.figure(figsize=(20,10))
# plt.axhline(y=df_t2_mean, color='r', linestyle='-')
# plt.axhline(y=df_t2_low, color='r', linestyle='--')
# plt.axhline(y=df_t2_high, color='r', linestyle='--')
# plt.plot(df_t2)
# plt.show()
#%%
from sklearn.linear_model import LinearRegression
series = pd.read_csv('fao_yield_soybean.csv', sep = ';', header=0, index_col=0)
series = series.loc[(series.index >= 1980) & (series.index <= 2010)]
# fit linear model
X = [i for i in range(0, len(series))]
X = np.reshape(X, (len(X), 1))
y = series.values
model = LinearRegression()
model.fit(X, y)
# calculate trend
trend = model.predict(X)
# plot trend
plt.plot(y)
plt.plot(trend)
plt.show()
# detrend
detrended = [trend.mean() + y[i]-trend[i] for i in range(0, len(series))]
df_detrended = pd.DataFrame(detrended, index=series.index, columns=['yield'] ) # 1st row as the column names
df_data_det = df_detrended.loc[(df_detrended.index >= 1981) & (df_detrended.index <= 2010)]
df_data_det.index = pd.to_datetime(list(range(1981, 2011)), format='%Y').year
plt.plot(df_data_det)
plt.show()
#detrend yield
series = df_t
# fit linear model
X = [i for i in range(0, len(series))]
X = np.reshape(X, (len(X), 1))
y = series.values
model = LinearRegression()
model.fit(X, y)
# calculate trend
trend = model.predict(X)
# plot trend
plt.plot(y)
plt.plot(trend)
plt.show()
# detrend
detrended = [trend.mean() + y[i]-trend[i] for i in range(0, len(series))]
df_detrended = pd.DataFrame(detrended, index=series.index, columns=['yield'] ) # 1st row as the column names
df_iizumi = df_detrended.loc[(df_detrended.index >= 1980) & (df_detrended.index <= 2010)]
df_iizumi.index = pd.to_datetime(list(range(1981, 2011)), format='%Y').year
plt.plot(df_iizumi)
plt.show()
# compare model and data yield
plt.figure(figsize=(10,6))
p1=plt.plot(df_iizumi, label='Model yield')
p2=plt.axhline(y=df_t_low, color='r', linestyle='--', label='Upper SD model yield')
p3=plt.axhline(y=df_t_mean, color='r', linestyle='-', label='Mean model yield')
p4=plt.axhline(y=df_t_high, color='r', linestyle='--', label='Lower SD model yield')
p5=plt.plot(df_data_det, label='Data yield')
plt.title('Comparison model and data yield',fontsize='x-large')
plt.ylabel('Yield')
plt.xlabel('Year')
plt.legend(loc='best', fontsize='x-large')
plt.show()
# calculate Pearson's correlation
from scipy.stats import pearsonr
val1=[float(i) for i in df_data_det.values]
val2=[float(i) for i in df_iizumi.values]
corr, _ = pearsonr(val2, val1)
print('Pearsons correlation: %.3f' % corr)
| dumontgoulart/agr_cli | model_data_yield.py | model_data_yield.py | py | 5,932 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "xarray.open_dataset",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cartopy.io.shapereader.Reader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "c... |
43467543395 | import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent
from undetected_chromedriver import Chrome, ChromeOptions
from getCarInfo import getCarInfo
from bs4 import BeautifulSoup
from pegaproxy import pegar_proxy
# Build the list of catalogue-year links from the scraped link file.
# Lines were produced with " c" as a crude separator, which truncates a
# leading "c" from "catalogo"; the two replace passes below restore it
# (the first turns intact "catalogo" into "ccatalogo", the second undoes it).
completo = []
with open('linkYearsCar.txt', 'r') as arquivo:
    for linha in arquivo:
        completo.extend(linha.split(" c"))
completo = [item.replace("atalogo", "catalogo") for item in completo]
completo = [item.replace("ccatalogo", "catalogo") for item in completo]
def get_random_user_agent():
    """Return a randomly chosen browser user-agent string."""
    return UserAgent().random
# Shared Selenium proxy object; http_proxy is rewritten by reiniciar_proxy()
# each time the browser is restarted behind a new ip:port.
proxy = Proxy()
proxy.proxy_type = ProxyType.MANUAL
proxy.http_proxy = f'170.106.117.131:11513'
# Chrome options for the initial browser instance.
options = Options()
def generate_random_user_agent():
    """Return a randomly chosen browser user-agent string (duplicate of get_random_user_agent)."""
    return UserAgent().random
# Initial Chrome configuration: small window, adblock extension, no images,
# and all traffic routed through the proxy above.
extension_path = '/home/davi/Downloads/adblock.crx'
options.add_argument("--window-size=800,600")
options.add_extension(extension_path)
options.add_argument("--disable-images")
options.add_argument("--no-first-run")
# Route traffic through the configured HTTP proxy.
options.add_argument("--proxy-server=http://{}".format(proxy.http_proxy))
prefs = {"profile.managed_default_content_settings.images": 2} # 2 means do not load images
options.add_experimental_option("prefs", prefs)
# (plugins/extras intentionally left at defaults)
def reiniciar_proxy():
    """Tear down the current browser and restart it behind a fresh proxy.

    Rebinds the module-level ``driver``; the shared ``proxy`` object is
    updated in place with a new ip:port obtained from ``pegar_proxy()``.
    Note: unlike the initial setup, the restarted browser deliberately
    skips the adblock extension (the unused ``extension_path`` local that
    hinted otherwise has been removed).
    """
    global driver
    driver.quit()
    options = Options()
    print('reiniciando proxy')
    prefs = {"profile.managed_default_content_settings.images": 2} # 2 means do not load images
    options.add_experimental_option("prefs", prefs)
    options.add_argument("--no-first-run")
    ip, port = pegar_proxy()
    proxy.http_proxy = f'{ip}:{port}'
    options.add_argument("--proxy-server=http://{}".format(proxy.http_proxy))
    driver = webdriver.Chrome(options=options)
    driver.set_page_load_timeout(60)
# Initial browser instance; pages that take longer than 60 s raise TimeoutException.
driver = webdriver.Chrome(options=options)
driver.set_page_load_timeout(60)
def reiniciar_pagina(driver):
    """Restart the browser/proxy after a page timeout.

    NOTE: the ``driver`` parameter is unused — reiniciar_proxy() rebinds
    the module-level driver itself; kept for call-site compatibility.
    """
    reiniciar_proxy()
    time.sleep(10)
# Base URL every relative catalogue link is appended to.
carrosNaWeb = "https://carrosnaweb.com.br/"
linksCarros = []  # NOTE(review): never populated in this script
c = 0  # page loads since the last proxy rotation (rotated every 10)
#1363
# Main scrape loop: resume from item 3987; for each catalogue-year page,
# visit every car link, extract its data, and follow "Próxima" pagination.
# Progress and failures are appended to "esta agora.txt" / "erro de carros.txt".
for index, link2 in enumerate(completo[3987:], start=3987):
    filename = "esta agora.txt"
    with open(filename, "a") as f:
        f.write(f'index: {index}' + "\n")
    print(link2)
    print(index)
    try:
        # Load the catalogue page, rotating the proxy every 10 loads and
        # retrying on the site's error page or a page-load timeout.
        z = 0
        while z < 1:
            try:
                while True:
                    if c == 10:
                        reiniciar_proxy()
                        c = 0
                    else:
                        c = c+1
                    driver.get(carrosNaWeb + link2)
                    time.sleep(3)
                    url_atual = driver.current_url
                    z = 10
                    if url_atual == 'https://www.carrosnaweb.com.br/erro.asp':
                        reiniciar_proxy()
                        time.sleep(24)
                    else:
                        break
            except TimeoutException:
                print("reiniciando pagina")
                reiniciar_pagina(driver)
                z = 0
        # Walk every result page of this catalogue entry.
        while True:
            html12 = driver.page_source
            doc = BeautifulSoup(html12, "html.parser")
            # Car links live in white, left-aligned table cells.
            links = doc.find_all(["td"], bgcolor="#ffffff", align="left")
            for link in links:
                a_tags = link.find_all('a')
                for a_tag in a_tags:
                    a_tag = a_tag.get('href')
                    try:
                        # Same retry dance for each individual car page.
                        x = 0
                        while x < 1:
                            try:
                                while True:
                                    print('chegou aqui')
                                    driver.get(carrosNaWeb + a_tag)
                                    time.sleep(7)
                                    url_atual = driver.current_url
                                    x = 10
                                    if url_atual == 'https://www.carrosnaweb.com.br/erro.asp':
                                        reiniciar_proxy()
                                        time.sleep(10)
                                    else:
                                        break
                            except TimeoutException:
                                print('reiniciando pagina')
                                x = 0
                                reiniciar_pagina(driver)
                                time.sleep(10)
                        # Extract and persist this car's data.
                        html = driver.page_source
                        pegou = getCarInfo(html)
                        filename = "esta agora.txt"
                        with open(filename, "a") as f:
                            f.write(f'Esse carro deu certo' + "\n")
                        if pegou == 'error':
                            filename = "erro de carros.txt"
                            with open(filename, "a") as f:
                                f.write(f'{carrosNaWeb + a_tag}' + "\n")
                    except:
                        # Best-effort: log the failing car link and move on.
                        filename = "esta agora.txt"
                        with open(filename, "a") as f:
                            f.write(f'erro no link do carro que esta agora link {carrosNaWeb + a_tag}' + "\n")
                        pass
            # Follow the "Próxima" (next page) link if present.
            element = doc.find("b", string="Próxima")
            if element is not None:
                print('tem mais que 10')
                parent_element = element.parent
                link_real = parent_element.get('href')
                link_completo = carrosNaWeb + link_real
                try:
                    while True:
                        driver.get(link_completo)
                        time.sleep(10)
                        url_atual = driver.current_url
                        if url_atual == 'https://www.carrosnaweb.com.br/erro.asp':
                            reiniciar_proxy()
                            time.sleep(20)
                        else:
                            break
                except TimeoutException:
                    print("reiniciando pagina")
                    reiniciar_pagina(driver)
                    time.sleep(20)
            else:
                print('Não tem mais')
                break
    except:
        # Catch-all so one bad catalogue entry never stops the whole run.
        filename = "esta agora.txt"
        with open(filename, "a") as f:
            f.write(f'Erro' + "\n")
        pass
print("ELE FUNCIONOU CARALHO HUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU") | Sankhay/Estudos | Python/selenium/app4.py | app4.py | py | 7,174 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fake_useragent.UserAgent",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.proxy.Proxy",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.proxy.ProxyType.MANUAL",
"line_number": 39,
"usa... |
15141641723 | import requests
import json
import sys
import urllib.parse
import os.path
import hashlib
import argparse
# import sqlite3
# from tplib3 import *
# import datetime
# import time
import platform
from sqlite3 import Error
# updategroupconfig
#
# Fetches siteconfig from config file
# fetches groupconfig from cloud
# fetches from disk
# if disk file is missing or there are changes, write new config to disk.
#
# Python version: 3
#
# Modifictions
# 5/8-20 initial creation
#
# Usage:
# python updategroupconfig [--configfile filename]
progVersion = "1.0 05082020"
if platform.system().lower() == "linux":
siteConfigFile = "/home/pi/conf/siteconfig.json"
else:
siteConfigFile = "C:\\usr\\demo_projects\\pi\\hytta\\config.json"
def getGroups(siteConfig):
#
# Fetch all groups
#
headers = {"content-type": "application/json"}
try:
g = requests.get(siteConfig["groupConfigURL"], headers=headers)
except IOError as err:
print("REST API Call for: " + siteConfig["groupConfigURL"] + " failed: ", err)
return False
localConfig = []
if g.ok:
#
# Fetch memerships
#
try:
m = requests.get(siteConfig["membersURL"], headers=headers)
if m.ok:
allGroups = g.json()["items"]
allmembers = m.json()["items"]
#
# Iterate through and add member array
#
for group in list(allGroups):
#
# iterate through all memers of a group
#
members = []
for member in list(allmembers):
if member["groupname"] == group["groupname"]:
members.append(member["membername"])
group["members"] = members
localConfig.append(group)
f = open(siteConfig["groupConfigFile"], "w")
f.write(json.dumps(localConfig))
f.close()
return localConfig
else:
print(
"API call for members failed, JSON Parse Error: "
+ str(m.status_code)
)
return False
except exception as e:
print("API Call generated invalid result")
print(siteConfig["membersURL"])
print(headers)
print(str(e))
return False
else:
print("API call for groups failed: " + str(g.status_code))
return False
#
def loadSiteConfig(siteConfigFile):
#
# Load master config file to get groupConfigFile path,
# confURL, for static group membership
# tempConfigURL for dynamic temp setting
# groupconfig, contains statig gruop names and group sensor setup
#
try:
f = open(siteConfigFile, "r")
strsiteconfig = f.read()
siteConfig = json.loads(strsiteconfig)
f.close()
return siteConfig
except IOError:
print("Site Config File" + siteConfigFile + " is not accessible")
return False
return null
def main():
global siteConfigFile
#
# Identify program
#
print()
print("update group config Version: " + progVersion)
print()
#
# process comand line onl one optin available, -f filename for sysconfig fiel
#
argsParser = argparse.ArgumentParser(description="updategroupconfig program")
argsParser.add_argument(
"--configfile", default=siteConfigFile, type=str, help="Site Config File"
)
args = argsParser.parse_args()
siteConfigFile = args.configfile
#
# Load static site config information
#
siteConfig = loadSiteConfig(siteConfigFile)
if not siteConfig:
print("Error Loading confguration")
return 1
#
# get current group config file
#
if os.path.isfile(siteConfig["groupConfigFile"]):
try:
f = open(siteConfig["groupConfigFile"], "r")
strconfig = f.read()
if len(strconfig) > 1:
currentGroups = json.loads(strconfig)
else:
currentGroups = "{}"
f.close()
except IOError:
print(
"Member Config File"
+ siteConfig["groupConfigFile"]
+ " exists but is not accessible"
)
return 2
groups = getGroups(siteConfig)
if groups != False:
if (
hashlib.md5(json.dumps(groups).encode("utf-8")).digest()
!= hashlib.md5(json.dumps(currentGroups).encode("utf-8")).digest()
):
print("New version found, saving.....")
f = open(siteConfig["groupConfigFile"], "w")
f.write(json.dumps(groups))
f.close()
else:
print("Files are in sync")
sys.exit(0)
else:
print("getGroups failed")
sys.exit(1)
sys.exit(2)
else:
#
# Group Config did not exists need to fetch it from iot database
#
groups = getGroups(siteConfig)
print("groupconfig.json synchronized")
if __name__ == "__main__":
main()
| bios62/ha_hytta | python-scripts/updategroupconfig.py | updategroupconfig.py | py | 5,476 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "platform.system",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_numb... |
43073520173 | import sys
from PyQt4.QtGui import (QApplication, QWidget, QFont, QListWidget,
QHBoxLayout, QVBoxLayout, QShortcut, QKeySequence)
import numpy as np
from spyderlib.widgets.sourcecode.codeeditor import CodeEditor
from spyderlib.widgets.internalshell import InternalShell
from spyderlib.widgets.dicteditor import DictEditorWidget
class Demo(QWidget):
def __init__(self):
super(Demo, self).__init__()
self.code_editor = CodeEditor(self)
self.code_editor.setup_editor(
language = "python",
font = QFont("Courier New")
)
run_sc = QShortcut(QKeySequence("F5"), self, self.run)
self.shell = InternalShell(self, {"demo":self},
multithreaded = False,
max_line_count = 3000,
font = QFont("Courier new", 10),
message='caonima'
)
self.dict_editor = DictEditorWidget(self, {})
self.dict_editor.editor.set_filter(self.filter_namespace)
self.dict_editor.set_data(self.shell.interpreter.namespace)
vbox = QVBoxLayout()
vbox.addWidget(self.code_editor)
vbox.addWidget(self.shell)
hbox = QHBoxLayout()
hbox.addWidget(self.dict_editor)
hbox.addLayout(vbox)
self.setLayout(hbox)
self.resize(800, 600)
def filter_namespace(self, data):
result = {}
support_types = [np.ndarray, int, float, str, tuple, dict, list]
for key, value in data.items():
if not key.startswith("__") and type(value) in support_types:
result[key] = value
return result
def run(self):
code = str(self.code_editor.toPlainText())
namespace = self.shell.interpreter.namespace
exec (code,namespace )
self.dict_editor.set_data(namespace)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
try:
demo = Demo()
demo.show()
except Exception as ex:
import traceback
sys.__stdout__.write(traceback.format_exc())
sys.exit(app.exec_())
| UpSea/midProjects | BasicOperations/11_Spyder/useOfSpyderShell.py | useOfSpyderShell.py | py | 2,085 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "spyderlib.widgets.sourcecode.codeeditor.CodeEditor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QFont",
"line_number": 15,
"usage_type": "call"
},
... |
40319161924 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from src.ui.winsWidgetView import WinsWidgetView
logging.basicConfig(level=logging.DEBUG) # change to 'DEBUG' to see more
from PyQt5 import QtWidgets, QtGui
def setFontFamily(font):
allFamillies = QtGui.QFontDatabase().families()
familyName = font.defaultFamily()
if "微软雅黑" in allFamillies:
familyName = "微软雅黑"
font.setFamily(familyName)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
font = QtGui.QFont()
setFontFamily(font)
font.setPixelSize(14)
app.setFont(font)
with open('qml\Main.qml', 'r') as qss:
app.setStyleSheet(qss.read())
mainview = WinsWidgetView()
mainview.show()
sys.exit(app.exec_())
| winsomexiao/PyDemo | src/ui/mainView.py | mainView.py | py | 792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtGui.QFontDatabase",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt... |
43719343533 | #
# This is an example script to plot seconday chemical shifts.
# by Woonghee Lee, Ph.D. (woonghee.lee@ucdenver.edu)
#
# To run this script:
# In Poky Notepad,
# File -> Run Python Module
print('\n\n\n------------------------------------------------------')
print('POKY Secondary Shift Plot')
print('by Woonghee Lee, Ph.D. (woonghee.lee@ucdenver.edu)')
print('Department of Chemistry, University of Colorado Denver')
print('------------------------------------------------------')
# Reference to J. Biomol. NMR 1995.5 67-81
refCSdict = {# CA CB C H N AA
'A':( 52.5, 19.1, 177.8, 8.24, 123.8), # Ala
'Cr':( 58.2, 28.0, 174.6, 8.32, 118.8), # Cys_r
'Co':( 55.4, 41.1, 174.6, 8.43, 118.6), # Cys_o
'D':( 54.2, 41.1, 176.3, 8.34, 120.4), # Asp
'E':( 56.6, 29.9, 176.6, 8.42, 120.2), # Glu
'F':( 57.7, 39.6, 175.8, 8.30, 120.3), # Phe
'G':( 45.1, 0.00, 174.9, 8.33, 108.8), # Gly
'H':( 55.0, 29.0, 174.1, 8.42, 118.2), # His
'I':( 61.1, 38.8, 176.4, 8.00, 119.9), # Ile
'K':( 56.2, 33.1, 176.6, 8.29, 120.4), # Lys
'L':( 55.1, 42.4, 177.6, 8.16, 121.8), # Leu
'M':( 55.4, 32.9, 176.3, 8.28, 119.6), # Met
'N':( 53.1, 38.9, 175.2, 8.40, 118.7), # Asn
'P':( 63.3, 32.1, 177.3, 0.00, 0.00), # Pro
'Q':( 55.7, 29.4, 176.0, 8.32, 119.8), # Gln
'R':( 56.0, 30.9, 176.3, 8.23, 120.5), # Arg
'S':( 58.3, 63.8, 174.6, 8.31, 115.7), # Ser
'T':( 61.8, 69.8, 174.7, 8.15, 113.6), # Thr
'V':( 62.2, 32.9, 176.3, 8.03, 119.2), # Val
'W':( 57.5, 29.6, 176.1, 8.25, 121.3), # Trp
'Y':( 57.9, 38.8, 175.9, 8.12, 120.3) # Tyr
}
import __main__
s = __main__.main_session
clist = s.project.condition_list()
if len(clist) > 1:
cname = s.show_conditionselectiondialog('Select a condition to evaluate.', 0)
from sputil import name_to_condition
c = name_to_condition(cname, s)
else:
c = clist[0]
btn_list = ('CA', 'CB', 'C', 'H', 'N', 'Cancel')
idx = s.show_selectionexdialog('Atom type', 'Select an atom type.', btn_list)
if idx in [-1, len(btn_list)-1]:
raise SystemExit
x_list, y_list, err_list = [], [], []
for resn in c.resonance_list():
a = resn.group.symbol
cs = resn.frequency
if cs == 0.0 or resn.atom.name != btn_list[idx]:
continue
try:
if a != 'C':
row = refCSdict[resn.group.symbol]
if row[idx] == 0.0:
continue
dcs = cs - row[idx]
else: # CYS will be evaluated to the closer value from r. vs o.
row = refCSdict[resn.group.symbol + 'r']
dcs = cs - row[idx]
row2 = refCSdict[resn.group.symbol + 'o']
dcs2 = cs - row2[idx]
if abs(dcs2) < abs(dcs):
dcs = dcs2
except:
continue
x_list.append(resn.group.number)
y_list.append(dcs)
err_list.append(resn.deviation)
# plotting
from matplotlib import use as matplotlib_use
matplotlib_use('TkAgg')
import matplotlib.pyplot as plt
xlabel = 'Residue Number'
ylabel = 'd(CSobs - CSref)'
plt.figure()
plt.errorbar(x_list, y_list, err_list, fmt='bo', markersize=5)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
ylim = max(max(y_list), abs(min(y_list))) * 1.1
plt.ylim((-1 * ylim, ylim))
plt.title('Poky Secondary Shift Plot: ' + btn_list[idx])
plt.pause(0.1)
plt.show(block=False)
| pokynmr/POKY | User_Modules/plot_secondary_shift_script.py | plot_secondary_shift_script.py | py | 3,395 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "__main__.main_session",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "sputil.name_to_condition",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.use",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "ma... |
12086094939 | import sys
sys.path.append("../")
import pymysql
import math
import pandas as pd
import numpy as np
from scipy.spatial import distance
import csv
from InsertDataBase.CreateTables import *
from DBtools.init_db import init_DB
def GetMinDistanceLane(local_x, local_y):
radius = 5
node_list = list()
way_list = list()
while len(node_list) < 3:
node_list = list()
way_list = list()
min_x = local_x - radius
max_x = local_x + radius
min_y = local_y - radius
max_y = local_y + radius
sql = "select Node_To_Way.node_id, way_id " \
"from Node_To_Way " \
"join Node_Info on Node_To_Way.node_id = Node_Info.node_id " \
"where local_x BETWEEN %s and %s and local_y BETWEEN %s and %s group by Node_To_Way.node_id,way_id" \
% (min_x, max_x, min_y, max_y)
cursor.execute(sql)
node_list = list()
for i in cursor.fetchall():
node_list.append(i[0])
way_list.append(i[1])
radius = 2 * radius
way_list = list(set(way_list))
vehicle_position_list = np.array([[local_x, local_y]])
dist = np.inf
lane = None
for way_id in way_list:
temp_position_list = list()
sql = "select Node_Info.node_id, local_x, local_y " \
"from Node_Info " \
"join Node_To_Way on Node_Info.node_id = Node_To_Way.node_id " \
"where way_id = %s" % (way_id)
cursor.execute(sql)
for i in cursor.fetchall():
temp_position_list.append([float(i[1]), float(i[2])])
temp_position_list = np.array(temp_position_list)
new_dist = distance.cdist(vehicle_position_list, temp_position_list).min(axis=1).min(axis=0)
if new_dist < dist:
dist = new_dist
lane = str(way_id)
return lane
def InsertTable(cursor, VehicleInfo, table):
insertTimingSql = "insert into Traffic_timing_state" + table + "(time_stamp,vehicle_id,local_x,local_y,orientation,lane_id) " \
"values(%s,%s,%s,%s,%s,%s)"
for i in range(len(VehicleInfo)):
time_stamp = int(float(VehicleInfo[i][0]) * 1e+3)
id_str = VehicleInfo[i][1].split('-')
vehicle_id = int(id_str[-1])
local_x = float(VehicleInfo[i][3])
local_y = float(VehicleInfo[i][4])
lane = GetMinDistanceLane(local_x, local_y)
orien = 0.000
cursor.execute(insertTimingSql,(time_stamp, vehicle_id, local_x, local_y, orien, lane))
if (i % 100 == 0):
print(i)
insertPropertySql = "insert into Traffic_participant_property" + table + "(vehicle_id,vehicle_class) " \
"values(%s,%s) ON DUPLICATE KEY UPDATE vehicle_class = vehicle_class"
for i in range(len(VehicleInfo)):
vehicle_type = VehicleInfo[i][2]
id_str = VehicleInfo[i][1].split('-')
vehicle_id = int(id_str[-1])
vehicle_class = int(1)
cursor.execute(insertPropertySql,(vehicle_id, vehicle_class))
if (i % 100 == 0):
print(i)
UpdateParticipantSql = "Update Traffic_timing_state" + table + " set orientation = %s where time_stamp = %s and vehicle_id = %s"
stampsql = "select time_stamp from Traffic_timing_state" + table
vehiclesql = "select vehicle_id from Traffic_timing_state" + table + " where time_stamp = %s"
positionsql = "select local_x, local_y from Traffic_timing_state" + table + " where time_stamp = %s and vehicle_id = %s"
oriensql = "select orientation from Traffic_timing_state" + table + " where time_stamp = %s and vehicle_id = %s"
cursor.execute(stampsql)
timestampresult = cursor.fetchall()
timestamp_set = set()
for i in range(len(timestampresult)):
timestamp_set.add(timestampresult[i][0])
timestamp_list = list(timestamp_set)
timestamp_list.sort()
vehicle_zero_orien_list = list()
for t in range(len(timestamp_list) - 1):
vehicle_list = list()
cursor.execute(vehiclesql, timestamp_list[t + 1])
for vehicle in cursor.fetchall():
vehicle_list.append(vehicle[0])
if t == 0:
cursor.execute(vehiclesql, timestamp_list[t])
for vehicle_zero in cursor.fetchall():
vehicle_zero_orien_list.append(vehicle_zero[0])
for vehicle in vehicle_list:
local_x = 0
local_y = 0
last_x = 0
last_y = 0
cursor.execute(positionsql, (timestamp_list[t + 1], vehicle))
for i in cursor.fetchall():
local_x = i[0]
local_y = i[1]
cursor.execute(positionsql, (timestamp_list[t], vehicle))
for j in cursor.fetchall():
last_x = j[0]
last_y = j[1]
if local_x != None and local_y != None and last_x != None and last_y != None:
if last_x == 0 and last_y == 0:
vehicle_zero_orien_list.append(vehicle)
continue
direction = [local_x - last_x, local_y - last_y]
orien = math.atan2(direction[1], direction[0])
if local_x == last_x and local_y == last_y:
cursor.execute(oriensql, (timestamp_list[t], vehicle))
last_orien = cursor.fetchall()[0][0]
cursor.execute(UpdateParticipantSql, (last_orien, timestamp_list[t + 1], vehicle))
else:
cursor.execute(UpdateParticipantSql, (orien, timestamp_list[t + 1], vehicle))
if vehicle in vehicle_zero_orien_list:
cursor.execute(UpdateParticipantSql, (orien, timestamp_list[t], vehicle))
vehicle_zero_orien_list.remove(vehicle)
if __name__ == '__main__':
conn, cursor = init_DB("Argoverse_MIA_Scenario_DB")
csv_reader = csv.reader(open("../Annotator/sample_record.csv", encoding='utf-8'))
for i, rows in enumerate(csv_reader):
table = str(rows[0])
print("table: ", table)
VehicleInfo = list()
file = "../../argoverse_trajectory/forecasting_train_v1.1/train/data/" + table + ".csv"
VehicleInfo = pd.read_csv(file, decimal=",", low_memory=False)
VehicleInfo = np.array(VehicleInfo)
table = "_" + table
CreateTrafficParticipantPropertyTable(cursor, table)
CreateTrafficTimingStateTable(cursor, table)
InsertTable(cursor, VehicleInfo, table)
cursor.close()
conn.commit()
conn.close() | THU-changc17/MetaScenario | InsertDataBase/Argoverse_MIA_InsertTrafficParticipant.py | Argoverse_MIA_InsertTrafficParticipant.py | py | 6,690 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number"... |
71632601315 | #!/usr/bin/env python
import argparse
import glob
import os
import re
import cv2
import numpy as np
import pandas as pd
from skimage import transform as tf
from utils import create_dictionary_lang
CHROME_PATH = "/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome"
lang_code_dict = create_dictionary_lang()
root_dir = "Data/FLORES/"
# TODO: font weight?
dict_properties = {
"color": ["black"],
"opacity": ["1", "0.3"],
"font_size": ["20px"],
"letter_spacing": ["normal", "0.2em", "-0.2em"],
"italic": [True, False],
"bold": [True, False],
"gauss": [True, False],
"skew": [True, False]
}
def read_fonts_file():
df = pd.read_csv('Data/misc/languages_fonts_codes.csv')
lang_codes = df["Code"].tolist()
lang_fonts = df["Fonts"].tolist()
return lang_codes, lang_fonts
def create_style_file(
font, color, opacity, font_size, letter_spacing, italic, bold, name_style
):
str_font = "src: url(fonts/" + font + ".ttf);"
full_style = (
"""
@font-face {
font-family: defined_font;
"""
+ str_font
+ """
}
p {
font-family: defined_font;
color: """
+ color
+ ";"
+ """
opacity: """
+ opacity
+ ";"
+ """
letter-spacing:"""
+ letter_spacing
+ ";"
+ """
font-size:"""
+ font_size
+ ";"
)
if italic:
full_style += """
font-style: italic;"""
if bold:
full_style += """
font-weight: bold;"""
full_style += """
}
"""
with open("Data/augmentation/styles/" + name_style + ".css", "w+") as f:
f.write(full_style)
def create_html_file(root_path, list_sentences, name_html_file, name_style):
str_style = (
f"""<link rel="stylesheet" href="{os.path.abspath('Data/augmentation/styles/' + name_style + '.css')}">"""
)
str_html_head = (
"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content=
"width=device-width, initial-scale=1.0">
"""
+ str_style
+ """
</head>
<body>
"""
)
# put all text into one paragraph
str_html_text = "<p>" + "".join(list_sentences) + "</p>"
str_html_head_close = """
</body>
</html>
"""
full_text = str_html_head + str_html_text + str_html_head_close
with open(os.path.join(root_path, name_html_file + ".html"), "w") as f:
f.write(full_text)
def save_html_to_pdf(root_save_pdfs: str, root_html_url: str, name_html_file: str) -> None:
os.system(
CHROME_PATH
+ " --headless --print-to-pdf-no-header --print-to-pdf="
+ root_save_pdfs
+ name_html_file
+ ".pdf "
+ root_html_url
+ name_html_file
+ ".html"
)
def save_pdf_to_png(lang_name, name_file, name_html_file, root_path: str = "Data/FLORES/"):
# if entire pdf -> need to first split into pages
root_save_pdfs = "Data/augmentation/pdfs/"
os.makedirs(root_path + lang_name + "/png/" + name_file, exist_ok=True)
path_png_out = root_path + lang_name + "/png/" + name_file + "/" + name_html_file
path_pdf_in = root_save_pdfs + name_html_file + ".pdf"
print("Saving pdf to png for " + lang_name)
os.system("convert -density 300 -trim " + path_pdf_in + " -quality 100 " + path_png_out + "%02d.png")
def add_gaussian_noise(lang_name, name_file, name_html_file, root_path: str = "Data/FLORES/"):
img = cv2.imread(root_path + lang_name + "/png/" + name_file + "/" + name_html_file + ".png")
# Generate Gaussian noise
gauss = np.random.normal(0, 1, img.size)
gauss = gauss.reshape(img.shape[0], img.shape[1], img.shape[2]).astype("uint8")
# Add the Gaussian noise to the image
img_gauss = cv2.add(img, gauss)
img_gauss = cv2.cvtColor(img_gauss, cv2.COLOR_BGR2GRAY)
cv2.imwrite(
root_path
+ lang_name
+ "/png/"
+ name_file
+ "/"
+ name_html_file
+ "_gauss"
+ ".png",
img_gauss,
)
def add_salt_pepper_noise(lang_name, name_file, name_html_file, amount, s_vs_p, root_path: str = "Data/FLORES/"):
image = cv2.imread(
root_path + lang_name + "/png/" + name_file + "/" + name_html_file + ".png"
)
img_noise = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape]
img_noise[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]
img_noise[coords] = 0
img_noise = cv2.cvtColor(img_noise, cv2.COLOR_BGR2GRAY)
cv2.imwrite(
root_path
+ lang_name
+ "/png/"
+ name_file
+ "/"
+ name_html_file
+ "_salt_pepper"
+ ".png",
img_noise,
)
def add_speckle_noise(lang_name, name_file, name_html_file, root_path: str = "Data/FLORES/"):
img = cv2.imread(
root_path + lang_name + "/png/" + name_file + "/" + name_html_file + ".png"
)
gauss = np.random.normal(0, 1, img.size)
gauss = gauss.reshape(img.shape[0], img.shape[1], img.shape[2]).astype('uint8')
img_noise = img + img * gauss
img_noise = cv2.cvtColor(img_noise, cv2.COLOR_BGR2GRAY)
cv2.imwrite(
root_path
+ lang_name
+ "/png/"
+ name_file
+ "/"
+ name_html_file
+ "_gauss"
+ ".png",
img_noise,
)
def add_skew(lang_name, name_file, name_html_file, root_path: str = "Data/FLORES/"):
img = cv2.imread(
root_path + lang_name + "/png/" + name_file + "/" + name_html_file + ".png"
)
# Create Affine transform
affine_tf = tf.AffineTransform(shear=0.1)
# Apply transform to image data
img_skew = tf.warp(img, inverse_map=affine_tf) * 255
cv2.imwrite(
root_path
+ lang_name
+ "/png/"
+ name_file
+ "/"
+ name_html_file
+ "_skew"
+ ".png",
img_skew,
)
def run_augmentation_udhr(lang_code):
lang_name = lang_code_dict[lang_code]
lang_codes, lang_fonts = read_fonts_file()
index_lang_code = lang_codes.index(lang_code)
fonts = lang_fonts[index_lang_code].split("; ")
color = dict_properties["color"][0]
opacity = dict_properties["opacity"][0]
letter_spacing = dict_properties["letter_spacing"][0]
italic = dict_properties["italic"][1]
bold = dict_properties["bold"][1]
font = fonts[0]
root = os.path.join('Data/UDHR/', lang_name)
root_path = os.path.join('Data/UDHR/annotations/', lang_code, "pdfs_synth")
os.makedirs(root_path, exist_ok=True)
txt_files = glob.glob(root + '/*[0-9].txt')
name_style = font + "_" + color.replace("#", "") + "_" + opacity + "_" + letter_spacing
if italic:
name_style += "_" + "italic"
if bold:
name_style += "_" + "bold"
for txt_file in txt_files:
with open(txt_file, encoding="utf8") as file:
list_sentences = file.read()
name_file = txt_file.split("/")[-1][:-4]
nb = str(re.findall(r'\d+', name_file)[0])
name_html_file = f"{lang_code}_{nb}"
create_html_file(root_path, list_sentences, name_html_file, name_style)
root_save_pdfs = root_path
root_html_url = root_path
save_html_to_pdf(root_save_pdfs, root_html_url, name_html_file)
print("Saving pdf to png for " + name_html_file)
path_png_out = os.path.join(root_path, name_html_file + ".png")
path_pdf_in = os.path.join(root_path, name_html_file + ".pdf")
os.system("convert -density 300 -trim " + path_pdf_in + " -quality 100 " + path_png_out)
def run_augmentation(lang_code):
lang_name = lang_code_dict[lang_code]
root_lang_name = root_dir + lang_name + "/"
lang_codes, lang_fonts = read_fonts_file()
index_lang_code = lang_codes.index(lang_code)
fonts = lang_fonts[index_lang_code].split("; ")
color = dict_properties["color"][0]
opacity = dict_properties["opacity"][0]
letter_spacing = dict_properties["letter_spacing"][0]
italic = dict_properties["italic"][1]
bold = dict_properties["bold"][1]
gauss = dict_properties["gauss"][1]
skew = dict_properties["skew"][1]
font = fonts[0]
name_file = lang_code
with open(os.path.join(root_lang_name, name_file + ".txt"), encoding="utf-8") as file:
list_sentences = file.readlines()
name_style = font + "_" + color.replace("#", "") + "_" + opacity + "_" + letter_spacing
if italic:
name_style += "_" + "italic"
if bold:
name_style += "_" + "bold"
name_html_file = name_file + "_" + name_style
root_path = "Data/augmentation/htmls"
create_html_file(root_path, list_sentences, name_html_file, name_style)
root_save_pdfs = "Data/augmentation/pdfs"
root_html_url = f"file://{os.path.abspath('Data/augmentation/htmls')}"
save_html_to_pdf(root_save_pdfs, root_html_url, name_html_file)
save_pdf_to_png(lang_name, name_file, name_html_file)
if gauss:
add_salt_pepper_noise(lang_name, name_file, name_html_file, amount=0.005, s_vs_p=0.5)
if skew:
add_skew(lang_name, name_file, name_html_file)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["FLORES", "UDHR"], default="FLORES")
return parser.parse_args()
def main() -> None:
args = parse_args()
for lang_code in lang_code_dict:
if args.dataset == "FLORES":
run_augmentation(lang_code)
elif args.dataset == "UDHR":
run_augmentation_udhr(lang_code)
else:
raise ValueError(f"Unknown dataset: {args.dataset}")
if __name__ == "__main__":
main()
| facebookresearch/flores | ocr/data_collection/augment_data.py | augment_data.py | py | 10,099 | python | en | code | 623 | github-code | 1 | [
{
"api_name": "utils.create_dictionary_lang",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path"... |
5856668077 | import logging
import sys
import os
LOG_CONFIG = {
'name': 'event-tracker',
'level': logging.DEBUG,
'stream_handler': logging.StreamHandler(sys.stdout),
'format': '%(asctime)s: %(module)s: %(levelname)s: %(message)s'
}
TWITTER_CONFIG = {
'api_key': os.environ["TWITTER_API_KEY"],
'api_secret': os.environ["TWITTER_API_KEY_SECRET"],
'bearer_token': os.environ["TWITTER_BEARER_TOKEN"],
}
REDDIT_CONFIG = {
'client_id': os.environ["REDDIT_CLIENT_ID"],
'client_secret': os.environ["REDDIT_CLIENT_SECRET"],
'user_agent': os.environ["REDDIT_USER_AGENT"]
}
TELEGRAM_CONFIG = {
'bot_token': os.environ["TELEGRAM_BOT_TOKEN"]
} | TheGBG/pic_sender | src/config/config.py | config.py | py | 666 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
... |
20275789174 | from config import TG_KEY, GPT_KEY
from keybrds import kb
from redis_client import redis_client
import openai
from aiogram import Bot, Dispatcher, executor, types
openai.api_key = GPT_KEY
bot = Bot(token=TG_KEY)
dispatcher = Dispatcher(bot)
@dispatcher.message_handler(commands=["start"])
async def start_func(message: types.Message):
print("Bot has started!")
await message.reply(
"Welcome to Chat_bot. This bot uses ChatGPT3!\n\nTo clear cache use command /del.",
reply_markup=kb,
)
@dispatcher.message_handler(commands=["del"])
async def clear_cache(message: types.Message):
user_id = types.User.get_current().id
redis_client.delete(user_id)
redis_client.close()
await message.reply("Cache was cleared!", reply_markup=kb)
@dispatcher.message_handler()
async def get_message(message: types.Message) -> None:
user_id = types.User.get_current().id
user_name = types.User.get_current().username
print(user_id, "==", user_name)
if redis_client.get(user_id):
db_message = redis_client.get(user_id).decode()
else:
db_message = str(redis_client.set(user_id, ""))
redis_client.close()
chat_history = db_message + "\n\nHuman: " + message.text + "\n\nAI:"
if len(chat_history) > 2000:
chat_history = chat_history[-2000:]
print("CHAT LENGTH: ", len(chat_history))
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": chat_history}],
temperature=1.5,
)
answer = response["choices"][0]["message"]["content"]
await message.reply(f"<code>{answer}</code>", parse_mode="html")
except:
await message.reply(
"<b>An error occured!\nTry again!</b>\n\nПроизошла ошибка!\nПопробуйте еще раз!",
parse_mode="html",
)
print("\n\n\n======EXCEPTION HANDELED!=======\n\n\n")
answer = ""
chat_history = chat_history + answer
# print('CHAT_HISTORY: ', chat_history)
redis_client.set(user_id, chat_history.encode(), ex=1800)
# print('RDB: ', redis_client.get(user_id).decode())
redis_client.close()
if __name__ == "__main__":
executor.start_polling(dispatcher, skip_updates=True)
| EgorShabalin/chat_bot | main.py | main.py | py | 2,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openai.api_key",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "config.GPT_KEY",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "aiogram.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.TG_KEY",
"lin... |
412075890 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import pathlib
import textwrap
import pytest
import numpy as np
from dae.genomic_resources.testing import build_inmemory_test_repository
from dae.genomic_resources.repository import GR_CONF_FILE_NAME, \
GenomicResourceRepo
from dae.gene.gene_scores import build_gene_score_from_resource, \
GeneScoreImplementation
@pytest.fixture
def scores_repo(tmp_path: pathlib.Path) -> GenomicResourceRepo:
scores_repo = build_inmemory_test_repository({
"LinearHist": {
GR_CONF_FILE_NAME: """
type: gene_score
filename: linear.csv
scores:
- id: linear
desc: linear gene score
histogram:
type: number
number_of_bins: 3
x_log_scale: false
y_log_scale: false
""",
"linear.csv": textwrap.dedent("""
gene,linear
G1,1
G2,2
G3,3
G4,1
G5,2
G6,3
"""),
"statistics": {
"histogram_linear.yaml": textwrap.dedent("""
bars:
- 2
- 2
- 2
bins:
- 1.0
- 1.665
- 2.333
- 3.0
config:
type: number
number_of_bins: 3
score: linear
view_range:
max: 3.0
min: 1.0
x_log_scale: false
y_log_scale: false
""")
}
},
"LogHist": {
GR_CONF_FILE_NAME: """
type: gene_score
filename: log.csv
scores:
- id: log
desc: log gene score
histogram:
type: number
number_of_bins: 5
view_range:
min: 0.0
max: 1.0
x_min_log: 0.001
x_log_scale: true
y_log_scale: false
""",
"log.csv": textwrap.dedent("""
gene,log
G1,0
G2,0.0001
G3,0.001
G4,0.01
G5,0.1
G6,1.0
"""),
"statistics": {
"histogram_log.yaml": textwrap.dedent("""
bars:
- 2
- 1
- 1
- 1
- 1
bins:
- 0.0,
- 0.001,
- 0.005623413251903491,
- 0.03162277660168379,
- 0.1778279410038923,
- 1.0
config:
type: number
number_of_bins: 5
view_range:
min: 0.0
max: 1.0
x_min_log: 0.001
x_log_scale: true
y_log_scale: false
""")
}
},
"NaNTest": {
GR_CONF_FILE_NAME: """
type: gene_score
filename: scores.csv
scores:
- id: score1
desc: linear gene score
histogram:
type: number
number_of_bins: 3
x_log_scale: false
y_log_scale: false
""",
"scores.csv": textwrap.dedent("""
gene,score1
G1,1
G2,2
G3,nan
G4,1
G5,nan
G6,3
"""),
"statistics": {
"histogram_linear.yaml": textwrap.dedent("""
bars:
- 2
- 2
- 2
bins:
- 1.0
- 1.665
- 2.333
- 3.0
config:
type: number
number_of_bins: 3
score: linear
view_range:
max: 3.0
min: 1.0
x_log_scale: false
y_log_scale: false
""")
}
},
"Oops": {
GR_CONF_FILE_NAME: "",
},
"OopsHist": {
GR_CONF_FILE_NAME: """
type: gene_score
filename: oops.csv
scores:
- id: linear
desc: linear gene score
""",
"oops.csv": textwrap.dedent("""
gene,linear
G1,1
G2,2
G3,3
""")
},
"OopsScores": {
GR_CONF_FILE_NAME: """
type: gene_score
filename: oops.csv
""",
"oops.csv": textwrap.dedent("""
gene,linear
G1,1
G2,2
G3,3
""")
},
})
return scores_repo
def test_load_linear_gene_scores_from_resource(
scores_repo: GenomicResourceRepo) -> None:
res = scores_repo.get_resource("LinearHist")
assert res.get_type() == "gene_score"
result = build_gene_score_from_resource(res)
scores = result.get_scores()
assert len(scores) == 1
score_id = scores[0]
assert result.get_x_scale(score_id) == "linear"
assert result.get_y_scale(score_id) == "linear"
hist = result.get_histogram(score_id)
assert hist is not None
assert len(hist.bins) == 4
assert np.all(hist.bars == np.array([2, 2, 2]))
def test_load_log_gene_scores_from_resource(
        scores_repo: GenomicResourceRepo) -> None:
    """Log-histogram resource loads with a log x-axis and expected bars."""
    resource = scores_repo.get_resource("LogHist")
    assert resource.get_type() == "gene_score"

    gene_score = build_gene_score_from_resource(resource)
    score_ids = gene_score.get_scores()
    assert len(score_ids) == 1

    log_id = score_ids[0]
    assert gene_score.get_x_scale(log_id) == "log"
    assert gene_score.get_y_scale(log_id) == "linear"

    histogram = gene_score.get_histogram(log_id)
    assert histogram is not None
    assert len(histogram.bins) == 6
    assert np.all(histogram.bars == np.array([2, 1, 1, 1, 1]))
def test_load_wrong_resource_type(
        scores_repo: GenomicResourceRepo) -> None:
    """Building a gene score from a non-gene_score resource raises ValueError."""
    resource = scores_repo.get_resource("Oops")
    with pytest.raises(ValueError, match="invalid resource type Oops"):
        build_gene_score_from_resource(resource)
def test_load_gene_score_without_histogram(
        scores_repo: GenomicResourceRepo) -> None:
    """A score entry that lacks a histogram config is rejected."""
    resource = scores_repo.get_resource("OopsHist")
    expected_message = "Missing histogram config for linear in OopsHist"
    with pytest.raises(ValueError, match=expected_message):
        build_gene_score_from_resource(resource)
def test_load_gene_score_without_gene_scores(
        scores_repo: GenomicResourceRepo) -> None:
    """A gene_score resource without a scores section is rejected."""
    resource = scores_repo.get_resource("OopsScores")
    expected_message = "missing scores config in OopsScores"
    with pytest.raises(ValueError, match=expected_message):
        build_gene_score_from_resource(resource)
def test_gene_score(scores_repo: GenomicResourceRepo) -> None:
    """Per-gene values are retrievable by score id and gene symbol."""
    gene_score = build_gene_score_from_resource(
        scores_repo.get_resource("LinearHist"))
    assert gene_score is not None
    assert gene_score.get_gene_value("linear", "G2") == 2
    assert gene_score.get_gene_value("linear", "G3") == 3
def test_gene_score_nan(scores_repo: GenomicResourceRepo) -> None:
    """NaN rows stay in the raw frame but are dropped from the per-score frame."""
    gene_score = build_gene_score_from_resource(
        scores_repo.get_resource("NaNTest"))
    assert len(gene_score.df) == 6
    score_frame = gene_score.get_score_df("score1")
    assert len(score_frame) == 4
def test_calculate_histogram(scores_repo: GenomicResourceRepo) -> None:
    """_calc_histogram produces a serializable histogram for the score."""
    resource = scores_repo.get_resource("LinearHist")
    assert build_gene_score_from_resource(resource) is not None

    histogram = GeneScoreImplementation._calc_histogram(resource, "linear")
    assert histogram is not None
    view_min = histogram.config.view_range[0]
    print(view_min)
    print(type(view_min))
    print(histogram.serialize())
| iossifovlab/gpf | dae/dae/gene/tests/test_gene_score.py | test_gene_score.py | py | 8,621 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "dae.genomic_resources.testing.build_inmemory_test_repository",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dae.genomic_resources.repository.GR_CONF_FILE_NAME",
"line_num... |
32205094518 | import os
import torch
import argparse
import numpy as np
import PIL.Image as Image
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torch import nn, optim, autograd
from torchvision.transforms import transforms
from dataset import Train_Dataset, Validation_Dataset, Test_Dataset, Single_Train_Dataset
import skimage.io as io
import shutil
import stable_seed
stable_seed.setup_seed()
threshold = 0.5 # 二分类阈值
# 是否使用cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
# mask只需要转换为tensor
y_transforms = transforms.ToTensor()
l2_regularizer_weight = 0.001
lr = 0.001
penalty_anneal_iters = 300
penalty_weight = 0.01
def makedir(new_path):
    """Ensure ``new_path`` exists as a fresh, empty directory.

    Any existing directory at the path (and all of its contents) is removed
    first, so callers always start from an empty workspace.
    """
    if os.path.exists(new_path):
        shutil.rmtree(new_path)
    os.makedirs(new_path)
def init_work_space(args):
    """Recreate the project's results, checkpoint and runs directories."""
    for workspace_dir in ('./' + args.project_name + '/results',
                          args.ckpt,
                          './' + args.project_name + '/runs'):
        makedir(workspace_dir)
def train_model(args, writer, model, criterion, optimizer, dataload, regular=''):
    """Standard supervised training loop with per-epoch validation.

    Trains ``model`` on ``dataload``, logs loss/accuracy to TensorBoard via
    ``writer``, and checkpoints to ``args.ckpt`` whenever validation mIoU
    improves. ``regular`` is accepted for interface compatibility but unused.

    Returns the trained model.
    """
    save_epoch, best_val_acc, best_val_mIoU = 0, -0.1, -0.1
    for epoch in range(args.epoch):
        print('Epoch {}/{}'.format(epoch, args.epoch - 1))
        print('-' * 10)
        epoch_loss = 0
        epoch_correct_pixels, epoch_total_pixels = [], []
        step = 0
        for x, y in dataload:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + parameter update
            outputs = model(inputs).to(device)
            del inputs
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # pixel accuracy of the thresholded (binary) prediction
            predicted = outputs.detach().cpu().numpy()
            predicted[predicted >= threshold] = 1
            predicted[predicted < threshold] = 0
            correct = (predicted == labels.detach().cpu().numpy()).sum()
            del predicted
            # total number of label pixels in this batch
            pixel_num = 1.0
            for i in range(len(labels.size())):
                pixel_num *= labels.size()[i]
            epoch_correct_pixels.append(correct)
            epoch_total_pixels.append(pixel_num)
            epoch_loss += float(loss.item())
            del labels
            del loss
        val_accuracy, val_mIoU = validation(args, model, method='train')
        epoch_loss = epoch_loss / step
        epoch_train_accuracy = np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels)
        print(
            "epoch %d loss:%0.3f train accuracy:%0.3f val accuracy:%0.3f val mIoU:%0.3f best_val_acc:%0.4f best_mIoU:%0.4f" % (
                epoch, epoch_loss, epoch_train_accuracy, val_accuracy, val_mIoU, best_val_acc, best_val_mIoU))
        # Bug fix: epoch_loss is already divided by step above; the original
        # logged epoch_loss / step here (the mean loss divided twice), so the
        # TensorBoard curve disagreed with the printed value.
        writer.add_scalar('loss', epoch_loss, global_step=epoch)
        writer.add_scalar('train accuracy', epoch_train_accuracy, global_step=epoch)
        writer.add_scalar('validated accuracy', val_accuracy, global_step=epoch)
        writer.add_scalars('accuracy/group',
                           {'train_accuracy': epoch_train_accuracy, 'validated accuracy': val_accuracy},
                           global_step=epoch)
        if best_val_acc < val_accuracy:
            # Accuracy improvements are tracked but no longer checkpointed;
            # the checkpoint criterion is mIoU (below).
            best_val_acc = val_accuracy
        if best_val_mIoU < val_mIoU:
            save_epoch = epoch
            torch.save(model, args.ckpt + '/' + args.model + '.pth')
            best_val_mIoU = val_mIoU
    print("Model:", args.model)
    print("Dataset:", args.data_file)
    print("Best epoch is" + str(save_epoch))
    print("Best val acc is " + str(best_val_acc))
    print("Best val mIoU is " + str(best_val_mIoU))
    torch.cuda.empty_cache()
    return model
def mean_accuracy(outputs, labels):
    """Pixel-wise accuracy of thresholded predictions against binary labels.

    ``outputs`` and ``labels`` are tensors of the same shape; predictions are
    binarized at the module-level ``threshold`` before comparison.
    """
    # Bug fix: for a CPU tensor, .numpy() shares storage with `outputs`, so
    # thresholding in place clobbered the caller's tensor. train_IRM_model
    # reuses `outputs` for the IRM penalty after calling this function, so
    # we must work on a copy.
    predicted = outputs.detach().cpu().numpy().copy()
    predicted[predicted >= threshold] = 1
    predicted[predicted < threshold] = 0
    correct = (predicted == labels.detach().cpu().numpy()).sum()
    # total number of label pixels
    pixel_num = 1.0
    for i in range(len(labels.size())):
        pixel_num *= labels.size()[i]
    epoch_train_accuracy = np.mean(correct) / np.mean(pixel_num)
    return epoch_train_accuracy
# debug code
# a = torch.rand([10, 1, 20, 20])
# b = torch.randint(low=0, high=2, size=(10, 1, 20, 20))
# print(mean_accuracy(a, b)) # =0.5
def meanIoU(imgPredict, imgLabel, numClass=2):
    """Mean intersection-over-union between a prediction and its label.

    imgPredict: prediction array; thresholded in place to {0, 1} below.
    imgLabel: label tensor; moved to CPU here.
    numClass: number of classes (default 2, i.e. binary segmentation).
    """
    # ref:https://blog.csdn.net/sinat_29047129/article/details/103642140
    imgLabel = imgLabel.cpu()
    # NOTE(review): in-place thresholding mutates the caller's imgPredict;
    # the caller in validation() passes an already-binarized array, so this
    # is redundant but harmless there -- confirm for other call sites.
    imgPredict[imgPredict >= threshold] = 1
    imgPredict[imgPredict < threshold] = 0

    def genConfusionMatrix(numClass, imgPredict, imgLabel):
        # remove classes from unlabeled pixels in gt image and predict
        mask = (imgLabel >= 0) & (imgLabel < numClass)
        # Encode (label, prediction) pairs as a single class index
        # label*numClass + prediction, then count occurrences.
        label = numClass * imgLabel[mask] + imgPredict[mask]
        # NOTE(review): np.bincount requires integer input; this relies on
        # the pairing above yielding integral values -- confirm dtypes.
        count = np.bincount(label, minlength=numClass ** 2)
        confusionMatrix = count.reshape(numClass, numClass)
        return confusionMatrix

    confusionMatrix = genConfusionMatrix(numClass, imgPredict, imgLabel)
    # Intersection = TP   Union = TP + FP + FN
    # IoU = TP / (TP + FP + FN)
    intersection = np.diag(confusionMatrix)  # diagonal = per-class true positives
    # axis=1 sums rows (ground-truth totals); axis=0 sums columns
    # (prediction totals); subtracting the diagonal avoids double counting.
    union = np.sum(confusionMatrix, axis=1) + np.sum(confusionMatrix, axis=0) - np.diag(
        confusionMatrix)
    IoU = intersection / union  # per-class IoU values
    mIoU = np.nanmean(IoU)  # average over classes, ignoring NaN (empty classes)
    return mIoU
def penalty(logits, y, criterion=nn.BCELoss()):
    """IRM penalty: squared gradient of the risk w.r.t. a dummy scale of 1.

    Multiplies the logits by a scalar ``1.0`` that requires grad, evaluates
    ``criterion`` on the scaled logits, and returns the sum of squares of the
    gradient with respect to that scalar (create_graph=True keeps the result
    differentiable).
    """
    dummy_scale = torch.tensor(1.).requires_grad_()
    risk = criterion(logits * dummy_scale, y)
    # reference https://blog.csdn.net/qq_36556893/article/details/91982925
    (grad_wrt_scale,) = autograd.grad(risk, [dummy_scale], create_graph=True)
    return (grad_wrt_scale ** 2).sum()
def train_IRM_model(args, writer, model, criterion, optimizer, env_dataloaders, regular=''):
    """IRM-style training: accumulate per-environment losses, then one
    optimizer step per epoch over the combined (L2-regularized) risk.

    env_dataloaders: one DataLoader per training environment; the summary
    statistics below deliberately skip the last environment.
    regular: accepted for interface compatibility, unused.
    Returns the trained model.
    """
    # def penalty(logits, y):
    #     scale = torch.tensor(1.).to(device).requires_grad_()
    #     loss = criterion(logits * scale, y)
    #     grad = autograd.grad(loss, [scale], create_graph=True)[
    #         0]  # reference https://blog.csdn.net/qq_36556893/article/details/91982925
    #     return torch.sum(grad ** 2)
    global l2_regularizer_weight
    global lr
    global penalty_anneal_iters
    global penalty_weight
    save_epoch, best_val_acc = 0, -0.1
    for epoch in range(args.epoch):
        print('Epoch {}/{}'.format(epoch, args.epoch - 1))
        print('-' * 10)
        optimizer.zero_grad()
        envs = []
        for env_dataloader in env_dataloaders:
            # Per-environment negative log-likelihoods, accuracies, penalties.
            env = {'nll': [], 'acc': [], 'penalty': []}
            for x, y in env_dataloader:
                inputs = x  # .half()
                labels = y  # .half()
                outputs = model(inputs.to(device)).cpu()
                # NOTE(review): torch.nan_to_num is out-of-place; these two
                # results are discarded, so NaN/inf values are NOT actually
                # cleaned here -- likely should be outputs = torch.nan_to_num(...).
                torch.nan_to_num(outputs, nan=1e-8, posinf=1.0 - 1e-8)
                torch.nan_to_num(labels, nan=1e-8, posinf=1.0 - 1e-8)
                env['nll'].append(criterion(outputs, labels))
                env['acc'].append(mean_accuracy(outputs, labels))
                env['penalty'].append(penalty(outputs, labels))
                del inputs, labels, outputs
                torch.cuda.empty_cache()
            envs.append(env)
        # Aggregate all environments except the last one.
        mid_train_nll, mid_train_acc, mid_train_penalty = [], [], []
        for i_env in range(len(env_dataloaders) - 1):
            mid_train_nll.extend(envs[i_env]['nll'])
            mid_train_acc.extend(envs[i_env]['acc'])
            mid_train_penalty.extend(envs[i_env]['penalty'])
        print('mid_train_penalty', mid_train_penalty)
        train_nll = torch.stack(mid_train_nll).mean()
        train_acc = float(np.mean(mid_train_acc))
        train_penalty = torch.stack(mid_train_penalty).mean()
        # L2 norm of all model parameters, used as weight decay.
        weight_norm = torch.tensor(0.).to(device)
        for w in model.parameters():
            weight_norm += w.norm().pow(2)
        loss = train_nll.clone().to(device)
        loss += l2_regularizer_weight * weight_norm
        # The IRM penalty term itself is currently disabled:
        # penalty_weight = (penalty_weight if epoch >= penalty_anneal_iters else 1.0)
        # loss += penalty_weight * train_penalty
        if penalty_weight > 1.0:
            # Rescale the entire loss to keep gradients in a reasonable range
            loss /= penalty_weight
        loss.backward()
        optimizer.step()
        val_accuracy, val_mIoU = validation(args, model, method='train')
        print("epoch %d loss:%0.3f train accuracy:%0.3f val accuracy:%0.3f train_penalty:%0.4f best_val_acc:%0.4f" % (
            epoch, float(loss.item()), train_acc, val_accuracy, penalty_weight * train_penalty, best_val_acc))
        writer.add_scalar('loss', train_nll, global_step=epoch)
        writer.add_scalar('train accuracy', train_acc, global_step=epoch)
        writer.add_scalar('validated accuracy', val_accuracy, global_step=epoch)
        writer.add_scalars('accuracy/group',
                           {'train_accuracy': train_acc, 'validated accuracy': val_accuracy},
                           global_step=epoch)
        # Checkpoint on best validation accuracy.
        if best_val_acc < val_accuracy:
            save_epoch = epoch
            torch.save(model, args.ckpt + '/' + args.model + '.pth')
            best_val_acc = val_accuracy
    print("Model:", args.model)
    print("Dataset:", args.data_file)
    print("Best epoch is" + str(save_epoch))
    print("Best val acc is " + str(best_val_acc))
    torch.cuda.empty_cache()
    return model
# 训练模型
def train(args, writer, model, regular=''):
    """Dispatch training: IRM multi-environment loop or plain supervised loop.

    With args.loss == "IRM", builds one dataloader per training fold
    (skipping the held-out fold args.k_fold) and runs train_IRM_model;
    otherwise runs the standard train_model loop on the full training set.
    """
    model.to(device)
    if args.loss == "IRM":
        criterion = nn.BCEWithLogitsLoss()  # nn.BCELoss()
        optimizer = optim.Adam(model.parameters(), )
        env_dataloaders = []
        for fold in range(args.total_folds):
            if fold == args.k_fold:
                continue  # this fold is held out for validation
            fold_dataset = Single_Train_Dataset(args.data_file, args, transform=x_transforms,
                                                target_transform=y_transforms, k_single_set=fold)
            fold_loader = DataLoader(fold_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
            print('mid_env_dataloader', len(fold_loader))
            env_dataloaders.append(fold_loader)
        train_IRM_model(args, writer, model, criterion, optimizer, env_dataloaders, regular)
    else:
        criterion = nn.BCELoss()
        optimizer = optim.Adam(model.parameters(), )
        train_dataset = Train_Dataset(args.data_file, args, transform=x_transforms, target_transform=y_transforms)
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
        train_model(args, writer, model, criterion, optimizer, train_loader, regular)
# 用于测试模型在有image有label的数据中的表现
def validation(args, model, print_each=False, method='train'):
    """Evaluate `model` on the validation set (images with labels).

    print_each: if True, print the per-sample accuracy as it is computed.
    method: 'train' uses batch size 8; any other value uses batch size 1.
    Returns (mean pixel accuracy, mean IoU) over the whole validation set.
    """
    liver_dataset = Validation_Dataset(args.data_file, args, transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    if method == 'train':
        # Larger batches during training-time validation for speed.
        dataloaders = DataLoader(liver_dataset, batch_size=8)
    model.eval()
    epoch_correct_pixels, epoch_total_pixels, epoch_acc, epoch_mIoU = [], [], [], []
    with torch.no_grad():
        for x, y, x_path in dataloaders:
            inputs = x.to(device)
            labels = y.to(device)
            predicted = model(inputs).detach().cpu().numpy()
            # Binarize at the module-level threshold.
            predicted[predicted >= threshold] = 1
            predicted[predicted < threshold] = 0
            correct = (predicted == labels.detach().cpu().numpy()).sum()
            # Total number of label pixels in this batch.
            pixel_num = 1.0
            for i in range(len(labels.size())):
                pixel_num *= labels.size()[i]
            epoch_correct_pixels.append(correct)
            epoch_total_pixels.append(pixel_num)
            epoch_mIoU.append(meanIoU(predicted, labels))
            if print_each:
                print(x_path, 'acc', correct / pixel_num)
                mid_x_path = x_path  # NOTE(review): assigned but never used
            epoch_acc.append(correct / pixel_num)
    if print_each:
        print('\nepoch_acc', epoch_acc, '\nepoch_mIoU', epoch_mIoU)
    return np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels), np.mean(epoch_mIoU)
# 用于测试只有image但没有label的数据
def test(args, save_gray=False, manual=False, weight_path=''):
    """Run inference on the unlabeled test set and save predictions as PNGs.

    save_gray: also save the raw (pre-threshold) prediction as a grayscale image.
    manual: load weights from `weight_path` instead of the default checkpoint.
    Outputs are written under <project_name>/results/.
    """
    model = None
    if not manual:
        model = torch.load(args.ckpt + '/' + args.model + '.pth', map_location='cpu')
    if manual:
        model = torch.load(weight_path, map_location='cpu')  # use certain model weight.
    liver_dataset = Test_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for x, pic_name_i in dataloaders:
            pic_name_i = pic_name_i[0]
            mid_x = torch.squeeze(x).numpy()
            if len(mid_x.shape) == 2:
                # Single-channel input: save it next to the prediction.
                io.imsave(args.project_name + "/results/" + pic_name_i.split('.')[0] + "_x.png", mid_x)
            elif len(mid_x.shape) == 3:
                # Multi-channel input: keep only the first channel (not saved).
                mid_x_image = np.array(mid_x[0])
                # io.imsave(args.project_name + "/results/" + pic_name_i.split('.')[0] + "_x.png", mid_x)
            predict = model(x)
            predict = torch.squeeze(predict).detach().numpy()
            if save_gray:
                io.imsave(args.project_name + "/results/" + pic_name_i.split('.')[0] + "_gray_pre.png", predict)
            # Binarize at the module-level threshold before saving the mask.
            predict[predict >= threshold] = 1
            predict[predict < threshold] = 0
            io.imsave(args.project_name + "/results/" + pic_name_i.split('.')[0] + "_label_pre.png", predict)
class SaveOutput:
    """Forward-hook callable that records every module output it observes."""

    def __init__(self):
        self.outputs = []

    def __call__(self, module, module_in, module_out):
        """PyTorch forward-hook signature (module, input, output); keep the output."""
        self.outputs.append(module_out)

    def clear(self):
        """Forget all recorded outputs (rebinds the list, like the original)."""
        self.outputs = []
def model_forward_visualization(image_path, weight_path, model_name=''):
    """Given a test image and trained model weights, visualize the output of
    every convolution layer as grids of feature-map images saved under
    ./model_visualization/<model_name>/."""
    model = torch.load(weight_path, map_location='cpu')  # load trained model

    save_output = SaveOutput()  # register hooks for each layer
    hook_handles, k1, k2 = [], 0, 0  # k1/k2 count all modules / conv modules
    for layer in model.modules():
        k1 += 1
        if isinstance(layer, torch.nn.modules.conv.Conv2d):
            k2 += 1
            handle = layer.register_forward_hook(save_output)
            hook_handles.append(handle)

    # Load image as grayscale, resize to 512x512, add a batch dimension.
    x = x_transforms(Image.open(image_path).convert('L').resize(size=(512, 512))).unsqueeze(0)
    print(x, x.dtype)
    y = model(x)  # forward pass fills save_output.outputs via the hooks

    def module_output_to_numpy(tensor):
        return tensor.detach().to('cpu').numpy()

    for layer_idx in range(len(save_output.outputs)):
        images = module_output_to_numpy(save_output.outputs[layer_idx])
        # index 0 below selects the first (and only) element of the batch
        print(type(images))
        print(images.shape)
        mid_1 = images.shape[1]
        mid_idx = 0
        while mid_idx < mid_1:
            # mid_idx is the index of feature
            with plt.style.context("seaborn-white"):
                plt.figure(frameon=False)
                # Up to 64 feature maps per 8x8 figure.
                for idx in range(64):
                    # idx is the index of subplot
                    if mid_idx == mid_1:
                        break
                    plt.subplot(8, 8, idx + 1)
                    plt.imshow(images[0, mid_idx])
                    mid_idx += 1
                plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])
                plt.savefig(
                    './model_visualization/' + model_name + '/layer_' + str(layer_idx) + '_mid_' + str(mid_idx) + '.png')
                plt.cla()
                plt.close('all')
def model_print(model):
    """Return {'Total': n, 'Trainable': m} parameter counts for a torch model."""
    total, trainable = 0, 0
    for param in model.parameters():
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
    return {'Total': total, 'Trainable': trainable}
| Jichao-Wang/MDOAU2-net | wjc_core.py | wjc_core.py | py | 16,861 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "stable_seed.setup_seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cud... |
14889123182 | import re
import os
def readFile():
    """Yield the lines (newline included) of ~/listaSadow/listaSadow.txt."""
    path = os.path.join(os.path.expanduser('~'), 'listaSadow', 'listaSadow.txt')
    with open(path, "r") as file:
        yield from file
def writeMongoCommand(listaCommand):
    """Write each command to ~/listaSadow/dbPopulate.txt, one per line."""
    path = os.path.join(os.path.expanduser('~'), 'listaSadow', 'dbPopulate.txt')
    with open(path, "w") as file:
        file.writelines(command + "\n" for command in listaCommand)
court_name = "TT"
s = "Sąd Rejonowy w Wysokiem Mazowieckiem dla miasta Skarżysko-Kamienna oraz gmin: Bliżyn, Łączna, Skarżysko Kościelne i Suchedniów"
def takeBiggerIndex(index, list_of_indexes):
    """Return the first element of list_of_indexes strictly greater than
    index, or None when there is no such element."""
    return next((candidate for candidate in list_of_indexes if candidate > index), None)
def takeString(s, start, stop):
    """Slice ``s`` from ``start`` up to ``stop``.

    A falsy ``stop`` (None or 0) means "slice to the end of the string",
    matching the original truthiness check on ``stop``.
    """
    return s[start:stop or None]
def getCourtName(s: str):
    """Return the text before the first literal "dla" in ``s``.

    Preserves the historical quirks of the takeString-based version:
    when "dla" is absent, find() returns -1 and the final character is
    trimmed; a match at position 0 yields the whole string.
    """
    boundary = s.find("dla")
    return s[:boundary] if boundary else s[:]
def separate(s: str):
    """Split a court-coverage line into its city / region / city-district
    fragments, each running from its keyword to the next keyword's start."""
    keywords = {"city": "miasta", "region": "gmina", "city_parts": "dzielnic"}
    starts = {fragment: s.find(keyword) for fragment, keyword in keywords.items()}
    # Same ordering as the original [city, region, city_part] list, so
    # takeBiggerIndex picks the same boundary for each fragment.
    all_starts = list(starts.values())
    return {fragment: takeString(s, start, takeBiggerIndex(start, all_starts))
            for fragment, start in starts.items()}
def generateListOfResults(s: str):
    """Run capitalized-word extraction over each fragment of the line."""
    fragments = separate(s)
    return {
        "citys": regexCreateArray(fragments["city"]),
        "regions": regexCreateArray(fragments["region"]),
        "city_parts": regexCreateArray(fragments["city_parts"]),
    }
def regexCreateArray(s):
    """Extract words starting with a capital letter (incl. Polish capitals)."""
    capitalized_word = re.compile(r"\b[A-ZŚĆĘŻŹ]\w+")
    return capitalized_word.findall(s)
def createMongoCommand(s):
    """Render one coverage line as a mongo-shell insert command string."""
    name = getCourtName(s)
    generatedResults = generateListOfResults(s)
    citys = generatedResults["citys"]
    regions = generatedResults["regions"]
    city_parts = generatedResults["city_parts"]
    # Bug fix: the original template omitted the comma after coverCitys:{},
    # producing a syntactically invalid insert document (compare the dict
    # built by createObjToInsert, which has all four fields separated).
    template = 'db.courtCollection.insert({{name:"{}", level:"rejonowy", coverCitys:{}, coverRegions:{}, coverCityParts:{}}})'.format(
        name, citys, regions, city_parts)
    return template
def createObjToInsert(s):
    """Build the document inserted into Mongo for one coverage line.

    The court name drops its trailing character (historically a trailing
    space left by getCourtName).
    """
    extracted = generateListOfResults(s)
    return {
        "name": getCourtName(s)[0:-1],
        "level": "rejonowy",
        "coverCitys": extracted["citys"],
        "coverRegions": extracted["regions"],
        "coverCityParts": extracted["city_parts"],
    }
# createMongoCommand(s)
# separate(s)
# readFile()
from random import randint
from pymongo import MongoClient
client = MongoClient("192.168.1.143", 27017)
db = client.dbs
def createInputToMongoDb():
    """Insert one document per input line into courtCollection and dump the
    equivalent mongo-shell commands to dbPopulate.txt."""
    commands = []
    for line in readFile():
        commands.append(createMongoCommand(line))
        document = createObjToInsert(line)
        print(document)
        db.courtCollection.insert_one(document)
    writeMongoCommand(commands)
# createInputToMongoDb()
# print(createObjToInsert(s))
def createCourtCollection():
    """For each 'name ; coverage' input line, update the matching court
    document's coverArea field."""
    for line in readFile():
        separator = line.find(";")
        # Name is everything before the character preceding ';';
        # coverage starts 5 characters after the ';'.
        court_name = line[:separator - 1]
        wlasciwosc = line[separator + 5:]
        print(court_name)
        print(wlasciwosc)
        db.court.update_one({"name": court_name}, {"$set": {"coverArea": wlasciwosc}})
createCourtCollection() | tomektarabasz/courtApi | app/excel_to_db/dbparser.py | dbparser.py | py | 3,812 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
4415210155 | import numpy as np
import _pickle as pickle
import copy
from collections import OrderedDict
from ConstantVariables import *
from pathlib import Path
class Network:
def __init__(self, name='', inputs=0, outputs=0):
self.name = name
self.inputs = inputs
self.outputs = outputs
self.layers_info = []
self.layers = {}
self.connections = OrderedDict()
self.input_data_set = []
self.output_data_set = []
def add_layer(self, amount):
self.layers_info.append(amount)
def add_data_set(self, input_data_set, output_data_set):
if len(input_data_set) == len(output_data_set):
self.input_data_set = input_data_set
self.output_data_set = output_data_set
def connect(self):
self.layers['input'] = 0
current_layer = self.inputs
current_layer_name = 'input'
for index, value in enumerate(self.layers_info):
self.layers['hidden_' + str(index)] = 0
self.connections[current_layer_name +'_to_' + 'hidden_' + str(index)] = 2 * np.random.random((current_layer, value)) - 1
current_layer = value
current_layer_name = 'hidden_' + str(index)
self.layers['output'] = 0
self.connections[current_layer_name +'_to_output'] = 2 * np.random.random((current_layer, self.outputs)) - 1
def activate(self, input_data):
self.layers['input'] = input_data
current_layer_name = 'input'
for index, value in enumerate(self.layers_info):
connections = self.connections[current_layer_name +'_to_' + 'hidden_' + str(index)]
self.layers['hidden_' + str(index)] = self.sigmoid(np.dot(self.layers[current_layer_name], connections))
current_layer_name = 'hidden_' + str(index)
connections = self.connections[current_layer_name + '_to_output']
self.layers['output'] = self.sigmoid(np.dot(self.layers[current_layer_name], connections))
def learn(self, learning_rate=0.1):
self.activate(self.input_data_set)
l2_error = self.output_data_set - self.layers['output']
print("Error:" + str(np.mean(np.abs(l2_error))))
error = l2_error
delta = None
for name in reversed(self.connections):
start, end = name.split('_to_')
self.connections[name], delta = self.get_weight(
self.connections[name],
self.layers[start],
self.layers[end],
error,
learning_rate
)
error = delta.dot(self.connections[name].T)
def get_weight(self, connections_layer, start_layer, end_layer, error, learning_rate):
delta = error * self.sigmoid(end_layer, deriv=True)
connections_layer += start_layer.T.dot(delta) * learning_rate
return connections_layer, delta
def print_output(self, converted=False):
output = self.get_output(convert=converted)
if converted:
if output.shape == (4,):
if output[0]:
print('up')
if output[1]:
print('right')
if output[2]:
print('down')
if output[3]:
print('left')
else:
for result in output:
print(result)
if result[0]:
print('up')
if result[1]:
print('right')
if result[2]:
print('down')
if result[3]:
print('left')
print('---------')
else:
print(self.layers['output'])
def convert_output(self):
if len(self.layers['output'].shape) > 1:
for (x, y), result in np.ndenumerate(self.layers['output']):
if result > 0.6:
self.layers['output'][x][y] = 1
else:
self.layers['output'][x][y] = 0
else:
for x, result in np.ndenumerate(self.layers['output']):
if result > 0.6:
self.layers['output'][x] = 1
else:
self.layers['output'][x] = 0
def get_output(self, convert=False):
if convert:
self.convert_output()
return self.layers['output']
def sigmoid(self, x, deriv=False):
if deriv:
return x * (1 - x)
return 1 / (1 + np.exp(-x))
def save(self):
with open(networks_directory_save + 'Network_' + self.name + '.dump', 'wb') as output:
pickle.dump(copy.copy(self), output, -1)
def load(self):
file = Path(networks_directory_save + 'Network_' + self.name + '.dump')
if file.exists():
with open(networks_directory_save + 'Network_' + self.name + '.dump', 'rb') as input:
network = pickle.load(input)
return network
else:
with open(networks_directory_save + 'Network_1.dump', 'rb') as input:
network = pickle.load(input)
network.name = self.name
# network.save()
return network
| Robinamixan/Goblins-pygame | Networks/Network.py | Network.py | py | 5,303 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.r... |
2714834337 | from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LoginView
from django.shortcuts import get_object_or_404
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.views import View
from .models import *
from django.contrib.auth.models import User
from .serializers import *
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from django_filters.rest_framework import DjangoFilterBackend
@api_view(['GET', 'POST'])
def genre_list(request):
    """GET: list every genre. POST: create a genre from the request body."""
    if request.method == 'POST':
        serializer = GenreSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET -- api_view only lets GET or POST through.
    serializer = GenreSerializer(Genre.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def genre_detail(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) a single genre."""
    try:
        genre = Genre.objects.get(pk=pk)
    except Genre.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        genre.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = GenreSerializer(genre, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    # GET
    return Response(GenreSerializer(genre).data)
@api_view(['GET', 'POST'])
def movie_list(request):
    """GET: list every movie. POST: create a movie from the request body."""
    if request.method == 'POST':
        serializer = MovieSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET -- api_view only lets GET or POST through.
    serializer = MovieSerializer(Movie.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def movie_detail(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) a single movie."""
    try:
        movie = Movie.objects.get(pk=pk)
    except Movie.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        movie.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = MovieSerializer(movie, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    # GET
    return Response(MovieSerializer(movie).data)
@api_view(['GET', 'POST'])
def raitingStar_list(request):
    """GET: list every rating star. POST: create one from the request body."""
    if request.method == 'POST':
        serializer = RaitingStarSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET -- api_view only lets GET or POST through.
    serializer = RaitingStarSerializer(RaitingStar.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def raitingStar_detail(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) a single rating star."""
    try:
        star = RaitingStar.objects.get(pk=pk)
    except RaitingStar.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        star.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = RaitingStarSerializer(star, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    # GET
    return Response(RaitingStarSerializer(star).data)
@api_view(['GET', 'POST'])
def reviews_list(request):
    """GET: list every review. POST: create a review from the request body."""
    if request.method == 'POST':
        serializer = ReviewsSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET -- api_view only lets GET or POST through.
    serializer = ReviewsSerializer(Reviews.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['POST'])
def aut(request):
    """Credential check: 200 when username/password match, 404 otherwise.

    Security fixes versus the original:
    * Django stores hashed passwords, so the original plain string comparison
      ``user.password == request.data['password']`` could never match a
      properly hashed credential; use ``check_password`` instead.
    * The original echoed the submitted password back in the 404 response
      body; the response body is now empty.
    """
    try:
        user = User.objects.get(username=request.data['username'])
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if user.check_password(request.data['password']):
        return Response(status=status.HTTP_200_OK)
    return Response(status=status.HTTP_404_NOT_FOUND)
@api_view(['GET', 'PUT', 'DELETE'])
def reviews_detail(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) a single review."""
    try:
        review = Reviews.objects.get(pk=pk)
    except Reviews.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        review.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = ReviewsSerializer(review, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    # GET
    return Response(ReviewsSerializer(review).data)
@api_view(['GET', 'POST'])
def raiting_list(request):
    """GET: list every rating. POST: create a rating from the request body."""
    if request.method == 'POST':
        serializer = RaitingSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET -- api_view only lets GET or POST through.
    serializer = RaitingSerializer(Raiting.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def raiting_detail(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) a single rating."""
    try:
        raiting = Raiting.objects.get(pk=pk)
    except Raiting.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        raiting.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = RaitingSerializer(raiting, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    # GET
    return Response(RaitingSerializer(raiting).data)
| artemovaka22pv191/back_movie | views.py | views.py | py | 7,622 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.response.Response",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_201_CREATED",
"line_number": 28,
"usage_type... |
18480733171 | from typing import List
class UserCard():
    # Class-level registry of every assigned card id (shared across instances).
    gCardIds = []

    def __init__(self, tPackage: tuple=()) -> None:
        """
        Parameters
        ----------
        tPackage : tuple
            (id, userId, cardCode, level, bond, userTags, moves, skills)
            + id : str
            + userId : str
            + code : str
            + level : int
            + bond : int
            + userTags : iterable
            + moves : iterable
            + skills : iterable
        """
        # NOTE(review): assigning self.mId triggers the mId property setter
        # below, which calls int(tCardId) -- int('') raises ValueError, and
        # the setter never stores a value, so the property getter (which
        # returns self.mId) recurses on itself. This class looks broken as
        # written -- confirm before relying on it.
        self.mId = ''
        self.mUserId = ''
        self.mCode = ''
        self.mLevel = 0
        self.mExp = 0
        self.mBond = 0
        self._mUserTags = []
        self.mMoves = {}
        self.mSkills = []

        if tPackage:
            self.mId, self.mUserId, self.mCode, self.mLevel, self.mBond, tUserTags, self.mMoves, self.mSkills = tPackage
            # _mUserTags
            for userTag in tUserTags:
                self.AddUserTag(userTag)
        return

    def GetUserTags(self) -> List[str]:
        # Returns the live list, not a copy.
        return self._mUserTags
    def SetUserTags(self, tUserTags: list) -> None:
        # Replaces the whole tag list (no dedup check, unlike AddUserTag).
        self._mUserTags = tUserTags
    def AddUserTag(self, tUserTag: str) -> None:
        """
        Add user's tag. If dupes is found, return False.
        """
        # NOTE(review): returns False on duplicate but None on success,
        # despite the -> None annotation.
        if tUserTag in self._mUserTags: return False
        self._mUserTags.append(tUserTag)

    @property
    def mId(self):
        # NOTE(review): self.mId refers to this property itself ->
        # infinite recursion on read; no backing attribute is ever set.
        return self.mId
    @mId.setter
    def mId(self, tCardId):
        if int(tCardId) == -1: # Find best fit in the ID list
            i = 0
            while True:
                if int(tCardId) not in UserCard.gCardIds:
                    tCardId = str(i)
                    break
                i += 1
        UserCard.gCardIds.append(tCardId)
return tCardId | kaleidocli/gachaSim | model/user/UserCard.py | UserCard.py | py | 1,776 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 43,
"usage_type": "name"
}
] |
30045234159 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for serializing responses
"""
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from senlin.common import exception
from senlin.common.i18n import _
LOG = logging.getLogger(__name__)
def is_json_content_type(request):
    """Return True when the request's body should be parsed as JSON.

    A missing or text/plain content type is treated as JSON, but the body
    must still look like a JSON object (start with '{').
    """
    ctype = request.content_type
    if not ctype or ctype.startswith('text/plain'):
        ctype = 'application/json'
    looks_json = ctype in ('JSON', 'application/json')
    return looks_json and request.body.startswith(b'{')
class JSONRequestDeserializer(object):
    """Deserialize incoming webob requests with JSON bodies."""
    def has_body(self, request):
        """Return whether a Webob.Request object will possess an entity body.
        :param request: A Webob.Request object
        """
        if request is None or request.content_length is None:
            return False
        if request.content_length > 0 and is_json_content_type(request):
            return True
        return False
    def from_json(self, datastring):
        """Parse *datastring* as JSON, enforcing the configured size limit.

        Raises RequestLimitExceeded when the body is too large, and converts
        JSON parse errors (ValueError from jsonutils.loads) into HTTP 400.
        """
        try:
            if len(datastring) > cfg.CONF.senlin_api.max_json_body_size:
                msg = _('JSON body size (%(len)s bytes) exceeds maximum '
                        'allowed size (%(limit)s bytes).'
                        ) % {'len': len(datastring),
                             'limit': cfg.CONF.senlin_api.max_json_body_size}
                raise exception.RequestLimitExceeded(message=msg)
            return jsonutils.loads(datastring)
        except ValueError as ex:
            raise webob.exc.HTTPBadRequest(str(ex))
    def default(self, request):
        """Return {'body': parsed_json} for requests with a JSON body, else {}."""
        if self.has_body(request):
            return {'body': self.from_json(request.body)}
        else:
            return {}
class JSONResponseSerializer(object):
    """Serialize API results into JSON webob responses."""
    def to_json(self, data):
        """Serialize *data* to a JSON string with sorted keys."""
        def sanitizer(obj):
            # datetimes become ISO-8601 strings; any other non-JSON-native
            # type falls back to str() instead of raising.
            if isinstance(obj, datetime.datetime):
                return obj.isoformat()
            return str(obj)
        response = jsonutils.dumps(data, default=sanitizer, sort_keys=True)
        LOG.debug("JSON response : %s", response)
        return response
    def default(self, response, result):
        """Write *result* as the JSON body of the given webob *response*."""
        response.content_type = 'application/json'
        response.body = encodeutils.safe_encode(self.to_json(result))
| openstack/senlin | senlin/api/common/serializers.py | serializers.py | py | 2,897 | python | en | code | 44 | github-code | 1 | [
{
"api_name": "oslo_log.log.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "oslo_log.log",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "oslo_config.cfg.CONF",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "oslo_co... |
17287949508 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 29 13:12:03 2021
@author: thuang
Plots the sensitivity delta = master_branch - my_branch
for 1e3 and 1e4.
"""
from matplotlib import pyplot as plt
import numpy as np
class delta_bars:
    """Bar plots of per-mode sensitivity delta at BER 1e-3 and 1e-4."""
    def __init__(self, data_dict, tolerance):
        """
        Parameters
        ----------
        data_dict : dictionary
            A dictionary containing the data to write to the Excel file.
            Dictionary shall contain: {'names':[name1, name2], mode_id:{t1_blf:{'T1': t1, 'BLF': blf, 'name1':[1e3, 1e4], 'name2':[1e3, 1e4], 'delta':[1e3, 1e4]}}}
        Returns
        -------
        None.
        """
        # Shallow copy so popping 'names' later does not mutate the caller's
        # top-level dict (nested per-mode dicts are still shared).
        self.data_dict = data_dict.copy()
        # Largest tolerated negative delta (dBm); worse marks the mode red.
        self.tolerance = tolerance
    def plot_delta(self):
        """
        Plots the delta for each scenario of each mode as a bar graph.
        """
        self.data_dict.pop('names', None)
        num_modes = len(self.data_dict) #number of modes
        num_cols = int(np.ceil(np.sqrt(num_modes))) #make the plot grid as square as possible to save space
        num_rows = num_cols
        # subplots returns (figure, axes); the starred target wraps the axes
        # (array or single Axes) in a one-element list.
        fig1, *ax = plt.subplots(num_rows, num_cols)
        fig1.tight_layout()
        if num_cols*num_rows>1: #if only one axis, then can't flatten
            ax = ax[0].flatten('F')
        if num_modes < len(ax): #remove excess axes exceeding the number of plots
            for this_ax in ax[num_modes:]:
                this_ax.remove()
        for n, mode_id in enumerate(self.data_dict.keys()):
            labels = []
            delta_e3 = []
            delta_e4 = []
            # Collect one "T1,BLF" label and the two deltas per scenario.
            for t1_blf in self.data_dict[mode_id].keys():
                labels += [self.data_dict[mode_id][t1_blf]['T1'] + ',' + self.data_dict[mode_id][t1_blf]['BLF']]
                delta_e3 += [self.data_dict[mode_id][t1_blf]['delta'][0]]
                delta_e4 += [self.data_dict[mode_id][t1_blf]['delta'][1]]
            # Paired bars: 1e-3 at integer x positions, 1e-4 offset by 0.25.
            ax[n].bar(np.arange(len(labels)), delta_e3, tick_label=labels, color='tab:blue', width=.25)
            ax[n].bar(np.arange(len(labels))+.25, delta_e4, tick_label=labels, color='tab:red', width=.25)
            ax[n].legend(['1e-3', '1e-4'], fontsize='xx-large')
            ax[n].grid(True, which='both', axis='both')
            ax[n].set_xlabel('T1, BLF', fontsize='xx-large')
            ax[n].set_ylabel(chr(916)+'sensitivity (dBm)', fontsize='xx-large') # chr(916) is the Greek capital delta
            if (np.array(delta_e3) < -self.tolerance).any():
                # Any 1e-3 delta worse than -tolerance flags the whole mode.
                ax[n].set_title(f'Mode {mode_id}', fontsize='xx-large', color='red')
            else:
                ax[n].set_title(f'Mode {mode_id}', fontsize='xx-large', color='green')
            ax[n].tick_params(axis='both', labelsize='xx-large')
            ax[n].minorticks_on()
| thuang-work/txrx_sync_rssi | plot_ber_delta.py | plot_ber_delta.py | py | 2,778 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ceil",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
23870021739 | """
This script is meant to be iterated many times to replicate the spawning of many error popups.
"""
from PySide2 import QtWidgets
import random
from PySide2.QtWidgets import QMainWindow, QApplication
from PySide2.QtGui import QIcon
from configparser import ConfigParser
import sys
# The Qt application object must exist before any widgets are created.
app = QApplication(sys.argv)
appcfg = ConfigParser()
# Popup text, severity, widget style and icon all come from this ini file.
appcfg.read('configs/appsettings.ini')
# Map the configured style index to a Qt widget style name.
if appcfg.getint('popup_properties', 'popup_style_index') == 0:
    app.setStyle('windowsvista')
elif appcfg.getint('popup_properties', 'popup_style_index') == 1:
    app.setStyle('Fusion')
elif appcfg.getint('popup_properties', 'popup_style_index') == 2:
    app.setStyle('Windows')
class MainWindow(QMainWindow):
    """One-shot window: shows a configured message box, then exits the process."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.msg = QtWidgets.QMessageBox(self)
        # Title and body text come from the ini, so the popup is configurable.
        self.msg.setWindowTitle(appcfg.get('popup_strings', 'popup_win_title'))
        self.msg.setText(appcfg.get('popup_strings', 'popup_message'))
        # Random position so repeated launches scatter popups over the screen.
        self.msg.move(random.randint(0, 1000), random.randint(0, 600))
        # Map the configured type index to a message-box severity icon.
        if appcfg.getint('popup_properties', 'popup_type_index') == 0:
            self.msg.setIcon(QtWidgets.QMessageBox.Information)
        elif appcfg.getint('popup_properties', 'popup_type_index') == 1:
            self.msg.setIcon(QtWidgets.QMessageBox.Question)
        elif appcfg.getint('popup_properties', 'popup_type_index') == 2:
            self.msg.setIcon(QtWidgets.QMessageBox.Warning)
        elif appcfg.getint('popup_properties', 'popup_type_index') == 3:
            self.msg.setIcon(QtWidgets.QMessageBox.Critical)
        self.setWindowIcon(QIcon(appcfg.get('popup_properties', 'popup_icon_path')))
        # Blocks until the box is dismissed, then kills the process — this
        # script is spawned once per popup (see module docstring).
        self.msg.exec_()
        sys.exit()
def window():
    """Create the popup and run the Qt event loop until the process exits."""
    global app
    MainWindow()
    # MainWindow.__init__ already calls sys.exit() after the dialog closes,
    # so this line is effectively unreachable; kept as a safety net.
    sys.exit(app.exec_())
window()
| blitpxl/nk-popup-generator | src/iterable_popup.py | iterable_popup.py | py | 1,783 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PySide2.QtWidgets.QApplication",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name":... |
11087451085 | # Import the libraries
import nltk
from nltk.chat.util import Chat, reflections
import tkinter as tk
from tkinter import *
# Define the responses
# Each entry pairs a user-input pattern with a list of candidate replies,
# one of which nltk's Chat picks when the pattern matches.
pairs = [
    ['hi', ['Hello!', 'Hi there!']],
    ['what is your name', ['My name is Chatbot', 'I am Chatbot']],
    ['how are you', ['I am doing well', 'I am fine, thanks']],
    ['bye', ['Goodbye', 'See you later']],
]
# Create the chatbot
chatbot = Chat(pairs, reflections)
# Create the user interface
def send():
    """Read the user's message, echo it to the log, and append the bot reply.

    Uses the module-level EntryBox/ChatLog widgets and the chatbot instance.
    """
    msg = EntryBox.get("1.0", "end-1c").strip()
    EntryBox.delete("0.0", END)
    if msg != "":
        # Temporarily make the read-only log writable, append both sides of
        # the exchange, then lock it again and scroll to the bottom.
        ChatLog.config(state=NORMAL)
        ChatLog.insert(END, "You: " + msg + "\n\n")
        ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
        res = chatbot.respond(msg)
        ChatLog.insert(END, "Chatbot: " + res + "\n\n")
        ChatLog.config(state=DISABLED)
        ChatLog.yview(END)
# Fixed-size main window.
root = Tk()
root.title("Chatbot")
root.geometry("400x500")
root.resizable(width=FALSE, height=FALSE)
# Read-only conversation log with an attached scrollbar.
ChatLog = Text(root, bd=0, bg="purple", height="8", width="50", font="Arial",)
ChatLog.config(state=DISABLED)
scrollbar = Scrollbar(root, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set
SendButton = Button(root, font=("Verdana", 12, 'bold'), text="Send", width="12", height=5, bd=0, bg="#32de97", activebackground="#3c9d9b", fg='#ffffff', command=send)
# Message entry box.
EntryBox = Text(root, bd=0, bg="white", width="29", height="5", font="Arial")
# Absolute layout: log on top, entry box and send button along the bottom.
scrollbar.place(x=376, y=6, height=386)
ChatLog.place(x=6, y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=60, width=265)
SendButton.place(x=6, y=401, height=60)
root.mainloop()
| Beimnet27/Simple-Python-Chatbot | SimpleChat_Bot.py | SimpleChat_Bot.py | py | 1,647 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nltk.chat.util.Chat",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "nltk.chat.util.reflections",
"line_number": 16,
"usage_type": "argument"
}
] |
11806840571 |
import json
import requests
from config.const import SPOTIFY_HEADERS, SPOTIFY_URLS
from config.tokens import SPOTIFY_OAUTH_TOKEN, SPOTIFY_USER_ID
class SpotifyAPI:
    """Thin wrapper around the Spotify Web API for playlist management."""
    name = 'spotify'
    @staticmethod
    def __get_request_headers():
        # Inject the OAuth token into the configured header template.
        headers = SPOTIFY_HEADERS
        headers["Authorization"] = headers["Authorization"].format(
            spotify_oauth_token=SPOTIFY_OAUTH_TOKEN)
        return headers
    @staticmethod
    def __get_playlists_url():
        return SPOTIFY_URLS["playlists_url"]
    @staticmethod
    def __get_tracks_url():
        return SPOTIFY_URLS["tracks_url"]
    @classmethod
    def _ARS_retrieve_track_info(cls, song_info):
        # Dig the Spotify track id out of an ACRCloud recognition result;
        # any missing key or empty music list means "not recognised".
        try:
            first_match = song_info['metadata']['music'][0]
            track_id = first_match['external_metadata']['spotify']['track']['id']
        except (KeyError, IndexError):
            return None
        return 'spotify:track:{}'.format(track_id)
    @classmethod
    def retrieve_track_info(cls, ARS_name, song_info):
        """Extract a Spotify track URI from a recognition-service response."""
        if ARS_name == 'acrcloud':
            return cls._ARS_retrieve_track_info(song_info)
        # raise ARS_NOT_RECOGNISED but who cares
    @classmethod
    def _retrieve_playlist_id(cls, playlist_request):
        return json.loads(playlist_request.text)['id']
    @classmethod
    def get_playlist_id(cls, playlist_request):
        """Return the playlist id from a create-playlist HTTP response."""
        return cls._retrieve_playlist_id(playlist_request)
    @classmethod
    def create_playlist(cls, playlist_name: str, description="", public=False):
        """POST a new playlist for the configured user; returns the response."""
        url = cls.__get_playlists_url().format(spotify_user_id=SPOTIFY_USER_ID)
        payload = json.dumps({
            "name": playlist_name,
            "description": description,
            "public": public,
        })
        return requests.post(url, data=payload, headers=cls.__get_request_headers())
    @classmethod
    def add_tracks_to_playlist(cls, tracks_ids: str, playlist_id: str):
        """POST the given track URIs into an existing playlist."""
        url = cls.__get_tracks_url().format(playlist_id=playlist_id)
        params = {"uris": ",".join(tracks_ids)}
        return requests.post(url, params=params, headers=cls.__get_request_headers())
| simbi0nts/local-music-to-Spotify-transfer | api/spotify.py | spotify.py | py | 2,218 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "config.const.SPOTIFY_HEADERS",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "config.tokens.SPOTIFY_OAUTH_TOKEN",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "config.const.SPOTIFY_URLS",
"line_number": 21,
"usage_type": "name"
},
... |
25650890509 | from django.urls import path
from repairshop import views
urlpatterns = [
    # Landing page.
    path("", views.home, name="home"),
    # Products filtered by a sub-category slug.
    path("categories/<slug:sub_category>/", views.sub_category, name="sub_category"),
    path("about/", views.about, name="about"),
    # Category overview list.
    path("categories/", views.categories, name="categories"),
    path("contacts/", views.contacts, name="contacts"),
    # Single product detail page, addressed by its slug.
    path("products/<slug:product_url>/", views.product, name="product"),
]
| bmyronov/eremont | repairshop/urls.py | urls.py | py | 441 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "repairshop.views.home",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "repairshop.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls... |
27821623666 | from flask import Flask, request
import json
from datetime import datetime
import requests
from lights import light
from constants import Tables, Params, PHUE
from sqlite import sqlite
app = Flask(__name__)
# Wrappers for the two physical lights (light ids 1 and 3).
Light1 = light.Light(1)
Light2 = light.Light(3)
@app.route("/listener")
def generate_refresh_token():
code = request.args.get(Params.CODE.value)
url = f"https://api.meethue.com/oauth2/token?code={code}&grant_type=authorization_code"
generated_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
response = json.loads(requests.post(url, auth=PHUE.AUTH.value).text)
access_token = response[Params.ACCESS_TOKEN.value]
refresh_token = response[Params.REFRESH_TOKEN.value]
c = sqlite.SQLite()
response, code = c.delete_all(Tables.TOKENS.value)
if code == 409:
c.close_conn()
return response
response, code = c.insert(
Tables.TOKENS.value, (generated_at, access_token, refresh_token))
if code == 409:
c.close_conn()
return response
Light1.tokens.pull_tokens()
Light2.tokens.pull_tokens()
Light1.pull_light_data()
Light2.pull_light_data()
return "Success! Tokens have been generated!", 201
@app.route("/create-table", methods=['POST'])
def create_table():
body = request.json
columns = body[Params.COLUMNS.value]
table_columns = ""
for key in columns:
try:
assert (columns[key] in Tables.COLUMN_TYPES.value)
except:
message = f"Table column '{key}' can only be of type 'text', 'real' or 'integer'. '{columns[key]}' is not allowed."
return message, 409
else:
table_columns += f"{key} {columns[key]},\n"
table_columns = table_columns[:-2]
c = sqlite.SQLite()
response = c.create_table(body['table_name'], table_columns)
c.close_conn()
return response
@app.route("/lights/<id>/power-on", methods=['POST'])
def power_on(id):
light = Light1 if int(id) == 1 else Light2
return light.set_light("on")
@app.route("/lights/<id>/power-off", methods=['POST'])
def power_off(id):
assert id == request.view_args[Params.ID.value]
light = Light1 if int(id) == 1 else Light2
return light.set_light("off")
@app.route("/db/<table>", methods=['GET'])
def get_dblight_data(table):
assert table == request.view_args['table']
c = sqlite.SQLite()
response, code = c.get_all(table)
c.close_conn()
return response, code
@app.route("/lights/state", methods=["GET"])
def curr_light_state():
state = []
state.append(Light1.get_light_data())
state.append(Light2.get_light_data())
return state
| Shak-codes/Smart-Home-Routines | database/api/app.py | app.py | py | 2,648 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lights.light.Light",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lights.light",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "lights.light.Light",
... |
39156041068 | # BFG Forge
# Based on Level Buddy by Matt Lucas
# https://matt-lucas.itch.io/level-buddy
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy, bpy.utils.previews, bmesh, glob, math, os, time
from . import import_md5mesh, lexer
from mathutils import Vector
# used when creating light and entities, and exporting
_scale_to_game = 64.0
# Inverse scale: converts game units back to Blender units.
_scale_to_blender = 1.0 / _scale_to_game
# Material decl paths treated as editor-only utility materials (rendered
# shadeless/translucent by create_material).
_editor_material_paths = ["textures/common", "textures/editor"]
# Icon preview collections, keyed by category (e.g. "material", "light").
preview_collections = {}
################################################################################
## FILE SYSTEM
################################################################################
class FileSystem:
	"""Resolves game asset paths across the mod/basedev/base search directories."""
	def __init__(self):
		# highest priority first
		self.search_dirs = []
		if bpy.context.scene.bfg.mod_dir:
			self.search_dirs.append(bpy.context.scene.bfg.mod_dir)
		self.search_dirs.append("basedev")
		self.search_dirs.append("base")
	def calculate_relative_path(self, filename):
		"""Return *filename* relative to the search dir containing it, or None.

		Comparison is case-insensitive (paths are lowercased before matching).
		"""
		# e.g. if game_path is "D:\Games\DOOM 3",
		# "D:\Games\DOOM 3\basedev\models\mapobjects\arcade_machine\arcade_machine.lwo"
		# should return
		# "models\mapobjects\arcade_machine\arcade_machine.lwo"
		for search_dir in self.search_dirs:
			full_search_path = os.path.join(os.path.realpath(bpy.path.abspath(bpy.context.scene.bfg.game_path)), search_dir).lower()
			full_file_path = os.path.realpath(bpy.path.abspath(filename)).lower()
			if full_file_path.startswith(full_search_path):
				return os.path.relpath(full_file_path, full_search_path)
		return None
	def find_file_path(self, filename):
		"""Return the absolute path of the first search-dir hit for *filename*, or None."""
		for search_dir in self.search_dirs:
			full_path = os.path.join(os.path.realpath(bpy.path.abspath(bpy.context.scene.bfg.game_path)), search_dir, filename)
			if os.path.exists(full_path):
				return full_path
		return None
	def find_image_file_path(self, filename):
		"""Like find_file_path, but maps special names and retries .tga/.png extensions."""
		# "_black"/"_white" are mapped to real image paths — presumably
		# engine builtin image names; TODO confirm against the game assets.
		if filename == "_black":
			filename = "textures/black"
		elif filename == "_white":
			filename = "guis/assets/white"
		path = self.find_file_path(filename)
		if not path:
			# try some other extensions
			name, extension = os.path.splitext(filename)
			if extension != ".tga":
				path = self.find_file_path(name + ".tga")
			if not path and extension != ".png":
				path = self.find_file_path(name + ".png")
		return path
	def find_files(self, pattern):
		"""Glob *pattern* in each search dir; first (highest-priority) hit per basename wins."""
		# don't touch the same file more than once
		# e.g.
		# mymod/materials/base_wall.mtr
		# basedev/materials/base_wall.mtr
		# ignore the second one
		touched_files = []
		found_files = []
		for search_dir in self.search_dirs:
			full_path = os.path.join(os.path.realpath(bpy.path.abspath(bpy.context.scene.bfg.game_path)), search_dir)
			if os.path.exists(full_path):
				for f in glob.glob(os.path.join(full_path, pattern)):
					base = os.path.basename(f)
					if not base in touched_files:
						touched_files.append(base)
						found_files.append(f)
		return found_files
################################################################################
## UTILITY FUNCTIONS
################################################################################
def min_nullable(a, b):
	"""Return the smaller of *a* and *b*, where *a* may be None (no value yet).

	Useful for accumulating a running minimum starting from None.
	"""
	# `is None` instead of `== None`: identity comparison is idiomatic and
	# cannot be fooled by custom __eq__ implementations.
	if a is None or b < a:
		return b
	return a
def max_nullable(a, b):
	"""Return the larger of *a* and *b*, where *a* may be None (no value yet).

	Useful for accumulating a running maximum starting from None.
	"""
	# `is None` instead of `== None`: identity comparison is idiomatic and
	# cannot be fooled by custom __eq__ implementations.
	if a is None or b > a:
		return b
	return a
def set_object_mode_and_clear_selection():
	"""Switch to object mode (if anything is active) and deselect everything."""
	if bpy.context.active_object:
		bpy.ops.object.mode_set(mode='OBJECT')
	bpy.ops.object.select_all(action='DESELECT')
def link_active_object_to_group(group):
	"""Link the active object to the named group, creating the group if needed."""
	if not group in bpy.data.groups:
		bpy.ops.group.create(name=group)
	bpy.ops.object.group_link(group=group)
################################################################################
## MATERIALS
################################################################################
class MaterialDeclPathPropGroup(bpy.types.PropertyGroup):
	# Only the inherited "name" property is used (a decl directory path).
	pass # name property inherited
class MaterialDeclPropGroup(bpy.types.PropertyGroup):
	"""A parsed material decl: the texture map paths pulled out of .mtr files."""
	# name property inherited
	diffuse_texture = bpy.props.StringProperty()
	editor_texture = bpy.props.StringProperty()
	heightmap_scale = bpy.props.FloatProperty() # 0 if normal_texture isn't a heightmap
	normal_texture = bpy.props.StringProperty()
	specular_texture = bpy.props.StringProperty()
	texture = bpy.props.StringProperty() # any stage texture map. will be the light texture for light materials.
def material_decl_preview_items(self, context):
	"""Build (id, label, description, icon, index) items for the active decl path.

	Signature matches an EnumProperty items callback. Results are cached on
	the preview collection and only rebuilt when the active path changes or
	force_refresh is set.
	"""
	materials = []
	pcoll = preview_collections["material"]
	if pcoll.current_decl_path == context.scene.bfg.active_material_decl_path and not pcoll.force_refresh:
		return pcoll.materials
	fs = FileSystem()
	i = 0
	for decl in context.scene.bfg.material_decls:
		decl_path = os.path.dirname(decl.name)
		if decl_path == context.scene.bfg.active_material_decl_path:
			if context.scene.bfg.hide_bad_materials and decl_path not in _editor_material_paths and (decl.diffuse_texture == "" or not fs.find_image_file_path(decl.diffuse_texture)):
				# hide materials with missing diffuse texture, but not editor materials
				continue
			if decl.editor_texture in pcoll: # workaround blender bug, pcoll.load is supposed to return cached preview if name already exists
				preview = pcoll[decl.editor_texture]
			else:
				preview = None
				if decl.editor_texture != "":
					filename = fs.find_image_file_path(decl.editor_texture)
					if filename:
						preview = pcoll.load(decl.editor_texture, filename, 'IMAGE')
			# icon_id 0 means "no preview image available".
			materials.append((decl.name, os.path.basename(decl.name), decl.name, preview.icon_id if preview else 0, i))
			i += 1
	materials.sort()
	pcoll.materials = materials
	pcoll.current_decl_path = context.scene.bfg.active_material_decl_path
	pcoll.force_refresh = False
	return pcoll.materials
class ImportMaterials(bpy.types.Operator):
	"""Parse every .mtr material file and populate the scene's material decls."""
	bl_idname = "scene.import_materials"
	bl_label = "Import Materials"
	def __init__(self):
		# Running totals for the final report message.
		self.num_materials_created = 0
		self.num_materials_updated = 0
	def parse_addnormals(self, decl, lex):
		"""Parse an addnormals(...) expression; returns its first texture token."""
		lex.expect_token("(")
		return lex.parse_token()
	def parse_heightmap(self, decl, lex):
		"""Parse a heightmap(texture, scale) expression; returns (texture, scale)."""
		lex.expect_token("(")
		texture = lex.parse_token()
		lex.expect_token(",")
		scale = float(lex.parse_token())
		lex.expect_token(")")
		return (texture, scale)
	def parse_material_file(self, filename):
		"""Parse one .mtr file into scene.bfg.material_decls.

		Returns (num_created, num_updated). Non-material blocks (particle,
		skin, table) are skipped wholesale.
		"""
		lex = lexer.Lexer(filename)
		num_materials_created = 0
		num_materials_updated = 0
		scene = bpy.context.scene
		print("Parsing", os.path.basename(filename), "...", end="", flush=True)
		while True:
			token = lex.parse_token()
			if token == None:
				break
			if token in [ "particle", "skin", "table"]:
				lex.parse_token() # name
				lex.skip_bracket_delimiter_section("{", "}")
			else:
				# An optional "material" keyword may precede the decl name.
				if token == "material":
					name = lex.parse_token()
				else:
					name = token
				if name in scene.bfg.material_decls:
					decl = scene.bfg.material_decls[name]
					num_materials_updated += 1
				else:
					num_materials_created += 1
					decl = scene.bfg.material_decls.add()
					decl.name = name
				lex.expect_token("{")
				# Track brace depth: depth 1 is the material body, depth 2 a stage.
				num_required_closing = 1
				in_stage = False
				stage_blend = None
				stage_heightmap_scale = 0
				stage_texture = None
				while True:
					token = lex.parse_token()
					if token == None:
						break
					elif token == "{":
						num_required_closing += 1
						if num_required_closing == 2:
							# 2nd opening brace: now in a stage
							in_stage = True
							stage_blend = None
							stage_heightmap_scale = 0
							stage_texture = None
					elif token == "}":
						num_required_closing -= 1
						if num_required_closing == 0:
							break
						elif num_required_closing == 1:
							# one closing brace left: closing stage
							in_stage = False
							if stage_texture:
								decl.texture = stage_texture # any stage texture map. will be the light texture for light materials.
							if stage_blend and stage_texture:
								# Route the stage texture to the matching decl slot.
								if stage_blend.lower() == "bumpmap":
									decl.normal_texture = stage_texture
									decl.heightmap_scale = stage_heightmap_scale
								elif stage_blend.lower() == "diffusemap":
									decl.diffuse_texture = stage_texture
								elif stage_blend.lower() == "specularmap":
									decl.specular_texture = stage_texture
					if in_stage:
						if token.lower() == "blend":
							stage_blend = lex.parse_token()
						elif token.lower() == "map":
							token = lex.parse_token()
							if token.lower() == "addnormals":
								stage_texture = self.parse_addnormals(decl, lex)
							elif token.lower() == "heightmap":
								(stage_texture, stage_heightmap_scale) = self.parse_heightmap(decl, lex)
							else:
								stage_texture = token
					else:
						# Material-level shorthand keywords (outside any stage).
						if token.lower() == "bumpmap":
							token = lex.parse_token()
							if token.lower() == "addnormals":
								decl.normal_texture = self.parse_addnormals(decl, lex)
							elif token.lower() == "heightmap":
								(decl.normal_texture, decl.heightmap_scale) = self.parse_heightmap(decl, lex)
							else:
								decl.normal_texture = token
						elif token.lower() == "diffusemap":
							decl.diffuse_texture = lex.parse_token()
						elif token.lower() == "qer_editorimage":
							decl.editor_texture = lex.parse_token()
						elif token.lower() == "specularmap":
							decl.specular_texture = lex.parse_token()
		print(" %d materials" % (num_materials_created + num_materials_updated))
		return (num_materials_created, num_materials_updated)
	def update_material_decl_paths(self, scene):
		"""Rebuild the list of decl directory paths (textures/... only)."""
		scene.bfg.material_decl_paths.clear()
		for decl in scene.bfg.material_decls:
			name = os.path.dirname(decl.name)
			if name.startswith("textures") and not name in scene.bfg.material_decl_paths:
				path = scene.bfg.material_decl_paths.add()
				path.name = name
	@classmethod
	def poll(cls, context):
		# Importing requires a configured game path.
		return context.scene.bfg.game_path != ""
	def execute(self, context):
		self.num_materials_created = 0
		self.num_materials_updated = 0
		start_time = time.time()
		fs = FileSystem()
		files = fs.find_files(os.path.join("materials", "*.mtr"))
		wm = context.window_manager
		wm.progress_begin(0, len(files))
		for i, f in enumerate(files):
			result = self.parse_material_file(f)
			wm.progress_update(i)
			self.num_materials_created += result[0]
			self.num_materials_updated += result[1]
		self.update_material_decl_paths(context.scene)
		# Light material previews may now be stale.
		preview_collections["light"].needs_refresh = True
		wm.progress_end()
		self.report({'INFO'}, "Imported %d materials, updated %d in %.2f seconds" % (self.num_materials_created, self.num_materials_updated, time.time() - start_time))
		return {'FINISHED'}
def create_material_texture(fs, mat, texture, slot_number):
	"""Get or create the Blender texture for *texture* and bind it to a slot.

	Returns (texture, slot) for the given slot_number on material *mat*.
	"""
	# textures may be shared between materials, so don't create one that already exists
	if texture in bpy.data.textures:
		tex = bpy.data.textures[texture]
	else:
		tex = bpy.data.textures.new(texture, type='IMAGE')
	# texture image may have changed
	img_filename = fs.find_image_file_path(texture)
	if img_filename:
		# try to use relative paths for image filenames
		try:
			img_filename = bpy.path.relpath(img_filename)
		except ValueError:
			pass
		if not tex.image or tex.image.filepath != img_filename:
			# Best-effort load: a missing/broken image leaves the old one bound.
			try:
				img = bpy.data.images.load(img_filename)
			except:
				pass
			else:
				tex.image = img
	# update/create the texture slot
	if not mat.texture_slots[slot_number] or not mat.texture_slots[slot_number].name == texture:
		texSlot = mat.texture_slots.create(slot_number)
		texSlot.texture_coords = 'UV'
		texSlot.texture = tex
	return (tex, mat.texture_slots[slot_number])
def create_material(decl):
	"""Create (or refresh) the Blender material for a material decl.

	Slot layout: 0 = diffuse (with fallbacks), 1 = normal/height, 2 = specular.
	Editor materials are made shadeless and half-transparent instead.
	"""
	if decl.name in bpy.data.materials:
		mat = bpy.data.materials[decl.name]
	else:
		mat = bpy.data.materials.new(decl.name)
	fs = FileSystem()
	decl_path = os.path.dirname(decl.name)
	mat.preview_render_type = 'CUBE'
	if decl_path in _editor_material_paths:
		# editor materials: use the editor texture if diffuse is missing
		create_material_texture(fs, mat, decl.diffuse_texture if decl.diffuse_texture != "" else decl.editor_texture, 0)
		mat.alpha = 0.5
		mat.transparency_method = 'Z_TRANSPARENCY'
		mat.use_shadeless = True
		mat.use_transparency = True
	else:
		mat.use_shadeless = bpy.context.scene.bfg.shadeless_materials
		if decl.diffuse_texture != "":
			create_material_texture(fs, mat, decl.diffuse_texture, 0)
		elif decl.texture != "": # fallback to generic texture if no diffuse
			create_material_texture(fs, mat, decl.texture, 0)
		elif decl.editor_texture != "": # fallback to editor texture if no diffuse or generic
			create_material_texture(fs, mat, decl.editor_texture, 0)
		if decl.normal_texture != "":
			(tex, slot) = create_material_texture(fs, mat, decl.normal_texture, 1)
			slot.use_map_color_diffuse = False
			# heightmap_scale > 0 means the map is a heightmap (displacement),
			# otherwise it is a tangent-space normal map.
			if decl.heightmap_scale > 0:
				slot.use_map_displacement = True
				slot.displacement_factor = decl.heightmap_scale
			else:
				tex.use_normal_map = True
				slot.use_map_normal = True
		if decl.specular_texture != "":
			(_, slot) = create_material_texture(fs, mat, decl.specular_texture, 2)
			slot.use_map_color_diffuse = False
			slot.use_map_color_spec = True
			slot.use_map_specular = True
	return mat
def get_or_create_active_material(context):
	"""Return a Blender material for the scene's active material decl, or None."""
	decls = context.scene.bfg.material_decls
	active_name = context.scene.bfg.active_material_decl
	if active_name not in decls:
		return None
	return create_material(decls[active_name])
def assign_material(obj, mat, where='ALL'):
	"""Assign *mat* to *obj*; for 2D rooms, *where* picks CEILING/WALL/FLOOR/ALL."""
	if obj.bfg.type == '2D_ROOM':
		# 2D rooms store material names per surface and rebuild their planes.
		if where == 'CEILING' or where == 'ALL':
			obj.bfg.ceiling_material = mat.name
		if where == 'WALL' or where == 'ALL':
			obj.bfg.wall_material = mat.name
		if where == 'FLOOR' or where == 'ALL':
			obj.bfg.floor_material = mat.name
		update_room_plane_materials(obj)
	else:
		if len(obj.data.materials) == 1:
			# one slot: easy, just reassign
			obj.data.materials[0] = mat
		else:
			obj.data.materials.clear()
			obj.data.materials.append(mat)
			# there was more than one material slot on this object
			# need to set material_index on all faces to 0
			bm = bmesh.new()
			bm.from_mesh(obj.data)
			for f in bm.faces:
				f.material_index = 0
			bm.to_mesh(obj.data)
			bm.free()
class AssignMaterial(bpy.types.Operator):
	"""Assign the material to the selected objects or object faces"""
	bl_idname = "scene.assign_material"
	bl_label = "Assign"
	# Which surface(s) to target on 2D rooms; passed through to assign_material.
	where = bpy.props.StringProperty(name="where", default='ALL')
	def execute(self, context):
		obj = context.active_object
		if not obj:
			return {'CANCELLED'}
		mat = get_or_create_active_material(context)
		if not mat:
			return {'CANCELLED'}
		if obj.mode == 'EDIT' and hasattr(obj.data, "materials"):
			# edit mode: assign to selected mesh faces
			bm = bmesh.from_edit_mesh(obj.data)
			selected_faces = [f for f in bm.faces if f.select]
			if len(selected_faces) > 0:
				# create/find a slot
				material_index = -1
				for i, m in enumerate(obj.data.materials):
					if m == mat:
						material_index = i
						break
				if material_index == -1:
					obj.data.materials.append(mat)
					material_index = len(obj.data.materials) - 1
				# assign to faces
				for f in selected_faces:
					f.material_index = material_index
				# remove any material slots that are now unused
				# pop function update_data arg doesn't work, need to remap face material_index ourselves after removal
				old_material_names = []
				for m in obj.data.materials:
					old_material_names.append(m.name)
				remove_materials = []
				for i, m in enumerate(obj.data.materials):
					used = False
					for f in bm.faces:
						if f.material_index == i:
							used = True
							break
					if not used:
						remove_materials.append(m)
				if len(remove_materials) > 0:
					for m in remove_materials:
						obj.data.materials.pop(obj.data.materials.find(m.name), True)
					# Remap each face's index from the old slot order to the new.
					for f in bm.faces:
						f.material_index = obj.data.materials.find(old_material_names[f.material_index])
				bmesh.update_edit_mesh(obj.data)
				#bm.free() # bmesh.from_edit_mesh returns garbage after this is called
		else:
			# object mode: assign to every selected object that has materials
			for s in context.selected_objects:
				if hasattr(s.data, "materials"):
					assign_material(s, mat, self.where)
		return {'FINISHED'}
def refresh_selected_objects_materials(context):
	"""Recreate the selection's materials from their corresponding decls."""
	refreshed = [] # don't refresh the same material twice
	for obj in context.selected_objects:
		if hasattr(obj.data, "materials"):
			for mat in obj.data.materials:
				# Only materials that have a matching decl can be rebuilt.
				if mat not in refreshed and mat.name in context.scene.bfg.material_decls:
					decl = context.scene.bfg.material_decls[mat.name]
					create_material(decl)
					refreshed.append(mat)
class RefreshMaterials(bpy.types.Operator):
	"""Refresh the select objects' materials, recreating them from their corresponding material decls"""
	bl_idname = "scene.refresh_materials"
	bl_label = "Refresh Materials"
	@classmethod
	def poll(cls, context):
		# Only meaningful once material decls have been imported.
		return len(context.scene.bfg.material_decls) > 0
	def execute(self, context):
		refresh_selected_objects_materials(context)
		return {'FINISHED'}
################################################################################
## MODELS
################################################################################
# creates a new object with the specified model either loaded into a new mesh, or linked to an existing mesh
# the object will be made active and selected
# return (object, error_message)
def create_model_object(context, filename, relative_path):
	"""Create an object for the model at filename (.dae, .lwo or .md5mesh).

	If another scene object already uses this model (matched via
	bfg.entity_model == relative_path), link to its existing mesh instead of
	re-importing. The new object is made active and selected.
	Returns (object, error_message); exactly one of the two is None.
	"""
	# check that the required import addon is enabled
	extension = os.path.splitext(filename)[1]
	if extension.lower() == ".lwo":
		if not hasattr(bpy.types, "IMPORT_SCENE_OT_lwo"):
			return (None, "LightWave Object (.lwo) import addon not enabled")
	elif extension.lower() not in [".dae", ".md5mesh"]:
		return (None, "Model \"%s\" uses unsupported extension \"%s\"" % (filename, extension))
	set_object_mode_and_clear_selection()
	# if the model has already been loaded before, don't import - link to the existing mesh
	mesh = None
	for obj in context.scene.objects:
		if obj.bfg.entity_model == relative_path:
			mesh = obj.data
			break
	model_obj_name = os.path.splitext(os.path.basename(relative_path))[0]
	if mesh:
		# reuse: new object sharing the previously imported mesh datablock
		obj = bpy.data.objects.new(model_obj_name, mesh)
		context.scene.objects.link(obj)
		context.scene.objects.active = obj
	else:
		# prep for diffing scene objects before and after import for consistency between importers
		# e.g. lwo importer doesn't select or make active the object(s) in creates
		obj_names = []
		for obj in context.scene.objects:
			obj_names.append(obj.name)
		# import
		if extension.lower() == ".dae":
			bpy.ops.wm.collada_import(filepath=filename)
		elif extension.lower() == ".lwo":
			bpy.ops.import_scene.lwo(filepath=filename, USE_EXISTING_MATERIALS=True)
		elif extension.lower() == ".md5mesh":
			import_md5mesh.read_md5mesh(filename)
		# diff scene objects
		# 0: error, 1: fine, >1: join objects
		imported_objects = []
		for obj in context.scene.objects:
			if not obj.name in obj_names:
				imported_objects.append(obj)
		if len(imported_objects) == 0:
			return (None, "Importing \"%s\" failed" % filename) # import must have failed
		elif len(imported_objects) == 1:
			# make sure the object is selected and active
			imported_objects[0].select = True
			context.scene.objects.active = imported_objects[0]
		else:
			# join objects
			obj = context.active_object
			bpy.ops.object.select_all(action='DESELECT')
			for obj in imported_objects:
				obj.select = True
			context.scene.objects.active = imported_objects[0]
			bpy.ops.object.join()
			# fix names
			context.scene.objects.active.name = model_obj_name
			context.scene.objects.active.data.name = model_obj_name
	obj = context.scene.objects.active
	# fixup material names
	for i, mat in enumerate(obj.data.materials):
		(name, ext) = os.path.splitext(mat.name)
		fix_material = False
		if extension.lower() == ".dae":
			# fix collada mangling
			name = name.replace("_", "/")
			if name.endswith("-material"):
				name = name[:-len("-material")]
			fix_material = True
		if ext != "":
			# remove filename extensions
			# e.g. "models/items/rocket_ammo/rocket_large.tga" should be "models/items/rocket_ammo/rocket_large"
			fix_material = True
		if fix_material:
			# if a material with the fixed name already exists, use it
			# otherwise rename this one
			new_mat = bpy.data.materials.get(name)
			if new_mat:
				obj.data.materials[i] = new_mat
			else:
				mat.name = name
	obj.select = True
	obj.bfg.entity_model = relative_path
	# models are authored in game units; scale down to Blender units
	obj.scale = [_scale_to_blender, _scale_to_blender, _scale_to_blender]
	obj.lock_scale = [True, True, True]
	refresh_selected_objects_materials(context)
	bpy.ops.object.shade_smooth()
	return (obj, None)
################################################################################
## ENTITIES
################################################################################
class EntityDictPropGroup(bpy.types.PropertyGroup):
	"""A single key-value pair from an entityDef block (the key is stored in name)."""
	# name property inherited
	value = bpy.props.StringProperty()
class EntityPropGroup(bpy.types.PropertyGroup):
	"""An entityDef parsed from a .def file; its key-value pairs live in dict."""
	# name property inherited
	dict = bpy.props.CollectionProperty(type=EntityDictPropGroup)

	def get_dict_value(self, key, key_default=None):
		# return the stored value for key, or key_default when absent
		kvp = self.dict.get(key)
		return kvp.value if kvp else key_default
class ModelDefPropGroup(bpy.types.PropertyGroup):
	"""A model decl parsed from a .def file (the decl name is stored in name)."""
	# name property inherited
	inherit = bpy.props.StringProperty()
	mesh = bpy.props.StringProperty() # e.g. models/md5/monsters/zfat/zfat.md5mesh
class ImportEntities(bpy.types.Operator):
	"""Parse all def/*.def files under the game path into scene.bfg.entities/model_defs."""
	bl_idname = "scene.import_entities"
	bl_label = "Import Entities"
	def parse_def_file(self, scene, filename):
		"""Parse one .def file. Returns (num_entities_created, num_entities_updated)."""
		lex = lexer.Lexer(filename)
		num_entities_created = 0
		num_entities_updated = 0
		print("Parsing", os.path.basename(filename), "...", end="", flush=True)
		while True:
			token = lex.parse_token()
			if token == None:
				break # end of file
			if token == "entityDef":
				# update the existing entry when re-importing, otherwise add one
				name = lex.parse_token()
				if name in scene.bfg.entities:
					entity = scene.bfg.entities[name]
					num_entities_updated += 1
				else:
					entity = scene.bfg.entities.add()
					entity.name = name
					num_entities_created += 1
				lex.expect_token("{")
				# track brace depth so nested blocks inside the entityDef are skipped correctly
				num_required_closing = 1
				while True:
					token = lex.parse_token()
					if token == None:
						break
					elif token == "{":
						num_required_closing += 1
					elif token == "}":
						num_required_closing -= 1
						if num_required_closing == 0:
							break
					elif token.startswith("editor_") or token in ["inherit", "model"]: # only store what we care about
						# parse as key-value pair
						key = token
						if key in entity.dict:
							kvp = entity.dict[key]
						else:
							kvp = entity.dict.add()
							kvp.name = key
						kvp.value = lex.parse_token()
			elif token == "model":
				# top-level model decl: maps a model name to a mesh file (and optional inherit)
				name = lex.parse_token()
				model_def = scene.bfg.model_defs.get(name)
				if not model_def:
					model_def = scene.bfg.model_defs.add()
					model_def.name = name
				lex.expect_token("{")
				num_required_closing = 1
				while True:
					token = lex.parse_token()
					if token == None:
						break
					elif token == "{":
						num_required_closing += 1
					elif token == "}":
						num_required_closing -= 1
						if num_required_closing == 0:
							break
					elif token == "inherit":
						model_def.inherit = lex.parse_token()
					elif token == "mesh":
						model_def.mesh = lex.parse_token()
			else:
				# any other decl type: skip its whole { ... } body
				name = lex.parse_token() # name, sometimes opening brace
				lex.skip_bracket_delimiter_section("{", "}", True if name == "{" else False)
		print(" %d entities" % (num_entities_created + num_entities_updated))
		return (num_entities_created, num_entities_updated)
	@classmethod
	def poll(cls, context):
		# need a configured game path to locate the def files
		return context.scene.bfg.game_path != ""
	def execute(self, context):
		self.num_entities_created = 0
		self.num_entities_updated = 0
		start_time = time.time()
		fs = FileSystem()
		files = fs.find_files(os.path.join("def", "*.def"))
		wm = context.window_manager
		wm.progress_begin(0, len(files))
		for i, f in enumerate(files):
			result = self.parse_def_file(context.scene, f)
			wm.progress_update(i)
			self.num_entities_created += result[0]
			self.num_entities_updated += result[1]
		update_scene_entity_properties(context) # update entity objects with any new properties
		wm.progress_end()
		self.report({'INFO'}, "Imported %d entities, updated %d in %.2f seconds" % (self.num_entities_created, self.num_entities_updated, time.time() - start_time))
		return {'FINISHED'}
# chase the model def mesh
# e.g. entityDef monster_zombie_fat { "model" "monster_zombie_fat" }
# model monster_zombie_fat { "mesh" "models/md5/monsters/zfat/zfat.md5mesh" }
# and handle inherit/recursion
def find_model_def_mesh(model):
model_def = bpy.context.scene.bfg.model_defs.get(model)
if model_def:
if model_def.mesh == "":
if model_def.inherit != "":
return find_model_def_mesh(model_def.inherit)
else:
return model_def.mesh
return model
def create_object_color_material():
	"""Get or create the shared "_object_color" material for entity placeholders.

	The material has use_object_color enabled, so each entity object's own
	color property controls its viewport tint. Returns the material so
	callers no longer have to look it up again by name (the original
	returned nothing, forcing a second bpy.data.materials[...] lookup).
	"""
	name = "_object_color"
	mat = bpy.data.materials.get(name)
	if not mat:
		mat = bpy.data.materials.new(name)
		mat.use_fake_user = True # keep it alive even with no users
		mat.use_object_color = True
		mat.use_shadeless = True
	return mat
def create_object_entity_properties(context, entity, is_inherited=False):
	"""Create string game properties on the active object for an entity def.

	Walks the entity's "editor_var <name>" keys, creating one property per
	name, then recurses up the "inherit" chain with is_inherited=True.
	Only properties coming from an inherited def get the "inherited_"
	prefix - the original prefixed unconditionally, ignoring the
	is_inherited parameter (contradicting its own comment and the
	plain/inherited distinction NewCustomEntityProperty relies on).
	"""
	for kvp in entity.dict:
		if kvp.name.startswith("editor_var"):
			parts = kvp.name.split()
			if len(parts) < 2:
				continue # malformed "editor_var" key with no property name
			prop_name = parts[1]
			if is_inherited:
				# prepend "inherited_" to inherited property names
				prop_name = "inherited_" + prop_name
			if not context.active_object.game.properties.get(prop_name):
				# don't create the prop if it already exists
				bpy.ops.object.game_property_new(type='STRING', name=prop_name)
	inherit = entity.dict.get("inherit")
	if inherit:
		parent_entity = context.scene.bfg.entities[inherit.value]
		create_object_entity_properties(context, parent_entity, True)
def update_scene_entity_properties(context):
	"""Add missing properties to existing entity objects.

	Called after re-importing entity defs so every entity/brush-entity
	object in the scene picks up newly declared editor_var properties.
	The original had a stray break that stopped after the FIRST entity
	object found, leaving all the others un-updated.
	"""
	for obj in context.scene.objects:
		if obj.bfg.type in ['BRUSH_ENTITY', 'ENTITY']:
			# a classname may no longer have a matching def after re-import
			entity = context.scene.bfg.entities.get(obj.bfg.classname)
			if not entity:
				continue
			# property creation operates on the active object
			context.scene.objects.active = obj
			create_object_entity_properties(context, entity)
class AddEntity(bpy.types.Operator):
	"""Add a new entity to the scene of the selected type"""
	bl_idname = "scene.add_entity"
	bl_label = "Add Entity"

	@classmethod
	def poll(cls, context):
		ae = context.scene.bfg.active_entity
		return ae and ae != ""

	def execute(self, context):
		ae = context.scene.bfg.active_entity
		if ae and ae != "":
			active_object = context.active_object
			selected_objects = context.selected_objects
			set_object_mode_and_clear_selection()
			entity = context.scene.bfg.entities[ae]
			# "?" is a sentinel meaning the def declared no bounds
			entity_mins = entity.get_dict_value("editor_mins", "?")
			entity_maxs = entity.get_dict_value("editor_maxs", "?")
			model = entity.get_dict_value("model")
			if (entity_mins == "?" or entity_maxs == "?") and not model:
				# brush entity, create as empty
				if not (active_object and active_object.bfg.type in ['NONE','BRUSH'] and len(selected_objects) > 0):
					self.report({'ERROR'}, "Brush entities require a brush to be selected")
					return {'CANCELLED'}
				bpy.ops.object.empty_add(type='SPHERE')
				obj = context.active_object
				obj.empty_draw_size = 0.5
				obj.hide_render = True
				obj.location = active_object.location
				obj.lock_rotation = [True, True, True]
				obj.lock_scale = [True, True, True]
				obj.bfg.type = 'BRUSH_ENTITY'
			else:
				# normal entity
				obj = None
				# initialize so the check below can't see an unbound name if
				# the model def chain resolves to nothing
				error_message = None
				if model: # create as mesh
					model = find_model_def_mesh(model) # handle "model" pointing to a model def, inheritance etc.
					if model:
						fs = FileSystem()
						filename = fs.find_file_path(model)
						if filename:
							(obj, error_message) = create_model_object(context, filename, model)
						else:
							error_message = "Model %s not found" % model
					if error_message:
						self.report({'ERROR'}, error_message)
				if not obj: # no model or create_model_object error: fallback to primitive
					if entity_mins == "?" or entity_maxs == "?":
						# need bounds
						self.report({'ERROR'}, "Entity def %s is missing editor_mins and editor_maxs" % entity.name)
						return {'CANCELLED'}
					bpy.ops.mesh.primitive_cube_add()
					obj = context.active_object
					entity_color = entity.get_dict_value("editor_color", "0 0 1") # default to blue
					obj.color = [float(i) for i in entity_color.split()] + [float(0.5)] # "r g b"
					obj.data.name = ae
					create_object_color_material()
					obj.data.materials.append(bpy.data.materials["_object_color"])
					obj.hide_render = True
					obj.show_wire = True
					obj.show_transparent = True
					# set dimensions
					mins = Vector([float(i) * _scale_to_blender for i in entity_mins.split()])
					maxs = Vector([float(i) * _scale_to_blender for i in entity_maxs.split()])
					size = maxs + -mins
					obj.dimensions = size
					# set origin
					origin = (mins + maxs) / 2.0
					bpy.ops.object.editmode_toggle()
					bpy.ops.mesh.select_all(action='SELECT')
					bpy.ops.transform.translate(value=origin)
					bpy.ops.object.editmode_toggle()
				obj.lock_rotation = [True, True, False]
				obj.lock_scale = [True, True, True]
				obj.show_axis = True # x will be forward
				# only set 'ENTITY' in this branch: previously this assignment
				# ran unconditionally, clobbering the 'BRUSH_ENTITY' type set
				# above and making the parenting block below unreachable
				obj.bfg.type = 'ENTITY'
			obj.show_name = context.scene.bfg.show_entity_names
			obj.bfg.classname = ae
			obj.name = ae
			link_active_object_to_group("entities")
			create_object_entity_properties(context, entity)
			# parent selected objects to this brush entity, and link them to the "entities" group
			# if there is a editor_material for this entity, assign that material to the selected objects
			if obj.bfg.type == 'BRUSH_ENTITY':
				group = bpy.data.groups["entities"]
				mat = None
				mat_name = entity.get_dict_value("editor_material")
				if mat_name:
					mat_decl = context.scene.bfg.material_decls.get(mat_name)
					if mat_decl:
						mat = create_material(mat_decl)
				for s in selected_objects:
					s.location -= obj.location
					s.parent = obj
					group.objects.link(s)
					if mat:
						assign_material(s, mat)
		return {'FINISHED'}
class ShowEntityDescription(bpy.types.Operator):
	"""Show entity description"""
	bl_idname = "object.show_entity_description"
	bl_label = "Show Entity Description"
	bl_options = {'REGISTER','UNDO','INTERNAL'}

	def draw(self, context):
		bfg = context.scene.bfg
		ent = bfg.entities[bfg.active_entity]
		ent_usage = ent.get_dict_value("editor_usage")
		col = self.layout.column()
		#col.label(ent_usage)
		# no support for text wrapping and multiline labels...
		n = 50
		for i in range(0, len(ent_usage), n):
			col.label(ent_usage[i:i+n])

	@classmethod
	def poll(cls, context):
		# only enabled when the active entity's def has an "editor_usage" key.
		# was: ae != "ae" - a typo that wrongly disabled the operator for an
		# entity literally named "ae"; compare AddEntity.poll which tests != ""
		ae = context.scene.bfg.active_entity
		if ae and ae != "":
			ent = context.scene.bfg.entities[ae]
			return ent.dict.get("editor_usage") != None
		return False

	def invoke(self, context, event):
		return context.window_manager.invoke_popup(self)

	def execute(self, context):
		return {'FINISHED'}
class ShowEntityPropertyDescription(bpy.types.Operator):
	"""Show entity property description"""
	bl_idname = "object.show_entity_property_description"
	bl_label = "Show Entity Property Description"
	bl_options = {'REGISTER','UNDO','INTERNAL'}
	classname = bpy.props.StringProperty(default="")
	name = bpy.props.StringProperty(default="")
	def find_prop_info(self, context, entity):
		# look up the "editor_var <name>" description, recursing up the
		# entity's "inherit" chain until found or the chain ends
		info = entity.get_dict_value("editor_var " + self.name)
		if info:
			return info
		inherit = entity.dict.get("inherit")
		if inherit:
			parent_entity = context.scene.bfg.entities[inherit.value]
			return self.find_prop_info(context, parent_entity)
		return None
	def draw(self, context):
		col = self.layout.column()
		if self.classname != "" and self.name != "":
			entity = context.scene.bfg.entities[self.classname]
			info = self.find_prop_info(context, entity)
			if not info:
				info = "No info"
			#col.label(info)
			# no support for text wrapping and multiline labels...
			n = 50
			for i in range(0, len(info), n):
				col.label(info[i:i+n])
	def invoke(self, context, event):
		# popup dialog; all work happens in draw
		return context.window_manager.invoke_popup(self)
	def execute(self, context):
		return {'FINISHED'}
class NewCustomEntityProperty(bpy.types.Operator):
	"""Create a new custom entity property"""
	bl_idname = "scene.new_custom_entity_property"
	bl_label = "New Entity Property"
	bl_options = {'REGISTER','UNDO','INTERNAL'}
	name = bpy.props.StringProperty(name="Name", default="")
	value = bpy.props.StringProperty(name="Value", default="")
	def invoke(self, context, event):
		# prompt for name/value before executing
		return context.window_manager.invoke_props_dialog(self)
	def execute(self, context):
		# precedence when the name already exists: plain property, then
		# "inherited_" property, then "custom_" property; a brand new
		# property is always created with the "custom_" prefix
		if self.name == "":
			return {'CANCELLED'}
		obj = context.active_object
		# handle brush entities. the parent owns the properties
		old_active = None
		if obj.parent and obj.parent.bfg.type == 'BRUSH_ENTITY':
			old_active = context.scene.objects.active
			obj = obj.parent
			context.scene.objects.active = obj
		# check if a normal property already exists with this name
		prop = obj.game.properties.get(self.name)
		if not prop:
			# check if an inherited property already exists with this name
			prop = obj.game.properties.get("inherited_" + self.name)
			if prop:
				# show inherited properties
				context.scene.bfg.show_inherited_entity_props = True
			else:
				# check if a custom property already exists with this name
				prop_name = "custom_" + self.name
				prop = obj.game.properties.get(prop_name)
				if not prop:
					# finally, create the property
					bpy.ops.object.game_property_new(type='STRING', name=prop_name)
					prop = obj.game.properties[prop_name]
		# whether the property has been created, or already exists, set the value
		prop.value = self.value
		# restore active object
		if old_active:
			context.scene.objects.active = old_active
		else:
			context.scene.objects.active = obj # force ui refresh. game_property_new doesn't seem to trigger it.
		return {'FINISHED'}
class RemoveCustomEntityProperty(bpy.types.Operator):
	"""Remove a custom entity property"""
	bl_idname = "scene.remove_custom_entity_property"
	bl_label = "Remove Entity Property"
	bl_options = {'REGISTER','UNDO','INTERNAL'}
	name = bpy.props.StringProperty(default="")

	def execute(self, context):
		target = context.active_object
		previous_active = None
		# brush entity children don't own properties - redirect to the parent
		if target.parent and target.parent.bfg.type == 'BRUSH_ENTITY':
			previous_active = context.scene.objects.active
			target = target.parent
			context.scene.objects.active = target
		index = target.game.properties.find(self.name)
		if index != -1:
			bpy.ops.object.game_property_remove(index=index)
		if previous_active:
			# restore whichever object was active before the redirect
			context.scene.objects.active = previous_active
		return {'FINISHED'}
################################################################################
## LIGHTS
################################################################################
class AddLight(bpy.types.Operator):
	bl_idname = "scene.add_light"
	bl_label = "Add Light"

	def execute(self, context):
		set_object_mode_and_clear_selection()
		# create the lamp datablock and an object wrapping it
		lamp = bpy.data.lamps.new(name="Light", type='POINT')
		light_obj = bpy.data.objects.new(name="Light", object_data=lamp)
		context.scene.objects.link(light_obj)
		light_obj.select = True
		context.scene.objects.active = light_obj
		# default radius of 300 game units; energy tracks the radius
		radius = 300.0 * _scale_to_blender
		lamp.distance = radius
		lamp.energy = radius
		lamp.use_sphere = True
		link_active_object_to_group("lights")
		return {'FINISHED'}
def get_light_radius(self):
	# custom "light radius" property getter; backed by the lamp's distance
	return self.data.distance
def set_light_radius(self, value):
	# custom "light radius" property setter; energy is kept in lockstep
	# with distance so the lamp's brightness tracks its radius
	self.data.distance = value
	self.data.energy = value
def light_material_preview_items(self, context):
	"""EnumProperty items callback: build the light material preview list.

	Results are cached in the "light" preview collection and only rebuilt
	when pcoll.needs_refresh is set. Each item is an enum tuple:
	(identifier, name, description, icon_id, index).
	"""
	lights = []
	pcoll = preview_collections["light"]
	if not pcoll.needs_refresh:
		return pcoll.lights
	fs = FileSystem()
	# "default" sentinel entry always comes first (index 0, no icon)
	lights.append(("default", "default", "default", 0, 0))
	i = 1
	for decl in context.scene.bfg.material_decls:
		# material name must start with "lights" and have a texture
		if os.path.dirname(decl.name).startswith("lights") and decl.texture != "":
			preview = None
			if decl.texture in pcoll: # workaround blender bug, pcoll.load is supposed to return cached preview if name already exists
				preview = pcoll[decl.texture]
			else:
				filename = fs.find_image_file_path(decl.texture)
				if filename:
					preview = pcoll.load(decl.texture, filename, 'IMAGE')
				elif context.scene.bfg.hide_bad_materials:
					continue # hide if the texture file is missing
			lights.append((decl.name, os.path.basename(decl.name), decl.name, preview.icon_id if preview else 0, i))
			i += 1
	# sorts by identifier; "default" still sorts before the "lights/..." names
	lights.sort()
	pcoll.lights = lights
	pcoll.needs_refresh = False
	return pcoll.lights
################################################################################
## STATIC MODELS
################################################################################
class AddStaticModel(bpy.types.Operator):
	"""Browse for a static model to add"""
	bl_idname = "scene.add_static_model"
	bl_label = "Add Static Model"
	filepath = bpy.props.StringProperty(default="", options={'HIDDEN', 'SKIP_SAVE'})
	filter_glob = bpy.props.StringProperty(default="*.dae;*.lwo;*.md5mesh", options={'HIDDEN'})
	@classmethod
	def poll(cls, context):
		# models are resolved relative to the configured game path
		return context.scene.bfg.game_path != ""
	def execute(self, context):
		# the func_static entity model value looks like this
		# "models/mapobjects/arcade_machine/arcade_machine.lwo"
		# so the file path must descend from one of the search paths
		fs = FileSystem()
		relative_path = fs.calculate_relative_path(self.properties.filepath)
		if not relative_path:
			self.report({'ERROR'}, "File \"%s\" not found. Path must descend from \"%s\"" % (self.properties.filepath, context.scene.bfg.game_path))
			return {'CANCELLED'}
		(obj, error_message) = create_model_object(context, self.properties.filepath, relative_path)
		if error_message:
			self.report({'ERROR'}, error_message)
			return {'CANCELLED'}
		else:
			# static models are exported as func_static entities
			obj.bfg.type = 'STATIC_MODEL'
			obj.bfg.classname = "func_static"
			link_active_object_to_group("static models")
			return {'FINISHED'}
	def invoke(self, context, event):
		# open the file browser; execute runs once a file is picked
		context.window_manager.fileselect_add(self)
		return {'RUNNING_MODAL'}
################################################################################
## MAP
################################################################################
def update_room_plane_modifier(obj):
	"""Sync a 2D room's solidify modifier with its bfg.room_height setting."""
	if not obj.modifiers:
		return
	solidify = obj.modifiers[0]
	if solidify.type != 'SOLIDIFY':
		return
	# extrude the plane up to the room height; material slots 1 and 2 are
	# used for the ceiling and walls respectively
	solidify.thickness = obj.bfg.room_height
	solidify.material_offset = 1
	solidify.material_offset_rim = 2
def update_room_plane_materials(obj):
	"""Point the room's material slots at its bfg floor/ceiling/wall material names."""
	# slot order matches the solidify modifier offsets: 0 floor, 1 ceiling, 2 wall
	floor = bpy.data.materials.get(obj.bfg.floor_material)
	if floor:
		obj.material_slots[0].material = floor
	ceiling = bpy.data.materials.get(obj.bfg.ceiling_material)
	if ceiling:
		obj.material_slots[1].material = ceiling
	wall = bpy.data.materials.get(obj.bfg.wall_material)
	if wall:
		obj.material_slots[2].material = wall
def update_room(self, context):
	"""Property-update callback: keep a 2D room's modifier and materials in sync."""
	obj = context.active_object
	if obj.bfg.type != '2D_ROOM':
		return
	update_room_plane_modifier(obj)
	update_room_plane_materials(obj)
def flip_mesh_normals(mesh):
	"""Flip the normal of every face in mesh, in place."""
	bm = bmesh.new()
	bm.from_mesh(mesh)
	for f in bm.faces:
		f.normal_flip()
	bm.to_mesh(mesh)
	bm.free()
def apply_boolean(dest, src, bool_op, flip_normals=False):
	"""Apply a boolean modifier (bool_op) to dest using src's evaluated mesh.

	src is evaluated with its modifiers, transformed to worldspace and
	wrapped in a temporary object that the modifier references. The temp
	object is named "<dest.name>_bool" so build_map's cleanup pass (which
	removes objects whose names start with map_name + "_bool") can find
	and delete it on the next build - the old hardcoded "_bool" name never
	matched that prefix, so the temp objects and meshes leaked.
	"""
	# auto unwrap this 3D room or brush if that's what the user wants
	if src.bfg.type in ['3D_ROOM', 'BRUSH'] and src.bfg.auto_unwrap:
		auto_unwrap(src.data, src.location, src.scale)
	# generate mesh for the source object
	# transform to worldspace
	bpy.ops.object.select_all(action='DESELECT')
	dest.select = True
	me = src.to_mesh(bpy.context.scene, True, 'PREVIEW')
	me.transform(src.matrix_world)
	# 2D rooms are always unwrapped (the to_mesh result, not the object - it's just a plane)
	if src.bfg.type == '2D_ROOM':
		auto_unwrap(me)
	if flip_normals:
		flip_mesh_normals(me)
	# bool object - need a temp object to hold the result of to_mesh
	ob_bool = bpy.data.objects.new(dest.name + "_bool", me)
	# copy materials
	for mat in src.data.materials:
		if not mat.name in dest.data.materials:
			dest.data.materials.append(mat)
	# apply the boolean modifier
	mod = dest.modifiers.new(name=src.name, type='BOOLEAN')
	mod.object = ob_bool
	mod.operation = bool_op
	mod.solver = 'CARVE'
	bpy.ops.object.modifier_apply(apply_as='DATA', modifier=src.name)
def flip_object_normals(obj):
	"""Flip all face normals of obj via edit-mode operators (clears the selection).

	NOTE(review): editmode_toggle and flip_normals act on the active object;
	this assumes callers have made obj active (build_map/AddBrush do) - confirm.
	"""
	bpy.ops.object.select_all(action='DESELECT')
	obj.select = True
	bpy.ops.object.editmode_toggle()
	bpy.ops.mesh.select_all(action='SELECT')
	bpy.ops.mesh.flip_normals()
	bpy.ops.object.editmode_toggle()
def move_object_to_layer(obj, layer_number):
	"""Place obj on exactly one of Blender's 20 scene layers."""
	obj.layers = [i == layer_number for i in range(20)]
def add_all_materials(obj):
	"""Ensure every material in bpy.data.materials appears in obj's material list.

	NOTE(review): when slot i already exists, the material in it is REPLACED
	by bpy.data.materials[i] unless that material's name is already present
	somewhere in obj's list - presumably intended to fill existing slots in
	a deterministic order, but it can clobber prior slot assignments; verify
	against callers before relying on slot contents afterwards.
	"""
	i = 0
	for m in bpy.data.materials:
		if len(obj.data.materials) > i:
			# a slot exists at this index: only overwrite it if the
			# material isn't already in the object's list under any slot
			has_material = False
			for mat in obj.data.materials:
				if mat.name == m.name:
					has_material = True
			if not has_material:
				obj.data.materials[i] = m
		else:
			# no slot at this index yet: append
			obj.data.materials.append(m)
		i += 1
def build_map(context, rooms, brushes, map_name):
	"""(Re)build a map object named map_name by CSG-combining rooms and brushes.

	The first room seeds the mesh; the rest are boolean-unioned in with
	normals flipped (rooms are hollow), then brushes are unioned. The
	resulting object is linked to the "map" group, moved to the map layer
	and made unselectable. Temp "_bool" helper objects from the PREVIOUS
	build of this map are deleted at the end.
	"""
	scene = context.scene
	# get all the temp bool objects from the last time this map was built
	bool_objects = [obj for obj in bpy.data.objects if obj.name.startswith(map_name + "_bool")]
	# create map object
	# if a map object already exists, its old mesh is removed
	set_object_mode_and_clear_selection()
	old_map_mesh = None
	map_mesh_name = map_name + "_mesh"
	if map_mesh_name in bpy.data.meshes:
		old_map_mesh = bpy.data.meshes[map_mesh_name]
		# NOTE(review): renamed to a fixed "_worldspawn_old" even when
		# map_name is a brush entity's name - confirm collisions can't occur
		old_map_mesh.name = "_worldspawn_old"
	if len(rooms) > 0:
		# first room: generate the mesh and transform to worldspace
		if rooms[0].bfg.type == '3D_ROOM' and rooms[0].bfg.auto_unwrap:
			auto_unwrap(rooms[0].data)
		map_mesh = rooms[0].to_mesh(scene, True, 'PREVIEW')
		map_mesh.name = map_mesh_name
		map_mesh.transform(rooms[0].matrix_world)
		# 2D rooms are always unwrapped (the to_mesh result, not the object - it's just a plane)
		if rooms[0].bfg.type == '2D_ROOM':
			auto_unwrap(map_mesh)
	else:
		map_mesh = bpy.data.meshes.new(map_mesh_name)
	if map_name in bpy.data.objects:
		# reuse the existing map object, swap in the new mesh
		map = bpy.data.objects[map_name]
		map.data = map_mesh
	else:
		map = bpy.data.objects.new(map_name, map_mesh)
		scene.objects.link(map)
	if old_map_mesh:
		bpy.data.meshes.remove(old_map_mesh)
	map.layers[scene.active_layer] = True
	scene.objects.active = map
	map.select = True
	map.hide = False
	# combine rooms
	if len(rooms) > 0:
		# flip so the hollow interior unions correctly; flipped back below
		flip_object_normals(map)
		for i, room in enumerate(rooms):
			if i > 0:
				# not the first room: bool union with existing mesh
				apply_boolean(map, room, 'UNION', flip_normals=True)
				map.select = True
	if len(rooms) > 0:
		flip_object_normals(map)
	# combine brushes
	for brush in brushes:
		apply_boolean(map, brush, 'UNION')
	link_active_object_to_group("map")
	move_object_to_layer(map, scene.bfg.map_layer)
	map.hide_select = True
	bpy.ops.object.select_all(action='DESELECT')
	# cleanup temp bool objects
	for obj in bool_objects:
		mesh = obj.data
		bpy.data.objects.remove(obj)
		bpy.data.meshes.remove(mesh)
class AddRoom(bpy.types.Operator):
	"""Add a new 2D room: a plane extruded to room height by a solidify modifier."""
	bl_idname = "scene.add_room"
	bl_label = "Add Room"
	def execute(self, context):
		scene = context.scene
		set_object_mode_and_clear_selection()
		bpy.ops.mesh.primitive_plane_add(radius=1)
		bpy.ops.object.modifier_add(type='SOLIDIFY')
		obj = context.active_object
		# only Z scaling is locked - the plane is scaled in X/Y, height
		# comes from bfg.room_height via the solidify modifier
		obj.lock_scale = [False, False, True]
		obj.modifiers[0].offset = 1
		obj.modifiers[0].use_even_offset = True
		obj.modifiers[0].use_flip_normals = True
		obj.modifiers[0].use_quality_normals = True
		obj.name = "room2D"
		obj.data.name = "room2D"
		obj.bfg.room_height = 4
		obj.bfg.type = '2D_ROOM'
		if context.scene.bfg.wireframe_rooms:
			obj.draw_type = 'WIRE'
		obj.game.physics_type = 'NO_COLLISION'
		obj.hide_render = True
		# three material slots: 0 floor, 1 ceiling, 2 wall (see the
		# solidify modifier material offsets)
		if len(bpy.data.materials) > 0:
			mat = get_or_create_active_material(context)
			if mat:
				obj.data.materials.append(mat)
				obj.data.materials.append(mat)
				obj.data.materials.append(mat)
				obj.bfg.ceiling_material = mat.name
				obj.bfg.wall_material = mat.name
				obj.bfg.floor_material = mat.name
			else:
				# no active material: fall back to the first material
				obj.data.materials.append(bpy.data.materials[0])
				obj.data.materials.append(bpy.data.materials[0])
				obj.data.materials.append(bpy.data.materials[0])
				obj.bfg.ceiling_material = bpy.data.materials[0].name
				obj.bfg.wall_material = bpy.data.materials[0].name
				obj.bfg.floor_material = bpy.data.materials[0].name
		else:
			# no materials exist at all: create empty slots
			bpy.ops.object.material_slot_add()
			bpy.ops.object.material_slot_add()
			bpy.ops.object.material_slot_add()
			obj.bfg.ceiling_material = ""
			obj.bfg.wall_material = ""
			obj.bfg.floor_material = ""
		scene.objects.active = obj
		update_room_plane_modifier(obj)
		update_room_plane_materials(obj)
		link_active_object_to_group("rooms")
		return {'FINISHED'}
class AddBrush(bpy.types.Operator):
	"""Add a cube brush, or a 3D room when s_type is '3D_ROOM' (normals flipped inward)."""
	bl_idname = "scene.add_brush"
	bl_label = "Add Brush"
	s_type = bpy.props.StringProperty(name="s_type", default='BRUSH')
	def execute(self, context):
		scene = context.scene
		set_object_mode_and_clear_selection()
		bpy.ops.mesh.primitive_cube_add(radius=1)
		obj = context.active_object
		if context.scene.bfg.wireframe_rooms:
			obj.draw_type = 'WIRE'
		if self.s_type == '3D_ROOM':
			obj.name = "room3D"
			obj.data.name = "room3D"
		else:
			obj.name = "brush"
			obj.data.name = "brush"
		obj.bfg.type = self.s_type
		mat = get_or_create_active_material(context)
		if mat:
			obj.data.materials.append(mat)
		scene.objects.active = obj
		# give the cube valid UVs straight away
		bpy.ops.object.editmode_toggle()
		bpy.ops.mesh.select_all(action='SELECT')
		bpy.ops.object.auto_uv_unwrap()
		bpy.ops.object.editmode_toggle()
		obj.game.physics_type = 'NO_COLLISION'
		obj.hide_render = True
		if self.s_type == '3D_ROOM':
			# rooms are hollow: faces point inward
			flip_object_normals(obj)
			link_active_object_to_group("rooms")
		else:
			link_active_object_to_group("brushes")
		return {'FINISHED'}
class CopyRoom(bpy.types.Operator):
	bl_idname = "scene.copy_room"
	bl_label = "Copy Room"
	copy_op = bpy.props.StringProperty(name="copy_op", default='ALL')

	def execute(self, context):
		# the active object is the source; copy to every selected 2D room
		source = context.active_object
		op = self.copy_op
		for target in context.selected_objects:
			if target.bfg.type != '2D_ROOM':
				continue
			if op in ('HEIGHT', 'ALL'):
				target.bfg.room_height = source.bfg.room_height
			if op in ('MATERIAL_CEILING', 'MATERIAL_ALL', 'ALL'):
				target.bfg.ceiling_material = source.bfg.ceiling_material
			if op in ('MATERIAL_WALL', 'MATERIAL_ALL', 'ALL'):
				target.bfg.wall_material = source.bfg.wall_material
			if op in ('MATERIAL_FLOOR', 'MATERIAL_ALL', 'ALL'):
				target.bfg.floor_material = source.bfg.floor_material
			# re-sync the target's modifier and material slots
			update_room_plane_modifier(target)
			update_room_plane_materials(target)
		return {'FINISHED'}
class ConvertRoom(bpy.types.Operator):
	"""Convert the selected 2D room(s) to 3D room(s)"""
	bl_idname = "scene.convert_room"
	bl_label = "Convert Room"
	def execute(self, context):
		selected_objects = list(context.selected_objects) # copy the list, selected objects will change
		for obj in selected_objects:
			if obj.bfg.type == '2D_ROOM':
				obj.bfg.type = '3D_ROOM'
				# create a new mesh, applying the solidify modifer
				# swap the old mesh with the new one, preserving the name
				# then delete the old mesh
				old_mesh = obj.data
				new_mesh_name = old_mesh.name
				# temporary rename frees the original name for the new mesh
				old_mesh.name = "_temp" + old_mesh.name
				new_mesh = obj.to_mesh(context.scene, True, 'PREVIEW')
				new_mesh.name = new_mesh_name
				obj.data = new_mesh
				bpy.data.meshes.remove(old_mesh)
				# remove the solidify modifier
				context.scene.objects.active = obj
				bpy.ops.object.modifier_remove(modifier=obj.modifiers[0].name)
				# 2D room UVs are never valid, so unwrap
				bpy.ops.object.auto_uv_unwrap()
		return {'FINISHED'}
class BuildMap(bpy.types.Operator):
	bl_idname = "scene.build_map"
	bl_label = "Build Map"
	bool_op = bpy.props.StringProperty(name="bool_op", default='INTERSECT')

	def execute(self, context):
		# worldspawn: all top-level rooms and brushes (children of brush
		# entities are handled separately below)
		top_level = [o for o in context.scene.objects if not (o.parent and o.parent.bfg.type == 'BRUSH_ENTITY')]
		rooms = [o for o in top_level if o.bfg.type in ['2D_ROOM', '3D_ROOM']]
		brushes = [o for o in top_level if o.bfg.type == 'BRUSH']
		build_map(context, rooms, brushes, "_worldspawn")
		# each brush entity gets its own map object built from its child brushes
		for obj in context.scene.objects:
			if obj.bfg.type != 'BRUSH_ENTITY':
				continue
			children = [c for c in obj.children if c.bfg.type == 'BRUSH']
			if children:
				build_map(context, [], children, "_" + obj.name)
		return {'FINISHED'}
################################################################################
## UV UNWRAPPING
################################################################################
def auto_unwrap(mesh, obj_location=Vector(), obj_scale=Vector((1, 1, 1))):
	"""Axis-aligned planar UV projection, scaled to each face's texture size.

	Each face is projected along the dominant axis of its normal ('x', 'y',
	'z' and their negatives), with signs chosen per-axis so textures aren't
	mirrored. obj_location/obj_scale let callers unwrap in worldspace. In
	edit mode only selected faces are unwrapped and pinned UVs are skipped.
	The shared default Vector arguments are never mutated here, so the
	mutable-default sharing is harmless.
	"""
	if bpy.context.mode == 'EDIT_MESH':
		bm = bmesh.from_edit_mesh(mesh)
	else:
		bm = bmesh.new()
		bm.from_mesh(mesh)
	uv_layer = bm.loops.layers.uv.verify()
	bm.faces.layers.tex.verify() # currently blender needs both layers.
	for f in bm.faces:
		if bpy.context.mode == 'EDIT_MESH' and not f.select:
			continue # ignore faces that aren't selected in edit mode
		# fallback texture size when the material has no usable image texture
		texture_size = (128, 128)
		mat = mesh.materials[f.material_index]
		if len(mat.texture_slots) > 0:
			tex = bpy.data.textures[mat.texture_slots[0].name]
			if hasattr(tex, "image") and tex.image: # if the texture type isn't set to "Image or Movie", the image attribute won't exist
				texture_size = tex.image.size
		# find the dominant axis of the face normal (abs comparison)
		nX = f.normal.x
		nY = f.normal.y
		nZ = f.normal.z
		if nX < 0:
			nX = nX * -1
		if nY < 0:
			nY = nY * -1
		if nZ < 0:
			nZ = nZ * -1
		face_normal_largest = nX
		face_direction = 'x'
		if face_normal_largest < nY:
			face_normal_largest = nY
			face_direction = 'y'
		if face_normal_largest < nZ:
			face_normal_largest = nZ
			face_direction = 'z'
		# split into positive/negative direction for per-sign mirroring
		if face_direction == 'x':
			if f.normal.x < 0:
				face_direction = '-x'
		if face_direction == 'y':
			if f.normal.y < 0:
				face_direction = '-y'
		if face_direction == 'z':
			if f.normal.z < 0:
				face_direction = '-z'
		# one UV unit per texture repeat, in game units, scaled by the global UV scale
		scale_x = _scale_to_game / texture_size[0] * (1.0 / bpy.context.scene.bfg.global_uv_scale)
		scale_y = _scale_to_game / texture_size[1] * (1.0 / bpy.context.scene.bfg.global_uv_scale)
		for l in f.loops:
			luv = l[uv_layer]
			if luv.pin_uv is not True:
				if face_direction == 'x':
					luv.uv.x = ((l.vert.co.y * obj_scale[1]) + obj_location[1]) * scale_x
					luv.uv.y = ((l.vert.co.z * obj_scale[2]) + obj_location[2]) * scale_y
				if face_direction == '-x':
					luv.uv.x = (((l.vert.co.y * obj_scale[1]) + obj_location[1]) * scale_x) * -1
					luv.uv.y = ((l.vert.co.z * obj_scale[2]) + obj_location[2]) * scale_y
				if face_direction == 'y':
					luv.uv.x = (((l.vert.co.x * obj_scale[0]) + obj_location[0]) * scale_x) * -1
					luv.uv.y = ((l.vert.co.z * obj_scale[2]) + obj_location[2]) * scale_y
				if face_direction == '-y':
					luv.uv.x = ((l.vert.co.x * obj_scale[0]) + obj_location[0]) * scale_x
					luv.uv.y = ((l.vert.co.z * obj_scale[2]) + obj_location[2]) * scale_y
				if face_direction == 'z':
					luv.uv.x = ((l.vert.co.x * obj_scale[0]) + obj_location[0]) * scale_x
					luv.uv.y = ((l.vert.co.y * obj_scale[1]) + obj_location[1]) * scale_y
				if face_direction == '-z':
					# NOTE(review): the "* 1" is suspicious next to the other
					# branches' "* -1" mirroring - verify '-z' faces aren't
					# mirrored horizontally before "fixing" it
					luv.uv.x = (((l.vert.co.x * obj_scale[0]) + obj_location[0]) * scale_x) * 1
					luv.uv.y = (((l.vert.co.y * obj_scale[1]) + obj_location[1]) * scale_y) * -1
	if bpy.context.mode == 'EDIT_MESH':
		# bm came from from_edit_mesh: push changes back, don't free it
		bmesh.update_edit_mesh(mesh)
	else:
		bm.to_mesh(mesh)
		bm.free()
		mesh.update()
class AutoUnwrap(bpy.types.Operator):
	"""Auto-unwrap the active object's UVs using the world-axis projection in auto_unwrap()."""
	bl_idname = "object.auto_uv_unwrap"
	bl_label = "Auto Unwrap"
	bl_options = {'REGISTER','UNDO'}
	@classmethod
	def poll(cls, context):
		# Works from both object mode and mesh-edit mode.
		return bpy.context.mode in ['EDIT_MESH', 'OBJECT']
	def execute(self, context):
		obj = context.active_object
		auto_unwrap(obj.data, obj.location, obj.scale)
		return {'FINISHED'}
class FitUV(bpy.types.Operator):
	"""Fit the selected face UVs to the texture dimensions along the specified axis"""
	bl_idname = "object.uv_fit"
	bl_label = "Fit UV"
	bl_options = {'REGISTER','UNDO'}
	# 'HORIZONTAL', 'VERTICAL' or 'BOTH'.
	axis = bpy.props.StringProperty(name="Axis", default='BOTH')
	@classmethod
	def poll(cls, context):
		return context.mode == 'EDIT_MESH'
	def execute(self, context):
		obj = context.active_object
		bm = bmesh.from_edit_mesh(obj.data)
		uv_layer = bm.loops.layers.uv.active
		if not uv_layer:
			return {'CANCELLED'}
		# Hoist loop-invariant lookups out of the per-face/per-loop loops.
		repeat = context.scene.bfg.uv_fit_repeat
		fit_horizontal = self.axis in ['HORIZONTAL', 'BOTH']
		fit_vertical = self.axis in ['VERTICAL', 'BOTH']
		for f in bm.faces:
			if not f.select:
				continue
			# Pass 1: min/max UV extents of this face on the requested axes.
			# (Renamed from "min"/"max"/"range", which shadowed the builtins.)
			lo = [None, None]
			hi = [None, None]
			for l in f.loops:
				uv = l[uv_layer].uv
				if fit_horizontal:
					lo[0] = min_nullable(lo[0], uv.x)
					hi[0] = max_nullable(hi[0], uv.x)
				if fit_vertical:
					lo[1] = min_nullable(lo[1], uv.y)
					hi[1] = max_nullable(hi[1], uv.y)
			# Pass 2: rescale so the face's extent spans "repeat" texture tiles.
			for l in f.loops:
				uv = l[uv_layer].uv
				if fit_horizontal:
					extent = hi[0] - lo[0]
					if extent != 0: # will be 0 if UVs are uninitialized
						uv.x = uv.x / extent * repeat
				if fit_vertical:
					extent = hi[1] - lo[1]
					if extent != 0: # will be 0 if UVs are uninitialized
						uv.y = uv.y / extent * repeat
		bmesh.update_edit_mesh(obj.data)
		return {'FINISHED'}
class FlipUV(bpy.types.Operator):
	"""Flip the selected face UVs along the specified axis"""
	bl_idname = "object.uv_flip"
	bl_label = "Flip UV"
	bl_options = {'REGISTER','UNDO'}
	# 'HORIZONTAL' mirrors U, 'VERTICAL' mirrors V.
	axis = bpy.props.StringProperty(name="Axis", default='HORIZONTAL')
	@classmethod
	def poll(cls, context):
		return context.mode == 'EDIT_MESH'
	def execute(self, context):
		# Temporarily switch this area to the image editor so the uv/transform
		# operators run with a UV context, then restore the previous area type.
		prev_area = context.area.type
		context.area.type = 'IMAGE_EDITOR'
		bpy.ops.uv.select_all(action='SELECT')
		if self.axis == 'HORIZONTAL':
			# Negative scale on one axis mirrors the UVs.
			bpy.ops.transform.resize(value=(-1, 1, 1))
		elif self.axis == 'VERTICAL':
			bpy.ops.transform.resize(value=(1, -1, 1))
		context.area.type = prev_area
		return {'FINISHED'}
class NudgeUV(bpy.types.Operator):
	"""Nudge the selected face UVs in the specified direction"""
	bl_idname = "object.uv_nudge"
	bl_label = "Nudge UV"
	bl_options = {'REGISTER','UNDO'}
	# One of 'LEFT', 'RIGHT', 'UP', 'DOWN'; step size comes from scene.bfg.uv_nudge_increment.
	dir = bpy.props.StringProperty(name="Direction", default='LEFT')
	@classmethod
	def poll(cls, context):
		return context.mode == 'EDIT_MESH'
	def execute(self, context):
		# Run the translation through the image editor so it applies to UVs.
		prev_area = context.area.type
		context.area.type = 'IMAGE_EDITOR'
		bpy.ops.uv.select_all(action='SELECT')
		# NOTE(review): translation signs are inverted relative to UV axes
		# (LEFT moves +U, UP moves -V) — presumably so the *texture* appears
		# to move in the named direction; confirm before changing.
		if self.dir == 'LEFT':
			bpy.ops.transform.translate(value=(context.scene.bfg.uv_nudge_increment, 0, 0))
		elif self.dir == 'RIGHT':
			bpy.ops.transform.translate(value=(-context.scene.bfg.uv_nudge_increment, 0, 0))
		elif self.dir == 'UP':
			bpy.ops.transform.translate(value=(0, -context.scene.bfg.uv_nudge_increment, 0))
		elif self.dir == 'DOWN':
			bpy.ops.transform.translate(value=(0, context.scene.bfg.uv_nudge_increment, 0))
		context.area.type = prev_area
		return {'FINISHED'}
def is_uv_flipped(context):
	"""Return True when the first selected face's UV winding is mirrored.

	Only the first selected face is inspected. With no active UV layer, or
	no selected face, the UVs are reported as not flipped.
	"""
	bm = bmesh.from_edit_mesh(context.active_object.data)
	uv_layer = bm.loops.layers.uv.active
	if not uv_layer:
		return False
	for face in bm.faces:
		if face.select:
			uv0 = face.loops[0][uv_layer].uv
			uv1 = face.loops[1][uv_layer].uv
			uv2 = face.loops[2][uv_layer].uv
			# Negative 2D cross product of consecutive edges => mirrored winding.
			return (uv1 - uv0).cross(uv2 - uv1) < 0
	return False
class RotateUV(bpy.types.Operator):
	"""Rotate the selected face UVs"""
	bl_idname = "object.uv_rotate"
	bl_label = "Rotate UV"
	bl_options = {'REGISTER','UNDO'}
	# NOTE(review): only 'LEFT'/'RIGHT' are handled below; the default
	# 'HORIZONTAL' looks copy-pasted from FlipUV — callers always set dir.
	dir = bpy.props.StringProperty(name="Direction", default='HORIZONTAL')
	@classmethod
	def poll(cls, context):
		return context.mode == 'EDIT_MESH'
	def execute(self, context):
		# Run the rotation through the image editor so it applies to UVs.
		prev_area = context.area.type
		context.area.type = 'IMAGE_EDITOR'
		bpy.ops.uv.select_all(action='SELECT')
		degrees = context.scene.bfg.uv_rotate_degrees
		if self.dir == 'RIGHT':
			degrees *= -1
		if is_uv_flipped(context):
			degrees *= -1 # swap left and right if the face normal is flipped
		bpy.ops.transform.rotate(value=math.radians(degrees))
		context.area.type = prev_area
		return {'FINISHED'}
################################################################################
## GUI PANELS
################################################################################
class SettingsPanel(bpy.types.Panel):
	"""Tool-shelf panel: add-on configuration, import operators and display toggles."""
	bl_label = "Settings"
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'TOOLS'
	bl_category = "BFGForge"
	def draw(self, context):
		scene = context.scene
		col = self.layout.column(align=True)
		col.prop(scene.bfg, "game_path", "Path")
		col.prop(scene.bfg, "mod_dir")
		col.operator(ImportMaterials.bl_idname, ImportMaterials.bl_label, icon='MATERIAL')
		col.operator(ImportEntities.bl_idname, ImportEntities.bl_label, icon='POSE_HLT')
		# Two-column grid of viewport display toggles.
		flow = col.column_flow(2)
		flow.prop(scene.bfg, "wireframe_rooms")
		flow.prop(scene.bfg, "backface_culling")
		flow.prop(scene.bfg, "show_entity_names")
		flow.prop(scene.bfg, "hide_bad_materials")
		flow.prop(scene.bfg, "shadeless_materials")
		col.prop(context.scene.bfg, "global_uv_scale")
class CreatePanel(bpy.types.Panel):
	"""Tool-shelf panel: map building and room/brush/entity creation operators."""
	bl_label = "Create"
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'TOOLS'
	bl_category = "BFGForge"
	def draw(self, context):
		scene = context.scene
		col = self.layout.column(align=True)
		row = col.row(align=True)
		row.operator(BuildMap.bl_idname, "Build Map", icon='MOD_BUILD').bool_op = 'UNION'
		row.prop(context.scene.bfg, "map_layer")
		col.operator(AddRoom.bl_idname, "Add 2D Room", icon='SURFACE_NCURVE')
		col.operator(AddBrush.bl_idname, "Add 3D Room", icon='SNAP_FACE').s_type = '3D_ROOM'
		col.operator(AddBrush.bl_idname, "Add Brush", icon='SNAP_VOLUME').s_type = 'BRUSH'
		col = self.layout.column()
		# The entity picker is only useful once entities have been imported.
		if len(scene.bfg.entities) > 0:
			row = col.row(align=True)
			row.prop_search(scene.bfg, "active_entity", scene.bfg, "entities", "", icon='POSE_HLT')
			row.operator(ShowEntityDescription.bl_idname, "", icon='INFO')
			row.operator(AddEntity.bl_idname, "", icon='ZOOMIN')
		col.operator(AddLight.bl_idname, AddLight.bl_label, icon='LAMP_POINT')
		col.operator(AddStaticModel.bl_idname, AddStaticModel.bl_label, icon='MESH_MONKEY')
class MaterialPanel(bpy.types.Panel):
	"""Tool-shelf panel: material decl browser and assignment buttons."""
	bl_label = "Material"
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'TOOLS'
	bl_category = "BFGForge"
	def draw(self, context):
		scene = context.scene
		# Nothing to show until materials have been imported.
		if len(scene.bfg.material_decls) > 0:
			col = self.layout.column()
			col.prop_search(scene.bfg, "active_material_decl_path", scene.bfg, "material_decl_paths", "", icon='MATERIAL')
			col.template_icon_view(scene.bfg, "active_material_decl")
			col.prop(scene.bfg, "active_material_decl", "")
			obj = context.active_object
			if obj and len(context.selected_objects) > 0:
				if obj.bfg.type == '2D_ROOM':
					# 2D rooms assign materials per surface.
					col.label("Assign:", icon='MATERIAL')
					row = col.row(align=True)
					row.operator(AssignMaterial.bl_idname, "Ceiling").where = 'CEILING'
					row.operator(AssignMaterial.bl_idname, "Wall").where = 'WALL'
					row.operator(AssignMaterial.bl_idname, "Floor").where = 'FLOOR'
					row.operator(AssignMaterial.bl_idname, "All").where = 'ALL'
				elif hasattr(obj.data, "materials") or len(context.selected_objects) > 1: # don't hide if multiple selections
					col.operator(AssignMaterial.bl_idname, AssignMaterial.bl_label, icon='MATERIAL')
class ObjectPanel(bpy.types.Panel):
	"""Tool-shelf panel: context-sensitive properties for the active object."""
	bl_label = "Object"
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'TOOLS'
	bl_category = "BFGForge"
	def draw_object_label(self, col, obj):
		# One-line header: icon + "<BFG type>: <object name>".
		obj_icon = 'OBJECT_DATAMODE'
		if obj.type == 'LAMP':
			obj_icon = 'LAMP_POINT'
		elif obj.bfg.type in ['BRUSH_ENTITY','ENTITY']:
			obj_icon = 'POSE_HLT'
		obj_label = ""
		if obj.bfg.type != 'NONE':
			# Human-readable enum item name, e.g. "2D Room".
			obj_label += obj.bfg.bl_rna.properties['type'].enum_items[obj.bfg.type].name + ": "
		obj_label += obj.name
		col.label(obj_label, icon=obj_icon)
	def draw_entity_properties(self, context, col, obj):
		# Entity key/values live in obj.game.properties; the name prefixes
		# "inherited_"/"custom_" track where each property came from.
		col.prop(context.scene.bfg, "show_inherited_entity_props")
		for prop in obj.game.properties:
			is_inherited = prop.name.startswith("inherited_")
			if not context.scene.bfg.show_inherited_entity_props and is_inherited:
				continue # user doesn't want to see inherited props
			is_custom = prop.name.startswith("custom_")
			row = col.row(align=True)
			name = prop.name
			if is_inherited:
				name = name[len("inherited_"):] # remove the prefix
			elif is_custom:
				name = name[len("custom_"):] # remove the prefix
			row.label(name + ":")
			row.prop(prop, "value", text="")
			props = row.operator(ShowEntityPropertyDescription.bl_idname, "", icon='INFO')
			props.classname = obj.bfg.classname
			props.name = name
			if is_custom:
				# custom properties can be removed
				row.operator(RemoveCustomEntityProperty.bl_idname, "", icon='X').name = prop.name
		col.operator(NewCustomEntityProperty.bl_idname, NewCustomEntityProperty.bl_label, icon='ZOOMIN')
	def draw(self, context):
		obj = context.active_object
		if obj and len(context.selected_objects) > 0:
			col = self.layout.column()
			self.draw_object_label(col, obj)
			if obj.bfg.type == '2D_ROOM':
				sub = col.column(align=True)
				sub.prop(obj.bfg, "room_height")
				sub.operator(CopyRoom.bl_idname, "Copy Room Height", icon='PASTEFLIPUP').copy_op = 'HEIGHT'
				# Read-only display of the room's current materials.
				sub = col.column()
				sub.enabled = False
				sub.prop(obj.bfg, "ceiling_material", "Ceiling")
				sub.prop(obj.bfg, "wall_material", "Wall")
				sub.prop(obj.bfg, "floor_material", "Floor")
				col.label("Copy Materials:", icon='PASTEFLIPUP')
				row = col.row(align=True)
				row.operator(CopyRoom.bl_idname, "Ceiling").copy_op = 'MATERIAL_CEILING'
				row.operator(CopyRoom.bl_idname, "Wall").copy_op = 'MATERIAL_WALL'
				row.operator(CopyRoom.bl_idname, "Floor").copy_op = 'MATERIAL_FLOOR'
				row.operator(CopyRoom.bl_idname, "All").copy_op = 'MATERIAL_ALL'
				col.operator(ConvertRoom.bl_idname, ConvertRoom.bl_label, icon='SNAP_FACE')
			elif obj.bfg.type in ['3D_ROOM', 'BRUSH']:
				col.prop(obj.bfg, "auto_unwrap")
			elif obj.bfg.type in ['BRUSH_ENTITY','ENTITY']:
				self.draw_entity_properties(context, col, obj)
			elif obj.type == 'LAMP':
				row = col.row()
				row.prop(obj, "bfg_light_radius")
				row.prop(obj.data, "color", "")
				col.prop(obj.data, "use_specular")
				col.prop(obj.data, "use_diffuse")
				col.template_icon_view(obj.bfg, "light_material")
				col.prop(obj.bfg, "light_material", "")
			if hasattr(obj.data, "materials") or len(context.selected_objects) > 1: # don't hide if multiple selections
				col.operator(RefreshMaterials.bl_idname, RefreshMaterials.bl_label, icon='MATERIAL')
			# if this object is part of a brush entity (i.e. a child of one), show the brush entity properties
			if obj.parent and obj.parent.bfg.type == 'BRUSH_ENTITY':
				self.draw_object_label(col, obj.parent)
				self.draw_entity_properties(context, col, obj.parent)
class UvPanel(bpy.types.Panel):
	"""Tool-shelf panel: UV unwrap/nudge/rotate/flip/fit tools for mesh objects."""
	bl_label = "UV"
	bl_space_type = 'VIEW_3D'
	bl_region_type = 'TOOLS'
	bl_category = "BFGForge"
	def draw(self, context):
		obj = context.active_object
		# Only meaningful for selected mesh objects.
		if not obj or len(context.selected_objects) == 0 or not hasattr(obj.data, "materials"):
			return
		col = self.layout.column(align=True)
		col.operator(AutoUnwrap.bl_idname, AutoUnwrap.bl_label, icon='UV_FACESEL')
		# The remaining tools operate on selected faces, so require edit mode.
		if context.mode != 'EDIT_MESH':
			return
		col.separator()
		col.label("Nudge", icon='FORWARD')
		row = col.row(align=True)
		row.operator(NudgeUV.bl_idname, "Left").dir = 'LEFT'
		row.operator(NudgeUV.bl_idname, "Right").dir = 'RIGHT'
		row = col.row(align=True)
		row.operator(NudgeUV.bl_idname, "Up").dir = 'UP'
		row.operator(NudgeUV.bl_idname, "Down").dir = 'DOWN'
		col.prop(context.scene.bfg, "uv_nudge_increment", "Increment")
		col.separator()
		col.label("Rotate", icon='FILE_REFRESH')
		row = col.row(align=True)
		row.operator(RotateUV.bl_idname, "Left").dir = 'LEFT'
		row.operator(RotateUV.bl_idname, "Right").dir = 'RIGHT'
		col.prop(context.scene.bfg, "uv_rotate_degrees", "Degrees")
		col.separator()
		col.label("Flip", icon='LOOP_BACK')
		row = col.row(align=True)
		row.operator(FlipUV.bl_idname, "Horizontal").axis = 'HORIZONTAL'
		row.operator(FlipUV.bl_idname, "Vertical").axis = 'VERTICAL'
		col.separator()
		col.label("Fit", icon='FULLSCREEN_ENTER')
		row = col.row(align=True)
		row.operator(FitUV.bl_idname, "Horizontal").axis = 'HORIZONTAL'
		row.operator(FitUV.bl_idname, "Vertical").axis = 'VERTICAL'
		row.operator(FitUV.bl_idname, "Both").axis = 'BOTH'
		col.prop(context.scene.bfg, "uv_fit_repeat", "Repeat")
################################################################################
## PROPERTIES
################################################################################
def update_wireframe_rooms(self, context):
	"""Scene-property update callback: toggle wire/textured drawing on all rooms and brushes."""
	draw_type = 'WIRE' if context.scene.bfg.wireframe_rooms else 'TEXTURED'
	room_types = ('2D_ROOM', '3D_ROOM', 'BRUSH')
	for scene_object in context.scene.objects:
		if scene_object.bfg.type in room_types:
			scene_object.draw_type = draw_type
def get_backface_culling(self):
	# Proxy getter: the scene property mirrors the active 3D view's own
	# backface-culling flag instead of storing separate state.
	return bpy.context.space_data.show_backface_culling
def set_backface_culling(self, value):
	# Proxy setter: writes straight through to the active 3D view.
	bpy.context.space_data.show_backface_culling = value
def update_show_entity_names(self, context):
	"""Scene-property update callback: show/hide viewport name labels on entity objects."""
	show = context.scene.bfg.show_entity_names
	for scene_object in context.scene.objects:
		if scene_object.bfg.type == 'ENTITY':
			scene_object.show_name = show
def update_hide_bad_materials(self, context):
	# Invalidate both preview caches so thumbnails are rebuilt with the new
	# filter. NOTE(review): the material cache uses "force_refresh" while the
	# light cache uses "needs_refresh" — this matches the attributes created
	# in register(), but confirm the asymmetry is intentional.
	preview_collections["material"].force_refresh = True
	preview_collections["light"].needs_refresh = True
def update_shadeless_materials(self, context):
	# Apply the "fullbright" toggle to every imported material. Material names
	# are decl paths, so dirname() recovers the decl's folder; editor-only
	# materials and the special "_object_color" material are left alone.
	for mat in bpy.data.materials:
		mat_path = os.path.dirname(mat.name)
		if mat.name != "_object_color" and mat_path not in _editor_material_paths:
			mat.use_shadeless = context.scene.bfg.shadeless_materials
class BfgScenePropertyGroup(bpy.types.PropertyGroup):
	"""Per-scene add-on state; attached as bpy.types.Scene.bfg in register()."""
	# Game installation / mod location.
	game_path = bpy.props.StringProperty(name="RBDOOM-3-BFG Path", description="RBDOOM-3-BFG Path", subtype='DIR_PATH')
	mod_dir = bpy.props.StringProperty(name="Mod Directory")
	# Viewport display toggles (update callbacks apply them to existing data).
	wireframe_rooms = bpy.props.BoolProperty(name="Wireframe rooms", default=True, update=update_wireframe_rooms)
	backface_culling = bpy.props.BoolProperty(name="Backface culling", get=get_backface_culling, set=set_backface_culling)
	show_entity_names = bpy.props.BoolProperty(name="Show entity names", default=False, update=update_show_entity_names)
	hide_bad_materials = bpy.props.BoolProperty(name="Hide bad materials", description="Hide materials with missing diffuse textures", default=True, update=update_hide_bad_materials)
	shadeless_materials = bpy.props.BoolProperty(name="Fullbright materials", description="Disable lighting on materials", default=True, update=update_shadeless_materials)
	show_inherited_entity_props = bpy.props.BoolProperty(name="Show inherited properties", description="Show inherited entity properties", default=False)
	map_layer = bpy.props.IntProperty(name="Layer", default=0, min=0, max=19)
	# Imported material decls and entities (filled by the Import operators).
	material_decl_paths = bpy.props.CollectionProperty(type=MaterialDeclPathPropGroup)
	active_material_decl_path = bpy.props.StringProperty(name="", default="")
	material_decls = bpy.props.CollectionProperty(type=MaterialDeclPropGroup)
	active_material_decl = bpy.props.EnumProperty(name="", items=material_decl_preview_items)
	entities = bpy.props.CollectionProperty(type=EntityPropGroup)
	active_entity = bpy.props.StringProperty(name="Active Entity", default="")
	model_defs = bpy.props.CollectionProperty(type=ModelDefPropGroup)
	# UV tool parameters (used by the UV panel operators above).
	global_uv_scale = bpy.props.FloatProperty(name="Global UV Scale", description="Scale Automatically unwrapped UVs by this amount", default=0.5, step=0.1, min=0.1, max=10)
	uv_fit_repeat = bpy.props.FloatProperty(name="UV Fit Repeat", default=1.0, step=0.1, min=0.1, max=10)
	uv_nudge_increment = bpy.props.FloatProperty(name="Nudge Increment", default=_scale_to_blender)
	uv_rotate_degrees = bpy.props.FloatProperty(name="UV Rotate Degrees", default=90.0, step=10.0, min=1.0, max=90.0)
class BfgObjectPropertyGroup(bpy.types.PropertyGroup):
	"""Per-object add-on state; attached as bpy.types.Object.bfg in register()."""
	auto_unwrap = bpy.props.BoolProperty(name="Auto unwrap on Build Map", description="Auto Unwrap this object when the map is built", default=True)
	# Entity metadata (only meaningful when type is ENTITY/BRUSH_ENTITY).
	classname = bpy.props.StringProperty(name="Classname", default="")
	entity_model = bpy.props.StringProperty(name="Entity model", default="")
	# 2D room surface settings; updates re-extrude/re-skin the room.
	room_height = bpy.props.FloatProperty(name="Room Height", default=4, step=20, precision=1, update=update_room)
	floor_material = bpy.props.StringProperty(name="Floor Material", update=update_room)
	wall_material = bpy.props.StringProperty(name="Wall Material", update=update_room)
	ceiling_material = bpy.props.StringProperty(name="Ceiling Material", update=update_room)
	light_material = bpy.props.EnumProperty(name="", items=light_material_preview_items)
	# What kind of BFG Forge object this is; drives the Object panel UI.
	type = bpy.props.EnumProperty(items=[
		('NONE', "None", ""),
		('2D_ROOM', "2D Room", ""),
		('3D_ROOM', "3D Room", ""),
		('BRUSH', "Brush", ""),
		('ENTITY', "Entity", ""),
		('BRUSH_ENTITY', "Brush Entity", ""),
		('STATIC_MODEL', "Static Model", "")
	], name="BFG Forge Object Type", default='NONE')
################################################################################
## MAIN
################################################################################
def register():
	"""Add-on registration: attach BFG Forge properties and create preview caches."""
	bpy.types.Scene.bfg = bpy.props.PointerProperty(type=BfgScenePropertyGroup)
	bpy.types.Object.bfg = bpy.props.PointerProperty(type=BfgObjectPropertyGroup)
	# not in BfgObjectPropertyGroup because get/set self object would be BfgObjectPropertyGroup, not bpy.types.Object
	bpy.types.Object.bfg_light_radius = bpy.props.FloatProperty(name="Radius", get=get_light_radius, set=set_light_radius)
	# Preview collection for material thumbnails.
	pcoll = bpy.utils.previews.new()
	pcoll.materials = ()
	pcoll.current_decl_path = ""
	pcoll.force_refresh = False
	preview_collections["material"] = pcoll
	# Preview collection for light material thumbnails.
	pcoll = bpy.utils.previews.new()
	pcoll.lights = ()
	pcoll.needs_refresh = True
	preview_collections["light"] = pcoll
def unregister():
	"""Add-on teardown: remove the registered properties and free preview caches."""
	del bpy.types.Scene.bfg
	del bpy.types.Object.bfg
	del bpy.types.Object.bfg_light_radius
	for pcoll in preview_collections.values():
		bpy.utils.previews.remove(pcoll)
	preview_collections.clear()
# Allows running the add-on directly from Blender's text editor.
if __name__ == "__main__":
	register()
| jpcy/bfg_forge | core.py | core.py | py | 71,093 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "bpy.context",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
45184258436 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
DEPRECATED = True
from google.appengine.ext import db
DEFAULT_GROUP = 'default'
def get_instance_settings_as_dict(widget_instance):
    '''
    get widget instance settings as dict which contains key-value pairs (both str/unicode).

    Args:
        widget_instance: WidgetInstance object.

    Returns:
        Settings as dict which both key and value are str/unicode.
    '''
    import logging
    # NOTE(review): fetch(100) caps the number of settings returned — confirm
    # 100 is a safe upper bound for a single widget instance.
    settings = WidgetInstanceSetting.all().filter('widget_instance ==', widget_instance).fetch(100)
    # (Renamed from "list", which shadowed the builtin.)
    d = dict((setting.setting_key, setting.setting_value) for setting in settings)
    # Lazy %-formatting: the string is only built if INFO logging is enabled.
    logging.info('get_instance_settings_as_dict: %s', d)
    return d
def update_instance_settings(widget_instance, setting_as_dict):
    '''
    Update instance settings.

    Replaces all existing settings: the old WidgetInstanceSetting entities are
    deleted first, then one new entity is stored per key/value pair.
    NOTE(review): delete + re-create is not transactional — a failure midway
    leaves the instance with partial settings.

    Args:
        widget_instance: WidgetInstance object.
        setting_as_dict: new settings as dict contains key as str and value as str or unicde.

    Returns:
        None
    '''
    group = widget_instance.widget_group
    db.delete(get_instance_settings(widget_instance))
    for name, value in setting_as_dict.items():
        # NOTE(review): entities are written with "setting_name" but read back
        # via "setting_key" in get_instance_settings_as_dict — confirm the
        # model defines both, otherwise one of the two is a latent bug.
        WidgetInstanceSetting(
                widget_group=group,
                widget_instance=widget_instance,
                setting_name=name,
                setting_value=value
        ).put()
| Albertnnn/express-me | src/widget/store.py | store.py | py | 1,446 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.info",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.delete",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 40,
"usage_type": "name"
}
] |
15956813860 | import os
import numpy as np
import pickle
import open3d as o3d
from absl import app
from absl import flags
flags.DEFINE_string('task', 'stack-block-pyramid', '')
flags.DEFINE_string('data_dir', './training_datasets/voxel_grids', '')
flags.DEFINE_string('data_source', './training_datasets/rgbd', '')
flags.DEFINE_integer('num_per_class', '100', '')
FLAGS = flags.FLAGS
def normalize(arr):
    """
    normalize an array to [0, 1]

    A constant array (max == min) would otherwise divide by zero; it is
    returned as all zeros instead, preserving shape and dtype semantics.
    """
    arr_min = arr.min()
    arr_max = arr.max()
    span = arr_max - arr_min
    if span == 0:
        # Degenerate case: every element equal — map everything to 0.
        return arr - arr_min
    return (arr - arr_min) / span
def load_rgb_segm_depth(path, num):
    """Load up to ``num`` pickled color and depth images per class directory.

    Walks ``path/<sub>/<positive|negative>/<color|depth>`` and unpickles at
    most ``num`` images per image type.

    Returns:
        (rgb_image_batches, depth_image_batches, file_name_batches,
        class_paths), one entry per <sub>/<data_type> combination.
        NOTE(review): file_names is appended during BOTH the color and depth
        passes, so each base name appears twice per batch — callers that only
        index the first ``num`` entries (as main() does) are unaffected.
    """
    rgb_image_batches = []
    # segm_image_batches = []
    depth_image_batches = []
    class_paths = []
    file_name_batches = []
    sub_paths = os.listdir(path)
    data_types = ['positive', 'negative']
    img_types = ['color', 'depth']
    for sub_path in sub_paths:
        for data_type in data_types:
            rgb_images = []
            # segm_images = []
            depth_images = []
            file_names = []
            class_path = os.path.join(path, sub_path, data_type)
            print(class_path)
            for img_type in img_types:
                img_path = os.path.join(class_path, img_type)
                images = os.listdir(img_path)
                for i ,img_name in enumerate(images):
                    # Cap at ``num`` images per image type.
                    if i == num:
                        break
                    with open(os.path.join(img_path, img_name), 'rb') as f:
                        # Strip the file extension to get the sample id.
                        file_names.append(img_name[:img_name.find('.')])
                        img = pickle.load(f)
                        if img_type == 'color':
                            rgb_images.append(img)
                        # elif img_type == 'segm':
                        #     segm_images.append(img)
                        elif img_type == 'depth':
                            depth_images.append(img)
            rgb_image_batches.append(rgb_images)
            # segm_image_batches.append(segm_images)
            depth_image_batches.append(depth_images)
            file_name_batches.append(file_names)
            class_paths.append(class_path)
    return rgb_image_batches, depth_image_batches, file_name_batches, class_paths
def rgbd_to_voxel_grids(rgb_img, depth_img):
    """Back-project one RGB-D image pair into a colored voxel grid.

    The point cloud is built with a hard-coded camera pose and intrinsics,
    cropped with hand-tuned bounds, voxelized at a fixed voxel size, and
    zero-padded to a fixed (x_dim, y_dim, z_dim, 3) array.
    NOTE(review): the camera pose, intrinsics, crop offsets and voxel_size
    are scene-specific magic numbers — confirm against the data-generation
    camera setup before reuse.
    """
    rgbd_img = o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.geometry.Image(rgb_img),
        o3d.geometry.Image(depth_img),
        convert_rgb_to_intensity=False)
    # Target grid dimensions; the voxelized cloud is padded up to these.
    # x_dim = 50
    # y_dim = 45
    # z_dim = 100
    x_dim = 35
    y_dim = 30
    z_dim = 70
    # # xyz
    # camera_pose = np.array([[-6.12323400e-17, -1.00000000e+00,  1.22464680e-16, 1],
    #                     [-7.07106781e-01,  1.29893408e-16,  7.07106781e-01, 0],
    #                     [-7.07106781e-01, -4.32978028e-17, -7.07106781e-01, 0.75],
    #                     [0,0,0,1]])
    #xzy
    camera_pose = np.array([[-6.12323400e-17, -1.22464680e-16,  1.00000000e+00,1 ],
                        [-7.07106781e-01, -7.07106781e-01, -1.29893408e-16,0],
                        [ 7.07106781e-01, -7.07106781e-01, -4.32978028e-17,0.75],
                        [0,0,0,1]])
    intrinsic_matrix = np.array([[450., 0, 320.],
                             [0, 450., 240.],
                             [0, 0, 1]])
    camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(640, 480, intrinsic_matrix)
    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        rgbd_img, camera_intrinsic, camera_pose)
    # o3d.visualization.draw_geometries([pcd])
    # Shrink the bounding box by hand-tuned margins to cut away background.
    min_bound = pcd.get_min_bound()
    max_bound = pcd.get_max_bound()
    min_bound[0] += 0.000
    max_bound[0] -= 0.0018
    min_bound[1] += 0.00000105
    max_bound[1] -= 0.000275
    min_bound[2] += 0.0011
    max_bound[2] -= 0.0011
    cropping_bound = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
    # Crop the point cloud using the bounding box
    cropped_point_cloud = pcd.crop(cropping_bound)
    # o3d.visualization.draw_geometries([cropped_point_cloud])
    # Get point cloud coordinates
    points = np.asarray(cropped_point_cloud.points)
    # Get point cloud colors
    colors = np.asarray(cropped_point_cloud.colors)
    # Define voxel size
    # voxel_size = 0.0000105
    voxel_size = 0.000015
    # Calculate voxel grid dimensions
    max_bound = np.max(points, axis=0)
    min_bound = np.min(points, axis=0)
    dimensions = np.ceil((max_bound - min_bound) / voxel_size)
    print(f'dimentsions: {dimensions}')
    # Calculate voxel grid indices for each point
    indices = np.floor((points - min_bound) / voxel_size)
    # Initialize voxel grid with zeros
    voxel_grid = np.zeros((int(dimensions[0]), int(dimensions[1]), int(dimensions[2]), 3))
    # Fill voxel grid with colors
    for i in range(points.shape[0]):
        voxel_grid[int(indices[i][0]), int(indices[i][1]), int(indices[i][2])] = colors[i]
    # Convert voxel grid to 3D numpy array
    voxel_grid = np.asarray(voxel_grid)
    # Zero-pad up to the fixed output shape so all samples align.
    voxel_grid = np.pad(voxel_grid, ((0, x_dim-voxel_grid.shape[0]), (0, y_dim-voxel_grid.shape[1]), (0, z_dim-voxel_grid.shape[2]), (0, 0)), mode='constant')
    return voxel_grid
def main(unused_argv):
    """Convert each loaded RGB-D image pair into a voxel grid and save it as .npy."""
    rgb_image_batches, depth_image_batches, file_name_batches, class_paths = load_rgb_segm_depth(os.path.join(FLAGS.data_source, FLAGS.task), FLAGS.num_per_class)
    for idx, class_path in enumerate(class_paths):
        for n in range(FLAGS.num_per_class):
            print(f'Processing {class_path}...')
            voxel_grid = rgbd_to_voxel_grids(rgb_image_batches[idx][n], depth_image_batches[idx][n])
            # Collapse RGB to a single intensity channel.
            voxel_grid = np.mean(voxel_grid, axis=-1, keepdims=True)
            # voxel_grid = voxel_grid.reshape(25, 20, 50)
            save_path = os.path.join(FLAGS.data_dir, class_path)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            # save_path = os.path.join(file_path, file_name[:file_name.find('.')] + '.npy')
            # voxel_grid = np.mean(voxel_grid, axis=-1, keepdims=True)
            np.save(os.path.join(save_path, file_name_batches[idx][n] + '.npy'), voxel_grid)
if __name__ == '__main__':
    app.run(main)  # absl entry point: parses FLAGS, then calls main
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "absl.flags"... |
32107199860 | from zlib import adler32
from functools import wraps
from flask_restful import Resource as DefaultResurce, ResponseBase, OrderedDict, request, unpack, marshal
VALIDATION_ERROR_MESSAGE = 'Fields validation error'
class marshal_with(object):
    """A decorator that apply marshalling to the return values of your methods.

    Unlike flask_restful's stock ``marshal_with``, error responses (non-200
    status codes or payloads with ``success: False``) are passed through
    unmarshalled so the serialized error structure is preserved.

    >>> from flask_restful import fields, marshal_with
    >>> mfields = { 'a': fields.Raw }
    >>> @marshal_with(mfields)
    ... def get():
    ...     return { 'a': 100, 'b': 'foo' }
    ...
    ...
    >>> get()
    OrderedDict([('a', 100)])

    >>> @marshal_with(mfields, envelope='data')
    ... def get():
    ...     return { 'a': 100, 'b': 'foo' }
    ...
    ...
    >>> get()
    OrderedDict([('data', OrderedDict([('a', 100)]))])

    see :meth:`flask_restful.marshal`
    """
    def __init__(self, fields, envelope=None):
        """
        :param fields: a dict of whose keys will make up the final
                       serialized response output
        :param envelope: optional key that will be used to envelop the serialized
                         response
        """
        self.fields = fields
        self.envelope = envelope
    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            resp = f(*args, **kwargs)
            if isinstance(resp, tuple):
                data, code, headers = unpack(resp)
                # Skip marshalling for errors: non-200 codes or an explicit
                # success=False payload are returned untouched.
                # NOTE(review): assumes ``data`` is a dict here — confirm all
                # tuple-returning endpoints return dict bodies.
                if code != 200 or data.get('success') is False:
                    return resp
                return marshal(data, self.fields, self.envelope), code, headers
            else:
                return marshal(resp, self.fields, self.envelope)
        return wrapper
class Resource(DefaultResurce):
    """flask_restful Resource that normalizes success/error payloads.

    Successful dict responses get ``success: True`` injected; failed ones
    (``success: False``) have their ``errors`` mapping rewritten so every
    message carries a stable Adler-32 code alongside the text.
    """

    def __serialize_errors(self, resp):
        """reformat the error structure and add the error codes"""
        if not resp.get('errors'):
            return resp
        elif resp['errors'].get('global_error'):
            # A global error is present: reformat it in place.
            # NOTE(review): any field errors present alongside a global_error
            # are left as raw strings in this branch — confirm that mix
            # never occurs in practice.
            ge_message = resp['errors'].pop('global_error')
            resp['errors']['global_error'] = {
                'code': adler32(ge_message.encode('UTF-16')),
                'message': ge_message
            }
            return resp
        else:
            # Field errors only: wrap each message with its code...
            field_errors = {}
            for field, message in resp['errors'].items():
                field_errors[field] = {
                    'code': adler32(message.encode('UTF-16')),
                    'message': message
                }
            # ...and attach the generic validation failure as the global
            # error. (The original re-checked 'global_error' *after*
            # replacing resp['errors'], so that branch could never fire and
            # the default message was always used; the dead branch is gone.)
            resp['errors'] = {
                'fields': field_errors,
                'global_error': {
                    'code': adler32(VALIDATION_ERROR_MESSAGE.encode('UTF-16')),
                    'message': VALIDATION_ERROR_MESSAGE
                }
            }
            return resp

    def dispatch_request(self, *args, **kwargs):
        # Taken from flask
        #noinspection PyUnresolvedReferences
        meth = getattr(self, request.method.lower(), None)
        if meth is None and request.method == 'HEAD':
            meth = getattr(self, 'get', None)
        assert meth is not None, 'Unimplemented method %r' % request.method

        for decorator in self.method_decorators:
            meth = decorator(meth)

        resp = meth(*args, **kwargs)

        # adds default success key
        if isinstance(resp, dict):
            if resp.get('success', None) is None:
                resp['success'] = True
            elif resp.get('success', True) is False:
                resp = self.__serialize_errors(resp)
        elif isinstance(resp, tuple):
            if resp[0].get('success', True) is False:
                # BUG FIX: the original did ``list(resp)[0] = ...``, which
                # mutated a throwaway copy and left the tuple unchanged, so
                # tuple responses were never serialized. Rebuild the tuple
                # with the serialized payload instead.
                resp = (self.__serialize_errors(resp[0]),) + resp[1:]

        if isinstance(resp, ResponseBase):  # There may be a better way to test
            return resp

        representations = self.representations or OrderedDict()
        #noinspection PyUnresolvedReferences
        mediatype = request.accept_mimetypes.best_match(representations, default=None)
        if mediatype in representations:
            data, code, headers = unpack(resp)
            resp = representations[mediatype](data, code, headers)
            resp.headers['Content-Type'] = mediatype
            return resp

        return resp
| annacorobco/flask-formula | v01/dockerfiles/backend/app/libs/controllers.py | controllers.py | py | 4,630 | python | en | code | null | github-code | 1 | [
{
"api_name": "flask_restful.unpack",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask_restful.marshal",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask_restful.marshal",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "func... |
73948599075 | import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import FixedLocator, FixedFormatter
def log_histogram(data, name):
    """
    Create ridgeline plots, where each individual plot is a
    log-y scaled histogram.

    data: 2D Numpy array
        Each row contains all the values to include in a single histogram.
        0th row will be in the lowest ridgeline plot.
        Every Nth row will be a separate histogram. N is selected according
        to n_target_rows, below to avoid overwhelming the viewer.
    name: str
        A name stem for the results image; the figure is written to
        "hist_<name>.png" in the working directory.
    """
    dpi = 300
    # n_bins = 50
    n_bins = 100
    n_target_rows = 16  # keep at most ~16 ridgelines so the plot stays legible

    fig = plt.figure()
    ax = fig.gca()

    # Thin the rows down to roughly n_target_rows evenly spaced epochs.
    # (Unused locals from the original — border, eps, x_range/x_border,
    # y_range/y_border, n_cols — have been removed.)
    n_rows = data.shape[0]
    d_keep = np.maximum(1, np.floor(n_rows / n_target_rows))
    i_keep = np.arange(0, n_rows, d_keep, dtype=int)
    data = data[i_keep, :]
    n_rows = data.shape[0]

    x_min = np.min(data)
    x_max = np.max(data)
    bin_edges = np.linspace(x_min, x_max, n_bins + 1)
    bin_centers = ((bin_edges[:-1] + bin_edges[1:]) / 2)

    ax.set_ylabel("Epoch")

    scale = .1  # vertical scale applied to each log2-count profile
    for i in np.arange(n_rows)[::-1]:
        y_0 = i
        # Faint baseline for this ridgeline.
        ax.plot(
            [x_min, x_max],
            [y_0, y_0],
            color="gray",
            alpha=.4,
            linewidth=.1,
        )
        counts = np.histogram(data[i, :], bins=bin_edges)[0]
        # Closed polygon: both ends anchored at the baseline, counts log-scaled.
        x = np.array([x_min] + list(bin_centers) + [x_max])
        y = y_0 + np.log2(np.array([0] + list(counts) + [0]) + 1) * scale
        ax.add_patch(patches.Polygon(
            np.concatenate((x[:, np.newaxis], y[:, np.newaxis]), axis=1),
            alpha=1,
            edgecolor="black",
            facecolor="lightsteelblue",
            linewidth=.2,
        ))

    # Label the y axis with the original epoch indices of the kept rows.
    y_formatter = FixedFormatter([str(i) for i in i_keep])
    y_locator = FixedLocator(list(np.arange(i_keep.size)))
    ax.yaxis.set_major_formatter(y_formatter)
    ax.yaxis.set_major_locator(y_locator)

    plt.savefig("hist_" + name + ".png", dpi=dpi)
    plt.close()
# This code is specific to my use case.
# It parses the values I pulled out and stashed in a dict with a
# key naming scheme that lets me re-create a Numpy array from it.
# filename = "data/params/scs_weight_01.pkl"
filename = "data/params/scs_p_01.pkl"
# filename = "data/params/scs_out_01.pkl"
def main():
    """Rebuild per-layer (epoch x values) arrays from the pickled dict and plot them.

    The pickle maps keys of the form "<layer>_<epoch>_<iter>" to flat value
    arrays. A first pass over the keys recovers the array dimensions and the
    set of layers; a second pass scatters each iteration's values into its
    epoch row. One ridgeline-histogram image is written per layer.
    """
    with open(filename, "rb") as f:
        scs_out = pkl.load(f)
    n_vals_per_iter = 0
    n_iters = 0
    n_epochs = 0
    out_vals = {}
    # Pass 1: discover dimensions and the layer names.
    for name, vals in scs_out.items():
        n_vals_per_iter = vals.size
        layer, i_epoch, i_iter = name.split("_")
        n_iters = np.maximum(n_iters, int(i_iter) + 1)
        n_epochs = np.maximum(n_epochs, int(i_epoch) + 1)
        out_vals[layer] = None
    for layer in out_vals.keys():
        out_vals[layer] = np.zeros((n_epochs, n_iters * n_vals_per_iter))
    # Pass 2: place each iteration's values into its slice of the epoch row.
    for name, vals in scs_out.items():
        layer, i_epoch, i_iter = name.split("_")
        i_iter = int(i_iter)
        i_epoch = int(i_epoch)
        out_vals[layer][
            i_epoch,
            n_vals_per_iter * i_iter: n_vals_per_iter * (i_iter + 1)
        ] = vals
    for layer, vals in out_vals.items():
        log_histogram(vals, layer)
if __name__ == "__main__":
    main()  # script entry point: reads `filename` and writes hist_*.png files
| brohrer/scs-gallery | log_y_histograms.py | log_y_histograms.py | py | 3,565 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.maximum",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.floor"... |
37338983645 | import server
import os
import config
import time
import threading
class Chord:
    """Bootstraps a Chord ring: derives each node's (predecessor, self,
    successor) ids and ports, records them in thisChord.txt, and launches
    one server process per node plus a client process (Windows-only
    ``start cmd.exe`` commands).
    """
    def __init__(self):
        self.cfg = config.Config()
        self.host = self.cfg.getHost().strip("\n")
        # First TCP port of the ring; each node gets a 4-port wide slot.
        self.portaInicial = int(self.cfg.getMinPort().strip("\n"))
        self.srv = []  # per-node tuples: (prev_id, id, next_id, prev_port, port, next_port)
        self.id = []  # node identifiers on the ring
        self.portas = []  # node base ports
        # Filled in from interactive input in main(). (The original assigned
        # these twice; once is enough.)
        self.qnt_key = None
        self.qnt_nodes = None
    def main(self):
        """Ask ring parameters interactively, derive the topology and spawn processes."""
        self.qnt_key = input("Quantidade de Bits: ")
        self.qnt_nodes = input("Quantidade de Nós: ")
        self.snap_time = input("Tempo Snapshot (seg): ")
        # First node takes the highest id of the 2**bits key space; ids then
        # decrease in equal steps around the ring.
        self.id.insert(0, 2**int(self.qnt_key) - 1)
        self.portas.insert(0, self.portaInicial)
        for i in range(1, int(self.qnt_nodes)):
            self.id.insert(i, int(self.id[i-1] - ((2**int(self.qnt_key))/int(self.qnt_nodes))))
            self.portas.insert(i, self.portaInicial + 4*i)
        # NOTE(review): assumes at least 2 nodes — with a single node the
        # i == 0 branch reads self.id[i+1] and would raise IndexError.
        for i in range(0, int(self.qnt_nodes)):
            if i == 0:
                self.srv.insert(i, (self.id[int(self.qnt_nodes)-1], self.id[i], self.id[i+1], self.portas[int(self.qnt_nodes)-1], self.portas[i], self.portas[i+1]))
            elif i == (int(self.qnt_nodes)-1):
                self.srv.insert(i, (self.id[i-1], self.id[i], self.id[0], self.portas[i-1], self.portas[i], self.portas[0]))
            else:
                self.srv.insert(i, (self.id[i-1], self.id[i], self.id[i+1], self.portas[i-1], self.portas[i], self.portas[i+1]))
        # Fix: the file was previously opened and never closed, so buffered
        # lines could remain unflushed while the servers were already running.
        with open("thisChord.txt", "w") as chord:
            # start the servers, passing the parameters built above
            for i in range(0, int(self.qnt_nodes)):
                cAnt, cAtu, cSuc, pAnt, pAtu, pSuc = self.srv[i]
                chord.write(self.qnt_key + " " + self.qnt_nodes + " " + str(cAnt) + " " + str(cAtu) + " " + str(cSuc) + " " + str(pAnt) + " " + str(pAtu) + " " + str(pSuc) + " " + self.snap_time + "\n")
                os.system("start /B start cmd.exe @cmd /k python server.py " + self.qnt_key + " " + self.qnt_nodes + " " + str(cAnt) + " " + str(cAtu) + " " + str(cSuc) + " " + str(pAnt) + " " + str(pAtu) + " " + str(pSuc) + " " + self.snap_time)
        # start the client
        os.system("start /B start cmd.exe @cmd /k python client.py ")
# Build the ring topology and spawn all processes when run as a script.
if __name__ == '__main__':
    chord = Chord()
    chord.main()
{
"api_name": "config.Config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 47,
"usage_type": "call"
}
] |
37117203904 | from flask import Flask, render_template, request
#import requests
import pickle
#import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/',methods=['GET'])
def index():
    """Render the landing page template."""
    return render_template('index.html')
# NOTE(review): this scaler is created but never used anywhere below —
# presumably a leftover from an earlier preprocessing step; confirm before removing.
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
if request.method == 'POST':
#1Age
Age=float(request.form['Age'])
#2BusinessTravel
BusinessTravel=int(request.form['BusinessTravel'])
#3Department
Department=int(request.form['Department'])
#4distanceFromHome
DistanceFromHome=float(request.form['DistanceFromHome'])
#5Education
Education=float(request.form['Education'])
#6EducationField
EducationField=int(request.form['EducationField'])
#7EnvironmentSatisfaction
EnvironmentSatisfaction=float(request.form['EnvironmentSatisfaction'])
#8JobInvolvement
JobInvolvement=float(request.form['JobInvolvement'])
#9JobLevel
JobLevel=float(request.form['JobLevel'])
#10JobRole
JobRole=int(request.form['JobRole'])
#11JobSatisfaction
JobSatisfaction=float(request.form['JobSatisfaction'])
#12MaritalStatus
MaritalStatus=int(request.form['MaritalStatus'])
#14MonthlyIncome
MonthlyIncome=float(request.form['MonthlyIncome'])
#15NumCompaniesWorked
NumCompaniesWorked=float(request.form['NumCompaniesWorked'])
#16OverTime
OverTime=int(request.form['OverTime'])
#17PercentSalaryHike
PercentSalaryHike=float(request.form['PercentSalaryHike'])
#18PerformanceRating
PerformanceRating=float(request.form['PerformanceRating'])
#19RelationshipSatisfaction
RelationshipSatisfaction=float(request.form['RelationshipSatisfaction'])
#20StockOptionLevel
StockOptionLevel=float(request.form['StockOptionLevel'])
#21TotalWorkingYears
TotalWorkingYears=float(request.form['TotalWorkingYears'])
#22TrainingTimesLastYear
TrainingTimesLastYear=float(request.form['TrainingTimesLastYear'])
#23YearsSinceLastPromotion
WorkLifeBalance=float(request.form['WorkLifeBalance'])
#24YearsAtCompany
YearsAtCompany=float(request.form['YearsAtCompany'])
#25YearsInCurrentRole
YearsInCurrentRole=float(request.form['YearsInCurrentRole'])
#26YearsSinceLastPromotion
YearsSinceLastPromotion=float(request.form['YearsSinceLastPromotion'])
#27YearsWithCurrManager
YearsWithCurrManager=float(request.form['YearsWithCurrManager'])
#28Stability
Stability=float(request.form['Stability'])
#29Fidelity
Fidelity=float(request.form['Fidelity'])
#30Income_YearsComp
Income_YearsComp=float(request.form['Income_YearsComp'])
#31Total_Satisfaction
TotalSatisfaction_mean=float(request.form['Total_Satisfaction'])
prediction=model.predict([[Age,
Department,
DistanceFromHome,
Education,
EducationField,
EnvironmentSatisfaction,
JobInvolvement,
JobLevel,
JobRole,
BusinessTravel,
JobSatisfaction,
MaritalStatus,
MonthlyIncome,
NumCompaniesWorked,
OverTime,
PercentSalaryHike,
PerformanceRating,
RelationshipSatisfaction,
StockOptionLevel,
TotalWorkingYears,
TrainingTimesLastYear,
WorkLifeBalance,
YearsAtCompany,
YearsInCurrentRole,
YearsSinceLastPromotion,
YearsWithCurrManager,
Stability,
Fidelity,
Income_YearsComp,
TotalSatisfaction_mean
]])
#output=prediction
if(prediction==0):
return render_template('result.html',prediction_text="This employee likely to leave Company")
elif(prediction==1):
return render_template('result.html',prediction_text="This employee will not leave the Company")
else:
return render_template('result.html',prediction_text=" data not present")
if __name__=="__main__":
app.run()
| Swathikrishnatu/ibm-attrition--Final-project | web.py | web.py | py | 5,528 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Sta... |
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
import seaborn as sns; sns.set() # for plot styling
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
#humm, encontre este codigo en un servidor remoto
#estaba junto con el "traffic.pcap"
# que podria ser?, like some sample code
# Cluster the extracted traffic features with DBSCAN and plot the clusters.
my_data2 = np.genfromtxt('test_2.txt', delimiter=',')
db = DBSCAN(eps=10000, min_samples=100000).fit(my_data2)
labels = db.labels_
# Number of clusters, ignoring DBSCAN's noise label (-1).
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
unique_labels = set(labels)
# Fix: the original called the non-existent np.linsspace (typo for linspace).
colors = [plt.cm.Spectral(each)
          for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
    class_member_mask = (labels == k)
    # Fix: the original indexed an undefined name X; the fitted data is
    # my_data2, which is presumably what was intended — TODO confirm.
    xy = my_data2[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
             markeredgecolor='k', markersize=14)
#NOTE: what you see in the sky put it format TMCTF{replace_here}
#where "replace_here" is what you see
plt.title('aaaaaaaa: %d' % n_clusters_)
plt.show()
| p4-team/ctf | 2018-09-15-trendmicro/misc_constellation/proc.py | proc.py | py | 1,156 | python | en | code | 1,716 | github-code | 1 | [
{
"api_name": "seaborn.set",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm.Spe... |
33093318888 | import solution
class Solution(solution.Solution):
    def solve(self, test_input=None):
        """Unpack (logs, k) and run on a defensive copy of the log rows."""
        logs, k = test_input
        copied_logs = [row[:] for row in logs]
        return self.findingUsersActiveMinutes(copied_logs, k)

    def findingUsersActiveMinutes(self, logs, k):
        """
        :type logs: List[List[int]]
        :type k: int
        :rtype: List[int]
        """
        # Collect the distinct active minutes of every user.
        minutes_by_user = {}
        for user, minute in logs:
            minutes_by_user.setdefault(user, set()).add(minute)
        # answer[j] counts the users whose UAM equals j + 1.
        answer = [0] * k
        for minutes in minutes_by_user.values():
            answer[len(minutes) - 1] += 1
        return answer
| QuBenhao/LeetCode | problems/1817/solution.py | solution.py | py | 572 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "solution.Solution",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 16,
"usage_type": "call"
}
] |
16074380398 | import tkinter as tk
from PIL import ImageTk, Image
class Caption:
    """A speech/thought bubble image with caption text drawn onto a Tk canvas."""
    text = ''
    lastx = None
    lasty = None
    speech_bubble = 'imgs/spb-300x165.png'
    thought_bubble = 'imgs/thought.png'
    long_sample = ("I am the hope of the universe. I am the answer to all living things "
                   "that cry out for peace. I am protector of the innocent. I am the "
                   "light in the darkness. I am truth. Ally to good! Nightmare to you!")

    def __init__(self, canvas, caption_text=None, bubble_type='speech', position=None):
        """Draw the bubble background and its text on *canvas*.

        position is a {'x': ..., 'y': ...} dict; defaults to the origin.
        """
        # Fix: the original used the mutable default position={'x': 0, 'y': 0},
        # which is shared across every call.
        if position is None:
            position = {'x': 0, 'y': 0}
        self.canvas = canvas
        # Fix: the original guard was inverted ("if not caption_text"), so the
        # text was only stored when it was empty/None.
        if caption_text:
            self.text = caption_text
        self.type = bubble_type # 'speech' or 'thought'
        self.speech_img = ImageTk.PhotoImage(Image.open(self.speech_bubble))
        self.thought_img = ImageTk.PhotoImage(Image.open(self.thought_bubble))
        # Fix: unpacking the dict directly yielded its *keys* ('x', 'y'),
        # not the coordinate values.
        self.lastx, self.lasty = position['x'], position['y']
        # Text offsets differ per bubble art so the text lands inside it.
        if bubble_type == 'thought':
            bg_image = self.thought_img
            text_posx = position['x'] + 50
            text_posy = position['y'] + 50
        else:
            bg_image = self.speech_img
            text_posx = position['x'] + 75
            text_posy = position['y'] + 25
        self.cid = self.canvas.create_image(position['x'], position['y'], anchor=tk.NW, image=bg_image)
        caption_tag = 'caption-{}'.format(int(self.cid / 2)) # local caption id
        layer_tag = 'layer-{}'.format(1)
        self.canvas.itemconfig(self.cid, tag=(caption_tag, layer_tag))
        # caption text
        self.tid = self.canvas.create_text(text_posx, text_posy,
                                           anchor=tk.NW,
                                           width=200,
                                           text=caption_text,
                                           tags='caption-text')
        # caption_text_tag = 'caption-{}-text'.format(self.cid)
        # canvas.addtag_withtag(caption_text_tag, tid)
        self.canvas.itemconfig(self.tid, tag=(caption_tag, layer_tag))
        print(caption_tag)
| brianteachman/comic-dialog-builder | ui/caption.py | caption.py | py | 2,077 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"li... |
20581596447 | """
热力图
"""
from pyecharts import options as opts
from pyecharts.charts import Geo
from pyecharts.globals import GeoType, ThemeType
from pyecharts.faker import Faker
import random, os
# Build the heat/scatter map. Renamed "map" -> "geo_chart": the original
# variable shadowed the built-in map().
geo_chart = (
    Geo(init_opts=opts.InitOpts(width="1000px", height="800px", renderer="canvas",
                                theme=ThemeType.LIGHT, animation_opts=opts.AnimationOpts(animation=True)))
    .add_schema(maptype="china")
    # Nationwide series: one random value per province.
    .add(series_name="中国热力图",
         data_pair=[list(z) for z in
                    zip(Faker.provinces,
                        [random.randint(0, (i + 1) * 200 + 100) for i in range(len(Faker.provinces))])],
         type_=GeoType.EFFECT_SCATTER, symbol_size=20,
         color=Faker.visual_color[random.randint(0, len(Faker.visual_color)-1)])
    # Guangdong series. Fix: the original generated len(Faker.provinces)
    # values here (copy-paste), relying on zip() to truncate; size the value
    # list to the city list instead.
    .add(series_name="广东热力图",
         data_pair=[list(z) for z in
                    zip(Faker.guangdong_city,
                        [random.randint(0, (i + 1) * 100) for i in range(len(Faker.guangdong_city))])],
         type_=GeoType.EFFECT_SCATTER, symbol_size=10,
         color=Faker.visual_color[1])
    .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    .set_global_opts(visualmap_opts=opts.VisualMapOpts(max_=400, pos_top=20),
                     title_opts=opts.TitleOpts(title=""),
                     )
)
geo_chart.render(path="E:/tmp/geo_heatmap.html")
os.system('E:/tmp/geo_heatmap.html')
| qugemingzizhemefeijin/python-study | ylspideraction/chapter15/_002charts_geo.py | _002charts_geo.py | py | 1,471 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pyecharts.charts.Geo",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyecharts.options.InitOpts",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pye... |
32390876497 | from django.test import TestCase, Client
from shortener_url.models import Url
from rest_framework import status
import json
class TestShortenerUrl(TestCase):
    """Tests for the POST /create shortening endpoint."""
    def setUp(self):
        # Pre-existing shortened URL used by the duplicate-alias test below.
        Url.objects.create(original_url="https://web.whatsapp.com/", custom_alias="whatsapp", shortened_url="http://shortener/u/whatsapp")
    def test_shortener_alias(self):
        """Creating a URL with a fresh custom alias returns 201."""
        data = {
            'url': 'http://www.google.com.br',
            'CUSTOM_ALIAS': 'google'
        }
        response = self.client.post('/create', json.dumps(data), content_type="application/json")
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
    def test_shortener_without_url(self):
        """Missing URL is rejected with 400."""
        data = {
            'url': '',
            'CUSTOM_ALIAS': ''
        }
        response = self.client.post('/create', json.dumps(data), content_type="application/json")
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    def test_shortener(self):
        """Creating a URL without an alias still returns 201."""
        data = {
            'url': 'https://www.youtube.com/',
            'CUSTOM_ALIAS': ''
        }
        response = self.client.post('/create', json.dumps(data), content_type="application/json")
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
    def test_shortener_existing_alias(self):
        """Reusing the alias created in setUp yields application error code 001."""
        data = {
            'url': 'https://web.whatsapp.com/',
            'CUSTOM_ALIAS': 'whatsapp'
        }
        response = self.client.post('/create', json.dumps(data), content_type="application/json").json()
        self.assertEqual('001', response['err_code'])
class TestRetrieveUrl(TestCase):
    """Tests for the GET /retrieve/<alias> redirect endpoint."""
    def setUp(self):
        Url.objects.create(original_url="https://web.whatsapp.com/", custom_alias="whatsapp", shortened_url="http://shortener/u/whatsapp")
    def test_retrieve_alias(self):
        """A known alias redirects (302) to the original URL."""
        response = self.client.get('/retrieve/whatsapp')
        self.assertEqual(status.HTTP_302_FOUND, response.status_code)
    def test_retrieve_url_not_found(self):
        """An unknown alias returns application error code 002 in the body."""
        response = self.client.get('/retrieve/gmail').json()
        self.assertEqual('002', response['err_code'])
    def test_retrieve_without_alias(self):
        """Omitting the alias does not match the route and yields 404."""
        response = self.client.get('/retrieve/')
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
class TestTopVisitedUrl(TestCase):
    """Tests for the GET /top_visited endpoint."""
    def test_top_visited_url(self):
        """With no visits recorded the endpoint returns 204 No Content."""
        response = self.client.get('/top_visited')
        self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "shortener_url.models.Url.objects.create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "shortener_url.models.Url.objects",
"line_number": 10,
"usage_type": "attribut... |
28925820348 | import requests
from bs4 import BeautifulSoup
def strip_string(string1):
    """Remove all <...> markup tags from *string1* and return the plain text.

    Rewritten as a single regex substitution: the original character scan
    raised ValueError on input containing '<' with no matching '>'
    (string1.index(">") failed), and did quadratic work via repeated
    replace() passes. The regex is O(n) and safe on malformed input.
    """
    return re.sub(r'<[^>]*>', '', string1)
def scrape_meaning(a):
URL = f"https://www.dictionary.com/browse/{a}"
r = requests.get(URL)
soup = BeautifulSoup(r.content, 'html5lib')
meaning = str(soup.find('span', attrs={'class': 'one-click-content css-nnyc96 e1q3nk1v1'}))
if meaning.endswith("</span> </span>") or meaning.endswith("</span></span>"):
meaning = meaning[:-7]
meaning1 = str(strip_string(meaning))
if ":" in meaning1:
meaning1 = meaning1[:meaning1.index(":")]
if len(meaning1) > 90:
meaning1 = meaning1[:90] + "\n" + "-" + meaning1[90:]
return meaning1
def scrape_usage(a):
URL = f"https://www.dictionary.com/browse/{a}"
r = requests.get(URL)
soup = BeautifulSoup(r.content, 'html5lib')
usage = str(soup.find('span', attrs={'class': 'luna-example italic'}))
usage1 = str(strip_string(usage))
if len(usage1) > 90:
usage1 = usage1[:90] + "\n" + "-" + usage1[90:]
return usage1 | priyanshusingh509/AashaEd | webscrape.py | webscrape.py | py | 1,199 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
37282786931 | from typing import List, Dict
from fastapi import WebSocket
from fastapi import HTTPException, status
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession
from database.models.chat_models.chat_model import Chat
from database.models.chat_models.members_model import ChatMember
from database.models.chat_models.messages_model import Message
from src.schemas.chat_schema import CreateUserRoom, SaveMessage, MessageOut, ChatMemberOut
class ChatCrud:
    """Async CRUD helper for chats, chat members and chat messages.
    Wraps a SQLAlchemy AsyncSession; write paths roll back (and silently
    continue) on IntegrityError instead of propagating it.
    """
    def __init__(self, db_session: AsyncSession) -> None:
        self._session: AsyncSession = db_session
    async def _close_session(self):
        # Close the session if it is still active; safe to call repeatedly.
        if self._session.is_active:
            await self._session.close()
    async def _get_chat_by_id(self, chat_id) -> Chat | None:
        # Primary-key lookup; returns None when the chat does not exist.
        return await self._session.get(Chat, chat_id)
    async def get_chat_history(self, chat_id: int) -> List[MessageOut]:
        """Return every message of the chat, or raise 404 if the chat is missing."""
        chat: Chat = await self._session.get(Chat, chat_id)
        if chat is None:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found")
        messages = list()
        for message in chat.messages:
            # Build the outgoing schema from the ORM row's attribute dict.
            messages.append(MessageOut(**message.__dict__))
        await self._close_session()
        return messages
    async def create_chat(self):
        """Insert an empty chat row and return its generated id."""
        chat = Chat()
        self._session.add(chat)
        try:
            await self._session.commit()
        except exc.IntegrityError:
            # NOTE(review): after a rollback chat.id may be unset/stale but is
            # still returned to the caller — confirm this failure mode.
            await self._session.rollback()
        return chat.id
    async def create_user_room(self, create_data: CreateUserRoom):
        """Create a chat and register its owner as the first member."""
        chat_id = await self.create_chat()
        added_userd = await self.add_user_to_chat([create_data.owner_id], chat_id)
        await self._close_session()
        return added_userd
    async def add_user_to_chat(self, user_id_list: List[str], chat_id: int) -> List[ChatMemberOut]:
        """Add each user id to the chat; raises 404 if the chat is missing."""
        added_users: List[ChatMemberOut] = list()
        if await self._get_chat_by_id(chat_id) is None:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found")
        for user_id in user_id_list:
            member = ChatMember(chat_id=chat_id, user_id=user_id)
            self._session.add(member)
            added_users.append(ChatMemberOut(chat_id=chat_id, user_id=user_id))
        try:
            await self._session.commit()
        except exc.IntegrityError:
            # Duplicate memberships are silently dropped via rollback.
            await self._session.rollback()
        return added_users
    async def user_in_chat(self, chat_id, user_id):
        """True if the user belongs to the chat.
        NOTE(review): returns None (falsy) rather than False when the user is
        not a member — callers below only use it in boolean context.
        """
        chat: Chat = await self._session.get(Chat, chat_id)
        if chat is None:
            return False
        # Collects one user_id entry per matching membership row.
        members = [user_id for members in chat.members if user_id == members.user_id]
        if user_id in members:
            return True
    async def save_message(self, message_data: SaveMessage):
        """Persist a message after verifying the author is a chat member (404 otherwise)."""
        if not await self.user_in_chat(message_data.chat_id, message_data.user_id):
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
        chat: Chat = await self._session.get(Chat, message_data.chat_id)
        if chat is None:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
        message = Message(**message_data.dict())
        chat.messages.append(message)
        try:
            self._session.add(chat)
            await self._session.commit()
        except exc.IntegrityError:
            await self._session.rollback()
        await self._close_session()
class Notifier:
    """In-memory registry of open chat websockets, keyed by chat id then user id."""
    def __init__(self):
        # chat_id -> {user_id: WebSocket}
        self.connections: Dict[int, Dict] = dict()
    async def send_in_group(self, chat_id, user_id, message):
        """Send *message* to every member of the chat except the sender."""
        chat_users = self.connections[chat_id]
        for user in chat_users:
            if user != user_id:
                await chat_users[user].send_text(message)
    async def connect(self, chat_id: int, user_id: str, websocket: WebSocket):
        """Accept the socket and register it under its chat room."""
        await websocket.accept()
        # setdefault replaces the original's redundant double
        # "create the room if missing" checks with a single step.
        self.connections.setdefault(chat_id, {})[user_id] = websocket
    def remove(self, chat_id: int, user_id: str):
        """Drop a user's socket; delete the chat room once it is empty.
        Unknown chat/user ids are ignored.
        """
        try:
            self.connections[chat_id].pop(user_id)
            # delete chat_room
            if len(self.connections[chat_id]) == 0:
                self.connections.pop(chat_id)
        except KeyError:
            pass
| Whitev2/WebsocketChatExample | app/src/crud/chat_crud.py | chat_crud.py | py | 4,420 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "database.models.chat_models.chat_model.Chat",
"line_number": 24,
... |
9673124412 | import jc
from connectors.core.connector import get_logger, ConnectorError
from .constants import LOGGER_NAME
logger = get_logger(LOGGER_NAME)
def convert(config, params):
    """Parse raw command output with the requested jc parser.
    params must carry 'parser' and 'command_output'; 'raw' (default False)
    is forwarded to jc.parse. Raises ConnectorError on bad input.
    """
    def _abort(message):
        # Log-and-raise pair the original repeated at every validation site.
        logger.exception(message)
        raise ConnectorError(message)

    parser_name = params.get('parser')
    command_output = params.get('command_output')
    raw_output = params.get('raw', False)
    if not parser_name or not command_output:
        _abort('Missing required input')
    # jc module names use underscores where CLI names use dashes.
    parser_name = parser_name.replace('-', '_')
    if parser_name in jc.streaming_parser_mod_list():
        _abort(f"'{parser_name}' parser not valid. Streaming parsers are not supported.")
    if parser_name not in jc.standard_parser_mod_list():
        _abort(f"'{parser_name}' parser not valid.")
    logger.info(f"Selected parser: {jc.parser_info(parser_name)}")
    return jc.parse(parser_name, command_output, raw=raw_output, quiet=True)
| fortinet-fortisoar/connector-json-convert | json-convert/convert.py | convert.py | py | 1,045 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "connectors.core.connector.get_logger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "constants.LOGGER_NAME",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "connectors.core.connector.ConnectorError",
"line_number": 15,
"usage_type": ... |
74418312994 | from __future__ import annotations
from typing import TypedDict
from movielog.reviews import serializer
from movielog.utils import export_tools, list_tools
from movielog.utils.logging import logger
StatGroup = TypedDict("StatGroup", {"reviewYear": str, "reviewsCreated": int})
def export() -> None: # noqa: WPS210
logger.log("==== Begin exporting {}...", "review stats")
all_reviews = serializer.deserialize_all()
stat_groups = [
StatGroup(
reviewYear="all",
reviewsCreated=len(all_reviews),
)
]
reviews_by_year = list_tools.group_list_by_key(
all_reviews, lambda review: str(review.date.year)
)
for year, reviews_for_year in reviews_by_year.items():
stat_groups.append(
StatGroup(
reviewYear=str(year),
reviewsCreated=len(reviews_for_year),
)
)
export_tools.serialize_dicts_to_folder(
dicts=stat_groups,
folder_name="review_stats",
filename_key=lambda stat_file: stat_file["reviewYear"],
)
| fshowalter/movielog | movielog/reviews/exports/review_stats.py | review_stats.py | py | 1,081 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.TypedDict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "movielog.utils.logging.logger.log",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "movielog.utils.logging.logger",
"line_number": 13,
"usage_type": "name"
},
{
"a... |
19076092035 | from __future__ import print_function, division # requires Python >= 2.6
# numpy and scipy imports
import numpy as np
import math
from scipy.sparse import kron, identity
from scipy.sparse.linalg import eigsh # Lanczos routine from ARPACK
from collections import namedtuple
####Initial parameter
#physical parameter
J = 1
Jz = 1
#number of states kept
Nstate = 10
#number of iterations
Nlength = 100
#exact solution
ExactEnergy = -math.log(2) + 0.25
Block = namedtuple("Block", ["length", "basis_size", "operator_dict"])
EnlargedBlock = namedtuple("EnlargedBlock", ["length", "basis_size", "operator_dict"])
def is_valid_block(block):
for op in block.operator_dict.values():
if op.shape[0] != block.basis_size or op.shape[1] != block.basis_size:
return False
return True
is_valid_enlarged_block = is_valid_block
# Model-specific code for the Heisenberg XXZ chain
model_d = 2 # single-site basis size
Sz1 = np.array([[0.5, 0], [0, -0.5]], dtype='d') # single-site S^z
Sp1 = np.array([[0, 1], [0, 0]], dtype='d') # single-site S^+
H1 = np.array([[0, 0], [0, 0]], dtype='d') # single-site portion of H is zero
def H2(Sz1, Sp1, Sz2, Sp2): # two-site part of H
return (
(J / 2) * (kron(Sp1, Sp2.conjugate().transpose()) + kron(Sp1.conjugate().transpose(), Sp2)) +
Jz * kron(Sz1, Sz2)
)
initial_block = Block(length=1, basis_size=model_d, operator_dict={
"H": H1,
"conn_Sz": Sz1,
"conn_Sp": Sp1,
})
def enlarge_block(block):
mblock = block.basis_size
o = block.operator_dict
enlarged_operator_dict = {
"H": kron(o["H"], identity(model_d)) + kron(identity(mblock), H1) + H2(o["conn_Sz"], o["conn_Sp"], Sz1, Sp1),
"conn_Sz": kron(identity(mblock), Sz1),
"conn_Sp": kron(identity(mblock), Sp1),
}
return EnlargedBlock(length=(block.length + 1),
basis_size=(block.basis_size * model_d),
operator_dict=enlarged_operator_dict)
def rotate_and_truncate(operator, transformation_matrix):
return transformation_matrix.conjugate().transpose().dot(operator.dot(transformation_matrix))
def single_dmrg_step(sys, env, m):
assert is_valid_block(sys)
assert is_valid_block(env)
# Enlarge each block by a single site.
sys_enl = enlarge_block(sys)
if sys is env: # no need to recalculate a second time
env_enl = sys_enl
else:
env_enl = enlarge_block(env)
assert is_valid_enlarged_block(sys_enl)
assert is_valid_enlarged_block(env_enl)
# Construct the full superblock Hamiltonian.
m_sys_enl = sys_enl.basis_size
m_env_enl = env_enl.basis_size
sys_enl_op = sys_enl.operator_dict
env_enl_op = env_enl.operator_dict
superblock_hamiltonian = kron(sys_enl_op["H"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op["H"]) + \
H2(sys_enl_op["conn_Sz"], sys_enl_op["conn_Sp"], env_enl_op["conn_Sz"], env_enl_op["conn_Sp"])
energy, psi0 = eigsh(superblock_hamiltonian, k=1, which="SA")
psi0 = psi0.reshape([sys_enl.basis_size, -1], order="C")
rho = np.dot(psi0, psi0.conjugate().transpose())
evals, evecs = np.linalg.eigh(rho)
possible_eigenstates = []
for eval, evec in zip(evals, evecs.transpose()):
possible_eigenstates.append((eval, evec))
possible_eigenstates.sort(reverse=True, key=lambda x: x[0]) # largest eigenvalue first
my_m = min(len(possible_eigenstates), m)
transformation_matrix = np.zeros((sys_enl.basis_size, my_m), dtype='d', order='F')
for i, (eval, evec) in enumerate(possible_eigenstates[:my_m]):
transformation_matrix[:, i] = evec
truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])
print("truncation error:", truncation_error)
new_operator_dict = {}
for name, op in sys_enl.operator_dict.items():
new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)
newblock = Block(length=sys_enl.length,
basis_size=my_m,
operator_dict=new_operator_dict)
return newblock, energy
def graphic(sys, env):
graphic = ("=" * sys.length) + "**" + ("-" * env.length)
return graphic
def infinite_system_algorithm(L, m):
# initial
block = initial_block
# Repeatedly enlarge the system by performing a single DMRG step, using a
# reflection of the current block as the environment.
while 2 * block.length < L:
print("L =", block.length * 2 + 2)
print(graphic(block, block))
block, energy = single_dmrg_step(block, block, m=m)
print("E/L =", energy / (block.length * 2))
print("error=", ExactEnergy - energy / (block.length * 2))
if __name__ == "__main__":
np.set_printoptions(precision=10, suppress=True, threshold=10000, linewidth=300)
infinite_system_algorithm(L=Nlength, m=Nstate)
| liuzhsunshine/tensor-network-practice | 3M-01-infinite-dmrg-1D-XXZ.py | 3M-01-infinite-dmrg-1D-XXZ.py | py | 4,911 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.log",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
23466392621 | #!/usr/bin/python3
from brownie import web3, Attack
from scripts.deploy import deploy
from scripts.helpful_scripts import get_account
from colorama import Fore
from time import time
# * colours
# Shorthand colorama codes for coloured terminal output.
green = Fore.GREEN
red = Fore.RED
blue = Fore.BLUE
magenta = Fore.MAGENTA  # NOTE(review): unused in the code visible here
reset = Fore.RESET
def print_colour(solved):
    """Print the challenge completion status: green when solved, red otherwise."""
    status = f"{green}{True}{reset}" if solved else f"{red}{solved}{reset}"
    print(f"{blue}Is complete: {status}")
def hack(contract_address=None, attacker=None):
    """Deploy the challenge locally and solve it through the Attack contract.
    NOTE(review): passing a contract_address aborts with an error and exits —
    the remote-address path appears unimplemented (see main() below).
    """
    if not contract_address:
        target, _ = deploy()
        _, attacker = get_account()
    else:
        print(f"{red}Something is wrong{reset}")
        exit(-1)
    print_colour(target.isComplete())
    # The commented block below computed the "random" answer off-chain
    # (keccak of the previous block hash + timestamp); the Attack contract
    # now performs the equivalent computation on-chain instead.
    # # Get the value of ans
    # block_number = web3.eth.blockNumber
    # block_hash = web3.eth.getBlock(block_number - 1)["hash"].hex()
    # timestamp = web3.eth.getBlock("latest")["timestamp"]
    # # print(block_hash + timestamp)
    # # Compute the answer using the same algorithm as the contract
    # ans_hash = web3.solidityKeccak(["bytes32", "uint32"], [block_hash, timestamp]).hex()
    # print(ans_hash)
    # answer = int("0x" + ans_hash[-2:], 0)
    # print(answer)
    # print(target.answer())
    attcking_contract = Attack.deploy(target.address, {"from": attacker})
    attcking_contract.hack({"from": attacker, "value": "1 ether"})
    # target.guess(answer, {"from": attacker, "value": "1 ether"}).wait(1)
    print_colour(target.isComplete())
    assert target.isComplete() == True
def main(contract_address=None):
    """Entry point.
    NOTE(review): when contract_address is given, hack() immediately rejects
    it and exits (see its else branch) — this path is effectively dead.
    """
    if contract_address:
        hack(contract_address, get_account())
    else:
        hack()
if __name__ == "__main__":
    main()
| Aviksaikat/Blockchain-CTF-Solutions | capturetheether/lotteries/GuessTheNewNumber_DONE/scripts/hack.py | hack.py | py | 1,700 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "colorama.Fore.GREEN",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "colorama.... |
3259518810 | import art
import subprocess
clear = lambda: subprocess.call('cls||clear', shell=True)
print(art.logo)
list = []
def add_list(name,bid):
new_dic = {
"name": name,
"bid": bid
}
list.append(new_dic)
def declare_winner(list):
max_bid = 0
for dic in list:
if max_bid < int(dic["bid"]):
max_bid = int(dic["bid"])
winner = dic["name"]
print(f"Winner of the auction is (The one who pays the most:D) {winner} with the price ${max_bid}")
while True:
name = input("What is your name?: ")
bid = input("What is your bid?: $")
add_list(name, bid)
while True:
flag = input("Is there any one who want to bid too?(yes or no)").lower()
if flag != "yes" and flag != "no":
print("Invalid input! Let's try again")
continue
else:
break
if flag == "yes":
clear()
continue
else:
clear()
break
declare_winner(list) | idris-bahce/Blind-Auction | Auction.py | Auction.py | py | 938 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "subprocess.call",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "art.logo",
"line_number": 6,
"usage_type": "attribute"
}
] |
8702134035 | import streamlit as st
import altair as alt
import inspect
from vega_datasets import data
@st.experimental_memo
def get_chart_99637(use_container_width: bool):
import altair as alt
from vega_datasets import data
airports = data.airports()
states = alt.topo_feature(data.us_10m.url, feature='states')
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
width=500,
height=300
).project('albersUsa')
# airport positions on background
points = alt.Chart(airports).mark_circle(
size=10,
color='steelblue'
).encode(
longitude='longitude:Q',
latitude='latitude:Q',
tooltip=['name', 'city', 'state']
)
chart = background + points
tab1, tab2 = st.tabs(["Streamlit theme (default)", "Altair native theme"])
with tab1:
st.altair_chart(chart, theme="streamlit", use_container_width=True)
with tab2:
st.altair_chart(chart, theme=None, use_container_width=True)
try:
st.expander("See code").code(inspect.getsource(get_chart_99637))
get_chart_99637(use_container_width=True)
except Exception as e:
st.exception(e)
| streamlit/release-demos | 1.16.0/demo_app_altair/pages/115_Airports.py | 115_Airports.py | py | 1,260 | python | en | code | 78 | github-code | 1 | [
{
"api_name": "vega_datasets.data.airports",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "vega_datasets.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "altair.topo_feature",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ve... |
27401206260 | from __future__ import annotations
from typing import Literal, Optional
from cognite.client import data_modeling as dm
from pydantic import Field
from ._core import DomainModel, DomainModelApply, TypeList, TypeApplyList
__all__ = ["CogPool", "CogPoolApply", "CogPoolList", "CogPoolApplyList", "CogPoolFields", "CogPoolTextFields"]
CogPoolTextFields = Literal["name", "time_unit", "timezone"]
CogPoolFields = Literal["max_price", "min_price", "name", "time_unit", "timezone"]
_COGPOOL_PROPERTIES_BY_FIELD = {
"max_price": "maxPrice",
"min_price": "minPrice",
"name": "name",
"time_unit": "timeUnit",
"timezone": "timezone",
}
class CogPool(DomainModel):
space: str = "market"
max_price: Optional[float] = Field(None, alias="maxPrice")
min_price: Optional[float] = Field(None, alias="minPrice")
name: Optional[str] = None
time_unit: Optional[str] = Field(None, alias="timeUnit")
timezone: Optional[str] = None
def as_apply(self) -> CogPoolApply:
return CogPoolApply(
external_id=self.external_id,
max_price=self.max_price,
min_price=self.min_price,
name=self.name,
time_unit=self.time_unit,
timezone=self.timezone,
)
class CogPoolApply(DomainModelApply):
space: str = "market"
max_price: Optional[float] = None
min_price: Optional[float] = None
name: Optional[str] = None
time_unit: Optional[str] = None
timezone: Optional[str] = None
def _to_instances_apply(self, cache: set[str]) -> dm.InstancesApply:
if self.external_id in cache:
return dm.InstancesApply(dm.NodeApplyList([]), dm.EdgeApplyList([]))
sources = []
properties = {}
if self.max_price is not None:
properties["maxPrice"] = self.max_price
if self.min_price is not None:
properties["minPrice"] = self.min_price
if self.time_unit is not None:
properties["timeUnit"] = self.time_unit
if properties:
source = dm.NodeOrEdgeData(
source=dm.ContainerId("market", "CogPool"),
properties=properties,
)
sources.append(source)
properties = {}
if self.name is not None:
properties["name"] = self.name
if self.timezone is not None:
properties["timezone"] = self.timezone
if properties:
source = dm.NodeOrEdgeData(
source=dm.ContainerId("market", "Market"),
properties=properties,
)
sources.append(source)
if sources:
this_node = dm.NodeApply(
space=self.space,
external_id=self.external_id,
existing_version=self.existing_version,
sources=sources,
)
nodes = [this_node]
else:
nodes = []
edges = []
cache.add(self.external_id)
return dm.InstancesApply(dm.NodeApplyList(nodes), dm.EdgeApplyList(edges))
class CogPoolList(TypeList[CogPool]):
_NODE = CogPool
def as_apply(self) -> CogPoolApplyList:
return CogPoolApplyList([node.as_apply() for node in self.data])
class CogPoolApplyList(TypeApplyList[CogPoolApply]):
_NODE = CogPoolApply
| cognitedata/pygen | examples-pydantic-v1/markets_pydantic_v1/client/data_classes/_cog_pool.py | _cog_pool.py | py | 3,336 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "typing.Literal",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Literal",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "_core.DomainModel",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
22508709753 | import math
from enum import Enum
from numbers import Number
from typing import Tuple
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR
from accelerate import init_empty_weights
from transformers import (
AutoModelForCausalLM,
AutoConfig,
mpu,
ParallelOPTForCausalLM,
ParallelGPTJForCausalLM,
ParallelGPT2LMHeadModel,
ParallelLlamaForCausalLM)
parallel_model_map = {
"opt": ParallelOPTForCausalLM,
"gpt2": ParallelGPT2LMHeadModel,
"gptj": ParallelGPTJForCausalLM,
"llama": ParallelLlamaForCausalLM
}
def get_entropy(gen_logits, inf_mask, mask, model_parallel=False):
inf_mask = torch.isinf(gen_logits) | inf_mask
if model_parallel:
full_probs = mpu.parallel_softmax(gen_logits.float(), dim=-1)
full_logprobs = mpu.parallel_log_softmax(gen_logits.float(), dim=-1)
full_logprobs = full_logprobs.masked_fill(inf_mask, 0)
ent = -mpu.parallel_sum(full_probs * full_logprobs, dim=-1)
else:
full_probs = F.softmax(gen_logits, dim=-1, dtype=torch.float32)
full_logprobs = F.log_softmax(gen_logits, dim=-1, dtype=torch.float32)
full_logprobs = full_logprobs.masked_fill(inf_mask, 0)
ent = -torch.sum(full_probs * full_logprobs, dim=-1)
ent = ent * mask
return ent
def get_log_probs(logits, ids, mask, inf_mask=None, model_parallel=False):
if model_parallel:
logprobs = -mpu.parallel_logprobs(logits, ids)
if inf_mask is not None:
gathered_inf_mask = mpu.parallel_gather(inf_mask, -1, ids.unsqueeze(-1)).squeeze(-1)
logprobs = logprobs.masked_fill(gathered_inf_mask, -float("inf"))
# logprobs = mpu.parallel_log_softmax(logits.float(), dim=-1)
# if inf_mask is not None:
# logprobs = logprobs.masked_fill(inf_mask, -float("inf"))
# logprobs = mpu.parallel_gather(logprobs, -1, ids.unsqueeze(-1)).squeeze(-1)
else:
logprobs = F.log_softmax(logits, dim=-1)
if inf_mask is not None:
logprobs = logprobs.masked_fill(inf_mask, -float("inf"))
logprobs = torch.gather(logprobs, dim=-1, index=ids.unsqueeze(-1)).squeeze(-1)
logprobs = logprobs.masked_fill(~(mask.bool()), 0)
# we ensure that the selected logprobs are not inf or nan
assert all((~torch.isinf(logprobs.view(-1))) & (~torch.isnan(logprobs.view(-1))))
return logprobs
def get_x_entropy(logits_1, logits_2, inf_mask, mask, model_parallel=False):
inf_mask = torch.isinf(logits_1) | torch.isinf(logits_2) | inf_mask
if model_parallel:
full_probs = mpu.parallel_softmax(logits_1.float(), dim=-1)
full_logprobs = mpu.parallel_log_softmax(logits_2.float(), dim=-1)
full_logprobs = full_logprobs.masked_fill(inf_mask, 0)
xent = -mpu.parallel_sum(full_probs * full_logprobs, dim=-1)
else:
full_probs = F.softmax(logits_1, dim=-1, dtype=torch.float32)
full_logprobs = F.log_softmax(logits_2, dim=-1, dtype=torch.float32)
full_logprobs = full_logprobs.masked_fill(inf_mask, 0)
xent = -torch.sum(full_probs * full_logprobs, dim=-1)
xent = xent * mask
return xent
def get_rev_kl(log_p, log_q, mask):
log_ratio = (log_p - log_q) * mask
kl = log_ratio.float().exp() - 1 - log_ratio
return kl
def get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:
"""
Computes element-wise mean and variance of the tensor across processes
"""
sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)
dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)
global_sum, count = sum_and_count
global_mean = global_sum / count
sum_var = torch.sum((xs - global_mean) ** 2)
dist.all_reduce(sum_var, dist.ReduceOp.SUM)
global_var = sum_var / count
return global_mean, global_var, count
def whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:
"""Whitens values"""
if distributed and dist.is_initialized():
mean, var, _ = get_global_statistics(xs)
else:
var, mean = torch.var_mean(xs)
whitened = (xs - mean) * torch.rsqrt(var + 1e-8)
if not shift_mean:
whitened += mean
return whitened
def significant(x: Number, ndigits=2) -> Number:
"""
Cut the number up to its `ndigits` after the most significant
"""
if isinstance(x, torch.Tensor):
x = x.item()
if not isinstance(x, Number) or x == 0:
return x
return round(x, ndigits - int(math.floor(math.log10(abs(x)))))
class OptimizerName(str, Enum):
"""Supported optimizer names"""
ADAM: str = "adam"
ADAMW: str = "adamw"
ADAM_8BIT_BNB: str = "adam_8bit_bnb"
ADAMW_8BIT_BNB: str = "adamw_8bit_bnb"
SGD: str = "sgd"
def get_optimizer_class(name: OptimizerName):
"""
Returns the optimizer class with the given name
Args:
name (str): Name of the optimizer as found in `OptimizerNames`
"""
if name == OptimizerName.ADAM:
return torch.optim.Adam
if name == OptimizerName.ADAMW:
return torch.optim.AdamW
if name == OptimizerName.SGD.value:
return torch.optim.SGD
supported_optimizers = [o.value for o in OptimizerName]
raise ValueError(
f"`{name}` is not a supported optimizer. "
f"Supported optimizers are: {supported_optimizers}"
)
class SchedulerName(str, Enum):
"""Supported scheduler names"""
COSINE_ANNEALING = "cosine_annealing"
LINEAR = "linear"
def get_scheduler_class(name: SchedulerName):
"""
Returns the scheduler class with the given name
"""
if name == SchedulerName.COSINE_ANNEALING:
return CosineAnnealingLR
if name == SchedulerName.LINEAR:
return LinearLR
supported_schedulers = [s.value for s in SchedulerName]
raise ValueError(
f"`{name}` is not a supported scheduler. "
f"Supported schedulers are: {supported_schedulers}"
) | microsoft/LMOps | minillm/minillm/utils.py | utils.py | py | 6,065 | python | en | code | 2,623 | github-code | 1 | [
{
"api_name": "transformers.ParallelOPTForCausalLM",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "transformers.ParallelGPT2LMHeadModel",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "transformers.ParallelGPTJForCausalLM",
"line_number": 25,
"usage... |
18259114828 | import json
import boto3
from datetime import datetime
# get ec2 metrics
def get_metric_ec2(namespace, metricname, client):
response = client.get_metric_statistics(
Namespace=namespace,
MetricName=metricname,
Dimensions=[
{
'Name': 'InstanceId',
'Value': 'i-092988b42c6dea77e'
}
],
StartTime=datetime(2022, 5, 21),
EndTime=datetime.now(),
Period=3600,
Statistics=[
'SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum'
]
)
return response
# get s3 metrics
def get_metric_s3(namespace, metricname, storageType, client):
response = client.get_metric_statistics(
Namespace=namespace,
MetricName=metricname,
Dimensions=[
{
'Name': 'StorageType',
'Value': storageType
},
{
'Name': 'BucketName',
'Value': 'tdg-s3-bucket'
}
],
StartTime=datetime(2022, 5, 21),
EndTime=datetime.now(),
Period=3600,
Statistics=[
'SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum'
]
)
return response
def lambda_handler(event, context):
headers = {
'Access-Control-Allow-Origin': '*'
}
if "headers" in event and "referer" in event["headers"] and "18.215.185.124" in event["headers"]["referer"]:
path = event["rawPath"].split("/")
namespace = "AWS/" + path[1]
metricname = path[2]
if namespace == "AWS/EC2":
client = boto3.client('cloudwatch', region_name='us-east-1')
return {
'statusCode': 200,
'headers': headers,
'body': json.dumps(get_metric_ec2(namespace, metricname, client), default=str)
}
else:
client = boto3.client('cloudwatch', region_name='eu-west-3')
storageType = path[3]
return {
'statusCode': 200,
'headers': headers,
'body': json.dumps(get_metric_s3(namespace, metricname, storageType, client), default=str)
}
else:
return {
'statusCode': 403,
'headers': headers,
'body': json.dumps("Access denied")
}
| BrunosBastos/ES_TDG | MetricsService/lambda_function.py | lambda_function.py | py | 2,499 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "datetime.da... |
73767645472 | import time
from datetime import datetime
from models.cancel_queue import CancelQueue
from models.last_updated_tracker import LastUpdatedTracker
from sqlalchemy import insert, update, select
from .get_orders import get_orders
from .get_orders_items import get_orders_items
from ... import my_logger, SP_EXCEPTIONS, pacific_zone
from ...helpers.db_config import db_conn
conn = db_conn()
"""
Hitting Sp Api Orders & Orders Item Api to fetch user's orders information
And Updating the Amazon Orders Cancel Queue Based on order's is_buyer_requested_cancellation status
"""
def process_cancel_orders(posted_after, posted_before):
try:
my_logger.info(
f"posted_after {posted_after} | posted_before {posted_before} | Datetime Now in PacificTime {(datetime.now(pacific_zone)).isoformat()}"
)
time.sleep(3)
orders = get_orders(
posted_after,
posted_before,
marketplace_ids=["ATVPDKIKX0DER"],
order_statuses=["PartiallyShipped", "Unshipped"],
fulfillment_channels=["MFN"]
)
if len(orders) > 0:
my_logger.info(f"orders length {len(orders)}")
last_purchase_date = posted_before
for order in orders:
time.sleep(3)
order_items = get_orders_items(
order_id=order.get("AmazonOrderId"))
if order_items:
my_logger.info(f"order_id {order.get('AmazonOrderId')} \n order_items {len(order_items)}")
for order_item in order_items:
is_buyer_requested_cancellation = order_item.get("BuyerRequestedCancel").get(
"IsBuyerRequestedCancel"
) if order_item.get("BuyerRequestedCancel") else "false"
buyer_cancellation_reason = order_item.get("BuyerRequestedCancel").get(
"BuyerCancelReason"
) if order_item.get("BuyerRequestedCancel") else None
# Updating the Cancel Queue
if is_buyer_requested_cancellation != "false":
cqs = conn.query(CancelQueue).where(
CancelQueue.order_number == order.get('AmazonOrderId'),
CancelQueue.order_item_id == order_item.get('OrderItemId'),
CancelQueue.sku == order_item.get('SellerSKU')
).first()
if not cqs:
my_logger.info(f"Sku {order_item.get('SellerSKU','')} is cancelled and saving in db.")
cq = CancelQueue(order.get('AmazonOrderId',''))
cq.order_item_id = order_item.get('OrderItemId','')
cq.sku = order_item.get('SellerSKU','')
cq.buyer_cancel_date = datetime.now(pacific_zone)
cq.purchase_date = datetime.strptime(order.get('PurchaseDate'), "%Y-%m-%dT%H:%M:%SZ")
cq.last_processed_at = datetime.now(pacific_zone)
cq.desktopshipper_cancel = 0
cq.skubana_cancel = 0
cq.amazon_cancel = 0
cq.buyer_cancellation_reason = buyer_cancellation_reason
cq.processing_status = 'PENDING'
cq.created_at = datetime.now(pacific_zone)
cq.updated_at = datetime.now(pacific_zone)
conn.merge(cq)
conn.commit()
else:
my_logger.debug(
f"Sku {order_item.get('SellerSKU','')} is already in the queue, skipping."
)
last_purchase_date = order.get('PurchaseDate')
# Updating Order Tracker
stmt = (
select(LastUpdatedTracker)
.where(LastUpdatedTracker.tracker_type == 'orders')
)
last_tracker = conn.execute(stmt).first()
if not last_tracker:
stmt = (
insert(LastUpdatedTracker)
.values(
tracker_type="orders",
last_updated_at=datetime.strptime(last_purchase_date, "%Y-%m-%dT%H:%M:%SZ"),
created_at=datetime.now(pacific_zone),
updated_at=datetime.now(pacific_zone)
)
)
conn.execute(stmt)
conn.commit()
else:
stmt = (
update(LastUpdatedTracker)
.where(LastUpdatedTracker.tracker_type == "orders")
.values(
last_updated_at=datetime.strptime(last_purchase_date, "%Y-%m-%dT%H:%M:%SZ"),
updated_at=datetime.now(pacific_zone)
)
)
conn.execute(stmt)
conn.commit()
return True
else:
my_logger.error(f"No Orders")
return False
except SP_EXCEPTIONS as e:
my_logger.error(str(e))
orders = None
raise e
| aman-saleem-qbatch/dagster-cloud-dev | ops/apis/helpers/orders_processor.py | orders_processor.py | py | 5,474 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "helpers.db_config.db_conn",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tim... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.