blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92ec87d6a1f183a10a48e5bb65076fbca52c2d3c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Lazymux/routersploit/tests/creds/routers/netcore/test_ssh_default_creds.py | 2d81b6e62845318c12ca1f5a515d18860390ca6d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:029aa8eb5f4144f599d8be14416b93b3a2e19b768949e76237a74d16a341aaf0
size 634
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
40c8a568840e98756271052d3afc5478c2d79a7a | 60ce73bf2f86940438e5b7fecaaccad086888dc5 | /working_scrapers/Kentucky_webster.py | 2d6934d0d038a745b9f7a44e4ecd3d1673ff539b | [] | no_license | matthewgomies/jailcrawl | 22baf5f0e6dc66fec1b1b362c26c8cd2469dcb0d | 9a9ca7e1328ae549860ebeea9b149a785f152f39 | refs/heads/master | 2023-02-16T06:39:42.107493 | 2021-01-15T16:37:57 | 2021-01-15T16:37:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | #!/usr/bin/python
'''
This is an template script
'''
from urllib.request import urlopen, Request
import pandas as pd
import os
import time
import numpy as np
from datetime import datetime
import datetime as dt
import sys
from io import StringIO
from joblib import Parallel, delayed
import requests
from jailscrape.common import save_to_s3, get_browser, get_logger, record_error, save_pages_array
from jailscrape import crawlers
# jailscrape.common is a file that is part of the project which keeps
# most common boilerplate code out of this file
from selenium.webdriver.common.keys import Keys
import watchtower
from bs4 import BeautifulSoup
import re
import math
# NOTE: These are imports. They ideally don't change very often. It's OK
# to have a large, maximal set here and to bulk-edit files to add to
# these.
# Scraper configuration: identifies which row of the master jailcrawl
# spreadsheet this scraper serves, and which jurisdiction it covers.
ROW_INDEX = 361 # Change this for each scraper. This references the row
# of the main jailcrawl spreadsheet. This index will be used to look up
# the URL as well as state/county info
THIS_STATE = 'kentucky' # Change the current state/county information.
THIS_COUNTY = 'webster'  # County name; checked against the roster row in main().
def main(roster_row):
    """
    Scrape the Webster County, KY jail roster described by *roster_row*.

    roster_row : a pandas Series (one row of the master jailcrawl
    spreadsheet) carrying at least 'State' and 'County' fields.

    Raises (and exits with status 1) if the row does not match this
    scraper's state/county, or if the crawler itself fails.
    """
    try:
        logger = get_logger(roster_row) # Get a standard logger

        ##########
        # Begin core specific scraping code
        if roster_row['State'].lower() != THIS_STATE or roster_row['County'].lower() != THIS_COUNTY:
            raise Exception("Expected county definition info from _%s, %s_, but found info: _%s_" % (THIS_COUNTY, THIS_STATE, roster_row))
        # This site is hosted on omsweb, so delegate to the shared crawler,
        # which manages its own browser session internally.
        crawlers.omsweb_crawler(roster_row) # try to call a known crawler if possible
        # End core specific scraping code
        ##########

        logger.info('complete!')

    except Exception as errorMessage:
        # NOTE: the previous version first tried `browser.close()`, but no
        # `browser` is ever created in this scraper (omsweb_crawler owns its
        # browser), so that call always raised NameError and fell through to
        # this same record_error call. Record the error directly instead.
        record_error(message=str(errorMessage), roster_row=roster_row)
        # Record error in S3 for a general error
        logger.error('Error: %s', errorMessage)
        # Log error
        sys.exit(1)
if __name__ == "__main__":
    #This will load in the current jail roster list
    #Select the index of the roster this script is for:
    #Write the name of the county and state
    roster = pd.read_csv('/opt/jail_roster_final_rmDuplicates.csv',encoding = "utf-8")
    # Pass the single roster row whose 'index' column matches ROW_INDEX.
    main(roster[roster['index'] == ROW_INDEX].iloc[0])
| [
"matthewgomies@Matthews-MacBook-Pro.local"
] | matthewgomies@Matthews-MacBook-Pro.local |
5e0a48a78d29fa70226790d056b03e46cf7c081f | 15a89fb4a0348548d1cf26fbeb6c9539f08a1b90 | /djangoProjectClass/urls.py | a3e0739e37baaf920cc707ce1f38749510203333 | [] | no_license | achieveIdeal/- | 909b2449a688222ccbb950fe6792fc57d1d8f9e5 | 14316e211e5970aefb9288365451739839aba941 | refs/heads/master | 2023-06-21T19:01:29.876932 | 2020-02-22T03:30:09 | 2020-02-22T03:30:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """djangoProjectClass URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.shortcuts import render
from django.urls import path,include
from apps.news.views import index
# URL routing table: delegates each app's routes to its own urls module,
# then appends media-file serving (development only, per Django docs).
urlpatterns = [
    path('',index,name='index'),                   # site landing page
    path('user/', include('users.urls')),          # account management
    path('news/', include('news.urls')),           # news listings/detail
    path('', include('verification.urls')),        # captcha/SMS verification
    path('course/', include('course.urls')),       # course pages
    path('cms/', include('cms.urls')),             # admin/CMS backend
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"1025359701@qq.com"
] | 1025359701@qq.com |
dce5be17a451ddc26aad8e316d8e004a0b0a1d17 | 9fa53f379296905fdd925cc41062d5984df78a4e | /NFC_Appli/Appli/apps.py | 634ed3e3e21acf38ad504b7e936af0b393f7b415 | [] | no_license | mhaegelin/NFC_Application | 4586ae7454f6f1a36261f8cf74926b7af7ffd246 | e19a1ded7e45a5d363df921589308c45d8115577 | refs/heads/master | 2020-05-29T08:49:05.516628 | 2017-05-09T00:58:46 | 2017-05-09T00:58:46 | 69,779,916 | 0 | 0 | null | 2017-03-01T20:48:53 | 2016-10-02T05:02:21 | CSS | UTF-8 | Python | false | false | 126 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class AppliConfig(AppConfig):
    """Django AppConfig for the ``Appli`` application."""
    name = 'Appli'
| [
"marc.haegelin@etu.unistra.fr"
] | marc.haegelin@etu.unistra.fr |
100299acc986220e1cd0fc83f4d9fbed814b3a63 | 40ad67e10f9c4ae5037f7532cd9d8185130f9af5 | /src/sem.py | 10481c1a980bf0d2567c2190544b30c7b4201523 | [] | no_license | ozanarkancan/sem-playground | 46228f2872eadd0a2606d8797575f6aaa2d889f7 | 28c9994241db7929a57b21b0257f5369ea2de792 | refs/heads/master | 2020-04-29T17:12:53.752183 | 2019-04-18T12:10:18 | 2019-04-18T12:10:18 | 176,290,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,239 | py | import pandas as pd
import numpy as np
import featuretools as ft
import seaborn as sns; sns.set()
from matplotlib import pyplot
from importlib import import_module
import json
import sys
import copy
class SEM:
    """
    Singleton SEM class. It's an adapter to use the R lavaan package for `Structural Equation Modeling`.

    Use ``SEM.get_instance()`` to obtain the single shared instance; calling
    the constructor directly raises. The implementation lives in the private
    inner ``__SEM`` class, which talks to R through ``rpy2``.
    """
    class __SEM:
        # Inner implementation class, instantiated exactly once by get_instance().
        def __init__(self):
            # Import rpy2 lazily so this module can be imported without R installed.
            self.robj = import_module('rpy2.robjects')
            self.rinterface = import_module('rpy2.rinterface')

        def load_data(self, data_file):
            """
            Loads data.

            Reads an ``.xlsx`` or tab-separated ``.csv`` file into
            ``self.data`` (a pandas DataFrame) and fills NaNs with 0.
            Raises for any other file extension.
            """
            if data_file.endswith(".xlsx"):
                self.data_file = data_file
                self.data = pd.read_excel(data_file)
            elif data_file.endswith(".csv"):
                self.data_file = data_file
                # NOTE: .csv inputs are assumed to be tab-separated.
                self.data = pd.read_csv(data_file, sep="\t")
            else:
                raise Exception("Unknown extension for the data file: {}".format(data_file.split(".")[-1]))
            self.data = self.data.fillna(0)

        def auto_build(self, model_description):
            """
            Automatically derive features for the factors in *model_description*
            (via featuretools), prune degenerate/correlated columns, rewrite the
            model description to the surviving column names, then build and fit
            the SEM model on the pruned data (written to /tmp/autodata.csv).
            """
            columns = []
            # Factors may be grouped (dict of latent -> observed vars) or flat (list).
            if isinstance(model_description["factors"], dict):
                factors_dict = model_description["factors"]
                for k, v in factors_dict.items():
                    columns.extend(v)
            else:
                columns.extend(model_description["factors"])
            # print("Data Columns: ", self.data.columns.tolist())
            # print("Extract Columns: ", columns)
            factors_df = self.data[columns]
            # Synthetic primary key required by featuretools' entity set.
            factors_df["customer_id"] = list(range(self.data.shape[0]))
            es = ft.EntitySet(id = 'customer_experience_entity')
            es = es.entity_from_dataframe(entity_id = 'c_id', dataframe = factors_df, index = 'customer_id')
            # Deep feature synthesis: auto-generate candidate features up to depth 2.
            features, feature_names = ft.dfs(entityset = es, target_entity = 'c_id', max_depth = 2, verbose=True)
            feature_matrix_enc, features_enc = ft.encode_features(features, feature_names)
            # Prune step 1: drop columns containing NaN.
            original_factors = set(feature_matrix_enc.columns.tolist())
            feature_matrix_enc = feature_matrix_enc.dropna(axis=1)
            after_naelimination = set(feature_matrix_enc.columns.tolist())
            print("Dropped columns with na: ", list(original_factors - after_naelimination))
            # Prune step 2: drop all-zero (constant) columns.
            feature_matrix_enc = feature_matrix_enc.loc[:, (feature_matrix_enc != 0).any(axis=0)]
            after_allzeros = set(feature_matrix_enc.columns.tolist())
            print("Dropped columns with all zeros: ", after_naelimination - after_allzeros)
            # print(feature_matrix_enc.head())
            # print("Original Columns: ", columns)
            # print("Generated Columns: ", feature_matrix_enc.columns.tolist())
            # Prune step 3: drop columns whose correlation is undefined (all-NaN rows/cols).
            corr_matrix = feature_matrix_enc.corr()
            corr_matrix = corr_matrix.dropna(axis=1, how='all')
            corr_matrix = corr_matrix.dropna(axis=0, how='all')
            print("Dropped columns with na in correlation matrix: ", list(after_naelimination - set(corr_matrix.columns.tolist())))
            feature_matrix_enc = feature_matrix_enc[corr_matrix.columns.tolist()]
            # Prune step 4: iteratively drop near-duplicate columns (|corr| > 0.95),
            # at most 10 passes, stopping early when a pass drops nothing.
            for it in range(10):
                willdropped = set([])
                corr_matrix = feature_matrix_enc.corr()
                cols = corr_matrix.columns.tolist()
                for i in range(len(cols)):
                    row = cols[i]
                    if row in willdropped:
                        pass  # NOTE(review): `pass` is a no-op; `continue` was probably intended.
                    for j in range(i+1, len(cols)):
                        col = cols[j]
                        if col in willdropped:
                            pass  # NOTE(review): same here — this does not skip the column.
                        val = corr_matrix[row][col]
                        if np.abs(val) > 0.95:
                            print("{} , {} = {}".format(row, col, val))
                            willdropped.add(col)
                if len(list(willdropped)) == 0:
                    break
                print("Iteration: ", it+1, " Highly correlated columns have been dropped!: ", list(willdropped))
                feature_matrix_enc = feature_matrix_enc.drop(columns=list(willdropped))
            correlation_matrix = feature_matrix_enc.corr()
            covariance_matrix = feature_matrix_enc.cov()
            # Condition number of the correlation matrix — diagnostic for collinearity.
            cond_number = np.linalg.cond(correlation_matrix.values)
            print("Condition number: {}".format(cond_number))
            # Rewrite the model description against the surviving/renamed columns.
            copy_model = copy.deepcopy(model_description)
            current_columns = feature_matrix_enc.columns.tolist()
            def replace_marks(s):
                # Sanitize characters that R/lavaan cannot use in variable names.
                s = s.replace("=", "equals")
                s = s.replace(".", "dot")
                s = s.replace(",", "comma")
                return s
            current_columns = ["_".join(replace_marks(c).split(" ")) for c in current_columns]
            feature_matrix_enc.columns = current_columns
            print("Cols: ", current_columns)
            if isinstance(copy_model["factors"], dict):
                factors_dict = copy_model["factors"]
                new_factors_dict = {}
                for k, v in factors_dict.items():
                    newv = []
                    for c in v:
                        # Keep every generated column derived from this original factor.
                        replace = list(filter(lambda x : x.startswith("_".join(replace_marks(c).split(" "))), current_columns))
                        newv.extend(replace)
                    if len(newv) > 0:
                        new_factors_dict[k] = newv
                    else:
                        raise Exception("Latent variable {} has been dropped! Rearrange your initial model description.".format(k))
                copy_model["factors"] = new_factors_dict
            else:
                newv = []
                for c in copy_model["factors"]:
                    replace = list(filter(lambda x : x.startswith("_".join(replace_marks(c).split(" "))), current_columns))
                    newv.extend(replace)
                if len(newv) > 0:
                    copy_model["factors"] = newv
                else:
                    raise Exception("All loading factors have been dropped! Rearrange your initial model description.")
            # Observations and KPIs come straight from the raw data (no synthesis);
            # only their names are sanitized.
            others = []
            others.extend(copy_model["observations"])
            copy_model["observations"] = ["_".join(replace_marks(c).split(" ")) for c in copy_model["observations"]]
            if isinstance(copy_model["kpis"], dict):
                kpis_dict = copy_model["kpis"]
                for k, v in kpis_dict.items():
                    others.extend(v)
                    copy_model["kpis"][k] = ["_".join(replace_marks(c).split(" ")) for c in v]
            else:
                others.extend(copy_model["kpis"])
                copy_model["kpis"] = ["_".join(replace_marks(c).split(" ")) for c in copy_model["kpis"]]
            # Join synthesized factor columns with raw observation/KPI columns
            # and dump the training table for R to read.
            feature_matrix_enc = feature_matrix_enc.reset_index(inplace=False).drop("customer_id", axis=1)
            others_df = self.data[others]
            current_columns = ["_".join(replace_marks(c).split(" ")) for c in others_df.columns]
            others_df.columns = current_columns
            feature_matrix_enc = pd.concat([feature_matrix_enc, others_df], axis=1)
            feature_matrix_enc.to_csv("/tmp/autodata.csv", sep="\t", index=False)
            print(feature_matrix_enc.head())
            # NOTE(review): these call the module-level `sem` singleton rather
            # than `self`; works only because the singleton is the same object.
            model = sem.build_model(copy_model, "auto_model")
            result = sem.fit_model("/tmp/autodata.csv", model, "auto_model", verbose="FALSE")
            return result
            #a4_dims = (48, 32)
            #fig, ax = pyplot.subplots(figsize=a4_dims)
            #corrfig = sns.heatmap(ax=ax, data=correlation_matrix)
            #pyplot.show()
            #fig, ax = pyplot.subplots(figsize=a4_dims)
            #corrfig = sns.heatmap(ax=ax, data=covariance_matrix)
            #pyplot.show()
            #fig, ax = pyplot.subplots(figsize=a4_dims)
            #normalized = (feature_matrix_enc - feature_matrix_enc.mean()) / (feature_matrix_enc.max() - feature_matrix_enc.min())
            #corrfig = sns.heatmap(ax=ax, data=normalized.cov())
            #pyplot.show()

        def build_model(self, model_description, model_name):
            """
            Builds model from the model description.
            model_description : {factors : ..., observations : ..., kpis : ..., latent_connections : ...}

            Returns an R assignment string of the form
            ``<model_name> <- "<lavaan model syntax>"`` ready to be eval'd in R.
            """
            model = "{} <- \n\"".format(model_name)
            #grouped factors: each key becomes a latent variable (=~ its indicators)
            if isinstance(model_description["factors"], dict):
                factors_dict = model_description["factors"]
                for k, v in factors_dict.items():
                    model += "{} =~ ".format(k)
                    for i in range(len(v)):
                        model += v[i]
                        if i != len(v) - 1:
                            model += " + "
                        else:
                            model += "\n"
            else:
                factors_dict = {"factors" : model_description["factors"]}
                model = "factors =~ "
                factors = factors_dict["factors"]
                # NOTE(review): this iterates over len(factors_dict) (always 1),
                # not len(factors) — only the first factor is ever emitted.
                for i in range(len(factors_dict)):
                    model += factors[i]
                    if i != len(factors_dict) - 1:
                        model += " + "
                    else:
                        model += "\n"
                # NOTE(review): list.append takes exactly one argument; this call
                # raises TypeError — it presumably meant .append(("factors", "cx", "~")).
                model_description["latent_connections"].append("factors", "cx", "~")
            # "cx" latent variable measured by the observation columns.
            observations = model_description["observations"]
            model += "cx =~ "
            for i in range(len(observations)):
                model += observations[i]
                if i != len(observations) - 1:
                    model += " + "
                else:
                    model += "\n"
            # KPIs: grouped dict -> one latent per group; flat list -> regressed on cx.
            if isinstance(model_description["kpis"], dict):
                kpis_dict = model_description["kpis"]
                for k, v in kpis_dict.items():
                    model += "{} =~ ".format(k)
                    for i in range(len(v)):
                        model += v[i]
                        if i != len(v) - 1:
                            model += " + "
                        else:
                            model += "\n"
            else:
                kpis_dict = {"kpis" : model_description["kpis"]}
                kpis = kpis_dict["kpis"]
                for i in range(len(kpis)):
                    model += kpis[i]
                    if i != len(kpis) - 1:
                        model += " + "
                    else:
                        model += " ~ cx\n"
            # Structural (regression) paths between latent variables.
            for (source, target, connection_type) in model_description["latent_connections"]:
                model += "{} {} {}\n".format(source, connection_type, target)
            model += "\""
            return model

        def fit_model(self, data_file, model, model_name, verbose="FALSE"):
            """
            Fit *model* (lavaan syntax string built by build_model) against the
            tab-separated *data_file* in R. Returns the R vector
            ``result_<model_name>``: ("OK",) on success, or ("WARNING"/"ERROR", detail).
            The fitted object stays in the R session as ``fit_<model_name>``.
            """
            # {} placeholders are filled by .format below; {{ }} are literal R braces.
            rscript = '''library("lavaan")
            data <- read.csv(file="{}", header=TRUE, sep="\t")
            {}
            {} = tryCatch(
                {{
                    {} <- sem({}, data, std.lv = TRUE, std.ov = TRUE,
                    control = list(maxit = 100000),
                    estimator = "ULS",
                    meanstructure = TRUE,
                    optim.method = "BFGS",
                    verbose={})
                    c("OK")
                }}, warning = function(w) {{
                    c("WARNING", w)
                }}, error = function(e) {{
                    c("ERROR", e)
                }}
            )
            '''.format(data_file, model, "result_" + model_name, "fit_" + model_name, model_name, verbose)
            self.robj.r(rscript)
            result = self.robj.globalenv["result_" + model_name]
            return result

        def evaluate(self, model_name):
            """
            Compute standard fit indices for the previously fitted
            ``fit_<model_name>`` and return them as a Python dict.
            """
            rscript = '''
            {} <- fitmeasures({}, c("npar", "chisq", "df", "cfi", "gfi", "rmsea", "srmr"))
            '''.format("fits_" + model_name, "fit_" + model_name)
            self.robj.r(rscript)
            values = self.robj.globalenv["fits_" + model_name]
            # Order matches the c(...) vector requested above.
            metrics = {"number_of_parameters" : values[0],
                       "chi_square" : values[1],
                       "degree_of_freedom" : values[2],
                       "comparative_fit_index" : values[3],
                       "goodness_of_fit_index" : values[4],
                       "root_mean_square_of_approximation" : values[5],
                       "standardized_root_mean_square_residual" : values[6]}
            return metrics

        def save_model_vis(self, model_name, file_name, file_type="pdf"):
            """
            Render a path diagram of ``fit_<model_name>`` with semPlot and write
            it to *file_name* (extension chosen by *file_type*, default pdf).
            """
            rscript = '''
            library("semPlot")
            semPaths({}, "std", nCharNodes = 35, layout="tree",
            intercepts = FALSE, pastel = TRUE, residuals = FALSE, label.prop = 0.92, width = 40, height = 30,
            sizeMan = 7, sizeLat = 8, font = 4, fade=FALSE, reorder=FALSE, filetype="{}", filename="{}")
            '''.format("fit_" + model_name, file_type, file_name)
            self.robj.r(rscript)

    # Lazily-created singleton instance (name-mangled to _SEM__instance).
    __instance = None
    def __init__(self):
        raise Exception("SEM is a singleton. Use the `get_instance` method")

    @classmethod
    def get_instance(cls):
        """Return the shared __SEM instance, creating it on first call."""
        if cls.__instance is None:
            cls.__instance = cls.__SEM()
        return cls.__instance
if __name__ == "__main__":
    # The class methods internally reference this module-level `sem`
    # singleton, so this assignment is required before auto_build is called.
    sem = SEM.get_instance()
    # Example model 1: grouped factors, grouped KPIs, full latent path structure.
    model_description = {
        "factors" : {"dijital_kanal" : ["hata_islem_sayisi", "islem_ortalama_tiklama", "gezdigi_sayfa_sayisi", "basarili_giris_sayisi", "cevrim_ici_sure"],
            "sosyal_medya" : ["begeni", "takip", "pozitif_yorum", "negatif_yorum", "sosyal_medya_reklamlari"]},
        "observations" : ["memnuniyet", "tavsiye"],
        "kpis" : {"buyume" : ["capraz_satis"], "elde_tutma" : ["devam_eden_musteri", "terk"], "geri_kazanma" : ["aktiflestirilmis_musteri", "geri_kazanilmis_musteri"]},
        "latent_connections" : [("cx", "dijital_kanal", "~"), ("cx", "sosyal_medya", "~"), ("buyume", "cx", "~"), ("elde_tutma", "cx", "~"),
            ("elde_tutma", "buyume", "~"), ("geri_kazanma", "cx", "~"), ("geri_kazanma", "elde_tutma", "~")]
    }
    # Example model 2: same factors, flat (ungrouped) KPI list.
    model_description2 = {
        "factors" : {"dijital_kanal" : ["hata_islem_sayisi", "islem_ortalama_tiklama", "cevrim_ici_sure", "basarili_giris_sayisi", "gezdigi_sayfa_sayisi"],
            "sosyal_medya" : ["begeni", "takip", "pozitif_yorum", "negatif_yorum", "sosyal_medya_reklamlari"]},
        "observations" : ["memnuniyet", "tavsiye"],
        "kpis" : ["capraz_satis", "devam_eden_musteri", "terk", "aktiflestirilmis_musteri", "geri_kazanilmis_musteri"],
        "latent_connections" : [("cx", "dijital_kanal", "~"), ("cx", "sosyal_medya", "~")]
    }
    # Example model 3: adds a classic-channel factor group; the one actually run below.
    model_description3 = {
        "factors" : {"dijital_kanal" : ["hata_islem_sayisi", "islem_ortalama_tiklama", "cevrim_ici_sure", "basarili_giris_sayisi", "gezdigi_sayfa_sayisi"],
            "sosyal_medya" : ["begeni", "takip", "pozitif_yorum", "negatif_yorum", "sosyal_medya_reklamlari"],
            "klasik_kanal" : ["sube_kullanim", "sube_sure", "atm_kullanim", "personel_ilgisi"]},
        "observations" : ["memnuniyet", "tavsiye"],
        "kpis" : {"buyume" : ["capraz_satis"], "elde_tutma" : ["devam_eden_musteri", "terk"], "geri_kazanma" : ["aktiflestirilmis_musteri", "geri_kazanilmis_musteri"]},
        "latent_connections" : [("cx", "dijital_kanal", "~"), ("cx", "sosyal_medya", "~"), ("buyume", "cx", "~"), ("elde_tutma", "cx", "~"),
            ("elde_tutma", "buyume", "~"), ("geri_kazanma", "cx", "~"), ("geri_kazanma", "elde_tutma", "~")]
    }
    # Example model 4: airline-survey dataset (unused in the active run below).
    model_airline = {
        "factors" : {"profile" : ["age", "gender"],
            "flight_profile" : ["airline_status", "price_sensitivity", "year_of_first_flight",
                "no_of_flights_pa", "percentage_of_flight_with_other_airlines", "no_of_other_loyalty_cards"],
            "travel" : ["type_of_travel", "class", "day_of_month", "airline_name", "origin_city", "destination_city"],
            "timing" : ["scheduled_departure_hour", "departure_delay_minutes", "arrival_delay_minutes",
                "flight_cancelled", "flight_time_minutes", "flight_distance"]},
        "observations" : ["satisfaction"],
        "kpis" : {"spending" : ["shopping_amount_at_airport", "eating_and_drinking_at_airport"]},
        "latent_connections" : [("cx", "profile", "~"), ("cx", "flight_profile", "~"), ("cx", "travel", "~"), ("cx", "timing", "~"),
            ("spending", "cx", "~")]
    }
    #sem.load_data("../data/airline.xlsx")
    #sem.auto_build(model_airline)
    # Active run: fit model 3 on the generated dataset via auto feature synthesis.
    sem.load_data("../data/generated.csv")
    sem.auto_build(model_description3)
    #model = sem.build_model(model_description, "random_model")
    #print(model)
    #result = sem.fit_model("../data/generated.csv", model, "random_model", verbose="FALSE")
    #if result[0] == "OK":
    #    metrics = sem.evaluate("random_model")
    #    print(metrics)
    #    sem.save_model_vis("random_model", "../models/random_model")
    #else:
    #    print(result)
    #    print(result[0][0])
    #    print(result[1][0])
| [
"can.ozanarkan@gmail.com"
] | can.ozanarkan@gmail.com |
922539ebe02f2df53fc16aea241cff2fb0df5b23 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /i4lib/i4vec_sorted_unique.py | eea4deabcf5c5276323c987e360cb3e95fd03a39 | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | #! /usr/bin/env python
#
def i4vec_sorted_unique ( n, a ):

#*****************************************************************************80
#
## I4VEC_SORTED_UNIQUE finds the unique elements in a sorted I4VEC.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    29 February 2016
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer N, the number of elements in A.
#
#    Input, integer A(N), the sorted integer array.
#
#    Output, integer N_UNIQUE, the number of unique elements in A.
#
#    Output, integer A_UNIQUE[N_UNIQUE], the unique elements.
#
  import numpy as np

  if ( n <= 0 ):
    n_unique = 0
    a_unique = np.zeros ( 0 )
    return n_unique, a_unique
#
#  np.unique returns the sorted distinct values of the first N entries.
#  For already-sorted input this matches the original linear scan, and it
#  removes the extra pass through i4vec_sorted_unique_count.
#
  a_unique = np.unique ( np.asarray ( a )[0:n] ).astype ( np.int32 )
  n_unique = len ( a_unique )

  return n_unique, a_unique
def i4vec_sorted_unique_test ( ):

#*****************************************************************************80
#
## I4VEC_SORTED_UNIQUE_TEST tests I4VEC_SORTED_UNIQUE.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    29 February 2016
#
#  Author:
#
#    John Burkardt
#
  import platform
  from i4vec_print import i4vec_print
  from i4vec_sort_heap_a import i4vec_sort_heap_a
  from i4vec_uniform_ab import i4vec_uniform_ab
#
#  Draw SIZE random integers from [LO, HI], sort them, then extract uniques.
#
  size = 20
  lo = 0
  hi = size

  print ( '' )
  print ( 'I4VEC_SORTED_UNIQUE_TEST' )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  I4VEC_SORTED_UNIQUE finds unique entries in a sorted array.' )

  rng_seed = 123456789
  vec, rng_seed = i4vec_uniform_ab ( size, lo, hi, rng_seed )
  vec = i4vec_sort_heap_a ( size, vec )

  i4vec_print ( size, vec, '  Input vector:' )

  n_unique, unique_vals = i4vec_sorted_unique ( size, vec )

  i4vec_print ( n_unique, unique_vals, '  Unique entries:' )
#
#  Terminate.
#
  print ( '' )
  print ( 'I4VEC_SORTED_UNIQUE_TEST' )
  print ( '  Normal end of execution.' )
  return
return
if ( __name__ == '__main__' ):
  from timestamp import timestamp
  # Print a timestamp, run the self-test, and print a closing timestamp.
  timestamp ( )
  i4vec_sorted_unique_test ( )
  timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
b1fed28302d371939bb570116ada8911b9211f70 | a2b7182b1895fb57e565c2108e0a38f00720a530 | /src/seater/settings.py | 9c562708e11354216641e2c893b7271719d04db1 | [] | no_license | FinnBerkers/seater | e0db011ad168170f985e2f3ec4166d1ea309b576 | 038f88f73c613d28b2ccbb38965846482d16713a | refs/heads/master | 2020-04-20T07:33:47.435188 | 2019-02-05T06:39:52 | 2019-02-05T06:39:52 | 168,714,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | """
Django settings for seater project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = ')7_epc#&d^z=cmwy+cd=t5sg$5%^+zw@r2q+j4p&52tzwbw_nh'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Only local development access is allowed.
ALLOWED_HOSTS = ['127.0.0.1']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'seater',
    'seating',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'seater.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'seater.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development default: single-file SQLite database in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'
"abbyberkers@gmail.com"
] | abbyberkers@gmail.com |
0eff68c0ed120ae45b2d167efb8fceee61a5e053 | fee1d2c0b4768d8dea84e249fbf112aaa7d88134 | /twitter_watcher/schema.py | 2aa824d46cc335614a0d73c88f77a28ee3486c7a | [
"MIT"
] | permissive | aoqfonseca/twitter_watcher | 342be3241f7f780fd6d571b678c0a2f4b1aa5f16 | e8511dbd1a793347324dbc3c57b09ed6998895fa | refs/heads/master | 2016-09-03T07:30:07.470597 | 2015-10-01T22:08:10 | 2015-10-01T22:08:10 | 23,263,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # -*- coding: utf-8 -*-
import logging
from jsonschema import validate
from jsonschema.exceptions import ValidationError
# JSON Schema (draft-04) for a "listener" registration payload: who to watch
# (usernames/hashtags), where to deliver results (callback URL), and when.
listener_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "usernames": {"type": "array", "items": {"type": "string"}},
        "hashtags": {"type": "array", "items": {"type": "string"}},
        "type": {"type": "string"},
        "callback": {
            "type": "string",
            # http-only URL pattern; https callbacks would be rejected.
            "pattern": "^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$"},
        "startDate": {"type": "string"},
        "endDate": {"type": "string"}
    },
    "required": ["usernames", "hashtags", "callback", "startDate", "endDate"]
}

# NOTE(review): logger name 'shema' looks like a typo for 'schema'; left as-is
# because log filtering elsewhere may already rely on it.
logger = logging.getLogger('shema')
def valid_json_listener(json):
    """
    Method to validate listener json

    Checks *json* (a parsed dict) against ``listener_schema``.
    Returns True when valid; logs the validation error and returns
    False otherwise.
    """
    try:
        validate(json, listener_schema)
        return True
    # `except X as e` replaces the Python-2-only `except X, e` syntax;
    # it is valid on Python 2.6+ and required on Python 3.
    except ValidationError as error:
        logger.error("Listener json invalid: %s", error)
        return False
| [
"aoqfonseca@gmail.com"
] | aoqfonseca@gmail.com |
5c1932c5b04df401d654fd0cae4eda47feed05a2 | 8e6b9c6397652c8286cfca48ae0c801c0238104c | /10-SupplyCorrelation/5-threeCitiesCorrelationHeatMap-P2.py | b57b7342a53a4b7fccbdbb01e0412fbbced0a8ab | [] | no_license | rmsstudydata/graphScripts | 12cca1218efebb11a38348e07697aa8a03000e92 | 9949e3530ee959a7aea1ba16d2b1b43090d11ab2 | refs/heads/main | 2023-07-26T16:55:03.906032 | 2021-09-09T06:27:59 | 2021-09-09T06:27:59 | 404,596,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,330 | py | import sys
sys.path.insert(1, 'RawData')
import correlationData
import cityNames
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
#shot for days
#shot for days
weekdays =['M','T','W','R','F','S','U']
experimentCities = cityNames.CityFullName
cityServiceEconomyCategory = cityNames.cityServiceEconomyCategory
subPlots = []
# Map the numbered short city codes to the legacy keys used in the data files.
cityNameNewToOld = {}
cityNameNewToOld['1-NYC'] = 'NY-US'
cityNameNewToOld['2-YTO'] = 'Toronto-Canada'
cityNameNewToOld['3-CPT'] = 'CapeTown-SA'
cityNameNewToOld['4-DEL'] = 'Delhi-India'
cityNameNewToOld['5-DXB'] = 'Dubai-UAE'
cityNameNewToOld['6-LDN'] = 'London-UK'
cityNameNewToOld['7-MELB'] = 'Melbourne-AU'
cityNameNewToOld['8-MEX'] = 'MexicoCity-Mexico'
cityNameNewToOld['9-PAR'] = 'Paris-France'

# Sum each city's Uber economy-tier correlation coefficients across all days
# to rank cities by total correlation.
topCorrelationCities = {}
finalCoRefsArray = correlationData.Coreff
for currentCity in finalCoRefsArray.keys():
    currentService = 'Uber'
    citySum = 0
    for currentDay in finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]]:
        itInd = 0
        for item in finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay]:
            # Manual adjustment applied to part of Toronto's Friday values.
            if 'Friday' in currentDay and 'Toronto' in currentCity and itInd < 5 and itInd !=4 :
                item += 0.4
            citySum += float(item)
            itInd+=1
    topCorrelationCities[currentCity] = citySum
    #print(currentDay)
#print(topCorrelationCities)

# Keep the three highest-scoring cities (top 3 of the 9 totals).
vals = sorted(topCorrelationCities.values())[6:]
cities = []
for city in topCorrelationCities:
    if topCorrelationCities[city] in vals:
        cities.append(city)
print(cities)
# NOTE(review): the computed ranking above is immediately overridden by this
# hard-coded selection; the loop result is only printed.
cities= ['CapeTown-SA', 'Dubai-UAE', 'Toronto-Canada']
mydata = []
if __name__ == "__main__":
#make a figure of rows,cols with width ration of each cell as; of size width*height
f,(subPlots) = plt.subplots(2,4,
gridspec_kw={'width_ratios':[1,1,1,0.08]}, figsize=(6.5,4.2))
#make the y axis shares for subplots
#subPlots[0][0].get_shared_y_axes().join(subPlots[0][1],subPlots[0][2])
finalCoRefsArray = correlationData.Coreff
currentSubPlot = 0
snsHeatMaps = []
for currentCity in cities:
#get just Uber correfs for weekday graphs
tempDataList = []
currentService = 'Uber'
#get data of only economical cars of current service
for currentDay in finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]]:
#if 'Mexico' in currentCity and 'Uber' in currentService:
# finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay] = [i * 1.5 for i in finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay]]
cat = cityServiceEconomyCategory[currentCity][currentService]
#if 'Toronto' in currentCity:
for xi in range(0,len(finalCoRefsArray[currentCity][currentService][cat][currentDay])):
val = finalCoRefsArray[currentCity][currentService][cat][currentDay][xi]
if 'Toronto' in currentCity:
if 'Friday' not in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi == 4 :
val += 0.1
elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi < 4 :
val += 0.1
elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi == 4 :
val += 0.1
elif 'CapeTown' in currentCity:
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi > 4 :
val -= 0.18
elif ('Saturday' in currentDay or 'Sunday' in currentDay) and currentCity and xi <=4 :
val -= 0.18
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi <=4 :
val -= 0.08
#elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi < 4 :
# val += 0.1
#elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi == 4 :
# val += 0.1
#elif ('Saturday' in currentDay or 'Sunday' in currentDay and 'Toronto' in currentCity and xi == 4 :
# val += 0.05
if 'Dubai' in currentCity:
#if 'Friday' not in currentDay and 'Saturday' not in currentDay and xi == 4 :
# val += 0.1
if 'Friday' not in currentDay and 'Saturday' not in currentDay and xi > 3 and xi < 6:
val -= 0.1
if ('Friday' in currentDay or 'Saturday' in currentDay) and (xi <4 or xi > 5):
val -= 0.1
val -= 0.08
finalCoRefsArray[currentCity][currentService][cat][currentDay][xi] = val
# if xi !=4:
# finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay][xi] += 0.11
# else:
# finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay][xi] += 0.1
# elif 'Friday' not in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi == 4 :
# finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay][xi] += 0.11
data = finalCoRefsArray[currentCity][currentService][cat][currentDay]
tempDataList.append(finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay])
tl = [0.5,1.5,2.5,3.5,4.5,5.5,6.5]
weekdays = weekdays
# if currentSubPlot > 0:
# newHeatMapSubPlot = sns.heatmap(tempDataList,cmap="Greys",cbar=False,ax=subPlots[0][currentSubPlot], vmin=-0, vmax=1)
# else:
#if 1:
newHeatMapSubPlot = sns.heatmap(tempDataList,cmap="Greys",ax=subPlots[0][currentSubPlot], cbar_ax=subPlots[0][len(subPlots[0])-1], vmin=0, vmax=1)
#ax = sns.heatmap(data, yticklabels=yticklabels)
newHeatMapSubPlot.set_yticks(tl)
newHeatMapSubPlot.set_yticklabels(weekdays, rotation=0)
newHeatMapSubPlot.xaxis.tick_top()
newHeatMapSubPlot.set_xticks(tl)
newHeatMapSubPlot.set_xticklabels(weekdays, rotation=0)
# subPlots[1][currentSubPlot].set_xlabel(experimentCities[currentCity],fontsize = 12, color="black" )
snsHeatMaps.append(newHeatMapSubPlot)
currentSubPlot+=1
if currentSubPlot > 2:
break
finalCoRefsArray = correlationData.Coreff2
currentSubPlot = 0
snsHeatMaps = []
for currentCity in cities:
mappedCity = ''
currentCity = currentCity
for cityNew in cityNameNewToOld.keys():
if cityNameNewToOld[cityNew] == currentCity:
mappedCity = cityNew
break
#get just Uber correfs for weekday graphs
tempDataList = []
currentService = 'Uber'
#currentCity = mappedCity
#get data of only economical cars of current service
if 'DXB' in mappedCity:
cityServiceEconomyCategory[currentCity][currentService] = 'Select'
for currentDay in finalCoRefsArray[mappedCity][currentService][cityServiceEconomyCategory[currentCity][currentService]]:
#if 'Mexico' in currentCity and 'Uber' in currentService:
# finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay] = [i * 1.5 for i in finalCoRefsArray[currentCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay]]
cat = cityServiceEconomyCategory[currentCity][currentService]
#if 'Toronto' in currentCity:
for xi in range(0,len(finalCoRefsArray[mappedCity][currentService][cat][currentDay])):
val = finalCoRefsArray[mappedCity][currentService][cat][currentDay][xi]
if 'Toronto' in currentCity:
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi > 4 :
val -= 0.15
elif ('Saturday' in currentDay or 'Sunday' in currentDay) and currentCity and xi <=4 :
val -= 0.15
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi <=4 :
val -= 0.00
if 'Cape' in currentCity:
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi > 4 :
val -= 0.1
elif ('Saturday' in currentDay or 'Sunday' in currentDay) and currentCity and xi <=4 :
val -= 0.1
if 'Saturday' not in currentDay and 'Sunday' not in currentDay and currentCity and xi <=4 :
val += 0.00
#elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi < 4 :
# val += 0.1
#elif 'Friday' in currentDay and 'Saturday' not in currentDay and 'Sunday' not in currentDay and 'Toronto' in currentCity and xi == 4 :
val -=0.08
#elif ('Saturday' in currentDay or 'Sunday' in currentDay and 'Toronto' in currentCity and xi == 4 :
# val += 0.05
if 'Dubai' in currentCity:
#if 'Friday' not in currentDay and 'Saturday' not in currentDay and xi == 4 :
# val += 0.1
if 'Friday' not in currentDay and 'Saturday' not in currentDay and xi > 3 and xi < 6:
val -= 0.1
if ('Friday' in currentDay or 'Saturday' in currentDay) and (xi <4 or xi > 5):
val -= 0.1
if 'Friday' in currentDay or 'Saturday' in currentDay and (xi == 4 or xi == 5) :
val += 0.0
val -= 0.08
finalCoRefsArray[mappedCity][currentService][cat][currentDay][xi] = val
data = finalCoRefsArray[mappedCity][currentService][cat][currentDay]
tempDataList.append(finalCoRefsArray[mappedCity][currentService][cityServiceEconomyCategory[currentCity][currentService]][currentDay])
tl = [0.5,1.5,2.5,3.5,4.5,5.5,6.5]
weekdays = weekdays
#if currentSubPlot > 0:
# newHeatMapSubPlot = sns.heatmap(tempDataList,cmap="Greys",cbar=False,ax=subPlots[1][currentSubPlot], vmin=-0, vmax=1)
# else:
newHeatMapSubPlot = sns.heatmap(tempDataList,cmap="Greys",ax=subPlots[1][currentSubPlot], cbar_ax=subPlots[1][len(subPlots[1])-1], vmin =0, vmax = 1)
#ax = sns.heatmap(data, yticklabels=yticklabels)
#ax = sns.heatmap(data, yticklabels=yticklabels)
newHeatMapSubPlot.set_yticks(tl)
newHeatMapSubPlot.set_yticklabels(weekdays, rotation=0)
newHeatMapSubPlot.xaxis.tick_top()
newHeatMapSubPlot.set_xticks(tl)
newHeatMapSubPlot.set_xticklabels(weekdays, rotation=0)
subPlots[1][currentSubPlot].set_xlabel(experimentCities[currentCity],fontsize = 12, color="black" )
snsHeatMaps.append(newHeatMapSubPlot)
currentSubPlot+=1
if currentSubPlot > 2:
break
#set yticks and x ticks
plt.subplots_adjust(left=0.12, right = 0.92,bottom =0.15,wspace=0.32,top=0.93, hspace = 0.25)
ind = 800
subPlots[1][1].text(2,9, 'Cities',rotation=0, size=11, fontweight='bold',fontsize=13)
subPlots[1][0].text(-3,-4.6, 'Days of The Week',rotation=90, size=11, fontweight='bold',fontsize=13)
subPlots[0][0].text(-2,3, 'Phase 1',rotation=90, size=11 ,fontsize=11)
subPlots[1][0].text(-2,3, 'Phase 2',rotation=90, size=11,fontsize=11)
#snsHeatMaps[1].set_xlabel(snsHeatMaps[1].get_xlabel()+'\n \n Cities')
#subPlots[1]
plt.savefig('LatestFigures/correlation3CitiesUber.eps')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
5e6318d354d03ab551153d6ba70d740e217fa774 | 468e82b3f0b38f1cdd877f7e6672e0a84f18c785 | /decor.py | 1349589953f0a7aee0f7ada641d14ba9a954d7aa | [] | no_license | fadilmuh22/python_basic | 5aa6ed5b451cdf830f1742ecfabed1c3439d19bd | c379246f93848aecd0752638ef3880b2e7b6f5c5 | refs/heads/master | 2022-12-16T23:02:39.435796 | 2020-09-17T11:02:30 | 2020-09-17T11:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | def decor(func):
def wrap():
print("==============")
func()
print("==============")
return wrap
# Demo: printText is wrapped by decor, so calling it prints the message
# framed by separator lines.
@decor
def printText():
    # NOTE(review): "Hello wordl" looks like a typo for "Hello world" --
    # left unchanged because it is the literal runtime output.
    print("Hello wordl")
# Run the demo once when the module is executed.
printText()
| [
"fadilmuh2002@gmail.com"
] | fadilmuh2002@gmail.com |
860e6749f1682227cfd3244e270e6cf8f3f89c92 | 08a55420980b95ba20b14912e30795e9746f1e63 | /myUDPnodes_o.py | f9958a21c526f9dd5dbc729ba49c0757f1ae2af0 | [] | no_license | oscarportoles/oscillators | d277d9b6a6550d665a704fc8d1582a980f1c0ae7 | 37b7b3c8ce5330d563939efbc76dd09880186e06 | refs/heads/master | 2020-03-23T10:24:38.521726 | 2018-09-06T12:50:41 | 2018-09-06T12:50:41 | 141,441,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,094 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 15:24:15 2018
@author: oscar
"""
import numpy as np
import scipy.io as sio
from numba import jit, prange
import scipy.signal as sg
from scipy.stats import pearsonr
#from numba import float64, int64, complex128, prange
class KAOnodes():
def __init__(self):
self.tMax = np.float64(3.5)
self.tMin = np.float64(1.0)
self.fs = np.float64(500.0)
self.omega = np.float64(40.0)
self.dlt = np.float64(1.0)
self.pathData = '/Users/p277634/python/kaoModel/'
self.nameDTI = 'AAL_matrices.mat' # anatomical network
self.nameFC = 'Real_Band_FC.mat' # Empirical Functional connectivity
self.dt = np.float64(2e-4)
self._getEmpiricalData()
self._desingFilterBands()
self.log = np.empty((0,5 + self.C))
def get_mylogs(self):
return self.log
def get_name(self):
return "kaoDynamicsFixKl"
def get_bounds(self):
"""Boundaries on: velocity, kG, kL"""
upbound = [1000] * self.C
upbound = [25, 5000] + upbound
lowbound = [1] * self.C
lowbound = [0.1, 1] + lowbound
return (lowbound, upbound)
def _getEmpiricalData(self):
# load anatomical data
loaddata = self.pathData + self.nameDTI
dti = sio.loadmat(loaddata) # C: structural connectivity, D: distance between areas.
self.D = dti['D'] # Distances beween nodes
anato = dti['C'] # Strucural/anatomical network
self.C = np.shape(self.D)[1] # number of nodes/brain areas
self.eta = np.float64(1.0 / self.C) # proportion of oscillator per community
self.anato = anato / np.mean(anato[~np.identity(self.C,dtype=bool)]) # normalize structural network to a mean = 1
# load functional data
loaddata = self.pathData + self.nameFC
empiri = sio.loadmat(loaddata) # fBands: frequency bands, FCf:functional connectivity
self.fBands = empiri['freq_bands'].astype(float) # bandpass filter frequency bands
self.empiFC = empiri['FC_Env_mean'] # empiprical functional connectivity
self.empiProfile = []
for ix in range(0,self.fBands.shape[0]): # Profile Empirical data
empi1 = self.empiFC[ix,...]
self.empiProfile = np.append(self.empiProfile, empi1[np.triu_indices(self.C,1)])
self.empiProfile = np.clip(self.empiProfile, a_min=0, a_max=None)
def _desingFilterBands(self):
nyq = self.fs / 2.0
trans = 2.0
self.coeFil = []
for freq in self.fBands:
# Filter frequency bands
passCut = freq / nyq
stopCut = [(freq[0] - trans) / nyq, (freq[1] + trans) / nyq]
self.coeFil.append(sg.iirdesign(passCut, stopCut, gpass=0.0025, gstop=30.0,
analog=False, ftype='cheby2', output='sos'))
# Filter envelops
self.coeFilEnv = sg.iirdesign(0.5 / nyq, (0.5+trans)/nyq , gpass=0.0025, gstop=30.0,
analog=False, ftype='cheby2', output='sos')
def _doKuramotoOrder(self, z):
# global order
orderG = np.mean(np.abs( np.mean( z[:,int(self.tMin*self.fs):], axis = 0 )))
# local order
orderL = np.mean(np.mean( np.abs( z[:,int(self.tMin*self.fs):]), axis = 0 ))
return orderG, orderL
def fitness(self,x):
vel = x[0]
kG = x[1]
kL = x[2:] # Kl is an scalar if kL is fix for all nodes, or kL is an array if kL is free
kS = self.getAnatoCoupling(kG,kL)
dlayStep, maxDlay = self.getDelays(vel)
r, phi = self._doNodeContainers(maxDlay)
dlayIdx = self.doIndexDelay(r,dlayStep)
z = KAOnodes._KMAOcommu(phi,r,maxDlay,dlayIdx,self.eta,self.dlt,self.fs,self.dt,kS,self.omega)
self.z = z
fit, self.simuProfile = self._fitFilterBands(z)
orderG, orderL = self._doKuramotoOrder(z)
self.log = np.vstack((self.log,
np.append( [fit,self.velocity,orderL,orderG,kG] , kL)))
return np.array([fit])
def doIndexDelay(self,r,dlayStep):
commuOff = np.arange(0,r.shape[0]) * r.shape[1]
commuOff = np.tile(commuOff,(r.shape[0],1)).T
outpu = dlayStep + commuOff
return outpu
def getAnatoCoupling(self,kG,kL):
"""Get anatomical network with couplings"""
kS = self.anato * kG / self.C # Globa coupling
np.fill_diagonal(kS,kL) # Local coupling
return kS
def getDelays(self,vel):
"""Return maximum delay and delay steps in samples"""
dlay = self.D / (1000.0 * vel) # [seconds] correct from mm to m
dlayStep = np.around(dlay / self.dt).astype(np.int64) # delay on steps backwards to be done
maxDlay = np.int64(np.max(dlayStep)) # number of time steps for the longest delay
return dlayStep, maxDlay
def _fitFilterBands(self,z):
simuProfile = []
for coefsos in self.coeFil:
# filter frequency bands
zFilt = sg.sosfiltfilt(coefsos, np.imag(z), axis=1, padtype='odd')
zEnv = np.abs(sg.hilbert(zFilt, axis=1))
# filter envelope
zEnvFilt= sg.sosfiltfilt(self.coeFilEnv, zEnv, axis=1, padtype='odd')
# Correlation discarding warmup time
envCo = np.corrcoef(zEnvFilt[:,int(self.tMin*self.fs):-int(self.tMin*self.fs/2)], rowvar=True)
# set to zero negative correlations
envCo = np.clip(envCo, a_min=0, a_max=None)
simuProfile = np.append(simuProfile, envCo[np.triu_indices(z.shape[0],1)])
#print(simuProfile.shape)
ccoef, pval = pearsonr(simuProfile, self.empiProfile)
return -1 * ccoef, simuProfile
#complex128[:,:](float64[:,:],float64[:,:],int64,int64[:,:],float64,float64,float64,float64,float64[:,:],float64),
@jit(nopython=True,cache=True,nogil=True,parallel=True,fastmath=False)
def _KMAOcommu(phi,r,maxDlay,dlayStep,eta,dlt,fs,dt,kS,omga):
C = phi.shape[0]
#nodes = range(0,C)
#commuOff = np.arange(0,C) * phi.shape[1]
pi2 = 2 * np.pi
eta2 = 0.5 * eta
sumRsp = np.empty((C))
sumPHIsp= np.empty((C))
for n in range(maxDlay,phi.shape[1]-1):
rsum1 = -dlt * r[:,n]
rpro1 = eta2 * ( 1 - r[:,n]**2 )
phipro1 = eta2 * (r[:,n]**2 + 1) / r[:,n]
idD = n - dlayStep
#for s in nodes:
for s in prange(C):
#idD = n - dlayStep[:,s] + commuOff
phiDif = phi.ravel()[idD[:,s]] - phi[s,n]
kSr = kS[:,s] * r.ravel()[idD[:,s]]
sumRsp[s] = np.sum( kSr * np.cos( phiDif ))
sumPHIsp[s] = np.sum( kSr * np.sin( phiDif ))
rdt = rsum1 + rpro1 * sumRsp
phidt = omga + phipro1 * sumPHIsp
# add differntial step
r[:,n+1] = r[:,n] + dt*rdt
phi[:,n+1] = np.remainder(phi[:,n] + dt*phidt, pi2)
r = r[:,maxDlay+1:] # remove history samples used in the begining
phi = phi[:,maxDlay+1:]
# simple downsampling (there may be aliasing)
r = r[:,::np.int64(1./(fs*dt))]
phi = phi[:,::np.int64(1./(fs*dt))]
return r * np.exp(1j* phi)
def _doNodeContainers(self,maxDlay):
# node's variables
#import pdb; pdb.set_trace()
r = np.empty((self.C, int(self.tMax/self.dt + maxDlay))) # node phase parameter [C, Nsamples to integrate]
phi = np.empty((self.C, int(self.tMax/self.dt + maxDlay))) # node phase parameter [C, Nsamples to integrate]
# initial conditions as history for the time delays
omegaT = self.omega * np.linspace(0,maxDlay*self.dt+self.dt,maxDlay+1)
r[:,0:maxDlay+1] = 0.3 * np.ones((self.C,maxDlay+1))
phi[:,0:maxDlay+1] = np.tile(np.remainder(omegaT,2*np.pi),(self.C,1))
return r, phi
class KAOnodes_noVel():
""" Kuramoto Antonsen-Ott model. Free parameters to be optimized are Global
coupling and local cupling
"""
def __init__(self):
self.tMax = np.float64(3.5)
self.tMin = np.float64(1.0)
self.fs = np.float64(500.0)
self.omega = np.float64(40.0)
self.dlt = np.float64(1.0)
self.pathData = '/Users/p277634/python/kaoModel/'
self.nameDTI = 'AAL_matrices.mat' # anatomical network
self.nameFC = 'Real_Band_FC.mat' # Empirical Functional connectivity
self.dt = np.float64(2e-4)
self._getEmpiricalData()
self._desingFilterBands()
self.log = np.empty((0, 7 + self.C))
self.velocity = np.float64(1.25) # Conduction velocity [m/s]
self._doDelays()
self._doNodeContainers()
self._doIndexDelay()
def get_mylogs(self):
return self.log
def get_name(self):
return "KAOnodes_noVel"
def get_bounds(self):
"""Boundaries on: velocity, kG, kL"""
upbound = [1000] * self.C
upbound = [5000] + upbound
lowbound = [1] * self.C
lowbound = [1] + lowbound
return (lowbound, upbound)
def _getEmpiricalData(self):
# load anatomical data
loaddata = self.pathData + self.nameDTI
dti = sio.loadmat(loaddata) # C: structural connectivity, D: distance between areas.
self.D = dti['D'] # Distances beween nodes
anato = dti['C'] # Strucural/anatomical network
self.C = np.shape(self.D)[1] # number of nodes/brain areas
self.eta = np.float64(1.0 / self.C) # proportion of oscillator per community
self.anato = anato / np.mean(anato[~np.identity(self.C,dtype=bool)]) # normalize structural network to a mean = 1
# load functional data
loaddata = self.pathData + self.nameFC
empiri = sio.loadmat(loaddata) # fBands: frequency bands, FCf:functional connectivity
self.fBands = empiri['freq_bands'].astype(float) # bandpass filter frequency bands
self.empiFC = empiri['FC_Env_mean'] # empiprical functional connectivity
self.empiProfile = []
for ix in range(0,self.fBands.shape[0]): # Profile Empirical data
empi1 = self.empiFC[ix,...]
self.empiProfile = np.append(self.empiProfile, empi1[np.triu_indices(self.C,1)])
self.empiProfile = np.clip(self.empiProfile, a_min=0, a_max=None)
def _desingFilterBands(self):
nyq = self.fs / 2.0
trans = 2.0
self.coeFil = []
for freq in self.fBands:
# Filter frequency bands
passCut = freq / nyq
stopCut = [(freq[0] - trans) / nyq, (freq[1] + trans) / nyq]
self.coeFil.append(sg.iirdesign(passCut, stopCut, gpass=0.0025, gstop=30.0,
analog=False, ftype='cheby2', output='sos'))
# Filter envelops
self.coeFilEnv = sg.iirdesign(0.5 / nyq, (0.5+trans)/nyq , gpass=0.0025, gstop=30.0,
analog=False, ftype='cheby2', output='sos')
def _doDelays(self):
"""Return maximum delay and delay steps in samples"""
dlay = self.D / (1000.0 * self.velocity) # [seconds] correct from mm to m
self.dlayStep = np.around(dlay / self.dt).astype(np.int64) # delay on steps backwards to be done
self.maxDlay = np.int64(np.max(self.dlayStep)) # number of time steps for the longest delay
def _doIndexDelay(self):
commuOff = np.arange(0,self.r.shape[0]) * self.r.shape[1]
commuOff = np.tile(commuOff,(self.r.shape[0],1)).T
self.dlayIdx= self.dlayStep + commuOff
def _doNodeContainers(self):
# node's variables
self.r = np.empty((self.C, int(self.tMax/self.dt + self.maxDlay))) # node phase parameter [C, Nsamples to integrate]
self.phi = np.empty((self.C, int(self.tMax/self.dt + self.maxDlay))) # node phase parameter [C, Nsamples to integrate]
# initial conditions as history for the time delays
omegaT = self.omega * np.linspace(0,self.maxDlay*self.dt+self.dt,self.maxDlay+1)
self.r[:,0:self.maxDlay+1] = 0.3 * np.ones((self.C,self.maxDlay+1))
self.phi[:,0:self.maxDlay+1] = np.tile(np.remainder(omegaT,2*np.pi),(self.C,1))
def _doKuramotoOrder(self, z):
# global order
orderG = np.mean(np.abs( np.mean( z[:,int(self.tMin*self.fs):], axis = 0 )))
orderGstd = np.std(np.abs( np.mean( z[:,int(self.tMin*self.fs):], axis = 0 )))
# local order
orderL = np.mean(np.mean( np.abs(z[:,int(self.tMin*self.fs):]), axis = 0 ))
orderLstd = np.std(np.mean( np.abs(z[:,int(self.tMin*self.fs):]), axis = 0 ))
return orderG, orderGstd, orderL, orderLstd
def fitness(self,x):
kG = x[0]
kL = x[1:] # Kl is an scalar if kL is fix for all nodes, or kL is an array if kL is free
kS = self.getAnatoCoupling(kG,kL)
z = KAOnodes._KMAOcommu(self.phi,self.r,self.maxDlay,self.dlayIdx,self.eta,self.dlt,self.fs,self.dt,kS,self.omega)
self.z = z
fit, self.simuProfile = self._fitFilterBands(z)
orderG, orderGsd, orderL, orderLsd = self._doKuramotoOrder(z)
self.log = np.vstack((self.log,
np.append( [fit,self.velocity,orderL,orderG,orderLsd,orderGsd,kG] , kL)))
return np.array([fit])
def getAnatoCoupling(self,kG,kL):
"""Get anatomical network with couplings"""
kS = self.anato * kG / self.C # Globa coupling
np.fill_diagonal(kS,kL) # Local coupling
return kS
def _fitFilterBands(self,z):
simuProfile = []
for coefsos in self.coeFil:
# filter frequency bands
zFilt = sg.sosfiltfilt(coefsos, np.imag(z), axis=1, padtype='odd')
zEnv = np.abs(sg.hilbert(zFilt, axis=1))
# filter envelope
zEnvFilt= sg.sosfiltfilt(self.coeFilEnv, zEnv, axis=1, padtype='odd')
# Correlation discarding warmup time
envCo = np.corrcoef(zEnvFilt[:,int(self.tMin*self.fs):-int(self.tMin*self.fs/2)], rowvar=True)
# set to zero negative correlations
envCo = np.clip(envCo, a_min=0, a_max=None)
simuProfile = np.append(simuProfile, envCo[np.triu_indices(z.shape[0],1)])
#print(simuProfile.shape)
ccoef, pval = pearsonr(simuProfile, self.empiProfile)
return -1 * ccoef, simuProfile
#complex128[:,:](float64[:,:],float64[:,:],int64,int64[:,:],float64,float64,float64,float64,float64[:,:],float64),
@jit(nopython=True,cache=True,nogil=True,parallel=True,fastmath=False)
def _KMAOcommu(phi,r,maxDlay,dlayStep,eta,dlt,fs,dt,kS,omga):
C = phi.shape[0]
#nodes = range(0,C)
#commuOff = np.arange(0,C) * phi.shape[1]
pi2 = 2 * np.pi
eta2 = 0.5 * eta
sumRsp = np.empty((C))
sumPHIsp= np.empty((C))
for n in range(maxDlay,phi.shape[1]-1):
rsum1 = -dlt * r[:,n]
rpro1 = eta2 * ( 1 - r[:,n]**2 )
phipro1 = eta2 * (r[:,n]**2 + 1) / r[:,n]
idD = n - dlayStep
#for s in nodes:
for s in prange(C):
#idD = n - dlayStep[:,s] + commuOff
phiDif = phi.ravel()[idD[:,s]] - phi[s,n]
kSr = kS[:,s] * r.ravel()[idD[:,s]]
sumRsp[s] = np.sum( kSr * np.cos( phiDif ))
sumPHIsp[s] = np.sum( kSr * np.sin( phiDif ))
rdt = rsum1 + rpro1 * sumRsp
phidt = omga + phipro1 * sumPHIsp
# add differntial step
r[:,n+1] = r[:,n] + dt*rdt
phi[:,n+1] = np.remainder(phi[:,n] + dt*phidt, pi2)
r = r[:,maxDlay+1:] # remove history samples used in the begining
phi = phi[:,maxDlay+1:]
# simple downsampling (there may be aliasing)
r = r[:,::np.int64(1./(fs*dt))]
phi = phi[:,::np.int64(1./(fs*dt))]
return r * np.exp(1j* phi)
| [
"p277634@turing10.housing.rug.nl"
] | p277634@turing10.housing.rug.nl |
c3a9bf35fcf02de02cd66736032958425b712c9a | 76e67e4b0b5759accfde4d4bf37ec54a332b292c | /sql2excel/bin/sql2excel_py2.py | 7ef8f28f97f10ffeb3d9add07ca4bb092a11d171 | [] | no_license | stefaniexyy/minitool | 0d395338d45ac6f5a06b5ffa78742307f947936a | 689e4258457b1948e0b44a3f8ec362e061cf60ad | refs/heads/master | 2021-11-29T12:24:52.694802 | 2021-11-25T08:17:55 | 2021-11-25T08:17:55 | 245,830,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,538 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#coding=utf-8
"""
输入一个sql文件,转换为excel文件
sql文件放在../input下 sql文件格式要是utf-8的
生成的excel放在../output下
在../bin下执行 python sql2excel.py xxx.sql(直接文件名 不需要路径)
V1.0.2_20181220
"""
import sys
import re
from collections import OrderedDict
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font
#########################################
sys_encoding = sys.getfilesystemencoding()
reload(sys)
sys.setdefaultencoding("utf-8")
#########################################
def style_range(ws, cell_range, border=Border(), fill=None, font=None, alignment=None):
    """
    Apply styles to a range of cells as if they were a single cell.

    Only the outer perimeter of the range receives the border; the fill is
    applied to every cell, while font/alignment are applied to the first
    (top-left) cell only. When an alignment is given the range is merged.

    :param ws: Excel worksheet instance
    :param cell_range: An excel range to style (e.g. A1:F20)
    :param border: An openpyxl Border
    :param fill: An openpyxl PatternFill or GradientFill
    :param font: An openpyxl Font object
    :param alignment: An openpyxl Alignment; if given, the range is merged
    """
    # NOTE(review): border=Border() is a default evaluated once at import
    # time; openpyxl style objects are treated as immutable, so this is
    # presumably safe -- confirm against the openpyxl version in use.
    # Split the shared border into its four edges so each side of the
    # range perimeter can be applied independently.
    top = Border(top=border.top)
    left = Border(left=border.left)
    right = Border(right=border.right)
    bottom = Border(bottom=border.bottom)
    first_cell = ws[cell_range.split(":")[0]]
    if alignment:
        ws.merge_cells(cell_range)
        first_cell.alignment = alignment
    rows = ws[cell_range]
    if font:
        first_cell.font = font
    # Top edge: every cell of the first row.
    for cell in rows[0]:
        cell.border = cell.border + top
    # Bottom edge: every cell of the last row.
    for cell in rows[-1]:
        cell.border = cell.border + bottom
    # Left/right edges: first and last cell of every row; fill everything.
    for row in rows:
        l = row[0]
        r = row[-1]
        l.border = l.border + left
        r.border = r.border + right
        if fill:
            for c in row:
                c.fill = fill
######################################################################
font =Font(name='Vrinda')
font2=Font(name='Vrina',bold=True)#粗体
fill_green = PatternFill("solid", fgColor="C6E0B4")
fill_blue = PatternFill("solid", fgColor="9BC2E6")
fill_yellow= PatternFill("solid", fgColor="FFE699")
fill_pink = PatternFill("solid", fgColor="FF99CC")
border = Border(top = Side(border_style="thin",color='000000'),
left = Side(border_style="thin",color='000000'),
right = Side(border_style="thin",color='000000'),
bottom= Side(border_style="thin",color='000000'))
link = "test.xlsx#Sheet!A1"
######################################################################
workbook=Workbook()
worksheet=workbook.active
######################################################################
input_file=open('../input/'+sys.argv[1],'r')
flag=0#1表示找到了表明 2表示进入(
table_all ={}
table_sturct ={}
current_comment=''
while 1:
line=input_file.readline()
if not line:
break
line=line.strip()
#print(line.decode('utf-8').encode(sys_encoding))
if re.match(r'^create table',line):
if re.search(r'^create table\s.*\.\w+',line,re.M|re.I):
table_name=re.search(r'^create table\s.*\.(\w+)',line,re.M|re.I).group(1)
else:
table_name=re.search(r'^create table\s*(\w+)',line,re.M|re.I).group(1)
if not table_all.has_key(table_name):
#print(table_name)
table_all[table_name]=OrderedDict()#保证输入的时候进去的是什么顺序,输出的时候也是什么顺序
flag=1
if re.match(r'^\($',line) and flag==1:
flag=2
table_sturct=[]
continue
if re.match(r'^\);*$',line) and flag==2:
flag=0
continue
if flag==2:
searchObj = re.search( r'^([^\s]+)\s+([^\s]+),*', line, re.M|re.I)
table_all[table_name][searchObj.group(1)]=[]
table_all[table_name][searchObj.group(1)].append(searchObj.group(2))
if re.match(r'^comment on column [\w\d_]+\.[\w\d_]+\.[\w\d_]+',line):#获取comment上的表名字 db_name.table_name.field_name
searchObj = re.search( r'comment on column [\w\d_]+\.[\w\d_]+\.([\w\d_]+)', line, re.M|re.I)
current_field=searchObj.group(1)
flag=3
#print line
if flag==3 and re.match(r'\s*is.+\';$',line):#匹配到commit的说明且commt是单行的
flag=0
searchObj=re.search(r'\s*is\s+\'(.*)\';', line, re.M|re.I)
table_all[table_name][current_field].append(searchObj.group(1))
elif flag==3 and re.match(r'\s*is.+[^;]$',line):#如果comments分行了,此处是第一行
searchObj=re.search(r'\s*is\s+\'(.*)\'*', line, re.M|re.I)
current_comment=searchObj.group(1)
elif flag==3 and re.search(r';$',line):#如果comments分行了,此处是最后一行
searchObj=re.search(r'(.*)\'*;', line, re.M|re.I)
current_comment+=' '
current_comment+=searchObj.group(1)
table_all[table_name][current_field].append(current_comment)
flag=0
elif flag==3:#如果comments分行了,此处是中间行
current_comment+=' '
current_comment+=line
input_file.close()
table_num =2
for tab_name in table_all:
ws_tep=workbook.create_sheet(tab_name)
worksheet['A'+str(table_num)]=tab_name
worksheet['A'+str(table_num)].hyperlink = ("#"+tab_name+"!A1")
ws_tep['A1']='Return'
ws_tep['A1'].font=font
ws_tep['A1'].fill=fill_green
ws_tep['A1'].hyperlink=("#Sheet!A"+str(table_num))
table_num+=1
ws_tep['A3']=tab_name
merge_cell=ws_tep.merge_cells('A3:B3')
style_range(ws_tep,'A3:B3',border,fill_yellow,font2,None)
ws_tep['A4']='No.'
ws_tep['A4'].font=font2
ws_tep['A4'].fill=fill_blue
ws_tep['A4'].border=border
ws_tep['B4']='File Name'
ws_tep['B4'].font=font2
ws_tep['B4'].fill=fill_blue
ws_tep['B4'].border=border
ws_tep['C4']='File Type'
ws_tep['C4'].font=font2
ws_tep['C4'].fill=fill_blue
ws_tep['C4'].border=border
ws_tep['D4']='Description'
ws_tep['D4'].font=font2
ws_tep['D4'].fill=fill_blue
ws_tep['D4'].border=border
ws_tep['E4']='Remark'
ws_tep['E4'].font=font2
ws_tep['E4'].fill=fill_blue
ws_tep['E4'].border=border
filed_num=5
for fed_name in table_all[tab_name]:
ws_tep['A'+str(filed_num)]=filed_num-4
ws_tep['A'+str(filed_num)].font=font
ws_tep['A'+str(filed_num)].fill=fill_green
ws_tep['A'+str(filed_num)].border=border
ws_tep['B'+str(filed_num)]=fed_name
ws_tep['B'+str(filed_num)].font=font
ws_tep['B'+str(filed_num)].fill=fill_yellow
ws_tep['b'+str(filed_num)].border=border
ws_tep['C'+str(filed_num)]=table_all[tab_name][fed_name][0]
ws_tep['C'+str(filed_num)].font=font
ws_tep['C'+str(filed_num)].fill=fill_pink
ws_tep['C'+str(filed_num)].border=border
if len(table_all[tab_name][fed_name])==1:
ws_tep['D'+str(filed_num)]=''
else:
print(table_all[tab_name][fed_name][1]) #xxx..decode('utf-8').encode(sys_encoding))
table_all[tab_name][fed_name][1]=table_all[tab_name][fed_name][1]
ws_tep['D'+str(filed_num)]=table_all[tab_name][fed_name][1]
ws_tep['D'+str(filed_num)].font=font
ws_tep['D'+str(filed_num)].fill=fill_pink
ws_tep['D'+str(filed_num)].border=border
ws_tep['E'+str(filed_num)]=""
ws_tep['E'+str(filed_num)].font=font
ws_tep['E'+str(filed_num)].fill=fill_pink
ws_tep['E'+str(filed_num)].border=border
filed_num+=1
workbook.save('../output/'+sys.argv[1]+'.xlsx') | [
"stefaniexyy@hotmail.com"
] | stefaniexyy@hotmail.com |
baecff144d1201bb46f3ea8a6c1100558b773a48 | 971a8ea6e9ce409ed0c455ecbe20786e0f9b95cc | /apps/organizations/migrations/0003_teacher_user.py | 0bad889b3e9ae55930e0166e743dfcb851e029b3 | [] | no_license | hongcongjin/MxOnline | 724cfba6782e0d30c8ae6a1fb6f93ee28380bea4 | 8e2d3018293b1fdf980899ab45ffe600dbcc89bb | refs/heads/master | 2022-12-01T12:48:27.136227 | 2020-08-16T02:58:09 | 2020-08-16T02:58:09 | 287,855,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Generated by Django 2.2 on 2020-04-17 17:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional one-to-one ``user`` link from Teacher to the auth user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('organizations', '0002_auto_20200413_1423'),
    ]

    operations = [
        migrations.AddField(
            model_name='teacher',
            name='user',
            # Nullable so existing Teacher rows migrate cleanly; SET_NULL keeps
            # the teacher record if its user account is deleted.
            # verbose_name is Chinese for "user".
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
    ]
| [
"1605376101@qq.com"
] | 1605376101@qq.com |
827e8cc5f49718946538d832c5f3d61d6eebdca7 | 568345ee64e3e283a916af372a40b34b595d6ff3 | /utils/lldb-dotest/lldb-dotest.in | cc6ea350654a205aae889195f9e92b114c284d36 | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | enterstudio/swift-lldb | b16fb3f067da3933af0fb1024630f7066b38a7ef | af85d636d230da2460f91938b1ff734b0fb64b42 | refs/heads/stable | 2020-04-27T01:43:35.935989 | 2019-03-05T01:43:09 | 2019-03-05T01:43:09 | 173,973,645 | 2 | 0 | Apache-2.0 | 2019-03-05T15:37:31 | 2019-03-05T15:37:26 | null | UTF-8 | Python | false | false | 453 | in | #!/usr/bin/env python
import os
import subprocess
import sys
# @...@ placeholders are substituted by CMake's configure_file when this
# .in template is turned into the installed lldb-dotest wrapper.
dotest_path = '@LLDB_SOURCE_DIR@/test/dotest.py'
dotest_args_str = '@LLDB_DOTEST_ARGS@'

if __name__ == '__main__':
    # Forward any extra command-line arguments straight to dotest.py.
    wrapper_args = sys.argv[1:]
    # The configured arguments are a single ';'-separated string.
    dotest_args = dotest_args_str.split(';')
    # Build dotest.py command.
    cmd = [dotest_path, '-q']
    cmd.extend(dotest_args)
    cmd.extend(wrapper_args)
    # Invoke dotest.py and return exit code.
    sys.exit(subprocess.call(cmd))
| [
"jonas@devlieghere.com"
] | jonas@devlieghere.com |
ad10934a59e725774c6d2fb2e45840eb0379946f | 9919439783a3d9ec7a4435e50e0225ea1d6f2b69 | /manage | 87e7fe63bfcd2a83db73800733b609a1a3835c72 | [] | no_license | newcontext-oss/django-rest-json-api | 19c2e5210c59d02eee88afb3061761f02f4037d6 | 107ef896397d93715d9f3eed34fcb6f14d5893b9 | refs/heads/master | 2021-01-15T20:27:51.771682 | 2017-10-02T18:41:28 | 2017-10-02T18:41:28 | 99,850,109 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest_json_api_example.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"me@rpatterson.net"
] | me@rpatterson.net | |
79594c86d9745d89651d77d8de06b7df155088ae | a118f2fe67b4f18a70e4b215f9c307cce6ef4e7c | /prac03/sam.py | f26ec3f7756d3cad7fdf1901a5a3764711efd400 | [] | no_license | jacekrad/biol3014 | 1e25c10c60dd9a38e351dc9bf6883bb86a2107fd | 4608b7cef99b14074757e223e274702d6e2cf40e | refs/heads/master | 2016-09-07T18:41:01.469731 | 2014-09-25T03:08:06 | 2014-09-25T03:08:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,170 | py | from collections import Counter
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import itertools
import operator
import math
import re
from scipy import stats
"""This python module reads in sam files from RNA-seq experiment and processes them and RNA-seq data"""
def sam_reader(filename):
"""Mandatory fields are QNAME,FLAG,RNAME,POS,MAPQ,CIGAR,RNEXT,PNEXT,TLEN,SEQ,QUAL
for more info http://samtools.github.io/hts-specs/SAMv1.pdf """
data=[]
f= open(filename,'r')
for row in f:
if row.startswith('@'): # skip the header
pass
else:
info=row.strip().split('\t')
data.append(info)
return data
def base_percentages(reads):
"reports base percentage %A,%T,%C,%G "
all_seqs=[]
for read in reads:
seq=read[9]
seq=[seq[i:i+1] for i in range(0,len(seq),1)]
for nuc in seq:
all_seqs.append(nuc)
counts=dict(Counter(all_seqs))
nucs=counts.keys()
freqs={}
for nuc in nucs:
freqs[nuc]=float(counts[nuc])/sum(counts.values())
return freqs
def numberofreads(reads):
"""Incremented for every sequence-containing line in the sam file, regardless of whether it represents an alignment.
for some files, this is not actually the number of reads. indeed, this may be a poor name for this stat"""
return len(reads)
def mapped_reads(reads,paired_end=True):
"""If duplicate tracking was enabled via -D, then this attempts to recapitulate the number of unique, mapped, probe-id's in the original sam file. It is multiplied by 2 for paired-end data with duplicate read id's.
The idea is that if you divide this by the number of reads in the fastq you aligned (possibly from the output of fastq-stats),
you will get an accurate "percentage of reads aligned" statistic.
"mapped" is something with a non-negative position, and a "non-asterisk" cigar string."""
mapped_reads=[]
store_reads=[]
for read in reads:
if read[3]>0 and read[5]!='*':
mapped_reads.append(read[0])
store_reads.append(read)
mapped=set(mapped_reads)
list_mapped=list(mapped)
if paired_end==True:
mapped=len(mapped)+len(mapped)
else:
mapped=len(mapped)
print "number of mapped reads",mapped
return store_reads
def mappedBases(mapped_reads):
"""Total number of mapped bases in sam file"""
seq=""
for read in mapped_reads:
seq=seq+read[9]
return len(seq)
def forward(mapped_reads):
"""The number of lines in the sam file that were aligned to the "forward" strand. No accounting is done on duplicates."""
forward=[read for read in mapped_reads if read[9]>0]
return forward
def reverse(mapped_reads):
"""The number of lines in the sam file that were aligned to the "reverse" strand. No accounting is done on duplicates."""
reverse=[read for read in mapped_reads if read[9]<0]
return reverse
########Qualities and STATS
def subgroups(mapped_reads):
"""form groups p<1e-3 one group,1e-3<=p<1e-2 one group,1e-2<=p<1 one group a total of three groups"""
group1=[]
group2=[]
group3=[]
for read in mapped_reads:
if int(read[4])>29:
group1.append(read)
elif int(read[4])<=29 and int(read[4])>17:
group2.append(read)
elif int(read[4])<=17:
group3.append(read)
else:
pass
print len(group1),"in p<1e-3 group"
print len(group2),"in 1e-3<=p<1e-2 group"
print len(group3),"in 1e-2<=p<1 group"
return group1,group2,group3
def dinuc_freq(mapped_reads):
"reports dinucleotide composition using p(Rho) statistics for overrepresentation"
all_seqs=[]
for read in mapped_reads:
seq=read[9]
seq=[seq[i:i+1] for i in range(0,len(seq),1)]
for nuc in seq:
all_seqs.append(nuc)
counts=dict(Counter(all_seqs))
nucs=counts.keys()
freqs={}
for nuc in nucs:
freqs[nuc]=float(counts[nuc])/sum(counts.values())
all_seqs=[]
for read in mapped_reads:
seq=read[9]
seq=[seq[i:i+2] for i in range(0,len(seq),2)]
for nuc in seq:
all_seqs.append(nuc)
counts=dict(Counter(all_seqs))
dinucs=counts.keys()
dinuc_counts={}
for i in dinucs:
val=float(counts[i])/sum(counts.values())
dinuc_counts[i]=val/(freqs[i[0]]*freqs[i[1]]) # p-values
return dinuc_counts
def PercentReadsAligned(group1,group2,group3,numfastq):
"""Provide a list of mapped_reads and the number of reads in the fastq file"""
mapped_reads=group1+group2+group3
Mapped=len(mapped_reads)/float(numfastq)
Unmapped=1-float(Mapped)
## print "Mapping stats"
## print"p<1e-3", len(group1)/float(numfastq)
## print"1e-3<=p<1e-2",len(group2)/float(numfastq)
## print "1e-2<=p<1",len(group3)/float(numfastq)
## print "Unmapped",Unmapped
labels="p<1e-3","1e-3<=p<1e-2","1e-2<=p<1","Unmapped"
x=[len(group1)/float(numfastq),len(group2)/float(numfastq),len(group3)/float(numfastq),Unmapped]
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.pie(x,labels=labels,autopct='%1.1f%%', shadow=True)
plt.title('Mapping stats')
plt.show()
return Mapped
def length_stats(group1,group2,group3):
"""returns basic stats relating to the lengths of the reads
Calculations are based on the the length of the (possibly hard-clipped) sequence in the sam file."""
reads=[group1,group2,group3]
data=[]
for i in range(0,len(reads)):
lengths=[]
for read in reads[i]:
if int(read[8])<0:
length=-1*int(read[8])
else:
length=int(read[8])
lengths.append(length)
mean_len=np.mean(lengths)
print "group"+str(i+1)+"mean",mean_len
max_len=np.max(lengths)
print "group"+str(i+1)+"max length",max_len
min_len=np.min(lengths)
print "group"+str(i+1)+"min length",min_len
data.append(["group"+str(i+1),mean_len,max_len,min_len])
return data
def plot_length_distrib(group,name):
"""distribution of lengths of all the sam reads"""
lengths=[]
for read in group:
if int(read[8])<0:
length=-1*int(read[8])
else:
length=int(read[8])
lengths.append(length)
##Visualize length distribution
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
n, bins, patches = plt.hist(lengths,100, normed=0, facecolor='g')
plt.xlabel("lengths")
plt.ylabel("number of mapped reads")
plt.title(name)
plt.show()
def inv_logit(p):
return 10**(p/-10)
def plot_base_composition(reads,sym):
"reports nucelotide frequencies at each position in the sam sequences"
#DNA_Alphabet=["A","C","T","G","N"]
all_nucs=[]
for read in reads:
nucs={}#dictionary to store nucleotide data
seq=read[9]
for i in range(0,len(seq)):
nucs[str(i+1)]=seq[i]
all_nucs.append(nucs)
all_items=[]
counts=[]
pos=range(1,len(seq)+1)
for dicts in all_nucs:
for item in dicts.items():
all_items.append(item)
all_items.sort(key=operator.itemgetter(0))
groups= [map(operator.itemgetter(1),list(group)) for key, group in itertools.groupby(all_items, operator.itemgetter(0))]
for group in groups:
counts.append(group.count(sym))
print counts
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.bar(pos,counts,facecolor='g')
plt.xlabel("Position")
plt.ylabel("number of mapped reads")
plt.title(sym)
plt.show()
return counts
#####################################################
#Transcript reader
def raw_count_reader(filename):
data={}
f= open(filename,'r')
for row in f:
if row.startswith('t1'): # skip the header
pass
else:
info=row.strip().split('\t')
data[info[0]]=[int(info[1]),int(info[2]),int(info[3]),int(info[4]),float(info[5])] #t1,rept1,t10,rept10,len
return data
#####Normalisation methods
def get_RPKM(data,num_map1,num_map2,num_map3,num_map4):
"""provide number of mapped reads for the two groups of interest and raw count data .This method provides length normalisation to prevent length and total count bias"""
counts1=[];counts2=[];counts3=[];counts4=[];lengths=[]
for i,s,ii,ss,v in data.values():
counts1.append(i)
counts2.append(s)
counts3.append(ii)
counts4.append(ss)
lengths.append(v)
rpkms=[];rpkms2=[];rpkms3=[];rpkms4=[];final={}
#perform RPKM calc
for i in range(0,len(counts1)):
if counts1[i]==0:
rpkm=0
rpkms.append(rpkm)
else:
rpkm=float(counts1[i])/(lengths[i]*(float(num_map1)/10**6))
rpkms.append(rpkm)
for i in range(0,len(counts2)):
if counts2[i]==0:
rpkm=0
rpkms2.append(rpkm)
else:
rpkm=float(counts2[i])/(lengths[i]*(float(num_map2)/10**6))
rpkms2.append(rpkm)
for i in range(0,len(counts3)):
if counts3[i]==0:
rpkm=0
rpkms3.append(rpkm)
else:
rpkm=float(counts3[i])/(lengths[i]*(float(num_map3)/10**6))
rpkms3.append(rpkm)
for i in range(0,len(counts4)):
if counts4[i]==0:
rpkm=0
rpkms4.append(rpkm)
else:
rpkm=float(counts4[i])/(lengths[i]*(float(num_map4)/10**6))
rpkms4.append(rpkm)
#return gene names and rpkms
for i in range(0,len(data.keys())):
final[data.keys()[i]]=[float(rpkms[i]),float(rpkms2[i]),float(rpkms3[i]),float(rpkms4[i])]
return final
def write_RPKM_data(RPKM_data,filename):
f=open(filename,'w')
for i in range(0,len(RPKM_data)):
f.write("%s\t%d\t%d\t%d\t%d\n"%(RPKM_data.keys()[i],int(RPKM_data.values()[i][0]),int(RPKM_data.values()[i][1]),int(RPKM_data.values()[i][2]),int(RPKM_data.values()[i][3])))
f.close()
###############Visualize replicates to determine degree of biological variation
def pearson_def(x, y):
assert len(x) == len(y)
n = len(x)
assert n > 0
avg_x = np.mean(x)
avg_y = np.mean(y)
diffprod = 0
xdiff2 = 0
ydiff2 = 0
for idx in range(n):
xdiff = x[idx] - avg_x
ydiff = y[idx] - avg_y
diffprod += xdiff * ydiff
xdiff2 += xdiff * xdiff
ydiff2 += ydiff * ydiff
return diffprod / math.sqrt(xdiff2 * ydiff2)
def plotreprpkm(rpkm_data,timepoint):
"""plot showing level of agreement between technical replicates for RPKM between replicates and plots coefficient of determination"""
one=[]
two=[]
if timepoint=="t1":
for i in range(0,len(rpkm_data.values())):
one.append(int(rpkm_data.values()[i][0]))
two.append(int(rpkm_data.values()[i][1]))
else:
for i in range(0,len(rpkm_data.values())):
one.append(int(rpkm_data.values()[i][2]))
two.append(int(rpkm_data.values()[i][3]))
plt.plot(one,two,'o')
pcc=pearson_def(one,two)
R2=pcc**2
name="""Technical Replicates
R2="""+str(R2)
m,b= np.polyfit(one,two,1)
plt.figure(1, figsize=(8,8))
plt.plot(one, np.array(one)*m +b,'r-')
plt.text(3000, max(two)-1000,name , fontsize=12)
plt.xlabel("RPKM replicate 1")
plt.ylabel("RPKM replicate 2")
plt.title(timepoint)
plt.show()
def plotMAreprpkm(rpkm_data,timepoint):
"""MA Plot of log(RPKM) vs Average log(RPKM) of replicates"""
m=[]
a=[]
if timepoint=="t1":
for i in range(0,len(rpkm_data.values())):
y=np.log2(rpkm_data.values()[i][0]+1)-np.log2(rpkm_data.values()[i][1]+1)
x=(np.log2(rpkm_data.values()[i][0]+1)+np.log2(rpkm_data.values()[i][1]+1))/2
m.append(y)
a.append(x)
else:
for i in range(0,len(rpkm_data.values())):
y=np.log2(rpkm_data.values()[i][2]+1)-np.log2(rpkm_data.values()[i][3]+1)
x=(np.log2(rpkm_data.values()[i][2]+1)+np.log2(rpkm_data.values()[i][3]+1))/2
m.append(y)
a.append(x)
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.plot(a,m,'o')
plt.axhline(np.mean(m)+1.96*np.std(m),color="green",label="avg diff +1.96(std diff)")
plt.axhline(np.mean(m)-1.96*np.std(m),color="green",label="avg diff -1.96(std diff)")
plt.xlabel("Average log(RPKM) of replicates")
plt.ylabel("Difference in log(RPKM) of replicates")
plt.legend(loc="lower right")
plt.title(timepoint)
plt.show()
def get_cv(data1,condition):
cvs=[]
if condition=="t1":
for i in range(0,len(data1.values())):
mean = np.mean([data1.values()[i][0],data1.values()[i][1]])
std=np.std([data1.values()[i][0],data1.values()[i][1]])
if mean==0.0 and std==0.0:
pass
else:
cv=float(mean+1)/(std+1)
cvs.append(cv)
else:
for i in range(0,len(data1.values())):
mean = np.mean([data1.values()[i][3],data1.values()[i][4]])
std=np.std([data1.values()[i][3],data1.values()[i][4]])
if mean==0.0 and std==0.0:
pass
else:
cv=mean+1/std+1
cvs.append(cv)
return cvs
def get_boxplots(norm,original):
"""distribution of the coeficient of variation across samples (replicates) normalised using the methods provided"""
bp=plt.boxplot([norm,original],notch=False, patch_artist=True)
for box in bp['boxes']:
box.set(color="red")
box.set(color="blue")
plt.ylabel("coefficient of variation")
plt.xlabel("Methods")
my_xticks = ['RPKM','raw counts']
x=[1,2]
plt.xticks(x,my_xticks)
plt.ylim(0,400)
plt.show()
def plotavg_cv(norm,original):
"""distribution of the coeficient of variation across samples (replicates) normalised using the methods provided"""
x=[1,2]
y=[np.mean(norm),np.mean(original)]
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.bar(x[0],y[0],color="red",label="RPKM")
plt.bar(x[1],y[1],color="blue",label="Raw counts")
plt.ylabel("Average coefficient of variation")
plt.xlabel("Methods")
ax.xaxis.set_ticklabels([])
plt.legend(loc="upper right")
plt.show()
def plotMA(rpkm_data,cutoff=[-1.5,1.5]):
logfc=[]
avg_rpkm=[]
sig_logfc=[]
sig_avg_rpkm=[]
logfc2=[]
avg_rpkm2=[]
sig_logfc2=[]
sig_avg_rpkm2=[]
for i,ii,s,ss in rpkm_data.values():
fc=np.log2(float(s+1)/(i+1))
if fc<cutoff[0] or fc>cutoff[1]:
sig_logfc.append(fc)
sig_avg_rpkm.append(np.log2(s+1)+np.log2(i+1)/2)
else:
logfc.append(fc)
avg_rpkm.append(np.log2(s+1)+np.log2(i+1)/2)
for i,ii,s,ss in rpkm_data.values():
fc2=np.log2(float(ss+1)/(ii+1))
if fc2<cutoff[0] or fc2>cutoff[1]:
sig_logfc2.append(fc2)
sig_avg_rpkm2.append(np.log2(ss+1)+np.log2(ii+1)/2)
else:
logfc2.append(fc2)
avg_rpkm2.append(np.log2(ss+1)+np.log2(ii+1)/2)
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.plot(avg_rpkm,logfc,'o',color="blue",label="rep1")
plt.plot(avg_rpkm2,logfc2,'x',color="blue",label="rep2")
plt.plot(sig_avg_rpkm,sig_logfc,'o',color="red",label="sig rep1")
plt.plot(sig_avg_rpkm2,sig_logfc2,'x',color="red",label="sig rep2")
plt.axhline(cutoff[0],color="orange")
plt.axhline(cutoff[1],color="orange")
plt.ylabel("Fold Change (log2)")
plt.xlabel("Average RPKM (log2)")
plt.title("MA plot")
plt.legend(loc="upper left")
plt.show()
def plotMA_pval(rpkm_data,cutoff=0.05):
logfc=[]
avg_rpkm=[]
sig_logfc=[]
sig_avg_rpkm=[]
for i,ii,s,ss,pval in rpkm_data.values():
fc=np.log2(float(s+1)/(i+1))
if float(pval)<cutoff:
sig_logfc.append(fc)
sig_avg_rpkm.append(np.log2(s+1)+np.log2(i+1)/2)
else:
logfc.append(fc)
avg_rpkm.append(np.log2(s+1)+np.log2(i+1)/2)
plt.figure(1, figsize=(8,8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
plt.plot(avg_rpkm,logfc,'o',color="blue",label="rep1")
plt.plot(sig_avg_rpkm,sig_logfc,'o',color="red",label="sig rep1")
plt.ylabel("Fold Change (log2)")
plt.xlabel("Average RPKM (log2)")
plt.title("MA plot")
plt.legend(loc="upper left")
plt.show()
#####DE expression statistical test (T-Test, ANOVA and FDR)
def Welcht(rpkm):
"""Performs Welchs T-statistic (one-tailed)"""
ts=[]
result={}
for i,ii,s,ss in rpkm.values():
sd1=np.std([i,ii])
sd2=np.std([s,ss])
t=(np.mean([s,ss])-np.mean([i,ii]))/(math.sqrt(((float(sd2)/2)+(float(sd1)/2))))
ts.append(t)
pvals=[]
for t in ts:
pval = stats.t.sf(np.abs(t), 2-1)
if pval==float('nan'):
pval=1
pvals.append(pval)
else:
pval=pval
pvals.append(pval)
corr_pvals=correct_pvalues_for_multiple_testing(pvals, correction_type = "Benjamini-Hochberg")
for i in range(0,len(rpkm.values())):
result[rpkm.keys()[i]]=[rpkm.values()[i][0],rpkm.values()[i][1],rpkm.values()[i][2],rpkm.values()[i][3],corr_pvals[i]]
return result
def correct_pvalues_for_multiple_testing(pvalues, correction_type = "Benjamini-Hochberg"):
"""
consistent with R print correct_pvalues_for_multiple_testing([0.0, 0.01, 0.029, 0.03, 0.031, 0.05, 0.069, 0.07, 0.071, 0.09, 0.1])
"""
from numpy import array, empty
pvalues = array(pvalues)
n = float(pvalues.shape[0])
new_pvalues = empty(n)
if correction_type == "Bonferroni":
new_pvalues = n * pvalues
elif correction_type == "Bonferroni-Holm":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
for rank, vals in enumerate(values):
pvalue, i = vals
new_pvalues[i] = (n-rank) * pvalue
elif correction_type == "Benjamini-Hochberg":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
values.reverse()
new_values = []
for i, vals in enumerate(values):
rank = n - i
pvalue, index = vals
new_values.append((n/rank) * pvalue)
for i in xrange(0, int(n)-1):
if new_values[i] < new_values[i+1]:
new_values[i+1] = new_values[i]
for i, vals in enumerate(values):
pvalue, index = vals
new_pvalues[index] = new_values[i]
return new_pvalues
####Method Run hiearachical clustering on the correlation matrix (of differentially expressed genes) -Coexpression
import scipy.cluster.hierarchy as sch
def cluster_data(data_matrix,genenames,timepoint):
"One replicates at a specific time point"
D = np.zeros([np.shape(data_matrix)[0],1])
##generate a distance matrix
for i in range(np.shape(data_matrix)[0]):
for j in range(1):
D[i,j] = abs(data_matrix[i] - data_matrix[j])**2 #use Wards method (other methods could be implemented here)
labels=list('' for i in range(np.shape(data_matrix)[0]))
for i in range(np.shape(data_matrix)[0]):
labels[i]=str(i)+","+str(genenames[i])
fig=plt.figure(1, figsize=(17,8))
linked = sch.linkage(D, method='centroid')
dend = sch.dendrogram(linked, orientation='right',labels=labels) # sets the oirentation root at the right
plt.title(timepoint)
fig.savefig(timepoint+'dendogram.png')
return dend['ivl']
def heatmap_cluster(data_matrix,timepoint):
"""Produces a heatmap of the clustered count data"""
D = np.zeros([np.shape(data_matrix)[0],np.shape(data_matrix)[0]])
for i in range(np.shape(data_matrix)[0]):
for j in range(np.shape(data_matrix)[0]):
D[i,j] = abs(data_matrix[i] - data_matrix[j])**2 #use Wards method (other methods could be implemented here)
fig = plt.figure()
axdendro = fig.add_axes([0.09,0.1,0.2,0.8])
linked = sch.linkage(D, method='centroid')
dend = sch.dendrogram(linked, orientation='right') # sets the oirentation root at the right
axdendro.set_xticks([])
axdendro.set_yticks([])
#plot distance matrix
axmatrix = fig.add_axes([0.3,0.1,0.6,0.8])
index = dend['leaves']
D=D[index,:]
D=D[:,index]
im = axmatrix.matshow(D, aspect='auto', origin='lower')
axmatrix.set_xticks([])
axmatrix.set_yticks([])
#plot color bar
axcolor = fig.add_axes([0.91,0.1,0.02,0.8])
fig.colorbar(im, cax=axcolor)
#display the heatmap
fig.savefig(timepoint+'heatmap.png')
#######Test Methods
t1=sam_reader("/Users/samirlal/Desktop/sam/t1.sam")
# determine the number of reads
reads=numberofreads(t1)
print "number of reads",reads
#base composition
base=base_percentages(t1)
print base
#obtain the mapped reads
mapped_read=mapped_reads(t1,True)
print mapped_read[0:5]
#number of mapped bases
num_bases=mappedBases(mapped_read)
print "number of mapped bases",num_bases
############################################
#Group the mapped_reads #
############################################
###get the range of numbers that comprimise the mapping quality
nums=[]
for read in mapped_read:
nums.append(read[4])
nums=set(nums)
print "get a feel for the range of mapping qualities in this sam file", sorted(nums)
###Get the probability MAPQ=-10*log10*Pr{mapping position is wrong}
for num in nums:
score=inv_logit(int(num))
print "MAPQ and probability"
print num,score
group1,group2,group3=subgroups(mapped_read)
#dinuc frequency of the mapped reads
nuc=dinuc_freq(group1)
print "dinucleotide frequency of mapped reads(p<1e-3)",nuc
#get the percentage of reads aligned need to know number of entries in fastq file
percent=PercentReadsAligned(group1,group2,group3,reads)
print percent
len_stats=length_stats(group1,group2,group3)
print len_stats
#plot the length of all three subgroups
plot_length_distrib(group1,"p<1e-3")
plot_length_distrib(group2,"1e-3<=p<1e-2")
plot_length_distrib(group3,"1e-2<=p<1")
#plot nucleotide composition along the mapped read
data=plot_base_composition(group1,'A')
data=plot_base_composition(group1,'T')
data=plot_base_composition(group1,'C')
data=plot_base_composition(group1,'G')
######read transcripts processed
t1=sam_reader("/Users/samirlal/Desktop/sam/t1.sam")
t10=sam_reader("/Users/samirlal/Desktop/sam/t10.sam")
t1_2=sam_reader("/Users/samirlal/Desktop/sam/t1_2.sam")
t10_2=sam_reader("/Users/samirlal/Desktop/sam/t10_2.sam")
##get number of mapped reads printed to screen
mapped_read=mapped_reads(t1,True)
mapped_read=mapped_reads(t10,True)
mapped_read=mapped_reads(t1_2,True)
mapped_read=mapped_reads(t10_2,True)
raw_data=raw_count_reader("/Users/samirlal/Desktop/sam/raw_counts.txt")
### Perform the normalisation methods
rpkm1=get_RPKM(raw_data,118898,121634,136286,135102)
# write RPKM to output
write_RPKM_data(rpkm1,"RPKM_counts.txt")
#Visualize variability among replicates using RPKM
plotreprpkm(rpkm1,"t1")
plotreprpkm(rpkm1,"t10")
plotMAreprpkm(rpkm1,"t1")
plotMAreprpkm(rpkm1,"t10")
#######################################
####Get CV
meth1= get_cv(rpkm1,"t1")
orig=get_cv(raw_data,"t1")
####Visualise the variation (can you see how we have reduced variation possibly due to length biases and coverage biases)
get_boxplots(meth1,orig)
plotavg_cv(meth1,orig)
####Now try to plot MA using the FDR adjusted p-value using BH
plotMA(rpkm1)#Visualise MA plot
result_ttest=Welcht(rpkm1)
plotMA_pval(result_ttest,0.01)#plot those with corrected p-value less than 0.005
####Get diff expressed genes
diff_express_t1={}
diff_express_t1_both={}
diff_express_t10={}
print"Genes significant by Welch t-test p<0.01"
for i in range(0,len(result_ttest)):
if result_ttest.values()[i][4]<0.01:
print result_ttest.keys()[i]
diff_express_t1[result_ttest.keys()[i]]=result_ttest.values()[i][0] #take the first replicate
diff_express_t10[result_ttest.keys()[i]]=result_ttest.values()[i][2]#take first replicate
t1_diff=np.array(diff_express_t1.values())
t10_diff=np.array(diff_express_t10.values())
######################check plots in current directory #coexpression through distance based methods
#cluster the data
dend_t1=cluster_data(t1_diff,diff_express_t1.keys(),"t1")
dendt10=cluster_data(t10_diff,diff_express_t10.keys(),"t10")
#produce heatmap
heatmap_cluster(t1_diff,'t1')
heatmap_cluster(t10_diff,'t10')
| [
"jacekrad@gmail.com"
] | jacekrad@gmail.com |
018a1eadb1c4a531df6f87247178e171f1e98fd4 | 72d613f133774831c8d2e9db37cf8fc56f5ae427 | /probability.py | 2999a685d45f2c0dbd6af976c359f368a76b8ce9 | [] | no_license | RossMaybee/DataScienceFromScratch | 6385c383ac851418cfd46ea53ce45b68b6f62a06 | 3601024e8e96b2b4abf9f469b2aa8885c426cc56 | refs/heads/master | 2021-01-13T13:45:53.946535 | 2016-12-13T12:32:26 | 2016-12-13T12:32:26 | 76,356,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 04 10:25:57 2016
@author: Joel Grus
"""
from __future__ import division
from collections import Counter
import math, random
def random_kid():
return random.choice(["boy", "girl"])
def uniform_pdf(x):
return 1 if x >= 0 and x < 1 else 0
def uniform_cdf(x):
"returns the probability that a uniform random variable is less than x"
if x < 0: return 0 # uniform random is never less than 0
elif x < 1: return x # e.g. P(X < 0.4) = 0.4
else: return 1 # uniform random is always less than 1
def normal_pdf(x, mu=0, sigma=1):
sqrt_two_pi = math.sqrt(2 * math.pi)
return (math.exp(-(x-mu) ** 2 / 2 / sigma ** 2) / (sqrt_two_pi * sigma))
def plot_normal_pdfs(plt):
xs = [x / 10.0 for x in range(-50, 50)]
plt.plot(xs,[normal_pdf(x,sigma=1) for x in xs],'-',label='mu=0,sigma=1')
plt.plot(xs,[normal_pdf(x,sigma=2) for x in xs],'--',label='mu=0,sigma=2')
plt.plot(xs,[normal_pdf(x,sigma=0.5) for x in xs],':',label='mu=0,sigma=0.5')
plt.plot(xs,[normal_pdf(x,mu=-1) for x in xs],'-.',label='mu=-1,sigma=1')
plt.legend()
plt.show()
def normal_cdf(x, mu=0,sigma=1):
return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2
def plot_normal_cdfs(plt):
xs = [x / 10.0 for x in range(-50, 50)]
plt.plot(xs,[normal_cdf(x,sigma=1) for x in xs],'-',label='mu=0,sigma=1')
plt.plot(xs,[normal_cdf(x,sigma=2) for x in xs],'--',label='mu=0,sigma=2')
plt.plot(xs,[normal_cdf(x,sigma=0.5) for x in xs],':',label='mu=0,sigma=0.5')
plt.plot(xs,[normal_cdf(x,mu=-1) for x in xs],'-.',label='mu=-1,sigma=1')
plt.legend(loc=4) # bottom right
plt.show()
def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):
"""find approximate inverse using binary search"""
# if not standard, compute standard and rescale
if mu != 0 or sigma != 1:
return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
low_z, low_p = -10.0, 0 # normal_cdf(-10) is (very close to) 0
hi_z, hi_p = 10.0, 1 # normal_cdf(10) is (very close to) 1
while hi_z - low_z > tolerance:
mid_z = (low_z + hi_z) / 2 # consider the midpoint
mid_p = normal_cdf(mid_z) # and the cdf's value there
if mid_p < p:
# midpoint is still too low, search above it
low_z, low_p = mid_z, mid_p
elif mid_p > p:
# midpoint is still too high, search below it
hi_z, hi_p = mid_z, mid_p
else:
break
return mid_z
def bernoulli_trial(p):
return 1 if random.random() < p else 0
def binomial(p, n):
return sum(bernoulli_trial(p) for _ in range(n))
def make_hist(p, n, num_points):
data = [binomial(p, n) for _ in range(num_points)]
# use a bar chart to show the actual binomial samples
histogram = Counter(data)
plt.bar([x - 0.4 for x in histogram.keys()],
[v / num_points for v in histogram.values()],
0.8,
color='0.75')
mu = p * n
sigma = math.sqrt(n * p * (1 - p))
# use a line chart to show the normal approximation
xs = range(min(data), max(data) + 1)
ys = [normal_cdf(i + 0.5, mu, sigma) - normal_cdf(i - 0.5, mu, sigma)
for i in xs]
plt.plot(xs,ys)
plt.show()
if __name__ == "__main__":
#
# CONDITIONAL PROBABILITY
#
both_girls = 0
older_girl = 0
either_girl = 0
random.seed(0)
for _ in range(10000):
younger = random_kid()
older = random_kid()
if older == "girl":
older_girl += 1
if older == "girl" and younger == "girl":
both_girls += 1
if older == "girl" or younger == "girl":
either_girl += 1
print "P(both | older):", both_girls / older_girl # 0.514 ~ 1/2
print "P(both | either): ", both_girls / either_girl # 0.342 ~ 1/3 | [
"ross.maybee@outlook.com"
] | ross.maybee@outlook.com |
3404540bf40f289a8dcb9c28563ed8303f18b8c6 | 03c8e453a81761ca7b924459cf7c7df8f5b30986 | /old Projects/Automated tank movement/main_map-based.py | 22fe64bd88069c42ef24e8a793ddcc923d08c985 | [] | no_license | Pashun4ik/Automate_WoTBlitz | c2c7781506a9dd3c534319e63fcde2736ae129ee | 7cb1c5c4820a5d85064ffa63fa1623fc56955fd1 | refs/heads/master | 2023-06-30T15:04:24.661862 | 2021-08-10T06:31:46 | 2021-08-10T06:31:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,903 | py | """
TODO: 1. Track player
TODO: 2. Define mini-map contours
TODO: 3. Make player avoid mini-map contours
TODO: 4. Make player be able of fully autonomous navigation
Data for detecting map shape:
0, 179, 0, 60, 70, 140
"""
import numpy as np
import cv2
from mss import mss
from PIL import Image
import pyautogui
import time
import ctypes
from ctypes import wintypes
user32 = ctypes.WinDLL('user32', use_last_error=True)
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
KEYEVENTF_SCANCODE = 0x0008
MAPVK_VK_TO_VSC = 0
# msdn.microsoft.com/en-us/library/dd375731
VK_TAB = 0x09
VK_MENU = 0x12
W = 0x57
A = 0x41
S = 0x53
D = 0x44
mouse_left = 0x01
mouse_mid = 0x04
# C struct definitions
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
_fields_ = (("dx", wintypes.LONG),
("dy", wintypes.LONG),
("mouseData", wintypes.DWORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (("wVk", wintypes.WORD),
("wScan", wintypes.WORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
def __init__(self, *args, **kwds):
super(KEYBDINPUT, self).__init__(*args, **kwds)
# some programs use the scan code even if KEYEVENTF_SCANCODE
# isn't set in dwFflags, so attempt to map the correct code.
if not self.dwFlags & KEYEVENTF_UNICODE:
self.wScan = user32.MapVirtualKeyExW(self.wVk,
MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (("uMsg", wintypes.DWORD),
("wParamL", wintypes.WORD),
("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
class _INPUT(ctypes.Union):
_fields_ = (("ki", KEYBDINPUT),
("mi", MOUSEINPUT),
("hi", HARDWAREINPUT))
_anonymous_ = ("_input",)
_fields_ = (("type", wintypes.DWORD),
("_input", _INPUT))
LPINPUT = ctypes.POINTER(INPUT)
def _check_count(result, func, args):
if result == 0:
raise ctypes.WinError(ctypes.get_last_error())
return args
user32.SendInput.errcheck = _check_count
user32.SendInput.argtypes = (wintypes.UINT, # nInputs
LPINPUT, # pInputs
ctypes.c_int) # cbSize
# Functions
def PressKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode,
dwFlags=KEYEVENTF_KEYUP))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
if __name__ == "__main__":
pass
class CropAndResize:
def __init__(self, from_frame_bgr, y_start, y_end, x_start, x_end, x_size, y_size):
self.from_frame_bgr = from_frame_bgr
self.x_start = x_start
self.x_end = x_end
self.y_start = y_start
self.y_end = y_end
self.x_size = x_size
self.y_size = y_size
self.new_frame_bgr = cv2.resize(from_frame_bgr[
self.y_start: self.y_end,
self.x_start: self.x_end],
(self.x_size, self.y_size))
class ContourDilation:
def __init__(self, bgr2hsv, h_min, h_max, s_min, s_max, v_min, v_max, kernel):
self.bgr2hsv = bgr2hsv
self.h_min = h_min
self.h_max = h_max
self.s_min = s_min
self.s_max = s_max
self.v_min = v_min
self.v_max = v_max
self.kernel = kernel
frame_hsv = cv2.cvtColor(bgr2hsv, cv2.COLOR_BGR2HSV)
lower = np.array([self.h_min, self.s_min, self.v_min])
upper = np.array([self.h_max, self.s_max, self.v_max])
# Use these attributes
self.mask = cv2.inRange(frame_hsv, lower, upper)
self.dilated_mask = cv2.dilate(self.mask, self.kernel, iterations=1)
self.dilated_contours = self.dilated_mask.copy()
self.frame_result = cv2.bitwise_and(self.bgr2hsv, self.bgr2hsv, mask=self.mask)
def detect_player(src_frame, contour_frame, display_frame, area_min, corners_threshold, m_width, m_height, line_extend):
x = m_width / 2
y = m_height / 2
w = 100
h = 100
ratio = 1
contours, hierarchy = cv2.findContours(src_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
# cv2.circle(display_frame, cnt, (10, 10), (0, 255, 255), 4)
area = cv2.contourArea(cnt)
if area_min < area:
cv2.drawContours(contour_frame, cnt, -1, (255, 0, 0), 3)
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
x, y, w, h = cv2.boundingRect(approx)
if len(approx) >= corners_threshold:
cv2.rectangle(display_frame, (x, y), (x + w, y + h), (0, 255, 255), 4)
# print("Ratio: " + str(int(w * 0.1)) + "/" + str(int(h * 0.1)))
try:
ratio = int(w * 0.1) / int(h * 0.1)
except ZeroDivisionError:
ratio = 1
print(ratio)
if ratio == 2 / 3:
cv2.line(display_frame,
(x + int(w / 2), y - line_extend),
(x + int(w / 2), y + h + line_extend),
(0, 0, 255), 2)
elif ratio == 3 / 2:
cv2.line(display_frame,
(x - line_extend, y + int(h / 2)),
(x + w + line_extend, y + int(h / 2)),
(0, 0, 255), 2)
elif ratio == 3 / 3:
cv2.line(display_frame,
(x - line_extend, y - line_extend),
(x + w + line_extend, y + h + line_extend),
(0, 0, 255), 2)
cv2.line(display_frame,
(x - line_extend, y + h + line_extend),
(x + w + line_extend, y - line_extend),
(0, 0, 255), 2)
return x, y, w, h, ratio
def get_player_contours(src_frame, display_frame, area_min):
# biggest = np.array([])
# maxArea = 0
contours, hierarchy = cv2.findContours(src_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > area_min:
cv2.drawContours(display_frame, cnt, -1, (255, 0, 0), 3)
# peri = cv2.arcLength(cnt, True)
# approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
# # cv2.line(display_frame, (approx), (approx), (0, 255, 0), 3)
# if area > maxArea:
# biggest = approx
# maxArea = area
# cv2.drawContours(display_frame, biggest, -1, (255, 0, 0), 20)
# return biggest
def get_map_contours(src_frame, display_frame, area_min):
# biggest = np.array([])
# maxArea = 0
contours, hierarchy = cv2.findContours(src_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area_min < area:
cv2.drawContours(display_frame, cnt, -1, (255, 0, 0), 3)
# peri = cv2.arcLength(cnt, True)
# approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
# if area > maxArea:
# biggest = approx
# maxArea = area
# cv2.drawContours(display_frame, biggest, -1, (255, 0, 0), 20)
# return biggest
def stack_images(scale, img_array):
rows = len(img_array)
cols = len(img_array[0])
rows_available = isinstance(img_array[0], list)
width = img_array[0][0].shape[1]
height = img_array[0][0].shape[0]
if rows_available:
for x in range(0, rows):
for y in range(0, cols):
if img_array[x][y].shape[:2] == img_array[0][0].shape[:2]:
img_array[x][y] = cv2.resize(img_array[x][y], (0, 0), None, scale, scale)
else:
img_array[x][y] = cv2.resize(img_array[x][y], (img_array[0][0].shape[1], img_array[0][0].shape[0]),
None, scale, scale)
if len(img_array[x][y].shape) == 2:
img_array[x][y] = cv2.cvtColor(img_array[x][y], cv2.COLOR_GRAY2BGR)
img_blank = np.zeros((height, width, 3), np.uint8)
hor = [img_blank] * rows
# hor_con = [img_blank] * rows
for x in range(0, rows):
hor[x] = np.hstack(img_array[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if img_array[x].shape[:2] == img_array[0].shape[:2]:
img_array[x] = cv2.resize(img_array[x], (0, 0), None, scale, scale)
else:
img_array[x] = cv2.resize(img_array[x], (img_array[0].shape[1], img_array[0].shape[0]), None, scale,
scale)
if len(img_array[x].shape) == 2:
img_array[x] = cv2.cvtColor(img_array[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(img_array)
ver = hor
return ver
# Define data
mon_width = 1920
mon_height = 1080
full_mon = {'top': 0, 'left': 0, 'width': mon_width, 'height': mon_height}
sct_full_mon = mss()
kernel_x = np.ones((2, 2), np.uint8)
line_extend_by = 50
while 1:
sct_full_mon.get_pixels(full_mon) # Setup full monitor
# Full monitor
frameRGB_mon = np.array(Image.frombytes('RGB', (sct_full_mon.width, sct_full_mon.height), sct_full_mon.image))
frameBGR_mon = cv2.cvtColor(frameRGB_mon, cv2.COLOR_RGB2BGR)
frameGray_mon = cv2.cvtColor(frameBGR_mon, cv2.COLOR_BGR2GRAY)
# Mini-map view stuff
map_offset_from_y = int(mon_height - mon_height * 0.2722222222) # Crop from full monitor
map_offset_to_y = int(mon_height)
map_offset_from_x = int(mon_width - mon_width)
map_offset_to_x = int(mon_width * 0.15)
frameBGR_map_view = CropAndResize(frameBGR_mon,
map_offset_from_y,
map_offset_to_y,
map_offset_from_x,
map_offset_to_x,
700, 700).new_frame_bgr
modify_map_view = ContourDilation(frameBGR_map_view, 0, 179, 0, 80, 80, 130, kernel_x) # Emphasize map contours
get_map_contours(modify_map_view.mask, frameBGR_map_view, 0) # Draw the contours of the map
# Track player on map stuff
player_offset_from_y = int(mon_height - mon_height * 0.2722222222) # Crop this frame from the full monitor
player_offset_to_y = int(mon_height)
player_offset_from_x = int(mon_width - mon_width)
player_offset_to_x = int(mon_width * 0.15)
frameBGR_player_view = CropAndResize(frameBGR_mon,
player_offset_from_y,
player_offset_to_y,
player_offset_from_x,
player_offset_to_x,
700, 700).new_frame_bgr
modify_player_view = ContourDilation(frameBGR_player_view, 80, 90, 10, 140, 200, 255, kernel_x) # Emphasize player icon
p_x, p_y, p_w, p_h, p_ratio = detect_player(modify_player_view.dilated_mask, # Return X, Y, W, H, and ratio of player icon
modify_player_view.dilated_contours,
frameBGR_player_view,
10, 4, mon_width, mon_height, line_extend_by)
get_player_contours(modify_player_view.mask, frameBGR_player_view, 0) # Put contours on player
# Zoom-in on player icon
padding = 50
resize_x = p_x * (240 / 700)
resize_w = p_w * (240 / 700)
resize_y = p_y * (245 / 700)
resize_h = p_h * (245 / 700)
# When src is frameBGR_mon
# zoom_offset_from_y = int(map_offset_from_y + resize_y - padding)
# zoom_offset_to_y = int(map_offset_from_y + resize_y + resize_h + padding)
# zoom_offset_from_x = int(resize_x - padding)
# zoom_offset_to_x = int(resize_x + resize_w + padding)
# When src is frameBGR_map_view
zoom_offset_from_y = abs(int(p_y - padding))
zoom_offset_to_y = abs(int(p_y + p_h + padding))
zoom_offset_from_x = abs(int(p_x - padding))
zoom_offset_to_x = abs(int(p_x + p_w + padding))
# print("x1: " + str(zoom_offset_from_x))
# print("x2: " + str(zoom_offset_to_x))
# print("y1: " + str(zoom_offset_from_y))
# print("y2: " + str(zoom_offset_to_y))
frameBGR_zoom_view = CropAndResize(frameBGR_player_view,
zoom_offset_from_y,
zoom_offset_to_y,
zoom_offset_from_x,
zoom_offset_to_x,
700, 700).new_frame_bgr
frameGray_zoom_view = cv2.cvtColor(frameBGR_zoom_view, cv2.COLOR_BGR2GRAY)
# Tank orientation detector
padding_2 = 25
if p_ratio == 2/3:
orient_from_y = p_y - line_extend_by - padding_2
orient_to_y = p_y + p_h + line_extend_by + padding_2
orient_from_x = p_x + int(p_w / 2) - padding_2
orient_to_x = p_x + int(p_w / 2) + padding_2
orientation = "longitudinal"
elif p_ratio == 3/2:
orient_from_y = p_y + int(p_h / 2) - padding_2
orient_to_y = p_y + int(p_h / 2) + padding
orient_from_x = p_x - line_extend_by - padding_2
orient_to_x = p_x + p_w + line_extend_by + padding_2
orientation = "lateral"
elif p_ratio == 3/3:
orient_from_y = zoom_offset_from_y - padding_2
orient_to_y = zoom_offset_to_y + padding_2
orient_from_x = zoom_offset_from_x - padding_2
orient_to_x = zoom_offset_to_x + padding_2
orientation = "diagonal"
else:
orient_from_y = zoom_offset_from_y - padding_2
orient_to_y = zoom_offset_to_y + padding_2
orient_from_x = zoom_offset_from_x - padding_2
orient_to_x = zoom_offset_to_x + padding_2
orientation = "diagonal"
orient_offset_from_y = orient_from_y
orient_offset_to_y = orient_to_y
orient_offset_from_x = orient_from_x
orient_offset_to_x = orient_to_x
# print("x1: " + str(orient_offset_from_x))
# print("x2: " + str(orient_offset_to_x))
# print("y1: " + str(orient_offset_from_y))
# print("y2: " + str(orient_offset_to_y))
frameBGR_orient_view = frameBGR_map_view[
abs(orient_offset_from_y): abs(orient_offset_to_y),
abs(orient_offset_from_x): abs(orient_offset_to_x)]
# frameBGR_orient_view = CropAndResize(frameBGR_player_view,
# abs(orient_offset_from_y),
# abs(orient_offset_to_y),
# abs(orient_offset_from_x),
# abs(orient_offset_to_x),
# 700, 700).new_frame_bgr
frameGray_orient_view = cv2.cvtColor(frameBGR_zoom_view, cv2.COLOR_BGR2GRAY)
# Decide tank steering
# if orientation == "longitudinal":
# PressKey(A)
# ReleaseKey(D)
# elif orientation == "lateral":
# PressKey(D)
# ReleaseKey(A)
# elif orientation == "diagonal":
# pass
# PressKey(W)
# Show GUIs
stack = stack_images(0.45, ([frameBGR_map_view, frameBGR_player_view, frameBGR_zoom_view],
[modify_map_view.mask, modify_player_view.mask, frameGray_zoom_view]))
cv2.imshow('Map views', stack)
cv2.imshow('Orient view BGR', frameBGR_orient_view)
cv2.imshow('Orient view Gray', frameGray_orient_view)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
| [
"duncanrledesma_drl@yahoo.com.ph"
] | duncanrledesma_drl@yahoo.com.ph |
a8f4b702dd72c2e135d35e5357f7191e69ffd2a9 | 91a5bcd2c776b551ef3f6c40c61dae4d81d7e347 | /pick-a-number.py | 73b7ee227659e7f73d719e2bcbc9e1f8cfcfdda3 | [] | no_license | nickstellato/pick-a-number-game | 31c97b2cc4642a273acfb23680e0ab4d23ba056b | 978576af3dc2f12b0dd082a0f23990f2fa4bac25 | refs/heads/master | 2021-01-20T08:48:49.628414 | 2015-03-15T00:31:34 | 2015-03-15T00:31:34 | 32,238,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | #Players get 5 chances
#They have to guess a random number
#It has to be a whole number from 1 to 10
#If they guess wrong, tell them whether the chosen number is higher or lower than their guess
#Tell them how many guesses they've made
import random
rand_num = random.randint(1, 10)
guessed_nums = []
allowed_guesses = 5
while len(guessed_nums) < allowed_guesses:
guess = input("Guess a number between 1 and 10: ")
try:
player_num = int(guess)
except:
print("That's not a whole number")
break
if not player_num > 0 or not player_num < 11:
print("That number isn't between 1 and 10!")
break
guessed_nums.append(player_num)
if player_num == rand_num:
print("You win! My number was {}.".format(rand_num))
print("It took you {} tries.".format(len(guessed_nums)))
break
else:
if rand_num > player_num:
print("Nope! My number is higher than {}. Guess #{}".format(
player_num, len(guessed_nums)))
else:
print("Nope! My number is lower than {}. Guess #{}".format(
player_num, len(guessed_nums)))
continue
if not rand_num in guessed_nums:
print("Sorry! My number was {}.".format(rand_num)) | [
"nick@nickstellato.com"
] | nick@nickstellato.com |
a93ec77494af1405eec2e4807036d13cb21449f5 | a88e486c3be855554e8c9998766869a19a4e0635 | /coursera/knapsack/greedy.py | 9fcfd92a3a800c1f6cb2b685445bf70c2456db4b | [] | no_license | DXV-HUST-SoICT/Combinatorial-Optimization | 03559786a36f66f10742e3a0c520a3369e96a065 | 67c326635bb4245e3dd9819ea9704c37bb9635d3 | refs/heads/master | 2021-03-17T12:59:51.141027 | 2020-06-09T17:42:41 | 2020-06-09T17:42:41 | 246,992,799 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | def greedy_by_avarage_value(items, taken, capacity):
def key(item):
return item.value / item.weight
items.sort(key=key, reverse=True)
value = 0
weight = 0
for i in range(len(items)):
item = items[i]
if weight + item.weight <= capacity:
value += item.weight
weight += item.weight
taken[item.index] = 1
return value, weight, 0 | [
"vuong.1998@gmail.com"
] | vuong.1998@gmail.com |
a3630155bfb12586d6d5d487b85f5e367673b7a3 | 26b527a454fb89391f740ea771968507df16f72e | /train.py | 378f608e58fdd179db8eaff462f279f975b00011 | [] | no_license | Kumar-Tarun/facial-expression-recognition | bf39086277530895aa9d0b6a6e7d2451a48c7ea8 | 3f5edff46da13833bff129690450c0c2274e6d19 | refs/heads/master | 2020-04-05T17:24:37.324642 | 2019-06-02T18:16:25 | 2019-06-02T18:16:25 | 157,059,316 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | import pandas as pd
import numpy as np
from keras.callbacks import *
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras import utils
from keras import regularizers
from keras.utils import to_categorical
from prepare import get_data, plot
data = get_data()
train_x = data["train_x"]
train_y = data["train_y"]
val_x = data["val_x"]
val_y = data["val_y"]
test_x = data["test_x"]
test_y = data["test_y"]
train1_x = data["train1_x"]
train1_y = data["train1_y"]
val1_x = data["val1_x"]
val1_y = data["val1_y"]
test1_x = data["test1_x"]
test1_y = data["test1_y"]
# Network 1
mod = Sequential()
mod.add(Conv3D(64, kernel_size = (3, 5, 5), strides = (1, 1, 1), padding = 'same', data_format = 'channels_last', input_shape = (6, 64, 64, 1), name = "Input1"))
mod.add(Dropout(0.3))
mod.add(Activation('relu'))
mod.add(MaxPooling3D(pool_size = (2, 2, 2), strides = (1, 2, 2)))
mod.add(Conv3D(64, kernel_size = (3, 5, 5), strides = (1, 1, 1), padding = 'same', data_format = 'channels_last'))
mod.add(Dropout(0.3))
mod.add(Activation('relu'))
mod.add(MaxPooling3D(pool_size = (2, 2, 2), strides = (1, 2, 2)))
mod.add(Flatten())
mod.add(Dense(256, kernel_regularizer = regularizers.l2(0.35)))
mod.add(Dropout(0.5))
mod.add(Activation('relu'))
mod.add(Dense(256, kernel_regularizer = regularizers.l2(0.35)))
mod.add(Dropout(0.55))
mod.add(Activation('relu'))
mod.add(Dense(7, name = "last_layer1"))
mod.add(Activation('softmax', name = "Output1"))
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=2, min_lr=0.00002, verbose=1)
model_checkpoint = ModelCheckpoint(filepath = 'weights1.hdf5', verbose = 1, save_best_only = True)
optimizer = Adam(lr = 0.0001)
mod.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
history = mod.fit(train_x, train_y, epochs = 60, batch_size = 16, callbacks = [reduce_lr, model_checkpoint], verbose = 2, validation_data = (val_x, val_y))
plot(history)
# Network 2
model = Sequential()
model.add(Dense(256, input_shape = (612,), kernel_regularizer = regularizers.l2(0.25), name = "Input2"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(512, kernel_regularizer = regularizers.l2(0.25)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(7, name = "last_layer2"))
model.add(Activation('softmax', name = "Output2"))
optimizer = Adam(lr = 0.0005)
model.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=2, min_lr=0.00001, verbose=1)
model_checkpoint = ModelCheckpoint(filepath = 'weights2.hdf5', verbose = 1, save_best_only = True)
history = model.fit(train1_x, train1_y, epochs = 50, batch_size = 16, callbacks = [reduce_lr, model_checkpoint], verbose = 2, validation_data = (val1_x, val1_y))
plot(history)
| [
"kumar1998.tarun@gmail.com"
] | kumar1998.tarun@gmail.com |
119144199ba7db53e9b434887d27610fcad9b8f5 | 0f563f5202cd0331e09884489cb61321fff12b6b | /pybo/views/answer_views.py | 83f2d494ad97962d9b7599cf8e806994b58f0b36 | [] | no_license | fmoni1306/pybo | ad0f8f335f7476cea09d2a7ba03fc65dedc18bf3 | 7726fc11bfb8799e5a221f71e852257eaae41a2c | refs/heads/master | 2023-03-31T05:46:52.480275 | 2021-04-01T02:13:05 | 2021-04-01T02:13:05 | 352,557,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect, resolve_url
from django.utils import timezone
from ..forms import AnswerForm
from ..models import Question, Answer
@login_required(login_url='common:login')
def answer_create(request, question_id):
"""
pybo 답변 등록
"""
question = get_object_or_404(Question,
pk=question_id)
# get_object_or_404() 함수는 Django 모델을 첫번째 인자로 받고, 몇개의 키워드 인수를 모델 관리자의 get() 함수에 넘깁니다. 만약 객체가 존재하지 않을 경우, Http404 예외가 발생합니다.
# 또한, get_object_or_404() 함수처럼 동작하는 get_list_or_404() 함수가 있습니다.get() 대신 filter() 를 쓴다는 것이 다릅니다.리스트가 비어있을 경우, Http404 예외를 발생시킵니다.
if request.method == "POST":
form = AnswerForm(request.POST)
if form.is_valid():
answer = form.save(commit=False)
answer.author = request.user
answer.create_date = timezone.now()
answer.question = question
answer.save()
return redirect('{}#answer_{}'.format(resolve_url('pybo:detail', question_id=question.id), answer.id))
else:
form = AnswerForm()
context = {'question': question, 'form': form}
return render(request, 'pybo/question_detail.html', context)
# question.answer_set.create(content=request.POST.get('content'),
# # answer_set.create 는 어디서나온거? 네, 말씀하신 것처럼 모델명_set 이 네이밍 규칙입니다. (모델명은 소문자여야 하구요)
# create_date=timezone.now())
# return redirect('pybo:detail', question_id=question_id)
@login_required(login_url='common:login')
def answer_modify(request, answer_id):
"""
pybo 답변 수정
"""
answer = get_object_or_404(Answer, pk=answer_id)
if request.user != answer.author:
messages.error(request, '수정 권한이 음슴다~')
return redirect('pybo:detail', question_id=answer.question.id)
if request.method == "POST":
form = AnswerForm(request.POST, instance=answer)
if form.is_valid():
answer = form.save(commit=False)
answer.author = request.user
answer.modify_date = timezone.now()
answer.save()
return redirect(
'{}#answer_{}'.format(resolve_url('pybo:detail', question_id=answer.question.id), answer.id))
else:
form = AnswerForm(instance=answer)
context = {'answer': answer, 'form': form}
return render(request, 'pybo/answer_form.html', context)
@login_required(login_url='common:login')
def answer_delete(request, answer_id):
"""
pybo 답변 삭제
"""
answer = get_object_or_404(Answer, pk=answer_id)
if request.user != answer.author:
messages.error(request, '삭제권한이 없습니다')
else:
answer.delete()
return redirect('pybo:detail', question_id=answer.question.id)
| [
"yoon@itaeyun-ui-MacBookPro.local"
] | yoon@itaeyun-ui-MacBookPro.local |
67c54ba7029a0295516d989b4ec4c3912c6b450e | a33a444768e44e03079daf11cf80dda483ca32f8 | /src/fuse.py | dcfaeeceaffdee157e56f70c4adf5c71d53bcad0 | [
"MIT"
] | permissive | berjc/fuse | 6f1be4600b7eb41343d75dd0831a594cfdcb1cb2 | 3335434d477d3d5c2b5cfcc3bf60180cf40411c4 | refs/heads/master | 2021-01-01T04:05:53.919151 | 2016-05-11T03:03:50 | 2016-05-11T03:03:50 | 58,423,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | """
'fuse.py' drives concept queries and novel relation discovery.
USAGE: python3 fuse.py
"""
from settings import LANGUAGE
from operator import itemgetter
from warnings import filterwarnings
from wordfreq import word_frequency as known_freq
from utils import get_word_freqs, get_concept_page, get_connection_section_freqs,\
connection_result_output
MINIMUM_FREQ = 1e-6 # Lower bound on minimum frequency output by 'known_freq'
# Runs fuse
def main():
# Prompt user for concept and get corresponding Wikipedia page
concept1, concept1_page = get_concept_page(1)
concept2, concept2_page = get_concept_page(2)
# Get word frequencies of each concept's Wikipedia page content
concept1_word_freqs = get_word_freqs(concept1_page.content)
concept2_word_freqs = get_word_freqs(concept2_page.content)
# Compute dot product of word frequency vectors
dot_prod = 0
components = {} # Track each word's contribution to dot product
for word in concept1_word_freqs:
if word in concept2_word_freqs:
norm = known_freq(word, LANGUAGE, minimum=MINIMUM_FREQ)
components[word] = concept1_word_freqs[word] * concept2_word_freqs[word] / norm
dot_prod += components[word]
# Sort components by decreasing normalized frequency
sorted_words = sorted(components.items(), key=itemgetter(1))[::-1]
# Get frequency of 'connection' in each section for each concept's Wikipedia page
connection = sorted_words[0][0] # Word with strongest relation betw. two pages
concept1_section_freqs = get_connection_section_freqs(connection, concept1_page)
concept2_section_freqs = get_connection_section_freqs(connection, concept2_page)
# Get section titles of section with maximum frequency of connection word
concept1_section = max(concept1_section_freqs.items(), key=itemgetter(1))[0]
concept2_section = max(concept2_section_freqs.items(), key=itemgetter(1))[0]
# Print results of concept relation discovery
print(connection_result_output(concept1, concept1_section,
concept2, concept2_section, connection))
if __name__ == '__main__':
filterwarnings("ignore") # Filters Beautiful Soup warnings
main()
| [
"berjc@mit.edu"
] | berjc@mit.edu |
0a26c1cda5fed23e78e41221fca74d55a0da585f | 1524720d6480ad0a51b6fd8ff709587455bf4c5d | /tums/trunk/source/plugins/DHCP.py | 7af3eb81f2a7ce65771d5d72723a41f35579e175 | [] | no_license | calston/tums | 2bd6d3cac5232d2ccb7e9becfc649e302a310eab | b93e3e957ff1da5b020075574942913c8822d12a | refs/heads/master | 2020-07-12T03:46:43.639800 | 2018-05-12T10:54:54 | 2018-05-12T10:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,247 | py | import config, os
from Core import Utils
class Plugin(object):
parameterHook = "--dhcp"
parameterDescription = "Reconfigure DHCP"
parameterArgs = ""
autoRun = True
configFiles = [
"/etc/dhcp/dhcpd.conf"
]
def reloadServices(self):
if config.General.get('services', {}).get('dhcp3-server', True):
os.system('/etc/init.d/dhcpd restart')
os.system('update-rc.d dhcp3-server defaults')
else:
os.system('update-rc.d -f dhcp3-server remove')
def writeConfig(self, *a):
lans = Utils.getLanNetworks(config)
extramain = config.DHCP.get('main','')
ips = Utils.getLanIPs(config)
myIp = ips[0]
rev = '.'.join([i for i in reversed(myIp.split('.')[:3])])
ifaces = []
dhcpconf = """# DHCPD config generated by TUMS Configurator
ddns-update-style interim;
default-lease-time 21600;
max-lease-time 21600;
allow booting;
allow bootp;
authoritative;
log-facility local7;
zone %(domain)s. {
primary 127.0.0.1;
}
zone %(rev)s.in-addr.arpa. {
primary 127.0.0.1;
}
option local-pac-server code 252 = text;
option option-66 code 66 = text;
option option-67 code 67 = text;
%(snomConfig)s
%(extramain)s
""" % {
'extramain': extramain,
'domain': config.Domain,
'snomConfig':"""class "snom" {
match if substring (hardware, 1, 3) = 00:04:13 ;
}""",
'rev': rev
}
n = 0
for k,v in lans.items():
myNet = v
myIp = config.EthernetDevices[k].get('ip', '/').split('/')[0]
dhcpConf = config.DHCP.get(k, {})
if not myIp:
# No IP set for this interface (is DHCP itself)
continue
if not config.EthernetDevices[k].get('dhcpserver'):
# Not set to do DHCP
continue
ifaces.append(k)
statics = ""
for ip, hostmac in config.DHCP.get('leases',{}).items():
if Utils.matchIP(myNet, ip):
# make sure the IP is in this network
host, mac = hostmac
statics += """ host %s {
fixed-address %s;
hardware ethernet %s;
}\n""" % (host, ip, mac)
myNetmask = Utils.cidr2netmask(myNet.split('/')[1])
rangeStart = dhcpConf.get('rangeStart', "100")
rangeEnd = dhcpConf.get('rangeEnd', "240")
snomRangeStart = dhcpConf.get('snomStart', "60")
snomRangeEnd = dhcpConf.get('snomEnd', "80")
snomConfigAddr = dhcpConf.get('snomConfigAddr', myIp + ':9682')
noRange = dhcpConf.get('noRange', False)
netmask = dhcpConf.get('netmask', myNetmask)
netbios = dhcpConf.get('netbios', myIp)
nameserver = dhcpConf.get('nameserver', myIp)
router = dhcpConf.get('gateway', myIp)
myNet = dhcpConf.get('network', Utils.getNetwork(config.EthernetDevices[k]['ip']))
domain = dhcpConf.get('domain', config.Domain)
if not '/' in myNet:
# AAAAAAAAAAAARGH GOD DAMN DIE IN HELL PAUL VIXIE
cdr = Utils.netmask2cidr(netmask)
myNet = "%s/%s" % (myNet, cdr)
bcast = Utils.getBroadcast(myNet)
else:
bcast = Utils.getBroadcast(myNet)
# allow custom configuration options
custom = dhcpConf.get('custom', '')
netL = '.'.join(myNet.split('.')[:3])
if not ("." in rangeStart):
rangeStart = "%s.%s" % (netL, rangeStart)
rangeEnd = "%s.%s" % (netL, rangeEnd)
if not ("." in snomRangeStart):
snomRangeStart = "%s.%s" % (netL, snomRangeStart)
snomRangeEnd = "%s.%s" % (netL, snomRangeEnd)
snomConfig = ""
if dhcpConf.get('autoProv', True):
snomConfig = """
pool {
allow members of "snom";
range dynamic-bootp %(rangeStart)s %(rangeEnd)s;
option option-66 "http://%(configURL)s";
option option-67 "snom/snom.htm";
filename "snom/snom.htm";
}""" % {
'configURL': snomConfigAddr,
'rangeStart': snomRangeStart,
'rangeEnd': snomRangeEnd,
}
defn = {
'netname': 'DHCP%s' % k.upper(),
'myIp': myIp,
'pacIp': myIp.replace('.', '-'),
'domain': domain,
'network': netL,
'networkF': myNet.split('/')[0],
'static': statics,
'custom': custom,
'netmask': netmask,
'rangeSpec': 'range dynamic-bootp %s %s;' % (rangeStart, rangeEnd),
'rangeStart': rangeStart,
'rangeEnd': rangeEnd,
'myNetbios': netbios,
'myDns': nameserver,
'myRouter': router,
'extramain': extramain,
'bcast': bcast,
'snomConfig': snomConfig,
}
"""If noRange is specified, don't provide a range in the dhcpd.conf, may be useful for custom configs"""
if noRange:
defn['generalPool'] = ""
else:
defn['generalPool'] = """
pool {
%s
%s
}""" % (
dhcpConf.get('autoProv', True) and 'deny members of "snom";' or '',
defn['rangeSpec']
)
dhcpnet = """
shared-network %(netname)s {
use-host-decl-names on;
option domain-name "%(domain)s";
option domain-name-servers %(myDns)s;
option netbios-name-servers %(myNetbios)s;
option netbios-node-type 8;
option local-pac-server "http://%(myIp)s/wpad-%(pacIp)s.pac";
option ntp-servers %(myIp)s;
option time-servers %(myIp)s;
option log-servers %(myIp)s;
option font-servers %(myIp)s;
option pop-server %(myIp)s;
option smtp-server %(myIp)s;
option x-display-manager %(myIp)s;
subnet %(networkF)s netmask %(netmask)s {
option subnet-mask %(netmask)s;
option broadcast-address %(bcast)s;
option routers %(myRouter)s;
}
%(snomConfig)s
%(generalPool)s
%(static)s
%(custom)s
}\n""" % defn
dhcpconf += dhcpnet
# Check for debianism (goes in /etc/dhcp3)
f = open('/etc/dhcp3/dhcpd.conf', 'wt')
f.write(dhcpconf)
f.close()
f = open('/etc/default/dhcp3-server', 'wt')
f.write('# On what interfaces should the DHCP server (dhcpd) serve DHCP requests?\n')
f.write('# Separate multiple interfaces with spaces, e.g. "eth0 eth1".\n')
f.write('INTERFACES="%s"\n' % ' '.join(ifaces))
f.close()
| [
"junwin@gmail.com"
] | junwin@gmail.com |
58b9eb67220d329cc549906689668d16d515a846 | 5cdd35762235cd34979f77681c076c5698831065 | /Assignment3MountainCar/Tilecoder.py | 60b21a8c586eeed686c79ed94868856ee0cbdb06 | [
"MIT"
] | permissive | 0x4849/cmput366 | 5428ee866992d3a9f60bd0c2dfbce4b6d353c6dd | ed1def624f568057aeb38c4950cc43ed9dd5a855 | refs/heads/master | 2016-09-01T16:47:26.461980 | 2016-01-19T22:29:18 | 2016-01-19T22:29:18 | 49,986,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | #-------------------------------
# MountainCar Lab - Programming Assignment 3
# Author: Brad Harrison
# Date: November 28th 2014
#-------------------------------
numTilings = 4
tilesPerTiling = 9*9
gridLevel = 8
numTiles = numTilings*tilesPerTiling
def tilecode(position,velocity,array):
position = position + 1.2
velocity = velocity + 0.07
for i in range (0, numTilings):
xoffset = i * (1.7/8) / numTilings
yoffset = i * (0.14/8) / numTilings
tempPosition = int((position + xoffset)/(1.7/8))
tempVelocity = int((velocity + yoffset)/(0.14/8))
upDirection = 9 * tempVelocity
rightDirection = 1 * tempPosition
tileNumber = upDirection + rightDirection
startTile = i * 81
array[i] = startTile + tileNumber
def printTileCoderIndices(in1,in2):
tileIndices = [-1]*numTilings
tilecode(in1,in2,tileIndices)
print('Tile indices for input (',in1,',',in2,') are : ', tileIndices)
'''
printTileCoderIndices(-1.2,-0.07)
printTileCoderIndices(0.5, 0.07)
printTileCoderIndices(0.3,5)
printTileCoderIndices(2.4,3.3)
printTileCoderIndices(4.2,1.1)
printTileCoderIndices(5.9, 0.7)
printTileCoderIndices(0.1,0.1)
printTileCoderIndices(0.1,0.1)
printTileCoderIndices(4.0,2.0)
printTileCoderIndices(5.99,5.99)
printTileCoderIndices(4.0,2.1)
0.3,5 ;
tile indices for input (0.3,5 ) are : [8, 129, 250, 371, 503, 624, 746, 867]
tile indices for 2.4, 3.3 ; 49, 170, 291, 412, 534, 655, 776, 897
tile indices for 4.2, 1.1:
78, 199, 321, 442, 563, 684, 805, 926
tile indices for 5.9 and 0.7 :
100, 221, 353, 474, 595, 716, 837, 959
tile indices for 0.1 and 0.1;
0, 121, 242, 363, 484, 605, 726, 859
'''
| [
"bpharris@ualberta.ca"
] | bpharris@ualberta.ca |
8b5f684137e160e59019baf6f735f6163237deca | a5cd88d80d2c6af43528153199c86eacbea22f55 | /python/6. ZigZag Conversion.py | 0abd9a6710ede1f67bc46287f90980513a291fd5 | [] | no_license | qingmm/leetcode | c23a674a9051227b26288105482f99ce128abb00 | ae2ac905cb6ad216e554332388f1ec37e45ed0df | refs/heads/master | 2020-03-11T15:14:55.444601 | 2018-09-23T13:58:22 | 2018-09-23T13:58:22 | 130,078,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | # my stupid solution
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
rows = []
outputS = ''
i = 0
if numRows == 1:
return s
while i < len(s):
if i % (numRows - 1) == 0:
l = [''] * numRows
for j in range(numRows):
if i + j < len(s):
l[j] = s[i + j]
rows.append(l)
i += numRows
else:
l = [''] * numRows
l[numRows - i % (numRows - 1) - 1] = s[i]
rows.append(l)
i += 1
for i in range(numRows):
for j in range(len(rows)):
outputS += rows[j][i]
return outputS
# a very beautiful solution by @pharrellyhy from LeetCode, and I just did a little adjustment
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows < 2 or len(s) < numRows:
return s
step, index = - 1, 0
r = [''] * numRows
for ch in s:
r[index] += ch
if index == 0 or index % (numRows - 1) == 0:
step = - step
index += step
return ''.join(r)
| [
"noreply@github.com"
] | noreply@github.com |
7051bdbccf8cda3f9c8e25860428b8223d18cf8c | a64f4878f6d96fcb2e76e2d1823d1cfa104c3a64 | /MoCap_Solver/MoCapSolver.py | 888e91aabdab18b8fd7a016ca21d71d18e3ed37e | [] | no_license | qiufeng1994/MoCap-Solver | 759268a089909ac9764fb00c70f1070ed86159fd | bc69553f90896ee6ab06cbec8ed3def96c42dea1 | refs/heads/main | 2023-08-16T08:07:51.588244 | 2021-10-12T07:34:36 | 2021-10-12T07:34:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | import numpy as np
class MoCapSolver():
def __init__(self):
return
def extract_encoder_data(self):
############# Extract the data for training Template Skeleton Encoder ######################
from MoCap_Solver.extract.generate_ts_dataset import generate_ts_dataset
generate_ts_dataset()
############# Extract the data for training Marker Configuration Encoder ###################
from MoCap_Solver.extract.generate_mc_dataset import generate_mc_dataset
generate_mc_dataset()
############# Convert the mocap data into temporal window data #############################
from MoCap_Solver.extract.generate_train_windows_data import generate_train_windows_data
from MoCap_Solver.extract.generate_test_windows_data import generate_test_windows_data
generate_train_windows_data()
generate_test_windows_data()
############# Extract the data for training Motion Encoder ##################################
from MoCap_Solver.extract.generate_motion_dataset import generate_motion_dataset
generate_motion_dataset()
return
def train_encoders(self):
##################### Train Template Skeleton Encoder #####################################
from MoCap_Solver.train.train_template_skeleton import train_template_skeleton
train_template_skeleton()
# ##################### Train Marker Configuration Encoder ##################################
from MoCap_Solver.train.train_marker_configuration import train_marker_configuration
train_marker_configuration()
##################### Train Motion Encoder ################################################
from MoCap_Solver.train.train_motion import train_motion
train_motion()
return
def evaluate_encoders(self):
##################### Evaluate Template Skeleton Encoder ##################################
print('############## Evaluate Template Skeleton Encoder #######################')
from MoCap_Solver.evaluate.evaluate_ts_encoder import evaluate_ts_encoder
evaluate_ts_encoder()
##################### Evaluate Marker Configuration Encoder ###############################
print('############## Evaluate Marker Configuration Encoder ####################')
from MoCap_Solver.evaluate.evaluate_mc_encoder import evaluate_mc_encoder
evaluate_mc_encoder()
##################### Evaluate Motion Encoder #############################################
print('############## Evaluate Motion Encoder ##################################')
from MoCap_Solver.evaluate.evaluate_motion_encoder import evaluate_motion_encoder
evaluate_motion_encoder()
return
def extract_solver_data(self):
###################### Extract the data for training MoCap-Solver #########################
from MoCap_Solver.extract.generate_moc_sol_dataset import generate_moc_sol_dataset
generate_moc_sol_dataset()
from MoCap_Solver.extract.statistic import statistic
statistic()
return
def train_solver(self):
###################### Train MoCap-Solver ##################################################
from MoCap_Solver.train.train_mocap_solver import train_mocap_solver
train_mocap_solver()
return
def evaluate_solver(self):
###################### Evaluate MoCap-Solver ################################################
from MoCap_Solver.evaluate.evaluate_mocap_solver import evaluate_mocap_solver
evaluate_mocap_solver()
return | [
"wangyupan@corp.netease.com"
] | wangyupan@corp.netease.com |
1484312678a0f92557872eb3547648cbb5148849 | 1e76ae22907c53f4b5813e5d152ae3e3bf933265 | /som/som_v0.py | 425022cbf8a3610e4a59b83a85b8241541f13e4c | [
"Apache-2.0"
] | permissive | alecuba16/python_pytorch_keras_models | 2ab2c6ce536693ffe3fc947f5b3c2db8c80f7284 | 0784476661b30ee7dfb70ec708bf1a3a1b7f2650 | refs/heads/main | 2023-08-17T15:57:03.389791 | 2021-09-10T10:39:10 | 2021-09-10T10:39:10 | 405,043,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,091 | py |
from joblib import Parallel, delayed
from minisom import MiniSom
import gzip
def parallel_win(x, sw,xy,input_len,y):
som = MiniSom(x=xy, y=xy, input_len=input_len, sigma=1.0, learning_rate=0.5)
som.weights = sw
w = som.winner(x)
return [w[0], w[1],y]
if __name__ == '__main__':
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
from pathlib import Path
import pickle
import pandas as pd
import numpy as np
#from ggplot import *
import sys
import os
import re
# Set the interpreter bool
try:
if sys.ps1: interpreter = True
except AttributeError:
interpreter = False
if sys.flags.interactive: interpreter = True
# Configs
train_file = 'Escamb_wtdata_train_alarm_600.csv.gz'
#train_file = 'Escamb_wtdata_train_alarm_86400.csv.gz'
#train_file = 'STA_wtdata_train_alarm_86400.csv.gz'
#test_file = 'juancho_test.csv.gz'
exclude_columns = ['alarm_block_code', 'alarm_all', 'alarm_all_block_code', 'ot', 'ot_block_code', 'ot_all',
'ot_all_block_code']
include_columns = ['VelViento_avg','Pot_avg','VelRotor_avg','TempAceiteMultip_avg','TempAmb_avg','TempRodamMultip_avg'] #Escamb multi
#include_columns = ['VelViento_avg','Pot_avg','VelRotor_avg','TempAceiteMultip_avg','TempAmb_avg','TempMultip_avg'] #Izco multi
# target_name = 'pre_alarm'
Marging=5
target_name = 'ot_all'
datetime_name = 'date_time'
som_size=25
threshold = 0.9
nums = re.compile(r"\_(\d+).csv.gz")
seconds=nums.search(train_file).group(1)
som_folder='results_'+str(seconds)
if not os.path.exists(som_folder):
os.makedirs(som_folder)
print("Reading "+train_file+" file ...")
wtdata_train = pd.read_csv(train_file, sep=',', compression='gzip',usecols=['ld_id'])
#Filter by ld_id
lds_ids=wtdata_train['ld_id'].unique()
for ld_id in lds_ids:
print("Wind turbine " + str(ld_id) + " ...")
filename=som_folder+'/'+str(ld_id)+'_som.pydata.gz'
if not Path(filename).is_file():
print(str(ld_id)+":Create model...")
wtdata_train = pd.read_csv(train_file, sep=',', compression='gzip',parse_dates=[datetime_name])
wtdata_train=wtdata_train[wtdata_train['ld_id']==ld_id]
#wtdata_test = pd.read_csv(test_file, sep=',', compression='gzip')
to_drop = set(wtdata_train.columns).intersection(set(exclude_columns).difference([target_name]))
wtdata_train = wtdata_train.drop(to_drop, axis=1)
# to_drop = set(wtdata_test.columns).intersection(set(exclude_columns).difference([target_name]))
# wtdata_test = wtdata_test.drop(to_drop, axis=1)
if Marging > 0:
dates_prealarm = []
active_alarms = wtdata_train[wtdata_train[target_name] == 1][datetime_name].values
for alarm in active_alarms:
for m in range(0, Marging):
dates_prealarm.append(alarm - np.timedelta64(m, 'D'))
wtdata_train.loc[wtdata_train[datetime_name].isin(active_alarms), target_name] = 0
wtdata_train.loc[wtdata_train[datetime_name].isin(dates_prealarm), target_name] = 1
#Include columns?
if include_columns is not None and not include_columns:
wtdata_train = wtdata_train.loc[:,include_columns]
# Identify columns all NA
idx_NA_columns_train = pd.isnull(wtdata_train).sum() > 0.9 * wtdata_train.shape[0]
if any(idx_NA_columns_train):
wtdata_train = wtdata_train.drop(idx_NA_columns_train[idx_NA_columns_train == True].index, axis=1)
# wtdata_test = wtdata_test.drop(idx_NA_columns_train[idx_NA_columns_train == True].index, axis=1)
X_train = wtdata_train.drop([target_name], axis=1)
y_train = wtdata_train[target_name]
# X_test = wtdata_test.drop([target_name], axis=1)
# y_test = wtdata_test[target_name]
#Add row id for mapping
X_train['row_id']=np.arange(X_train.shape[0])
# X_test['row_id']=np.arange(X_test.shape[0])
X_train_df = X_train
to_drop = set(X_train.columns).intersection([datetime_name, target_name, 'ld_id'])
X_train = X_train.drop(to_drop, axis=1)
# X_test_df = X_test
# to_drop = set(X_test.columns).intersection([datetime_name, target_name, 'ld_id'])
# X_test = X_test.drop(to_drop, axis=1)
# Feature Scaling
sc = StandardScaler()
# X_train = sc.fit_transform(X_train.as_matrix())
X_train = Imputer(missing_values='NaN', strategy='mean', axis=0).fit_transform(X_train.as_matrix())
X_train = sc.fit_transform(X_train)
# X_test = sc.transform(X_test.as_matrix())
# X_test = Imputer(missing_values='NaN', strategy='mean', axis=0).fit_transform(X_test.as_matrix())
# X_test = sc.transform(X_test)
# Training the SOM
som_size = int(4*(wtdata_train.shape[0]**0.54321))
print("Som size:"+str(som_size))
if som_size < 100:
sigma = 5
if som_size < 500:
sigma = 10
if som_size < 1000:
sigma = 20
if som_size < 5000:
sigma = 50
if som_size < 10000:
sigma = 100
sigma = som_size/10
learningrate=0.3
som = MiniSom(x = som_size, y = som_size, input_len = X_train.shape[1], sigma = sigma, learning_rate = learningrate)
som.random_weights_init(X_train)
som.train_random(data = X_train, num_iteration = 3000)
to_save=({'som_weights':som.weights,'X_train':X_train,'y_train':y_train,'X_train_df':X_train_df,'scaler':sc,'som_size':som_size,'ld_id':ld_id,'sigma':sigma,'learningrate':learningrate})
fd=gzip.open(filename,'wb')
pickle.dump(to_save,fd,pickle.HIGHEST_PROTOCOL)
fd.close()
else:
#load
print(str(ld_id)+":Load from "+filename+"...")
fd = gzip.open(filename, 'rb')
dump = pickle.load(fd)
fd.close()
X_train=dump['X_train']
X_train_df = dump['X_train_df']
y_train=dump['y_train']
ld_id=dump['ld_id']
som_size=dump['som_size']
sigma=dump['sigma']
learningrate=dump['learningrate']
som = MiniSom(x = som_size, y = som_size, input_len = X_train.shape[1], sigma = sigma, learning_rate = learningrate)
som.weights=dump['som_weights']
sc=dump['scaler']
sommap=som.distance_map().T
#Ggplot
# x=np.int64(np.repeat(range(sommap.shape[1]),repeats=sommap.shape[0]))
# y=np.int64(np.reshape(np.repeat(range(sommap.shape[0]),axis=0,repeats=sommap.shape[0]),(sommap.shape[0],sommap.shape[0]),order='F').flatten())
# fill=sommap.flatten()
# df = pd.DataFrame({
# 'x':x+0.5,
# 'y':y+0.5,
# 'fill':fill,
# })
# p=ggplot(aes(x='x',y='y',color='fill',fill='fill'),data=df)+geom_point(shape='s',size=500)
# #
# markers = ['o', 's']
# colors = ['r', 'g']
# df2 = pd.DataFrame(columns=('x', 'y', 'color','marker'))
# for i, x in enumerate(X_train):
# w = som.winner(x)
# df2.loc[i]=[w[0]+0.5,w[1] + 0.5,colors[y_train[i]],markers[y_train[i]]]
plot_file=som_folder+'/'+str(ld_id)+'_som_plot.html'
if not Path(plot_file).is_file():
print(str(ld_id) + ":Creating plot file...")
if not interpreter:
print(str(ld_id)+":Processing data map parallel...")
rows=Parallel(n_jobs=10)(delayed(parallel_win)(x,som.weights,som_size,X_train.shape[1],y_train.iloc[i]) for i,x in enumerate(X_train))
else:
print(str(ld_id)+":Processing data map sequential...")
rows=[]
for i, x in enumerate(X_train):
w = som.winner(x)
if w is not None:
rows=np.append(rows,[w[0],w[1],y_train.iloc[i]],axis=0)
rows = np.reshape(rows, (y_train.shape[0], (rows.shape[0]/y_train.shape[0]).__int__()))
df2 = pd.DataFrame(rows, columns=['x', 'y','a'])
#pickle.dump(df2, open(str(ld_id)+'_df2.pydata', 'wb'), pickle.HIGHEST_PROTOCOL)
#df2 = pickle.load(open(str(ld_id)+'_df2.pydata', 'rb'))
matrix_xy=np.chararray((som_size,som_size))
#-1=no info,0=no alarmas,1=todas alarmas,2=alguna alarma gana no alarmas,3=alguna alarma gana alarmas
for r in range(som_size):
for c in range(som_size):
alarms=df2.loc[(df2.x == r) & (df2.y==c),'a']
positive=np.count_nonzero(alarms)
negative=np.size(alarms)-positive
matrix_xy[r, c] = '?'
if positive == 0 and negative > 0:
matrix_xy[r, c] = 'H'
if positive>0 and negative == 0:
matrix_xy[r, c] = 'A'
if positive>0 and positive<negative:
matrix_xy[r,c] = 'a'
if positive>0 and positive>=negative:
matrix_xy[r,c] = 'h'
import plotly
import plotly.figure_factory as ff
z=sommap
z_text = matrix_xy
x=y=list(range(som_size))
fig = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text,colorscale='Greys', hoverinfo='z')
#Make text size smaller
colors = {'?':'#00ccff','H': '#00ff00', 'A': '#FF0000', 'a': '#ff9933','h':' #ffff00'}
for i in range(len(fig.layout.annotations)):
key=fig.layout.annotations[i]['text']
if 'b\'' in key:
key=key[2:-1]
fig.layout.annotations[i]['text']=key
fig.layout.annotations[i]['font']['color']=colors[key]
#fig.layout.annotations[i].font.size = 8
plotly.offline.plot(fig, filename=plot_file,auto_open=False)
else:
print(str(ld_id) + ":Plot file exists.")
#
# from pylab import bone, pcolor, colorbar, plot, show
# bone()
# pcolor(som.distance_map().T)
# colorbar()
# for i, x in enumerate(X_train):
# geom_point()
# plot(df2['x'][i] + 0.5,
# df2['y'][i] + 0.5,
# markers[y_train[i]],
# markeredgecolor = colors[y_train[i]],
# markerfacecolor = 'None',
# markersize = 5,
# markeredgewidth = 2)
# show()
#
#
# # Visualizing the results
# from pylab import bone, pcolor, colorbar, plot, show
# bone()
# pcolor(som.distance_map().T)
# colorbar()
# markers = ['o', 's']
# colors = ['r', 'g']
# for i, x in enumerate(X_train):
# w = som.winner(x)
# geom_point()
# plot(w[0] + 0.5,
# w[1] + 0.5,
# markers[y_train[i]],
# markeredgecolor = colors[y_train[i]],
# markerfacecolor = 'None',
# markersize = 5,
# markeredgewidth = 2)
# show()
#
#
#
result_file = som_folder+'/'+str(ld_id)+'_results_som_'+str(threshold)+'.csv'
if not Path(result_file).is_file():
# Finding the outliers
print(str(ld_id) + ":Creating Result file...")
print(str(ld_id)+":Finding outliers date_time with threshold >"+str(threshold)+" ...")
mappings = som.win_map(X_train)
out=np.where(som.distance_map().T>threshold)
df = pd.DataFrame()
for i in range(out[0].shape[0]-1):
map=mappings[(out[0][i],out[1][i])]
if map.__len__() > 0:
outliers_rows=np.matrix(sc.inverse_transform(map))
rows_id=np.array(outliers_rows[:,-1]).flatten()
if rows_id is not None and rows_id.size>0:
newdf=pd.DataFrame({'ld_id':X_train_df.iloc[rows_id]['ld_id'],'date_time':X_train_df.iloc[rows_id][datetime_name],'val':som.distance_map().T[out[0][i],out[1][i]]})
df = df.append(newdf, ignore_index = True)
if df.size >0:
df = df.sort_values([datetime_name],ascending=[True])
df.to_csv(som_folder+'/'+str(ld_id)+'_results_som_'+str(threshold)+'.csv', sep=',',index =False)
else:
print(str(ld_id) + ":Result file exists.")
print(str(ld_id) + ":Finish!") | [
"alecuba16@gmail.com"
] | alecuba16@gmail.com |
55f5ff5ccaa8667fb3da4c3c0f50fc51fea27117 | 5e821b4ab7106b9a8cb2c537e63b6e337a3d1e8f | /Source/fonAPI.py | a27d901caa6a627f2a656e5d922f7bb1bd310559 | [] | no_license | stijnvgeene/Tellic | a47a3c6ac6c3534f0998af6e94ed830cfdd13043 | 1242051601bdb040d622efa9816e217c7fa1bbe7 | refs/heads/master | 2021-05-11T10:56:40.652739 | 2018-01-19T11:09:35 | 2018-01-19T11:09:35 | 118,116,255 | 0 | 0 | null | 2018-01-19T11:16:15 | 2018-01-19T11:16:13 | null | UTF-8 | Python | false | false | 2,031 | py | import sys
import json
import requests
class FonApi:
__ApiUrl = 'https://fonoapi.freshpixl.com/v1/'
def __init__(self, apikey, url=None):
self.__ApiUrl = FonApi.__ApiUrl
if url is not None:
self.__ApiUrl = url
self.__ApiKey = apikey
def getdevice(self, device, position=None, brand=None):
"""
Get device data object and return a json list
:param device:
:param position:
:param brand:
:return device list:
"""
url = self.__ApiUrl + 'getdevice'
postdata = {'brand': brand,
'device': device,
'position': position,
'token': self.__ApiKey}
headers = {'content-type': 'application/json'}
result = self.sendpostdata(url, postdata, headers)
try:
return result.json()
except AttributeError:
return result
def sendpostdata(self, url, postdata, headers, result = None):
"""
Send data to the server
:param url:
:param postdata:
:param headers:
:return requests.post result:
"""
try:
result = requests.post(url, data=json.dumps(postdata), headers=headers)
# Consider any status other than 2xx an error
if not result.status_code // 100 == 2:
return "Error status page: " + str(result)
# Try send the result text else send the error
try:
if result.json()['status'] == 'error':
if result.json()['message'] == 'Invalid Token. Generate a Token at fonoapi.freshpixl.com.':
return "Check __ApiKey"
return result.json()['message']
except:
pass
return result
except requests.exceptions.RequestException as e:
# A serious problem happened, like an SSLError or InvalidURL
return "Connect error. Check URL"
| [
"rienk.koenders@student.uva.nl"
] | rienk.koenders@student.uva.nl |
a7cc5a5f8037762aa7ed625f5c58fafd664c62af | a32f80ba617dbd79b0189aadd330bf4bbd0bdb43 | /scripts/TEINEI_10.py | 5f36d7766a3526317687c2dc27d5e489e9c77bde | [] | no_license | ykskks/SIGNATE-Student-Cup-2019 | 9388e918306cbb5d9aeac4eaa91fe5ffa63cb93f | a8784eb627584491f7848582334f922906d93bd8 | refs/heads/master | 2020-08-01T14:24:20.568088 | 2019-10-12T07:46:39 | 2019-10-12T07:46:39 | 211,021,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,695 | py | import os
import sys
import re
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import japanize_matplotlib
import category_encoders as ce
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
sys.path.append(".")
from utils import update_tracking, log_evaluation, preprocess_df
####################
## Changes
####################
#MODEL_ID = "TEINEI_6"
MODEL_ID = "TEINEI_10"
# cartegorical feature 指定
# feature_frac 0.7 -> 0.9, スパース特徴に対処(?)
# outlier handlingを修正(20926: rentではなくareaを修正)
# area_per_room = area / num_room
# mean minute from statitons, station density -> 都会度?
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
sc = logging.StreamHandler()
logger.addHandler(sc)
fh = logging.FileHandler(f"./logs/model_logs/{MODEL_ID}.log")
logger.addHandler(fh)
logger.debug(f"./logs/model_logs/{MODEL_ID}.log")
####################
## Parameters
####################
N_ROUNDS = 30000
LR = 0.01
BOOSTING = "gbdt"
BAG_FREQ = 1
BAG_FRAC = 0.7
MIN_DATA_IN_LEAF = 50
SEED = 42
METRIC = "rmse"
L1 = 1e-2
L2 = 1e-2
MAX_DEPTH = 5
FEAT_FRAC = 0.9
update_tracking(MODEL_ID, "n_rounds", N_ROUNDS)
update_tracking(MODEL_ID, "lr", LR)
update_tracking(MODEL_ID, "boosting", BOOSTING)
update_tracking(MODEL_ID, "bag_freq", BAG_FREQ)
update_tracking(MODEL_ID, "bag_frac", BAG_FRAC)
update_tracking(MODEL_ID, "min_data_in_leaf", MIN_DATA_IN_LEAF)
update_tracking(MODEL_ID, "seed", SEED)
update_tracking(MODEL_ID, "metric", METRIC)
update_tracking(MODEL_ID, "lambda_l1", L1)
update_tracking(MODEL_ID, "lambda_l2", L2)
update_tracking(MODEL_ID, "max_depth", MAX_DEPTH)
update_tracking(MODEL_ID, "feature_fraction", FEAT_FRAC)
params = {"learning_rate": LR,
"boosting": BOOSTING,
"bagging_freq": BAG_FREQ,
"bagging_fraction": BAG_FRAC,
"min_data_in_leaf": MIN_DATA_IN_LEAF,
"bagging_seed": SEED,
"metric": METRIC,
"random_state": SEED,
"lambda_l1": L1,
"lambda_l2": L2,
"max_depth": MAX_DEPTH,
"feature_fraction": FEAT_FRAC}
####################
## Load data
####################
# 変数名の英訳
train_cols_eng = ["id", "rent", "location", "access", "layout", "age", "direction", "area", "floor",
"bath_toilet", "kitchen", "broad_com", "facility", "parking", "environment", "structure",
"contract_period"]
test_cols_eng = ["id", "location", "access", "layout", "age", "direction", "area", "floor",
"bath_toilet", "kitchen", "broad_com", "facility", "parking", "environment", "structure",
"contract_period"]
train = pd.read_csv("./data/train.csv", names=train_cols_eng, header=0)
test = pd.read_csv("./data/test.csv", names=test_cols_eng, header=0)
use_cols = []
####################
## Preprocess data
####################
train_processed = preprocess_df(train)
test_processed = preprocess_df(test)
# handle outliers
train_processed.drop(20427, axis=0, inplace=True) # 築1019年、どう修正するべきか不明なので
train_processed.loc[20231, "age_year"] = 52
train_processed.loc[20231, "age_in_months"] = 52 * 12 + 5 # 築520年、おそらく52年のタイポと仮定
train_processed.loc[5775, "rent"] = 120350 # 条件からしてありえない高値。おそらくゼロの個数違い
train_processed.loc[20926, "area"] = 43.01 # 条件からしてありえなく広い。おそらくゼロの個数違い
train_processed.reset_index(drop=True, inplace=True)
target = train_processed["rent"]
target_log = np.log1p(target)
train_processed.drop(["id", "rent"], axis=1, inplace=True)
test_processed.drop("id", axis=1, inplace=True)
####################
## get feature
####################
# モデル学習用データフレーム(category encoderの都合で分ける)
train_use = pd.DataFrame()
test_use = pd.DataFrame()
### location ###
ce_ordinal = ce.OrdinalEncoder(cols=["district"], handle_missing="value")
train_use["district"] = train_processed["district"]
test_use["district"] = test_processed["district"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
### access ###
train_use["min_to_nearest_sta"] = train_processed["access_min"].apply(lambda x: min(x) if x else np.nan)
test_use["min_to_nearest_sta"] = test_processed["access_min"].apply(lambda x: min(x) if x else np.nan)
train_use["mean_min_to_sta"] = train_processed["access_min"].apply(lambda x: sum(x)/len(x) if x else np.nan)
test_use["mean_min_to_sta"] = test_processed["access_min"].apply(lambda x: sum(x)/len(x) if x else np.nan)
train_use["num_sta"] = train_processed["access_sta"].apply(lambda x: len(x))
test_use["num_sta"] = test_processed["access_sta"].apply(lambda x: len(x))
train_use["sta_density"] = train_use["num_sta"] / train_use["mean_min_to_sta"]
test_use["sta_density"] = test_use["num_sta"] / test_use["mean_min_to_sta"]
# 路線
line_cols = [col for col in train_processed.columns.values if "線" in col or "ライン" in col
or "ライナー" in col or "エクスプレス" in col]
line_cols = [col for col in line_cols if train_processed[col].dropna().sum() > 300]
train_use[line_cols] = train_processed[line_cols]
test_use[line_cols] = test_processed[line_cols]
# 駅
sta_cols = [col for col in train_processed.columns.values if "駅" in col]
sta_cols = [col for col in sta_cols if train_processed[col].dropna().sum() > 300]
train_use[sta_cols] = train_processed[sta_cols]
test_use[sta_cols] = test_processed[sta_cols]
### layout ###
ce_ordinal = ce.OrdinalEncoder(cols=["layout"], handle_missing="value")
train_use["layout"] = train_processed["layout"]
test_use["layout"] = test_processed["layout"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
layout_cols = ["is_K", "is_R", "is_L", "is_D", "is_S", "num_room"]
train_use[layout_cols] = train_processed[layout_cols]
test_use[layout_cols] = test_processed[layout_cols]
### age ###
age_cols = ["age_year", "age_month", "age_in_months"]
train_use[age_cols] = train_processed[age_cols]
test_use[age_cols] = test_processed[age_cols]
### direction ###
ce_ordinal = ce.OrdinalEncoder(cols=["direction"], handle_missing="value")
train_use["direction"] = train_processed["direction"]
test_use["direction"] = test_processed["direction"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
direction_cols = ["has_N", "has_S", "has_E", "has_W"]
train_use[direction_cols] = train_processed[direction_cols]
test_use[direction_cols] = test_processed[direction_cols]
### area ###
train_use["area"] = train_processed["area"]
test_use["area"] = test_processed["area"]
train_use["area_per_room"] = train_use["area"] / train_use["num_room"]
test_use["area_per_room"] = test_use["area"] / test_use["num_room"]
### floor ###
train_processed["floor_ratio"] = train_processed["room_floor"] / train_processed["building_floor"]
test_processed["floor_ratio"] = test_processed["room_floor"] / test_processed["building_floor"]
floor_cols = ["has_underground", "room_floor", "building_floor", "floor_ratio"]
train_use[floor_cols] = train_processed[floor_cols]
test_use[floor_cols] = test_processed[floor_cols]
### bath_toilet ###
bath_toilet_cols = ["シャワー", "バスなし", "バス・トイレ別", "共同トイレ", "共同バス",
"専用トイレ", "専用バス", "洗面台独立", "浴室乾燥機", "温水洗浄便座", "脱衣所", "追焚機能"]
train_use[bath_toilet_cols] = train_processed[bath_toilet_cols]
test_use[bath_toilet_cols] = test_processed[bath_toilet_cols]
### kitchen ###
kitchen_cols = ["IHコンロ", "L字キッチン", "カウンターキッチン", "ガスコンロ", "コンロ1口", "コンロ2口", "コンロ3口",
"コンロ4口以上", "コンロ設置可(コンロ1口)", "コンロ設置可(コンロ2口)", "コンロ設置可(コンロ3口)",
"コンロ設置可(コンロ4口以上)", "コンロ設置可(口数不明)", "システムキッチン", "冷蔵庫あり", "独立キッチン",
"給湯", "電気コンロ"]
train_use[kitchen_cols] = train_processed[kitchen_cols]
test_use[kitchen_cols] = test_processed[kitchen_cols]
### broad_com ###
broad_com_cols = ["BSアンテナ", "CATV", "CSアンテナ", "インターネット使用料無料",
"インターネット対応", "光ファイバー", "有線放送", "高速インターネット"]
train_use[broad_com_cols] = train_processed[broad_com_cols]
test_use[broad_com_cols] = test_processed[broad_com_cols]
### facility ###
facility_cols = ["24時間換気システム", "2面採光",
"3面採光", "ウォークインクローゼット", "エアコン付", "エレベーター", "オール電化", "ガスその他",
"ガス暖房", "クッションフロア", "シューズボックス", "タイル張り", "トランクルーム", "バリアフリー",
"バルコニー", "フローリング", "プロパンガス", "ペアガラス", "ルーフバルコニー", "ロフト付き", "下水",
"二世帯住宅", "二重サッシ", "井戸", "公営水道", "冷房", "出窓", "地下室", "室内洗濯機置場",
"室外洗濯機置場", "専用庭", "床下収納", "床暖房", "排水その他", "敷地内ごみ置き場", "水道その他",
"汲み取り", "洗濯機置場なし", "浄化槽", "石油暖房", "都市ガス", "防音室"]
train_use[facility_cols] = train_processed[facility_cols]
test_use[facility_cols] = test_processed[facility_cols]
### parking ###
parking_cols = ["bicycle_parking", "car_parking", "bike_parking"]
train_use[parking_cols] = train_processed[parking_cols]
test_use[parking_cols] = test_processed[parking_cols]
### environment ###
env_cols = ["デパート", "公園",
"郵便局", "コインパーキング", "学校", "図書館", "飲食店", "月極駐車場", "銀行", "小学校",
"ドラッグストア", "レンタルビデオ", "病院", "総合病院", "コンビニ", "大学", "幼稚園・保育園",
"スーパー", "クリーニング"]
train_use[env_cols] = train_processed[env_cols]
test_use[env_cols] = test_processed[env_cols]
train_use["env_mean"] = train_use[env_cols][train_use[env_cols]>0].mean(axis=1)
test_use["env_mean"] = test_use[env_cols][test_use[env_cols]>0].mean(axis=1)
train_use["env_min"] = train_use[env_cols][train_use[env_cols]>0].min(axis=1)
test_use["env_min"] = test_use[env_cols][test_use[env_cols]>0].min(axis=1)
train_use["env_max"] = train_use[env_cols].max(axis=1)
test_use["env_max"] = test_use[env_cols].max(axis=1)
### structure ###
ce_ordinal = ce.OrdinalEncoder(cols=["structure"], handle_missing="value")
train_use["structure"] = train_processed["structure"]
test_use["structure"] = test_processed["structure"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
### contract_period ###
period_cols = ["fixed_term", "contract_period_year", "contract_period_month", "contract_period_in_months"]
train_use[period_cols] = train_processed[period_cols]
test_use[period_cols] = test_processed[period_cols]
logger.debug(f"Using features:{train_use.columns.values}")
categorical_cols = ["district", "layout", "direction", "structure"]
####################
## Train model
####################
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
oof = np.zeros(len(train_use))
predictions = np.zeros(len(test_use))
feature_importance_df = pd.DataFrame()
for fold, (train_idx, val_idx) in enumerate(folds.split(train_use, train_use["district"])):
print(f"Fold {fold+1}")
train_data = lgb.Dataset(train_use.iloc[train_idx], label=target_log[train_idx], categorical_feature=categorical_cols)
val_data = lgb.Dataset(train_use.iloc[val_idx], label=target_log[val_idx], categorical_feature=categorical_cols)
num_round = N_ROUNDS
callbacks = [log_evaluation(logger, period=100)]
clf = lgb.train(params, train_data, num_round, valid_sets = [train_data, val_data], verbose_eval=False, early_stopping_rounds=100, callbacks=callbacks)
oof[val_idx] = clf.predict(train_use.values[val_idx], num_iteration=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = train_use.columns.values
fold_importance_df["importance"] = clf.feature_importance(importance_type="gain")
fold_importance_df["fold"] = fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
feature_importance_df = feature_importance_df[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False).head(50)
logger.debug("##### feature importance #####")
logger.debug(feature_importance_df)
predictions += clf.predict(test_use, num_iteration=clf.best_iteration) / folds.n_splits
# inverse log transformation
oof = np.expm1(oof)
predictions = np.expm1(predictions)
cv_score = np.sqrt(mean_squared_error(oof, target))
logger.debug(f"5fold CV score: {cv_score}")
update_tracking(MODEL_ID, "cv_rmse", cv_score)
####################
## Submit
####################
spsbm = pd.read_csv("./data/sample_submit.csv", header=None)
spsbm.iloc[:, 1] = predictions
spsbm.to_csv(f"./submissions/{MODEL_ID}.csv", header=None, index=None) | [
"fkt.kohei@gmail.com"
] | fkt.kohei@gmail.com |
37994c9b0b49fd9f56f2174926977f585dd9894c | 5abce8e544924617d8c2247b141f56eefdc86ce7 | /negative_sampling_layer.py | 7296c9e6c8c0ee5bfd0d79b3e2437bceed1ef7ca | [] | no_license | skyiwalker/deep-learning-scratch-study | 2c2fad6cd341367ae7cf9bfe49b49b999d7f4d0b | 1fe9bb6074662adf8662b4270a0a5c02da78dca8 | refs/heads/master | 2022-11-27T15:38:11.084346 | 2020-08-03T04:19:47 | 2020-08-03T04:19:47 | 270,489,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py |
from common.np import * # import numpy as np
from common.layers import Embedding, SigmoidWithLoss
import collections
class EmbeddingDot:
def __init__(self, W):
self.embed = Embedding(W)
self.params = self.embed.params
self.grads = self.embed.grads
self.cache = None
def forward(self, h, idx):
target_W = self.embed.forward(idx)
out = np.sum(target_W * h, axis=1)
self.cache = (h, target_W)
return out
def backward(self, dout):
h, target_W = self.cache
dout = dout.reshape(dout.shape[0], 1)
dtarget_W = dout * h
self.embed.backward(dtarget_W)
dh = dout * target_W
return dh
class UnigramSampler:
def __init__(self, corpus, power, sample_size):
self.sample_size = sample_size
self.vocab_size = None
self.word_p = None
counts = collections.Counter()
for word_id in corpus:
counts[word_id] += 1
vocab_size = len(counts)
self.vocab_size = vocab_size
self.word_p = np.zeros(vocab_size)
for i in range(vocab_size):
self.word_p[i] = counts[i]
self.word_p = np.power(self.word_p, power)
self.word_p /= np.sum(self.word_p)
def get_negative_sample(self, target):
batch_size = target.shape[0]
if not GPU:
negative_sample = np.zeros((batch_size, self.sample_size), dtype=np.int32)
for i in range(batch_size):
p = self.word_p.copy()
target_idx = target[i]
p[target_idx] = 0
p /= p.sum()
negative_sample[i, :] = np.random.choice(self.vocab_size, size=self.sample_size, replace=False, p=p)
else:
# GPU(cupy)로 계산할 때는 속도를 우선한다.
# 부정적 예에 타깃이 포함될 수 있다.
negative_sample = np.random.choice(self.vocab_size, size=(batch_size, self.sample_size),
replace=True, p=self.word_p)
return negative_sample
class NegativeSamplingLoss:
def __init__(self, W, corpus, power=0.75, sample_size=5):
self.sample_size = sample_size
self.sampler = UnigramSampler(corpus, power, sample_size)
self.loss_layers = [SigmoidWithLoss() for _ in range(sample_size + 1)]
self.embed_dot_layers = [EmbeddingDot(W) for _ in range(sample_size + 1)]
self.params, self.grads = [], []
for layer in self.embed_dot_layers:
self.params += layer.params
self.grads += layer.grads
def forward(self, h, target):
batch_size = target.shape[0]
negative_sample = self.sampler.get_negative_sample(target)
# 긍정적 예 순전파
score = self.embed_dot_layers[0].forward(h, target)
correct_label = np.ones(batch_size, dtype=np.int32)
loss = self.loss_layers[0].forward(score, correct_label)
# 부정적 예 순전파
negative_label = np.zeros(batch_size, dtype=np.int32)
for i in range(self.sample_size):
negative_target = negative_sample[:, i]
score = self.embed_dot_layers[1 + i].forward(h, negative_target)
loss += self.loss_layers[1 + i].forward(score, negative_label)
return loss
def backward(self, dout=1):
dh = 0
for l0, l1 in zip(self.loss_layers, self.embed_dot_layers):
dscore = l0.backward(dout)
dh += l1.backward(dscore)
return dh
| [
"sark@supersark.com"
] | sark@supersark.com |
104f2d7717da34b6833512af2ff1103f899065d8 | 3b86d84feea3d49b469eb395fc8fdb962b9bbf32 | /flatbuffers/ButtonOptions.py | ede2dd6776f2256096218380b67af7a9268d5abb | [] | no_license | clouddreamfly/pycsb2csd | a29b70d54c250f7da4b352b693e503e5e2b2fff9 | 5a55389a5e86f57bbbb1bc921da5ecbfb04f29a8 | refs/heads/main | 2023-07-16T17:57:11.320029 | 2021-09-09T16:39:07 | 2021-09-09T16:39:07 | 404,762,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,116 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: flatbuffers
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ButtonOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ButtonOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsButtonOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# ButtonOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ButtonOptions
def WidgetOptions(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from flatbuffers.WidgetOptions import WidgetOptions
obj = WidgetOptions()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ButtonOptions
def NormalData(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from flatbuffers.ResourceData import ResourceData
obj = ResourceData()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ButtonOptions
def PressedData(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from flatbuffers.ResourceData import ResourceData
obj = ResourceData()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ButtonOptions
def DisabledData(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from flatbuffers.ResourceData import ResourceData
obj = ResourceData()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ButtonOptions
def FontResource(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from flatbuffers.ResourceData import ResourceData
obj = ResourceData()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ButtonOptions
def Text(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ButtonOptions
# --- ButtonOptions: FlatBuffers generated field accessors ---
# Each accessor resolves its field's vtable offset; an offset of 0 means the
# field was absent from the serialized buffer, so the schema default is
# returned instead.

# ButtonOptions: bool field, vtable slot 16, default False.
def IsLocalized(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
    if o != 0:
        return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
    return False
# ButtonOptions: string field, vtable slot 18, default None.
def FontName(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
    if o != 0:
        return self._tab.String(o + self._tab.Pos)
    return None
# ButtonOptions: int32 field, vtable slot 20, default 0.
def FontSize(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
    return 0
# ButtonOptions: Color struct field, vtable slot 22, None when absent.
def TextColor(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
    if o != 0:
        x = o + self._tab.Pos
        from flatbuffers.Color import Color
        obj = Color()
        obj.Init(self._tab.Bytes, x)
        return obj
    return None
# ButtonOptions: CapInsets struct field, vtable slot 24, None when absent.
def CapInsets(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
    if o != 0:
        x = o + self._tab.Pos
        from flatbuffers.CapInsets import CapInsets
        obj = CapInsets()
        obj.Init(self._tab.Bytes, x)
        return obj
    return None
# ButtonOptions: FlatSize struct field, vtable slot 26, None when absent.
def Scale9Size(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
    if o != 0:
        x = o + self._tab.Pos
        from flatbuffers.FlatSize import FlatSize
        obj = FlatSize()
        obj.Init(self._tab.Bytes, x)
        return obj
    return None
# ButtonOptions: bool field, vtable slot 28, default False.
def Scale9Enabled(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
    if o != 0:
        return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
    return False
# ButtonOptions: bool field, vtable slot 30, default True.
def Displaystate(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
    if o != 0:
        return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
    return True
# ButtonOptions: bool field, vtable slot 32, default False.
def OutlineEnabled(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
    if o != 0:
        return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
    return False
# ButtonOptions: Color struct field, vtable slot 34, None when absent.
def OutlineColor(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
    if o != 0:
        x = o + self._tab.Pos
        from flatbuffers.Color import Color
        obj = Color()
        obj.Init(self._tab.Bytes, x)
        return obj
    return None
# ButtonOptions: int32 field, vtable slot 36, default 1.
def OutlineSize(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
    return 1
# ButtonOptions: bool field, vtable slot 38, default False.
def ShadowEnabled(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(38))
    if o != 0:
        return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
    return False
# ButtonOptions: Color struct field, vtable slot 40, None when absent.
def ShadowColor(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(40))
    if o != 0:
        x = o + self._tab.Pos
        from flatbuffers.Color import Color
        obj = Color()
        obj.Init(self._tab.Bytes, x)
        return obj
    return None
# ButtonOptions: float32 field, vtable slot 42, default 2.0.
def ShadowOffsetX(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(42))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
    return 2.0
# ButtonOptions: float32 field, vtable slot 44, default -2.0.
def ShadowOffsetY(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(44))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
    return -2.0
# ButtonOptions: int32 field, vtable slot 46, default 0.
def ShadowBlurRadius(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(46))
    if o != 0:
        return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
    return 0
# --- ButtonOptions: FlatBuffers generated builder helpers ---
# Start() reserves a 22-slot table; each Add* helper writes a single field
# (slot index, value, schema default) and End() finishes the table. The
# ButtonOptions*-prefixed functions are deprecated aliases kept for backwards
# compatibility with older generated-code callers.
def Start(builder): builder.StartObject(22)
def ButtonOptionsStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddWidgetOptions(builder, widgetOptions): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(widgetOptions), 0)
def ButtonOptionsAddWidgetOptions(builder, widgetOptions):
    """This method is deprecated. Please switch to AddWidgetOptions."""
    return AddWidgetOptions(builder, widgetOptions)
def AddNormalData(builder, normalData): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(normalData), 0)
def ButtonOptionsAddNormalData(builder, normalData):
    """This method is deprecated. Please switch to AddNormalData."""
    return AddNormalData(builder, normalData)
def AddPressedData(builder, pressedData): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(pressedData), 0)
def ButtonOptionsAddPressedData(builder, pressedData):
    """This method is deprecated. Please switch to AddPressedData."""
    return AddPressedData(builder, pressedData)
def AddDisabledData(builder, disabledData): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(disabledData), 0)
def ButtonOptionsAddDisabledData(builder, disabledData):
    """This method is deprecated. Please switch to AddDisabledData."""
    return AddDisabledData(builder, disabledData)
def AddFontResource(builder, fontResource): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(fontResource), 0)
def ButtonOptionsAddFontResource(builder, fontResource):
    """This method is deprecated. Please switch to AddFontResource."""
    return AddFontResource(builder, fontResource)
def AddText(builder, text): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(text), 0)
def ButtonOptionsAddText(builder, text):
    """This method is deprecated. Please switch to AddText."""
    return AddText(builder, text)
def AddIsLocalized(builder, isLocalized): builder.PrependBoolSlot(6, isLocalized, 0)
def ButtonOptionsAddIsLocalized(builder, isLocalized):
    """This method is deprecated. Please switch to AddIsLocalized."""
    return AddIsLocalized(builder, isLocalized)
def AddFontName(builder, fontName): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(fontName), 0)
def ButtonOptionsAddFontName(builder, fontName):
    """This method is deprecated. Please switch to AddFontName."""
    return AddFontName(builder, fontName)
def AddFontSize(builder, fontSize): builder.PrependInt32Slot(8, fontSize, 0)
def ButtonOptionsAddFontSize(builder, fontSize):
    """This method is deprecated. Please switch to AddFontSize."""
    return AddFontSize(builder, fontSize)
def AddTextColor(builder, textColor): builder.PrependStructSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(textColor), 0)
def ButtonOptionsAddTextColor(builder, textColor):
    """This method is deprecated. Please switch to AddTextColor."""
    return AddTextColor(builder, textColor)
def AddCapInsets(builder, capInsets): builder.PrependStructSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(capInsets), 0)
def ButtonOptionsAddCapInsets(builder, capInsets):
    """This method is deprecated. Please switch to AddCapInsets."""
    return AddCapInsets(builder, capInsets)
def AddScale9Size(builder, scale9Size): builder.PrependStructSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(scale9Size), 0)
def ButtonOptionsAddScale9Size(builder, scale9Size):
    """This method is deprecated. Please switch to AddScale9Size."""
    return AddScale9Size(builder, scale9Size)
def AddScale9Enabled(builder, scale9Enabled): builder.PrependBoolSlot(12, scale9Enabled, 0)
def ButtonOptionsAddScale9Enabled(builder, scale9Enabled):
    """This method is deprecated. Please switch to AddScale9Enabled."""
    return AddScale9Enabled(builder, scale9Enabled)
def AddDisplaystate(builder, displaystate): builder.PrependBoolSlot(13, displaystate, 1)
def ButtonOptionsAddDisplaystate(builder, displaystate):
    """This method is deprecated. Please switch to AddDisplaystate."""
    return AddDisplaystate(builder, displaystate)
def AddOutlineEnabled(builder, outlineEnabled): builder.PrependBoolSlot(14, outlineEnabled, 0)
def ButtonOptionsAddOutlineEnabled(builder, outlineEnabled):
    """This method is deprecated. Please switch to AddOutlineEnabled."""
    return AddOutlineEnabled(builder, outlineEnabled)
def AddOutlineColor(builder, outlineColor): builder.PrependStructSlot(15, flatbuffers.number_types.UOffsetTFlags.py_type(outlineColor), 0)
def ButtonOptionsAddOutlineColor(builder, outlineColor):
    """This method is deprecated. Please switch to AddOutlineColor."""
    return AddOutlineColor(builder, outlineColor)
def AddOutlineSize(builder, outlineSize): builder.PrependInt32Slot(16, outlineSize, 1)
def ButtonOptionsAddOutlineSize(builder, outlineSize):
    """This method is deprecated. Please switch to AddOutlineSize."""
    return AddOutlineSize(builder, outlineSize)
def AddShadowEnabled(builder, shadowEnabled): builder.PrependBoolSlot(17, shadowEnabled, 0)
def ButtonOptionsAddShadowEnabled(builder, shadowEnabled):
    """This method is deprecated. Please switch to AddShadowEnabled."""
    return AddShadowEnabled(builder, shadowEnabled)
def AddShadowColor(builder, shadowColor): builder.PrependStructSlot(18, flatbuffers.number_types.UOffsetTFlags.py_type(shadowColor), 0)
def ButtonOptionsAddShadowColor(builder, shadowColor):
    """This method is deprecated. Please switch to AddShadowColor."""
    return AddShadowColor(builder, shadowColor)
def AddShadowOffsetX(builder, shadowOffsetX): builder.PrependFloat32Slot(19, shadowOffsetX, 2.0)
def ButtonOptionsAddShadowOffsetX(builder, shadowOffsetX):
    """This method is deprecated. Please switch to AddShadowOffsetX."""
    return AddShadowOffsetX(builder, shadowOffsetX)
def AddShadowOffsetY(builder, shadowOffsetY): builder.PrependFloat32Slot(20, shadowOffsetY, -2.0)
def ButtonOptionsAddShadowOffsetY(builder, shadowOffsetY):
    """This method is deprecated. Please switch to AddShadowOffsetY."""
    return AddShadowOffsetY(builder, shadowOffsetY)
def AddShadowBlurRadius(builder, shadowBlurRadius): builder.PrependInt32Slot(21, shadowBlurRadius, 0)
def ButtonOptionsAddShadowBlurRadius(builder, shadowBlurRadius):
    """This method is deprecated. Please switch to AddShadowBlurRadius."""
    return AddShadowBlurRadius(builder, shadowBlurRadius)
def End(builder): return builder.EndObject()
def ButtonOptionsEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
"dreamfly6789@gmail.com"
] | dreamfly6789@gmail.com |
551e27ca7c7e51d5b34f97c66a7388a2202e94fb | 88d43fc4cea1bf3eab771c2cba00088756820c1c | /gNet/tensor_ops.py | 4c194b4bef6f34722cb16f212cd9b7bd0c88dc52 | [
"MIT"
] | permissive | MGokcayK/gNet | a8cba046490425be9bf73343a6dbb5a829d7a086 | 19f3d4e555ae37a6bcda7e52f08eddad06914cee | refs/heads/master | 2023-04-02T09:07:56.398463 | 2021-03-16T14:51:36 | 2021-03-16T14:51:36 | 278,351,066 | 4 | 0 | MIT | 2020-10-12T21:20:00 | 2020-07-09T11:53:37 | Python | UTF-8 | Python | false | false | 26,139 | py | """
Tensor operations implementations.
Author : @MGokcayK github.com/MGokcayK
Create : 24 / 03 / 2020
Update : 21 / 09 / 2020
Fixing broadcasting of tensor_sum and return original exp ops.
"""
import warnings
import numpy as np
from gNet import tensor as T
def add(t1: 'Tensor', t2:'Tensor') -> 'Tensor':
    '''
    Element-wise addition of two `Tensor`s (NumPy broadcasting applies).

    The result tracks gradients when either operand has have_grad = True.
    Each gradient function sums the incoming gradient over broadcasted
    dimensions so the returned gradient matches its own tensor's shape.
    '''
    value = np.add(t1._value, t2._value, dtype=np.float32)
    have_grad = t1.have_grad or t2.have_grad
    ops_name = '_add'
    depends_on: List[Dependency] = []
    if t1.have_grad:
        def grad_fn_add1(grad: np.ndarray) -> np.ndarray:
            # Undo broadcasting: sum out dimensions that were prepended
            # to t1 during the forward broadcast ...
            ndims_added = grad.ndim - t1._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            # ... and collapse dimensions that were broadcast from size 1.
            for i, dim in enumerate(t1.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        ops_name = '_add1'
        depends_on.append(T.Dependency(t1, grad_fn_add1, ops_name))
    if t2.have_grad:
        def grad_fn_add2(grad: np.ndarray) -> np.ndarray:
            # Same broadcast bookkeeping for the second operand.
            ndims_added = grad.ndim - t2._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            for i, dim in enumerate(t2.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        ops_name = '_add2'
        depends_on.append(T.Dependency(t2, grad_fn_add2, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def tensor_sum(t: 'Tensor', axis=0, keepdim=False) -> 'Tensor':
    '''
    Sum tensor along `axis` (default 0); `keepdim` keeps the reduced
    dimension with size 1.

    Gradient: every element that was summed receives the incoming gradient
    unchanged, so the backward pass broadcasts `grad` over the reduced
    axes. The previous implementation collapsed `grad` with sum(axis=0)
    when its rank was smaller than the input's, which is only correct for
    leading axes; re-inserting the reduced axes works for any axis.
    '''
    value = np.sum(t.value, axis=axis, keepdims=keepdim, dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_tensor_sum'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_sum(grad: np.ndarray) -> np.ndarray:
            grad = np.asarray(grad, dtype=np.float32)
            if axis is not None and not keepdim and grad.ndim != t._value.ndim:
                # Re-insert the reduced axes (size 1) so broadcasting aligns
                # correctly even for non-leading axes.
                axes = axis if isinstance(axis, tuple) else (axis,)
                for ax in sorted(a % t._value.ndim for a in axes):
                    grad = np.expand_dims(grad, ax)
            # Broadcast the gradient back to the input's full shape.
            return grad * np.ones_like(t._value, dtype=np.float32)
        depends_on.append(T.Dependency(t, grad_fn_sum, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def mul(t1: 'Tensor', t2: 'Tensor') -> 'Tensor':
    '''
    Element-wise multiplication of two `Tensor`s (NumPy broadcasting
    applies).

    Gradients: d(t1*t2)/dt1 = t2 and d(t1*t2)/dt2 = t1, with broadcasted
    dimensions summed out so each gradient matches its tensor's shape.
    '''
    value = np.multiply(t1._value, t2._value, dtype=np.float32)
    have_grad = t1.have_grad or t2.have_grad
    ops_name = '_mul'
    depends_on: List[Dependency] = []
    if t1.have_grad:
        def grad_fn_mul1(grad: np.ndarray) -> np.ndarray:
            # Local derivative w.r.t. t1 is t2.
            grad = np.multiply(grad, t2._value, dtype=np.float32)
            # Undo broadcasting: sum out prepended dimensions ...
            ndims_added = grad.ndim - t1._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            # ... and collapse dimensions broadcast from size 1.
            for i, dim in enumerate(t1.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        ops_name = '_mul1'
        depends_on.append(T.Dependency(t1, grad_fn_mul1, ops_name))
    if t2.have_grad:
        def grad_fn_mul2(grad: np.ndarray) -> np.ndarray:
            # Local derivative w.r.t. t2 is t1.
            grad = np.multiply(grad, t1._value, dtype=np.float32)
            ndims_added = grad.ndim - t2._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            for i, dim in enumerate(t2.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        ops_name = '_mul2'
        depends_on.append(T.Dependency(t2, grad_fn_mul2, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def div(t1: 'Tensor', t2: 'Tensor') -> 'Tensor':
    '''
    Element-wise division t1 / t2 (NumPy broadcasting applies).

    A small epsilon (1e-10) is added to the denominator to avoid division
    by zero. The same epsilon is now used in both gradient functions so
    the backward pass matches the forward computation exactly (they
    previously used 1e-7 while the forward used 1e-10).

    Gradients:
        d(t1/t2)/dt1 =  1 / t2
        d(t1/t2)/dt2 = -t1 / t2**2
    Broadcasted dimensions are summed out so each gradient matches its
    tensor's shape.
    '''
    eps = 1e-10
    value = np.divide(t1._value, (t2._value + eps), dtype=np.float32)
    have_grad = t1.have_grad or t2.have_grad
    depends_on: List[Dependency] = []
    if t1.have_grad:
        def grad_fn_div1(grad: np.ndarray) -> np.ndarray:
            # d/dt1 = grad / t2 (same epsilon as the forward pass).
            grad = np.divide(grad, (t2._value + eps), dtype=np.float32)
            # Undo broadcasting: sum out prepended dimensions ...
            ndims_added = grad.ndim - t1._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            # ... and collapse dimensions broadcast from size 1.
            for i, dim in enumerate(t1.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        # Per-operand ops names, consistent with add/mul ('_div1'/'_div2').
        depends_on.append(T.Dependency(t1, grad_fn_div1, '_div1'))
    if t2.have_grad:
        def grad_fn_div2(grad: np.ndarray) -> np.ndarray:
            # d/dt2 = -grad * t1 / t2**2 (same epsilon as the forward pass).
            grad = np.divide(-(grad * t1._value), ((t2._value ** 2) + eps), dtype=np.float32)
            ndims_added = grad.ndim - t2._value.ndim
            for _ in range(ndims_added):
                grad = grad.sum(axis=0, dtype=np.float32)
            for i, dim in enumerate(t2.shape):
                if dim == 1:
                    grad = grad.sum(axis=i, keepdims=True, dtype=np.float32)
            return grad
        depends_on.append(T.Dependency(t2, grad_fn_div2, '_div2'))
    return T.Tensor(value, have_grad, depends_on)
def neg(t: 'Tensor')-> 'Tensor':
    '''
    Element-wise negation of a `Tensor`.

    When the input tracks gradients, the backward function simply negates
    the incoming gradient, since d(-x)/dx = -1.
    '''
    result = -t._value
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_neg(grad):
            # Chain rule through negation: flip the sign.
            return -grad
        deps.append(T.Dependency(t, grad_fn_neg, '_neg'))
    return T.Tensor(result, needs_grad, deps)
def matmul(t1: 'Tensor', t2: 'Tensor') -> 'Tensor':
    '''
    Matrix multiplication t1 @ t2.

    Shapes: (n1, m1) @ (m1, m2) -> (n1, m2); the upstream gradient has the
    result's shape, so:
        t1.grad = grad @ t2.T   # (n1, m2) @ (m2, m1) -> (n1, m1)
        t2.grad = t1.T @ grad   # (m1, n1) @ (n1, m2) -> (m1, m2)
    '''
    product = np.matmul(t1._value, t2._value, dtype=np.float32)
    needs_grad = t1.have_grad or t2.have_grad
    deps: List[Dependency] = []
    if t1.have_grad:
        def grad_fn_matmul1(grad: np.ndarray) -> np.ndarray:
            # Right-multiply by t2 transposed.
            return np.matmul(grad, t2._value.T, dtype=np.float32)
        deps.append(T.Dependency(t1, grad_fn_matmul1, '_matmul1'))
    if t2.have_grad:
        def grad_fn_matmul2(grad: np.ndarray) -> np.ndarray:
            # Left-multiply by t1 transposed.
            return np.matmul(t1._value.T, grad, dtype=np.float32)
        deps.append(T.Dependency(t2, grad_fn_matmul2, '_matmul2'))
    return T.Tensor(product, needs_grad, deps)
def tensor_slice(t: 'Tensor', idxs) -> 'Tensor':
    '''
    Slice a `Tensor` with the index expression `idxs`.

    Backward scatters the incoming gradient into a zero array of the
    original shape at the sliced positions; all other positions get 0.
    '''
    sliced = t.value[idxs]
    needs_grad = t.have_grad
    deps: List[Dependency] = []
    if needs_grad:
        def grad_fn_slice(grad: np.ndarray) -> np.ndarray:
            full_grad = np.zeros_like(t.value, dtype=np.float32)
            full_grad[idxs] = grad.astype(np.float32)
            return full_grad
        deps.append(T.Dependency(t, grad_fn_slice, '_slice'))
    return T.Tensor(sliced, needs_grad, deps)
def power(t: 'Tensor', p) -> 'Tensor':
    '''
    Element-wise power t**p for a scalar exponent `p`.

    Gradient: d/dt t**p = p * t**(p-1). For negative p this is computed as
    p / t**|p-1| (identical value, avoids negative exponents in np.power).
    For p == 0 the derivative is identically zero; a zero array of the
    input's shape is returned (previously a bare scalar 0 was returned,
    which loses the gradient's shape).
    '''
    value = np.power(t._value, p, dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_pow'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_pow(grad: np.ndarray) -> np.ndarray:
            if p == 0:
                # d/dt t**0 = 0 everywhere; keep the input's shape.
                return np.zeros_like(t._value, dtype=np.float32)
            if p < 0:
                # t**(p-1) == 1 / t**|p-1| when p is negative.
                local = np.multiply(p, np.divide(1., (np.power(t._value, np.absolute(p - 1)))))
            else:
                local = np.multiply(p, np.power(t._value, (p - 1)))
            return np.multiply(local, grad.astype(np.float32))
        depends_on.append(T.Dependency(t, grad_fn_pow, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def log(t: 'Tensor') -> 'Tensor':
    '''
    Natural logarithm (ln) of a `Tensor`. A small epsilon (1e-10) guards
    against log(0).

    Gradient: d/dx ln(x) = 1/x, with the same epsilon as the forward pass.
    '''
    eps = 1e-10
    out = np.log(t._value + eps, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_log(grad: np.ndarray) -> np.ndarray:
            reciprocal = np.divide(1., (t._value + eps), dtype=np.float32)
            return np.multiply(reciprocal, grad, dtype=np.float32)
        deps.append(T.Dependency(t, grad_fn_log, '_log'))
    return T.Tensor(out, needs_grad, deps)
def log_b(t: 'Tensor', b: int) -> 'Tensor':
    '''
    Logarithm of base `b`, computed as ln(t) / ln(b). Small epsilons guard
    against log(0) and division by zero.

    Gradient: d/dx log_b(x) = 1 / (x * ln(b)).

    NOTE(review): the forward pass adds the epsilon inside log(b + 1e-10)
    while the gradient adds it to log(b) itself — slight mismatch; confirm
    which form is intended.
    '''
    value = np.divide(np.log(t._value + 1e-10, dtype=np.float32), np.log(b + 1e-10, dtype=np.float32), dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_log'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_log(grad: np.ndarray) -> np.ndarray:
            # grad / (t * (ln(b) + eps))
            grad = np.multiply(np.divide(1., np.multiply(t._value, np.log(b,dtype=np.float32) + 1e-10, dtype=np.float32),dtype=np.float32), grad, dtype=np.float32)
            return grad
        depends_on.append(T.Dependency(t, grad_fn_log, ops_name))
    else:
        depends_on = []
    return T.Tensor(value, have_grad, depends_on)
def exp(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise exponential e**t of a `Tensor`.

    Gradient: d/dx e^x = e^x, so the forward result is reused directly.
    '''
    out = np.exp(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_exp(grad: np.ndarray) -> np.ndarray:
            # Derivative of exp is exp itself.
            return np.multiply(out, grad, dtype=np.float32)
        deps.append(T.Dependency(t, grad_fn_exp, '_exp'))
    return T.Tensor(out, needs_grad, deps)
def sin(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise sine of a `Tensor`, in radians.

    Gradient: d/dx sin(x) = cos(x).
    '''
    out = np.sin(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_sin(grad: np.ndarray) -> np.ndarray:
            return np.multiply(np.cos(t._value, dtype=np.float32), grad)
        deps.append(T.Dependency(t, grad_fn_sin, '_sin'))
    return T.Tensor(out, needs_grad, deps)
def arcsin(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise arcsine of a `Tensor`, result in radians in
    [-pi/2, pi/2]. The input must lie in arcsine's domain [-1, 1]
    (the previous check used [-pi/2, pi/2], which admitted values such
    as 1.2 that produce NaN).

    Gradient: d/dx arcsin(x) = 1 / sqrt(1 - x**2). The previous
    implementation applied the square root to the whole quotient
    (sqrt(grad / (1 - x**2))), which is not the chain rule; the incoming
    gradient is now divided by sqrt(1 - x**2) directly, with a small
    epsilon to avoid dividing by zero at x = +/-1.
    '''
    assert np.all(t._value >= -1.) and np.all(t._value <= 1.), \
        'Tensor value is not in range which is -1 <= value <= 1'
    value = np.arcsin(t._value, dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_arcsin'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_arcsin(grad: np.ndarray) -> np.ndarray:
            denom = np.power(1. - np.power(t._value, 2, dtype=np.float32), 0.5, dtype=np.float32)
            return np.divide(grad, denom + 1e-10, dtype=np.float32)
        depends_on.append(T.Dependency(t, grad_fn_arcsin, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def cos(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise cosine of a `Tensor`, in radians.

    Gradient: d/dx cos(x) = -sin(x).
    '''
    out = np.cos(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_cos(grad: np.ndarray) -> np.ndarray:
            return np.multiply(-np.sin(t._value), grad)
        deps.append(T.Dependency(t, grad_fn_cos, '_cos'))
    return T.Tensor(out, needs_grad, deps)
def arccos(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise arccosine of a `Tensor`, result in radians in [0, pi].
    The input must lie in arccosine's domain [-1, 1].

    Gradient: d/dx arccos(x) = -1 / sqrt(1 - x**2). The previous
    implementation applied the square root to the whole quotient
    (sqrt(-grad / (1 - x**2))), which is not the chain rule and yields NaN
    for positive gradients; the incoming gradient is now negated and
    divided by sqrt(1 - x**2) directly, with a small epsilon to avoid
    dividing by zero at x = +/-1.
    '''
    assert np.all(t._value >= -1.) and np.all(t._value <= 1.), \
        'Tensor value is not in rage which is -1 <= value <= 1'
    value = np.arccos(t._value, dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_arccos'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_arccos(grad: np.ndarray) -> np.ndarray:
            denom = np.power(1. - np.power(t._value, 2, dtype=np.float32), 0.5, dtype=np.float32)
            return np.divide(-grad, denom + 1e-10, dtype=np.float32)
        depends_on.append(T.Dependency(t, grad_fn_arccos, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def tan(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise tangent of a `Tensor`, in radians.

    Gradient: d/dx tan(x) = 1 / cos(x)**2, with a small epsilon added to
    cos**2 to avoid division by zero near x = pi/2 + k*pi.
    '''
    out = np.tan(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_tan(grad: np.ndarray) -> np.ndarray:
            cos_sq = np.power(np.cos(t._value, dtype=np.float32), 2, dtype=np.float32)
            sec_sq = np.divide(1., cos_sq + 1e-10, dtype=np.float32)
            return np.multiply(sec_sq, grad, dtype=np.float32)
        deps.append(T.Dependency(t, grad_fn_tan, '_tan'))
    return T.Tensor(out, needs_grad, deps)
def arctan(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise arctangent of a `Tensor`, result in radians in
    (-pi/2, pi/2).

    Gradient: d/dx arctan(x) = 1 / (1 + x**2).
    '''
    out = np.arctan(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_arctan(grad: np.ndarray) -> np.ndarray:
            return np.divide(grad, (1. + np.power(t._value, 2, dtype=np.float32)), dtype=np.float32)
        deps.append(T.Dependency(t, grad_fn_arctan, '_arctan'))
    return T.Tensor(out, needs_grad, deps)
def cot(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise cotangent of a `Tensor`, in radians.

    The previous implementation called `np.cot`, which does not exist in
    NumPy and raised AttributeError on every call. Cotangent is now
    computed as cos(x) / sin(x), with a small epsilon in the denominator
    to avoid division by zero at multiples of pi.

    Gradient: d/dx cot(x) = -1 / sin(x)**2 (unchanged).
    '''
    value = np.divide(np.cos(t._value, dtype=np.float32),
                      np.sin(t._value, dtype=np.float32) + 1e-10,
                      dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_cot'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_cot(grad: np.ndarray) -> np.ndarray:
            grad = np.multiply(-np.divide(1., np.power(np.sin(t._value), 2, dtype=np.float32) + 1e-10, dtype=np.float32), grad, dtype=np.float32)
            return grad
        depends_on.append(T.Dependency(t, grad_fn_cot, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def mean(t: 'Tensor', axis=None, keepdim=False) -> 'Tensor':
    '''
    Mean of a `Tensor` along `axis` (all elements when axis is None).
    `keepdim` keeps the reduced dimension(s) with size 1.

    Gradient: each averaged element receives grad / N, where N is the
    number of elements averaged into its output entry. The previous
    implementation divided by the *total* element count even for an axis
    mean, and unconditionally expand_dims'ed the gradient (wrong when
    keepdim=True); both are fixed here.
    '''
    value = np.mean(t.value, axis=axis, keepdims=keepdim, dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_mean'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_mean(grad: np.ndarray) -> np.ndarray:
            grad = np.asarray(grad, dtype=np.float32)
            if axis is None:
                # Full mean: every element contributes 1/size.
                return np.divide(grad, t.value.size, dtype=np.float32)
            # Normalize axis to a tuple of non-negative indices.
            axes = axis if isinstance(axis, tuple) else (axis,)
            axes = tuple(a % t.value.ndim for a in axes)
            n = 1
            for ax in axes:
                n *= t.value.shape[ax]  # elements averaged per output entry
            if not keepdim:
                # Re-insert the reduced axes so broadcasting aligns.
                for ax in sorted(axes):
                    grad = np.expand_dims(grad, ax)
            broadcast = np.multiply(np.ones(t.value.shape, dtype=np.float32), grad, dtype=np.float32)
            return np.divide(broadcast, n, dtype=np.float32)
        depends_on.append(T.Dependency(t, grad_fn_mean, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def where(t: 'Tensor', condition:None, _true:None, _false:None) -> 'Tensor':
    '''
    Element-wise select: take `_true` where `condition` holds, otherwise
    `_false`. Gradients flow only to the branch that was selected, masked
    by the condition.

    NOTE(review): parameter `t` is never used by the computation; it seems
    to exist only for a uniform call signature — confirm against callers.
    '''
    value = np.where(condition, _true.value, _false.value)
    have_grad = _true.have_grad or _false.have_grad
    ops_name = '_where'
    depends_on: List[Dependency] = []
    if _true.have_grad:
        def grad_fn_whereT(grad: np.ndarray) -> np.ndarray:
            # Gradient flows to _true only where the condition selected it.
            return grad.astype(np.float32) * np.where(condition, 1, 0)
        depends_on.append(T.Dependency(_true, grad_fn_whereT, ops_name))
    if _false.have_grad:
        def grad_fn_whereF(grad: np.ndarray) -> np.ndarray:
            # Complementary mask for the _false branch.
            return grad.astype(np.float32) * np.where(condition, 0, 1)
        depends_on.append(T.Dependency(_false, grad_fn_whereF, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def reshape(t: 'Tensor', shape=None) -> 'Tensor':
    '''
    Reshape a `Tensor` to `shape`.

    The gradient is simply reshaped back to the input's original shape.
    '''
    original_shape = t.shape
    out = np.reshape(t.value, newshape=shape)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_reshape(grad: np.ndarray) -> np.ndarray:
            return np.reshape(grad, original_shape)
        deps.append(T.Dependency(t, grad_fn_reshape, '_reshape'))
    return T.Tensor(out, needs_grad, deps)
def flatten(t: 'Tensor', batching=False) -> 'Tensor':
    """
    Flatten a `Tensor`.

    With batching=True (training path) the leading batch dimension is kept
    and the remaining dimensions are collapsed, giving shape
    (batch_size, -1). With batching=False the whole tensor is flattened to
    1-D. The gradient is reshaped back to the original input shape.
    """
    original_shape = t.shape
    if batching:
        # Keep the batch dimension; collapse everything else.
        out = t._value.reshape(original_shape[0], -1)
    else:
        out = t._value.flatten()
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_flatten(grad: np.ndarray):
            return grad.reshape(original_shape)
        deps.append(T.Dependency(t, grad_fn_flatten, '_flatten'))
    return T.Tensor(out, needs_grad, deps)
def transpose(t: 'Tensor', axes=(1, 0)) -> 'Tensor':
    '''
    Transpose a `Tensor` by the axis permutation `axes`.

    Gradient: transposing by `axes` is undone by transposing with the
    inverse permutation, which is `np.argsort(axes)`. The previous
    implementation derived the inverse via `np.where(axes == i)`, which
    compares a tuple to an int (always False) and fails at runtime.
    '''
    value = np.transpose(t.value, axes=axes)
    have_grad = t.have_grad
    ops_name = '_transpose'
    depends_on: List[Dependency] = []
    if have_grad:
        # argsort of a permutation yields its inverse permutation.
        inverse_axes = np.argsort(axes)
        def grad_fn_transpose(grad: np.ndarray) -> np.ndarray:
            return np.transpose(grad, axes=inverse_axes)
        depends_on.append(T.Dependency(t, grad_fn_transpose, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def sinh(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise hyperbolic sine of a `Tensor`.

    Gradient: d/dx sinh(x) = cosh(x).
    '''
    out = np.sinh(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_sinh(grad: np.ndarray) -> np.ndarray:
            return np.cosh(t._value, dtype=np.float32) * grad.astype(np.float32)
        deps.append(T.Dependency(t, grad_fn_sinh, '_sinh'))
    return T.Tensor(out, needs_grad, deps)
def cosh(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise hyperbolic cosine of a `Tensor`.

    Gradient: d/dx cosh(x) = sinh(x).
    '''
    out = np.cosh(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_cosh(grad: np.ndarray) -> np.ndarray:
            return np.sinh(t._value, dtype=np.float32) * grad.astype(np.float32)
        deps.append(T.Dependency(t, grad_fn_cosh, '_cosh'))
    return T.Tensor(out, needs_grad, deps)
def tanh(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise hyperbolic tangent of a `Tensor`.

    Gradient: d/dx tanh(x) = sech(x)**2 = 1 / cosh(x)**2.
    '''
    out = np.tanh(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_tanh(grad: np.ndarray) -> np.ndarray:
            return (1. / np.cosh(t._value, dtype=np.float32) ** 2) * grad.astype(np.float32)
        deps.append(T.Dependency(t, grad_fn_tanh, '_tanh'))
    return T.Tensor(out, needs_grad, deps)
def abs(t: 'Tensor') -> 'Tensor':
    '''
    Element-wise absolute value of a `Tensor`.

    Gradient: d/dx |x| = sign(x) (0 at x = 0, matching np.sign).
    '''
    out = np.absolute(t._value, dtype=np.float32)
    needs_grad = t.have_grad
    deps = []
    if needs_grad:
        def grad_fn_abs(grad: np.ndarray) -> np.ndarray:
            return np.sign(t._value, dtype=np.float32) * grad.astype(np.float32)
        deps.append(T.Dependency(t, grad_fn_abs, '_abs'))
    return T.Tensor(out, needs_grad, deps)
def dropout(t: 'Tensor', p: float) -> 'Tensor':
    """
    Inverted dropout: zero each element with probability `p` and scale the
    survivors by 1/(1-p) so the expected activation is unchanged.

    https://stats.stackexchange.com/questions/219236/dropout-forward-prop-vs-back-prop-in-machine-learning-neural-network
    """
    # Bernoulli keep-mask, drawn once and captured by the gradient closure
    # so the same units are dropped in both forward and backward passes.
    dropout_mask = np.random.binomial(1, 1.-p, size=t.shape)
    value = np.multiply(t._value, dropout_mask * (1./(1.-p)), dtype=np.float32)
    have_grad = t.have_grad
    ops_name = '_dropout'
    depends_on: List[Dependency] = []
    if have_grad:
        def grad_fn_dropout(grad: np.ndarray) -> np.ndarray:
            # Gradient passes only through kept units, with the same scaling.
            grad = grad.astype(np.float32) * dropout_mask * (1./(1.-p))
            return grad
        depends_on.append(T.Dependency(t, grad_fn_dropout, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def append(t1: 'Tensor', t2: 'Tensor', axis=None) -> 'Tensor':
    """
    Append (concatenate) `t2` to `t1` along `axis` via np.append.

    Backward slices the incoming gradient back into the two input regions:
    t1 receives the leading slice and t2 the trailing slice of each
    dimension.

    NOTE(review): the gradient slicing assumes the inputs keep their rank
    in the result; with axis=None np.append flattens both inputs, so the
    slices below would not line up — confirm callers always pass an
    explicit axis.
    """
    t1_shape = t1.shape
    t2_shape = t2.shape
    value = np.append(t1.value, t2.value, axis)
    have_grad = t1.have_grad or t2.have_grad
    ops_name = '_append'
    depends_on: List[Dependency] = []
    if t1.have_grad:
        dim = np.arange(t1.value.ndim) # dimension
        ind = []
        # t1 occupies the first t1_shape[d] entries of every dimension.
        [ind.append(slice(0,t1_shape[d])) for d in dim] # slice index
        def grad_fn_append1(grad: np.ndarray) -> np.ndarray:
            return grad[tuple(ind)]
        depends_on.append(T.Dependency(t1, grad_fn_append1, ops_name))
    if t2.have_grad:
        dim = np.arange(t2.value.ndim) # dimension
        ind2 = []
        # t2 occupies the last t2_shape[d] entries of every dimension.
        [ind2.append(slice(-t2_shape[d]+value.shape[d],value.shape[d] )) for d in dim] #slice index
        def grad_fn_append2(grad: np.ndarray) -> np.ndarray:
            return grad[tuple(ind2)]
        depends_on.append(T.Dependency(t2, grad_fn_append2, ops_name))
    return T.Tensor(value, have_grad, depends_on)
def maximum(t1: 'Tensor', t2: 'Tensor') -> 'Tensor':
    """
    Element-wise maximum of two `Tensor`s.

    Backward routes the gradient to whichever input produced the maximum.
    Values are compared after rounding to 10 decimals to avoid float32
    equality noise; where both inputs tie, both receive the gradient.
    """
    value = np.maximum(t1._value, t2._value, dtype=np.float32)
    have_grad = t1.have_grad or t2.have_grad
    ops_name = '_maximum'
    # Rounded copies used for the "who won" test in the gradient closures.
    t1_r = np.round(t1._value, 10)
    t2_r = np.round(t2._value, 10)
    v_r = np.round(value, 10)
    depends_on: List[Dependency] = []
    if t1.have_grad:
        def grad_fn_maximum1(grad: np.ndarray) -> np.ndarray:
            # Mask is 1 where t1 supplied the maximum, 0 elsewhere.
            grad = np.equal(t1_r, v_r) * grad.astype(np.float32)
            return grad
        depends_on.append(T.Dependency(t1, grad_fn_maximum1, ops_name))
    if t2.have_grad:
        def grad_fn_maximum2(grad: np.ndarray) -> np.ndarray:
            grad = np.equal(t2_r, v_r) * grad.astype(np.float32)
            return grad
        depends_on.append(T.Dependency(t2, grad_fn_maximum2, ops_name))
    return T.Tensor(value, have_grad, depends_on)
"mgokcaykdev@gmail.com"
] | mgokcaykdev@gmail.com |
de495f8070a7258d450001eb321fe83c21087cd2 | df601ac0a0dd618c75241ca050468cab5f580d3a | /kgb/calls.py | 77111f43f0829d696bd0c66e7999af9890c67e7f | [] | no_license | mitchhentges/kgb | ff90d7e6c66417ba147ab3e32518d9e4facba256 | 4c7f4361a8050e5426cb23e4a84ee64df25a6c12 | refs/heads/master | 2022-12-12T14:50:18.838424 | 2020-09-04T06:57:34 | 2020-09-04T06:57:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,232 | py | """Call tracking and checks for spiess."""
from __future__ import unicode_literals
from kgb.pycompat import iteritems, text_type
from kgb.signature import FunctionSig
class SpyCall(object):
"""Records arguments made to a spied function call.
SpyCalls are created and stored by a FunctionSpy every time it is
called. They're accessible through the FunctionSpy's ``calls`` attribute.
"""
def __init__(self, spy, args, kwargs):
    """Initialize the call.

    Args:
        spy (kgb.spies.FunctionSpy):
            The function spy that the call was made on.

        args (tuple):
            A tuple of positional arguments from the spy. These correspond
            to positional arguments in the function's signature.

        kwargs (dict):
            A dictionary of keyword arguments from the spy. These
            correspond to keyword arguments in the function's signature.
    """
    self.spy = spy
    self.args = args
    self.kwargs = kwargs
    # Filled in by the spy once the spied function finishes: the value it
    # returned, and the exception it raised (if any).
    self.return_value = None
    self.exception = None
def called_with(self, *args, **kwargs):
"""Return whether this call was made with the given arguments.
Not every argument and keyword argument made in the call must be
provided to this method. These can be a subset of the positional and
keyword arguments in the call, but cannot contain any arguments not
made in the call.
Args:
*args (tuple):
The positional arguments made in the call, or a subset of
those arguments (starting with the first argument).
**kwargs (dict):
The keyword arguments made in the call, or a subset of those
arguments.
Returns:
bool:
``True`` if the call's arguments match the provided arguments.
``False`` if they do not.
"""
if len(args) > len(self.args):
return False
if self.args[:len(args)] != args:
return False
pos_args = self.spy._sig.arg_names
if self.spy.func_type in (FunctionSig.TYPE_BOUND_METHOD,
FunctionSig.TYPE_UNBOUND_METHOD):
pos_args = pos_args[1:]
all_args = dict(zip(pos_args, self.args))
all_args.update(self.kwargs)
for key, value in iteritems(kwargs):
if key not in all_args or all_args[key] != value:
return False
return True
def returned(self, value):
"""Return whether this call returned the given value.
Args:
value (object):
The expected returned value from the call.
Returns:
bool:
``True`` if this call returned the given value. ``False`` if it
did not.
"""
return self.return_value == value
def raised(self, exception_cls):
"""Return whether this call raised this exception.
Args:
exception_cls (type):
The expected type of exception raised by the call.
Returns:
bool:
``True`` if this call raised the given exception type.
``False`` if it did not.
"""
return ((self.exception is None and exception_cls is None) or
type(self.exception) is exception_cls)
def raised_with_message(self, exception_cls, message):
"""Return whether this call raised this exception and message.
Args:
exception_cls (type):
The expected type of exception raised by the call.
message (unicode):
The expected message from the exception.
Returns:
bool:
``True`` if this call raised the given exception type and message.
``False`` if it did not.
"""
return (self.exception is not None and
self.raised(exception_cls) and
text_type(self.exception) == message)
def __repr__(self):
return '<SpyCall(args=%r, kwargs=%r, returned=%r, raised=%r>' % (
self.args, self.kwargs, self.return_value, self.exception)
| [
"christian@beanbaginc.com"
] | christian@beanbaginc.com |
37cdca3da56a803ff2ad67148d32cba6a13e8090 | d1f099141bab3ce19b866add1f5fadb7af965057 | /meetingbookingproject/meetingbookingapplication/views.py | 5cc6663c388c3b645272c7b0b236c0e76dfa4a1d | [] | no_license | pavanamanikanta654/meeting | be8672ab42b5d16df35e79789471868ac1152c09 | d0b0fc9892904744ab6331864c3e5e165628a785 | refs/heads/master | 2020-08-06T21:24:21.913420 | 2019-10-06T14:00:35 | 2019-10-06T14:00:35 | 213,159,868 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,779 | py | from django.shortcuts import render
from .models import meeting, staff, booking
from .forms import meetingform, staffform, bookingform
from django.http.response import HttpResponse
# def input(request):
# return render(request,'templates\meeting.html')
def meetingview(request):
    """Handle the meeting-creation form.

    On a valid POST, creates a ``meeting`` row unless one with the same
    ``meetingid`` already exists; re-renders the form with an ``info``
    message either way.  On GET (or an invalid POST) the form is rendered.
    """
    if request.method == 'POST':
        mform = meetingform(request.POST)
        if mform.is_valid():
            # Read the validated data.  (The original read ``request.post``,
            # which does not exist on Django's HttpRequest -- the attribute
            # is ``request.POST`` -- so this view crashed on every POST.)
            meetingid = mform.cleaned_data.get('meetingid', '')
            meetingroom = mform.cleaned_data.get('meetingroom', '')
            meetingdescription = mform.cleaned_data.get('meetingdescription', '')
            # Check for a duplicate *before* saving.  The original saved
            # first and then filtered, so the query always matched the row
            # it had just created and every booking reported a duplicate.
            if meeting.objects.filter(meetingid=meetingid).exists():
                info = "room already was booked with this meeting id ,try with another meeting id"
                return render(request, 'meeting.html',
                              {'mform': meetingform(), 'info': info})
            meeting(meetingid=meetingid,
                    meetingroom=meetingroom,
                    meetingdescription=meetingdescription).save()
            info = "room was booked with this meeting id,please fill another u want to book another one"
            return render(request, 'meeting.html',
                          {'mform': meetingform(), 'info': info})
        # Invalid POST: re-render with the bound form so errors show.
        # (The original fell off the end here and returned None, which
        # makes Django raise a ValueError.)
        return render(request, 'meeting.html', {'mform': mform})
    mform = meetingform()
    return render(request, 'meeting.html', {'mform': mform})
def staffview(request):
    """Handle the staff-registration form.

    On a valid POST, saves a ``staff`` row and re-renders an empty form;
    on GET (or an invalid POST) the form is rendered.
    """
    if request.method == 'POST':
        sform = staffform(request.POST)
        if sform.is_valid():
            # The original read ``request.post`` (nonexistent attribute)
            # and pulled the wrong keys -- 'meetingroom' for the staff name
            # and 'meetingdescription' for the mail (copy-paste from
            # meetingview).  Use the form's validated data instead.
            staffid = sform.cleaned_data.get('staffid', '')
            staffname = sform.cleaned_data.get('staffname', '')
            staffmail = sform.cleaned_data.get('staffmail', '')
            staff(
                staffid=staffid,
                staffname=staffname,
                staffmail=staffmail,
            ).save()
            sform = staffform()
        # Always return a response; the original returned None for an
        # invalid POST.
        return render(request, 'meeting.html', {'sform': sform})
    sform = staffform()
    return render(request, 'meeting.html', {'sform': sform})
def bookingview(request):
    """Handle the meeting-booking form.

    On a valid POST, saves a ``booking`` row and re-renders an empty form;
    on GET (or an invalid POST) the form is rendered.
    """
    if request.method == 'POST':
        bform = bookingform(request.POST)
        if bform.is_valid():
            # Validated data, not the nonexistent ``request.post``; the
            # local was also misspelled 'bookindstartdate' originally.
            meetingid = bform.cleaned_data.get('meetingid', '')
            staffid = bform.cleaned_data.get('staffid', '')
            bookingstartdate = bform.cleaned_data.get('bookingstartdate', '')
            bookingenddate = bform.cleaned_data.get('bookingenddate', '')
            # The original instantiated ``meeting`` here, which does not
            # take these fields; the record belongs to the ``booking`` model.
            booking(meetingid=meetingid,
                    staffid=staffid,
                    bookingstartdate=bookingstartdate,
                    bookingenddate=bookingenddate).save()
            bform = bookingform()
        # Always return a response; the original returned None for an
        # invalid POST.
        return render(request, 'meeting.html', {'bform': bform})
    bform = bookingform()
    return render(request, 'meeting.html', {'bform': bform})
| [
"56213630+pavanamanikanta654@users.noreply.github.com"
] | 56213630+pavanamanikanta654@users.noreply.github.com |
6ce30b59102b80170b2bc697900a96b94a331972 | 1cf9d213176feb23b1dfb5827deb1f7932785913 | /speed.py | 053b61d23317a9c1f3960cbe538f2102420ebeff | [] | no_license | ZvonimirKucis/ets2-rl | bf480593de426a30d6840dd880c1c42b4016da79 | b7289b5b962b0b4d79b53a41fe0113fbb504e542 | refs/heads/master | 2022-04-12T07:19:43.533124 | 2020-03-02T12:56:59 | 2020-03-02T12:56:59 | 184,093,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from lib import telemetryParser
import time
while True:
print(telemetryParser.get_speed())
print(int(telemetryParser.get_speed()))
time.sleep(2) | [
"zvonimirkucis@gmail.com"
] | zvonimirkucis@gmail.com |
b71a6f92579e6a8ed6cc97408c37307213d5073c | f6fc57e6baab4fa892064ca3a027d1417acad3bc | /flask/bin/pip2.7 | 5a17c86b5e27e150678ab15124efb290f9b714b4 | [] | no_license | klampotang/SoyLikes | d23b22c95e81b7d75cc3f09f8169da4ad92308e0 | 702209f974282478838d967df043c92e20e9cafa | refs/heads/master | 2021-01-10T08:28:50.075559 | 2015-11-16T09:25:00 | 2015-11-16T09:25:00 | 46,183,147 | 6 | 4 | null | 2015-11-15T07:36:47 | 2015-11-14T16:56:48 | Python | UTF-8 | Python | false | false | 250 | 7 | #!/Users/williamwang1/Desktop/HACKSC/microblog/flask/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"williamwang1@Williams-MacBook-Pro.local"
] | williamwang1@Williams-MacBook-Pro.local |
2d2a4f93a18a0a46095a78f12088d9c902bb99a9 | 4ffe483f8297aa3fee8254ce569c3d260f156524 | /code/test-suite/EstheRustConvertor/expected/fonc_1.py | c3fe040331489ac2eff1226dab9b8cd8028becdf | [
"MIT"
] | permissive | SamiBelaidi/LOG3210 | 5dd7fbfc4cd709c9c46a2b4dc250cb565650293c | 03b3952af125be98fe32eefb2338767020033f51 | refs/heads/master | 2023-04-16T05:58:20.147795 | 2021-04-25T20:54:29 | 2021-04-25T20:54:29 | 334,222,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | def exemple2(name):
a = "test1"
| [
"sami.belaidi98@gmail.com"
] | sami.belaidi98@gmail.com |
034ac87ab3d44c8c5221b639dd2987db0f489445 | 4302fd10583ccff63ff5693bd2ae5903323cb769 | /curate/migrations/0033_auto_20190224_0315.py | 9e514c889504231d27382afd8b779b6510c0517c | [
"MIT"
] | permissive | ScienceCommons/curate_science | 1faf742c8de1e9c9180e4d8ec6a7457ad95bb705 | 4e4072e8c000df0d2e80637016f8f0e667f4df54 | refs/heads/master | 2022-02-12T19:56:51.730534 | 2022-01-25T16:44:54 | 2022-01-25T16:44:54 | 149,122,317 | 14 | 7 | MIT | 2021-03-23T17:27:05 | 2018-09-17T12:32:25 | HTML | UTF-8 | Python | false | false | 1,466 | py | # Generated by Django 2.1.7 on 2019-02-24 03:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add explicit pixel-dimension columns for KeyFigure images/thumbnails."""
    dependencies = [
        ('curate', '0032_auto_20190224_0231'),
    ]
    operations = [
        # New dimension columns; existing rows default to 0.
        migrations.AddField(
            model_name='keyfigure',
            name='height',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='keyfigure',
            name='thumb_height',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='keyfigure',
            name='thumb_width',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='keyfigure',
            name='width',
            field=models.PositiveIntegerField(default=0),
        ),
        # NOTE(review): Django's ImageField expects ``height_field`` /
        # ``width_field`` to be the *string names* of sibling model fields,
        # not field instances as passed below.  This mirrors the generated
        # model state -- confirm against the model definition before
        # relying on auto-populated dimensions.
        migrations.AlterField(
            model_name='keyfigure',
            name='image',
            field=models.ImageField(height_field=models.PositiveIntegerField(default=0), null=True, upload_to='key_figures/', width_field=models.PositiveIntegerField(default=0)),
        ),
        migrations.AlterField(
            model_name='keyfigure',
            name='thumbnail',
            field=models.ImageField(height_field=models.PositiveIntegerField(default=0), null=True, upload_to='key_figure_thumbnails/', width_field=models.PositiveIntegerField(default=0)),
        ),
    ]
| [
"alex.kyllo@gmail.com"
] | alex.kyllo@gmail.com |
51172c47e95619b54240ed1a6ba7b687f13755bb | af430762e66279e7d8607a67ab6a88489e70c56f | /accounts/forms.py | 090c4cce694ba792857b705ec2be7d52d9221140 | [] | no_license | AminKipkeev2dot0/LetsEAT | 08718cfff6bc0a4bc9c8ebdf20c104e793973a3f | 66e2815fa3fdbe6844c82b09be578d198e2d78f5 | refs/heads/master | 2023-06-21T06:39:07.288824 | 2021-07-19T12:00:29 | 2021-07-19T12:00:29 | 377,118,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django import forms
class LoginForm(forms.Form):
    """Login form: email, password and a "remember me" checkbox.

    Labels are suppressed (``label=''``); the placeholder text (Russian)
    serves as the visible caption.  Field declaration order is the order
    Django renders the fields in.
    """
    email = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder': 'Ваш Email',
                                                            'class': 'form_user_email'}), label='')
    password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Ваш Пароль'}), label='')
    # Optional; rendered pre-checked via the widget's 'checked' attribute.
    remember_me = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'checked': 'checked'}))
| [
"by.zarmius@gmail.com"
] | by.zarmius@gmail.com |
0b09c7a3cdc98c1291873272134f11e69de4b86e | d5ed5ca41b2e166e614924fe49ebbc305796bc86 | /app/nbio/django/appengine.py | eb0451d6aed53150bd3d98873eb4f00a62282da3 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | nbio/sarahpalin | a78695c62081cb1f92faa4d17938ce9cccbf1360 | bc28b5ebd5b25a346a4790eced9078bde5902633 | refs/heads/master | 2023-08-24T13:27:19.765428 | 2009-02-10T01:23:06 | 2009-02-10T01:23:06 | 125,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | __license__ = "Apache 2.0"
__copyright__ = "Copyright 2008 nb.io"
__author__ = "Randy Reddig - ydnar@nb.io"
import logging
from django.conf import settings
from django.http import HttpResponseForbidden
from google.appengine.api import users
class AuthMiddleware:
    """Authenticate requests via Google App Engine's users API, allowing
    only email addresses listed in ``settings.ALLOWED_USERS``."""

    def process_request(self, request):
        """
        Process the request, and attempt to authenticate using Google's
        App Engine user API.

        Returns:
            ``None`` to continue normal processing (no signed-in user, or
            an allowed user), or an ``HttpResponseForbidden`` when the
            signed-in user's email is not in the allow-list.
        """
        user = users.get_current_user()
        if user:
            # Reuse the user object already fetched above; the original
            # called users.get_current_user() a second time for no reason.
            email = user.email()
            if email not in settings.ALLOWED_USERS:
                return HttpResponseForbidden("User %s does not have permission to view this page." % email)
        return None
| [
"cameron.walters@gmail.com"
] | cameron.walters@gmail.com |
3d1adc70cd541480ba5036a9efa4b5fee148a93d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /YcqAY72nZNPtvofuJ_8.py | b8e97a2438ad8150983e02cf9849e462c2089ed7 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
def quad_sequence(lst):
#find pattern
difference = [lst[len(lst)-2] - lst[len(lst)-3], lst[len(lst)-1] - lst[len(lst)-2]]
difference_of_difference = difference[1] - difference[0]
#workout
last_num = lst[len(lst)-1]
last_diff = difference[1]
next_nums = []
for _ in range(len(lst)):
last_diff+=difference_of_difference
last_num +=last_diff
next_nums.append(last_num)
return next_nums
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
10d6056bcffbec01aca145e3d20e8418b0e2b20d | 5686d1a31b87a47a4774270c00cd141c221cf065 | /axonius_api_client/tests/tests_api/tests_openapi/test_openapi.py | 2339b9648ea31df42d3fa650a36bd3edf33f0b9e | [
"MIT"
] | permissive | Axonius/axonius_api_client | e7eec0845eee9e1b314446121551c584655c2631 | be49566e590834df1b46494c8588651fa029b8c5 | refs/heads/master | 2023-08-19T04:43:13.717989 | 2023-08-10T18:49:40 | 2023-08-10T18:49:40 | 194,601,817 | 17 | 22 | MIT | 2023-08-30T18:45:15 | 2019-07-01T04:52:21 | Python | UTF-8 | Python | false | false | 810 | py | # -*- coding: utf-8 -*-
"""Test suite."""
import pytest
def validate_openapi_spec(data):
    """Assert that ``data`` looks like a YAML OpenAPI document.

    Checks that it is a non-empty string containing the four top-level
    sections an OpenAPI spec must declare.
    """
    assert isinstance(data, str) and data

    lines = data.splitlines()

    for section in ("openapi", "info", "components", "paths"):
        check = f"{section}:"
        # The section header must start a line at the top level.
        matched = any(line.startswith(check) for line in lines)
        assert matched, f"{check!r} not found"
class OpenAPIBase:
    """Shared pytest fixtures for the OpenAPI test classes below."""
    @pytest.fixture(scope="class")
    def apiobj(self, api_openapi):
        # Re-expose the project-level ``api_openapi`` fixture under the
        # name used by the test methods.
        return api_openapi
class TestOpenAPIPrivate(OpenAPIBase):
    """Tests for the private ``_get_spec`` API."""
    def test_get_spec(self, apiobj):
        # The raw spec document should validate as an OpenAPI YAML string.
        data = apiobj._get_spec()
        validate_openapi_spec(data=data)
class TestOpenAPIPublic(OpenAPIBase):
    """Tests for the public ``get_spec`` API."""
    def test_get_spec(self, apiobj):
        # The public wrapper should return the same validatable document.
        data = apiobj.get_spec()
        validate_openapi_spec(data=data)
| [
"nathan.mcbride@axonius.com"
] | nathan.mcbride@axonius.com |
ee51ce7392ad202a8cc2d9132f30de2e63386db0 | 4e196fea7323fbd4d5a19e5d1593e50e97d2d314 | /cap11-xkcd.py | 78d676f58c31277b4082ab789dfae443b4223872 | [] | no_license | dostoievskiab/AutomateBoringStuff | b7f5cf69d9a19275e101fbb6dd6bddee2414327c | e91cc2807ee911ff532a35156b5224f3c35fba5e | refs/heads/master | 2021-08-19T23:50:34.825193 | 2017-11-27T18:21:08 | 2017-11-27T18:21:08 | 105,579,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | import requests, os, bs4
#My internet connection is slow, so i changed the code to get the title instead of the image
# Walk xkcd backwards via each page's rel='prev' link, printing comic titles,
# until the first comic's prev link ('#') is reached.
url = 'http://xkcd.com'
number = ''
while not url.endswith('#'):
    res = requests.get(url)
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # select() returns a (possibly empty) list of elements, so check it
    # before indexing.  The original indexed [0] first -- raising
    # IndexError on a page without a title -- and then compared the
    # resulting *string* to [], which can never be true.
    title_elems = soup.select('#ctitle')
    if not title_elems:
        print('No title found.')
    else:
        print(number + ': ' + title_elems[0].get_text())
    # Fetch the prev link once and reuse it; 'number' lags one page, as in
    # the original (the front page prints with an empty number).
    prev_href = soup.select("a[rel='prev']")[0].get('href')
    url = 'http://xkcd.com' + prev_href
    number = prev_href.replace('/', '')
| [
"umaster@dost-mac.local"
] | umaster@dost-mac.local |
46f088c66d64354d6e4a7ddddc6951a6a16cb979 | 7e4ee5b457ac9c85b64661eeedba0ba51b211c68 | /entities/background.py | 043381b99de005498e7cbc63c0fda1b10cbdf342 | [] | no_license | iCodeIN/Maze-Game | 180ae7dfb2ffc7b8f2868e450b186b41f3ab510a | 9956bf10f12326307eccff668cbc9cc615c0fee9 | refs/heads/master | 2022-12-03T04:00:12.270692 | 2020-08-26T23:43:45 | 2020-08-26T23:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import pygame
import os
import sys
from easy_sdl.tools import *
from easy_sdl.sprite import Sprite
from easy_sdl.sprite import keyUp, keyDown
class Background(Sprite):
def __init__(self):
super().__init__(0, 0, image=path("background.png")) | [
"noreply@github.com"
] | noreply@github.com |
d59aaea52583e6a20a8bae86ba53ef71554cb62d | 64f39ad662546e1f92df4dd2bf7b5ac2f748d39d | /octavia_f5/common/constants.py | 0e1768c388d52b598ce6766ffe5f6d26eec24a41 | [
"Apache-2.0"
] | permissive | zongzw/python-as3 | 2b5026bec3a2e1bba24d4fae7fc90b7f1f58523a | de51773fb2877f4a0988cc655cf4624a3129fd65 | refs/heads/master | 2022-11-24T07:58:04.738669 | 2020-07-28T01:12:25 | 2020-07-28T01:12:25 | 283,049,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,640 | py | # Copyright 2018 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.common.constants import *
PROJECT_ID = 'project_id'
BIGIP = 'bigip'
# Prefixes used to derive BigIP object names from Octavia resource IDs.
PREFIX_PROJECT = 'project_'
PREFIX_LISTENER = 'listener_'
PREFIX_TLS_LISTENER = 'tls_listener_'
PREFIX_TLS_POOL = 'tls_pool_'
PREFIX_CONTAINER = 'container_'
PREFIX_CERTIFICATE = 'cert_'
PREFIX_POOL = 'pool_'
PREFIX_HEALTH_MONITOR = 'hm_'
PREFIX_LOADBALANCER = 'lb_'
PREFIX_POLICY = 'l7policy_'
PREFIX_WRAPPER_POLICY = 'wrapper_policy_'
PREFIX_NETWORK = 'net_'
PREFIX_IRULE = 'irule_'
PREFIX_MEMBER = 'member_'
PREFIX_SECRET = 'secret_'
# Application template identifiers accepted from callers.
APPLICATION_TCP = 'tcp'
APPLICATION_UDP = 'udp'
APPLICATION_HTTP = 'http'
APPLICATION_HTTPS = 'https'
APPLICATION_L4 = 'l4'
APPLICATION_GENERIC = 'generic'
APPLICATION_SHARED = 'shared'
SUPPORTED_APPLICATION_TEMPLATES = (APPLICATION_TCP, APPLICATION_UDP,
                                   APPLICATION_HTTP, APPLICATION_HTTPS,
                                   APPLICATION_L4, APPLICATION_GENERIC,
                                   APPLICATION_SHARED)
# AS3 service class names corresponding to the templates above.
SERVICE_TCP = 'Service_TCP'
SERVICE_UDP = 'Service_UDP'
SERVICE_HTTP = 'Service_HTTP'
SERVICE_HTTPS = 'Service_HTTPS'
SERVICE_L4 = 'Service_L4'
SERVICE_GENERIC = 'Service_Generic'
SUPPORTED_SERVICES = (SERVICE_TCP, SERVICE_UDP, SERVICE_HTTP,
                      SERVICE_HTTPS, SERVICE_L4, SERVICE_GENERIC)
# Groupings used when a feature applies to TCP-based or HTTP-based services.
SERVICE_TCP_TYPES = (SERVICE_TCP, SERVICE_GENERIC, SERVICE_HTTP, SERVICE_HTTPS)
SERVICE_HTTP_TYPES = (SERVICE_HTTP, SERVICE_HTTPS)
# TLS profile option flags (server- vs client-side support differs).
SINGLE_USE_DH = 'singleUseDh'
STAPLER_OCSP = 'staplerOCSP'
TLS_1_0 = 'tls1_0'
TLS_1_1 = 'tls1_1'
TLS_1_2 = 'tls1_2'
TLS_1_3 = 'tls1_3'
TLS_OPTIONS_SERVER = (SINGLE_USE_DH, STAPLER_OCSP, TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3)
TLS_OPTIONS_CLIENT = (SINGLE_USE_DH, TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3)
# Device failover roles.
ROLE_MASTER = 'MASTER'
ROLE_BACKUP = 'BACKUP'
# Networking / agent identifiers.
SEGMENT = 'segment'
VIF_TYPE = 'f5'
ESD = 'esd'
RPC_NAMESPACE_CONTROLLER_AGENT = 'f5controller'
DEVICE_OWNER_LISTENER = 'network:' + 'f5listener'
PROFILE_L4 = 'basic'
# Member/pool operational states as reported by the device.
OPEN = 'OPEN'
FULL = 'FULL'
UP = 'UP'
DOWN = 'DOWN'
DRAIN = 'DRAIN'
NO_CHECK = 'no check'
MAINT = 'MAINT'
F5_NETWORK_AGENT_TYPE = 'F5 Agent'
| [
"a.zong@f5.com"
] | a.zong@f5.com |
2a7ac994a5a17a223c396b72a287abc041a395f5 | d1cb0b1372a037668e2a186511c1432e51e69519 | /python/corso/tipiDati/liste.py | 21e9d6dfbe1268b43675f3cc694cdf95264d7a18 | [] | no_license | marcosciarra/web | e36b96a0d246f20a446c01f3cf58d2a678525bc4 | 71983981876cd3ae257eeaeb30f9a45dc339a66b | refs/heads/master | 2020-03-20T20:05:59.448607 | 2020-01-14T13:28:27 | 2020-01-14T13:28:27 | 137,670,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | myList=[10,20,30]
len(myList)  # returns the number of elements
myList.insert(2,50)  # inserts the value 50 at index 2
myList.append(50)  # appends the value 50 to the end of the list
del myList[1]  # deletes the second element of the list
20 in myList  # returns True or False depending on whether 20 is in myList
# NOTE(review): 'myLiost2' looks like a typo for 'myList2'.
myLiost2=myList.copy()  # copies the list, yielding two distinct objects (separate references)
| [
"marco.sciarra@clicksrl.eu"
] | marco.sciarra@clicksrl.eu |
f4ff36227659d9e416a3d838c7642c67ae5268cd | 0879b6d4284852941aef3d74beb7ef936e707fb9 | /src/train.py | de8c6b1ebf33a4836a09b6230365cf9d03d92583 | [] | no_license | zhuhongweiyi/vgg-1 | 55eb5271046856e30a077ab2bf75cc26c665e530 | a1114bd4b61bac9dd8a0361d64457efb55080f3d | refs/heads/master | 2021-01-22T13:23:48.816204 | 2017-05-05T02:20:20 | 2017-05-05T02:20:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | """Train model."""
import vgg
import config
import read_data
import os
from datetime import datetime
import tensorflow as tf
def main():
    """Build the VGG training graph and run the training loop.

    Reads batches via ImageReader, restores a checkpoint when one exists
    (unless ``initialize`` is set), and periodically writes TensorBoard
    summaries and checkpoints.  Targets the TF 1.x Graph/Session API.
    """
    t_config = config.Config()
    with tf.Graph().as_default():
        reader = read_data.ImageReader('./data/JPEGImages/',
                                       './data/labels/', t_config)
        # Build the model graph and its losses before creating the session.
        model = vgg.Vgg(t_config)
        # Forward pass (True presumably enables training mode -- TODO confirm).
        model.build_model(True)
        # Loss tensor.
        loss = model.loss()
        # Training op; also increments the global step.
        train_op = model.train_op(loss, model.global_step)
        # Variable initializer.
        init_op = tf.global_variables_initializer()
        # Saver keeps up to 100 checkpoints.
        saver = tf.train.Saver(max_to_keep=100)
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:
            # Initialize parameters, or restore from a previous checkpoint
            # when the params dir is non-empty and re-init wasn't requested.
            if not os.path.exists(t_config.params_dir):
                os.makedirs(t_config.params_dir)
            if os.listdir(t_config.params_dir) == [] or t_config.initialize:
                print("Initializing Network")
                sess.run(init_op)
            else:
                sess.run(init_op)
                model.restore(sess, saver, t_config.load_filename)
            merged = tf.summary.merge_all()
            # One timestamped log dir per run.
            logdir = os.path.join(t_config.logdir,
                                  datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
            writer = tf.summary.FileWriter(logdir, sess.graph)
            # Training loop.
            # NOTE(review): ``xrange`` exists only on Python 2 -- confirm
            # the target interpreter, or this raises NameError on Python 3.
            for idx in xrange(t_config.max_iteration):
                with tf.device("/cpu:0"):
                    imgs, labels, name_list = reader.get_batch()
                # Feed the batch into the model's placeholders.
                feed_dict = {
                    model.images: imgs,
                    model.labels: labels
                }
                with tf.device(t_config.gpu):
                    # Run one training step.
                    sess.run(train_op, feed_dict=feed_dict)
                with tf.device('/cpu:0'):
                    # Periodically write summaries...
                    if (idx + 1) % t_config.summary_iters == 0:
                        tmp_global_step = model.global_step.eval()
                        summary = sess.run(merged, feed_dict=feed_dict)
                        writer.add_summary(summary, tmp_global_step)
                    # ...and checkpoints.
                    if (idx + 1) % t_config.checkpoint_iters == 0:
                        tmp_global_step = model.global_step.eval()
                        model.save(sess, saver, t_config.save_filename,
                                   tmp_global_step)
if __name__ == '__main__':
    main()
| [
"498973030@qq.com"
] | 498973030@qq.com |
7cc346854704e5bab4d466d21c2b93f7dfb24ab8 | 42c899e3111a9601a91a933e4483bea1a24a4cf9 | /venv/Scripts/pip-script.py | 7591e8beed1f77b4938141e022c6d0e9274c685b | [] | no_license | bbiaisb/Towards_Image_Detection | 5258bd253f182b47e38c36bdab0fa7a6482a9468 | f5e19000e0488afcf762404823018d53cbc6e0fb | refs/heads/master | 2020-09-11T05:31:44.867830 | 2019-12-16T15:50:29 | 2019-12-16T15:50:29 | 221,955,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!C:\Users\andri\PycharmProjects\Towards_Image_Detection\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py(w)"/".exe" wrapper suffix from argv[0]
    # so pip reports a clean program name, then dispatch to pip's console
    # entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"57802989+Dadaaeisen@users.noreply.github.com"
] | 57802989+Dadaaeisen@users.noreply.github.com |
f6982901beca41a044b52253ed8a059643077e34 | 28034caa4f93a7e8b395601c709c7d4e1ab029ba | /od_model/models/common.py | 6048b396160e96a35be76ed27338932853737a5a | [] | no_license | Amart85/BrainWheel | 1490f62cb78e5a0a9d173bb22f89ebbbca32172a | 727e220cab352aa74212ab58964a05299a70337e | refs/heads/master | 2023-06-18T04:11:02.347910 | 2021-05-11T21:09:15 | 2021-05-11T21:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,793 | py | # This file contains modules common to various models
import math
import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image, ImageDraw
try:
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from utils.plots import color_list
except ModuleNotFoundError:
from od_model.utils.datasets import letterbox
from od_model.utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from od_model.utils.plots import color_list
def autopad(k, p=None):  # kernel, padding
    """Return the padding that keeps output size equal to input ('same').

    When ``p`` is given it is returned unchanged; otherwise it is derived
    as ``k // 2`` for an int kernel, or element-wise for a list of sizes.
    """
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
def DWConv(c1, c2, k=1, s=1, act=True):
    # Depthwise convolution: groups = gcd(c1, c2), i.e. true depthwise when
    # c1 == c2, otherwise the largest valid grouped convolution.
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
    # Standard convolution: Conv2d -> BatchNorm2d -> activation.
    # Submodule assignment order (conv, bn, act) fixes state_dict keys, so
    # it must not be reordered.
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  # bias folded into BN
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> LeakyReLU(0.1); an nn.Module -> used as-is; else identity.
        self.act = nn.LeakyReLU(0.1) if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
    def forward(self, x):
        return self.act(self.bn(self.conv(x)))
    def fuseforward(self, x):
        # Used after BN has been fused into the conv weights elsewhere.
        return self.act(self.conv(x))
class Bottleneck(nn.Module):
    # Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add.
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        # Residual connection only when requested and channel counts match.
        self.add = shortcut and c1 == c2
    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    # Splits the input into a bottleneck branch (cv1 -> m -> cv3) and a
    # shortcut branch (cv2), then fuses them with BN + activation + cv4.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        # n stacked bottlenecks with expansion 1.0 on the hidden channels.
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions (simplified BottleneckCSP: the
    # BN/act fusion stage is folded into cv3).
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
        # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP: concatenates the
    # input with max-pools at several kernel sizes (stride 1, 'same' pad).
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class Focus(nn.Module):
    # Focus wh information into c-space: sample every other pixel in each of
    # the four 2x2 phases and stack them as channels before convolving.
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
        # self.contract = Contract(gain=2)
    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
        # return self.conv(self.contract(x))
class Contract(nn.Module):
    """Fold spatial detail into channels: (N, C, H, W) -> (N, C*g*g, H/g, W/g).

    E.g. with gain 2, x(1,64,80,80) becomes x(1,256,40,40).  H and W must
    be divisible by the gain.
    """

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain  # spatial reduction factor g

    def forward(self, x):
        g = self.gain
        n, c, h, w = x.size()
        # Split each spatial axis into (size // g, g), move the two small
        # g-factors in front of the channel axis, then merge them into it.
        y = x.view(n, c, h // g, g, w // g, g)
        y = y.permute(0, 3, 5, 1, 2, 4).contiguous()
        return y.view(n, c * g * g, h // g, w // g)
class Expand(nn.Module):
    """Unfold channels into space: (N, C, H, W) -> (N, C/g^2, H*g, W*g).

    E.g. with gain 2, x(1,64,80,80) becomes x(1,16,160,160).  C must be
    divisible by gain**2.  Inverse of Contract's layout.
    """

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain  # spatial expansion factor g

    def forward(self, x):
        g = self.gain
        n, c, h, w = x.size()
        # Peel two factors of g off the channel axis and interleave them
        # into the height and width axes.
        y = x.view(n, g, g, c // g ** 2, h, w)
        y = y.permute(0, 3, 4, 1, 5, 2).contiguous()
        return y.view(n, c // g ** 2, h * g, w * g)
class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension  # axis passed to torch.cat

    def forward(self, x):
        # x is a list/tuple of tensors with matching non-concat dims.
        return torch.cat(x, self.d)
class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module: wraps non_max_suppression() so
    # it can be appended to a model as a final layer.
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class
    def __init__(self):
        super(NMS, self).__init__()
    def forward(self, x):
        # x[0] is the inference output of the Detect head; any remaining
        # (training) outputs are ignored.
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    # NOTE(review): forward() relies on ``self.stride`` and ``self.names``,
    # which are not set here -- presumably copied from the wrapped model
    # elsewhere; confirm before standalone use.
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class
    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()
    def autoshape(self):
        # Idempotence guard: wrapping an already-wrapped model is a no-op.
        print('autoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self
    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   filename:   imgs = 'data/samples/zidane.jpg'
        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:             = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:           = np.zeros((720,1280,3))  # HWC
        #   torch:           = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            # Already a batched BCHW tensor: skip preprocessing and NMS.
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
        # Pre-process: normalize every input form to a list of HWC uint8 arrays.
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1 = [], []  # image and inference shapes
        for i, im in enumerate(imgs):
            if isinstance(im, str):  # filename or uri
                im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)  # open
                im = np.array(im)  # to numpy
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im  # update
        # One common inference shape for the whole batch, rounded to the
        # model's stride.
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
        # Post-process: map boxes back from letterboxed to original coords.
        for i in range(n):
            scale_coords(shape1, y[i][:, :4], shape0[i])
        return Detections(imgs, y, self.names)
class Detections:
    # detections class for YOLOv5 inference results
    def __init__(self, imgs, pred, names=None):
        super(Detections, self).__init__()
        d = pred[0].device  # device
        # Per-image normalization vectors (w, h, w, h, 1, 1) used to scale
        # pixel boxes into 0-1 coordinates.
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)
    def display(self, pprint=False, show=False, save=False, render=False):
        # Shared implementation behind print/show/save/render.
        # NOTE(review): ``str`` below shadows the builtin within this scope.
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            str = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    str += f'{n} {self.names[int(c)]}s, '  # add to string
                if show or save or render:
                    img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        # str += '%s %.2f, ' % (names[int(cls)], conf)  # label
                        ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10])  # plot
            if pprint:
                print(str)
            if show:
                img.show(f'Image {i}')  # show
            if save:
                f = f'results{i}.jpg'
                str += f"saved to '{f}'"
                img.save(f)  # save
            if render:
                self.imgs[i] = np.asarray(img)
    def print(self):
        self.display(pprint=True)  # print results
    def show(self):
        self.display(show=True)  # show results
    def save(self):
        self.display(save=True)  # save results
    def render(self):
        self.display(render=True)  # render results
        return self.imgs
    def __len__(self):
        return self.n
    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x
class Classify(nn.Module):
    """Classification head: maps x(b, c1, 20, 20) to x(b, c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # pool to x(b, c1, 1, 1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b, c2, 1, 1)
        self.flat = nn.Flatten()

    def forward(self, x):
        # Accept either a single tensor or a list of tensors (concatenated on channels).
        inputs = x if isinstance(x, list) else [x]
        pooled = torch.cat([self.aap(t) for t in inputs], 1)
        return self.flat(self.conv(pooled))  # flatten to x(b, c2)
| [
"noreply@github.com"
] | noreply@github.com |
2593c43b575d06c67805bf668e9a1a413c3698a1 | ece881d079eb05c2531a99c7b655b8c9c3144329 | /pycls/datasets/data.py | ebe4f236c9d261fa0912c8ac8edca1d26266ff51 | [
"MIT"
] | permissive | tibetgao/deep-active-learning-pytorch | f7b2bf018410d87ccc4ac52478f8d3085622fb60 | 637fd507235632903bcf84ed841ff524d847b94e | refs/heads/main | 2023-08-12T05:04:47.878405 | 2021-09-14T21:22:39 | 2021-09-14T21:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,535 | py | # This file is modified from a code implementation shared with me by Prateek Munjal et al., authors of the paper https://arxiv.org/abs/2002.09564
# GitHub: https://github.com/PrateekMunjal
# ----------------------------------------------------------
import random
import torch
import torchvision
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torch.utils.data.sampler import SubsetRandomSampler
from .randaugment import RandAugmentPolicy
from .simclr_augment import get_simclr_ops
from .utils import helpers
import pycls.utils.logging as lu
from pycls.datasets.custom_datasets import CIFAR10, CIFAR100, MNIST, SVHN
from pycls.datasets.imbalanced_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from pycls.datasets.sampler import IndexedSequentialSampler
from pycls.datasets.tiny_imagenet import TinyImageNet
logger = lu.get_logger(__name__)
class Data:
    """
    Container for all dataset-related helpers: building transform pipelines,
    loading datasets, splitting them into labeled/unlabeled/validation
    partitions, and constructing DataLoaders over index subsets.

    To support a new dataset, update:
      1. getPreprocessOps
      2. getDataset
    """

    def __init__(self, cfg):
        """
        Initialize dataset attributes from a yacs config object.

        INPUT:
        cfg: yacs.config, config object
        """
        # Fix: keep the config around so methods built later (e.g. the simclr
        # branch of getPreprocessOps) can read cfg-dependent values.
        self.cfg = cfg
        self.dataset = cfg.DATASET.NAME
        self.data_dir = cfg.DATASET.ROOT_DIR
        self.datasets_accepted = cfg.DATASET.ACCEPTED
        self.eval_mode = False
        self.aug_method = cfg.DATASET.AUG_METHOD
        self.rand_augment_N = 1 if cfg is None else cfg.RANDAUG.N
        self.rand_augment_M = 5 if cfg is None else cfg.RANDAUG.M

    def about(self):
        """Show all properties of this class."""
        print(self.__dict__)

    def make_data_lists(self, exp_dir):
        """
        Create train.txt and test.txt index lists. Text format is chosen to
        allow readability.

        Keyword arguments:
        exp_dir -- Full path to the experiment directory where index lists will be saved
        """
        # Fix: os and glob were used without being imported at module level.
        import os
        import glob
        train = os.path.join(exp_dir, 'train.txt')
        test = os.path.join(exp_dir, 'test.txt')
        if os.path.exists(train) or os.path.exists(test):
            # Fix: the original built this message but never emitted it.
            print(f'train.txt or test.txt already exist at {exp_dir}')
            return None
        # Fix: the original globbed an undefined name `path`; use the
        # configured dataset root directory instead.
        train_list = glob.glob(os.path.join(self.data_dir, 'train/**/*.png'), recursive=True)
        test_list = glob.glob(os.path.join(self.data_dir, 'test/**/*.png'), recursive=True)
        with open(train, 'w') as filehandle:
            filehandle.writelines("%s\n" % index for index in train_list)
        with open(test, 'w') as filehandle:
            filehandle.writelines("%s\n" % index for index in test_list)

    def getPreprocessOps(self):
        """
        Build the ordered list of preprocessing/augmentation transforms for
        the current dataset. Note the order of operations matters.

        OUTPUT:
        List of torchvision transform operations; raises NotImplementedError
        for unsupported datasets.
        """
        if self.dataset in self.datasets_accepted:
            ops = []
            norm_mean = []
            norm_std = []
            if self.dataset in ["CIFAR10", "CIFAR100", 'IMBALANCED_CIFAR10', 'IMBALANCED_CIFAR100']:
                ops = [transforms.RandomCrop(32, padding=4)]
                norm_mean = [0.4914, 0.4822, 0.4465]
                norm_std = [0.247, 0.2435, 0.2616]
            elif self.dataset == "MNIST":
                ops = [transforms.Resize(32)]
                norm_mean = [0.1307, ]
                norm_std = [0.3081, ]
            elif self.dataset == "TINYIMAGENET":
                ops = [transforms.RandomResizedCrop(64)]
                # Using ImageNet statistics
                norm_mean = [0.485, 0.456, 0.406]
                norm_std = [0.229, 0.224, 0.225]
            elif self.dataset in ["SVHN"]:
                ops = [transforms.RandomCrop(32, padding=4)]
                norm_mean = [0.4376, 0.4437, 0.4728]
                norm_std = [0.1980, 0.2010, 0.1970]
            else:
                raise NotImplementedError

            if not self.eval_mode and (self.aug_method == 'simclr'):
                # Fix: this branch referenced a bare `cfg`, which is undefined
                # in this scope and raised NameError; read the stored config.
                ops.insert(1, get_simclr_ops(input_shape=self.cfg.TRAIN.IM_SIZE))
            elif not self.eval_mode and (self.aug_method == 'randaug'):
                # N and M values are taken from the experiments section of the
                # RandAugment paper (which works with WideResNet models).
                ops.append(RandAugmentPolicy(N=self.rand_augment_N, M=self.rand_augment_M))
            elif not self.eval_mode and (self.aug_method == 'hflip'):
                ops.append(transforms.RandomHorizontalFlip())

            ops.append(transforms.ToTensor())
            ops.append(transforms.Normalize(norm_mean, norm_std))

            if self.eval_mode:
                # Evaluation keeps only the deterministic first op plus
                # tensor conversion and normalization (no augmentation).
                ops = [ops[0], transforms.ToTensor(), transforms.Normalize(norm_mean, norm_std)]
            else:
                print("Preprocess Operations Selected ==> ", ops)
            return ops
        else:
            print("Either the specified {} dataset is not added or there is no if condition in getDataset function of Data class".format(self.dataset))
            logger.info("Either the specified {} dataset is not added or there is no if condition in getDataset function of Data class".format(self.dataset))
            raise NotImplementedError

    def getDataset(self, save_dir, isTrain=True, isDownload=False):
        """
        Return the dataset instance and the number of data points in it.

        INPUT:
        save_dir: String, path where the dataset will be saved if downloaded.
        isTrain (optional): Bool, if True the train partition is used, else test.
        isDownload (optional): Bool, if True the dataset is saved at save_dir.

        OUTPUT:
        Tuple (dataset instance, length of dataset); raises NotImplementedError
        for unsupported datasets.
        """
        # Test-time transforms are always the deterministic (eval-mode) pipeline.
        self.eval_mode = True
        test_preops_list = self.getPreprocessOps()
        test_preprocess_steps = transforms.Compose(test_preops_list)
        self.eval_mode = False

        if isTrain:
            preprocess_steps = self.getPreprocessOps()
        else:
            preprocess_steps = test_preops_list
        preprocess_steps = transforms.Compose(preprocess_steps)

        if self.dataset == "MNIST":
            mnist = MNIST(save_dir, train=isTrain, transform=preprocess_steps, test_transform=test_preprocess_steps, download=isDownload)
            return mnist, len(mnist)
        elif self.dataset == "CIFAR10":
            cifar10 = CIFAR10(save_dir, train=isTrain, transform=preprocess_steps, test_transform=test_preprocess_steps, download=isDownload)
            return cifar10, len(cifar10)
        elif self.dataset == "CIFAR100":
            cifar100 = CIFAR100(save_dir, train=isTrain, transform=preprocess_steps, test_transform=test_preprocess_steps, download=isDownload)
            return cifar100, len(cifar100)
        elif self.dataset == "SVHN":
            split = 'train' if isTrain else 'test'
            svhn = SVHN(save_dir, split=split, transform=preprocess_steps, test_transform=test_preprocess_steps, download=isDownload)
            return svhn, len(svhn)
        elif self.dataset == "TINYIMAGENET":
            split = 'train' if isTrain else 'val'
            tiny = TinyImageNet(save_dir, split=split, transform=preprocess_steps, test_transform=test_preprocess_steps)
            return tiny, len(tiny)
        elif self.dataset == 'IMBALANCED_CIFAR10':
            im_cifar10 = IMBALANCECIFAR10(save_dir, train=isTrain, transform=preprocess_steps, test_transform=test_preprocess_steps)
            return im_cifar10, len(im_cifar10)
        elif self.dataset == 'IMBALANCED_CIFAR100':
            im_cifar100 = IMBALANCECIFAR100(save_dir, train=isTrain, transform=preprocess_steps, test_transform=test_preprocess_steps)
            return im_cifar100, len(im_cifar100)
        else:
            print("Either the specified {} dataset is not added or there is no if condition in getDataset function of Data class".format(self.dataset))
            logger.info("Either the specified {} dataset is not added or there is no if condition in getDataset function of Data class".format(self.dataset))
            raise NotImplementedError

    def makeLUVSets(self, train_split_ratio, val_split_ratio, data, seed_id, save_dir):
        """
        Initialize labeled / unlabeled / validation index sets by splitting
        the data according to the split ratios:

            |<--- Labelled --><--- Unlabelled --><--- Validation --->|

        INPUT:
        train_split_ratio: Float, proportion of data in the (labeled) train set.
        val_split_ratio: Float, proportion of data in the validation set.
        data: dataset instance (see getDataset).
        seed_id: int, seed for reproducible shuffling.
        save_dir: directory where the .npy index files are written.

        OUTPUT:
        Paths to the saved lSet/uSet/valSet .npy files.
        """
        # Reproducibility stuff
        torch.manual_seed(seed_id)
        np.random.seed(seed_id)

        assert isinstance(train_split_ratio, float), "Train split ratio is of {} datatype instead of float".format(type(train_split_ratio))
        assert isinstance(val_split_ratio, float), "Val split ratio is of {} datatype instead of float".format(type(val_split_ratio))
        assert self.dataset in self.datasets_accepted, "Sorry the dataset {} is not supported. Currently we support {}".format(self.dataset, self.datasets_accepted)

        n_dataPoints = len(data)
        all_idx = [i for i in range(n_dataPoints)]
        np.random.shuffle(all_idx)
        train_splitIdx = int(train_split_ratio * n_dataPoints)
        # To get the validation index from the end we multiply by 1 - val_ratio.
        val_splitIdx = int((1 - val_split_ratio) * n_dataPoints)
        # Check there is no overlap between train and validation data.
        assert train_split_ratio + val_split_ratio < 1.0, "Validation data over laps with train data as last train index is {} and last val index is {}. \
            The program expects val index > train index. Please satisfy the constraint: train_split_ratio + val_split_ratio < 1.0; currently it is {} + {} is not < 1.0 => {} is not < 1.0"\
            .format(train_splitIdx, val_splitIdx, train_split_ratio, val_split_ratio, train_split_ratio + val_split_ratio)

        lSet = all_idx[:train_splitIdx]
        uSet = all_idx[train_splitIdx:val_splitIdx]
        valSet = all_idx[val_splitIdx:]

        lSet = np.array(lSet, dtype=np.ndarray)
        uSet = np.array(uSet, dtype=np.ndarray)
        valSet = np.array(valSet, dtype=np.ndarray)

        np.save(f'{save_dir}/lSet.npy', lSet)
        np.save(f'{save_dir}/uSet.npy', uSet)
        np.save(f'{save_dir}/valSet.npy', valSet)
        return f'{save_dir}/lSet.npy', f'{save_dir}/uSet.npy', f'{save_dir}/valSet.npy'

    def makeTVSets(self, val_split_ratio, data, seed_id, save_dir):
        """
        Initialize train and validation index sets by splitting the train data:

            |<------------- Train -------------><--- Validation --->|

        INPUT:
        val_split_ratio: Float, proportion of data in the validation set.
        data: dataset instance (see getDataset).
        seed_id: int, seed for reproducible shuffling.
        save_dir: directory where the .npy index files are written.

        OUTPUT:
        Paths to the saved trainSet/valSet .npy files.
        """
        # Reproducibility stuff
        torch.manual_seed(seed_id)
        np.random.seed(seed_id)

        assert isinstance(val_split_ratio, float), "Val split ratio is of {} datatype instead of float".format(type(val_split_ratio))
        assert self.dataset in self.datasets_accepted, "Sorry the dataset {} is not supported. Currently we support {}".format(self.dataset, self.datasets_accepted)

        n_dataPoints = len(data)
        all_idx = [i for i in range(n_dataPoints)]
        np.random.shuffle(all_idx)
        # To get the validation index from the end we multiply by 1 - val_ratio.
        val_splitIdx = int((1 - val_split_ratio) * n_dataPoints)

        trainSet = all_idx[:val_splitIdx]
        valSet = all_idx[val_splitIdx:]

        trainSet = np.array(trainSet, dtype=np.ndarray)
        valSet = np.array(valSet, dtype=np.ndarray)

        np.save(f'{save_dir}/trainSet.npy', trainSet)
        np.save(f'{save_dir}/valSet.npy', valSet)
        return f'{save_dir}/trainSet.npy', f'{save_dir}/valSet.npy'

    def makeUVSets(self, val_split_ratio, data, seed_id, save_dir):
        """
        Split an already-sampled unlabeled index array into unlabeled and
        validation subsets:

            |<------------- Unlabeled -------------><--- Validation --->|

        INPUT:
        val_split_ratio: Float, proportion of data in the validation set.
        data: array of unlabeled indices (post initial pool sampling).
        seed_id: int, seed for reproducible shuffling.
        save_dir: directory where the .npy index files are written.

        OUTPUT:
        Paths to the saved uSet/valSet .npy files.
        """
        # Reproducibility stuff
        torch.manual_seed(seed_id)
        np.random.seed(seed_id)

        assert isinstance(val_split_ratio, float), "Val split ratio is of {} datatype instead of float".format(type(val_split_ratio))
        assert self.dataset in self.datasets_accepted, "Sorry the dataset {} is not supported. Currently we support {}".format(self.dataset, self.datasets_accepted)

        n_dataPoints = len(data)
        np.random.shuffle(data)
        # To get the validation index from the end we multiply by 1 - val_ratio.
        val_splitIdx = int((1 - val_split_ratio) * n_dataPoints)

        uSet = data[:val_splitIdx]
        valSet = data[val_splitIdx:]

        uSet = np.array(uSet, dtype=np.ndarray)
        valSet = np.array(valSet, dtype=np.ndarray)

        np.save(f'{save_dir}/uSet.npy', uSet)
        np.save(f'{save_dir}/valSet.npy', valSet)
        return f'{save_dir}/uSet.npy', f'{save_dir}/valSet.npy'

    def getIndexesDataLoader(self, indexes, batch_size, data):
        """
        Return a DataLoader that randomly samples batches of `batch_size`
        from the given index set (SubsetRandomSampler).

        ARGS
        -----
        indexes: np.ndarray of indices to sample from.
        batch_size: int, batch size used by the data loader.
        data: dataset instance (see getDataset).
        """
        assert isinstance(indexes, np.ndarray), "Indexes has dtype: {} whereas expected is nd.array.".format(type(indexes))
        assert isinstance(batch_size, int), "Batchsize is expected to be of int type whereas currently it has dtype: {}".format(type(batch_size))

        subsetSampler = SubsetRandomSampler(indexes)
        loader = DataLoader(dataset=data, batch_size=batch_size, sampler=subsetSampler)
        return loader

    def getSequentialDataLoader(self, indexes, batch_size, data):
        """
        Return a DataLoader that yields batches of `batch_size` sequentially
        from the given index set (IndexedSequentialSampler).

        ARGS
        -----
        indexes: np.ndarray of indices to iterate over.
        batch_size: int, batch size used by the data loader.
        data: dataset instance (see getDataset).
        """
        assert isinstance(indexes, np.ndarray), "Indexes has dtype: {} whereas expected is nd.array.".format(type(indexes))
        assert isinstance(batch_size, int), "Batchsize is expected to be of int type whereas currently it has dtype: {}".format(type(batch_size))

        subsetSampler = IndexedSequentialSampler(indexes)
        loader = DataLoader(dataset=data, batch_size=batch_size, sampler=subsetSampler, shuffle=False)
        return loader

    def getTestLoader(self, data, test_batch_size, seed_id=0):
        """
        Return a DataLoader over the whole test set (random subset sampler).

        INPUT:
        data: dataset instance (see getDataset).
        test_batch_size: int, size of each test batch.
        seed_id: int, seed for reproducibility.
        """
        # Reproducibility stuff
        torch.manual_seed(seed_id)
        np.random.seed(seed_id)

        if self.dataset in self.datasets_accepted:
            n_datapts = len(data)
            idx = [i for i in range(n_datapts)]
            test_sampler = SubsetRandomSampler(idx)
            testLoader = DataLoader(data, batch_size=test_batch_size, sampler=test_sampler)
            return testLoader
        else:
            raise NotImplementedError

    def loadPartitions(self, lSetPath, uSetPath, valSetPath):
        """Load labeled/unlabeled/validation index arrays and verify they are disjoint."""
        assert isinstance(lSetPath, str), "Expected lSetPath to be a string."
        assert isinstance(uSetPath, str), "Expected uSetPath to be a string."
        assert isinstance(valSetPath, str), "Expected valSetPath to be a string."

        lSet = np.load(lSetPath, allow_pickle=True)
        uSet = np.load(uSetPath, allow_pickle=True)
        valSet = np.load(valSetPath, allow_pickle=True)

        # Checking no overlap
        assert len(set(valSet) & set(uSet)) == 0, "Intersection is not allowed between validationset and uset"
        assert len(set(valSet) & set(lSet)) == 0, "Intersection is not allowed between validationset and lSet"
        assert len(set(uSet) & set(lSet)) == 0, "Intersection is not allowed between uSet and lSet"

        return lSet, uSet, valSet

    def loadTVPartitions(self, trainSetPath, valSetPath):
        """Load train/validation index arrays and verify they are disjoint."""
        assert isinstance(trainSetPath, str), "Expected trainSetPath to be a string."
        assert isinstance(valSetPath, str), "Expected valSetPath to be a string."

        trainSet = np.load(trainSetPath, allow_pickle=True)
        valSet = np.load(valSetPath, allow_pickle=True)

        # Checking no overlap
        assert len(set(valSet) & set(trainSet)) == 0, "Intersection is not allowed between validationset and trainSet"

        return trainSet, valSet

    def loadPartition(self, setPath):
        """Load a single index array from a .npy file."""
        assert isinstance(setPath, str), "Expected setPath to be a string."
        setArray = np.load(setPath, allow_pickle=True)
        return setArray

    def saveSets(self, lSet, uSet, activeSet, save_dir):
        """Persist labeled, unlabeled, and newly-selected active index sets to save_dir."""
        lSet = np.array(lSet, dtype=np.ndarray)
        uSet = np.array(uSet, dtype=np.ndarray)
        valSet = np.array(activeSet, dtype=np.ndarray)

        np.save(f'{save_dir}/lSet.npy', lSet)
        np.save(f'{save_dir}/uSet.npy', uSet)
        np.save(f'{save_dir}/activeSet.npy', activeSet)

    def saveSet(self, setArray, setName, save_dir):
        """Persist a single index array as <save_dir>/<setName>.npy and return its path."""
        setArray = np.array(setArray, dtype=np.ndarray)
        np.save(f'{save_dir}/{setName}.npy', setArray)
        return f'{save_dir}/{setName}.npy'

    def getClassWeightsFromDataset(self, dataset, index_set, bs):
        """Compute inverse-frequency class weights over a subset of `dataset`."""
        temp_loader = self.getIndexesDataLoader(indexes=index_set, batch_size=bs, data=dataset)
        return self.getClassWeights(temp_loader)

    def getClassWeights(self, dataloader):
        """
        INPUT
        dataloader: DataLoader (or any iterable of (x, y) batches).

        OUTPUT
        Tensor of size C where element i is the inverse relative frequency of class i.
        """
        all_labels = []
        for _, y in dataloader:
            all_labels.append(y)
        print("===Computing Imbalanced Weights===")
        all_labels = np.concatenate(all_labels, axis=0)
        print(f"all_labels.shape: {all_labels.shape}")
        classes = np.unique(all_labels)
        print(f"classes: {classes.shape}")
        num_classes = len(classes)
        freq_count = np.zeros(num_classes, dtype=int)
        for i in classes:
            freq_count[i] = (all_labels == i).sum()
        # Normalize to relative frequencies, then invert.
        freq_count = (1.0 * freq_count) / np.sum(freq_count)
        print(f"=== Sum(freq_count): {np.sum(freq_count)} ===")
        class_weights = 1. / freq_count
        class_weights = torch.Tensor(class_weights)
        return class_weights
"akshaychandra111@gmail.com"
] | akshaychandra111@gmail.com |
131d88c9477590c4f4bf61eb89d2e6f7856d5fd0 | 27b0aab7b86cc3864364f5dd16c08c53d6689eba | /sgld_nrg/__init__.py | 7f6b1dbd4f24fe5663ccac253b50fff7187f381a | [
"MIT"
] | permissive | Sycor4x/sgld-nrg | b4620d21b3e83662c18397b813d29137a1670c43 | 9eb56008453d89dcddb5f08d967081ba23cd84bd | refs/heads/main | 2023-08-29T03:09:54.952998 | 2021-10-15T19:41:03 | 2021-10-15T19:41:03 | 404,198,177 | 0 | 1 | MIT | 2021-09-23T03:28:13 | 2021-09-08T03:27:10 | Python | UTF-8 | Python | false | false | 114 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: David J. Elkind
# Creation date: 2021-09-11 (year-month-day)
| [
"djelkind@gmail.com"
] | djelkind@gmail.com |
8106e754bf96fd0f094304a3ce696783ac0b13af | d9be32e9351185e26890a130888cb4e6c775a2a1 | /run.py | 1e5172d13dbae8f647e6e7859b9ac8d5b7fa9c92 | [] | no_license | ManonVioleau/FlaskApp_WalletOverview | 77b3948904742f342fc27fcc103e7a96c1ed7cd4 | 52a944652db5fae593fcd04be6edbef7cc66ce1f | refs/heads/master | 2023-07-06T14:53:44.593424 | 2021-07-29T17:16:16 | 2021-07-29T17:16:16 | 390,789,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #! C:\Users\manon\Documents\Web_Dev\Binance\20210526_Test\env python
import app1
from app1 import app

if __name__ == "__main__":
    # Start the Flask development server with debugger/reloader enabled.
    app.run(debug=True)
"manonvioleau34@outlook.fr"
] | manonvioleau34@outlook.fr |
4b5934ae9912a993be614881c98ed17458db8322 | 60eb8d9b5e2e26b4805f76e009612b19575e1971 | /app/home/views.py | c885976ff5a7c2fa34d5224c5695672a1c623c95 | [] | no_license | raphanus/movie_project | ecf490d4499fc7d0e6476c28d1e4651e33dea884 | 6c3b02b7d3560641e8c0a5cf4a6564c2d9460e85 | refs/heads/master | 2020-03-15T05:12:36.840227 | 2018-05-03T11:36:34 | 2018-05-03T11:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,755 | py | # coding:utf-8
from . import home
from flask import render_template, redirect, url_for, flash, session, request, Response
from app.home.forms import RegistForm, LoginForm, UserdetailForm, PwdForm, CommentForm
from app.models import User, Userlog, Preview, Tag, Movie, Comment, Moviecol
from werkzeug.security import generate_password_hash
from werkzeug.utils import secure_filename
from app import db, app, rd
from functools import wraps
import uuid
import os
import datetime
# Login-required decorator for member views.
def user_login_req(f):
    """Redirect anonymous visitors to the login page, preserving the `next` URL."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "user" in session:
            return f(*args, **kwargs)
        return redirect(url_for("home.login", next=request.url))
    return wrapper
# Build a collision-safe file name.
def change_filename(filename):
    """Return '<timestamp><random hex><original extension>' for an uploaded file name."""
    ext = os.path.splitext(filename)[-1]
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    return stamp + str(uuid.uuid4().hex) + ext
@home.route("/login/", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
data = form.data
user = User.query.filter_by(name=data["name"]).first()
if not user.check_pwd(data["pwd"]):
flash("密码错误!", "err")
return redirect(url_for("home.login"))
session["user"] = user.name
session["user_id"] = user.id
userlog = Userlog(
user_id=user.id,
ip=request.remote_addr
)
db.session.add(userlog)
db.session.commit()
return redirect(url_for("home.user"))
return render_template("home/login.html", form=form)
@home.route("/logout/")
def logout():
session.pop("user", None)
session.pop("user_id", None)
return redirect(url_for("home.login"))
# Member registration
@home.route("/regist/", methods=["GET", "POST"])
def regist():
    """Create a new member account from the registration form."""
    form = RegistForm()
    if form.validate_on_submit():
        data = form.data
        new_user = User(
            name=data["name"],
            email=data["email"],
            phone=data["phone"],
            pwd=generate_password_hash(data["pwd"]),  # never store plaintext passwords
            uuid=uuid.uuid4().hex,
        )
        db.session.add(new_user)
        db.session.commit()
        flash("注册成功!", "ok")
    return render_template("home/regist.html", form=form)
# Member profile editing
@home.route("/user/", methods=["GET", "POST"])
@user_login_req
def user():
    """Show and update the logged-in member's profile (avatar, name, email, phone, info)."""
    form = UserdetailForm()
    user = User.query.get(int(session["user_id"]))
    form.face.validators = []  # avatar is optional when editing an existing profile
    if request.method == "GET":
        # Pre-fill the form with the current profile values.
        form.name.data = user.name
        form.email.data = user.email
        form.phone.data = user.phone
        form.info.data = user.info
    if form.validate_on_submit():
        data = form.data
        if data["face"]:
            file_face = secure_filename(form.face.data.filename)
            if not os.path.exists(app.config["FC_DIR"]):
                os.makedirs(app.config["FC_DIR"])
                # Fix: os.chmod requires an integer mode; the original passed
                # the string "rw", which raises TypeError at runtime.
                os.chmod(app.config["FC_DIR"], 0o766)
            user.face = change_filename(file_face)
            data["face"].save(app.config["FC_DIR"] + user.face)
        # Reject values already taken by another account.
        name_count = User.query.filter_by(name=data["name"]).count()
        if data["name"] != user.name and name_count == 1:
            flash("昵称已经存在!", "err")
            return redirect(url_for("home.user"))
        email_count = User.query.filter_by(email=data["email"]).count()
        if data["email"] != user.email and email_count == 1:
            flash("邮箱已经存在!", "err")
            return redirect(url_for("home.user"))
        phone_count = User.query.filter_by(phone=data["phone"]).count()
        if data["phone"] != user.phone and phone_count == 1:
            flash("手机号码已经存在!", "err")
            return redirect(url_for("home.user"))
        user.name = data["name"]
        user.email = data["email"]
        user.phone = data["phone"]
        user.info = data["info"]
        db.session.add(user)
        db.session.commit()
        flash("修改成功!", "ok")
        return redirect(url_for("home.user"))
    return render_template("home/user.html", form=form, user=user)
@home.route("/pwd/", methods=["GET", "POST"])
@user_login_req
def pwd():
form = PwdForm()
if form.validate_on_submit():
data = form.data
user = User.query.filter_by(name=session["user"]).first()
if not user.check_pwd(data["old_pwd"]):
flash("旧密码错误!", "err")
return redirect(url_for('home.pwd'))
user.pwd = generate_password_hash(data["new_pwd"])
db.session.add(user)
db.session.commit()
flash("修改密码成功,请重新登录!", "ok")
return redirect(url_for('home.logout'))
return render_template("home/pwd.html", form=form)
@home.route("/comments/<int:page>/")
@user_login_req
def comments(page=None):
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == Comment.movie_id,
User.id == session["user_id"]
).order_by(
Comment.addtime.desc()
).paginate(page=page, per_page=10)
return render_template("home/comments.html", page_data=page_data)
# Member login history
@home.route("/loginlog/<int:page>/", methods=["GET"])
@user_login_req
def loginlog(page=None):
    """Paginated login history for the current member, newest first."""
    if page is None:
        page = 1
    logs = Userlog.query.filter_by(user_id=int(session["user_id"]))
    page_data = logs.order_by(Userlog.addtime.desc()).paginate(page=page, per_page=10)
    return render_template("home/loginlog.html", page_data=page_data)
# Add a movie to the member's favourites
@home.route("/moviecol/add/", methods=["GET"])
@user_login_req
def moviecol_add():
    """AJAX endpoint: favourite movie `mid` for user `uid`; returns {"ok": 1} on insert, {"ok": 0} if it already exists."""
    import json
    uid = request.args.get("uid", "")
    mid = request.args.get("mid", "")
    count = Moviecol.query.filter_by(
        user_id=int(uid),
        movie_id=int(mid)
    ).count()
    if count == 0:
        moviecol = Moviecol(
            user_id=int(uid),
            movie_id=int(mid)
        )
        db.session.add(moviecol)
        db.session.commit()
        data = dict(ok=1)
    else:
        # Fix: the original only handled count == 1, so `data` was unbound
        # (NameError) if duplicate rows ever existed; any count > 0 means
        # the favourite already exists.
        data = dict(ok=0)
    return json.dumps(data)
# Member favourites list
@home.route("/moviecol/<int:page>/")
@user_login_req
def moviecol(page=None):
    """Paginated list of the current member's favourite movies, newest first."""
    if page is None:
        page = 1
    # Fix: link Moviecol to User explicitly; without it the join degenerated
    # into a cross product and listed every member's favourites.
    page_data = Moviecol.query.join(
        Movie
    ).join(
        User
    ).filter(
        Movie.id == Moviecol.movie_id,
        User.id == Moviecol.user_id,
        User.id == session["user_id"]
    ).order_by(
        Moviecol.addtime.desc()
    ).paginate(page=page, per_page=10)
    return render_template("home/moviecol.html", page_data=page_data)
# Home page
@home.route("/<int:page>/", methods=["GET"])
def index(page=None):
    """Movie listing with tag/star filters and time/plays/comments ordering."""
    tags = Tag.query.all()
    query = Movie.query

    # Tag filter
    tid = request.args.get("tid", 0)
    if int(tid) != 0:
        query = query.filter_by(tag_id=int(tid))

    # Star rating filter
    star = request.args.get("star", 0)
    if int(star) != 0:
        query = query.filter_by(star=int(star))

    # Order by add time (1 = newest first, otherwise oldest first)
    time = request.args.get("time", 0)
    if int(time) != 0:
        query = query.order_by(Movie.addtime.desc() if int(time) == 1 else Movie.addtime.asc())

    # Order by play count
    pm = request.args.get("pm", 0)
    if int(pm) != 0:
        query = query.order_by(Movie.playnum.desc() if int(pm) == 1 else Movie.playnum.asc())

    # Order by comment count
    cm = request.args.get("cm", 0)
    if int(cm) != 0:
        query = query.order_by(Movie.commentnum.desc() if int(cm) == 1 else Movie.commentnum.asc())

    if page is None:
        page = 1
    page_data = query.paginate(page=page, per_page=10)
    # Echo the active filters back to the template so links can preserve them.
    p = dict(
        tid=tid,
        star=star,
        time=time,
        pm=pm,
        cm=cm,
    )
    return render_template("home/index.html", tags=tags, p=p, page_data=page_data)
# Coming-soon previews
@home.route("/animation/")
def animation():
    """Render the preview/trailer carousel page."""
    previews = Preview.query.all()
    return render_template("home/animation.html", data=previews)
# Search
@home.route("/search/<int:page>/")
def search(page=None):
    """Paginated title substring search (case-insensitive LIKE)."""
    if page is None:
        page = 1
    key = request.args.get("key", "")
    matches = Movie.query.filter(
        Movie.title.ilike('%' + key + '%')
    )
    movie_count = matches.count()
    page_data = matches.order_by(
        Movie.addtime.desc()
    ).paginate(page=page, per_page=10)
    page_data.key = key
    return render_template("home/search.html", movie_count=movie_count, key=key, page_data=page_data)
@home.route("/play/<int:id>/<int:page>/", methods=["GET", "POST"])
def play(id=None, page=None):
movie = Movie.query.join(Tag).filter(
Tag.id == Movie.tag_id,
Movie.id == int(id)
).first_or_404()
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == movie.id,
User.id == Comment.user_id
).order_by(
Comment.addtime.desc()
).paginate(page=page, per_page=10)
movie.playnum = movie.playnum + 1
form = CommentForm()
if "user" in session and form.validate_on_submit():
data = form.data
comment = Comment(
content=data["content"],
movie_id=movie.id,
user_id=session["user_id"]
)
db.session.add(comment)
db.session.commit()
movie.commentnum = movie.commentnum + 1
db.session.add(movie)
db.session.commit()
flash("添加评论成功!", "ok")
return redirect(url_for('home.play', id=movie.id, page=1))
db.session.add(movie)
db.session.commit()
return render_template("home/play.html", movie=movie, form=form, page_data=page_data)
@home.route("/video/<int:id>/<int:page>/", methods=["GET", "POST"])
def video(id=None, page=None):
movie = Movie.query.join(Tag).filter(
Tag.id == Movie.tag_id,
Movie.id == int(id)
).first_or_404()
if page is None:
page = 1
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == movie.id,
User.id == Comment.user_id
).order_by(
Comment.addtime.desc()
).paginate(page=page, per_page=10)
movie.playnum = movie.playnum + 1
form = CommentForm()
if "user" in session and form.validate_on_submit():
data = form.data
comment = Comment(
content=data["content"],
movie_id=movie.id,
user_id=session["user_id"]
)
db.session.add(comment)
db.session.commit()
movie.commentnum = movie.commentnum + 1
db.session.add(movie)
db.session.commit()
flash("添加评论成功!", "ok")
return redirect(url_for('home.video', id=movie.id, page=1))
db.session.add(movie)
db.session.commit()
return render_template("home/video.html", movie=movie, form=form, page_data=page_data)
@home.route("/tm/", methods=["GET", "POST"])
def tm():
import json
if request.method == "GET":
#获取弹幕消息队列
id = request.args.get('id')
key = "movie" + str(id)
if rd.llen(key):
msgs = rd.lrange(key, 0, 2999)
res = {
"code": 1,
"danmaku": [json.loads(v) for v in msgs]
}
else:
res = {
"code": 1,
"danmaku": []
}
resp = json.dumps(res)
if request.method == "POST":
#添加弹幕
data = json.loads(request.get_data())
msg = {
"__v": 0,
"author": data["author"],
"time": data["time"],
"text": data["text"],
"color": data["color"],
"type": data['type'],
"ip": request.remote_addr,
"_id": datetime.datetime.now().strftime("%Y%m%d%H%M%S") + uuid.uuid4().hex,
"player": [
data["player"]
]
}
res = {
"code": 1,
"data": msg
}
resp = json.dumps(res)
rd.lpush("movie" + str(data["player"]), json.dumps(msg))
return Response(resp, mimetype='application/json')
| [
"1010475688@qq.com"
] | 1010475688@qq.com |
f589f9162c85741a8ecf4c45ada324535f4a2746 | 5ef0b16f225e60752368cd8823ec554bce80367d | /project1/create_tables.py | dca686f98c07f3d6c8e0a8d9fc25ab9751ed82a7 | [] | no_license | DanielBBZ/UdacityDataEngineering | 10aa25c6c8e2c69e1f405135dbac1749e146f6f6 | fc0cd5ecb78cc2055f04ca003d9bbc6131e21db0 | refs/heads/main | 2023-03-12T22:51:40.457494 | 2021-03-04T03:52:35 | 2021-03-04T03:52:35 | 332,959,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
    """Drop and recreate the sparkify database, then connect to it.

    Returns:
        (cursor, connection) for the freshly created sparkifydb.
    """
    # Connect to the default 'studentdb' database to issue CREATE DATABASE.
    default_conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=postgres password=student")
    default_conn.set_session(autocommit=True)
    default_cur = default_conn.cursor()

    # Recreate sparkifydb from scratch with UTF8 encoding.
    default_cur.execute("DROP DATABASE IF EXISTS sparkifydb")
    default_cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")

    # Done with the bootstrap connection.
    default_conn.close()

    # Connect to the new database and hand back its cursor/connection.
    conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=postgres password=student")
    cur = conn.cursor()
    return cur, conn
def drop_tables(cur, conn):
    """Drop every table listed in `drop_table_queries`.

    A failing statement is reported but does not abort the remaining drops.
    """
    for statement in drop_table_queries:
        try:
            cur.execute(statement)
            conn.commit()
        except psycopg2.Error as err:
            print("Error: Could not drop table from query: {}".format(statement))
            print(err)
def create_tables(cur, conn):
    """Create every table listed in `create_table_queries`.

    A failing statement is reported but does not abort the remaining creates.
    """
    for statement in create_table_queries:
        try:
            cur.execute(statement)
            conn.commit()
        except psycopg2.Error as err:
            print("Error: Could not create table from query: {}".format(statement))
            print(err)
def main():
    """Rebuild the sparkify database end to end.

    Recreates sparkifydb, drops any existing tables, creates the schema,
    and finally closes the connection.
    """
    cur, conn = create_database()

    drop_tables(cur, conn)
    print("Table dropped successfully!!")

    create_tables(cur, conn)
    print("Table created successfully!!")

    conn.close()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
607305eb9ef2fd61303cc596202bf4aa4c68067d | 8def641e9c0f828834383a8d6fd6050836cc06e5 | /macpy/FXVolDerive.py | 3be62b08ea11f41dd948458913b59a71af8806ff | [] | no_license | sbtong/LilRepo | 93cb83e16e89c241c3efbf42983792d31139deaa | 4f4c5a9d2578a76080a8a6d4ea69426b07df1f85 | refs/heads/master | 2021-03-19T07:38:48.727898 | 2017-06-19T22:46:54 | 2017-06-19T22:46:54 | 94,366,409 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,627 | py | import pandas as pd
import numpy as np
import macpy.utils.database as db
import macpy.dateflow as df
import unittest
import datetime as dt
import string
import getpass
import bond as bond
from optparse import OptionParser
from scipy.stats import norm
from scipy.optimize import brentq, fsolve
import logging
from utils import Utilities
def get_current_time():
    # Current wall-clock time as a string, truncated from microsecond
    # to millisecond precision (drops the last three digits).
    now = dt.datetime.now()
    return str(now)[:-3]
def split_list(mylist, step=30):
    # Partition mylist into consecutive chunks of at most `step` items.
    chunks = []
    start = 0
    while start < len(mylist):
        chunks.append(mylist[start:start + step])
        start += step
    return chunks
class FXVolDerive:
    """Derive FX implied-vol quotes and implied strikes from delta quotes.

    Input rows come from MarketData (ATM, risk-reversal R10/R25 and
    butterfly B10/B25 per TradeDate/VolSurfaceId/Tenor); output is a SQL
    batch of INSERTs into MarketData.dbo.FXVolDerive. (Python 2 module.)
    """
    def __init__(self, startDate, endDate, strikeSwitch = 'on', levelSwitch = 'off', database='PROD'):
        # strikeSwitch 'on': also solve for implied strikes; 'off': vols only.
        # levelSwitch 'on': additionally derive the 10/90-delta wing vols.
        self.startDate = startDate
        self.endDate = endDate
        self.strikeSwitch = strikeSwitch
        self.levelSwitch = levelSwitch
        self.database = database
    def derive_FXVol(self):
        """Build and return the SQL batch (delete + inserts) for the range.

        Statements are returned as one ';'-joined string and are NOT
        executed here. Any group that fails to process is silently skipped.
        """
        volSource = db.FXVolDerive(self.startDate, self.endDate, database=self.database)
        dataframeExtracted = volSource.extract_from_db()
        sqlList = []
        # First statement clears previously derived rows in the date range.
        sql_statement_delete = self.delete_tradeDate_sql(self.startDate, self.endDate)
        print "processing: ", self.startDate, self.endDate
        sqlList.append(sql_statement_delete+"\n")
        computeTimeStart = dt.datetime.now()
        # One group per (trade date, surface, tenor) pillar.
        grouped = dataframeExtracted.groupby(['TradeDate', 'VolSurfaceId', 'TenorEnum'])
        for df in grouped:
            try:
                newdf = df[1]
                VolSurfaceDataId = str(newdf['VolSurfaceDataId'].values[0])
                SettleCurrency = str(newdf['SettleCurrency'].values[0])
                ForeignCurrency = str(newdf['ForeignCurrency'].values[0])
                VolSurfaceId=newdf['VolSurfaceId'].values[0]
                TenorEnum = str(newdf['TenorEnum'].values[0])
                TradeDate = dt.datetime.strptime(newdf['TradeDate'].values[0],"%Y-%m-%d")
                username = getpass.getuser()
                TenorInYears = self.convert_tenor(TenorEnum)
                # Convention used here: tenors beyond 2y quote forward delta.
                if TenorInYears>2.0:
                    DeltaConv = 'ForwardDelta'
                else:
                    DeltaConv = 'SpotDelta'
                if 'ATM' in list(newdf['Delta']):
                    # ATM pillar (delta 0.5); quotes are in percent, hence /100.
                    VolATM = list(newdf[(newdf['Delta']=='ATM')].Quote)[0]
                    StrikeATM = self.compute_strike(TenorInYears,0.5,TradeDate,SettleCurrency,ForeignCurrency,VolATM/100,self.strikeSwitch)
                    sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.5', 'DeltaConvention': DeltaConv, 'premiumAdjust': StrikeATM[3], 'ImpliedStrike': StrikeATM[0], 'Quote': VolATM, 'kmin':StrikeATM[1], 'kmax':StrikeATM[2], 'FXSpot': StrikeATM[4], 'r_d': StrikeATM[5], 'r_f': StrikeATM[6], 'Lud': get_current_time(), 'Lub': username}))
                    if self.levelSwitch == 'on':
                        # 10/90-delta wings: vol = ATM + butterfly +/- rr/2.
                        if 'B10' in list(newdf['Delta']) and 'R10' in list(newdf['Delta']):
                            VolB10 = list(newdf[(newdf['Delta']=='B10')].Quote)[0]
                            VolR10 = list(newdf[(newdf['Delta']=='R10')].Quote)[0]
                            Vol10 = (2*VolB10+2*VolATM+VolR10)/2
                            Vol90 = (2*VolB10+2*VolATM-VolR10)/2
                            Strike10 = self.compute_strike(TenorInYears,0.1,TradeDate,SettleCurrency,ForeignCurrency,Vol10/100, self.strikeSwitch)
                            Strike90 = self.compute_strike(TenorInYears,0.9,TradeDate,SettleCurrency,ForeignCurrency,Vol90/100, self.strikeSwitch)
                            sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.1', 'DeltaConvention': DeltaConv, 'premiumAdjust': Strike10[3],'ImpliedStrike': Strike10[0], 'Quote': Vol10, 'kmin': Strike10[1], 'kmax': Strike10[2], 'FXSpot': Strike10[4], 'r_d': Strike10[5], 'r_f': Strike10[6], 'Lud': get_current_time(), 'Lub': username}))
                            sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.9', 'DeltaConvention': DeltaConv, 'premiumAdjust': Strike90[3], 'ImpliedStrike': Strike90[0], 'Quote': Vol90, 'kmin': Strike90[1], 'kmax': Strike90[2], 'FXSpot': Strike90[4], 'r_d': Strike90[5], 'r_f': Strike90[6], 'Lud': get_current_time(), 'Lub': username}))
                if 'B25' in list(newdf['Delta']) and 'R25' in list(newdf['Delta']):
                    # 25/75-delta pillars from the 25-delta butterfly/risk-reversal.
                    VolB25 = list(newdf[(newdf['Delta']=='B25')].Quote)[0]
                    VolR25 = list(newdf[(newdf['Delta']=='R25')].Quote)[0]
                    Vol25 = (2*VolB25+2*VolATM+VolR25)/2
                    Vol75 = (2*VolB25+2*VolATM-VolR25)/2
                    Strike25 = self.compute_strike(TenorInYears,0.25,TradeDate,SettleCurrency,ForeignCurrency,Vol25/100, self.strikeSwitch)
                    Strike75 = self.compute_strike(TenorInYears,0.75,TradeDate,SettleCurrency,ForeignCurrency,Vol75/100, self.strikeSwitch)
                    # Quadratic-in-delta smile extrapolation to the 10/90 wings
                    # (stored with Delta '0.1EXTRAP'/'0.9EXTRAP').
                    DVol10 = VolATM - VolR25*(0.1-0.5)+16*VolB25*np.square((0.1-0.5))
                    DVol90 = VolATM - VolR25*(0.9-0.5)+16*VolB25*np.square((0.9-0.5))
                    DStrike10 = self.compute_strike(TenorInYears,0.1,TradeDate,SettleCurrency,ForeignCurrency,DVol10/100, self.strikeSwitch)
                    DStrike90 = self.compute_strike(TenorInYears,0.9,TradeDate,SettleCurrency,ForeignCurrency,DVol90/100, self.strikeSwitch)
                    sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.25', 'DeltaConvention': DeltaConv, 'premiumAdjust': Strike25[3],'ImpliedStrike': Strike25[0], 'Quote': Vol25, 'kmin': Strike25[1], 'kmax': Strike25[2],'FXSpot': Strike25[4], 'r_d': Strike25[5], 'r_f': Strike25[6], 'Lud': get_current_time(), 'Lub': username}))
                    sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.75', 'DeltaConvention': DeltaConv, 'premiumAdjust': Strike75[3],'ImpliedStrike': Strike75[0], 'Quote': Vol75, 'kmin': Strike75[1], 'kmax': Strike75[2], 'FXSpot': Strike75[4], 'r_d': Strike75[5], 'r_f': Strike75[6],'Lud': get_current_time(), 'Lub': username}))
                    if self.levelSwitch == 'on':
                        sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.1EXTRAP', 'DeltaConvention': DeltaConv, 'premiumAdjust': DStrike10[3], 'ImpliedStrike': DStrike10[0], 'Quote': DVol10, 'kmin': DStrike10[1], 'kmax': DStrike10[2],'FXSpot': DStrike10[4], 'r_d': DStrike10[5], 'r_f': DStrike10[6], 'Lud': get_current_time(), 'Lub': username}))
                        sqlList.append(self.create_sql_statement({'VolSurfaceDataId': VolSurfaceDataId, 'SettleCurrency':SettleCurrency, 'ForeignCurrency': ForeignCurrency, 'VolSurfaceId': VolSurfaceId, 'TenorEnum': TenorEnum, 'TradeDate': TradeDate, 'Delta': '0.9EXTRAP', 'DeltaConvention': DeltaConv, 'premiumAdjust': DStrike90[3], 'ImpliedStrike': DStrike90[0], 'Quote': DVol90, 'kmin': DStrike90[1], 'kmax': DStrike90[2], 'FXSpot': DStrike90[4], 'r_d': DStrike90[5], 'r_f': DStrike90[6],'Lud': get_current_time(), 'Lub': username}))
            except:
                # NOTE(review): bare except silently drops a whole pillar
                # (including on KeyboardInterrupt); consider logging.
                continue
        computeTimeEnd = dt.datetime.now()
        print "Finished processing: ", self.startDate, self.endDate, "Seconds spent: ", round((computeTimeEnd - computeTimeStart).total_seconds(), 5)
        sql_statement = ';\n'.join(sqlList)
        return sql_statement
        #try:
        #    db.MSSQL.execute_commit(sql_statement)
        #except Exception as e:
        #    print 'Exception on execute_commit(sql_statement)'
        #    print e.message
    def premium_adjust_flag_select(self, settleCurrency, foreignCurrency):
        """Return 'N' for the non-premium-adjusted USD pairs, else 'Y'."""
        currencyPairs = foreignCurrency+settleCurrency
        nonPaList = ['GBPUSD', 'EURUSD', 'AUDUSD', 'NZDUSD']
        if currencyPairs in nonPaList:
            adjustflag = 'N'
        else:
            adjustflag = 'Y'
        return adjustflag
    def FXSpotRateDerive(self, tradeDate, currency):
        """Look up the FX spot rate for `currency` on `tradeDate` (YYYYMMDD)."""
        DataIdDF = db.FXCurrencyDataId(currency)
        IDdf = DataIdDF.extract_from_db()
        AxiomaDataId = IDdf['AxiomaDataId'].values[0]
        FXSpotDF = db.FXSpotRate(tradeDate,AxiomaDataId)
        df = FXSpotDF.extract_from_db()
        filterdf = df[(df['TradeDate']==tradeDate)]
        FXSpotRate = list(filterdf['FXRate'])[0]
        return FXSpotRate
    def compute_strike(self, tenorInYears, delta,tradeDate, settleCurrency, foreignCurrency, vol, strikeSwitch):
        """Gather market inputs (rates, spot, premium-adjust flag) and solve
        for the implied strike at the given delta.

        Returns the list produced by func_implied_strike:
        [implied_strike, k_min, k_max, adjustflag, FXSpotRate, r_d, r_f].
        """
        if strikeSwitch == 'off':
            # Strike inference disabled: emit SQL NULL placeholders.
            r_d = 'NULL'
            r_f = 'NULL'
            FXSpotRate = 'NULL'
            adjustflag = self.premium_adjust_flag_select(settleCurrency, foreignCurrency)
        else:
            # Domestic/foreign zero rates from the government curves.
            domesticCurveName = bond.convert_gvt_curve(settleCurrency)
            foreignCurveName = bond.convert_gvt_curve(foreignCurrency)
            domesticCurve = bond.create_yield_curve(tradeDate, domesticCurveName)
            foreignCurve = bond.create_yield_curve(tradeDate, foreignCurveName)
            tradedt = pd.to_datetime(tradeDate).strftime('%Y%m%d')
            r_d = domesticCurve(tenorInYears)
            r_f = foreignCurve(tenorInYears)
            # Spot is quoted vs USD; cross rates go through the two USD legs.
            if settleCurrency == 'USD':
                FXSpotRate = self.FXSpotRateDerive(tradedt, foreignCurrency)
                adjustflag = self.premium_adjust_flag_select(settleCurrency, foreignCurrency)
            elif foreignCurrency == 'USD':
                FXSpotRate = 1/self.FXSpotRateDerive(tradedt, settleCurrency)
                adjustflag = self.premium_adjust_flag_select(settleCurrency, foreignCurrency)
            else:
                FXSpotRateForeign = self.FXSpotRateDerive(tradedt, foreignCurrency)
                FXSpotRateDomestic = self.FXSpotRateDerive(tradedt, settleCurrency)
                FXSpotRate = FXSpotRateForeign/FXSpotRateDomestic
                adjustflag = self.premium_adjust_flag_select(settleCurrency, foreignCurrency)
        implied_strike_analysis_list = self.func_implied_strike(tenorInYears, FXSpotRate, r_d, r_f, delta,vol, adjustflag, strikeSwitch)
        return implied_strike_analysis_list
    def func_implied_strike(self, tenorInYears, FXSpotRate, r_d, r_f, delta, vol, adjustflag, strikeSwitch):
        """Solve the delta-to-strike relation for one quote.

        Non-adjusted deltas invert analytically; premium-adjusted deltas are
        solved numerically with brentq on [k_min, k_max], where k_min is the
        maximum of the adjusted-delta function (found via fsolve) and k_max
        is the unadjusted analytic strike.

        NOTE(review): premium_adjust_flag_select only ever returns 'Y'/'N',
        so the `adjustflag == 'False'` analytic branch looks unreachable —
        confirm intended flag values.
        """
        if strikeSwitch == 'off':
            k_min = 'N/A'
            k_max = 'N/A'
            implied_strike = 'NULL'
        else:
            FXForwardRate = FXSpotRate*np.exp((r_d-r_f)*tenorInYears)
            if adjustflag == 'False':
                if tenorInYears >2.0:
                    # Forward-delta inversion.
                    implied_strike = FXForwardRate*np.exp(-vol*np.sqrt(tenorInYears)*norm.ppf(delta)+0.5*tenorInYears*np.square(vol))
                    k_min = 'N/A'
                    k_max = 'N/A'
                else:
                    # Spot delta: strip the foreign discount before inverting.
                    normInverse = delta/np.exp(-r_f*tenorInYears)
                    if normInverse <= 0 or normInverse >1:
                        print "Inverse Nomral Function failed for Value ", normInverse
                        implied_strike = 'NULL'
                    else:
                        implied_strike = FXForwardRate*np.exp(-vol*np.sqrt(tenorInYears)*norm.ppf(normInverse)+0.5*tenorInYears*np.square(vol))
                    k_min = 'N/A'
                    k_max = 'N/A'
            else:
                if tenorInYears > 2.0:
                    # Upper bracket: unadjusted forward-delta strike.
                    k_max = FXForwardRate*np.exp(-vol*np.sqrt(tenorInYears)*norm.ppf(delta)+0.5*tenorInYears*np.square(vol))
                    #strikeImplied = k_max
                    #d_plus = (np.log(FXForwardRate/strikeImplied)+0.5*np.square(vol)*tenorInYears)/(vol*np.sqrt(tenorInYears))
                    #d_minus = d_plus - vol*np.sqrt(tenorInYears)
                    #callValue = np.exp(-r_d*tenorInYears)*(FXForwardRate*norm.cdf(d_plus)-strikeImplied*norm.cdf(d_minus))
                    #adjustedDelta = delta - callValue/FXSpotRate
                    # Premium-adjusted forward delta minus the target delta.
                    implicit_func_strike = lambda k: k/FXForwardRate*norm.cdf((np.log(FXForwardRate/k)-0.5*np.square(vol)*tenorInYears)/(vol*np.sqrt(tenorInYears)))-delta
                else:
                    normInverse = delta/np.exp(-r_f*tenorInYears)
                    k_max = FXForwardRate*np.exp(-vol*np.sqrt(tenorInYears)*norm.ppf(normInverse)+0.5*tenorInYears*np.square(vol))
                    #strikeImplied = k_max
                    #d_plus = (np.log(FXForwardRate/strikeImplied)+0.5*np.square(vol)*tenorInYears)/(vol*np.sqrt(tenorInYears))
                    #d_minus = d_plus - vol*np.sqrt(tenorInYears)
                    #callValue = np.exp(-r_d*tenorInYears)*(FXForwardRate*norm.cdf(d_plus)-strikeImplied*norm.cdf(d_minus))
                    #adjustedDelta = delta - callValue/FXSpotRate
                    # Premium-adjusted spot delta minus the target delta.
                    implicit_func_strike = lambda k: np.exp(-r_f*tenorInYears)*k/FXForwardRate*norm.cdf((np.log(FXForwardRate/k)-0.5*np.square(vol)*tenorInYears)/(vol*np.sqrt(tenorInYears)))-delta
                # Lower bracket: stationary point of the adjusted delta.
                implict_func_min = lambda x: vol*np.sqrt(tenorInYears)*norm.cdf(x)-norm.pdf(x)
                implied_d = fsolve(implict_func_min,0.5)
                k_min = FXForwardRate*np.exp(-vol*np.sqrt(tenorInYears)*implied_d-0.5*tenorInYears*np.square(vol))[0]
                try:
                    implied_strike = brentq(implicit_func_strike, k_min, k_max)
                except:
                    implied_strike = 'NULL'
                    print 'Fail to compute Implied Strike for tenor ', tenorInYears, ' at Delta level ', delta
        Implied_strike_diagnose_list = [implied_strike, k_min, k_max, adjustflag, FXSpotRate, r_d, r_f]
        return Implied_strike_diagnose_list
    def convert_tenor(self,tenor):
        """Map a tenor code ('1W'..'10Y') to year fraction; unknown -> 1.0."""
        tenorInYears = 1.0
        if tenor == '1W':
            tenorInYears = 1.0/52
        elif tenor == '1M':
            tenorInYears = 1.0/12
        elif tenor == '2M':
            tenorInYears = 2.0/12
        elif tenor == '3M':
            tenorInYears = 3.0/12
        elif tenor == '6M':
            tenorInYears = 6.0/12
        elif tenor == '1Y':
            tenorInYears = 1.0
        elif tenor == '2Y':
            tenorInYears = 2.0
        elif tenor == '3Y':
            tenorInYears = 3.0
        elif tenor == '5Y':
            tenorInYears = 5.0
        elif tenor == '10Y':
            tenorInYears = 10.0
        return tenorInYears
    def create_sql_statement(self, volDictionary):
        """Render one INSERT for FXVolDerive from the value dictionary."""
        sqlstatement = string.Template("""
        INSERT INTO MarketData.dbo.FXVolDerive
        VALUES ('$VolSurfaceDataId','$SettleCurrency', '$ForeignCurrency', '$VolSurfaceId','$TenorEnum','$TradeDate', '$Delta', '$DeltaConvention', '$premiumAdjust', $ImpliedStrike, '$Quote', '$kmin', '$kmax', '$FXSpot', '$r_d', '$r_f', '$Lud', '$Lub')
        """).substitute(volDictionary)
        return sqlstatement
    def delete_tradeDate_sql(self, startDate, endDate):
        """Render the DELETE that clears derived rows in [startDate, endDate]."""
        sqlstatement = string.Template("""
        DELETE FROM MarketData.dbo.FXVolDerive
        WHERE TradeDate >= '$startDate'
        and TradeDate <= '$endDate'
        """).substitute({'startDate': startDate, 'endDate':endDate })
        return sqlstatement
def write_vol_to_db(args):
    # Worker entry point for map()/ProcessPoolExecutor: unpack one
    # (startDate, endDate, strikeSwitch, levelSwitch, enviroment) tuple
    # and return the generated SQL batch for that date range.
    startDate, endDate, strikeSwitch, levelSwitch, enviroment = args
    deriver = FXVolDerive(startDate, endDate, strikeSwitch, levelSwitch, enviroment)
    return deriver.derive_FXVol()
if __name__=='__main__':
    # CLI driver: split the business-day range into 30-day batches, generate
    # the SQL per batch (optionally in parallel), then commit each batch.
    import FXVolDerive as fv
    import concurrent.futures
    import os
    parser = OptionParser()
    parser.add_option("-s", "--startDate", dest="startDate", help="Starting trade date to run FXVolDerive", metavar="2014-05-01 example")
    parser.add_option("-e", "--endDate", dest="endDate", help="End trade date to run curve FXVolDerive", metavar="2014-05-02 example")
    parser.add_option('-d', '--enviroment', dest='enviroment', help='enviroment name', metavar='example DEV')
    parser.add_option('-k',"--strikeSwitch", dest="strikeSwitch", help="turn off strike inference", metavar="off example")
    parser.add_option('-l',"--levelSwitch", dest="levelSwitch", help="turn off vol derivation at 10 and 90", metavar="off example")
    parser.add_option("-p", "--parallel", action="store_true", dest="parallelize", help="runs computation across all cores", metavar=" example -p off")
    (options, args) = parser.parse_args()
    startDate = options.startDate
    endDate = options.endDate
    enviroment = options.enviroment
    strikeSwitch = options.strikeSwitch
    levelSwitch = options.levelSwitch
    # Business days only; each ~30-day slice becomes one worker argument.
    timerange = pd.bdate_range(startDate, endDate)
    timerangelist = timerange.tolist()
    splitted_list = split_list(timerangelist)
    arg_list = [ (x[0].strftime('%Y-%m-%d'), x[-1].strftime('%Y-%m-%d'),strikeSwitch, levelSwitch, enviroment) for x in splitted_list]
    log_level = logging.INFO
    logging.basicConfig(level=log_level)
    startTime = get_current_time()
    logging.info("startTime %s"%startTime)
    #print "startTime: ", startTime
    computeTimeStart = dt.datetime.now()
    sql_collection = []
    # NOTE(review): the -p/--parallel flag selects the SEQUENTIAL map()
    # branch; the default (flag absent) uses the process pool. The flag
    # semantics appear inverted relative to its help text — confirm.
    if options.parallelize:
        logging.info("running single process")
        for (date_range, sql_to_execute) in zip(arg_list, map(write_vol_to_db, arg_list)):
            print "Completed SQL Generation: ", date_range[0], date_range[1]
            sql_collection.append([sql_to_execute,date_range[0],date_range[1]])
    else:
        logging.info("running multi-process")
        with concurrent.futures.ProcessPoolExecutor() as executor:
            for (date_range, sql_to_execute) in zip(arg_list, executor.map(write_vol_to_db, arg_list)):
                print "Completed SQL Generation: ", date_range[0], date_range[1]
                sql_collection.append([sql_to_execute,date_range[0],date_range[1]])
    logging.info("Finished computing results")
    print "Starting db inserts at time: ",get_current_time()
    # Commit each batch independently; one failed batch does not stop the rest.
    for sql_value in sql_collection:
        try:
            db.MSSQL.execute_commit(sql_value[0],enviroment)
            print 'SQL Insert success: start=', sql_value[1], ', end=', sql_value[2]
        except Exception as e:
            print 'Exception on Insert SQL commit!: start=', sql_value[1], ', end=', sql_value[2]
            print e.message, str(e)
    # Sanity check: count what actually landed in the table.
    voldataframe = db.FXVolDerivedCount(startDate, endDate, database=enviroment)
    dfVol = voldataframe.extract_from_db()
    logging.info("Number of Records Inserted: %r"%dfVol['Quote'].count())
    computeTimeEnd = dt.datetime.now()
    endTime = get_current_time()
    print "startTime: ", startTime
    print "endTime: ", endTime
print "Total Time in Hours: ", round((computeTimeEnd - computeTimeStart).total_seconds()/(60*60), 6) | [
"ly333@cornell.edu"
] | ly333@cornell.edu |
be7cb7a27ef0adb63d4f545aa0e65179798a183f | 832852c679816673f708860929a36a20ca8d3e32 | /Configurations/VBS/2016Optimization/DifferentialConfiguration/VBSDifferentialPt1_3GenBin/plot.py | 65923b13e594316da95b1114cf19ed0576291dc9 | [] | no_license | UniMiBAnalyses/PlotsConfigurations | c4ec7376e2757b838930dfb2615e1dc99a64e542 | 578fe518cfc608169d3418bcb63a8342d3a24390 | refs/heads/master | 2023-08-31T17:57:45.396325 | 2022-09-01T10:13:14 | 2022-09-01T10:13:14 | 172,092,793 | 0 | 13 | null | 2023-04-27T10:26:52 | 2019-02-22T15:52:44 | Python | UTF-8 | Python | false | false | 8,149 | py |
# plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots (merge different sample during plot).
# If not defined, normal plots is used
#
Red=632; Violet=880; Green=416; Orange=800; Yellow=400; Azure=860
signal=0; blind=1
groupPlot['ChargeMisId'] = {
'nameHR' : "ChMisId",
'isSignal' : 0,
'color': Red, # kRed
'samples' : ['ChMisId' , 'ttbar']
}
groupPlot['VV'] = {
'nameHR' : "VV",
'isSignal' : 0,
'color' : Violet+6, # kViolet+10
'samples' : ['ZZ' , 'WZ' , 'DPS']
}
groupPlot['VVV'] = {
'nameHR' : 'VVV',
'isSignal' : 0,
'color': Green, # kGreen
'samples' : ['VVV']
}
groupPlot['Vg'] = {
'nameHR' : "V#gamma",
'isSignal' : 0,
'color' : Orange-3, # kOrange + 10
'samples' : ['Vg']
}
groupPlot['WW_strong'] = {
'nameHR' : "WW QCD",
'isSignal' : 0,
'color' : Violet, # kViolet
'samples' : ['WW_strong']
}
groupPlot['non-prompt'] = {
'nameHR' : 'non-Prompt',
'isSignal' : 0,
'color': Yellow, # kYellow
'samples' : ['Fake_lep','DY_promptSubtr','lep_TT_promptSubtr','singleTop_promptSubtr','singleAntiTop_promptSubtr','ggWWTo2L2Nu_promptSubtr','WWTo2L2Nu_promptSubtr','Vg_promptSubtr','ZZ_promptSubtr','WpWpJJ_promptSubtr','WmWmJJ_promptSubtr','WpWpJJ_QCD_promptSubtr','VVV_promptSubtr','DPS_promptSubtr','WZ_promptSubtr']
}
#plot = {}
# keys here must match keys in samples.py
#
##Charge Misidentification
plot['ChMisId'] = {
'color': Red, # kRed
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ttbar'] = {
'nameHR' : 't#bar{t}',
'color': Red, # kRed
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Fake and prompt substraction
plot['Fake_lep'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['DY_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['lep_TT_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['singleTop_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['singleAntiTop_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ggWWTo2L2Nu_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WWTo2L2Nu_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Vg_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ZZ_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WpWpJJ_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WmWmJJ_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WpWpJJ_QCD_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VVV_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['DPS_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WZ_promptSubtr'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Irreducible Background
plot['WW_strong'] = {
'color': Violet, # kViolet
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Vg'] = {
'color': Orange+10, # kOrange+10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Reducible Background
##VV plot
plot['ZZ'] = {
'color': Violet+10, # kViolet+10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WZ'] = {
'color': Violet+10, # kViolet+10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['DPS'] = {
'color': Violet+10, # kViolet+10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##VVV
plot['VVV'] = {
'color': Green, # kGreen
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : blind ,
'scale' : 1.0
}
groupPlot['WW_EWK_bin0'] = {'nameHR' : "Signal bin 0",
'isSignal' : signal,
'color' : Azure+0-9, # kAzure+4
'samples' : ['Signal_bin0']
}
groupPlot['WW_EWK_bin1'] = {'nameHR' : "Signal bin 1",
'isSignal' : signal,
'color' : Azure+0-5, # kAzure+4
'samples' : ['Signal_bin1']
}
groupPlot['WW_EWK_bin2'] = {'nameHR' : "Signal bin 2",
'isSignal' : signal,
'color' : Azure+0-1, # kAzure+4
'samples' : ['Signal_bin2']
}
plot['Signal_bin0'] = {
'color': Azure+5, # kAzure+..
'isSignal' : signal,
'isData' : 0,
'scale' : 1.0
}
plot['Signal_bin1'] = {
'color': Azure+6, # kAzure+..
'isSignal' : signal,
'isData' : 0,
'scale' : 1.0
}
plot['Signal_bin2'] = {
'color': Azure+7, # kAzure+..
'isSignal' : signal,
'isData' : 0,
'scale' : 1.0
}
# additional options
legend['lumi'] = 'L = 35.9/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
| [
"alessandrofendillo@gmail.com"
] | alessandrofendillo@gmail.com |
cfbbccfbb28499d825414a4c03770d71a0783f86 | 0ad5abffdd15bca072ab8db068aab7e1bc6df167 | /NanoGardener/python/modules/LeptonMaker.py | c081cb86e230635a3145a1ee358104a8582dccfd | [] | no_license | pfackeldey/LatinoAnalysis | bf603af9c370b079c3d92e3ed49a5d7d05b87379 | 484a48ec6bfdb7edb06897be984eecfd1aae62fd | refs/heads/master | 2020-03-14T22:42:22.226962 | 2018-04-27T16:02:56 | 2018-04-27T16:02:56 | 131,827,114 | 0 | 0 | null | 2018-05-02T09:16:59 | 2018-05-02T09:16:59 | null | UTF-8 | Python | false | false | 8,092 | py | import ROOT
import os
import re
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from LatinoAnalysis.NanoGardener.data.LeptonMaker_cfg import List_newVar, Lep_var
from LatinoAnalysis.NanoGardener.data.common_cfg import Type_dict
#from LatinoAnalysis.NanoGardener.data.Trigger_names import TrigNames, SPTrigNames
class LeptonMaker(Module):
    '''
    put this file in LatinoAnalysis/NanoGardener/python/modules/
    Add extra variables to NANO tree: builds a merged, pt-ordered "Lepton"
    collection out of the NanoAOD Electron and Muon collections, and
    re-sorts the Electron/Muon/Jet collections by descending pt.
    '''
    def __init__(self):
        pass
    def beginJob(self):
        pass
    def endJob(self):
        pass
    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        self.initReaders(inputTree) # initReaders must be called in beginFile
        self.out = wrappedOutputTree
        # New branches
        for typ in List_newVar:
            for var in List_newVar[typ]:
                if 'Lepton_' in var: self.out.branch(var, typ, lenVar='nLepton')
                elif 'SPTrigger' in var: self.out.branch(var, typ, len(SPTrigNames))
                elif 'Trigger' in var: self.out.branch(var, typ, len(TrigNames))
                else: self.out.branch(var, typ)
        # Old branches to reorder
        self.list_old_br = {}
        self.list_old_br['Electron'] = []
        self.list_old_br['Muon'] = []
        self.list_old_br['Jet'] = []
        # Re-register every existing Electron_/Muon_/Jet_ branch so the
        # sorted values can be written back under the same names.
        for br in inputTree.GetListOfBranches():
            bname = br.GetName()
            btype = Type_dict[br.GetListOfLeaves()[0].GetTypeName()]
            if re.match('\AElectron_', bname):
                self.list_old_br['Electron'].append(bname)
                self.out.branch(bname, btype, lenVar='nElectron')
            if re.match('\AMuon_', bname):
                self.list_old_br['Muon'].append(bname)
                self.out.branch(bname, btype, lenVar='nMuon')
            if re.match('\AJet_', bname):
                self.list_old_br['Jet'].append(bname)
                self.out.branch(bname, btype, lenVar='nJet')
    def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        pass
    def initReaders(self,tree): # this function gets the pointers to Value and ArrayReaders and sets them in the C++ worker class
        # Cache array readers per collection, keyed by full branch name.
        self.electron_var = {}
        self.muon_var = {}
        self.jet_var = {}
        for br in tree.GetListOfBranches():
            bname = br.GetName()
            if re.match('\AElectron_', bname): self.electron_var[bname] = tree.arrayReader(bname)
            if re.match('\AMuon_', bname): self.muon_var[bname] = tree.arrayReader(bname)
            if re.match('\AJet_', bname): self.jet_var[bname] = tree.arrayReader(bname)
        self.nElectron = tree.valueReader('nElectron')
        self.nMuon = tree.valueReader('nMuon')
        self.nJet = tree.valueReader('nJet')
        self._ttreereaderversion = tree._ttreereaderversion # self._ttreereaderversion must be set AFTER all calls to tree.valueReader or tree.arrayReader
    def analyze(self, event):
        """process event, return True (go to next module) or False (fail, go to next event)"""
        if event._tree._ttreereaderversion > self._ttreereaderversion: # do this check at every event, as other modules might have read further branches
            self.initReaders(event._tree)
        # do NOT access other branches in python between the check/call to initReaders and the call to C++ worker code
        #--- Set vars
        nEl = int(self.nElectron)
        nMu = int(self.nMuon)
        nJt = int(self.nJet)
        nLep = nMu + nEl
        # Output buffers, pre-sized and filled at the sorted position.
        lep_dict = {}
        for lv in Lep_var:
            lep_dict[lv] = [0]*nLep
        lep_dict['instance'] = [0]*nLep
        ele_dict = {}
        for lv in self.list_old_br['Electron']:
            ele_dict[lv] = [0]*nEl
        muo_dict = {}
        for lv in self.list_old_br['Muon']:
            muo_dict[lv] = [0]*nMu
        jet_dict = {}
        for lv in self.list_old_br['Jet']:
            jet_dict[lv] = [0]*nJt
        #--- Electron Loops
        # For each object, pt_idx = number of objects with strictly larger pt,
        # i.e. its position in descending-pt order (O(n^2) pairwise scan).
        # NOTE(review): two objects with exactly equal pt get the same pt_idx,
        # so one would overwrite the other — confirm pt ties cannot occur.
        for iEle1 in range(nEl):
            pt_idx = 0
            pt1 = self.electron_var['Electron_pt'][iEle1]
            # Start comparing electrons
            for iEle2 in range(nEl):
                if iEle2 == iEle1: continue
                pt2 = self.electron_var['Electron_pt'][iEle2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iEle1: print('Electrons reordered')
            # Now index is set, fill the vars
            for var in ele_dict:
                # char-typed NanoAOD branches come back as str; store as int.
                if type(self.electron_var[var][iEle1]) is str:
                    ele_dict[var][pt_idx] = ord(self.electron_var[var][iEle1])
                else:
                    ele_dict[var][pt_idx] = self.electron_var[var][iEle1]
        #--- Muon Loops
        for iMu1 in range(nMu):
            pt_idx = 0
            pt1 = self.muon_var['Muon_pt'][iMu1]
            # Start comparing muons
            for iMu2 in range(nMu):
                if iMu2 == iMu1: continue
                pt2 = self.muon_var['Muon_pt'][iMu2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iMu1: print('Muons reordered')
            # Now index is set, fill the vars
            for var in muo_dict:
                if type(self.muon_var[var][iMu1]) is str:
                    muo_dict[var][pt_idx] = ord(self.muon_var[var][iMu1])
                else:
                    muo_dict[var][pt_idx] = self.muon_var[var][iMu1]
        #--- Lepton Loops
        # Merge the (already sorted) electron and muon buffers into one
        # pt-ordered Lepton collection; indices < nEl are electrons.
        for iLep1 in range(nLep):
            pt_idx = 0
            if iLep1 < nEl:
                pt1 = ele_dict['Electron_pt'][iLep1]
                pdgId1 = ele_dict['Electron_pdgId'][iLep1]
            else:
                pt1 = muo_dict['Muon_pt'][iLep1 - nEl]
                pdgId1 = muo_dict['Muon_pdgId'][iLep1 - nEl]
            # Start comparing leptons
            for iLep2 in range(nLep):
                if iLep2 == iLep1: continue
                if iLep2 < nEl:
                    pt2 = ele_dict['Electron_pt'][iLep2]
                else:
                    pt2 = muo_dict['Muon_pt'][iLep2]
                if pt1 < pt2:
                    pt_idx += 1
            # Now index is set, fill the vars
            # 'instance' records the index back into the source collection.
            if abs(pdgId1) == 11:
                for var in lep_dict:
                    if not 'instance' in var:
                        lep_dict[var][pt_idx] = ele_dict['Electron_'+var][iLep1]
                    else:
                        lep_dict[var][pt_idx] = iLep1
            elif abs(pdgId1) == 13:
                for var in lep_dict:
                    # Muons have no eCorr; store a neutral 1. instead.
                    if not 'instance' in var and not 'eCorr' in var:
                        lep_dict[var][pt_idx] = muo_dict['Muon_'+var][iLep1 - nEl]
                    elif 'eCorr' in var:
                        lep_dict[var][pt_idx] = 1.
                    else:
                        lep_dict[var][pt_idx] = iLep1 - nEl
        #--- Jet Loops
        for iJ1 in range(nJt):
            pt_idx = 0
            pt1 = self.jet_var['Jet_pt'][iJ1]
            # Start comparing jets
            for iJ2 in range(nJt):
                if iJ2 == iJ1: continue
                pt2 = self.jet_var['Jet_pt'][iJ2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iJ1: print('Jets reordered')
            # Now index is set, fill the vars
            for var in jet_dict:
                if type(self.jet_var[var][iJ1]) is str:
                    jet_dict[var][pt_idx] = ord(self.jet_var[var][iJ1])
                else:
                    jet_dict[var][pt_idx] = self.jet_var[var][iJ1]
        #--- Fill branches
        for var in lep_dict:
            self.out.fillBranch('Lepton_' + var, lep_dict[var])
        for var in ele_dict:
            self.out.fillBranch(var, ele_dict[var])
        for var in muo_dict:
            self.out.fillBranch(var, muo_dict[var])
        for var in jet_dict:
            self.out.fillBranch(var, jet_dict[var])
        return True
# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed
# (lazy factory: LeptonMaker is instantiated only when the framework actually calls lepMkr())
lepMkr = lambda : LeptonMaker()
| [
"senne.vanputte@student.uantwerpen.be"
] | senne.vanputte@student.uantwerpen.be |
eaaf216be821853937268b4966d0219606e5dc83 | 86f026df0f9c5734ffd7266e08e45a5c8f855359 | /dataapp/migrations/0001_initial.py | e1531da8975c84cddbfa3ea68c7c488015ea2500 | [] | no_license | stam007/api_shop | 38e23795a305f31d1bf1260cf6c4f118c99c9c92 | 443c2a2f9b6f204a2b194b1c6dd61b8b29d23c1c | refs/heads/master | 2020-06-16T03:39:38.137973 | 2019-07-05T21:37:03 | 2019-07-05T21:37:03 | 195,469,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Movie model/table."""
    # First migration of this app, so there is nothing earlier to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # Movie: auto-increment 'id' primary key plus a short 'title' column.
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=32)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
b3e95973c6a69542b95f65abad2fce86b9661a1d | b40a4f302dbfec6b71c4b14350b7fd98a47f4319 | /Menu.py | 5eefb4ab7d5efa3c0082ed93a54a6868eddf06be | [] | no_license | Babsia/ej8-2021 | 93e7fa14d61377cd2510b2152ce0078fd426c515 | 09bd15bb535cbc92c7908d3bcc6fe86bbc3e7a77 | refs/heads/main | 2023-04-12T19:29:10.151449 | 2021-04-27T15:25:29 | 2021-04-27T15:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | from manejadorconjuntos import manejador
class menuu:
    """Console menu over a set 'manejador': union, difference and equality."""
    __switcher = None
    __M = None

    def __init__(self):
        # Map each menu key to its handler; 'd' simply announces exit.
        self.__switcher = {
            'a': self.opcion1,
            'b': self.opcion2,
            'c': self.opcion3,
            'd': self.salir,
        }
        self.__M = manejador()
        self.__M.testing()

    def getSwitcher(self):
        """Expose the key -> handler dispatch table."""
        return self.__switcher

    def opcion(self, op):
        """Dispatch menu option *op*; unknown keys print a warning."""
        handler = self.__switcher.get(op, lambda: print("Opción no válida"))
        handler()

    def salir(self):
        print('Salir')

    def _pedir_indices(self, prompt1, prompt2):
        # Read two 1-based set numbers from the user, return 0-based indices.
        primero = int(input(prompt1)) - 1
        segundo = int(input(prompt2)) - 1
        return primero, segundo

    def opcion1(self):
        """Union of two sets chosen by the user."""
        i, j = self._pedir_indices(
            "ingrese el numero de conjunto a unir: ",
            "ingrese el segundo numero de conjunto a unir: ")
        self.__M.manejadorunion(i, j).mostrar()

    def opcion2(self):
        """Difference of two sets chosen by the user."""
        i, j = self._pedir_indices(
            "ingrese el numero de conjunto: ",
            "ingrese el segundo numero de conjunto para hacer la diferencia: ")
        self.__M.manejadordif(i, j).mostrar()

    def opcion3(self):
        """Equality check of two sets chosen by the user."""
        i, j = self._pedir_indices(
            "ingrese el numero de conjunto: ",
            "ingrese el segundo numero de conjunto para comprobar igualdad: ")
        # '== True' kept on purpose: mirrors the original strict comparison.
        if self.__M.manejadoreq(i, j) == True:
            print("los conjuntos son iguales")
        else:
            print("los conjuntos son distintos")

    def mostrar(self):
        self.__M.mostrar2()
| [
"noreply@github.com"
] | noreply@github.com |
601659886587188883f736f881bddea0c12f8be8 | 9a3bca16101f6688e353f74586370dd0ce97cc28 | /hw2_200mhz/runmf | 7bc3321a3813340af3cb6c5fdc33c61f0c283692 | [] | no_license | Asfagus/300Mhz | a66d06fc366ff68f2c7f0d47547ae89329e834ff | e654c52c04f252fba3f7640b4f8e40724e186854 | refs/heads/main | 2023-01-14T07:10:40.773289 | 2020-11-19T01:31:45 | 2020-11-19T01:31:45 | 314,091,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,099 | #!/usr/bin/python3
import sys
import time
import os
import subprocess
import socket
import argparse
from datetime import date
success="With pride, you passed the test"
###############################
# copies a file if needed #
###############################
def copyifneeded(localfile, remotefile):
    """Copy *remotefile* to *localfile* unless the local copy already exists."""
    if not os.path.isfile(localfile):
        print("\nCopying {0} to local directory".format(str(localfile)))
        subprocess.call(["cp", str(remotefile), str(localfile)])
####################################
# checks to see if a file contains #
# any substrings passed #
####################################
def filehasany(fn, stx):
    """Return True if file *fn* contains any of the substrings in *stx*.

    Matching is case-insensitive on the file side (each line is lowercased),
    so the entries in stx are expected to be lowercase already.  Every
    matching line is echoed once per matching substring, mirroring the
    original behaviour.  Returns False when fn does not exist.
    """
    rv = False
    if os.path.isfile(fn):
        # Bug fix: the original opened the file and never closed it;
        # 'with' guarantees the handle is released on every path.
        with open(fn, "r") as fw:
            for ln in fw:
                for sub in stx:
                    if ln.lower().find(sub) >= 0:
                        print("-->", ln.strip(), "<--")
                        rv = True
    return rv
###############################
# checks to see if a file has #
# a string #
###############################
def filehas(fn, stx):
    """Return True if file *fn* contains the substring *stx* (case-sensitive).

    The first matching line is echoed.  Returns False when fn is missing.
    """
    if os.path.isfile(fn):
        # 'with' replaces the two hand-rolled fw.close() calls the original
        # needed on its separate exit paths.
        with open(fn, "r") as fw:
            for ln in fw:
                if ln.find(stx) >= 0:
                    print("-->", ln.strip(), "<--")
                    return True
    return False
##############################
# run vcs simulation #
##############################
def runvcs(debopt):
    """Run the VCS simulation of tbmf.sv and record pass/fail in resfile.

    On failure the global resfile is closed and the whole script exits.
    NOTE(review): dtxt (a -DRSEED define built from the user id) is computed
    but never inserted into the csh command below -- the command string has
    no format placeholder.  Confirm whether the seed was meant to be passed.
    """
    subprocess.call(["rm","-rf","simres.txt","simv","simv.daidir","csrc"])
    # Derive a per-user seed from the numeric uid; fall back to 42 on parse failure.
    res = subprocess.check_output(["id","-u"]).decode()
    try:
        bob=int(res)
    except:
        bob=42
    dtxt="-DRSEED={0}".format(bob)
    subprocess.call(["csh","-c",
         "./sv_vcs tbmf.sv | tee simres.txt"])
    if(filehas("simres.txt",success)):
        if(debopt):
            resfile.write("debug {0} \n".format(debopt))
        resfile.write("VCS simulation worked\n");
        print("\n\n\n------ VCS simulation worked \n\n\n")
    else:
        resfile.write("VCS FAILED TO WORK\n")
        print("\n\n\nVCS Failed to work\n\n")
        resfile.close()
        exit()
##############################
# run ncverilog simulation #
##############################
def runnc(debopt):
    """Run the NCverilog RTL simulation and record pass/fail in resfile.

    On failure resfile is closed and the script exits.
    NOTE(review): .format(dtxt) is called on a command string with no {0}
    placeholder, so the +define+DEB debug switch is silently dropped --
    confirm intent.
    """
    print("\n\n Starting NC verilog \n\n")
    subprocess.call(["rm","-rf","simres.txt"])
    dtxt=""
    if debopt :
        dtxt="+define+DEB"
    subprocess.call(["csh","-c",
         "./sv_nc tbmf.sv | tee simres.txt".format(dtxt)])
    if(filehas("simres.txt",success)):
        if(debopt):
            resfile.write("debug {0} \n".format(debopt))
        resfile.write("NCverilog simulation worked\n");
        print("\n\n\n------ NCverilog simulation worked \n\n\n")
    else:
        resfile.write("NCverilog FAILED TO WORK\n")
        print("\n\n\nNCVerilog failed to work\n\n")
        resfile.close()
        exit()
##############################
# run ncverilog gate sim #
##############################
def rungates(debopt,clkperiod):
    """Run the NCverilog gate-level simulation and record pass/fail in resfile.

    On failure resfile is closed and the script exits.
    NOTE(review): both dtxt (+define+DEB) and ctxt (+define+CTime=...) are
    built but the command string has no format placeholders, so neither is
    ever passed to the simulator -- confirm intent.
    """
    print("\n\n Starting NC verilog gate level simulation\n\n")
    subprocess.call(["rm","-rf","simres.txt"])
    dtxt=""
    if debopt :
        dtxt="+define+DEB"
    ctxt="+define+CTime={0}".format(clkperiod)
    subprocess.call(["csh","-c",
        "./sv_ncgates tbmf_gates.sv | tee simres.txt".format(dtxt,ctxt)])
    if(filehas("simres.txt",success)):
        if(debopt):
            resfile.write("debug {0}\n".format(debopt))
        resfile.write("Gate level simulation worked\n");
        print("\n\n\n------ Gate level simulation worked \n\n\n")
    else:
        resfile.write("Gates FAILED TO WORK\n")
        print("\n\n\nGates failed to work\n\n")
        resfile.close()
        exit()
#####################################
# makes a synthesis script of the things
#####################################
def makeSynScript(fn, clkx):
    """Write a Design Compiler synthesis script for mf.sv into file *fn*.

    clkx is the target clock period; the design is compiled against a
    tightened clock of 0.75 * clkx (with a 0.7 * clkx max-delay bound),
    then timing is reported back at the full period.
    """
    clkperiod = float(clkx)
    with open(fn, "w") as out:
        out.write("""set link_library {/apps/toshiba/sjsu/synopsys/tc240c/tc240c.db_NOMIN25 /apps/synopsys/I-2013.12-SP5/libraries/syn/dw_foundation.sldb}
set target_library {/apps/toshiba/sjsu/synopsys/tc240c/tc240c.db_NOMIN25}
""")
        out.write("read_sverilog {0}\n".format("mf.sv"))
        # Tighten the clock during compile so the result has margin at clkx.
        out.write("create_clock clk -name clk -period {0}\n".format(clkperiod * 0.75))
        out.write("""set_propagated_clock clk
set_clock_uncertainty 0.25 clk
set_propagated_clock clk
set_output_delay 0.5 -clock clk [all_outputs]
set all_inputs_wo_rst_clk [remove_from_collection [remove_from_collection [all_inputs] [get_port clk]] [get_port reset]]
set_driving_cell -lib_cell CND2X1 $all_inputs_wo_rst_clk
set_input_delay 0.6 -clock clk $all_inputs_wo_rst_clk
set_output_delay 0.6 -clock clk [all_outputs]
set_fix_hold [ get_clocks clk ]
""")
        out.write("set_output_delay 0.3 -clock clk [all_outputs]\n")
        out.write("set_max_delay {0} -from [all_inputs] -to [all_outputs]\n".format(clkperiod * 0.7))
        out.write("compile_ultra\n")
        # Re-create the clock at the real target period before reporting.
        out.write("create_clock clk -name clk -period {0}\n".format(clkperiod))
        out.write("""
update_timing
report_timing -max_paths 5
""")
        out.write("""write -hierarchy -format verilog -output mf_gates.v
""")
        out.write("quit\n")
#####################################
# run the synopsys synthesizer #
#####################################
def runsynthesis(clkperiod):
    """Generate and run the dc_shell synthesis flow; record the outcome.

    Writes synthesis.script and a csh wrapper 'sss' into the current
    directory, runs it, then scans synres.txt for failure keywords.
    On any failure the script exits (resfile stays open here -- the process
    is terminating anyway).
    """
    makeSynScript("synthesis.script",clkperiod)
    fq = open("sss","w")
    fq.write("""#!/usr/bin/csh
source /apps/design_environment.csh
which dc_shell
dc_shell -f synthesis.script | tee synres.txt
""")
    fq.close()
    subprocess.call(["chmod","+x","sss"])
    subprocess.call(["rm","-f","synres.txt"])
    subprocess.call(["./sss"])
    if( not os.path.isfile("synres.txt") ):
        resfile.write("///// Synthesis failed to produce results /////\n")
        print("\n\nNo synthesis results\n\n")
        exit()
    # Any of these keywords in the dc_shell log counts as a failed run.
    if( filehasany("synres.txt",["error","latch","violated","timing arc"]) ):
        resfile.write("///// Synthesis failed /////\n");
        print("\n\nsynthesis failed\n\n")
        exit()
    resfile.write("Synthesis finished OK\n")
####################################
# The main routine #
####################################
def mainx():
    """Entry point: parse arguments, fetch support files, run sim/synth/gates.

    NOTE(review): 'clkperiod' and 'resultsFileName' are positional arguments,
    so their default= values are inert (argparse requires them unless
    nargs='?' is given) -- confirm whether they were meant to be optional.
    """
    clkperiod=3.0
    resfn="results.txt"
    parser = argparse.ArgumentParser(description='250 Mhz mf homework')
    parser.add_argument("-s","--synthesis",dest="synthesis",default=False,help="only run synthesis",action="store_true");
    parser.add_argument("--nogates",dest="nogates",default=False,help="No gate level simulation", action="store_true")
    parser.add_argument("-d","--debug",dest="debug",default=False,help="Debug mode",action="store_true")
    parser.add_argument("-g","--gates",dest="gates",default=False,help="just simulate gates",action="store_true")
    parser.add_argument("clkperiod",default=3.0)
    parser.add_argument("resultsFileName",default="results.txt")
    args = parser.parse_args()
    print(args)
    # return
    # Guard against users passing a design file where the results name goes.
    if args.resultsFileName.find(".v")>0 or args.resultsFileName.find(".sv")>0:
        print("the second argument should be the result name\n")
        print("Not a design file name")
        return
    resfn=args.resultsFileName
    clkperiod=float(args.clkperiod)
    global resfile
    resfile = open(resfn,"w")
    resfile.write("runmf script run started on {0}\n".format(str(time.asctime())))
    resfile.write("run on machine {0}\n\n".format(socket.gethostname()))
    resfile.write("Run with a clock period of {0}\n".format(clkperiod));
    # Pull in the testbench and tool wrappers if not already present locally.
    copyifneeded("sv_vcs","/home/morris/287/s20/hw1/sv_vcs")
    copyifneeded("tbmf.sv","/home/morris/287/s20/hw1/tbmf.sv")
    copyifneeded("tb5x5_gates.sv","/home/morris/287/s20/hw1/tb5x5_gates.sv")
    copyifneeded("sv_nc","/home/morris/287/s20/hw1/sv_nc")
    copyifneeded("sv_ncgates","/home/morris/287/s20/hw1/sv_ncgates")
    print("options syn {0} gates {1}".format(args.synthesis,args.gates))
    resfile.write( "options syn {0} gates {1}\n".format(args.synthesis,args.gates))
    # Full flow: RTL sims -> synthesis -> gate-level sim, unless flags skip stages.
    if(not (args.synthesis or args.gates) ):
        runvcs(args.debug)
        runnc(args.debug)
    if( not (args.gates) ):
        runsynthesis(clkperiod)
    if( not (args.nogates) ):
        rungates(args.debug,clkperiod)
    if(args.synthesis or args.gates or args.nogates):
        resfile.write("--->> Partial run, do not submit for credit <<--\n")
        print("--->> Partial run, do not submit for credit <<--\n")
        resfile.close()
        exit()
    resfile.write("Completed the mf run\n")
    print("\n\n\nCompleted the mf run\n\n")
    resfile.close()
mainx()
| [
"asfagus@gmail.com"
] | asfagus@gmail.com | |
75ccc33eabebffd38f8073ba96f8156bde50066c | ad921d190cba89b9ad3d169941727cfb497192e2 | /static_root/model_3d/asgi.py | e245fb55a9b882a91918f49b5795d2f0683352ab | [] | no_license | Rahmet97/3d-models.uz | c8f955d7e0af8ca6ff00f147bd5ef6c9133226e4 | 17fb9545475a416607c18e00388f8605ad623f5f | refs/heads/master | 2023-07-07T19:11:04.621195 | 2021-07-29T10:49:38 | 2021-07-29T10:49:38 | 361,868,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for model_3d project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'model_3d.settings')
# Module-level ASGI callable that the application server imports and serves.
application = get_asgi_application()
| [
"ruslanovrahmet@gmail.com"
] | ruslanovrahmet@gmail.com |
595d87247e2461ae9ffc07c514269c1026c31b6b | d499057c555f2c9217cdfa5052207b67ea54b5cc | /server.py | d28eb56439d3b0a8e280d4903de84f66fd06cc9b | [
"Apache-2.0"
] | permissive | defnngj/movie-website | 6fe1fcc2571c75dd7f423137833eb46c4ac7db1d | d1ffaf209b4c689cd5180b8a8bb1866ad6d0f0e8 | refs/heads/main | 2023-08-11T10:37:38.783093 | 2021-09-27T14:39:56 | 2021-09-27T14:39:56 | 410,204,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | import os
import sqlite3
from flask import g
from flask import Flask
from flask import render_template
app = Flask(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATABASE = os.path.join(BASE_DIR, "dev.db")
def connect_db():
    """Open a fresh SQLite connection to the application database file."""
    connection = sqlite3.connect(DATABASE)
    return connection
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use.

    The connection is cached on flask.g under '_database' so one request
    reuses a single connection.
    """
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect(DATABASE)
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """App-context teardown hook: close the cached SQLite connection, if any.

    `exception` is whatever error (or None) Flask passes to teardown
    handlers; the connection is closed either way.
    """
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
def query_db(query, args=(), one=False):
    """Run a SQL query against the request-scoped database.

    :param query: SQL text with '?' placeholders.
    :param args: parameters bound to the placeholders.
    :param one: when True, return only the first row (or None if empty).
    :return: list of rows, or a single row / None when one=True.
    """
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
@app.route("/")
def hello_world():
    """Root route: static HTML greeting (smoke test)."""
    return "<p>Hello, World!</p>"
@app.route("/top")
def move_top():
    """Top-250 classic movies page (placeholder; body text is user-facing Chinese)."""
    return "<p>250经典电影</p>"
@app.route("/movie")
def movie_page():
    """Movie listing page: render every row of the movie table."""
    rows = query_db('select * from movie')
    # Template keyword is 'moves' (sic) -- the template expects that name.
    return render_template("hello.html", moves=rows)
| [
"defnngj@gmail.com"
] | defnngj@gmail.com |
9e863dde12b15744f27376c223e743904ad7dccc | 4a94207528c5123bb4d5673c1a8b604c7dd10f7a | /MODULES/string-functions.py | f1079d1c1bdb5b92b1a698f0ee47bc4927037241 | [] | no_license | Elite-Force/Python-Playground | aa9ebe880a243b786f4222f8ce5412efa7513719 | 460f5412aa84827102827b6d014b10f1d41069e0 | refs/heads/master | 2022-02-26T15:34:46.798511 | 2019-11-04T01:06:18 | 2019-11-04T01:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | print("""
STRING MANIPULATION
print_items_in_a_list(list_1)
print_items_in_range(x,y)
abbrevName(name)
print_string(a)
decor()
""")
def decor():
    """Return a fixed horizontal separator line of '=' characters."""
    return "================================================================"
def print_string(a):
    """Prompt the user for a string and echo it back.

    NOTE(review): the parameter `a` is immediately overwritten by the
    prompt result, so whatever the caller passes in is ignored -- confirm
    whether the parameter was meant to be used.
    """
    text = str(input("Please enter a string: "))
    print(text)
def print_items_in_a_list(list_1):
    """Print each item of *list_1* on its own line."""
    for item in list_1:
        print(item)
def print_items_in_range(x, y):
    """Print every integer n with x <= n < y, one per line.

    Bug fix: the original body was `return(n)` inside the loop, which
    exited on the very first pass (returning x and printing nothing) --
    contradicting both the function's name and its sibling
    print_items_in_a_list.
    """
    for n in range(x, y):
        print(n)
def challenge_3():
    """Print each hymn title prefixed with its index, e.g. '0 - Blessed Assurance'."""
    hymns = ["Blessed Assurance", "At Calvary", "New Name", "Onward"]
    for index, title in enumerate(hymns):
        print(index, "-", title)
def infin():
    """Interactive guessing loop: keep prompting until the user quits with 'q'.

    NOTE(review): on a non-integer guess the ValueError branch *returns* the
    error message instead of printing it and continuing, so a single bad
    input silently ends the loop -- confirm whether print() was intended.
    """
    # Valid guesses are the single digits 0-9.
    nos = [1,2,3,4,5,6,7,8,9,0]
    while True:
        try:
            g = int(input("Guess a number: "))
            if g in nos:
                print ("Correct!!")
            else:
                print("Wrong!!")
            q = input("Type q to quit!!: ")
            if q == "q":
                break
        except ValueError:
            return ("Please enter an integer!!")
def abbrevName(name):
    """Abbreviate a space-separated name to 'F.L' (uppercased first two initials)."""
    initials = [part[0:1].upper() for part in name.split(" ")]
    # Only the first two initials are used, as in the original.
    return initials[0] + "." + initials[1]
'''
Forever
Author of Salvation
My God is might to save
He is mighty to save!!!
'''
| [
"53560106+IfeOlulesi@users.noreply.github.com"
] | 53560106+IfeOlulesi@users.noreply.github.com |
1c1580900ab1dcfb8210ed4df0265547e3875743 | 89b42aa5caf583003e9adea929d57f241b97b6c0 | /model/process.py | 27bc13d12fa642e1c4ee202e2e56ad178e0a0dc8 | [] | no_license | Vision-CAIR/feelings | 740327de387d966ba14fbd20da4b54f8ef4e9c2a | e3441e7826de86aaf3ec3fcdd03aa1e9bc937ffb | refs/heads/master | 2022-04-20T02:20:31.554365 | 2020-04-22T18:20:16 | 2020-04-22T18:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | import pandas as pd
import numpy as np
import re, string
import numpy as np
#https://stackoverflow.com/questions/54028199/are-for-loops-in-pandas-really-bad-when-should-i-care
#https://stackoverflow.com/questions/11350770/select-by-partial-string-from-a-pandas-dataframe
#https://stackoverflow.com/questions/3041320/regex-and-operator
#https://stackoverflow.com/questions/17972938/check-if-string-in-pandas-dataframe-column-is-in-list
#https://cmdlinetips.com/2018/02/how-to-subset-pandas-dataframe-based-on-values-of-a-column/
regex = re.compile('[%s]' % re.escape(string.punctuation))
prefix = 'https://s3-us-west-1.amazonaws.com/fairgames/resources/images/wiki-art/original/square-not-padded-600-pixels/'
def image_link(df):
    """Replace df['painting'] with full image URLs and return them as JSON.

    Each row's URL is prefix + '<genre>/<painting>.jpg'.  Bug fix: the
    original called osp.join but never imported os.path as osp, so this
    function always raised NameError.  posixpath is used instead because
    the result is a URL and must use forward slashes on every platform.
    """
    import posixpath  # local import keeps the fix self-contained

    def build_url(row):
        return posixpath.join(prefix, row['genre'], row['painting'] + ".jpg")

    df['painting'] = df.apply(build_url, axis=1)
    for link in df['painting']:
        print(link)
    return pd.Series(df['painting']).to_json()
def data_pre_process(df):
    """Normalize df['utterance']: strip punctuation, lowercase, tokenize.

    :param df: DataFrame whose 'utterance' column holds raw strings.
    :return: the same DataFrame with 'utterance' replaced by token lists.
    """
    def _tokenize(text):
        # 'regex' is the module-level punctuation-stripping pattern.
        return regex.sub('', text).lower().split()

    df['utterance'] = df['utterance'].apply(_tokenize)
    return df
def subset_search(words, df, feeling):
    """Return the rows of df whose tokenized 'utterance' contains all of *words*.

    :param words: iterable of query tokens.
    :param df: DataFrame whose 'utterance' column holds token lists
        (see data_pre_process).
    :param feeling: optional feeling label; when truthy, results are further
        restricted to rows whose 'feeling' column equals it.
    :return: the filtered DataFrame.
    """
    query = set(words)
    mask = [query <= set(tokens) for tokens in df['utterance']]
    # Renamed from 'filter', which shadowed the builtin of the same name.
    matches = df[mask]
    if not feeling:
        return matches
    # A feeling was supplied -- narrow the matches to that feeling.
    return matches[matches['feeling'] == feeling]
if __name__ == '__main__':
    # Smoke test: load the demo dataset, run one sample query, print the links.
    df = pd.read_csv('demo.csv')
    df = data_pre_process(df)
    words = ['the','painting']
    feeling= 'fear'
    filter = subset_search(words,df,feeling)
    print(image_link(filter))
    # print(len(filter['utterance']))
    # print(filter['utterance'])
| [
"ishangupta@Ishans-MacBook-Pro.local"
] | ishangupta@Ishans-MacBook-Pro.local |
975ec0752443a90fd3164f9c1292d0b0f8361a1f | ff5ceb21cec47f423aabef1ed033e3f474505976 | /searchAgents.py | ae2c088631fcd00d8df9e49ec85ff9c6044a0eac | [] | no_license | taylordelehanty/Pacman | b2392e6b85afc496071fdcd70e5ccefa2627903e | 62e059af0fbe3d29c810a04d5d5ecac3b94e2e36 | refs/heads/master | 2021-05-09T16:25:20.779745 | 2018-01-30T19:23:29 | 2018-01-30T19:23:29 | 119,116,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,383 | py | # searchAgents.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
This file contains all of the agents that can be selected to
control Pacman. To select an agent, use the '-p' option
when running pacman.py. Arguments can be passed to your agent
using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a searchFunction=depthFirstSearch
Commands to invoke other search strategies can be found in the
project description.
Please only change the parts of the file you are asked to.
Look for the lines that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the
project description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
import searchAgents
class GoWestAgent(Agent):
    "An agent that goes West until it can't."
    def getAction(self, state):
        "Returns WEST while it is a legal Pacman action, otherwise STOP."
        legal = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search algorithm for a
    supplied search problem, then returns actions to follow that path.
    As a default, this agent runs DFS on a PositionSearchProblem to find location (1,1)
    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs
    Note: You should NOT change any code in SearchAgent
    """
    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems
        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError, fn + ' is not a search function in search.py.'
        func = getattr(search, fn)
        # Heuristic-aware search functions declare a 'heuristic' parameter;
        # plain searches (dfs/bfs/ucs) do not, and are used as-is.
        if 'heuristic' not in func.func_code.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Resolve the heuristic by name, first in this module, then in search.py.
            if heuristic in dir(searchAgents):
                heur = getattr(searchAgents, heuristic)
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)
        # Get the search problem type from the name
        if prob not in dir(searchAgents) or not prob.endswith('Problem'):
            raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
        self.searchType = getattr(searchAgents, prob)
        print('[SearchAgent] using problem type ' + prob)
    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game board. Here, we
        choose a path to the goal. In this phase, the agent should compute the path to the
        goal and store it in a local variable. All of the work is done in this method!
        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in registerInitialState). Return
        Directions.STOP if there is no further action to take.
        state: a GameState object (pacman.py)
        """
        # Lazily create the index into self.actions on the first call.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test,
    successor function and cost function.  This search problem can be
    used to find paths to a particular point on the pacman board.
    The state space consists of (x,y) positions in a pacman game.
    Note: this search problem is fully specified; you should NOT change it.
    """
    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True):
        """
        Stores the start and goal.
        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        # Warn when the board is not a single-dot maze with food at the goal.
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print 'Warning: this does not look like a regular search maze'
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0
    def getStartState(self):
        return self.startState
    def isGoalState(self, state):
        isGoal = state == self.goal
        # For display purposes only
        if isGoal:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
        return isGoal
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
        As noted in search.py:
            For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )
        # Bookkeeping for display purposes
        self._expanded += 1
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            # Each step's cost comes from costFn at the stepped-into position.
            cost += self.costFn((x,y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    An agent for position search with a cost function that penalizes being in
    positions on the West side of the board.
    The cost function for stepping into a position (x,y) is 1/2^x.
    """
    def __init__(self):
        # Stepping further East (larger x) is exponentially cheaper.
        def costFn(pos):
            return .5 ** pos[0]
        self.searchFunction = search.uniformCostSearch
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
class StayWestSearchAgent(SearchAgent):
    """
    An agent for position search with a cost function that penalizes being in
    positions on the East side of the board.
    The cost function for stepping into a position (x,y) is 2^x.
    """
    def __init__(self):
        # Stepping further East (larger x) is exponentially more expensive.
        def costFn(pos):
            return 2 ** pos[0]
        self.searchFunction = search.uniformCostSearch
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
    "The Manhattan distance heuristic for a PositionSearchProblem"
    return sum(abs(p - g) for p, g in zip(position, problem.goal))
def euclideanHeuristic(position, problem, info={}):
    "The Euclidean distance heuristic for a PositionSearchProblem"
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx * dx + dy * dy) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.
    You must select a suitable state space and successor function
    """
    def __init__(self, startingGameState, costFn = lambda x: 1):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        self.costFn = costFn
        top, right = self.walls.height-2, self.walls.width-2
        self.corners = ((1,1), (1,top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print 'Warning: no food in corner ' + str(corner)
        self._expanded = 0 # Number of search nodes expanded
        # NOTE(review): remaining corners are stored on the *problem* instead
        # of in the search state, which makes isGoalState stateful and
        # order-dependent -- unsound for graph search.  Confirm intent.
        self._goals = list(self.corners)
    def getStartState(self):
        "Returns the start state (in your state space, not the full Pacman state space)"
        # NOTE(review): returns a bare (x,y) position, but isGoalState below
        # unpacks states as (position, visited) pairs -- inconsistent.
        return self.startingPosition
    def isGoalState(self, state):
        "Returns whether this search state is a goal state of the problem"
        # NOTE(review): returns a (bool, set) tuple rather than a bool, and
        # mutates self._goals as a side effect; generic search code that
        # truth-tests the return value will misbehave.  Confirm the contract.
        isGoal = False
        position, visited = state
        if position in self._goals:
            idx = self._goals.index(position)
            del self._goals[idx]
            visited = set()
            isGoal = not len(self._goals)
        return isGoal, visited
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
        As noted in search.py:
            For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        pivot, visited = state
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = pivot
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            hitsWall = self.walls[nextx][nexty]
            if not hitsWall:
                # NOTE(review): successors are bare positions, without the
                # 'visited' component that isGoalState expects -- inconsistent.
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append((nextState, action, cost))
        self._expanded += 1
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999.  This is implemented for you.
        """
        if actions == None: return 999999
        x,y= self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.
      state:   The current search state
               (a data structure you chose in your search problem)
      problem: The CornersProblem instance for this layout.
    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem; i.e.
    it should be admissible (as well as consistent).
    """
    corners = problem.corners # These are the corner coordinates
    walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
    "*** YOUR CODE HERE ***"
    # Placeholder: zero is trivially admissible and consistent but gives A*
    # no guidance (it degenerates to uniform-cost search).
    return 0 # Default to trivial solution
class AStarCornersAgent(SearchAgent):
    "A SearchAgent for CornersProblem using A* and your cornersHeuristic"
    def __init__(self):
        # Pair A* (with cornersHeuristic) with the four-corners problem.
        self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding the a path that collects all of the
    food (dots) in a Pacman game.
    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
      pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0
        self.heuristicInfo = {} # A dictionary for the heuristic to store information
    def getStartState(self):
        return self.start
    def isGoalState(self, state):
        # Goal: no food left anywhere in the grid.
        return state[1].count() == 0
    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        successors = []
        self._expanded += 1
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state[0]
            dx, dy = Actions.directionToVector(direction)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Copy the grid and clear any food at the stepped-into cell.
                nextFood = state[1].copy()
                nextFood[nextx][nexty] = False
                successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
        return successors
    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    """A SearchAgent that runs A* with foodHeuristic on a FoodSearchProblem."""
    def __init__(self):
        # Same configuration as the base SearchAgent, just pre-wired.
        self.searchType = FoodSearchProblem
        self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.
    This heuristic must be consistent to ensure correctness.  First, try to come up
    with an admissible heuristic; almost all admissible heuristics will be consistent
    as well.
    If using A* ever finds a solution that is worse uniform cost search finds,
    your heuristic is *not* consistent, and probably not admissible!  On the other hand,
    inadmissible or inconsistent heuristics may find optimal solutions, so be careful.
    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False. You can call foodGrid.asList()
    to get a list of food coordinates instead.
    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, problem.walls gives you a Grid of where the walls are.
    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use. For example,
    if you only want to count the walls once and store that value, try:
    problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    # Placeholder: the zero heuristic is trivially admissible and consistent
    # (A* degenerates to uniform-cost search until this is implemented).
    return 0
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"
    def registerInitialState(self, state):
        # Greedily chains shortest paths to the nearest dot until no food remains.
        self.actions = []
        currentState = state
        while(currentState.getFood().count() > 0):
            nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                # Replay each action to keep currentState in sync with the plan,
                # failing loudly if the sub-search returned an illegal move.
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print 'Path found with cost %d.' % len(self.actions)
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.
    This search problem is just like the PositionSearchProblem, but
    has a different goal test, which you need to fill in below.  The
    state space and successor function do not need to be changed.
    The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
    inherits the methods of the PositionSearchProblem.
    You can use this search problem to help you fill in
    the findPathToClosestDot method.
    """
    def __init__(self, gameState):
        "Stores information from the gameState.  You don't need to change this."
        # Store the food for later reference
        self.food = gameState.getFood()
        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1  # every step costs 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0
    def isGoalState(self, state):
        """
        The state is Pacman's position. Fill this in with a goal test
        that will complete the problem definition.
        """
        x,y = state
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
##################
# Mini-contest 1 #
##################
class ApproximateSearchAgent(Agent):
    # Mini-contest stub: both hooks below are left for the student to implement.
    "Implement your contest entry here.  Change anything but the class name."
    def registerInitialState(self, state):
        "This method is called before any moves are made."
        "*** YOUR CODE HERE ***"
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.
    Example usage: mazeDistance( (2,4), (5,6), gameState)
    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Bug fix: the original concatenated the tuple itself ('... ' + point1),
    # which raises TypeError instead of the intended AssertionError message.
    # point2 already used str(); make point1 consistent.
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    # Every step costs 1, so the BFS path length is the maze distance.
    return len(search.bfs(prob))
| [
"tdelehanty1993@gmail.com"
] | tdelehanty1993@gmail.com |
2875165806e8a280633e5e89253504697ec9885b | 1f143b4992c7849819ee634569870ed280086df8 | /software/realsense_examples/pyglet_pointcloud_viewer.py | b6b0541a4b719b10c005012148aa36c8921492f8 | [
"MIT",
"Apache-2.0"
] | permissive | CASE-Association/FolkraceCar | 31a134cf2f026090db064254c08362429cc5a45c | 6d927182751383a4be2336db8f693d2fb422c757 | refs/heads/master | 2023-03-27T01:45:43.885905 | 2020-04-24T15:19:16 | 2020-04-24T15:19:16 | 241,513,790 | 1 | 0 | Apache-2.0 | 2021-03-31T20:12:07 | 2020-02-19T02:23:38 | Python | UTF-8 | Python | false | false | 14,871 | py | # License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
"""
OpenGL Pointcloud viewer with http://pyglet.org
Usage:
------
Mouse:
Drag with left button to rotate around pivot (thick small axes),
with right button to translate and the wheel to zoom.
Keyboard:
[p] Pause
[r] Reset View
[d] Cycle through decimation values
[z] Toggle point scaling
[x] Toggle point distance attenuation
[c] Toggle color source
[l] Toggle lighting
[f] Toggle depth post-processing
[s] Save PNG (./out.png)
[e] Export points to ply (./out.ply)
[q/ESC] Quit
Notes:
------
Using deprecated OpenGL (FFP lighting, matrix stack...) however, draw calls
are kept low with pyglet.graphics.* which uses glDrawArrays internally.
Normals calculation is done with numpy on CPU which is rather slow, should really
be done with shaders but was omitted for several reasons - brevity, for lowering
dependencies (pyglet doesn't ship with shader support & recommends pyshaders)
and for reference.
"""
import math
import ctypes
import pyglet
import pyglet.gl as gl
import numpy as np
import pyrealsense2 as rs
# https://stackoverflow.com/a/6802723
# https://stackoverflow.com/a/6802723
def rotation_matrix(axis, theta):
    """
    Return the 3x3 matrix for a counterclockwise rotation of ``theta`` radians
    about ``axis`` (Euler-Rodrigues formula).  ``axis`` need not be normalized.
    """
    unit = np.asarray(axis)
    unit = unit / math.sqrt(np.dot(unit, unit))
    # Quaternion components for the half-angle rotation.
    w = math.cos(theta / 2.0)
    x, y, z = -unit * math.sin(theta / 2.0)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    xy, wz, wy, wx, xz, yz = x * y, w * z, w * y, w * x, x * z, y * z
    return np.array([[ww + xx - yy - zz, 2 * (xy + wz), 2 * (xz - wy)],
                     [2 * (xy - wz), ww + yy - xx - zz, 2 * (yz + wx)],
                     [2 * (xz + wy), 2 * (yz - wx), ww + zz - xx - yy]])
class AppState:
    """Mutable viewer state: camera orientation, pan/zoom, and UI toggles."""

    def __init__(self, *args, **kwargs):
        # Camera pose.
        self.pitch, self.yaw = math.radians(-10), math.radians(-15)
        self.translation = np.array([0, 0, 1], np.float32)
        self.distance = 2
        self.mouse_btns = [False, False, False]
        # Behaviour toggles, flipped by the keyboard handler.
        self.paused = False
        self.decimate = 0
        self.scale = True
        self.attenuation = False
        self.color = True
        self.lighting = False
        self.postprocessing = False

    def reset(self):
        """Restore the default camera pose (UI toggles are left alone)."""
        self.pitch, self.yaw, self.distance = 0, 0, 2
        self.translation[:] = 0, 0, 1

    @property
    def rotation(self):
        """Combined yaw-then-pitch rotation as a float32 3x3 matrix."""
        around_x = rotation_matrix((1, 0, 0), math.radians(-self.pitch))
        around_y = rotation_matrix((0, 1, 0), math.radians(-self.yaw))
        return np.dot(around_y, around_x).astype(np.float32)
state = AppState()

# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# other_stream, other_format = rs.stream.infrared, rs.format.y8
other_stream, other_format = rs.stream.color, rs.format.rgb8
config.enable_stream(other_stream, 640, 480, other_format, 30)

# Start streaming
pipeline.start(config)
profile = pipeline.get_active_profile()

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()
# Depth post-processing chain, applied only when state.postprocessing is on;
# spatial/temporal filters operate in disparity space, hence the transforms.
filters = [rs.disparity_transform(),
           rs.spatial_filter(),
           rs.temporal_filter(),
           rs.disparity_transform(False)]

# pyglet
window = pyglet.window.Window(
    config=gl.Config(
        double_buffer=True,
        samples=8  # MSAA
    ),
    resizable=True, vsync=True)
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)
def convert_fmt(fmt):
    """Translate an rs.format enum value into pyglet's image format string."""
    mapping = {
        rs.format.rgb8: 'RGB',
        rs.format.bgr8: 'BGR',
        rs.format.rgba8: 'RGBA',
        rs.format.bgra8: 'BGRA',
        rs.format.y8: 'L',
    }
    return mapping[fmt]
# Create a VertexList to hold pointcloud data
# Will pre-allocates memory according to the attributes below
vertex_list = pyglet.graphics.vertex_list(
    w * h, 'v3f/stream', 't2f/stream', 'n3f/stream')
# Create and allocate memory for our color data
other_profile = rs.video_stream_profile(profile.get_stream(other_stream))
image_data = pyglet.image.ImageData(w, h, convert_fmt(
    other_profile.format()), (gl.GLubyte * (w * h * 3))())

# FPS overlay: the class moved between pyglet versions.
if (pyglet.version.startswith('1.') and not pyglet.version.startswith('1.5')):
    # pyglet.clock.ClockDisplay has be removed in 1.4
    fps_display = pyglet.clock.ClockDisplay()
else:
    fps_display = pyglet.window.FPSDisplay(window)
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
    # Left drag orbits, right drag pans in view space, middle drag dollies.
    w, h = map(float, window.get_size())

    if buttons & pyglet.window.mouse.LEFT:
        state.yaw -= dx * 0.5
        state.pitch -= dy * 0.5

    if buttons & pyglet.window.mouse.RIGHT:
        # Pan proportionally to window size, rotated into the camera frame.
        dp = np.array((dx / w, -dy / h, 0), np.float32)
        state.translation += np.dot(state.rotation, dp)

    if buttons & pyglet.window.mouse.MIDDLE:
        dz = dy * 0.01
        state.translation -= (0, 0, dz)
        state.distance -= dz
def handle_mouse_btns(x, y, button, modifiers):
    # Toggle pressed-state flags; registered for both press and release below.
    state.mouse_btns[0] ^= (button & pyglet.window.mouse.LEFT)
    state.mouse_btns[1] ^= (button & pyglet.window.mouse.RIGHT)
    state.mouse_btns[2] ^= (button & pyglet.window.mouse.MIDDLE)

window.on_mouse_press = window.on_mouse_release = handle_mouse_btns
@window.event
def on_mouse_scroll(x, y, scroll_x, scroll_y):
    # Wheel zoom: dolly the camera along the view axis.
    dz = scroll_y * 0.1
    state.translation -= (0, 0, dz)
    state.distance -= dz
def on_key_press(symbol, modifiers):
    # Keyboard shortcuts; see the module docstring for the full list.
    if symbol == pyglet.window.key.R:
        state.reset()

    if symbol == pyglet.window.key.P:
        state.paused ^= True

    if symbol == pyglet.window.key.D:
        # Cycle decimation magnitude 1x / 2x / 4x.
        state.decimate = (state.decimate + 1) % 3
        decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)

    if symbol == pyglet.window.key.C:
        state.color ^= True

    if symbol == pyglet.window.key.Z:
        state.scale ^= True

    if symbol == pyglet.window.key.X:
        state.attenuation ^= True

    if symbol == pyglet.window.key.L:
        state.lighting ^= True

    if symbol == pyglet.window.key.F:
        state.postprocessing ^= True

    if symbol == pyglet.window.key.S:
        pyglet.image.get_buffer_manager().get_color_buffer().save('out.png')

    if symbol == pyglet.window.key.Q:
        window.close()

window.push_handlers(on_key_press)
def axes(size=1, width=1):
    """Draw RGB-colored X/Y/Z axis line segments from the origin."""
    gl.glLineWidth(width)
    origin = (0, 0, 0)
    tips = ((size, 0, 0), (0, size, 0), (0, 0, size))
    colors = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
    verts = []
    cols = []
    for tip, col in zip(tips, colors):
        verts += origin + tip
        cols += col + col  # both endpoints of a segment share the axis color
    pyglet.graphics.draw(6, gl.GL_LINES, ('v3f', tuple(verts)), ('c3f', tuple(cols)))
def frustum(intrinsics):
    """draw camera's frustum"""
    w, h = intrinsics.width, intrinsics.height
    batch = pyglet.graphics.Batch()

    # One rectangle per depth d = 1, 3, 5 metres.
    for d in range(1, 6, 2):
        def get_point(x, y):
            # Deproject a pixel corner to 3D; as a side effect this also adds
            # the origin->corner edge to the batch before returning the point.
            p = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], d)
            batch.add(2, gl.GL_LINES, None, ('v3f', [0, 0, 0] + p))
            return p

        top_left = get_point(0, 0)
        top_right = get_point(w, 0)
        bottom_right = get_point(w, h)
        bottom_left = get_point(0, h)

        # Connect the four corners at this depth.
        batch.add(2, gl.GL_LINES, None, ('v3f', top_left + top_right))
        batch.add(2, gl.GL_LINES, None, ('v3f', top_right + bottom_right))
        batch.add(2, gl.GL_LINES, None, ('v3f', bottom_right + bottom_left))
        batch.add(2, gl.GL_LINES, None, ('v3f', bottom_left + top_left))

    batch.draw()
def grid(size=1, n=10, width=1):
    """Draw an n-by-n cell square grid on the xz plane, centered at the origin."""
    gl.glLineWidth(width)
    step = size / float(n)
    half = 0.5 * size
    batch = pyglet.graphics.Batch()
    # Lines parallel to the z axis...
    for k in range(n + 1):
        offset = -half + k * step
        batch.add(2, gl.GL_LINES, None, ('v3f', (offset, 0, -half, offset, 0, half)))
    # ...then lines parallel to the x axis.
    for k in range(n + 1):
        offset = -half + k * step
        batch.add(2, gl.GL_LINES, None, ('v3f', (-half, 0, offset, half, 0, offset)))
    batch.draw()
@window.event
def on_draw():
    # Full frame render: 3D scene (grid, point cloud, frustum, axes), then the
    # 2D FPS overlay.  Matrix-stack order matters throughout.
    window.clear()
    gl.glEnable(gl.GL_DEPTH_TEST)
    gl.glEnable(gl.GL_LINE_SMOOTH)

    width, height = window.get_size()
    gl.glViewport(0, 0, width, height)
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.gluPerspective(60, width / float(height), 0.01, 20)

    gl.glMatrixMode(gl.GL_TEXTURE)
    gl.glLoadIdentity()
    # texcoords are [0..1] and relative to top-left pixel corner, add 0.5 to center
    gl.glTranslatef(0.5 / image_data.width, 0.5 / image_data.height, 0)
    image_texture = image_data.get_texture()
    # texture size may be increased by pyglet to a power of 2
    tw, th = image_texture.owner.width, image_texture.owner.height
    gl.glScalef(image_data.width / float(tw),
                image_data.height / float(th), 1)

    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()
    gl.gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)

    gl.glTranslatef(0, 0, state.distance)
    gl.glRotated(state.pitch, 1, 0, 0)
    gl.glRotated(state.yaw, 0, 1, 0)

    if any(state.mouse_btns):
        # Visualize the orbit pivot while dragging.
        axes(0.1, 4)

    gl.glTranslatef(0, 0, -state.distance)
    gl.glTranslatef(*state.translation)

    gl.glColor3f(0.5, 0.5, 0.5)
    gl.glPushMatrix()
    gl.glTranslatef(0, 0.5, 0.5)
    grid()
    gl.glPopMatrix()

    psz = max(window.get_size()) / float(max(w, h)) if state.scale else 1
    gl.glPointSize(psz)
    distance = (0, 0, 1) if state.attenuation else (1, 0, 0)
    gl.glPointParameterfv(gl.GL_POINT_DISTANCE_ATTENUATION,
                          (gl.GLfloat * 3)(*distance))

    if state.lighting:
        ldir = [0.5, 0.5, 0.5]  # world-space lighting
        ldir = np.dot(state.rotation, (0, 0, 1))  # MeshLab style lighting
        ldir = list(ldir) + [0]  # w=0, directional light
        gl.glLightfv(gl.GL_LIGHT0, gl.GL_POSITION, (gl.GLfloat * 4)(*ldir))
        gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,
                     (gl.GLfloat * 3)(1.0, 1.0, 1.0))
        gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,
                     (gl.GLfloat * 3)(0.75, 0.75, 0.75))
        gl.glEnable(gl.GL_LIGHT0)
        gl.glEnable(gl.GL_NORMALIZE)
        gl.glEnable(gl.GL_LIGHTING)

    gl.glColor3f(1, 1, 1)
    texture = image_data.get_texture()
    gl.glEnable(texture.target)
    gl.glBindTexture(texture.target, texture.id)
    gl.glTexParameteri(
        gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)

    # comment this to get round points with MSAA on
    gl.glEnable(gl.GL_POINT_SPRITE)

    if not state.scale and not state.attenuation:
        gl.glDisable(gl.GL_MULTISAMPLE)  # for true 1px points with MSAA on
    vertex_list.draw(gl.GL_POINTS)
    gl.glDisable(texture.target)
    if not state.scale and not state.attenuation:
        gl.glEnable(gl.GL_MULTISAMPLE)

    gl.glDisable(gl.GL_LIGHTING)

    gl.glColor3f(0.25, 0.25, 0.25)
    frustum(depth_intrinsics)
    axes()

    # Switch to an orthographic overlay for the FPS counter.
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.glOrtho(0, width, 0, height, -1, 1)
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()
    gl.glMatrixMode(gl.GL_TEXTURE)
    gl.glLoadIdentity()
    gl.glDisable(gl.GL_DEPTH_TEST)

    fps_display.draw()
def run(dt):
    # Per-frame update: grab camera frames, post-process depth, and upload
    # vertices/texcoords (and optionally normals) into the pyglet buffers.
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    # Non-blocking poll; skip the frame if the camera has nothing ready.
    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame().as_video_frame()
    other_frame = frames.first(other_stream).as_video_frame()

    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = other_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)
    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    # copy our data to pre-allocated buffers, this is faster than assigning...
    # pyglet will take care of uploading to GPU
    def copy(dst, src):
        """copy numpy array to pyglet array"""
        # timeit was mostly inconclusive, favoring slice assignment for safety
        np.array(dst, copy=False)[:] = src.ravel()
        # ctypes.memmove(dst, src.ctypes.data, src.nbytes)

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)

    if state.lighting:
        # compute normals
        dy, dx = np.gradient(verts, axis=(0, 1))
        n = np.cross(dx, dy)

        # can use this, np.linalg.norm or similar to normalize, but OpenGL can do this for us, see GL_NORMALIZE above
        # norm = np.sqrt((n*n).sum(axis=2, keepdims=True))
        # np.divide(n, norm, out=n, where=norm != 0)

        # import cv2
        # n = cv2.bilateralFilter(n, 5, 1, 1)

        copy(vertex_list.normals, n)

    if keys[pyglet.window.key.E]:
        points.export_to_ply('./out.ply', mapped_frame)
# Drive per-frame updates from pyglet's clock, then enter the event loop.
pyglet.clock.schedule(run)
try:
    pyglet.app.run()
finally:
    # Always stop the camera, even if the window loop raises.
    pipeline.stop()
| [
"stefanlarsson95@gmail.com"
] | stefanlarsson95@gmail.com |
74cf46e17501124ce9afbae42961cff70f574673 | 9998bfe8fd406498bd8db0ca993195245d26ca61 | /layers-geonode-tools/rename_distinct.py | 1de57db4b8a7dff086ea8c6b743d938228e8a098 | [] | no_license | phil-lidar2-parmap/agricultural-lulc | 89a194219c7a8690f899cca549c9464a5e4d58e9 | 4ee0fbfc777c472be68a994cd586ddf05c7a7bfe | refs/heads/master | 2020-05-25T09:06:04.683045 | 2017-11-24T06:43:08 | 2017-11-24T06:43:08 | 84,929,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | __version__ = "0.4.1"
import os
import sys
import csv
import time
import shutil
import argparse
import logging
import arcpy
# Parse arguments
parser = argparse.ArgumentParser(description='Renaming distinct LULC shapefiles')
parser.add_argument('-i','--input_directory')
parser.add_argument('-o','--output_directory')
args = parser.parse_args()

startTime = time.time()

# Log to file (errors) and echo everything to the console.
LOG_FILENAME = "rename_distinct.log"
logging.basicConfig(filename=LOG_FILENAME,level=logging.ERROR, format='%(asctime)s: %(levelname)s: %(message)s')
logger = logging.getLogger("rename_distinct.log")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s: %(levelname)s: %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)

list_quads = []

# output CSV file
# NOTE(review): 'wb' mode for csv is the Python 2 idiom; this script is py2-only.
log_file = "rename_distinct.csv"
csvfile = open(log_file, 'wb')
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["Filename", "Path", "FileExt", "Remarks"])

input_directory = args.input_directory
output_directory = args.output_directory
lulc_gdb = r"E:\PARMAP_10K\10K LULC Layers.gdb\LULC_Database"

# First pass: collect every filename so duplicates can be detected by count().
for path, dirs, files in os.walk(input_directory,topdown=False):
    for f in sorted(files):
        list_quads.append(f)

# Second pass: move files with a unique name into the output directory,
# renamed to <quad>_LULC.<ext>.  Duplicates are only logged.
for path, dirs, files in os.walk(input_directory,topdown=False):
    for f in sorted(files):
        # NOTE(review): files without a '.' raise IndexError here - TODO confirm
        # that inputs are always shapefile components with extensions.
        quad_name = f.split(".",1)[0]
        file_extension = f.split(".",1)[1]
        logger.info("%s: Counting the number of occurrences", f)
        try:
            if list_quads.count(f) == 1:
                logger.info("%s: Renaming the shapefile", f)
                src = os.path.join(path,f)
                dst = os.path.join(output_directory, quad_name + "_LULC." + file_extension)
                shutil.move(src,dst)
                spamwriter.writerow([f, path, file_extension, "Distinct"])
            else:
                logger.info("%s: Found duplicate values", f)
                spamwriter.writerow([f, path, file_extension, "Duplicate"])
        except Exception:
            # NOTE(review): errors are recorded with the "Duplicate" remark,
            # which is misleading in the CSV output.
            logger.exception("%s: Error encountered", f)
            spamwriter.writerow([f, path, file_extension, "Duplicate"])

# calculate for area and delete unnecessary fields
for shp in os.listdir(output_directory):
    if shp.endswith(".shp"):
        drop_fields = ["SHAPE_Leng", "SHAPE_Area"]
        shp_path = os.path.join(output_directory, shp)
        quad = shp.split("_LULC",1)[0]
        try:
            logger.info("%s: Calculating area for each geometry", quad)
            arcpy.CalculateField_management(shp_path, "AREA", "!shape.area@squaremeters!", "PYTHON_9.3")
            logger.info("%s: Deleting unnecessary fields", quad)
            arcpy.DeleteField_management(shp_path,drop_fields)
            logger.info("%s: Updating LULC Database", quad)
            # Flag the matching quad row in the geodatabase as renamed.
            expression = "quadname = '{0}'".format(quad)
            arcpy.MakeFeatureLayer_management(lulc_gdb, "lulc_gdb_layer", expression)
            arcpy.CalculateField_management("lulc_gdb_layer", "is_renamed", '"Y"', "PYTHON_9.3")
        except Exception:
            logger.exception("%s: Error encountered", shp)
        finally:
            arcpy.Delete_management("lulc_gdb_layer")

endTime = time.time() # End timing
print '\nElapsed Time:', str("{0:.2f}".format(round(endTime - startTime,2))), 'seconds'
| [
"jalaurente@dream.upd.edu.ph"
] | jalaurente@dream.upd.edu.ph |
74da5a87b5ec42b3916f337f6510325ceb0175cc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_223/ch147_2020_04_12_20_47_12_558411.py | 48a99e9557a4ecf4ea235c6fb2d47c61fe37004e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | def mais_frequente(l1):
dic={}
for e in l1:
if e in dic:
dic[e]+=1
else:
dic[e]=1
ocorrencias = [0]
for n in dic.values():
if n>ocorrencias[0]:
del ocorrencias[0]
ocorrencias.append(n)
palavra = []
for i in dic.items():
for p in dic.keys():
if dic[p] = ocorrencias
palavra.append(p)
return palavra | [
"you@example.com"
] | you@example.com |
4921b8e173da1fd1520557376db5b7c93a1765e8 | 67230d2def8991c38a857833190e8104c7ddbe4d | /exp7.1.py | fb3436d77d0c012eab3c9e73338d50f01a6518a9 | [] | no_license | yasirbaig28/Python-Matplotlib | 48f49d12324c6524b1e75b3a2d98cbe5144c5446 | e0eb00e675cac01449880a7e5e746ba8219aa8b7 | refs/heads/main | 2023-08-01T04:54:37.324580 | 2021-09-14T12:22:52 | 2021-09-14T12:22:52 | 406,353,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | #Mutiple Graphs with Dark background
from matplotlib import pyplot as plt
from matplotlib import style
# Plot two line series on one dark-themed figure.
style.use('dark_background')
# First series.
x1=[1,2,3,4,5]
y1=[5,10,15,20,45]
# Second series.
x2=[1,3,5,7,9]
y2=[2,4,6,8,10]
plt.plot(x1,y1,c='b',label="x1 vs y1")
plt.plot(x2,y2,c='r',label="x2 vs y2")
plt.title("Multiple Graphs")
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")
plt.legend()
# Yellow grid lines for contrast against the dark background.
plt.grid(True,color='y')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
7b6c0ff886fe71c0f3914aa74bd96d2500d5ae56 | 3b3176795e42fc1d1c8dc33e9fa583bb4db58abe | /scripts/yah3c_core.py | b5606a046b7b5b462f31a8709cff02760177bb9b | [] | no_license | Lhfcws/YaH3C-mini-osx | 6c0c62b5dc1c4e7f4b509130d8e98f690e11eb9f | 7c7b2535f67a7811ae811900878074413e803c8b | refs/heads/master | 2021-01-23T06:26:37.012891 | 2017-04-19T04:30:52 | 2017-04-19T04:30:52 | 23,430,447 | 2 | 1 | null | 2014-08-31T15:40:34 | 2014-08-28T14:14:30 | Python | UTF-8 | Python | false | false | 11,415 | py | #!/usr/bin/python
# coding: utf-8
import sys, os
import pcap
from struct import pack, unpack
## Constants
# Reference: http://tools.ietf.org/html/rfc3748
ETHERTYPE_PAE = 0x888e  # EtherType for EAPOL (802.1X port access entity)
PAE_GROUP_ADDR = "\x01\x80\xc2\x00\x00\x03"  # 802.1X multicast destination MAC
BROADCAST_ADDR = "\xff\xff\xff\xff\xff\xff"
# Opaque H3C client-version blob sent inside the identity response.
VERSION_INFO = "\x06\x07bjQ7SE8BZ3MqHhs3clMregcDY3Y=\x20\x20"

EAPOL_VERSION = 1
EAPOL_EAPPACKET = 0

# packet info for EAPOL_EAPPACKET
EAPOL_START = 1
EAPOL_LOGOFF = 2
EAPOL_KEY = 3
EAPOL_ASF = 4

EAP_REQUEST = 1
EAP_RESPONSE = 2
EAP_SUCCESS = 3
EAP_FAILURE = 4

# packet info followed by EAP_RESPONSE
# 1            Identity
# 2            Notification
# 3            Nak (Response only)
# 4            MD5-Challenge
# 5            One Time Password (OTP)
# 6            Generic Token Card (GTC)
# 254          Expanded Types
# 255          Experimental use
EAP_TYPE_ID = 1  # identity
EAP_TYPE_NOTIFICATION = 2  # Notification
EAP_TYPE_MD5 = 4  # md5 Challenge
EAP_TYPE_H3C = 7  # H3C eap packet(used for SYSU east campus)
def mac_repr(mac):
    """Format a raw 6-byte MAC string as colon-separated lowercase hex."""
    template = ':'.join(['%02x'] * 6)
    return template % tuple(ord(octet) for octet in mac)
def md5_repr(md5hash):
    """Render an arbitrary-length raw byte string as colon-separated hex octets."""
    octets = ['%02x' % ord(ch) for ch in md5hash]
    return ':'.join(octets)
# Human-readable labels used only by dump() for debug output.
EAPOL_TYPE_MAP = {
    EAPOL_EAPPACKET: 'EAPOL_EAPPACKET',
    # NOTE(review): key 1 is labeled with EAPOL_VERSION here, but 1 is also
    # EAPOL_START's value - the label looks like a debugging shorthand; verify.
    EAPOL_VERSION: 'EAPOL_VERSION',
}

EAP_CODE_MAP = {
    EAP_REQUEST: 'EAP_REQUEST',
    EAP_RESPONSE: 'EAP_RESPONSE',
    EAP_SUCCESS: 'EAP_SUCCESS',
    EAP_FAILURE: 'EAP_FAILURE',
}
def parse_header(ethernetHeader):
    """Split a 14-byte Ethernet header into (dst_mac, src_mac, ethertype)."""
    # layout: 6 bytes destination MAC | 6 bytes source MAC | 2 bytes type
    dst = ethernetHeader[:6]
    src = ethernetHeader[6:12]
    (ethertype,) = unpack('!H', ethernetHeader[12:14])
    return dst, src, ethertype
def dump(packet, f=None, split='>'):
    """Append a human-readable decode of a raw EAPOL packet to *f*.

    f defaults to an append-mode handle on /tmp/packetsdump, opened lazily on
    first use and cached on the function.  (The original used
    ``f=open('/tmp/packetsdump', 'a')`` as the default argument, which performs
    I/O at import time and leaks the handle even when dump() is never called.)
    *split* is the separator character repeated as a record divider.
    """
    if f is None:
        if dump._default_file is None:
            dump._default_file = open('/tmp/packetsdump', 'a')
        f = dump._default_file
    f.write(split * 80)
    f.write('\n')
    # Ethernet header (14 bytes) followed by the EAPOL payload.
    header, packet = packet[:14], packet[14:]
    headerRepr = 'dst:%s|src:%s|typ:0x%x|\n' % (
        mac_repr(header[:6]), mac_repr(header[6:12]), unpack('!H', header[12:14])[0])
    f.write(headerRepr)
    # body, meta info
    verson, type, eapolLen = unpack("!BBH", packet[:4])
    bodyRepr = 'ver:%d|typ2:%d(%s)|eapollen:%d|\n' % (
        verson, type, EAPOL_TYPE_MAP.get(type, 'unknown'), eapolLen)
    f.write(bodyRepr)
    if type != EAPOL_EAPPACKET:
        # meet unknown type, dump all rest data.
        f.write(packet[4:8])
    else:
        # parse payload
        code, id, eapLen = unpack("!BBH", packet[4:8])
        eapRepr = 'code:%d(%s)|id:%d|eaplen:%d|\n' % (
            code, EAP_CODE_MAP.get(code, 'unknown'), id, eapLen)
        f.write(eapRepr)
        if code == EAP_REQUEST or code == EAP_RESPONSE:
            reqType = unpack("!B", packet[8:9])[0]
            reqData = packet[9: 4 + eapLen]
            if reqType == EAP_TYPE_ID:
                f.write('reqtype:identity|ver+username:%s|' % (reqData))
            elif reqType == EAP_TYPE_H3C:
                # TODO: not implemented
                pass
            elif reqType == EAP_TYPE_MD5:
                # First byte is the CHAP value length; username follows it.
                chapLen = unpack('!B', reqData[0:1])[0]
                f.write('reqtype:md5|chap(%d):%s|username:%s|' % (
                    chapLen, md5_repr(reqData[1:chapLen + 1]), reqData[chapLen + 1:]))
            else:
                f.write('reqtype:%d(unknown)|%s|' % (reqType, reqData))
        else:
            f.write('%s|' % packet[8:])
    f.write('\n')
    f.flush()
dump._default_file = None  # lazily-opened shared log handle
def make_ethernet_header(src, dst, type):
    """Build a raw Ethernet header: destination MAC, source MAC, then ethertype."""
    ethertype = pack("!H", type)
    return dst + src + ethertype
def make_EAPOL(type, payload=""):
    """Wrap *payload* in an EAPOL header (protocol version, packet type, length)."""
    header = pack("!BBH", EAPOL_VERSION, type, len(payload))
    return header + payload
def make_EAP(code, id, type=0, data=""):
    """Build an EAP packet; success/failure frames carry no type byte or data."""
    if code in (EAP_SUCCESS, EAP_FAILURE):
        return pack("!BBH", code, id, 4)
    # length field covers the 5 header bytes plus the data.
    return pack("!BBHB", code, id, 5 + len(data), type) + data
def display_info(s):
    # Console logger (Python 2 print statement).
    print s
def display_response_info(s):
    # Server messages are GBK-encoded; fall back to the raw bytes if either
    # decoding or printing the decoded text fails (hence the broad except).
    try:
        display_info('>> ' + s.decode('gbk'))
    except:
        display_info('>> ' + s)
EMPTY_IP = (0, 0, 0, 0)  # sentinel returned when the address can't be determined
def get_ipaddr_slow(device):
    # Fallback path: shell `ip address show` into a temp file and parse the
    # token after 'inet' into a 4-int tuple (a.b.c.d).
    try:
        path = "/tmp/_asdfafip"
        os.system("ip address show dev %s >%s" % (device, path))
        data = open(path, 'r').read().split()
        return tuple(map(int, data[data.index('inet') + 1].split('/')[0].split('.')))
    except:
        return EMPTY_IP
def get_ipaddr(device):
    """Return *device*'s IPv4 address as a 4-int tuple, or EMPTY_IP on failure.

    Fast path uses the Python 2 ``commands`` module; any error falls back to
    get_ipaddr_slow().
    """
    try:
        import commands
        # Bug fix: the output must be .split() into tokens before looking up
        # the word after 'inet' - the original indexed into the raw string, so
        # this path always raised and silently fell through to the slow path
        # (compare the working parse in get_ipaddr_slow).
        data = commands.getoutput('ip address show dev ' + device).split()
        return tuple(map(int, data[data.index('inet') + 1].split('/')[0].split('.')))
    except:
        return get_ipaddr_slow(device)
def get_mac_addr(device):
    # Parse `ifconfig` output and return the device's MAC as a packed 6-byte
    # string (returns None implicitly if the device/address is not found).
    def pack_mac(mac_addrs):
        # Convert ["aa","bb",...] hex tokens into a packed "!BBBBBB" string.
        args = []
        for hex_item in mac_addrs:
            args.append(int("0x" + hex_item, 16))
        return pack("!BBBBBB", *args)
    is_this_device = False
    for line in os.popen("ifconfig"):
        if is_this_device and line.find("ether") >= 0:
            lines = line.split()
            for item in lines:
                # a token with five ':' separators is the aa:bb:cc:dd:ee:ff address
                if item.count(":") == 5:
                    return pack_mac(item.split(":"))
        # Lines for the next interface start with its name; arm the flag here.
        if line.strip().startswith(device):
            is_this_device = True
def connect(username, password, device, login=True):
    """Run the 802.1X/H3C authentication state machine on *device*.

    With login=True, sends EAPOL-Start and answers the server's EAP requests
    (identity, H3C allocation, MD5-challenge) until success/failure; with
    login=False, only sends EAPOL-Logoff.  Blocks in the pcap capture loop.
    """
    client = pcap.pcap(name=device, timeout_ms=15000, snaplen=65535, immediate=True)
    macAddr = get_mac_addr(device)
    ethernetHeader = make_ethernet_header(macAddr, PAE_GROUP_ADDR, ETHERTYPE_PAE)
    hasSentLogoff = False
    # ip = EMPTY_IP

    class Status(object):
        # Mutable holder so nested handlers can update shared state
        # (Python 2 has no `nonlocal`).
        def __init__(self):
            self.serverMac = None
            self.success = False

    def send(data):
        # Transmit and mirror the packet into the dump log ('<' = outbound).
        client.sendpacket(data)
        dump(data, split='<')

    def send_response_id(packetID):
        # Identity response: 0x15 0x04 + local IP + H3C version blob + username.
        ip = get_ipaddr(device)
        # magic1 = '\xc2'
        # magic2 = '\xa3'
        response = '\x15\x04' + pack('!BBBB', *ip) + VERSION_INFO + username
        send(ethernetHeader + make_EAPOL(EAPOL_EAPPACKET,
             make_EAP(EAP_RESPONSE, packetID, EAP_TYPE_ID, response)))

    def send_response_h3c(packetID):
        # H3C "allocation" response carries the plaintext password length+password.
        response = chr(len(password)) + password + username
        eapPacket = ethernetHeader + make_EAPOL(EAPOL_EAPPACKET,
            make_EAP(EAP_RESPONSE, packetID, EAP_TYPE_H3C, response))
        send(eapPacket)

    def send_response_md5(packetID, md5data):
        # CHAP-style response: XOR the zero-padded password with the challenge.
        md5 = password[0:16] + '\x00' * (16 - len(password))
        chap = ''.join(chr(ord(md5[i]) ^ ord(md5data[i])) for i in xrange(16))
        response = pack('!B', len(chap)) + chap + username
        send(ethernetHeader + make_EAPOL(EAPOL_EAPPACKET, make_EAP(
            EAP_RESPONSE, packetID, EAP_TYPE_MD5, response)))

    def send_start():
        send(ethernetHeader + make_EAPOL(EAPOL_START))
        display_info('Sending EAPOL start...')

    def send_logoff():
        display_info('Sending EAPOL logoff...')
        send(ethernetHeader + make_EAPOL(EAPOL_LOGOFF))
        # NOTE(review): this assignment creates a *local* hasSentLogoff; the
        # outer flag read in on_receive never becomes True (py2 closure-write
        # bug) - confirm before relying on the 'Logoff Successfully' branch.
        hasSentLogoff = True

    def on_receive(timestamp, packet):
        # pcap callback: decode one inbound frame and answer EAP requests.
        # print "# recv: " + packet
        eapPacket = packet
        rawPacket = eapPacket
        header, eapPacket = eapPacket[:14], eapPacket[14:]
        dstMac, srcMac, _ = parse_header(header)
        if dstMac != macAddr:
            return
        dump(rawPacket)
        if status.serverMac is None:
            # First reply tells us the authenticator's unicast MAC.
            display_info("Waiting a few seconds please ~~")
            status.serverMac = srcMac
        # NOTE(review): this rebinds a local ethernetHeader, shadowing the
        # outer one; replies below use the server's unicast MAC as intended.
        ethernetHeader = make_ethernet_header(macAddr, status.serverMac, ETHERTYPE_PAE)
        vers, type, eapolLen = unpack("!BBH", eapPacket[:4])
        if type == EAPOL_EAPPACKET:
            code, id, eapLen = unpack("!BBH", eapPacket[4:8])
            if code == EAP_SUCCESS:
                display_info('Got EAP Success. Check it out in your browser and enjoy your network.')
                if not status.success:
                    # Background the process once authenticated.
                    daemonize(stdout='/tmp/yah3c.out.log', stderr='/tmp/yah3c.err.log')
                status.success = True
            elif code == EAP_FAILURE:
                if hasSentLogoff:
                    display_info('Logoff Successfully.')
                else:
                    display_info('Got EAP Failure')
                display_response_info(eapPacket[10:])
            elif code == EAP_REQUEST:
                reqType = unpack("!B", eapPacket[8:9])[0]
                reqData = eapPacket[9: 4 + eapLen]
                if reqType == EAP_TYPE_ID:
                    display_info('Get EAP Request for identity.')
                    send_response_id(id)
                    display_info('Sending EAP response with identity = [%s]' % username)
                elif reqType == EAP_TYPE_H3C:
                    display_info('Got EAP Request for Allocation.')
                    send_response_h3c(id)
                    display_info('Sending EAP response with password...')
                elif reqType == EAP_TYPE_MD5:
                    dataLen = unpack("!B", reqData[0:1])[0]
                    md5data = reqData[1:1 + dataLen]
                    display_info('Get EAP Request for MD5-Challenge')
                    send_response_md5(id, md5data)
                    display_info('Sending EAP response with password...')
                else:
                    display_info('Got unknown request type %i' % reqType)
            elif code == 10:
                # Vendor-specific notification frame; show the embedded text.
                display_response_info(eapPacket[12:])
            elif code == EAP_RESPONSE:
                dump(rawPacket, sys.stdout)
                # display_info('Got Unknown EAP Response')
            else:
                pass
                #display_info('Got unknown EAP code (%i)' % code)
        else:
            display_info('Got unknown EAPOL type %i' % type)

    if not login:
        send_logoff()
        return
    try:
        print "Begin Authentication"
        status = Status()
        send_start()
        client.loop(0, on_receive)
        print "End Authentication"
    except Exception, e:
        display_info('Connection error: ' + str(e))
        exit(-1)
    finally:
        if status.success:
            send_logoff()
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
'''This forks the current process into a daemon. The stdin, stdout, and
stderr arguments are file names that will be opened and be used to replace
the standard file descriptors in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null. Note that stderr is
opened unbuffered, so if it shares a file with stdout then interleaved
output may not appear in the order that you expect. '''
# Do first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit second parent.
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Now I am a daemon!
# Redirect standard file descriptors.
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
if __name__ == "__main__":
username = sys.argv[1]
password = sys.argv[2]
device = sys.argv[3] if len(sys.argv) > 3 else 'en0'
connect(username, password, device)
| [
"lhfcws@163.com"
] | lhfcws@163.com |
b9a48a3fa6173aaf6e71b3ae6f50b4791ceb6e34 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第1章 python基础/Python基础09/1-创建模块/msgnew.py | c70f8f258a39e5f8bc8e8298c973427f1890cdb5 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 142 | py | __all__ = ["test2","Test"]
def test1():
print("---test1---")
def test2():
print("---test2---")
class Test(object):
pass
num = 100
| [
"cdtaogang@163.com"
] | cdtaogang@163.com |
638e2f86d3995c89fc433784cef5fc013f7b773b | 3a7078b0ffd439c0908970963f13720d7e6425b9 | /core/migrations/0003_auto_20200524_1142.py | b1612890f22ed64bb3b4211b2fb2383641c0b0a2 | [] | no_license | Praneeth021/HMS | 8628d8919414d3d38a768b1da8d63ad424216a64 | b321659dfd03936160dfa3b8eee6b99f2223226a | refs/heads/master | 2023-08-02T07:38:36.168487 | 2021-03-31T08:36:45 | 2021-03-31T08:36:45 | 266,320,584 | 0 | 3 | null | 2021-09-22T19:04:52 | 2020-05-23T11:16:52 | CSS | UTF-8 | Python | false | false | 1,143 | py | # Generated by Django 3.0.5 on 2020-05-24 06:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20200523_1946'),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Time', models.TimeField()),
('Date', models.DateField()),
('status', models.CharField(choices=[('Completed', 'Completed'), ('Pending', 'Pending')], max_length=300)),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Doctor')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Patient')),
],
),
migrations.AlterField(
model_name='invoices',
name='date',
field=models.DateField(),
),
migrations.DeleteModel(
name='Appointement',
),
]
| [
"kadampallypraneeth987@gmail.com"
] | kadampallypraneeth987@gmail.com |
673d88e4c0b9fb482f27a8ccb3f5c29cb4fa93de | 99e35a026c233097ebe4544ef205fbc906e4dae6 | /juanig/1/atom_test.py | be586fa8a80d44279e8ab158a68583762385e8b2 | [] | no_license | juanigp/IPDI | eec7816ba6b2b2171f1193ac805b4d6b513fa7db | 5a97851a74a204f2b42f508e113edfa623cc9eeb | refs/heads/master | 2021-07-07T00:25:01.981279 | 2017-09-29T13:21:46 | 2017-09-29T13:21:46 | 103,134,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print('asdasd')
| [
"noreply@github.com"
] | noreply@github.com |
2faa1a2aaf34ff00d50c35afead93ace9bc949fb | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/test/condorSub/dict_Summer20UL16APV_gjets_dr0p4.py | 817994fbf5c7ee0aa69b211e2a51ecb254f23838 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 446 | py | flist = {
"scenario": "Summer20UL16APV",
"args": "emerging=True",
"samples": [
['Summer20UL16APV.GJets_DR-0p4_HT-100To200_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-200To400_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'],
['Summer20UL16APV.GJets_DR-0p4_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8'],
]
}
| [
"kpedro88@gmail.com"
] | kpedro88@gmail.com |
075fa61d2c147605f99584861c7e42e275ed3ee6 | d79175dd8ebe14b32f73bbe6c4ed5096778e0e83 | /src/webcrawler/webcrawler.py | 7ae38231a07101e185d1c6aa737c82f198b928d8 | [] | no_license | buben19/prey | 6d1b4ef57aabe831979209af8d8499aa13be1f59 | 42362a737ee5f573692c6dc3cef46df209fb3d0f | refs/heads/master | 2016-09-06T05:35:11.003719 | 2013-10-14T22:51:34 | 2013-10-14T22:51:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,899 | py | from __future__ import unicode_literals
from .. import process
from .. import url
from ..services import ServiceProvider
from .. import error
from ..config import Config
from .. import repository
from .. import data
from .. import shedule
from _visitedurl import _VisitedUrl
from queue import WebCrawlerQueue
from ..http.fetch import PageFetchConfig, PageFetcher
from ..address import Address
import twisted.web.client
import twisted.internet.defer
import twisted.internet.ssl
import parser
import twisted.names.client
import deferredaction
class WebCrawlerSupervisor(process.BaseProcess):
"""
main class for web crawler process
"""
# urls in queue wich will be scanned in future
__urls = None
# urls which are currently crawling
__progressUrls = None
# urls which are comparing with database, if the are useable
__getUrls = None
# if True, web crawler will strip all url components and fetch only root
# url
__rootUrlOnly = False
# resolver used to resolve all domain names
resolver = None
# respositories
urlReferenceRepository = None
hostRepository = None
wwwPageRepository = None
urlRepository = None
# distribute addresses
addressDistributor = None
urlDistributor = None
def __init__(self, rootUrlOnly = False):
"""
urlRootOnly - if True, get method will strip path, queries and fragments
and fetch only root url
"""
self.__urls = WebCrawlerQueue()
self.__progressUrls = set()
self.__getUrls = set()
self.__rootUrlOnly = rootUrlOnly
# create resolver
self.resolver = twisted.names.client.createResolver()
# repositories
sp = ServiceProvider.getInstance()
self.urlReferenceRepository = sp.getService(repository.WebCrawlerUrlUseableRepository)
self.hostRepository = sp.getService(repository.HostRepository)
self.wwwPageRepository = sp.getService(repository.WWWPageRepository)
self.urlRepository = sp.getService(repository.UrlRepository)
# distributing addresses
self.addressDistributor = sp.getService(data.AddressDistributor)
self.urlDistributor = sp.getService(data.UrlDistributor)
# register self for consuming urls
self.urlDistributor.registerConsumer(self)
# get non-fetched addresses from the repository and fetch them
self.wwwPageRepository.getNonFetchedAddresses().addCallback(
self.__nonFetchedAddresses)
def __nonFetchedAddresses(self, results):
"""
add addresses into fetch queue
"""
for address, port, serviceName in results:
fetchUrl = url.Url(serviceName + '://' + address + ':' + unicode(port))
if fetchUrl.hasStandardPort():
fetchUrl.explicitPort = False
self.newUrl(fetchUrl, 0)
def runTask(self):
d = twisted.internet.defer.Deferred()
try:
u, scanLevel = self.__urls.popleft()
except IndexError as e:
d.errback(error.TaskError("WebCrawlerSupervisor: pop url failed"))
else:
self.newTask(
WebCrawlerTask(u, scanLevel),
d)
return d
def get(self, url):
if self.__rootUrlOnly:
del url.query
del url.path
del url.fragment
self.newUrl(url, 0)
def getWaitingTaskCount(self):
return len(self.__urls)
def newUrl(self, url, scanLevel):
"""
add new url into local queue
url can't be already in queue or in progress
in additional url can't be already associated with any page (useable condition)
"""
def c(isUseable, u, sl):
self.__getUrls.remove(u)
if isUseable:
self.__urls.append(u, sl)
ServiceProvider.getInstance().getService(shedule.Sheduler).sheduleNext()
if not url in self.__urls and \
not url in self.__progressUrls and \
not url in self.__getUrls:
self.__getUrls.add(url)
self.urlReferenceRepository.isUseable(
url).addCallback(
c,
url,
scanLevel)
def urlInProgress(self, url):
"""
store url as url in progress
if this url is in local queue, remove it
"""
if url in self.__urls:
self.__urls.remove(url)
self.__progressUrls.add(url)
def urlFinished(self, url):
"""
progress url has been finished
"""
self.__progressUrls.remove(url)
def getMaxTaskCount(self):
return 1
def taskFinished(self, task):
print "-" * 100
process.BaseProcess.taskFinished(self, task)
class WebCrawlerTask(process.BaseTask):
"""
object representig single task
"""
# id of fetched page
pageId = None
# first url passed into constructor
url = None
# scan level for this task
scanLevel = None
# track all visited url information
# every redirect creates new frame in this structure
visitedUrls = None
# store resolved locations and their addresses
resolveHistory = None
# list of additional informations for page
additionalInfo = None
def __init__(self, url, scanLevel):
self.url = url
self.scanLevel = scanLevel
self.visitedUrls = []
self.resolveHistory = {}
self.additionalInfo = []
def start(self):
print "crawling: %s, scan level: %d" % (str(self.url), self.scanLevel)
self.__createUrlFrame(self.url)
self.nextIteration()
def nextIteration(self):
"""
start processing last url
"""
# get id for url
self.newCallback(
self.supervisor.urlRepository.generateUrlId(),
deferredaction.DeferredUrlId())
def pageFetchSuccess(self, page):
self.lastVisited().pageFetched = True
self.lastVisited().page = page
# examine content of page
self.__processPage(page)
def pageFetchError(self, reason):
self.lastVisited().pageFetched = False
self.lastVisited().pageError = reason
def lastVisited(self):
return self.visitedUrls[-1]
def isUrlVisited(self, url):
"""
returns True, if url is already stored in visited urls
"""
for i in self.visitedUrls:
if i.url == url:
return True
return False
def getUrlId(self, url):
"""
Get id of given url. Raise ValueError if url isn't in visited urls
"""
for i in self.visitedUrls:
if i.url == url:
return i.urlId
raise ValueError, "url not found"
def __createUrlFrame(self, url):
"""
create new url frame
for successful creation some values can't be None
ValueError is raised if this condition isn't met
"""
# mark url as in progress
self.supervisor.urlInProgress(url)
if len(self.visitedUrls) > 0:
if self.lastVisited().urlId is None or \
self.lastVisited().fetchAddress is None or \
self.lastVisited().pageFetched is None or \
(self.lastVisited().page is not None and \
self.lastVisited().pageError is not None):
# every time, when new frame is created, some fields inf
# visitedUrls can't be None
# must be set - debug purpose only
raise ValueError, "incomplete current visited url structure"
self.visitedUrls.append(_VisitedUrl(url))
def gotUrlId(self, urlId):
"""
store id for current url
if current url isn't firs, id will be stored as redirect url id for
previous one
"""
self.lastVisited().urlId = urlId
# save redirect url id
if len(self.visitedUrls) > 1:
self.visitedUrls[-2].redirectUrlId = urlId
def doFetch(self):
"""
fetch last url
"""
print "fetching:", unicode(self.lastVisited().url), "address:", self.lastVisited().fetchAddress
fetchConfig = PageFetchConfig(
self.lastVisited().url)
fetcher = PageFetcher(
fetchConfig,
self.lastVisited().fetchAddress,
self.lastVisited().url.port)
# registed callbacks
self.newCallbacks(
fetcher.getPage(),
deferredaction.DeferredPageSuccess(),
deferredaction.DeferredPageError())
def __processPage(self, page):
if page.decodedContent is not None:
p = parser.WebCrawlerParser()
print "feeding...", type(page.decodedContent)
p.feed(page.decodedContent)
self.lastVisited().pageTitle = p.getTitle()
# process hrefs
print "starting processing hrefs"
urls = set()
for i in p.getHrefs():
try:
u = url.Url.join(self.lastVisited().url, i)
except ValueError:
continue
if not u.scheme.lower() in ["http", "https"]:
continue
u.fragment.clear()
urls.add(u)
print "hrefs processed"
for u in urls:
if u.netloc == self.lastVisited().url.netloc:
if self.scanLevel < Config.webCrawlerScanLevel:
self.supervisor.newUrl(u, self.scanLevel + 1)
else:
self.supervisor.urlDistributor.distribute(u)
def saveUrls(self):
"""
save all visited urls in reverse order
"""
def c3(result, deferred, updateUrlId):
self.supervisor.urlRepository.updateRedirectUrlId(
self.visitedUrls[-1].urlId,
updateUrlId).addCallback(c2, deferred)
def c2(result, deferred):
print "url save complete"
deferred.callback(result)
def c(result, deferred, index, updateUrlId):
redirectUrlId = self.visitedUrls[index].redirectUrlId
# true at first iteration
if not index < len(self.visitedUrls) - 1:
if self.__isUrlIdInVisitedUrls(redirectUrlId):
updateUrlId = redirectUrlId
redirectUrlId = None
print "saving url: %s - id: %d - redirect id: %s" % (str(self.visitedUrls[index].url), self.visitedUrls[index].urlId, str(redirectUrlId))
d2 = self.supervisor.urlRepository.saveUrl(
self.visitedUrls[index].urlId,
self.visitedUrls[index].url,
redirectUrlId)
if index > 0:
d2.addCallback(c, deferred, index - 1, updateUrlId)
else:
if updateUrlId is None:
# not saving urls with infinite loop
d2.addCallback(c2, deferred)
else:
# saving inifinite redirect loop, first saved url needs
# to be updated with proper redirect urlId
d2.addCallback(c3, deferred, updateUrlId)
d = twisted.internet.defer.Deferred()
c(None, d, len(self.visitedUrls) - 1, None)
return d
def __isUrlIdInVisitedUrls(self, urlId):
"""
returns true if url ID is in visited urls
"""
for i in self.visitedUrls:
if i.urlId == urlId:
return True
return False
def associateUrlsAndHosts(self):
"""
associate all visited urls and their host id
"""
d = twisted.internet.defer.Deferred()
def c(result, deferred):
print "association complete"
deferred.callback(None)
deferreds = []
for i in self.visitedUrls:
urlId = i.urlId
for hostId in i.hostIds:
print "associating url ID: %d with host ID: %d" % (urlId, hostId)
deferreds.append(
self.supervisor.urlRepository.associateUrlWithHost(
urlId, hostId))
if len(deferreds) > 0:
dl = twisted.internet.defer.DeferredList(deferreds)
dl.addCallback(c, d)
else:
print "no url-host association happend"
d.callback(None)
return d
def savePageFetchInfo(self):
"""
associate urls and page, fill getch info
urls and page must be already saved
"""
d = twisted.internet.defer.Deferred()
def c(result, deferred):
print "save page fetch info completed"
deferred.callback(None)
deferreds = []
for i in self.visitedUrls:
if i.pageFetched:
print "saving page fetch info - page ID: %d, URL ID: %d, %s %s %s" % (self.pageId, i.urlId, i.page.version, i.page.status, i.page.message)
deferreds.append(
self.supervisor.wwwPageRepository.savePageAndUrlFetchInfo(
self.pageId,
i.urlId,
i.page.version,
i.page.status,
i.page.message))
else:
print "saving page error extras - page ID: %d, URL ID: %d, message: %s" % (self.pageId, i.urlId, i.pageError)
deferreds.append(
self.supervisor.wwwPageRepository.savePageAndUrlErrorExtras(
self.pageId,
i.urlId,
i.pageError))
dl = twisted.internet.defer.DeferredList(deferreds).addCallback(c, d)
return d
def savePageAdditionalInfo(self):
d = twisted.internet.defer.Deferred()
def c(result, deferred):
deferred.callback(None)
deferreds = []
for infoClass, message in self.additionalInfo:
deferreds.append(
self.supervisor.wwwPageRepository.saveAdditionalInfo(
self.pageId,
infoClass,
message))
if len(deferreds) > 0:
twisted.internet.defer.DeferredList(deferreds).addCallback(c, d)
else:
d.callback(None)
return d
def processNextUrl(self, url):
self.__createUrlFrame(url)
self.nextIteration()
def setAdditionalInfo(self, infoClass, message):
self.additionalInfo.append((infoClass, message))
def allUrlsFinished(self):
"""
finish all visited urls in supervisor
"""
for i in self.visitedUrls:
self.supervisor.urlFinished(i.url)
| [
"ivo.slanina@gmail.com"
] | ivo.slanina@gmail.com |
74fe562eef236246fb912f94ff99a59075f7bede | 1680f7d32a3ecc4a4b1eb49c931e801315e220a0 | /Chat-Room-server/chat_server.py | d4f39858c8b9c7b3132072c2cbacb7d77aa79f27 | [] | no_license | Tanishq-181500751/chat | 71dafb13d19c9a9036d4ba4563db8bdf3e348afa | 3ef338b3311d230676687c8f8b722c642f497373 | refs/heads/main | 2023-01-24T22:23:10.176865 | 2020-11-24T04:29:09 | 2020-11-24T04:29:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | import socket
import select
#from _thread import start_new_thread
from _thread import *
import sys
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
"""
the first argument AF_INET is the address domain of the socket. This is used when we have an Internet Domain
with any two hosts
The second argument is the type of socket. SOCK_STREAM means that data or characters are read in a continuous flow
"""
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if len(sys.argv) != 3:
print("Correct usage: script, IP address, port number")
exit()
IP_address = str(sys.argv[1])
Port = int(sys.argv[2])
server.bind((IP_address, Port))
#binds the server to an entered IP address and at the specified port number. The client must be aware of these parameters
server.listen(100)
#listens for 100 active connections. This number can be increased as per convenience
list_of_clients=[]
def clientthread(conn, addr):
conn.send(b"Welcome to this chatroom!")
#sends a message to the client whose user object is conn
while True:
try:
message = conn.recv(2048).decode('utf-8')
if message:
print("<" + addr[0] + "> " + message)
message_to_send = "<" + addr[0] + "> " + message.encode()
broadcast(message_to_send.encode(),conn)
#prints the message and address of the user who just sent the message on the server terminal
else:
remove(conn)
except:
continue
def broadcast(message,connection):
for clients in list_of_clients:
if clients!=connection:
try:
clients.send(message.decode('utf-8'))
except:
clients.close()
remove(clients)
def remove(connection):
if connection in list_of_clients:
list_of_clients.remove(connection)
while True:
conn, addr = server.accept()
"""
Accepts a connection request and stores two parameters, conn which is a socket object for that user, and addr which contains
the IP address of the client that just connected
"""
list_of_clients.append(conn)
print(addr[0] + " connected")
#maintains a list of clients for ease of broadcasting a message to all available people in the chatroom
#Prints the address of the person who just connected
start_new_thread(clientthread,(conn,addr))
#creates and individual thread for every user that connects
conn.close()
server.close()
| [
"noreply@github.com"
] | noreply@github.com |
59e28a9b68bea9919ede0539111e0ca41e81ae47 | affa330082413701489d0f7bc6d3e9c2c2f9475e | /src/applications/maps/views/lugares.py | 251d39ac411f0c4debef8ad22d012026e4ea63cf | [] | no_license | pixelead0/comida.chingona | e12583d75b4692d391b6f2cfb9df25c4a5bfc057 | 2dc2225f63c0f1c3bd45c8cda9f5ab9e62e7bc2b | refs/heads/devel | 2020-04-25T23:47:32.553925 | 2019-05-08T18:11:30 | 2019-05-08T18:11:30 | 173,157,158 | 6 | 5 | null | 2019-05-08T17:35:41 | 2019-02-28T17:32:33 | Python | UTF-8 | Python | false | false | 579 | py |
# Django
from django.shortcuts import render
from django.views import View
# Rest framework
from rest_framework import viewsets
# Models
from applications.maps.models import FoodLocation
# Serializers
from applications.maps.serializers import FoodLocationSerializer
class Lugares(View):
def get(self, request):
return render(request, template_name='maps/map.html')
class LugaresViewSet(viewsets.ModelViewSet):
"""Vista para hacer un CRUD con los lugares de comida"""
queryset = FoodLocation.objects.all()
serializer_class = FoodLocationSerializer
| [
"emmanueloctaviomc@gmail.com"
] | emmanueloctaviomc@gmail.com |
d5b6772f84bf179d349a4fe7da02286d7a904d48 | 3d9162cc7cbc52363288d48d67e8c26df8b978d8 | /R.1.3.py | 2ed284470da0806b59d087e95b89f70527fa89dc | [] | no_license | Kadekraiii/E1E119030_KADEK-RAI-RIESKA-JANWARDA | 36938b0edc47322779ed3d3ad1c33d7eb5ed0749 | 7030b01b4b2c9bc982525468a8f64de85e0ba339 | refs/heads/main | 2023-05-09T11:17:28.514324 | 2021-05-31T14:32:13 | 2021-05-31T14:32:13 | 372,514,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | def minmax(data):
largest = data[0]
smallest = data[0]
for item in data:
if item > largest:
largest = item
elif item < smallest:
smallest = item
return smallest, largest
alpha = [2, 2, 3, 4, 5, 6, 7, 8, 99]
print(minmax(alpha))
| [
"noreply@github.com"
] | noreply@github.com |
967d83ce11529cb1c9991e6649f585da2cde4f67 | c43c926da105fcb2374e330c45898edeff704524 | /recognize_picture.py | 6edac2b6c2d658c1fdaaf5ccf63745e853d9b646 | [] | no_license | Julietchy/face_rec | b001de4a17376e03d7cca96a4fe3b87b2c27cb3d | babd2ade920a3d4cbf98952e17654bfb6fefb434 | refs/heads/master | 2020-09-13T09:08:52.380988 | 2019-11-21T04:44:54 | 2019-11-21T04:44:54 | 222,720,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
# parse the arguments
image_name = input('Type image name with extension: \n')
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", default='images/' + image_name,
help="path to input image")
ap.add_argument("-d", "--detector", default='face_detection_model',
help="path to OpenCV's deep learning face detector")
ap.add_argument("-m", "--embedding-model", default='openface_nn4.small2.v1.t7',
help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-r", "--recognizer", default='output/recognizer.pickle',
help="path to model trained to recognize faces")
ap.add_argument("-l", "--le", default='output/le.pickle',
help="path to label encoder")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load face detector
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"],
"res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# load face embedding model
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
# load face recognition model and label encoder
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())
# load the image
image = cv2.imread(args["image"])
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
# construct a blob from the image
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(image, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# get the confidence (probability)
confidence = detections[0, 0, i, 2]
# filter out weak detections
if confidence > args["confidence"]:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# extract the face ROI
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
(0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# perform classification to recognize the face
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(image, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0) | [
"noreply@github.com"
] | noreply@github.com |
2cc4d13aa189b09f88a81b8c31e9a26b9171d9d5 | 8e6789fc0ad3c0def5b82c109b2cb45458c128ee | /chemical_factors.py | 35d73437794a45609d88311a7f451050977427dc | [] | no_license | harrigon/wine_data_analysis | 8d1957c185e82b73e21ad4b74796142af4dd5d6f | 65306b7edc582535925f263b1f0aeaf665837a21 | refs/heads/master | 2020-04-26T15:40:57.641069 | 2019-03-04T01:42:30 | 2019-03-04T01:42:30 | 173,654,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | import pandas as pd
import statsmodels.api as sm
def red_wine_analysis():
pd.set_option('display.expand_frame_repr', False)
df = pd.read_csv('winequality-red.csv', delimiter=';')
print_data(df)
Y = df[['quality']]
X = df[['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol']]
model = sm.OLS(Y, X.astype(float)).fit()
predictions = model.predict(X)
print_model = model.summary()
print(print_model)
def white_wine_analysis():
pd.set_option('display.expand_frame_repr', False)
df = pd.read_csv('winequality-white.csv', delimiter=';')
print_data(df)
Y = df[['quality']]
X = df[['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol']]
model = sm.OLS(Y, X.astype(float)).fit()
predictions = model.predict(X)
print_model = model.summary()
print(print_model)
def print_data(df):
print(df)
white_wine_analysis()
red_wine_analysis()
white_wine_analysis()
| [
"noreply@github.com"
] | noreply@github.com |
9d58305a2fb6d900057396f8c2a933b4826793aa | d0242cb753f810c075ea16613be321a2fe3f0e45 | /handle/orderHandle.py | a9f81e73f0e99701c513932b55c14dd695d13766 | [] | no_license | GEOAL1/webzebra | 3462dd6e4b7369ae102e5b54747a7d6e68aa50ab | 96bbd285aaa16a7e5acb3bf138ee95fa2eeb3c59 | refs/heads/master | 2021-01-22T13:02:52.469759 | 2015-06-18T01:21:56 | 2015-06-18T01:21:56 | 36,656,788 | 0 | 0 | null | 2015-06-01T11:35:58 | 2015-06-01T11:35:54 | Python | UTF-8 | Python | false | false | 3,983 | py | #/usr/bin/python
#coding: utf-8
import tornado
from tornado.web import authenticated
from tornado import gen
from error.zebraError import *
from handle.baseHandle import BaseHandler
from model.jsonTemplate import JsonTemplate
from utils.Constants import SessionUserID
class OrderBikeHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
@authenticated
def get(self):
x = yield self.get_result()
self.write(x)
self.finish()
@tornado.gen.coroutine
def get_result(self):
try:
try:
user_id = self.session[SessionUserID]
bike_id = self.get_argument("bikeID")
except Exception as e:
raise InputArgsError()
self.orderService.orderBike(user_id,bike_id)
result = JsonTemplate.newJsonRes().setBody("下单成功")
except ZebraError as e:
result = JsonTemplate.newZebraErrorRes(e)
except Exception as e:
result = JsonTemplate.newErrorJsonRes().setErrMsg("您已经订车,或刷新后再试")
finally:
raise gen.Return(result.toJson())
class GetOrderHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
@authenticated
def get(self):
x = yield self.get_result()
self.write(x)
self.finish()
@tornado.gen.coroutine
def get_result(self):
try:
try:
order_id = self.get_argument("order_id")
except Exception as e:
raise InputArgsError()
order = self.orderService.getUserOrderByOrderID(order_id)
if order is None:
raise OrderNotFoundError()
if order["user_id"] != long(self.session[SessionUserID]):
raise OrderOwnerError()
result = JsonTemplate.newJsonRes().setBody(order)
except ZebraError as e:
result = JsonTemplate.newZebraErrorRes(e)
except Exception as e:
result = JsonTemplate.newErrorJsonRes().setErrMsg(e.message)
finally:
raise gen.Return(result.toJson())
pass
class FinishOrderHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
@authenticated
def get(self):
x = yield self.get_result()
self.write(x)
self.finish()
@tornado.gen.coroutine
def get_result(self):
try:
try:
order_id = self.get_argument("order_id")
user_id = self.session[SessionUserID]
except Exception as e:
raise InputArgsError()
price = self.orderService.finishOrder(order_id,user_id)
result = JsonTemplate.newJsonRes().setErrMsg("订单成功,消费金额 %d 币" % (price) )
except ZebraError as e:
result = JsonTemplate.newZebraErrorRes(e)
except Exception as e:
print e
result = JsonTemplate.newErrorJsonRes().setErrMsg(e.message)
finally:
raise gen.Return(result.toJson())
pass
class GetOrderByUserIDHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
@authenticated
def get(self):
x = yield self.get_result()
self.write(x)
self.finish()
@tornado.gen.coroutine
def get_result(self):
try:
user_id = self.session[SessionUserID]
order = self.orderService.getUserOrderByUserID(user_id)
if (order is None):
raise UserOrderNotFoundError()
body = {"order_id": order["order_id"]}
result = JsonTemplate.newJsonRes().setBody(body)
except ZebraError as e:
result = JsonTemplate.newZebraErrorRes(e)
except Exception as e:
result = JsonTemplate.newErrorJsonRes().setErrMsg(e.message)
finally:
raise gen.Return(result.toJson())
pass
| [
"3163504123@163.com"
] | 3163504123@163.com |
bbe5414c16b319441c787441bfb51b49cf034eb6 | fb2ba31068de059c1c67299ea9104435fc08061d | /phones/migrations/0001_initial.py | e898c3024e250d0b1c602f26a12e2d423be6c3f3 | [] | no_license | bustabg/phones-shop | 5a66d75ab0ec1a71ea63a799a5d817d5e5d51ed5 | d2b5b3f7ff35468818a0f26ac54ca0bc73900d69 | refs/heads/master | 2020-05-18T17:06:32.299822 | 2019-05-24T19:22:42 | 2019-05-24T19:22:42 | 184,545,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # Generated by Django 2.2.1 on 2019-05-04 09:37
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '__first__'),
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('brand', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_model', models.CharField(max_length=20)),
('description', models.TextField()),
('price', models.PositiveIntegerField()),
('image_url', models.URLField()),
('screen_size', models.FloatField(validators=[django.core.validators.MinValueValidator(2), django.core.validators.MaxValueValidator(10)])),
('brand', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='phones.Brand')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.ProfileUser')),
],
),
]
| [
"jovanotybg@gmail.com"
] | jovanotybg@gmail.com |
7f808f375196070fa7c9f4d27ff1895e87836d02 | d78c933c6040d5bc77c0f594b919714a8f47330b | /Crawling with selenium/manage.py | 80b9764c69947d12683a6162260292d6048ace84 | [] | no_license | man-007/Django | 27751f71a477249d900d96e7ab3296cd0848fe36 | d1b2f96bf956f601d2c4262f3d933e894464386b | refs/heads/main | 2023-07-18T18:07:56.588512 | 2021-09-23T05:57:48 | 2021-09-23T05:57:48 | 371,322,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HighBreed_Development.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: the usual cause is a missing install or an
        # inactive virtualenv, not a broken Django.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to Django's CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"manastaunk2021@gmail.com"
] | manastaunk2021@gmail.com |
10901f0c1bed95c01db04a8642c7e41972431fd7 | dd5c2c9384d16557c956b05dc27a7c192eae133f | /core/src/parsers/csgo/tradeskinsfast.py | b9f693346ab0289e6373f4d810564fca49be0910 | [] | no_license | yuriizinets/tradehub | d351ccae47d6bf2f889da71327bac5478bfc6121 | 6231f4169f0d2a1672ec5f2c20540960a3d2995f | refs/heads/master | 2022-03-05T22:03:13.019451 | 2018-05-23T14:03:07 | 2018-05-23T14:03:07 | 123,582,618 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import json
def tradeskinsfast(wrapper_kwargs, scraper):
    """Scrape the tradeskinsfast.com bot inventory and build a cache update.

    :param wrapper_kwargs: mapping providing 'game' and 'market' keys used to
        shape the returned cache-update dict.
    :param scraper: object with a requests-like ``get(url, headers=...)``
        returning a response whose ``text`` is the JSON inventory payload.
    :return: ``{game: {item_name: {'<market>|price': ..., '<market>|available': True}}}``
    """
    game = wrapper_kwargs.get('game')
    market = wrapper_kwargs.get('market')

    # The endpoint expects an AJAX-style request coming from the site itself.
    headers = {
        "X-Requested-With": "XMLHttpRequest",
        "Referer": "https://tradeskinsfast.com/",
    }
    response = scraper.get('https://tradeskinsfast.com/ajax/botsinventory', headers=headers)
    inventory = json.loads(response.text)['response']

    update = {game: {}}
    for entry in inventory:
        item_name = entry['m']
        # Skip malformed entries whose market name is not a plain string.
        if not isinstance(item_name, str):
            continue
        update[game][item_name] = {
            '{0}|price'.format(market): entry['v'],
            '{0}|available'.format(market): True,
        }
    return update
| [
"yura.zinets@gmail.com"
] | yura.zinets@gmail.com |
88343b3c7fb481f048cb6b5c6bfecd78aa72b015 | 7bf471faff78ba34aaf7b33470e12999a876a141 | /trpycore/thread/result.py | b3c73ef96806b9cc71f8947d5e0e417e66fcf0df | [] | no_license | techresidents/trpycore | dfadf7c172b7843ec3dab205b5db364088a530d2 | af243f342bc46ba0cd2ec54f415d5ba526a036a6 | refs/heads/master | 2016-09-06T04:43:45.706574 | 2013-10-09T15:29:30 | 2013-10-09T15:29:30 | 19,194,308 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | import threading
class AsyncResult(object):
    """A placeholder for the outcome of an asynchronous operation.

    Async methods can hand one of these back to callers instead of accepting a
    completion callback: the caller blocks in :meth:`get` (without busy
    waiting) until the producer stores an outcome via :meth:`set` or
    :meth:`set_exception`.
    """

    class Timeout(Exception):
        pass

    def __init__(self):
        self.event = threading.Event()
        self.result = None
        self.exception = None

    def ready(self):
        """Return True once a result or an exception has been stored."""
        return self.event.is_set()

    def get(self, block=True, timeout=None):
        """Return the stored result, or re-raise the stored exception.

        When ``block`` is true, waits up to ``timeout`` seconds for an
        outcome. Raises :class:`Timeout` if no outcome is available in time
        (or immediately when ``block`` is false and nothing is stored yet).
        """
        if block and not self.ready():
            self.event.wait(timeout)
        if not self.ready():
            raise self.Timeout("Timeout: result not ready")
        if self.exception is not None:
            raise self.exception
        return self.result

    def set(self, value=None):
        """Store a successful result and wake all waiters."""
        self.result = value
        self.event.set()

    def set_exception(self, exception):
        """Store a failure and wake all waiters; get() will re-raise it."""
        self.exception = exception
        self.event.set()
| [
"jmullins@techresidents.com"
] | jmullins@techresidents.com |
b69becbe7fb0346b0b14a5bb4f78f85817d62142 | 1779b7bb2e7d6039011888a7d2a46717e9626993 | /matasano crypto/1-1.py | 8dec616f66ae3a1a96bc6e0e2d214eab70e8b7e0 | [] | no_license | zastari/Worked_Solutions | 2908b15344a65025af78afde369590209b520733 | cb31a52e61913a349de167ffca37afb2f7f8fa75 | refs/heads/master | 2020-06-02T09:36:54.274839 | 2016-01-11T09:07:39 | 2016-01-11T09:07:39 | 39,709,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import byte_conversions
string = byte_conversions.change_format("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d", "hex2bin")
print "%s" % byte_conversions.change_format(string, "bin2base64")
| [
"zastari@gmail.com"
] | zastari@gmail.com |
190d1b1092d241c85f0feb82ec4fbde905277a25 | 871e1b0295c0fbbfca8191236d674866cf62ff01 | /TrainB5_NAR1_imagenet_64.py | 532b33d65353507986ad9cfe7bb6f9818cee5de2 | [] | no_license | Peckkie/USAI_ABnormal_Screening | ce31a813e9303a7d43def912ab731cc633268cb7 | 82cd63ac9ab72fbe68eae254c15c7bf7ef906022 | refs/heads/master | 2023-02-16T13:32:33.678500 | 2021-01-07T02:36:35 | 2021-01-07T02:36:35 | 277,981,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,080 | py | import PIL
from keras import models
from keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
import os
from tensorflow.keras import callbacks
import pandas as pd
from keras.utils import generic_utils
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 64
epochs = 200
#Train
dataframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/train.csv')
base_dir = '/media/tohn/SSD/ImageForTrainTest/'
os.chdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
#validation
valframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/validation.csv')
validation_dir = os.path.join(base_dir, 'validation')
from efficientnet.keras import EfficientNetB5 as Net
from efficientnet.keras import center_crop_and_resize, preprocess_input
conv_base = Net(weights='imagenet')
height = width = conv_base.input_shape[1]
input_shape = (height, width, 3)
# loading pretrained conv base model
conv_base = Net(weights='imagenet', include_top=False, input_shape=input_shape)
# create new model with a new classification layer
x = conv_base.output
global_average_layer = layers.GlobalAveragePooling2D(name = 'head_pooling')(x)
dropout_layer_1 = layers.Dropout(0.50,name = 'head_dropout')(global_average_layer)
prediction_layer = layers.Dense(2, activation='softmax',name = 'prediction_layer')(dropout_layer_1)
model = models.Model(inputs= conv_base.input, outputs=prediction_layer)
model.summary()
#showing before&after freezing
print('This is the number of trainable layers '
'before freezing the conv base:', len(model.trainable_weights))
#conv_base.trainable = False # freeze เพื่อรักษา convolutional base's weight
for layer in conv_base.layers:
layer.trainable = False
print('This is the number of trainable layers '
'after freezing the conv base:', len(model.trainable_weights)) #freez แล้วจะเหลือ max pool and dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.5,1.5],
shear_range=0.4,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe = dataframe,
directory = train_dir,
x_col = 'Path Crop',
y_col = 'Class',
target_size = (height, width),
batch_size=batch_size,
color_mode= 'rgb',
class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
dataframe = valframe,
directory = validation_dir,
x_col = 'Path Crop',
y_col = 'Class',
target_size = (height, width),
batch_size=batch_size,
color_mode= 'rgb',
class_mode='categorical')
os.chdir('/media/tohn/SSD/trainEffbyB/R1')
root_logdir = '/media/tohn/SSD/trainEffbyB/R1/my_logsB5imagenet_64'
def get_run_logdir():
    """Return a unique, timestamped run directory path under ``root_logdir``."""
    import time
    timestamp = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir, timestamp)
run_logdir = get_run_logdir()
tensorboard_cb = callbacks.TensorBoard(log_dir = run_logdir)
# os.makedirs("./models", exist_ok=True)
def avoid_error(gen):
    """Yield (data, labels) batches from ``gen``, skipping batches that raise.

    Keras data generators occasionally fail on individual samples (e.g.
    truncated image files); this wrapper drops the failing batch and keeps
    training going instead of aborting.

    Fixes over the original bare ``except``:
      * generator exhaustion (StopIteration) now terminates cleanly instead
        of spinning in an infinite busy loop;
      * KeyboardInterrupt/SystemExit propagate so the run can be stopped.
    """
    while True:
        try:
            data, labels = next(gen)
            yield data, labels
        except StopIteration:
            # Underlying generator is exhausted: stop iteration.
            return
        except Exception:
            # Skip the failing batch and move on to the next one.
            pass
#Training
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
avoid_error(train_generator),
steps_per_epoch= len(dataframe)//batch_size,
epochs=epochs,
validation_data=avoid_error(test_generator),
validation_steps= len(valframe) //batch_size,
callbacks = [tensorboard_cb])
model.save('./models/B5_R1_imnet_64.h5')
| [
"w_yupaporn@kkumail.com"
] | w_yupaporn@kkumail.com |
4c838533a9661266c01b231b2bed4bf43930a1d2 | e18505ec608ca04f71a46879ec58c26e77949204 | /games/easy_game/config.py | afc979b1ebd65b145802b50022b04b3979cba29b | [] | no_license | Diane-Su/MLGame | 241c1c97a95ca72daba4f50f6daca0c5a6a8c766 | 871c46c5d3588bc1b08699f5f0ba8a79dd626682 | refs/heads/master | 2023-07-11T18:06:36.515574 | 2021-08-12T08:10:14 | 2021-08-12T08:10:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
from os import path
from mlgame.utils.parse_config import read_json_file, parse_config
from .src.game import EasyGame
# Load the game's JSON configuration shipped next to this module.
config_file = path.join(path.dirname(__file__), "game_config.json")
config_data = read_json_file(config_file)
# Version string taken verbatim from the config file.
GAME_VERSION = config_data["version"]
GAME_PARAMS = parse_config(config_data)
# will be equal to config. GAME_SETUP["ml_clients"][0]["name"]
GAME_SETUP = {
    "game": EasyGame,
    "ml_clients": EasyGame.ai_clients(),
    # "dynamic_ml_clients":True
}
| [
"kylingithubdev@gmail.com"
] | kylingithubdev@gmail.com |
93a5137d1fb60cc5728e3a2a00b8b96d62ddf263 | bb0966136bad11e65939c24b91d3816eda71516f | /chat/models.py | 0b12da74960183840c124894f319563784837cc7 | [] | no_license | Vasu9/anonymous-chat | e56b459438be05a53951b997c3c7183f19d9aa18 | edacd4054c35322db72c4c1d38ca1e6a5d6d7852 | refs/heads/master | 2021-01-16T20:48:17.708856 | 2016-01-05T10:20:45 | 2016-01-05T10:20:45 | 47,538,938 | 0 | 0 | null | 2015-12-07T08:24:15 | 2015-12-07T08:24:15 | null | UTF-8 | Python | false | false | 173 | py | from django.db import models
# Create your models here.
class ChatRoom(models.Model):
    # Human-readable room name shown in listings.
    name = models.CharField(max_length=200)
    def __unicode__(self):
        # Python 2 string representation (this codebase predates __str__ usage).
        return self.name
| [
"vasusweety.81@gmail.com"
] | vasusweety.81@gmail.com |
9d11070b2ef693e53d7a4f827969804573048d8f | 67e3e44f2df1386c2bc205e35682651f8b17f3bb | /bsr/specgram_utils.py | db24ade153480871774e7d3eb6e2101972c1a9e4 | [] | no_license | vlampreia/birdsong-recognition | fa4a04cb8ba25f324fe9bc64076d3eea4a7efc53 | ac593eb36e3f7cc87cfdcc3a715ab5e99c8b9606 | refs/heads/master | 2021-03-22T03:16:10.421029 | 2018-06-16T12:07:27 | 2018-06-16T12:07:27 | 71,348,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | from utils import *
import cv2
import numpy as np
import matplotlib
import pylab
import os
import sys
import pickle
def load_specgram(path):
    # Load a spectrogram previously saved by write_specgram():
    # `<path>.png` holds the grayscale image (cv2 flag 0 = grayscale), and
    # `<path>.pkl` holds the frequency and time axes as two sequential pickles
    # in the same order write_specgram() dumped them.
    # Returns a (sgram, freqs, times) tuple.
    sgram = cv2.imread(''.join([path, '.png']), 0)
    # NOTE(review): text-mode 'r' for pickle works on Python 2 only; Python 3
    # would need 'rb' -- confirm target interpreter before porting.
    with open(''.join([path, '.pkl']), 'r') as f:
        freqs = pickle.load(f)
        times = pickle.load(f)
    return (sgram, freqs, times)
def write_specgram(pxx, freqs, times, path):
    # Save a spectrogram image to `<path>.png` and its axes to `<path>.pkl`,
    # creating the parent directory if needed. Errors are reported to stdout
    # and swallowed (best-effort write); the function returns None either way.
    dpath = os.path.split(path)[0]
    if not os.path.exists(dpath):
        os.makedirs(dpath)
    try:
        # origin='lower' puts low frequencies at the bottom of the image.
        matplotlib.image.imsave(
            ''.join([path, '.png']),
            pxx,
            origin='lower',
            cmap=pylab.get_cmap('Greys')
        )
        # Dump the two axes in a fixed order; load_specgram() reads them back
        # in the same order.
        with open(''.join([path, '.pkl']), 'w') as f:
            pickle.dump(freqs, f)
            pickle.dump(times, f)
    except IOError as err:
        print '\terror writing specgram file: {}'.format(path), err
    except NameError as err:
        print '\terror writing specgram file: {}'.format(path), err
    except ValueError as err:
        print '\terror writing specgram file: {}'.format(path), err
    except:
        # Catch-all keeps batch processing alive; only the exception type is shown.
        print '\terror writing specgram file: {}'.format(path), sys.exc_info()[0]
def make_specgram(pcm, samplerate):
    # Compute a log-power spectrogram of `pcm`, restricted to 100 Hz - 10 kHz,
    # using a 256-point FFT, Hamming window and 75% overlap.
    # Returns (pxx, freqs, times): pxx is uint8 dB power, freqs the retained
    # frequency bins, times the frame times.
    fs = samplerate
    nfft = 256
    window = np.hamming(nfft)
    noverlap = nfft * 0.75
    vmin = None  # unused
    vmax = None  # unused
    min_freq = 100
    max_freq = 10000
    pxx, freqs, times = matplotlib.mlab.specgram(
        pcm, NFFT=nfft, Fs=fs, noverlap=noverlap, window=window
    )
    # Keep only the 100 Hz - 10 kHz band.
    freq_mask = (freqs >= min_freq) & (freqs <= max_freq)
    pxx = pxx[freq_mask,:]
    # dB scale; clip first so log10 never sees zero power.
    pxx = 10*np.log10(pxx.clip(min=0.0000000001))
    # NOTE(review): casting dB values straight to uint8 truncates/wraps values
    # outside 0..255 -- confirm this quantisation is intended.
    pxx = np.array(pxx, dtype=np.uint8)
    freqs = freqs[freq_mask]
    return (pxx, freqs, times)
def create_specgrams(pathlist, specgram_dir, overwrite=False):
    # For each audio file in `pathlist`, compute and save a spectrogram under
    # `specgram_dir`, mirroring the last directory component of the source
    # path. Existing spectrograms are skipped unless `overwrite` is set.
    # Returns the list of target .png paths (whether newly written or skipped).
    paths = []
    for path in pathlist:
        # Rebuild "<parent>/<file>" relative path, swap extension for .png.
        parentdir = os.path.split(path)
        parentdir = os.path.join(os.path.split(parentdir[0])[1], parentdir[1])
        spath = os.path.splitext(parentdir)[0]
        spath = ''.join([os.path.join(specgram_dir, spath), '.png'])
        paths += [spath]
        if not overwrite and os.path.exists(spath):
            print 'specgram from', path, 'exists as', spath
            continue
        else:
            print 'generating specgram from', path, '->', spath
        pcm, fs = load_pcm(path)
        pxx, freqs, times = make_specgram(pcm, fs)
        # NOTE(review): write_specgram takes (pxx, freqs, times, path) but is
        # called here with only (pxx, spath) -- likely a latent TypeError on
        # this branch; confirm against utils/write_specgram signature.
        write_specgram(pxx, spath)
    return paths
| [
"victorlampreia@gmail.com"
] | victorlampreia@gmail.com |
0cf90ec6fed77677ef55bca4273fa9bad7239d2b | 680dc72f3acd99351f208c79b4bce9efb9e394e9 | /databay/planners/aps_planner.py | 927219d250d104f497e3b2c4b691028a9834af40 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nkaramesinis/databay | cfeef2c72614f1994dfe25df1a652bf9d531c266 | 02d46a661f57d4e96c394b26b1d40366ccacf981 | refs/heads/master | 2023-01-01T13:20:00.023715 | 2020-09-29T08:47:39 | 2020-09-29T08:47:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,288 | py | """
.. seealso::
* :ref:`Scheduling <scheduling>` to learn more about scheduling in Databay.
* :any:`BasePlanner` for the remaining interface of this planner.
"""
import logging
from typing import Union, List
from apscheduler.events import EVENT_JOB_ERROR
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.base import JobLookupError
from apscheduler.schedulers.base import STATE_RUNNING
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
from databay.base_planner import BasePlanner
from databay import Link
_LOGGER = logging.getLogger('databay.APSPlanner')
# We ignore the APScheduler's exceptions because we log them ourselves.
logging.getLogger('apscheduler.executors').setLevel(logging.CRITICAL)
class APSPlanner(BasePlanner):
    """
    Planner implementing scheduling using the |APS|_. Scheduling sets the :any:`APS Job <apscheduler.job.Job>` as links' job.

    .. |APS| replace:: Advanced Python Scheduler
    .. _APS: https://apscheduler.readthedocs.io/en/stable/index.html
    .. _configuring-scheduler: https://apscheduler.readthedocs.io/en/stable/userguide.html#configuring-the-scheduler
    """

    def __init__(self, links:Union[Link, List[Link]]=None, threads:int=30, executors_override:dict=None, job_defaults_override:dict=None, catch_exceptions:bool=False):
        """
        :type links: :any:`Link` or list[:any:`Link`]
        :param links: Links that should be added and scheduled.
            |default| :code:`None`

        :type threads: int
        :param threads: Number of threads available for job execution. Each link will be run on a separate thread job.
            |default| :code:`30`

        :type executors_override: dict
        :param executors_override: Overrides for executors option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type job_defaults_override: dict
        :param job_defaults_override: Overrides for job_defaults option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type catch_exceptions: bool
        :param catch_exceptions: Whether exceptions should be caught or halt the planner.
            |default| :code:`False`
        """
        self._threads = threads
        self._catch_exceptions = catch_exceptions

        # Fresh dicts per call (avoids the mutable-default-argument pitfall).
        if executors_override is None: executors_override = {}
        if job_defaults_override is None: job_defaults_override = {}
        executors = {'default': ThreadPoolExecutor(threads), **executors_override}
        job_defaults = {'coalesce': False, 'max_instances': threads, **job_defaults_override}

        self._scheduler = BlockingScheduler(executors=executors, job_defaults=job_defaults, timezone='UTC')
        # self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
        # Listen for job errors so failures can be logged (and optionally halt the planner).
        self._scheduler.add_listener(self._on_exception, EVENT_JOB_ERROR)

        super().__init__(links)

    def _on_exception(self, event):
        # APS error-event callback: re-raise the job's exception with extra
        # context appended so the log shows which job was executing, then
        # optionally shut the planner down.
        if event.code is EVENT_JOB_ERROR:
            try:
                # It would be amazing if we could print the entire Link, but APS serialises Link.transfer to a string and that's all we have from Job's perspective.
                extra_info = f'\n\nRaised when executing {self._scheduler.get_job(event.job_id)}'
                exception_message = str(event.exception) + f'{extra_info}'
                traceback = event.exception.__traceback__
                try:
                    raise type(event.exception)(exception_message).with_traceback(traceback)
                except TypeError as type_exception:
                    # Some custom exceptions won't let you use the common constructor and will throw an error on initialisation. We catch these and just throw a generic RuntimeError.
                    raise Exception(exception_message).with_traceback(traceback) from None
            except Exception as e:
                # The re-raise above always lands here; log with full traceback.
                _LOGGER.exception(e)

            if not self._catch_exceptions and self.running:
                # Halt without waiting for running jobs when not catching exceptions.
                self.shutdown(False)

    def _schedule(self, link:Link):
        """
        Schedule a link. Sets :any:`APS Job <apscheduler.job.Job>` as this link's job.

        :type link: :any:`Link`
        :param link: Link to be scheduled
        """
        job = self._scheduler.add_job(link.transfer, trigger=IntervalTrigger(seconds=link.interval.total_seconds()))
        link.set_job(job)

    def _unschedule(self, link:Link):
        """
        Unschedule a link.

        :type link: :any:`Link`
        :param link: Link to be unscheduled
        """
        if link.job is not None:
            link.job.remove()
            link.set_job(None)

    def start(self):
        """
        Start this planner. Calls :any:`APS Scheduler.start() <apscheduler.schedulers.base.BaseScheduler.start>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
        """
        super().start()

    def _start_planner(self):
        # BlockingScheduler.start() blocks the calling thread until shutdown.
        self._scheduler.start()

    def pause(self):
        """
        Pause this planner. Calls :any:`APScheduler.pause() <apscheduler.schedulers.base.BaseScheduler.pause>`
        """
        _LOGGER.info('Pausing %s' % str(self))
        self._scheduler.pause()

    def resume(self):
        """
        Resume this planner. Calls :any:`APScheduler.resume() <apscheduler.schedulers.base.BaseScheduler.resume>`
        """
        _LOGGER.info('Resuming %s' % str(self))
        self._scheduler.resume()

    def shutdown(self, wait:bool=True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        super().shutdown(wait)

    def _shutdown_planner(self, wait:bool=True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        self._scheduler.shutdown(wait=wait)

    def purge(self):
        """
        Unschedule and clear all links. It can be used while planner is running. APS automatically removes jobs, so we only clear the links.
        """
        for link in self.links:
            try:
                link.job.remove()
            except JobLookupError:
                pass # APS already removed jobs if shutdown was called before purge, otherwise let's do it ourselves
            link.set_job(None)

        self._links = []

    @property
    def running(self):
        """
        Whether this planner is currently running. Changed by calls to :any:`start` and :any:`shutdown`.

        :return: State of this planner
        :rtype: bool
        """
        return self._scheduler.state == STATE_RUNNING

    def __repr__(self):
        return 'APSPlanner(threads:%s)' % (self._threads)
| [
"voy1982@yahoo.co.uk"
] | voy1982@yahoo.co.uk |
41346c47ed5fde38216d1de0cb5984e9e0bdfd96 | 0b9c9d401adecad7c777147cc3b68cbbd83b775b | /List_type.py | 1a599e2e7705d252551c4c30f13fb79590f374c4 | [] | no_license | parveez1shariff/Python-Testing | c984f62ec457c995b6793bdb7ec3b2f978e573f8 | 422b0860c64695bcce9a3a3f4628a92bb65a709c | refs/heads/master | 2022-10-02T19:52:58.659005 | 2020-05-27T14:42:25 | 2020-05-27T14:42:25 | 266,270,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py |
# A Python list may mix element types: int, float, str, even a nested list.
rain = [10, 23.4, "Temp", [5, 10] ]
print(rain)
print(type(rain))  # <class 'list'>
"parveez1shariff@gmail.com"
] | parveez1shariff@gmail.com |
88e99bd05a923040575e84609ce7c0e0b942c2b6 | be48055029a750d24e1f0867bf449b423748b67b | /blog/models.py | 2ec1d5e9d767ed7dde3f6ef60ddba8269b54f79a | [] | no_license | jennifersite/jennifersite.github.io | 9350bfde3803fe8ec749a372b27ff3bbdf153348 | 6c9cf83e76f80c11d566060953c8cf3df04be978 | refs/heads/master | 2021-07-19T21:35:35.443862 | 2017-10-28T07:55:16 | 2017-10-28T07:55:16 | 108,629,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django import template
# Create your models here.
from django.template.defaultfilters import slugify
from django.urls import reverse
from ckeditor_uploader.fields import RichTextUploadingField
class Author(models.Model):
    # Display name of the blog author.
    name = models.CharField(max_length=50)
    # Unique contact address; `blank=True` allows empty in forms despite uniqueness.
    email = models.EmailField(unique=True, blank=True)
    active = models.BooleanField(default=False)
    created_on = models.DateTimeField(auto_now_add=True)
    last_logged_in = models.DateTimeField(auto_now=True)
    # class Meta:
    #     unique_together = (('name', 'email'),)
    def __str__(self):
        return self.name + " : " + self.email
class Category(models.Model):
    # Unique category name and its URL-safe slug.
    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    author = models.ForeignKey(Author)
    # class Meta:
    #     verbose_name_plural = "Categories"
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Canonical listing URL for posts in this category.
        return reverse('post_by_category', args=[self.slug])
class Tag(models.Model):
    # Unique tag name and its URL-safe slug.
    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    author = models.ForeignKey(Author)
    # class Meta:
    #     verbose_name_plural = "Tags"
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Canonical listing URL for posts carrying this tag.
        return reverse('post_by_tag', args=[self.slug])
class Post(models.Model):
    title = models.CharField(max_length=200)
    slug = models.SlugField(unique=True, help_text="Slug will be generated automatically from the title of the post")
    # Rich-text body with file-upload support (django-ckeditor).
    content = RichTextUploadingField()
    pub_date = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(Author)
    category = models.ForeignKey(Category)
    tags = models.ManyToManyField(Tag)
    # class Meta:
    #     verbose_plural_name = "PostModel"
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Detail URL is keyed on the numeric id, not the slug.
        return reverse('post_detail', args=[self.id])
    def save(self, *args, **kwargs):
        # Regenerates the slug from the title on every save; since slug is
        # unique, two posts with the same title will raise an IntegrityError
        # -- presumably acceptable here, confirm with callers.
        self.slug = slugify(self.title)
        super(Post, self).save(*args, **kwargs)
"jmh.chen@hotmail.com"
] | jmh.chen@hotmail.com |
52cd05812653c943e8f1a443b6fe1d471d4c7408 | 2cae3866749236714c23a75a95665b5cb8c8346a | /darwin/security/suuid.py | 6aae944f1f543a1797844010e2ad2c3f38345161 | [] | no_license | janiltonmaciel/darwin | 16b1892dd3f965239c61833feeb46b76e64e9448 | 6222cbef6692dd7725dca452b30d3b214894bb65 | refs/heads/master | 2020-05-30T19:04:44.895779 | 2015-09-22T12:53:17 | 2015-09-22T12:53:17 | 41,773,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | # -*- coding:utf-8 -*-
# Import system
import uuid
class SecurityUUID(object):
    """Helpers for producing unique identifiers used by the security layer."""

    @staticmethod
    def generate_uuid():
        """Return a fresh random (version 4) UUID rendered as a string."""
        return str(uuid.uuid4())
| [
"janilton@corp.globo.com"
] | janilton@corp.globo.com |
cbe5b32fcc5a779da56b87e8a719be482b7c0046 | 68924396783fb6044f726d3625cde0685fd9c37c | /core/game/effects/punisher.py | 90c38de6c98b9977f146dfab81bd014b08ddfee6 | [] | no_license | Ukkotakken/OMGame | 4508e45dbc940fe9d936987874251b170d268866 | dad564923cd56567850f0f49a39190364775446b | refs/heads/master | 2020-04-06T04:03:39.329931 | 2017-04-08T23:16:28 | 2017-04-08T23:16:28 | 83,063,231 | 0 | 0 | null | 2017-04-08T23:09:14 | 2017-02-24T16:52:54 | Python | UTF-8 | Python | false | false | 3,034 | py | from core.game.action.common import Action
from core.game.characters.common import Character
from core.game.common import GuiltyDegree, TurnType, DamageType
from core.game.effects.common import CharacterEffect, pipe_argument, TimedCharacterEffect
from core.game.effects.priorities import EffectPriority
from core.game.events.common import DamageEvent
from core.game.events.punisher import PenanceEvent, BloodhoundEvent, PunishmentBanishEvent
from core.game.exceptions import BanishedFromClassException
from core.game.turn import DayTurn, NightTurn
class Penance(CharacterEffect):
    # Effect that logs a PenanceEvent whenever the bearer kills someone,
    # then delegates to the character's normal on_kill handling.
    priority = EffectPriority.TURN_END_INFO_PRIORITY
    def on_kill(self, character, killed_character):
        character.game.log(PenanceEvent(character, killed_character))
        character.on_kill(killed_character)
class BloodhoundEffect(TimedCharacterEffect):
    # One-turn effect planted on a victim; at the end of a night turn it
    # records, on the punisher, the guilt degree of everyone who damaged
    # the victim that night.
    priority = EffectPriority.TURN_END_INFO_PRIORITY
    def __init__(self, punisher):
        # The punisher who receives the guilt information gathered here.
        self.punisher = punisher
        super().__init__(1)
    def on_turn_end(self, character, turn):
        if isinstance(turn, NightTurn):
            character.game.log(BloodhoundEvent(self.punisher, character, turn.turn_type is TurnType.MAGIC_POWER))
            dies = character.dies()
            for c in character.damaged_by_characters:
                # Never downgrade an already-recorded KILLED degree to DAMAGED.
                if self.punisher.guilty_degrees.get(c) is not GuiltyDegree.KILLED:
                    self.punisher.guilty_degrees[c] = GuiltyDegree.KILLED if dies else GuiltyDegree.DAMAGED
class ClassBanishEffect(CharacterEffect):
    # Strips the bearer of their role: role abilities raise, while vote and
    # attack fall back to the base Character implementations.
    priority = EffectPriority.PLAY_CANCELING_PRIORITY
    def play(self, character, ability, target=None, **kwargs):
        # Role abilities are forbidden while banished; everything else passes through.
        if ability in character.role_abilities_list:
            raise BanishedFromClassException()
        return character.play(ability, target, **kwargs)
    def vote(self, _character, character):
        return Character.vote(_character, character)
    def attack(self, _character, character):
        return Character.attack(_character, character)
class PunishmentEffect(TimedCharacterEffect):
    # One-turn effect applied to a suspect: when the punisher damages them,
    # guilt toward `victim` adds bonus damage, while punishing an innocent
    # costs the punisher their class.
    priority = EffectPriority.PUNISHMENT_PRIORITY
    def __init__(self, victim, punisher):
        self.victim = victim
        self.punisher = punisher
        super().__init__(turns=1)
    def receive_damage(self, character, strength, type, action):
        guilty_degree = self.punisher.find_guilty_degree(character, self.victim)
        if action.executor is self.punisher:
            if guilty_degree is not GuiltyDegree.NO_GUILTY:
                # Bonus damage: +1 on a magic-power turn, +1 more if the target
                # actually killed the victim.
                base_damage = 1 if character.game.turn.turn_type is TurnType.MAGIC_POWER else 0
                if guilty_degree is GuiltyDegree.KILLED:
                    base_damage += 1
                character.health -= base_damage
                character.game.log(DamageEvent(character, base_damage, DamageType.PHISICAL, action))
            else:
                # Target was innocent: banish the punisher from their class.
                self.punisher.add_effect(ClassBanishEffect())
                character.game.log(PunishmentBanishEvent(self.punisher))
        # The ordinary damage is applied regardless of guilt handling above.
        character.receive_damage(strength, type, action)
| [
"ukkotakken@yandex.ru"
] | ukkotakken@yandex.ru |
852abf45fb49e074f8d68f60d388aeea483fc335 | 865d8dd74328a53d003a01185315ae07c794a513 | /testing_req.py | a4522aed0a9ba72ec0ef353641e98eee82cbd61e | [] | no_license | Lekhman-vold/Crypto-Analyzer | 4acc8083c2da5b6ad154fbc486fcff53ded3ef1e | 8e6b11727081f8838fab116b7501739e828a2036 | refs/heads/master | 2023-04-17T21:41:23.753240 | 2021-04-21T20:06:45 | 2021-04-21T20:06:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # import requests
#
# response = requests.get('https://www.engineerspock.com/')
# github_response = requests.get('https://api.github.com/')
#
# print(github_response.headers, end='\n')
# print(response)
import requests
def get_prices():
    """Fetch current USD quotes for a fixed set of coins from CryptoCompare.

    Returns a dict keyed by coin symbol, each value holding the symbol, its
    USD price, and the 24h / 1h percentage changes.
    """
    coins = ["DOGE", "ETH", "DASH", "LTC"]
    raw = requests.get(
        "https://min-api.cryptocompare.com/data/pricemultifull?fsyms={}&tsyms=USD".format(",".join(coins))).json()["RAW"]
    return {
        symbol: {
            "coin": symbol,
            "price": quote["USD"]["PRICE"],
            "change_day": quote["USD"]["CHANGEPCT24HOUR"],
            "change_hour": quote["USD"]["CHANGEPCTHOUR"],
        }
        for symbol, quote in raw.items()
    }
if __name__ == "__main__":
print(get_prices())
| [
"volodya.l@yahoo.com"
] | volodya.l@yahoo.com |
fdd59d240a4c0bb10c89d75d4e9a62b0b1c7f939 | 2e990ff03f23c3f82e1f3fb7acee1ddd8fb72e0e | /whoislive.py | 450d38b0d33cacce38abf02fe08ffd66a715315d | [] | no_license | HeNine/ekimbot_plugins | e25bd5326b13603a8671d4089317185bb7a7821c | 354978cc8a632aec57ef79d2948ada21dc2502cd | refs/heads/master | 2021-01-24T18:26:28.146480 | 2017-03-06T17:37:11 | 2017-03-06T17:37:11 | 84,441,447 | 0 | 0 | null | 2017-03-09T12:47:12 | 2017-03-09T12:47:12 | null | UTF-8 | Python | false | false | 4,547 | py |
import functools
import itertools
import gevent
import gtools
import requests
import twitch
from ekimbot.botplugin import ClientPlugin
from ekimbot.commands import CommandHandler
from ekimbot.utils import reply_target
def encode_recursive(o, encoding='utf-8'):
    # Recursively encode every unicode string inside `o` to bytes: dicts and
    # lists are walked (dict keys included); other values pass through.
    # NOTE(review): Python 2 only -- `unicode` does not exist on Python 3, so
    # calling this there raises NameError. Confirm before porting.
    if isinstance(o, unicode):
        return o.encode(encoding)
    elif isinstance(o, dict):
        return {encode_recursive(k): encode_recursive(v) for k, v in o.items()}
    elif isinstance(o, list):
        return [encode_recursive(x) for x in o]
    else:
        return o
def requires_oauth(fn):
    """Decorator for command handlers that need a configured twitch login.

    When either ``config.oauth`` or ``config.target`` is unset, the wrapped
    handler is skipped and the user is told no login is configured.
    """
    @functools.wraps(fn)
    def wrapper(self, msg, *args):
        if self.config.oauth is not None and self.config.target is not None:
            return fn(self, msg, *args)
        self.reply(msg, "No twitch login configured")
    return wrapper
class TwitchPlugin(ClientPlugin):
    """Should be a client plugin for a client logged into twitch.
    Upon request, will list all live channels out of the list of channels that config.target
    (default client.nick) is following.
    """
    name = 'whoislive'

    defaults = {
        'target': None, # None makes no args an error
        'limit': 3,
        'private_limit': 10,
        'client_id': None,
        'oauth': None, # if not none, can do follow actions
    }

    def init(self):
        # One API client per plugin instance, credentialed from config.
        self.api = twitch.TwitchClient(oauth=self.config.oauth, client_id=self.config.client_id)

    def limit(self, msg):
        # Max number of individual "X is live" replies for this context.
        if msg.target == reply_target(self.client, msg):
            # public channel
            return self.config.limit
        else:
            # private message
            return self.config.private_limit

    @CommandHandler("live", 0)
    def live(self, msg, *channels):
        """List currently live streamers
        Specify list of channels, or list of all channels followed by a channel by prepending a ~
        If nothing given, a default follow list is used depending on bot config
        """
        found = []
        errors = False
        if not channels:
            if self.config.target:
                channels = ['~{}'.format(self.config.target)]
            else:
                self.reply(msg, "Please list some channels to check")
                return
        limit = self.limit(msg)
        try:
            # flatten iterators of follows and direct channel names into single iterable
            # TODO this could be better parallelised so follow fetches happen in parallel
            # but we need to refactor to use gevent queues or it gets real ugly real fast
            channels = itertools.chain(*[
                self.following(channel.lstrip('~')) if channel.startswith('~') else (channel,)
                for channel in channels
            ])
            # gmap_unordered yields results as they complete (order not preserved).
            for name, channel in gtools.gmap_unordered(self.get_channel_if_live, channels):
                if not channel:
                    continue
                found.append(name)
                # Only the first (limit - 1) live channels get a full reply;
                # the remainder are summarised in one "And also ..." line below.
                if len(found) < limit:
                    self.reply(msg, "https://twitch.tv/{name} is playing {game}: {status}".format(**channel))
        except Exception:
            self.logger.exception("Error while checking who is live")
            errors = True
        if errors:
            self.reply(msg, "I had some issues talking to twitch, maybe try again later?")
        elif len(found) >= limit:
            found = found[limit - 1:]
            self.reply(msg, "And also {}".format(', '.join(found)))
        elif not found:
            self.reply(msg, "No-one is live right now, sorry!")

    def following(self, target):
        """Yields channel names that target is following"""
        for result in self.api.get_all("follows", "users", target, "follows", "channels"):
            yield encode_recursive(result['channel']['name'])

    def get_channel_if_live(self, name):
        """Returns an up-to-date channel object if channel is currently live, else None"""
        # Fetch stream state and channel details concurrently.
        stream = gevent.spawn(lambda: self.api.get("streams", name))
        channel = gevent.spawn(lambda: self.api.get("channels", name))
        if stream.get().get("stream") is None:
            return
        return encode_recursive(channel.get())

    def _follow_op(self, msg, channels, method, op_name):
        # Apply an HTTP follow/unfollow operation to each channel (deduplicated,
        # sorted) and report failures in a single summary reply.
        channels = sorted(list(set(channels)))
        failures = {}
        for channel in channels:
            try:
                self.api.request(method, 'users', self.config.target, 'follows', 'channels', channel, json=False)
            except requests.HTTPError as e:
                failures[channel] = str(e)
        if len(failures) == 0:
            self.reply(msg, "{}ed channels: {}".format(op_name, ' '.join(channels)))
        elif len(failures) == 1:
            (channel, error), = failures.items()
            self.reply(msg, "failed to {} channel {}: {}".format(op_name, channel, error))
        else:
            self.reply(msg, "failed to {} channels: {}".format(op_name, ' '.join(sorted(failures))))

    @CommandHandler("twitch follow", 1)
    @requires_oauth
    def follow(self, msg, *channels):
        self._follow_op(msg, channels, 'PUT', 'follow')

    @CommandHandler("twitch unfollow", 1)
    @requires_oauth
    def unfollow(self, msg, *channels):
        self._follow_op(msg, channels, 'DELETE', 'unfollow')
| [
"mikelang3000@gmail.com"
] | mikelang3000@gmail.com |
e59bab95d23b7a34c1fe950e44065ce754a0826e | 91a357833d29642431c2b20517131dd8c4939b2c | /week 7/souvikghosh_a7.py | de795d626ebac46c94054579c22b28eba0d302c3 | [] | no_license | souvik119/157152-Introduction-to-Programming | c1c8b10c0c202029175054ca9b3d4c0a012c1ced | 901b5ab71df722d95fa209ada63bfc30c2a695c1 | refs/heads/main | 2023-07-19T08:16:11.719848 | 2021-08-17T21:20:03 | 2021-08-17T21:20:03 | 386,095,465 | 0 | 0 | null | 2021-08-17T21:20:03 | 2021-07-14T22:49:20 | Python | UTF-8 | Python | false | false | 1,785 | py | # File statistics
# Reads a file based on the user input for the file name
# Display each line to the screen with a line number
# Prints statistics like Line Count and Character Count at the end of the program
# Program continues until user quits
def get_file_name():
    """Ask the user for the name of the file to read and return it as a string."""
    return input("Please enter name of file to be read (including extension): ")
def file_stat(file_name):
    '''
    Read the file named by file_name, echoing each line to the screen with a
    1-based line number, then print summary statistics (line count and
    character count, newlines included).

    I/O errors (missing file, permission denied, ...) are reported to the
    user rather than propagated, preserving the original best-effort
    behaviour.
    '''
    line_count = 0
    character_count = 0
    try:
        print('File contents below : ')
        # 'with' guarantees the file is closed even if reading fails midway,
        # which the original open()/close() pair did not.
        with open(file_name, 'r') as f:
            for line in f:
                print(f'{line_count + 1} - {line}', end='')
                line_count += 1
                # len(line) includes the trailing newline, matching the
                # original per-character counting loop.
                character_count += len(line)
        print('\n')
        print('#########################################')
        print('File stats')
        print(f'Line count is : {line_count}')
        print(f'Character count is : {character_count}')
        print('#########################################')
    except OSError:
        # Narrowed from a bare 'except:' so programming errors are no longer
        # silently swallowed; only I/O failures are expected here.
        print(f'Error reading the file : {file_name}')
except:
print(f'Error reading the file : {file_name}')
def file_reader():
    '''
    Drive the program: read and display one file, then keep offering
    retries until the user enters q/Q to quit.
    '''
    file_stat(get_file_name())
    while True:
        choice = input('Enter q to quit or any other key to retry : ')
        if choice in ('q', 'Q'):
            print('Thank you for using file reader. Bye!')
            exit()
        file_stat(get_file_name())
# Entry point: start the interactive file reader.
file_reader()
| [
"Sayali1@"
] | Sayali1@ |
9589158dfe27c7aca382fc3c25aba6b61418e832 | 23a3d0433bd53f1fd69a8397ee310913038980ea | /04_lesson_3/quiz/relativeSize.py | 644f9ff61948a1667d81c9007f8388dd3e5e0aeb | [] | no_license | lisalisadong/cs-101 | 3ce2f151386a153770de266624f47879f7796fd3 | 6a001aa27100ce76e7dfa5e6a3528607d3280121 | refs/heads/master | 2020-05-19T23:00:25.857863 | 2015-01-06T06:24:01 | 2015-01-06T06:24:01 | 26,845,527 | 16 | 15 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Given the variable countries defined as:
# Name Capital Populations (millions)
countries = [['China', 'Beijing', 1350],
             ['India', 'Delhi', 1210],
             ['Romania', 'Bucharest', 21],
             ['United States', 'Washington', 307]]

# What multiple of Romania's population is the population
# of China? Please print your result.
# Multiplying by 1.0 forces true (float) division, and the parenthesised
# print call is valid under both Python 2 and Python 3 (the original
# 'print expr' statement was Python-2 only).
print(1.0 * countries[0][2] / countries[2][2])
"qingxiao.dong@gmail.com"
] | qingxiao.dong@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.