blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
86e4f8e3682a4736d8c8789a4433cf37f971fc98 | 3887dd942e13b69668f5970ee5c8051eccdd3e3d | /node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi | c1bee61dbc28ec27833460c31071bdc249a010d4 | [
"MIT"
] | permissive | hamishakl/ReactExpressNodeStyled | bd64acb01ac939d0a359c5e777c3b89bc64cc1fd | 23c95edf6d1b02d783d030c9e236bf43123f1459 | refs/heads/master | 2023-01-07T06:58:11.825048 | 2020-10-22T00:55:50 | 2020-10-22T00:55:50 | 293,730,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,665 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/hamiishtana/Library/Caches/node-gyp/12.18.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/hamiishtana/.npm-init.js",
"userconfig": "/Users/hamiishtana/.npmrc",
"cidr": "",
"node_version": "12.18.3",
"user": "",
"save": "true",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"save_exact": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/hamiishtana/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.6 node/v12.18.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/1h/3q6sf39s30l66ymkr4s62jy40000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"hamish.tana24@gmail.com"
] | hamish.tana24@gmail.com |
954ecc61dfce1b4baa0c31373670efc38575e854 | 2a65bb733cd173d10c0be7ef4aed7e90635a5cd4 | /blog/migrations/0002_remove_post_slug.py | d68013f9636f457fbcde77a128799c507340c590 | [] | no_license | AlexBlueCrow/mysite | bba5907006ada25a48275fe581d2cbf8b76611e2 | 97f3a0295a205c07bf89c7cae465d4a79d87c586 | refs/heads/master | 2020-04-04T18:31:32.963015 | 2019-04-24T11:46:49 | 2019-04-24T11:46:49 | 156,167,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # Generated by Django 2.1.5 on 2019-03-20 15:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='slug',
),
]
| [
"forstarcraft213@163.com"
] | forstarcraft213@163.com |
7f491f356f6ff82934a5b21eb19f0391fa95ff5a | 109b54c8f66bcc0c84d7ebadc64005631c2f28bf | /venv/bin/pip2.7 | 10908f8108bdfbbe1a3b7e8eab513012bb1b7126 | [] | no_license | wly314/RestfulUnitTest | ed412f916e340be4b309faea1ce92b3137ce261a | 7dc1031c37a6fa2683c0832b320be56d97afc06a | refs/heads/master | 2020-03-19T20:26:31.939000 | 2018-06-11T09:20:34 | 2018-06-11T09:20:34 | 136,902,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | 7 | #!/home/leou/virtualenv/RestfulUnitTest/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"wly314sdx@qq.com"
] | wly314sdx@qq.com |
b776f3ddd2760d5529bfd4feeacf8ef48eb827cb | 1819789fd07a608f6d8e1ef66b79c95fcf134e09 | /src/merry/apps.py | 9f47e85cc8e24d9578955d6ad6fcb3761ed249f7 | [] | no_license | xiaolei-shuai/kms | 699c44bbd2775af2d807c70589fd079a67303512 | 4a17c82d69d535fa0f63facc9a23a104fb9ee6f6 | refs/heads/master | 2022-12-01T16:55:49.056675 | 2020-08-16T05:09:31 | 2020-08-16T05:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class MerryConfig(AppConfig):
name = 'merry'
| [
"wsjAliYun@aliyun.com"
] | wsjAliYun@aliyun.com |
fb4ead0f995e45b3159411bcecdef97dd22fe41a | d992297c9dec9c01a9bc5548ba3e817c3770182c | /weasyprint-app.py | 03eb117cb8a90ca2cdfa76147c31a57bbbd59d94 | [] | no_license | yazabara/python-app-with-docker | 169a69a064066593bd0e76b82e82585c80ab92af | 9899b9005b464d9e62cb6847bb826a6555dfc07a | refs/heads/master | 2020-04-16T19:07:09.295734 | 2019-01-16T10:33:55 | 2019-01-16T10:33:55 | 165,847,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from weasyprint import HTML
if __name__ == '__main__':
HTML('http://weasyprint.org/').write_pdf('weasyprint-website.pdf')
| [
"yaroslav.zabara@waveaccess.ru"
] | yaroslav.zabara@waveaccess.ru |
7433d119357f27263175e494dc7a2d7d5632e795 | 48a2610f1d37451b8133ebc0bbf18e0e74f53168 | /reporting-dashboard/data/clean_data.py | b03f23b0208cce25e4296be4813a8ff05f2ad56c | [] | no_license | pnandak/dashboard-prototype | 70027a54a1be7ff3916bed2c18f68fa70d1eda27 | 7dd911ca87a7ec951d0176691ad3d6d9bda87f9d | refs/heads/master | 2020-12-14T06:10:19.881742 | 2015-03-14T00:32:41 | 2015-03-14T00:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import csv
def remove_last_row(iterable):
iterator = iter(iterable)
try:
prev = next(iterator)
while True:
cur = next(iterator)
yield prev
prev = cur
except StopIteration:
return
def main():
raw_data = csv.reader(open("performance_report_data.csv", "r"), delimiter=",")
output_data = csv.writer(open("reporting_data.csv", "wb"), delimiter=",")
for i, row in enumerate(remove_last_row(raw_data)):
if i == 10:
header = []
for item in row:
header.append(item.replace(" ", "").replace("-", "").replace(":", "_"))
output_data.writerow(header)
elif i > 10:
output_data.writerow(row)
else:
continue
print "Data formatted."
if __name__ == '__main__':
main()
| [
"boxuancui@gmail.com"
] | boxuancui@gmail.com |
de581e51dca0962cec387fdab2fef54098bf2b9e | b4923b19573688ad25306a5a58a7c2c4bcb0d7ae | /pyGDAL-MultiprocessingTool.py | 2b0165602775e1c0187a004bb6328a03d6dcd3cd | [] | no_license | lcabon258/Multi-DEM-conversion | a81865692a70d2908f1d7167a15b1afa711eff4b | a953f821c7ca3fb5ed1d8ea1d9fbaaa5173428f2 | refs/heads/master | 2020-06-13T04:38:29.758301 | 2016-12-07T14:16:44 | 2016-12-07T14:16:44 | 75,447,005 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,546 | py | # -*- coding: utf-8 -*-
_Release = "MultiProcessing Release V0.1"
_ReleaseDate = "2016/12/01"
_Authur = "Cheng-Wei Sun"
"""
last edited 2016-12-01 2200 @ Mac
@author: Sun, Cheng-Wei
Tested platform:
# macOS Sierra 10.12.1
# Python 3.5 Anaconda custom (x86_64) | GCC 4.2.1 Compatible Apple LLVM 4.2 (clang-425.0.28)
# pygdal 2.1.1 (conda-forge)
Useage :
$python pyGDAL-MultiprocessingTool.py /Directory/ mode (threads)
Arguments:
/Directory/ = Which directory contains the rasters you want to process
mode = 'slope' or 'hillshade'
threads = how many threads you want to generate to process the rasters.
"""
print("\nDEM Processing\n{} by {}".format(_Release,_ReleaseDate,_Authur))
#===== Built-in libraries =====
from glob import glob
import os
import os.path as oph
import sys
#from multiprocessing import Pool
import multiprocessing as mps
import time
#===== Other libraries =====
from osgeo import gdal
#===== Global Variable =====
Threats = 2
Mode = ""
Directory = ""
OutputDir = None
#======== Timer ==========
class Timer():
def __init__(self):
start=0.0
now=0.0
def start(self):
self.start=time.time()
def exec_time(self):
self.now=time.time()
return (self.now-self.start)
#======== File Load Functions ==========
def dirloader(dirpath,extension="*"):
'''
Purpose : spcify a directory then return a list with all files.
input : a directory (string), file extention (string)
output : a list of file matches the condition
libraries used : os.path (built-in) , glob (built-in)
'''
if sys.platform == "win32" and not dirpath.endswith("\\") :
pass
#dirpath = dirpath+"\\"
dire=oph.abspath(dirpath)
print("glob path : {}".format(oph.join(dire,"*."+extension)))
FileList=glob(oph.join(dire,"*."+extension))
return FileList
def MakeSlope(input_file):
'''
Use GDAL module to generate Slope raster.
ref : http://gdal.org/python/
'''
#input_Raster = gdal.Open(input_file)
#OutRaster = input_file.split(".")[0]+"_slp.tif"
OutRaster = oph.join(OutputDir,oph.basename(input_file).split(".")[0]+"_slp.tif")
print("Processing : {}".format(OutRaster))
gdal.DEMProcessing(OutRaster,input_file,"slope")
def SequenceMakeSlope(input_file):
for i in (input_file):
MakeSlope(i)
def MakeHillshade(input_file):
'''
Use GDAL module to generate Slope raster.
'''
#input_Raster = gdal.Open(input_file)
#OutRaster = input_file.split(".")[0]+"_slp.tif"
OutRaster = oph.join(OutputDir,oph.basename(input_file).split(".")[0]+"_shd.tif")
print("Processing : {}".format(OutRaster))
gdal.DEMProcessing(OutRaster,input_file,"hillshade")
def SequenceMakeHillshade(input_file):
for i in input_file:
MakeHillshade(i)
def PrintHelp():
print("Usage : python DEMProcessing.py (1)Directory (2)Mode (3)Threats")
print("(1)Directory : Give the directory and the script will process all the rasters.")
print("(2)Mode : 'hillshade' , 'slope'")
print("(3)Threads : defaults = 2")
sys.exit()
#======== Muitiprocessing Functions ==========
def multi_task(iter_file,processes=2):
global Mode
if Mode == "slope":
with mps.Pool(processes) as p: #Creating pools
p.map(MakeSlope,iter_file)
return
elif Mode == "hillshade":
with mps.Pool(processes) as p: #Creating pools
p.map(MakeHillshade,iter_file)
return
#======== Argument Parser ==========
def Parser():
global Threats
global Mode
global Directory
global OutputDir
argv=sys.argv
argc=len(argv)
# --- Parse ---
if argc == 1 : # No other argument
PrintHelp()
elif argc < 3:
print("Insufficient arguments")
PrintHelp()
for i in range(1,argc):
if i == 1:
if not oph.isdir(argv[i]) :
print("First argument should be a directory.\n")
PrintHelp()
Directory = oph.abspath(argv[i])
#Change working directory where raster files exists.
os.chdir(Directory)
OutputDir="DEM_Processing_"+time.strftime("%Y%m%d_%H%M%S",time.localtime())
os.mkdir("DEM_Processing_"+time.strftime("%Y%m%d_%H%M%S",time.localtime()))
OutputDir=oph.abspath(OutputDir)
elif i == 2:
Mode = argv[i].lower()
if argv[i].lower() not in ['hillshade','slope']:
print("Mode should be either 'hillshade' or 'slope'")
PrintHelp()
elif i == 3 :
if int(argv[i]) > mps.cpu_count() or int(argv[i]) == 0:
print("Please give correct threads. Your PC has {} threads.".format(mps.cpu_count()))
sys.exit()
Threats = int(argv[i])
else : # For future use.
print("Too many arguments !")
PrintHelp()
FileList = dirloader(Directory,"tif")
multi_task(FileList,Threats)
if __name__ == "__main__":
t=Timer()
t.start()
Parser()
print("Execution time : {} s.".format(t.exec_time()))
"""
Log:
20161201-2200: V0.1
first version.
#Functions:
## dirloader(dirpath,extension="*") :讀取資料夾所有檔案
## MakeSlope(input_file) : 呼叫 GDAL.DEMProcessing 製作 Slope 檔
## PrintHelp() : 印出使用說明
## SequenceMakeSlope(input_file) : 使用序列的方式進行轉檔
## multi_task(iter_file,processes=2) : 使用平行處理模組分配工作
"""
| [
"noreply@github.com"
] | noreply@github.com |
40d79daadfc7d474b29a31da056c8acbb016eb6b | ba1d57650d38424cbeac316d380e21727186bc22 | /classes-9.7.py | 04cd329c8fef2138d70172092736185aa786f7b0 | [] | no_license | dvp-git/Python-Crash-Course | c80dbe1fcf03fbfbeed56ac1eddb43718a42d6bd | 633e1d73a20d063b65aa9e78b3e055b3d153c7e6 | refs/heads/master | 2020-07-24T01:33:15.516438 | 2019-09-11T13:18:27 | 2019-09-11T13:18:27 | 207,762,823 | 0 | 0 | null | 2019-09-11T08:42:35 | 2019-09-11T08:32:13 | null | UTF-8 | Python | false | false | 1,828 | py | ## Exercise 9.7 Admin
## Making a class called User
class User():
"""Making the attruibutes"""
def __init__(self,
first_name,
last_name,
age,
eid
,):
self.first = first_name
self.last = last_name
self.id = eid
self.age = age
def describe_user(self):
"""Printing the summary of the user"""
print("==========User Profile: " + str(self.id) + "==========")
print("First name : " + self.first.title())
print("Last name : " + self.last.title())
print("Employee age: " + str(self.age))
print("Employee ID: " + str(self.id))
def greet_user(self):
"""Printing the greeting for the user"""
print("\n=============Welcome Mr./Ms. " + self.first.title() + "===================")
print("Welcome to this simulation")
print("You are player" + str(self.id))
## Making a Class called Admin:
class Admin(User):
"""inherit the properties of the parent class User"""
def __init__(self,first_name,last_name,age,eid):
"""Inherit the parent class attributes"""
super().__init__(first_name,last_name,age,eid)
self.priveleges = []
def show_priveleges(self):
""" List the administrators priveleges"""
print("The following are the priveleges of the admin:")
for privilege in self.priveleges:
print("...: " + privilege)
admin_user_1 = Admin('Darryl','Vas',24,1023)
admin_user_1.priveleges=[
"can add post",
"can delete post",
"can ban user",
]
admin_user_1.show_priveleges()
| [
"noreply@github.com"
] | noreply@github.com |
bf4ee2d02a325b438c10d2b86a54a4028c965b9b | 3a6bf7337126c8b1883e76cf1f46cec0886f1447 | /rssdl/rss.py | 0280fcd0436b89dd40e7f313f365f2b62e554d0f | [
"Apache-2.0"
] | permissive | egustafson/rssdl | fc4265edd9138a54005b98bdfc1ea5dfb25707d5 | 2b42d8aa4a0d03d31629d8446e7336c6c1348e58 | refs/heads/master | 2020-12-24T06:57:21.265487 | 2017-09-15T20:59:58 | 2017-09-15T20:59:58 | 58,835,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """ RSS DOM for RSSDL
"""
import feedparser
class Feed(object):
def __init__(self, href):
self._href = href
self._d = None
def result(self):
return self._d
def parse(self):
self._d = feedparser.parse(self._href)
return self._d.status if 'status' in self._d else 0
def data(self):
return self._d
## Local Variables:
## mode: python
## End:
| [
"eg-git@elfwerks.org"
] | eg-git@elfwerks.org |
097d7a098bcb81046169def9413bce7f3112d0a7 | 63f0b49eca9a78fc1050d7fdde03d54194ba0b73 | /codekata/ever.py | 3817ebc7c2bdd3afaacf88893319f3f64414ad5f | [] | no_license | Mponsubha/.guvi | d8a6e26cafce22a26c7ef7ababaf028e79b6c4cc | 5a1f0278c1570ed6b827c65baf5d838e1983e177 | refs/heads/master | 2020-06-03T00:22:10.182521 | 2019-07-02T04:02:26 | 2019-07-02T04:02:26 | 191,359,120 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | xi=list(input())
x1=[]
for am in xi:
if(am.isdigit()):
x1.append(am)
print(''.join(x1))
| [
"noreply@github.com"
] | noreply@github.com |
7670ab8d6b931d78427617fd2a1befd1482400bb | 15f1f39f18c3535d8086918851fbe8fc0d845ceb | /pkg_tf_micromouse/scripts/v2/floodfill.py | 6710250e83845f0a5facfde3d4bef23d1256ff78 | [
"MIT"
] | permissive | SuyashVerma2311/micromouse_maze_solver | 60745891d42b43e11202bd7063e3d4db38d67bc8 | d36cc671eabccf568d1f3a55a4e8e7b0289537c6 | refs/heads/main | 2023-03-19T03:20:01.368492 | 2021-03-07T18:42:19 | 2021-03-07T18:42:19 | 345,418,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,537 | py | #! /usr/bin/env python
import numpy as np
class FloodFill(object):
def __init__(self):
### ALERT : self.pos (or) pos should be integer tuple of 2 elements ###
#self.pos = pos # This will keep track of current location #
self.val = 255 # This will hold the flood val of the current cell #
self.path=[[0,0]] #for sample maze 2
self.path_return=[]
self.mode = "discovery" # This is meant to use different forms of the class #
self.stack = []
### Initial Cell Map Floodfill ### # This will keep track of current flooding on the map #
self.cell_map= np.array([[14,13,12,11,10, 9, 8, 7, 7, 8, 9,10,11,12,13,14],
[13,12,11,10, 9, 8, 7, 6, 6, 7, 8, 9,10,11,12,13],
[12,11,10, 9, 8, 7, 6, 5, 5, 6, 7, 8, 9,10,11,12],
[11,10, 9, 8, 7, 6, 5, 4, 4, 5, 6, 7, 8, 9,10,11],
[10, 9, 8, 7, 6, 5, 4, 3, 3, 4, 5, 6, 7, 8, 9,10],
[ 9, 8, 7, 6, 5, 4, 3, 2, 2, 3, 4, 5, 6, 7, 8, 9],
[ 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8],
[ 7, 6, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 6, 7],
[ 7, 6, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8],
[ 9, 8, 7, 6, 5, 4, 3, 2, 2, 3, 4, 5, 6, 7, 8, 9],
[10, 9, 8, 7, 6, 5, 4, 3, 3, 4, 5, 6, 7, 8, 9,10],
[11,10, 9, 8, 7, 6, 5, 4, 4, 5, 6, 7, 8, 9,10,11],
[12,11,10, 9, 8, 7, 6, 5, 5, 6, 7, 8, 9,10,11,12],
[13,12,11,10, 9, 8, 7, 6, 6, 7, 8, 9,10,11,12,13],
[14,13,12,11,10, 9, 8, 7, 7, 8, 9,10,11,12,13,14]])
### Initial Wall Map ### # This will keep track of all the walls that are discovered #
### Initially Walls only on the edges of the arena ###
self.wall_map_v= np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], #vertical wall map
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
self.wall_map_h=np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #horizontal wall map
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
"""
self.wall_map_v= np.array([[1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1], #vertical wall map
[1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1]])
self.wall_map_h=np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #horizontal wall map
[0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
"""
#print(self.wall_map_h)
l=[[0 for i in range(16)]for i in range(16)]
for i in range(16):
ctr=i
for j in range(16):
l[i][j]=ctr
ctr=ctr+1
self.cell_map_return=np.array(l)
print(self.cell_map_return)
def update(self, pos, walls=[0,0,0,0]):
# Updating the position, holding the value and clearing the stack #
if pos == (7, 7) or pos == (7, 8) or pos == (8, 7) or pos == (8, 8) :
return self.update_return(pos, walls)
self.pos = pos
self.val = self.cell_map[self.pos[0]][self.pos[1]]
self.stack = []
# Updating the Wall Map #
walls = np.array(walls, dtype=bool)
if walls[0]: ### North Update ###
self.wall_map_h[self.pos[0]][self.pos[1]] = 1
if walls[1]: ### East Update ###
self.wall_map_v[self.pos[0]][self.pos[1]+1] = 1
if walls[2]: ### West Update ###
self.wall_map_v[self.pos[0]][self.pos[1]] = 1
if walls[3]: ### South Update ###
self.wall_map_h[self.pos[0]+1][self.pos[1]] = 1
# Main Loop for updating the values of the cells in stack #
# This loop will update the flood values for the cells #
self.stack.append(pos)
open_neighbors_val = []
open_neighbors = []
my_neighbors = []
my_neighbors_val = []
if not self.wall_map_h[pos[0]+1][pos[1]]: #south check
my_neighbors.append([pos[0]+1, pos[1]])
if not self.wall_map_h[pos[0]][pos[1]]: #north check
my_neighbors.append([pos[0]-1, pos[1]])
if not self.wall_map_v[pos[0]][pos[1]+1]: #east check
my_neighbors.append([pos[0], pos[1]+1])
if not self.wall_map_v[pos[0]][pos[1]]: #west check
my_neighbors.append([pos[0], pos[1]-1])
if len(open_neighbors)==1:
print("blocked at ",pos)
while len(self.stack) != 0:
popped_cell = self.stack.pop()
popped_val = self.cell_map[popped_cell[0]][popped_cell[1]]
# open_neighbors.clear()
del open_neighbors[:]
# open_neighbors_val.clear()
del open_neighbors_val[:]
if not self.wall_map_h[popped_cell[0]+1][popped_cell[1]]: #south check
open_neighbors.append([popped_cell[0]+1, popped_cell[1]])
if not self.wall_map_h[popped_cell[0]][popped_cell[1]]: #north check
open_neighbors.append([popped_cell[0]-1, popped_cell[1]])
if not self.wall_map_v[popped_cell[0]][popped_cell[1]+1]: #east check
open_neighbors.append([popped_cell[0], popped_cell[1]+1])
if not self.wall_map_v[popped_cell[0]][popped_cell[1]]: #west check
open_neighbors.append([popped_cell[0], popped_cell[1]-1])
# if len(open_neighbors)==1:
# print("blocked at ",)
for neighbor in open_neighbors:
open_neighbors_val.append(self.cell_map[neighbor[0]][neighbor[1]])
#print("open neighbors for ",popped_cell," ",open_neighbors)
if popped_val != 1 + min(open_neighbors_val):
self.cell_map[popped_cell[0]][popped_cell[1]] = 1 + min(open_neighbors_val)
#print("cell value changed")
for neighbor in open_neighbors:
self.stack.append(neighbor)
for neighbor in my_neighbors:
my_neighbors_val.append(self.cell_map[neighbor[0]][neighbor[1]])
next_pos = my_neighbors[my_neighbors_val.index(min(my_neighbors_val))]
# my_neighbors.clear()
del my_neighbors[:]
# my_neighbors_val.clear()
del my_neighbors_val[:]
self.path.append(next_pos)
return next_pos
# if next_pos==[7,7] or next_pos==[7,8] or next_pos==[8,7]:
# print("next step is destination")
# return next_pos
# else:
# print("going to ",next_pos)
# return self.update(tuple(next_pos),[0,0,0,0])
def update_return(self, pos, walls=[0,0,0,0]):
# Updating the position, holding the value and clearing the stack #
print("RETURING HOME")
if pos == (0, 0) :
return self.update(pos, walls)
self.pos = pos
self.val = self.cell_map_return[self.pos[0]][self.pos[1]]
self.stack = []
# Updating the Wall Map #
walls = np.array(walls, dtype=bool)
if walls[0]: ### North Update ###
self.wall_map_h[self.pos[0]][self.pos[1]] = 1
if walls[1]: ### East Update ###
self.wall_map_v[self.pos[0]][self.pos[1]+1] = 1
if walls[2]: ### West Update ###
self.wall_map_v[self.pos[0]][self.pos[1]] = 1
if walls[3]: ### South Update ###
self.wall_map_h[self.pos[0]+1][self.pos[1]] = 1
# Main Loop for updating the values of the cells in stack #
# This loop will update the flood values for the cells #
self.stack.append(pos)
open_neighbors_val = []
open_neighbors = []
my_neighbors = []
my_neighbors_val = []
if not self.wall_map_h[pos[0]+1][pos[1]]: #south check
my_neighbors.append([pos[0]+1, pos[1]])
if not self.wall_map_h[pos[0]][pos[1]]: #north check
my_neighbors.append([pos[0]-1, pos[1]])
if not self.wall_map_v[pos[0]][pos[1]+1]: #east check
my_neighbors.append([pos[0], pos[1]+1])
if not self.wall_map_v[pos[0]][pos[1]]: #west check
my_neighbors.append([pos[0], pos[1]-1])
if len(open_neighbors)==1:
print("blocked at ",pos)
while len(self.stack) != 0:
popped_cell = self.stack.pop()
popped_val = self.cell_map_return[popped_cell[0]][popped_cell[1]]
# open_neighbors.clear()
del open_neighbors[:]
# open_neighbors_val.clear()
del open_neighbors_val[:]
if not self.wall_map_h[popped_cell[0]+1][popped_cell[1]]: #south check
open_neighbors.append([popped_cell[0]+1, popped_cell[1]])
if not self.wall_map_h[popped_cell[0]][popped_cell[1]]: #north check
open_neighbors.append([popped_cell[0]-1, popped_cell[1]])
if not self.wall_map_v[popped_cell[0]][popped_cell[1]+1]: #east check
open_neighbors.append([popped_cell[0], popped_cell[1]+1])
if not self.wall_map_v[popped_cell[0]][popped_cell[1]]: #west check
open_neighbors.append([popped_cell[0], popped_cell[1]-1])
# if len(open_neighbors)==1:
# print("blocked at ",)
for neighbor in open_neighbors:
open_neighbors_val.append(self.cell_map_return[neighbor[0]][neighbor[1]])
#print("open neighbors for ",popped_cell," ",open_neighbors)
if popped_val != 1 + min(open_neighbors_val):
self.cell_map_return[popped_cell[0]][popped_cell[1]] = 1 + min(open_neighbors_val)
#print("cell value changed")
for neighbor in open_neighbors:
self.stack.append(neighbor)
for neighbor in my_neighbors:
my_neighbors_val.append(self.cell_map_return[neighbor[0]][neighbor[1]])
next_pos = my_neighbors[my_neighbors_val.index(min(my_neighbors_val))]
# my_neighbors.clear()
del my_neighbors[:]
# my_neighbors_val.clear()
del my_neighbors_val[:]
self.path_return.append(next_pos)
#return next_pos
if next_pos==[0,0]:
print("next step is starting point")
#self.path_return.append(next_pos)
return next_pos
else:
print("going to ",next_pos)
return self.update_return(tuple(next_pos),[0,0,0,0])
"""
ff=FloodFill()
#print(ff.update((2,1),[0,1,0,0]))
#print(ff.update((3,1),[0,1,0,0]))
#print(ff.update((4,1),[0,1,0,0]))
#print(ff.update((5,1),[0,1,1,1]))
#print(ff.update((0,0),[0,1,1,0]))
#print(ff.update((1,0),[0,0,1,0]))
#print(ff.update((2,0),[0,1,1,1]))
#----------test for sample maze 2--------
# if ff.update((0,0),[0,0,0,0])==[1,0]:
# print("testing")
ff.update_return(ff.update((0,0),[0,0,0,0]))
#print(ff.update((0,0),[0,0,0,0])[:,-1])
print(ff.cell_map)
print(ff.path,"initial steps ",len(ff.path))
print("return path \n",ff.path_return, " steps ",len(ff.path_return))
ff.path.clear()
print(ff.path)
ff.path_return.clear()
ff.update_return(ff.update((0,0),[0,0,0,0]))
#ff.update((0,0),[0,0,0,0])
print("final path \n", ff.path,"final steps ",len(ff.path))
print("return path \n",ff.path_return, " steps ",len(ff.path_return))
ff.path.clear()
ff.path_return.clear()
#ff.update((0,0),[0,0,0,0])
ff.update_return(ff.update((0,0),[0,0,0,0]))
print("final path 2 \n", ff.path,"final steps 2 ",len(ff.path))
print("return path \n",ff.path_return, " steps ",len(ff.path_return))
ff.path.clear()
ff.path_return.clear()
#ff.update((0,0),[0,0,0,0])
ff.update_return(ff.update((0,0),[0,0,0,0]))
print("final path 3 \n", ff.path,"final steps 3 ",len(ff.path))
print("return path \n",ff.path_return, " steps ",len(ff.path_return))
print(ff.cell_map)
print(ff.cell_map_return)
#print(ff.wall_map_v)
#print(ff.wall_map_h)
""" | [
"suchiv2311@gmail.com"
] | suchiv2311@gmail.com |
d3fbb683701349fd99cd00a40f06c23cd7e7a89e | f90945225102d40411bbd523c247e4068231981c | /traffic_signs.py | 3c58d13b6c135bed04fe7b387599a0d83cdc222f | [] | no_license | mreichelt/CarND-Traffic-Sign-Classifier-Project | a21e561d7b64b754e966e3ff8800d7f363ba6ad0 | 3f96915cb19f58cda5344d0eb3b515085dfecc80 | refs/heads/master | 2020-12-24T19:51:24.658352 | 2017-04-03T12:49:14 | 2017-04-03T12:49:14 | 86,221,295 | 0 | 0 | null | 2017-03-26T09:28:01 | 2017-03-26T09:28:01 | null | UTF-8 | Python | false | false | 11,678 | py | # Load pickled data
import pickle
import tensorflow as tf
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'traffic-signs-data/train.p'
validation_file = 'traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(set(y_train))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
#%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# grab indices of all 43 labels (first image is ok for visualization)
plt.rcParams.update({'figure.max_open_warning': 100})
u, indices = np.unique(y_train, return_index=True)
for i in indices:
plt.figure(figsize=(6, 3))
plt.title('label ' + str(y_train[i]))
plt.imshow(X_train[i].squeeze())
### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc.
### Feel free to use as many code cells as needed.
def grayscale(X):
# we simply add up the colors - they will be normalized away anyway later on
return np.sum(X, axis=3, keepdims=True)
def feature_scaled(X, min, max):
return (X - min) / (max - min)
print('applying grayscale')
X_train = grayscale(X_train)
X_valid = grayscale(X_valid)
X_test = grayscale(X_test)
print('applying feature scaling')
min = np.min([np.min(X_train), np.min(X_valid), np.min(X_test)])
max = np.max([np.max(X_train), np.max(X_valid), np.max(X_test)])
X_train = feature_scaled(X_train, min, max)
X_valid = feature_scaled(X_valid, min, max)
X_test = feature_scaled(X_test, min, max)
### Define your architecture here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
def LeNet(x):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
out1 = 6 * net_multiplier
w1 = tf.Variable(tf.truncated_normal([5, 5, 1, out1], mu, sigma))
b1 = tf.Variable(tf.zeros(out1))
conv1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='VALID') + b1
# Activation.
conv1 = tf.nn.relu(conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Layer 2: Convolutional. Output = 10x10x16.
out2 = 16 * net_multiplier
w2 = tf.Variable(tf.truncated_normal([5, 5, out1, out2], mu, sigma))
b2 = tf.Variable(tf.zeros(out2))
conv2 = tf.nn.conv2d(conv1, w2, strides=[1, 1, 1, 1], padding='VALID') + b2
# Activation.
conv2 = tf.nn.relu(conv2)
# Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Flatten. Input = 5x5x16. Output = 400.
flat_out = 5 * 5 * out2
fc0 = flatten(conv2)
# Layer 3: Fully Connected. Input = 400. Output = 120.
out3 = 120 * net_multiplier
w3 = tf.Variable(tf.truncated_normal([flat_out, out3], mu, sigma))
b3 = tf.Variable(tf.zeros(out3))
fc1 = tf.matmul(fc0, w3) + b3
# Activation.
fc1 = tf.nn.relu(fc1)
# DROPOUT
h_fc1_drop = tf.nn.dropout(fc1, keep_prob)
# Layer 4: Fully Connected. Input = 120. Output = 84.
out4 = 84 * net_multiplier
w4 = tf.Variable(tf.truncated_normal([out3, out4], mu, sigma))
b4 = tf.Variable(tf.zeros(out4))
fc2 = tf.matmul(h_fc1_drop, w4) + b4
# Activation.
fc2 = tf.nn.relu(fc2)
# Layer 5: Fully Connected. Input = 84. Output = 43 (n_classes).
w5 = tf.Variable(tf.truncated_normal([out4, n_classes], mu, sigma))
b5 = tf.Variable(tf.zeros(n_classes))
logits = tf.matmul(fc2, w5) + b5
return logits
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, n_classes)
learning_rate = 0.001
batch_size = 128
epochs = 10
dropout = 0.5
net_multiplier = 5
skip_training = True
save_path = './model'
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, batch_size):
batch_x, batch_y = X_data[offset:offset + batch_size], y_data[offset:offset + batch_size]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
if not skip_training:
print("Training...")
print()
for i in range(epochs):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, batch_size):
end = offset + batch_size
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
print("Epoch {} ...".format(i + 1))
print("Train Accuracy = {:.3f}".format(evaluate(X_train, y_train)))
print("Validation Accuracy = {:.3f}".format(evaluate(X_valid, y_valid)))
print()
saver.save(sess, save_path)
print("Model saved")
else:
saver.restore(sess, save_path)
print("Model loaded")
print("Test Accuracy = {:.3f}".format(evaluate(X_test, y_test)))
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import cv2
X_germansigns_files = [
'1_stop_14.png',
'2_noentry_17.png',
'3_stop_14.png',
'4_yield_13.png',
'5_rightofway_nextintersection_11.png'
]
# yay, matplotlib and cv2 have blue and red flipped - thanks to http://stackoverflow.com/a/15074748/1134940 we can
# easily flip those again :)
def flip_blue_red(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
X_germansigns_orig = np.array([flip_blue_red(cv2.imread('german_signs/' + file)) for file in X_germansigns_files])
y_germansigns = np.array([14, 17, 14, 13, 11])
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
X_germansigns = grayscale(X_germansigns_orig)
X_germansigns = feature_scaled(X_germansigns, min, max)
for i, img in enumerate(X_germansigns):
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plt.imshow(X_germansigns_orig[i].squeeze())
plt.title('original')
plt.subplot(1, 2, 2)
plt.imshow(img.squeeze(), cmap='gray')
plt.title('grayscaled + normalized')
with tf.Session() as sess:
saver.restore(sess, save_path)
# this will output all 43 predictions for each of the 5 images, shape: 5x43
prediction = sess.run(logits, feed_dict={x: X_germansigns, y: y_germansigns, keep_prob: 1.0})
# now just take the index with the highest possibility
predicted_labels = np.argmax(prediction, axis=1)
print('predicted labels: ' + np.array_str(predicted_labels))
print('correct labels: ' + np.array_str(y_germansigns))
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
accuracy = np.sum(predicted_labels == y_germansigns) / len(y_germansigns)
print("Accuracy for German signs = {:.3f}".format(accuracy))
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
print(sess.run(tf.nn.top_k(tf.constant(prediction), k=5)))
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
# def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
# # Here make sure to preprocess your image_input in a way your network expects
# # with size, normalization, ect if needed
# # image_input =
# # Note: x should be the same name as your network's tensorflow data placeholder variable
# # If you get an error tf_activation is not defined it maybe having trouble accessing the variable from inside a function
# activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
# featuremaps = activation.shape[3]
# plt.figure(plt_num, figsize=(15, 15))
# for featuremap in range(featuremaps):
# plt.subplot(6, 8, featuremap + 1) # sets the number of feature maps to show on each row and column
# plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
# if activation_min != -1 & activation_max != -1:
# plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min,
# vmax=activation_max, cmap="gray")
# elif activation_max != -1:
# plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
# elif activation_min != -1:
# plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
# else:
# plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
| [
"mcreichelt@gmail.com"
] | mcreichelt@gmail.com |
4066230eaf1dc2764d4c74bd5704166d665af315 | 346d033ca9a934e61bcfdf6ad54d4530c1145bba | /src/ui/pages/Inbox.py | 2adf653863372020c2e7a69c7e4b0b93adcc0ca7 | [] | no_license | Ernxst/Flat-UI-Concept | ac1c9cfbbf7700daac945da9d140a8a8e1f38d04 | fa15e575875c062cc76991b490304ab6e673c02d | refs/heads/master | 2022-12-06T14:48:05.177637 | 2020-08-03T18:45:45 | 2020-08-03T18:45:45 | 284,108,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from ui.pages.MenuPage import MenuPage
class Inbox(MenuPage):
def _update_page_data(self):
pass
def _config_grid(self):
pass
def _show(self):
pass
def search(self, search_term):
pass
def __init__(self, master, model):
super().__init__(master, 'Inbox', model=model) | [
"ernest.nkansah-badu.19@ucl.ac.uk"
] | ernest.nkansah-badu.19@ucl.ac.uk |
8f70b79b0832fcb6dbcfda0e09aa5d3be24a4288 | 7e7a2872907d3dd7a8139278b321c4a6a7129e87 | /go_fluent_app/urls.py | 28040862822a9aa2ab111b067bd7c4564b67dd27 | [] | no_license | xmaanall/go-fluent-psi | 47bdfbc39e56c66fd42486cbd589e2227da57e30 | 67ce5a0579101b073b0ea35085ee9e8b6944bccb | refs/heads/master | 2023-05-28T23:08:19.440928 | 2021-06-17T07:04:23 | 2021-06-17T07:04:23 | 377,732,347 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from django.urls import path
from go_fluent_app import views
from django.conf.urls.static import static
from django.conf import settings
from video.views import video
from .views import (
# QuizListView,
quiz_view,
quiz_data_view,
# quiz,
quizes,
save_quiz_view
)
urlpatterns = [
path('' , views.home , name= "home" ),
path('start/' , views.choose , name= "choose"),
path('quiz/' , quizes , name= "quizes"),
path('quiz/<pk>/', quiz_view, name='quiz-view'),
path('quiz/<pk>/save/', save_quiz_view, name='save-view'),
path('quiz/<pk>/data/', quiz_data_view, name='quiz-data-view'),
path('language/<title>/' , views.language , name='language'),
path('language/<title>/lesson/', video ,name='video'),
# path('language/<title>/quiz/', quiz ,name='quiz'),
# path('quiz/', QuizListView.as_view(), name='main-view'),
# path('language/<title>/quiz/<pk>/', quiz_view, name='quiz-view'),
# path('language/<title>/quiz/<pk>/save/', save_quiz_view, name='save-view'),
# path('language/<title>/quiz/<pk>/data/', quiz_data_view, name='quiz-data-view'),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"xmaanall@hotmail.com"
] | xmaanall@hotmail.com |
b6719129deb3753fda7d1da2bf054ef2b0b7086b | bb4e132c5978a1edc2ef4fb78d1bb5a793809408 | /dral_text/migrations/0005_auto_20180421_2332.py | 2011d67aa3e519a34a52ebb3021d281bc28eafa0 | [
"MIT"
] | permissive | n-romanova/dral-django | 7335e581f1fffe0e2d42614678010ead5c9202f3 | 4af92a46e207cc8a427d2f8eafe688c61a73d39e | refs/heads/master | 2020-08-31T03:11:37.199516 | 2019-10-03T17:19:05 | 2019-10-03T17:19:05 | 218,569,974 | 0 | 0 | null | 2019-10-30T16:15:47 | 2019-10-30T16:15:47 | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.0 on 2018-04-21 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dral_text', '0004_auto_20180421_2231'),
]
operations = [
migrations.AddField(
model_name='occurence',
name='paraphrase',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='replace',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='zero',
field=models.BooleanField(default=False),
),
]
| [
"geoffroy.noel@kcl.ac.uk"
] | geoffroy.noel@kcl.ac.uk |
172340ba642e9e8c49c315c40271563369d27c98 | bf2313718aaaa3219b2ef9f30a940b96c8de4b1c | /bookapp/models.py | 3509b99db09ab0c9781531763c4fce646a123a8a | [] | no_license | whyme0/BooksWeb | 4a63b28b01d7c0333ce3eade5007f2117577f03c | 38eda999e5ae28f287e1d3f0d2dde1cd35a16e3a | refs/heads/master | 2021-01-06T00:08:57.527182 | 2020-03-22T12:34:52 | 2020-03-22T12:34:52 | 241,174,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | from django.db import models
from django.core.validators import MaxValueValidator
from datetime import datetime
from django.contrib.auth.models import User
User._meta.get_field('email')._unique = True
class Autor(models.Model):
author_full_name = models.CharField(max_length=303, default=None)
date_birth = models.DateField('birth date')
death_date = models.DateField('death date')
birth_place = models.CharField(max_length=90, default=None)
author_picture = models.ImageField(upload_to='bookapp/static/bookapp/pictures', default='bookapp/static/bookapp/pictures/undefiend.png')
def __str__(self):
return self.author_full_name
def get_static_url(self) -> str:
return self.author_picture.url[7:]
class Book(models.Model):
book_autor = models.ForeignKey(Autor, on_delete=models.SET_NULL, null=True)
book_autor_info = models.CharField(max_length=303)
book_name = models.CharField(max_length=2**10)
book_genre = models.CharField(max_length=32)
book_picture = models.ImageField(upload_to='bookapp/static/bookapp/pictures', default='bookapp/static/bookapp/pictures/undefiend.png')
# just year of publication
book_year = models.PositiveSmallIntegerField(
validators=[
MaxValueValidator(
datetime.now().year,
'Год должен быть не больше чем тукущий'
)
]
)
book_description = models.TextField()
def __str__(self):
return self.book_name
def get_static_url(self):
return self.book_picture.url[7:]
def is_new():
return self._book_year >= 2000
| [
"sparkjetstudiost@gmail.com"
] | sparkjetstudiost@gmail.com |
803f3401202b20729ba63a9968b76cfb69eb1b03 | c558d1da1aedf377e6cb6cf66c5136cfb7c32167 | /python-new-trunk/sfapi2/sflib/runWithAnalysis.py | 978d569bb74522ab578050a8fad567a9a6a3a256 | [
"CC0-1.0"
] | permissive | raychorn/svn_molten-magma | 46a8da015844b52fd2fc777225f11b1891b0000a | 8aa2ff2340707eecae6514943e86f5afba9cd54a | refs/heads/main | 2022-12-26T15:45:24.851522 | 2020-10-15T16:52:04 | 2020-10-15T16:52:04 | 304,358,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | import os, sys
import traceback
from vyperlogix import misc
from vyperlogix.misc import ioTimeAnalysis
import types
import SfStats
sf_stats = SfStats.SfStats()
def dummy():
pass
def init_AnalysisDataPoint(name):
ioTimeAnalysis.initIOTime(name)
def begin_AnalysisDataPoint(name):
ioTimeAnalysis.ioBeginTime(name)
def end_AnalysisDataPoint(name):
ioTimeAnalysis.ioEndTime(name)
def count_query():
sf_stats.count_query()
def runWithAnalysis(func=dummy,args=[],_ioElapsedTime=dummy):
caller = misc.callersName()
ioTimeAnalysis.initIOTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioBeginTime('%s::%s' % (__name__,caller))
val = None
try:
if (len(args) == 0):
val = func()
else:
val = func(args)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
ioTimeAnalysis.ioEndTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioTimeAnalysisReport()
_et = 0
_key_list = [k for k in ioTimeAnalysis._ioTime.keys() if (k.find('SOQL') > -1)]
for _key in _key_list:
_et += (0 if (len(_key) == 0) else ioTimeAnalysis._ioTime[_key][0])
if (_et > 0):
_soql_per_sec = sf_stats.query_count / _et
if (_soql_per_sec > 0):
_ms_per_soql = 1000 / _soql_per_sec
else:
if (sf_stats.query_count == 0):
print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide Zero by some number at this time; recommend using the functions that count queries from this module.' % (misc.funcName())
elif ():
print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide by Zero at this time.' % (misc.funcName())
_ms_per_soql = -1
else:
print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because SOQL per Second because there is no reported elapsed time from SOQL activities.' % (misc.funcName())
try:
v_ioElapsedTime = float(ioTimeAnalysis._ioElapsedTime)
if (v_ioElapsedTime > 0):
soql_per_sec = sf_stats.query_count / v_ioElapsedTime
if (soql_per_sec > 0):
ms_per_soql = 1000 / soql_per_sec
else:
print >>sys.stderr, '(%s) 2.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide by Zero at this time.' % (misc.funcName())
ms_per_soql = -1
t_analysis_1 = '%-10.2f' % soql_per_sec
t_analysis_2 = '%-10.4f' % ms_per_soql
print >>sys.stdout, '(Apparent) SOQL per second = %s or %s ms per SOQL.' % (t_analysis_1.strip(),t_analysis_2.strip())
if (_et > 0):
_t_analysis_1 = '%-10.2f' % _soql_per_sec
_t_analysis_2 = '%-10.4f' % _ms_per_soql
print >>sys.stdout, '(Actual) SOQL per second = %s or %s ms per SOQL.' % (_t_analysis_1.strip(),_t_analysis_2.strip())
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because there is no reported elapsed time from SOQL activities.'
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because _ioElapsedTime is %4.2f.' % (v_ioElapsedTime)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
print >>sys.stdout, 'SOQL Count=%d' % sf_stats.query_count
return val
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
9c6e732df610377c770e18ff5ccebf8e062cf91f | 419231d8bf3e94f07c11625b9f54f12522153d95 | /server/models/models/deposit.py | 813bac8c282f7f37fbc39b34b1ed0672e13da35f | [] | no_license | 06wagon/LightningFuturesExchange | f062268a343aeaab6415515f0ae5cd046fb8f5bf | 337915c9fc024a2da66975d2d6302a25b31d7a68 | refs/heads/master | 2020-03-28T12:04:54.500939 | 2018-04-30T03:46:19 | 2018-04-30T03:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | from shared.shared import db
import copy
class Deposit(db.Model):
user_id = db.Column(db.Integer, primary_key=True, nullable=False)
address_id = db.Column(db.Integer, primary_key=True, nullable=False)
deposit_id = db.Column(db.Integer, primary_key=True, nullable=False)
transaction_id = db.Column(db.String(100), nullable=False)
quantity = db.Column(db.BigInteger, nullable=False)
created_date = db.Column(db.DateTime(), nullable=False)
def to_dic(self):
return {
"userId": self.user_id,
"addressId": self.address_id,
"depositId": self.deposit_id,
"transactionId": self.transaction_id,
"quantity": float(self.quantity),
"createdDate": self.created_date
}
def clone(self):
return copy.copy(self)
def copy_values(self, item):
self.__dict__.update(item.__dict__)
| [
"ryansfishman@gmail.com"
] | ryansfishman@gmail.com |
bc982bdb45cf50ec555fed5e6ba31c92be918480 | 26b2a9882f24ce65e3df5a241045d3940af2e2b2 | /cons/admin/school/changeschools.py | 3a4ab04b76f4aa8f2547a89d9893606a56197580 | [] | no_license | qhn-qhn/recommend | 0093bea156932c65089454ecd1cbc4657df870d1 | f75200185b16d55aaf2a969c1b635215f7f161a2 | refs/heads/master | 2023-07-15T23:53:34.604350 | 2021-08-21T05:08:32 | 2021-08-21T05:08:32 | 398,299,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from conn import startdb
def changeschools(data):
db = startdb()
cursor = db.cursor()
sql = "UPDATE school SET school_type='%s',location='%s',location_type='%s',belong='%s',yjsy='%s',self_line='%s' WHERE name='%s'" % (data['school_type'], data['location'], data['location_type'], data['belong'], data['yjsy'], data['self_line'], data['name'])
try:
cursor.execute(sql)
db.commit()
# 关闭数据库连接
db.close()
return 1
except:
db.close()
return 0 | [
"qhn99323@163.com"
] | qhn99323@163.com |
f990c9a495667cc7da0ff93126f6ae00bde1b7a4 | 0a1eead498ec4770f9f54bae20e5ef3524427784 | /euler_11.py | 2eaf554c1dc5698830e40ca4a1269395a1e62685 | [] | no_license | eugenekang/Euler-Sets | 9dc43aa9a95e3dd3b2df07b210a95629098d8932 | b1d185ed2a714326140cfa877ce31222d69f17cd | refs/heads/master | 2022-12-19T05:05:01.625420 | 2020-09-25T16:33:37 | 2020-09-25T16:33:37 | 286,280,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,024 | py | """
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
from Tools.common_tools import create_grid, extract_q_text, create_table
# String to operate on.
num_string = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
# Function to get the product of horizontally sequential integers in a table, without wrap, according to the number of factors to multiply by, given by user.
#table is the array of arrays input
#num_factr is the number of factors to use to create the product.
def get_row_prod(table, num_factr):
prod_array = []
for row in table:
for element in range(0, len(row)):
if element <= len(row) - num_factr:
temp_prod = 1
for i in range (0, num_factr): #Create products
temp_prod *= int(row[element+i])
prod_array.append(temp_prod)
return prod_array
# Function to get the product of vertically sequential integers in a table, without wrap, according to the number of factors to multiply by, given by user.
def get_col_prod(table, num_factr):
num_cols = len(table[0])
prod_array = []
for col in range (0, num_cols):
for row in range (0, len(table) - num_factr + 1):
temp_prod = 1
for i in range (0, num_factr): #Create products
temp_prod *= int(table[row + i][col])
prod_array.append(temp_prod)
return prod_array
# Function to get products going diagonally up to the right of the grid.
def get_diag_asc_prod(table, num_factr):
prod_array = []
for row in range(num_factr - 1,len(table)): # Search each "row" as an array in table, bounds from row 3 to row 19.
for col in range(0, len(table[row]) + 1 - num_factr): # Search each "col" as an element in that array, bounds from row 0 to row 17
tmp_prod = 1
for x in range(0, num_factr): # Multiply the products together
tmp_prod *= int(table[row - x][col + x])
prod_array.append(tmp_prod)
return prod_array
# Function to get products going diagonally down to the right of the grid.
def get_diag_desc_prod(table, num_factr):
prod_array = []
for row in range(0, len(table) - num_factr + 1): # Search each row as an array, bounds from row 0 to row 16.
for col in range(0, len(table[row]) + 1 - num_factr): # Search each "col" as an element in that array, bounds from row 0 to row 17
tmp_prod = 1
for x in range (0, num_factr):
tmp_prod *= int(table[row + x][col + x])
prod_array.append(tmp_prod)
return prod_array
# Function to determine the greatest value in a table.
def find_greatest(list):
comparison = 0
for x in list:
if x > comparison:
comparison = x
return comparison
def compute(table, num_factr):
dir_max = {} # Dict of directional max products: vert, horiz, and diag.
# Get row products
dir_max['row'] = find_greatest(get_row_prod(table, num_factr))
# Get col products
dir_max['col'] = find_greatest(get_col_prod(table, num_factr))
# Get diag_asc products
dir_max['diag_asc'] = find_greatest(get_diag_asc_prod(table, num_factr))
# Get diag_desc products
dir_max['diag_desc'] = find_greatest(get_diag_desc_prod(table, num_factr))
return dir_max
if __name__ == "__main__":
# Create the table
table = create_table(extract_q_text("raw_input.txt", "E11"), 2)
# Describe number of factors to create product
num_factr = 4
# Compute solution
dir_max = compute(table, num_factr)
# Determine the greatest product value overall.
# Return information to user
print("The greatest value overall was: " + str(find_greatest(dir_max.values()))+ ".")
| [
"eugene.yc.kang@gmail.com"
] | eugene.yc.kang@gmail.com |
ca2dc73d520acbe6b4c50951edc3218dd761829c | be5fedd93bcd53a56519f2224e41398eaa11a457 | /pipeline/5_tfrecord_generator.py | 4097819c6d5c7460d7055d05f7355b27c2d86f4d | [] | no_license | Emilio-Picariello/Artificial_Vision-Unisa-_2021_group4 | 1f8b5095e7c0bed77a1dc93977885cfa45c05c3b | 4f2423d72554d52a359cbc8602e5f3d6e23100e3 | refs/heads/main | 2023-02-16T07:35:07.162346 | 2021-01-07T22:30:26 | 2021-01-07T22:30:26 | 327,071,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,478 | py | import tensorflow as tf
import csv
import pathlib
import numpy as np
import random
import _util_prepare_ as util
import os
import shutil
"""given some csvs it reads imges, detects face, puts image on it and adds it to one of n tfrecord files"""
"""FUNDAMENTAL ARGS"""
"""
[WARNING]
root is where to find dataset
if it's the first time you run this code,
write your relative dataset path here
"""
#root = "./../../complete_train"
root="./ultra_lite_train"
"""where to find original csvs to work with"""
"""WG:this script is very slow, I do not suggest to read all csv at once but to indicate a 'cache folder'
for a bunch of csvs"""
csv_root = "./divided_csv/"
#csv_root = "./cache_csvs"
"""where to print csvs"""
output = "./tf_records"
"""size to resize each image to"""
size = 96
"""SUPPORT FUNCTIONS"""
"""it is useless to put all of those in util py file those are created only to keep the code readable"""
"""All raw values should be converted to a type compatible with tf.Example."""
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature_old_(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _list_feature(value):
"""Returns a list useful for classification"""
class_list = [0] * 101
age = int(value)
if (value - float(age)) > 0.5:
age = age + 1
if age > 100 :
age = 100
class_list[age] = 1
arr = np.array(class_list)
return _bytes_feature(arr.tobytes())
def _int64_feature(value):
"""Returns an int in one shot format."""
class_list=np.array([0]*101, dtype=np.int64)
age = int(value)
if (value - float(age)) > 0.5 :
age = age +1
class_list[age] = 1
return tf.train.Feature(int64_list=tf.train.Int64List(value=class_list))
def _int64_feature_order(value):
"""Returns a list in an ordered format."""
class_list=np.array([0]*101, dtype=np.int64)
age = int(value)
if (value - float(age)) > 0.5 :
age = age +1
for i in range(0,age):
class_list[i] = 1
return tf.train.Feature(int64_list=tf.train.Int64List(value=class_list))
def _intify(num):
"""approximates an int to the nearest integer"""
age = int(num)
if (num - float(age)) > 0.5:
age = age + 1
if age > 100:
age = 100
return age
def tfRecordCreator(folder,to_store, name, size):
    """
    Summary line.
    it reads images, detects faces, resizes them, puts them in a tfrecord with label
    Parameters
    ----------
    folder : str
        a path for an output folder to write to
    to_store : []
        a list of couples [path,age] to read from
    name : str
        a name for the resultant tfrecord file
    size : int
        desired dimension (size*size)
    Returns
    -------
    int
        a tf record with extracted faces and labels (in a lot of different variants because multiple tests
        where done with different scripts)
    """
    # NOTE: relies on the module-level `root` constant and on util.detect
    # (face detector returning a resized array, or None when no face found).
    num_img = 0
    print("from " + str(name))
    tfrecord_writer = tf.io.TFRecordWriter(folder + "/" + name + ".tfrecords")
    """begins read and write operation on a tfrecord for each image in the list"""
    for elem in to_store:
        num_img = num_img +1
        filename = elem[0]
        label = elem[1]
        img_path = root+"/"+filename
        face = util.detect(img_path, (size,size))
        # Images with no detectable face are skipped and not counted.
        if face is None:
            num_img = num_img -1
            continue
        image_string = face.tobytes()
        """example is wrote with image in bytes and label in multiple versione (instead of making multiple
        tfrecords version it has been chosen to put everything needed in one)"""
        # One Example carries the label in every encoding (regression float,
        # rounded int, one-hot, ordinal) plus the raw image bytes.
        example = tf.train.Example(features=tf.train.Features(feature={
            'label_regr': _float_feature(float(label)),
            'label_regr_int': _int64_feature_old_(_intify(float(label))),
            'label_class':_int64_feature(float(label)),
            'label_order': _int64_feature_order(float(label)),
            'image': _bytes_feature(image_string),
        }))
        tfrecord_writer.write(example.SerializeToString())
        # Progress log every 1000 successfully written images.
        if num_img % 1000 == 0 :
            print("for "+name+" tfrecord "+str(num_img)+" images have been written")
    tfrecord_writer.close()
    print(name +" tfrecord contains images " + str(num_img))
def csv_reader_with_check(root_path, csv_file_path):
    """Read [image, label] couples from a csv, keeping only rows whose image
    exists under ``root_path`` and whose label is non-empty.

    Parameters
    ----------
    root_path : str
        directory the relative image paths in the csv are resolved against
    csv_file_path : str
        path of the csv file to read (rows: image path, age label)

    Returns
    -------
    list
        ``[image_path, age]`` couples for every usable row
    """
    actual_images = []
    # newline='' is the documented way to open files for the csv module.
    with open(csv_file_path, 'r', newline='') as file:
        reader = csv.reader(file)
        for row in reader:
            # Fix: blank or short lines used to raise IndexError on row[0]/row[1].
            if len(row) < 2:
                continue
            csv_old_path = row[0]
            age = row[1]
            new_path = root_path + "/" + csv_old_path
            if pathlib.Path(new_path).exists() and age not in (None, ""):
                actual_images.append([csv_old_path, age])
    return actual_images
tf.executing_eagerly()
"""BEGINNING OF THE SCRIPT"""
if not os.path.isdir(root):
    print("modify root constant in the code with your relative path to dataset root")
    exit(404)
# The output folder is recreated from scratch on every run.
if os.path.isdir(output):
    shutil.rmtree(output)
os.mkdir(output)
"""it collects each csv found in the folder"""
p = pathlib.Path(csv_root)
found_csvs = [x for x in p.iterdir() if x.is_file()]
"""every csv root is elaborated in a list of couple [csv_root,bare_name] as support"""
csvs_to_record = []
for cs in found_csvs:
    # Fix: the original split str(cs) on "\\", which only works with Windows
    # path separators; pathlib's .name is portable across platforms.
    name = cs.name
    csvs_to_record.append([csv_root + "/" + name, name.split(".")[0]])
"""for each csv images are read, shuffled and sent to tf print function"""
for cs in csvs_to_record:
    to_store_list = csv_reader_with_check(root, cs[0])
    print(str(len(to_store_list)) + " images read from " + cs[1] + ".csv and ready to be recorded.")
    random.shuffle(to_store_list)
    tfRecordCreator(output, to_store_list, cs[1], size)
print("END.") | [
"noreply@github.com"
] | noreply@github.com |
8d2a2ca297e307ce475c3a5183cea37e98fcc393 | dd54710ea6f6a6f8e623615999c3831bdbd6e67c | /dogs and cat.py | 5d0738663790df2167a0e65bf569d9bc327ce8f2 | [] | no_license | Parthdhir1/Image-Prediction | 6def1a434514e519a05d1852a18eae42987af302 | 8f7079c6334337510efe3c09f7bfefb9b1ab0611 | refs/heads/master | 2020-03-21T23:18:03.062153 | 2018-06-29T17:37:37 | 2018-06-29T17:37:37 | 139,178,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 20:30:32 2018
@author: Parth
"""
# Part 1 - building the convolutional network (CNN)
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
classifier= Sequential()
# Step 1 - Convolution.
# NOTE(review): Convolution2D(32, 3, 3) is the Keras 1.x positional signature
# (filters, rows, cols); newer Keras spells this Conv2D(32, (3, 3)) — confirm
# against the installed Keras version.
classifier.add(Convolution2D(32,3,3, input_shape=(64,64,3),activation='relu'))
# Step 2 - Pooling (halves spatial dimensions).
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolution + pooling layer for better performance.
classifier.add(Convolution2D(32,3,3,activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flattening step: feature maps -> 1-D vector.
classifier.add(Flatten())
# Full connection: hidden layer + single sigmoid output (binary cat/dog).
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# Augmentation on the training set only; both sets are rescaled to [0, 1].
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
# NOTE(review): steps_per_epoch/validation_steps count *batches* per epoch in
# Keras; 8000 and 2000 with batch_size=32 look like image counts carried over
# from a tutorial — presumably 8000/32 and 2000/32 were intended. Confirm.
classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 2000)
# Part 3 - single prediction on one image
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
# Add the batch dimension expected by predict(): (1, 64, 64, 3).
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
# NOTE(review): this expression's value is discarded; it only documents the
# label->index mapping when run interactively.
training_set.class_indices
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat' | [
"40251393+Parthdhir1@users.noreply.github.com"
] | 40251393+Parthdhir1@users.noreply.github.com |
f8db1d8aeaa255369f51af9a750bd616d982d30b | 178b78b1300792fb962ec477b2671614ad8f2c0e | /flask-blueprint-tutorial/myenv/bin/webassets | 2f1ccd7471b1aeaeb49a7c42c794bda8d50d5db7 | [
"MIT"
] | permissive | Savithamark/Flask_Learning | c1b342fff559edf5037bdf77dbd54407b2436e1e | 14384cf28cc7b7b0c0b83f456f05b5f6d2ec20be | refs/heads/master | 2022-12-03T10:36:16.446171 | 2020-08-25T21:27:12 | 2020-08-25T21:27:12 | 288,428,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | #!/home/savitha/Documents/Flask_18-08-2020/Flask_Learning/Blueprint/flask-blueprint-tutorial/myenv/bin/python3
# -*- coding: utf-8 -*-
# Setuptools-style console-script shim for the ``webassets`` CLI; lives in a
# virtualenv bin/ directory and appears to be auto-generated on install.
import re
import sys
from webassets.script import run
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix Windows launchers append to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"savithamark.093@gmail.com"
] | savithamark.093@gmail.com | |
e40a9f4648944ecbb580038b5267b736e6a1cc7a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /TmasgxCm6iz3gTGHk_18.py | f0c40f2ac8ab47faef818d3f66b85e4ebaed9fb1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | """
Write a function that returns the **length of the shortest contiguous
sublist** whose sum of all elements **strictly exceeds** `n`.
### Examples
min_length([5, 8, 2, -1, 3, 4], 9) ➞ 2
min_length([3, -1, 4, -2, -7, 2], 4) ➞ 3
# Shortest sublist whose sum exceeds 4 is: [3, -1, 4]
min_length([1, 0, 0, 0, 1], 1) ➞ 5
min_length([0, 1, 1, 0], 2) ➞ -1
### Notes
* The sublist should be composed of **contiguous elements** from the original list.
* If no such sublist exists, return `-1`.
"""
def min_length(lst, n):
    """Return the length of the shortest contiguous sublist of ``lst`` whose
    sum strictly exceeds ``n``, or -1 if no such sublist exists.

    Improvement: the original materialized every sublist of every length
    (O(n^3) time, O(n^2) extra space); this scans a running sum from each
    start index instead (O(n^2) time, O(1) space) with identical results.
    """
    best = -1
    for start in range(len(lst)):
        total = 0
        for end in range(start, len(lst)):
            total += lst[end]
            if total > n:
                # Lengths grow with `end`, so the first hit from this start
                # is the shortest sublist beginning at `start`.
                length = end - start + 1
                if best == -1 or length < best:
                    best = length
                break
    return best
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1b0bc314104194da1b427e848f31ff7d0f00a724 | 982cd1423d165a5b15c46eaac50ded7471d96154 | /yitu/resources/books/main.py | c668c9573d4baa50be03498fbeec29803df5c76b | [
"Apache-2.0"
] | permissive | 1ibrary/1ibrary-gzhu | a4ab7100c17e38bef793565bc4ffb13ce8ecb0d3 | 768af1837d6caa185c101cc82ea67efc931865c1 | refs/heads/master | 2022-12-22T10:32:11.613351 | 2017-09-10T06:35:13 | 2017-09-10T06:35:13 | 95,835,451 | 4 | 0 | Apache-2.0 | 2022-12-07T23:56:22 | 2017-06-30T01:32:46 | Python | UTF-8 | Python | false | false | 12,992 | py | # -*- coding: utf-8 -*-
import json
import jpush
from flask_restful import Resource
from yitu import db
from yitu.models.book import Book, HotBook as HBModel
from yitu.models.hot_search import HotSearch as HSModel
from yitu.models.user import User
from yitu.services.douban import Douban
from yitu.services.gzhu.library_search import NcuSearch
from yitu.utils import get_request_params
class HotBook(Resource):
    """POST endpoint returning one 20-item page of books flagged is_hot,
    newest hot entry first. Requires a valid (uid, token) pair."""
    decorators = []
    def post(self):
        args = get_request_params([
            ("page", int, True, "json"),
            ("uid", int, True, "json"),
            ("timestamp", float, True, "json"),
            ("token", str, True, "json")
        ])
        page = args["page"]
        uid = args["uid"]
        # NOTE(review): timestamp is accepted but never used here.
        timestamp = args["timestamp"]
        token = args["token"]
        # Token must decode to the same user id the client claims.
        user = User.verify_token(token)
        if user is None or user.id_ != uid:
            return {
                "data": [],
                "status": 2,
                "msg": "认证失败"
            }
        try:
            # Hot books, most recently promoted first, fixed page size of 20.
            pagination = Book.query.filter_by(is_hot=True) \
                .order_by(Book.hot_id.desc()) \
                .paginate(page, per_page=20, error_out=False).items
            return {
                "status": 0,
                "msg": "搜索成功",
                "data": [{
                    "book_last_number": book.book_last_number,
                    "book_cover": book.book_cover,
                    "book_id": book.book_id,
                    "book_author": json.loads(book.book_author),
                    "book_title": book.book_title,
                    "book_db_id": book.book_db_id,
                    "book_publish": book.book_publish,
                    "book_rate": book.book_rate
                }
                    for book in pagination]
            }
        except Exception as e:
            # Any DB failure is reported as status 2 with an empty payload.
            return {
                "data": [],
                "status": 2,
                "msg": "数据库溜了"
            }
class SearchBook(Resource):
    """POST endpoint searching books by title (type 0), author (type 1) or
    publisher (any other type).

    Results come from the local DB when available; on a cache miss the
    library OPAC (NcuSearch) is crawled, each hit is enriched through
    Douban, persisted, and returned.

    Improvement: the two crawl branches previously duplicated ~30 lines of
    persistence/serialization code; that now lives in _cache_book.
    """
    decorators = []
    def _cache_book(self, book_info):
        """Persist one crawled+enriched book and return its summary dict
        for the response payload."""
        new_book = Book(book_author=book_info["book_author"])
        new_book.book_cover = book_info["book_cover"]
        new_book.book_rate = book_info["book_rate"]
        new_book.book_content = book_info["book_content"]
        new_book.book_publish = book_info["book_publish"]
        # Copies currently on the shelf (not borrowed).
        new_book.book_last_number = len(
            list(filter(lambda x: not x["is_borrowed"], book_info["data"])))
        new_book.book_key = book_info["book_key"]
        new_book.book_db_id = book_info["book_db_id"]
        new_book.book_title = book_info["book_title"]
        new_book.detail_data = json.dumps(book_info["data"])
        db.session.add(new_book)
        db.session.commit()
        return {
            "book_cover": book_info["book_cover"],
            "book_id": new_book.book_id,
            "book_rate": book_info["book_rate"],
            "book_title": book_info["book_title"],
            "book_author": json.loads(book_info["book_author"]),
            "book_last_number": new_book.book_last_number,
            "book_db_id": book_info["book_db_id"],
            "book_publish": book_info["book_publish"]
        }
    def post(self):
        args = get_request_params([
            ("timestamp", float, True, "json"),
            ("token", str, True, "json"),
            ("content", str, True, "json"),
            ("uid", int, True, "json"),
            ("type", int, True, "json"),
            ("page", int, False, "json")
        ])
        timestamp = args["timestamp"]
        token = args["token"]
        content = args["content"]
        uid = args["uid"]
        type = args["type"]
        # Token must decode to the same user id the client claims.
        user = User.verify_token(token)
        if user is None or user.id_ != uid:
            return {
                "data": [],
                "status": 1,
                "msg": "认证失败"
            }
        try:
            clear_content = content
            # type: 0 = title search, 1 = author search, anything else = publisher.
            if type == 0:
                books_of_db = Book.query.filter(Book.book_title.like('%' + clear_content + '%')).paginate(
                    page=args["page"], per_page=20, error_out=False).items
            elif type == 1:
                books_of_db = Book.query.filter(Book.book_author.like('%' + clear_content + '%')).paginate(
                    page=args["page"], per_page=20, error_out=False).items
            else:
                books_of_db = Book.query.filter(Book.book_publish.like('%' + clear_content + '%')).paginate(
                    page=args["page"], per_page=20, error_out=False).items
        except Exception as e:
            return {
                "data": [],
                "status": 2,
                "msg": "数据库溜了"
            }
        if books_of_db:
            # Cache hit: serve straight from the database.
            return {
                "status": 0,
                "msg": "搜索成功",
                "data": [{
                    "book_cover": book.book_cover,
                    "book_id": book.book_id,
                    "book_rate": book.book_rate,
                    "book_title": book.book_title,
                    "book_author": json.loads(book.book_author),
                    "book_last_number": book.book_last_number,
                    "book_db_id": book.book_db_id,
                    "book_publish": book.book_publish
                } for book in books_of_db]
            }
        # Cache miss: crawl the OPAC and enrich each hit through Douban.
        ncu_search = NcuSearch()
        douban = Douban()
        data = []
        try:
            for book_info in ncu_search.get(content, args["page"]):
                if book_info["book_key"]:
                    # Book has an ISBN: enrich by ISBN, dedupe on book_key.
                    extra = douban.search_by_isbn(book_info["book_key"])
                    if not extra:
                        continue
                    book_info.update(extra)
                    if Book.query.filter_by(book_key=book_info["book_key"]).first():
                        continue
                else:
                    # No ISBN: fall back to the title, dedupe on book_db_id.
                    extra = douban.search_by_isbn(book_info["book_title"])
                    if not extra:
                        continue
                    book_info.update(extra)
                    if Book.query.filter_by(book_db_id=book_info["book_db_id"]).first():
                        continue
                data.append(self._cache_book(book_info))
            return {
                "status": 0,
                "msg": "搜索成功",
                "data": data
            }
        except Exception as e:
            print(e)
            return {
                "data": [],
                "status": 3,
                "msg": "服务器溜了"
            }
class ShowDetail(Resource):
    """POST endpoint returning the full cached record (including per-copy
    detail_data) for one book, looked up by book_id."""
    decorators = []
    def post(self):
        args = get_request_params([
            ("timestamp", float, True, "json"),
            ("book_db_id", int, True, "json"),
            ("token", str, True, "json"),
            ("book_id", int, True, "json"),
            ("uid", int, True, "json"),
        ])
        timestamp = args["timestamp"]
        # NOTE(review): book_db_id and timestamp are accepted but unused;
        # the lookup is by book_id only.
        book_db_id = args["book_db_id"]
        token = args["token"]
        book_id = args["book_id"]
        uid = args["uid"]
        # Token must decode to the same user id the client claims.
        user = User.verify_token(token)
        if user is None or user.id_ != uid:
            return {
                "data": [],
                "status": 2,
                "msg": "认证失败"
            }
        try:
            the_book = Book.query.filter_by(book_id=book_id).first()
            if not the_book:
                # NOTE(review): this branch uses key "message" while every
                # other response uses "msg" — looks like a typo; confirm
                # what the client expects before changing.
                return {
                    "status": 0,
                    "message": "搜索成功",
                    "data": None
                }
            # detail_data is stored as a JSON string of per-copy records.
            the_detail_data = json.loads(the_book.detail_data)
            return {
                "status": 0,
                "msg": "搜索成功",
                "data": {
                    "book_rate": the_book.book_rate,
                    "book_content": the_book.book_content,
                    "book_publish": the_book.book_publish,
                    "book_last_number": the_book.book_last_number,
                    "book_key": the_book.book_key,
                    "book_db_id": the_book.book_db_id,
                    "book_title": the_book.book_title,
                    "detail_data": the_detail_data,
                    "book_author": json.loads(the_book.book_author),
                    # Shelf location of the first copy, if any copy exists.
                    "book_place": None if len(the_detail_data) == 0 else the_detail_data[0]["detail_place"],
                    "book_id": the_book.book_id,
                    "book_cover": the_book.book_cover,
                    "is_subscribe": 1 if uid in the_book.subscribers else 0
                }
            }
        except Exception as e:
            return {
                "data": [],
                "status": 2,
                "msg": "服务器溜了"
            }
class Subscribe_(Resource):
    """POST endpoint that pushes a JPush notification to the caller when the
    requested book currently has a borrowable copy.

    NOTE(review): post() returns None (no JSON response body) and `the_book`
    is dereferenced without a None check — both look like gaps to confirm.
    """
    def post(self):
        args = get_request_params([
            ("timestamp", float, True, "json"),
            ("token", str, True, "json"),
            ("book_id", int, True, "json"),
            ("uid", int, True, "json")
        ])
        timestamp = args["timestamp"]
        # NOTE(review): token/timestamp are read but not verified here,
        # unlike the other resources in this module.
        token = args["token"]
        book_id = args["book_id"]
        uid = args["uid"]
        def _push_msg(message, device_id):
            # NOTE(review): placeholder credentials — real JPush app key /
            # master secret must come from configuration, not source code.
            app_key = 'app_key'
            master_secret = 'master_key'
            _jpush = jpush.JPush(app_key, master_secret)
            push = _jpush.create_push()
            # push.audience = jpush.audience([{"registration_id":device_id}])
            push.audience = {'registration_id': [device_id]}
            # push.audience = device_id
            android_msg = jpush.android(
                message,
                None,
                None,
                {
                    "msg": message,  # force-fit the app's notification payload format
                    "status": 0
                }
            )
            ios_msg = jpush.ios(
                message,
                None,
                None,
                {
                    "msg": message,  # force-fit the app's notification payload format
                    "status": 0
                }
            )
            push.notification = jpush.notification("hello jpush", ios_msg, android_msg, None)
            # push.options = {"time_to_live": 86400, "sendno": 12345, "apns_production":True}
            push.options = {"time_to_live": 86400, "apns_production": True}
            push.platform = jpush.platform("all")
            push.send()
        the_book = Book.query.filter_by(book_id=book_id).first()
        the_detail_data = json.loads(the_book.detail_data)
        # If any copy is marked borrowed... NOTE(review): the flag is set when
        # is_borrowed == 1, then a "book available" push is sent — presumably
        # the condition was meant to detect an *available* copy; confirm.
        flag = 0
        for a_book in the_detail_data:
            if a_book["is_borrowed"] == 1:
                flag = 1
        if flag == 1:
            _push_msg("有书了", uid)
class HotSearch(Resource):
    """POST endpoint returning the names of all stored hot-search keywords."""
    def post(self):
        keywords = HSModel.query.all()
        return {
            "status": 0,
            "msg": "获取成功",
            "data": [entry.name for entry in keywords],
        }
| [
"gx-deng@163.com"
] | gx-deng@163.com |
9301a8e19c39fa597a374ec83ca5ac9308d25d56 | e9032e64138d7b9dd90a330dfe4588e2c83f6667 | /google/cloud/compute_v1/services/url_maps/pagers.py | 1a5d42fc43ca21958c622ecbbf65987afbee0aa4 | [
"Apache-2.0"
] | permissive | Ctfbuster/python-compute | 6cff2418969009794c3fadadc4c45e20d7b40509 | 7a9e8324e08c46a93050908760b2b5aca054a863 | refs/heads/main | 2023-08-26T12:37:52.363526 | 2021-10-04T15:34:37 | 2021-10-04T15:34:37 | 412,884,620 | 0 | 0 | Apache-2.0 | 2021-10-02T18:49:05 | 2021-10-02T18:49:03 | null | UTF-8 | Python | false | false | 5,578 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
    """A pager for iterating through ``aggregated_list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``AggregatedList`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., compute.UrlMapsAggregatedList],
        request: compute.AggregatedListUrlMapsRequest,
        response: compute.UrlMapsAggregatedList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.AggregatedListUrlMapsRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.UrlMapsAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so page_token can be mutated without touching the caller's request.
        self._request = compute.AggregatedListUrlMapsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recently fetched response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.UrlMapsAggregatedList]:
        # Yield the current response, then keep requesting the next page
        # while the service returns a non-empty next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[Tuple[str, compute.UrlMapsScopedList]]:
        # Flatten the (scope name -> scoped list) map entries of every page.
        for page in self.pages:
            yield from page.items.items()

    def get(self, key: str) -> Optional[compute.UrlMapsScopedList]:
        # Lookup in the most recently fetched page only; does not paginate.
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
    """A pager for iterating through ``list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.UrlMapList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``List`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.UrlMapList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., compute.UrlMapList],
        request: compute.ListUrlMapsRequest,
        response: compute.UrlMapList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListUrlMapsRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.UrlMapList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so page_token can be mutated without touching the caller's request.
        self._request = compute.ListUrlMapsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recently fetched response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.UrlMapList]:
        # Yield the current response, then keep requesting the next page
        # while the service returns a non-empty next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[compute.UrlMap]:
        # Flatten every page's items into one stream of UrlMap objects.
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"noreply@github.com"
] | noreply@github.com |
ed4725673a73387fa9143bfc3a1a63fd28e669a2 | 9cabdeb8dce5718e8f4f490f3684eba0eb1f2d2e | /contrib/devtools/github-merge.py | 709b20287ca325e74972a29584e54ec67e442f2a | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | wolfoxonly/woo | fcfe275007cb102fff10239b0f722264dbbd40e2 | a5fb13575afe855b58915bd8e15cbffb9015e5e2 | refs/heads/master | 2020-03-09T17:00:57.668308 | 2018-05-13T15:21:17 | 2018-05-13T15:21:17 | 127,590,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,970 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 Woochain Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json,codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')

# OS specific configuration for terminal attributes.
# Defaults are plain (no color); on POSIX, ANSI escapes are assumed to work.
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.
    '''
    try:
        raw = subprocess.check_output([GIT, 'config', '--get', option])
    except subprocess.CalledProcessError:
        # Option not set (or git failed): fall back to the caller's default.
        return default
    return raw.rstrip().decode('utf-8')
def retrieve_pr_info(repo,pull):
    '''
    Retrieve pull request information from github.
    Return None if no title can be found, or an error happens.
    '''
    try:
        url = "https://api.github.com/repos/" + repo + "/pulls/" + pull
        reader = codecs.getreader('utf-8')
        return json.load(reader(urlopen(Request(url))))
    except Exception as e:
        print('Warning: unable to retrieve pull information from github: %s' % e)
        return None
def ask_prompt(text):
    '''Show `text` on stderr and return one stripped reply line from stdin.'''
    stderr.write(text + " ")
    stderr.flush()
    reply = stdin.readline().rstrip()
    stderr.write("\n")
    return reply
def get_symlink_files():
    # Return every path in HEAD whose git file mode marks it as a symlink.
    # ls-tree output line: "<mode> <type> <hash>\t<path>".
    files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
    ret = []
    for f in files:
        # Mode is octal; 0o120000 is the S_IFLNK bit pattern for symlinks.
        if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
            ret.append(f.decode('utf-8').split("\t")[1])
    return ret
def tree_sha512sum(commit='HEAD'):
    '''Compute a deterministic SHA512 digest over all blobs in `commit`'s tree:
    per-file SHA512 of the blob content, folded into one overall hash as
    "<hexdigest> <path>\\n" entries in sorted path order.'''
    # request metadata for entire tree, recursively
    files = []
    blob_by_name = {}
    for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
        name_sep = line.index(b'\t')
        metadata = line[:name_sep].split() # perms, 'blob', blobid
        assert(metadata[1] == b'blob')
        name = line[name_sep+1:]
        files.append(name)
        blob_by_name[name] = metadata[2]

    files.sort()
    # open connection to git-cat-file in batch mode to request data for all blobs
    # this is much faster than launching it per file
    p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    overall = hashlib.sha512()
    for f in files:
        blob = blob_by_name[f]
        # request blob
        p.stdin.write(blob + b'\n')
        p.stdin.flush()
        # read header: blob, "blob", size
        reply = p.stdout.readline().split()
        assert(reply[0] == blob and reply[1] == b'blob')
        size = int(reply[2])
        # hash the blob data, reading it in chunks of at most 64 KiB
        intern = hashlib.sha512()
        ptr = 0
        while ptr < size:
            bs = min(65536, size - ptr)
            piece = p.stdout.read(bs)
            if len(piece) == bs:
                intern.update(piece)
            else:
                raise IOError('Premature EOF reading git cat-file output')
            ptr += bs
        dig = intern.hexdigest()
        assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
        # update overall hash with file hash
        overall.update(dig.encode("utf-8"))
        overall.update(" ".encode("utf-8"))
        overall.update(f)
        overall.update("\n".encode("utf-8"))
    p.stdin.close()
    if p.wait():
        raise IOError('Non-zero return value executing git cat-file')
    return overall.hexdigest()
def print_merge_details(pull, title, branch, base_branch, head_branch):
    # One-line colored summary of the PR, then the commit graph being merged.
    print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
    subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments():
    '''Define and evaluate the command-line interface of this tool.'''
    epilog = '''
        In addition, you can set the following git configuration variables:
        githubmerge.repository (mandatory),
        user.signingkey (mandatory),
        githubmerge.host (default: git@github.com),
        githubmerge.branch (no default),
        githubmerge.testcmd (default: none).
    '''
    cli = argparse.ArgumentParser(
        description='Utility to merge, sign and push github pull requests',
        epilog=epilog)
    cli.add_argument('pull', metavar='PULL', type=int, nargs=1,
        help='Pull request ID to merge')
    cli.add_argument('branch', metavar='BRANCH', type=str, nargs='?', default=None,
        help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
    return cli.parse_args()
def main():
    '''Drive the whole merge workflow: fetch the PR, build a signed local
    merge commit, compare it with github's merge, let the operator inspect,
    test and sign it, then optionally push the result.'''
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    opt_branch = git_config_get('githubmerge.branch',None)
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        sys.exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        sys.exit(1)
    host_repo = host+":"+repo # shortcut for push/pull target
    # Extract settings from command line
    args = parse_arguments()
    pull = str(args.pull[0])
    # Receive pull information from github
    info = retrieve_pr_info(repo,pull)
    if info is None:
        sys.exit(1)
    title = info['title'].strip()
    body = info['body'].strip()
    # precedence order for destination branch argument:
    #   - command line argument
    #   - githubmerge.branch setting
    #   - base branch for pull (as retrieved from github)
    #   - 'master'
    branch = args.branch or opt_branch or info['base']['ref'] or 'master'
    # Initialize source branches (temporary refs, deleted in the finally block)
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'
    devnull = open(os.devnull,'w')
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        sys.exit(3)
    try:
        # Fetch the PR's head/merge refs plus the current destination branch.
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
                                                          '+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])

    try:
        # Go up to the repository's root.
        toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
        os.chdir(toplevel)
        # Create unsigned merge commit.
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        message += '\n\nPull request description:\n\n  ' + body.replace('\n', '\n  ') + '\n'
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            sys.exit(4)
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            sys.exit(4)

        # Symlinks are refused outright in the merged tree.
        symlink_files = get_symlink_files()
        for f in symlink_files:
            print("ERROR: File %s was a symlink" % f)
        if len(symlink_files) > 0:
            sys.exit(4)

        # Put tree SHA512 into the message
        try:
            first_sha512 = tree_sha512sum()
            message += '\n\nTree-SHA512: ' + first_sha512
        except subprocess.CalledProcessError as e:
            print("ERROR: Unable to compute tree hash")
            sys.exit(4)
        try:
            subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot update message.", file=stderr)
            sys.exit(4)

        print_merge_details(pull, title, branch, base_branch, head_branch)
        print()

        # Run test command if configured.
        if testcmd:
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                sys.exit(5)

            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                # Local merge result differs from github's precomputed merge.
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    sys.exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])

        # The tree must be unchanged after inspection/testing.
        second_sha512 = tree_sha512sum()
        if first_sha512 != second_sha512:
            print("ERROR: Tree hash changed unexpectedly",file=stderr)
            sys.exit(8)

        # Sign the merge commit.
        print_merge_details(pull, title, branch, base_branch, head_branch)
        while True:
            reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
            if reply == 's':
                try:
                    subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
                    break
                except subprocess.CalledProcessError as e:
                    print("Error while signing, asking again.",file=stderr)
            elif reply == 'x':
                print("Not signing off on merge, exiting.",file=stderr)
                sys.exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
    while True:
        reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
        if reply == 'push':
            subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
            break
        elif reply == 'x':
            sys.exit(1)
main()
| [
"415313577@qq.com"
] | 415313577@qq.com |
9b4653d77b64454c4c52778eb28e5e115a07210e | ee33abc8460ccfba3cd3edcb75337354845439fa | /d2l/numpy_version/softmax_reg_gluon.py | 339ddbda9b62382a31276348a51dcb2ef4e7c013 | [] | no_license | TomasBahnik/ml | fb9dc272cfc414efc38cd410918267b0c3537046 | 7296e1bda65037039478a277bc986942de76dc71 | refs/heads/master | 2023-01-31T17:43:44.513144 | 2020-12-13T19:57:21 | 2020-12-13T19:57:21 | 221,159,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | import d2l
from matplotlib import pyplot as plt
from mxnet import gluon, init, npx
from mxnet.gluon import nn
# Use NumPy-compatible semantics for MXNet ndarrays.
npx.set_np()
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
# Softmax regression = one fully-connected layer with 10 outputs (one per
# Fashion-MNIST class); the softmax itself is folded into the loss below.
net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
num_epochs = 2
# Train with d2l's chapter-3 helper (plots loss/accuracy as it goes).
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
def predict_ch3(model, t_i, n=6):  # @save
    """Show the first ``n`` images of one batch from ``t_i`` with their
    true Fashion-MNIST label on top and the model's prediction below.

    :param model: callable mapping a batch of images to class scores
    :param t_i: data iterator yielding (images, labels) batches
    :param n: number of images to display (default 6)
    """
    # Pull exactly one batch out of the iterator.
    for images, labels in t_i:
        break
    actual = d2l.get_fashion_mnist_labels(labels)
    guessed = d2l.get_fashion_mnist_labels(model(images).argmax(axis=1))
    captions = ['{}\n{}'.format(a, g) for a, g in zip(actual, guessed)]
    d2l.show_images(images[0:n].reshape(n, 28, 28), 1, n, titles=captions[0:n])
    plt.show()
predict_ch3(net, test_iter)
net.save_parameters('softmax.params') | [
"tomas.bahnik@ataccama.com"
] | tomas.bahnik@ataccama.com |
5ebff17593beda1effd5a60635d6921fc7b98ab2 | 07cabeb47bd7c9a4e06e824ece28631c7d7441a1 | /virtual/bin/easy_install | d66210d85b88436a51d477c4ceeb85a8a146d4c6 | [
"MIT"
] | permissive | Jeffmusa/PITCH-POOL | bd2b27ea5bc5b47499c0b822c46ff518eae5f2f4 | 96654a3ba7fc3f4ba00d7fb617644cc9cd5ba041 | refs/heads/master | 2020-03-28T04:17:07.471479 | 2018-09-13T13:21:17 | 2018-09-13T13:21:17 | 147,705,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/vicklyne/Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
# Console-script wrapper generated by setuptools inside a virtualenv;
# it simply dispatches to setuptools' easy_install main().
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing '-script.py'/'-script.pyw'/'.exe' from argv[0] so the
    # tool sees its canonical command name (Windows launcher convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jeffmusa@gmail.com"
] | jeffmusa@gmail.com | |
b4441a3d11f64591fc7709954ad9418fa5e0e508 | 88bdfdb4ada00629d7074f4adc6312def80f16da | /python/test/Ch4/PP3CharPicGrid.py | 6e08ae127c21b57068fb562e6d38534cc80a9ba9 | [] | no_license | JeffUsername/python-stuff | 783935c5cfb0aec98a2d9aa427aef506a30ce907 | f18f2b75a650279ab9fbbcfb7dd9282e5dfda6df | refs/heads/master | 2022-11-27T19:06:05.830412 | 2020-07-27T02:21:51 | 2020-07-27T02:21:51 | 263,786,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | grid = [['.', '.', '.', '.', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['O', 'O', 'O', 'O', 'O', '.'],
['.', 'O', 'O', 'O', 'O', 'O'],
['O', 'O', 'O', 'O', 'O', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['.', '.', '.', '.', '.', '.']]
def test(grids):
h = len(grids)
w = len(grids[0])
for i in range(w): #6):
for j in range(h): #8):
print(grid[j][i], end=" ")
print()
test(grid) | [
"jeffbecker56@gmail.com"
] | jeffbecker56@gmail.com |
6571016902aa089e9d980b11281607bfaf68d512 | 3fcdcc4899d37903e9717835b5eb8112c0c8afdf | /djangoBlog/settings.py | 99e6e273a60559f34515706170a327b15cee999b | [] | no_license | MohsenShekarbaigi/mydjangoBlog | 746457dc94e18ba14e199cd1575423b9472a02ad | bfaf00b3c748478b2d04772b0cc0515aadeafd30 | refs/heads/main | 2023-05-11T02:16:12.978068 | 2021-06-01T06:38:05 | 2021-06-01T06:38:05 | 343,312,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Django settings for djangoBlog project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'eyt$c649n%+*_0jwgu701$xnuca473**a#d1bfz-jb6ekb9!k)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; must list real hostnames before going live.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'articles',  # project-local app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoBlog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Extra (non-app) static asset directory served during development.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR,'assets'),
)
| [
"m.shekarbaigi@gmail.com"
] | m.shekarbaigi@gmail.com |
f12435432db738d98e5673e98069089be1809472 | ebc2afdb623804044da79d1986a26f904b276cca | /number_of_contacts/src/main.py | fec3d71c442b8227c9bc723abfb8c2080931e95d | [] | no_license | davkhech/simulation_scripts | 22389169755638acc043ccb3f4b37607760a403b | f0d4116c4ab3a758b352fafd86895567c59040e3 | refs/heads/master | 2020-07-08T02:24:31.113232 | 2019-08-21T12:16:11 | 2019-08-21T12:16:11 | 203,539,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | import argparse
import ujson as json
import matplotlib.pyplot as plt
import numpy as np
from constants import default_cutoff
from file_processors import process_file, process_big_file, process_gro_xtc
from number_of_contacts import calculate_q
def parse_args(*argument_array):
    """Build the CLI parser and parse arguments.

    :param argument_array: optional explicit argv list (used by tests);
        when empty, argparse falls back to sys.argv[1:].
    :return: argparse.Namespace with input, input_xtc, definition,
        ignore_h and cutoff attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--input-xtc')
    # Bug fix: for a positional argument argparse ignores `default` unless
    # nargs='?' (or '*') is given, so the intended default of 1 never
    # applied and the argument was effectively mandatory.
    parser.add_argument('definition', nargs='?', default=1, type=int)
    parser.add_argument('--ignore-h', action='store_true')
    parser.add_argument('--cutoff', type=float, default=default_cutoff)
    return parser.parse_args(*argument_array)
def main(args):
    """Compute the contact metric Q for every trajectory frame, dump the
    series to the file 'result' as JSON, and plot Q versus frame index.

    :param args: namespace produced by parse_args()
    """
    input_file_name = args.input
    definition = args.definition
    cutoff = args.cutoff
    qs = []
    # A gro+xtc pair takes precedence over the plain single-file reader.
    iterator = process_big_file(input_file_name, args.ignore_h) if not args.input_xtc else process_gro_xtc(input_file_name, args.input_xtc, args.ignore_h)
    for cnt_bucket, dna_bucket in iterator:
        qs.append(calculate_q(cnt_bucket, dna_bucket, cutoff, definition))
    # Bug fix: close the output file deterministically (the original passed
    # an anonymous open() handle to json.dump and leaked it).
    with open('result', 'w') as result_file:
        json.dump(qs, result_file)
    # cnt_bucket, dna_bucket = process_file(input_file_name, args.ignore_h)
    # print(cutoff, calculate_q(cnt_bucket, dna_bucket, cutoff, definition))
    # cutoff_array = list(np.arange(0.1, 2, 0.05))
    # qs = []
    # for cutoff in cutoff_array:
    #     q = calculate_q(cnt_bucket, dna_bucket, cutoff, definition)
    #     qs.append(q)
    # # derivative_qs = []
    # # for ind in range(2, len(qs)):
    # #     derivative_qs.append((qs[ind] - qs[ind - 2]) / 0.0001)
    # # axes = plt.gca()
    # # axes.set_ylim([0, 1])
    plt.plot(qs, marker='o')
    plt.plot(0.5,linestyle='-')
    plt.xticks(fontsize=24)
    plt.yticks(fontsize=24)
    plt.show()
# Script entry point: parse CLI arguments, then run the analysis/plot.
if __name__ == '__main__':
    main(parse_args())
| [
"davkhech@gmail.com"
] | davkhech@gmail.com |
f87f08c04c29a3a9eea5fea7390f55fb1e735947 | 6950681c05fa0ae594cee7a9f6e076f9daf44606 | /python/series/series.py | 8065079a39f6f4678f5b7e0131f6d0c51314a9fe | [] | no_license | seggiepants/Exercism | c85f42150f3201e39d2acaead1032583928f1713 | ce20dddb5d0f5056af468201fe7df8ee5b33f12a | refs/heads/master | 2022-05-23T08:51:58.875187 | 2022-05-16T01:18:37 | 2022-05-16T01:18:37 | 240,800,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | def slices(series, length):
"""Return each run of length characters within the given string series
as a list. Duplicate values are allowed.
Parameters:
series: string to return subsections of.
length: integer, length of desired sub-sections
Returns:
List of subsections in the string.
"""
result = []
if length <= 0:
raise ValueError(f'Invalid length supplied. Should be an integer greater than zero, instead recieved {length}')
elif len(series) < length:
raise ValueError('Cannot get a subsection of that length.')
else:
for i in range(len(series) - length + 1):
result.append(series[i:i + length])
return result
| [
"41271733+seggiepants@users.noreply.github.com"
] | 41271733+seggiepants@users.noreply.github.com |
547bd4f8240b7af49479b59148d063c5ad55fd28 | 2867eb59cb2f47e256b7594089ef22837805c577 | /gradient_debug.py | b09612e64ef272ce2dfce17783f981414c606888 | [] | no_license | chiukin/ESFNet-Pytorch | 931dd97ea90aa05ccae31c35d6b1b61fea464ce9 | 9c49e65cbf2929917a32caca8e9e35390c2141f1 | refs/heads/master | 2020-05-05T08:22:07.816532 | 2019-03-28T12:03:59 | 2019-03-28T12:03:59 | 179,861,160 | 2 | 0 | null | 2019-04-06T16:54:29 | 2019-04-06T16:54:28 | null | UTF-8 | Python | false | false | 978 | py | import torch
def get_printer(msg):
    """
    Return a printer function that prints information about a tensor's
    gradient.  Used by register_hook in the backward pass.

    :param msg: label prefixed to every line of output
    :return: printer function suitable for Tensor.register_hook
    """
    def printer(tensor):
        # Bug fix: nelement is a method; the original wrote
        # `tensor.nelement == 1`, comparing the bound-method object to 1,
        # which is always False -- the scalar branch was dead code.
        if tensor.nelement() == 1:
            print("{} {}".format(msg, tensor))
        else:
            # Separators added between the concatenated fragments so the
            # fields no longer run together (e.g. "...)max:" before).
            print("{} shape: {} "
                  "max: {} min: {} "
                  "mean: {}"
                  .format(msg, tensor.shape, tensor.max(), tensor.min(), tensor.mean()))
    return printer
def register_hook(tensor, msg):
    """
    Utility function to call retain_grad and register_hook in a single line.

    retain_grad() makes autograd keep the gradient of this (possibly
    non-leaf) tensor; the registered hook then prints that gradient,
    labelled ``msg``, when it is computed during the backward pass.

    :param tensor: tensor whose gradient should be reported
    :param msg: label forwarded to get_printer
    :return: None
    """
    tensor.retain_grad()
    tensor.register_hook(get_printer(msg))
# Smoke test: build z = (3x)^2, attach a gradient printer to the
# intermediate tensor y, and trigger it by running backward().
if __name__ == '__main__':
    x = torch.randn((1,1), requires_grad=True)
    y = 3*x
    z = y**2
    register_hook(y, 'y')
    z.backward()
| [
"noreply@github.com"
] | noreply@github.com |
7bf668deb039572910332ff426b608be219b4892 | b4d903d2e9affbbd274b68940217423c7d8435c7 | /Pi/Pi Python Scripts/Mail Sending/sendemail.py | b328e4c2b0f8ed9c5af07aafec28a45ec4d96972 | [] | no_license | kimoantiqe/SmartHome-Door | 26dde6a4cc132ce5de3826c8056a9ab85c808e7a | 43f41e41b4232d8ed8a5e5d85a17e93251cd8ed7 | refs/heads/master | 2020-04-03T05:05:04.307841 | 2019-02-15T15:04:42 | 2019-02-15T15:04:42 | 155,034,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
def send_email(toaddr, filename, filepath):
    """Email an intruder alert with the captured picture attached.

    Fixes: the original definition had no function name (`def(...)`) and
    used C-style `//` comments, so the module did not even parse; the
    attachment file handle was also never closed.  Message content is
    unchanged.

    :param toaddr: recipient email address
    :param filename: attachment name shown to the recipient
    :param filepath: local path of the picture to attach
    """
    fromaddr = " "  # make new email
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "UNAUTHORISED INDIVIDUAL DETECTED"
    body = "An unauthorised person tried to access your home. Check attachment for a picture."  # can change after testing
    msg.attach(MIMEText(body, 'plain'))
    part = MIMEBase('application', 'octet-stream')
    # Close the picture file deterministically (the original leaked it).
    with open(filepath, "rb") as attachment:
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(part)
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    # NOTE(review): placeholder credentials -- never commit real ones.
    server.login(fromaddr, "PASSWORD")
    text = msg.as_string()
    server.sendmail(fromaddr, toaddr, text)
    server.quit()
| [
"sheenayadav98@gmail.com"
] | sheenayadav98@gmail.com |
9569c4cb8102b9e9490c2bbeb94f08004487c79f | e6329716335b0570c3df478ee905015e0275e78b | /test/migrations/0003_initial.py | 532a17749f6548f389571609b1a08941cd5ebe86 | [] | no_license | Morphnus-IT-Solutions/knowell | 39402d853b46087c5e8176a9cda0c652460e0333 | ed28f601730ab5ad3e62443c90be0991697c0da0 | refs/heads/master | 2021-01-18T13:57:23.825812 | 2012-08-03T19:55:04 | 2012-08-03T19:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,589 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Auto-generated by South's `schemamigration` (pre Django-1.7
        # migration framework): creates the five model tables plus one
        # unique constraint.  Edit with care -- backwards() must mirror it.
        # Adding model 'LevelOfDifficulty'
        db.create_table('test_levelofdifficulty', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('level', self.gf('django.db.models.fields.CharField')(unique=True, max_length=10)),
        ))
        db.send_create_signal('test', ['LevelOfDifficulty'])
        # Adding model 'SectionGroup'
        db.create_table('test_sectiongroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
        ))
        db.send_create_signal('test', ['SectionGroup'])
        # Adding model 'Section'
        db.create_table('test_section', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['test.SectionGroup'])),
            ('type', self.gf('django.db.models.fields.CharField')(default='mcq', max_length=25, db_index=True)),
        ))
        db.send_create_signal('test', ['Section'])
        # Adding unique constraint on 'Section', fields ['name', 'type']
        db.create_unique('test_section', ['name', 'type'])
        # Adding model 'Test'
        db.create_table('test_test', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('description', self.gf('tinymce.models.HTMLField')()),
            ('marks', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
            ('time', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
            ('standard', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['students.Standard'])),
            ('stream', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['students.Stream'], null=True, blank=True)),
        ))
        db.send_create_signal('test', ['Test'])
        # Adding model 'TestSections'
        db.create_table('test_testsections', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('test', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['test.Test'])),
            ('section', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['test.Section'])),
            ('level_of_difficulty', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['test.LevelOfDifficulty'])),
            ('total_questions', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
        ))
        db.send_create_signal('test', ['TestSections'])
    def backwards(self, orm):
        # Reverse of forwards(): drop the constraint first, then the tables.
        # Removing unique constraint on 'Section', fields ['name', 'type']
        db.delete_unique('test_section', ['name', 'type'])
        # Deleting model 'LevelOfDifficulty'
        db.delete_table('test_levelofdifficulty')
        # Deleting model 'SectionGroup'
        db.delete_table('test_sectiongroup')
        # Deleting model 'Section'
        db.delete_table('test_section')
        # Deleting model 'Test'
        db.delete_table('test_test')
        # Deleting model 'TestSections'
        db.delete_table('test_testsections')
models = {
'students.standard': {
'Meta': {'object_name': 'Standard'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standard': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'students.stream': {
'Meta': {'object_name': 'Stream'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stream': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'test.levelofdifficulty': {
'Meta': {'object_name': 'LevelOfDifficulty'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
'test.section': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Section'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['test.SectionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'mcq'", 'max_length': '25', 'db_index': 'True'})
},
'test.sectiongroup': {
'Meta': {'object_name': 'SectionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'test.test': {
'Meta': {'object_name': 'Test'},
'description': ('tinymce.models.HTMLField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marks': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'standard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['students.Standard']"}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['students.Stream']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'test.testsections': {
'Meta': {'object_name': 'TestSections'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_of_difficulty': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['test.LevelOfDifficulty']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['test.Section']"}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['test.Test']"}),
'total_questions': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
}
}
complete_apps = ['test'] | [
"dala.saumil@gmail.com"
] | dala.saumil@gmail.com |
174c38544d185d8104e7dac5560809d28c2c714f | 54f0eb4fbad9b7cc30bbfdd81241f25a8dfda149 | /hyebin/week_1/hash2.py | adfbf2264ba446566936feb44a88dd9c115c3de6 | [] | no_license | leeleelee3264/thursday-algo-study | 1791e52732f174ad81accb6aeba3299719391a27 | 6507edef60b2129f372bd9b45c412b846f140ed1 | refs/heads/master | 2023-02-02T06:15:55.123415 | 2020-08-06T10:20:23 | 2020-08-06T10:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def solution(phone_book):
m = min(phone_book)
phone_book.remove(m)
for i in phone_book:
if m == i[:len(m)]:
return False
return True | [
"seungmin.lee@dnx.kr"
] | seungmin.lee@dnx.kr |
fa04c44b33e4ba3585b813ad42176847e7052829 | 81d79b0bb6e167c0c150e6e25c9e00db037035e4 | /Geodesy1.py | f7b6e38da091eb244ae9fe749e9e426caa8440e4 | [] | no_license | alaishan/Geodesy | 30b9d236f5e06d43ecc068ef496864f4a0a21850 | b9e4bb423b43293a71a2bbb255e773b08284a22a | refs/heads/master | 2023-01-06T07:54:17.521579 | 2020-11-09T13:23:52 | 2020-11-09T13:23:52 | 293,931,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 17:55:08 2020
@author: Alaisha Naidu
Name: Geodetic Coords to Curvilinear
Creds: University of Cape Town
"""
import math
from math import sqrt, pi, atan, sin, cos
import numpy as np
from numpy import matmul
import sympy as sp
from sympy import Matrix, symbols, atan, sqrt
#WGS84 to Local
# NOTE(review): K (scale), Rot (small-angle rotation matrix) and x01
# (translation, presumably metres) look like the parameters of a
# 7-parameter Helmert datum transformation -- confirm against the brief.
X1 = sp.Matrix([[5384125.138],[3402602.734],[-377241.673]])
x01 = sp.Matrix([[162.3],[14.5],[308.4]])
K1 = 1.000001
Rot1 = sp.Matrix([[1,-0.000000198773609,0.000001139312],[0.000000198773609,1,-0.000001677455],[-0.000001139312,0.000001677455,1]])
# Convert the sympy matrices to float64 numpy arrays for fast arithmetic.
X = np.array(X1).astype(np.float64)
x0 = np.array(x01).astype(np.float64)
K = np.array(K1).astype(np.float64)
Rot = np.array(Rot1).astype(np.float64)
#Geodetic to Local Datum
B = np.matmul(Rot,X)
x = x0 + K*B  # x = x0 + K * Rot * X
print(x)
print("")
#Local to Curvilinear
u = x[0]
v = x[1]
w = x[2]
# Ellipsoid constants: a and 1/f match the Clarke 1880 ellipsoid
# (used by the Cape datum) -- verify against the assignment data.
f = (1/293.46)
a = 6378249.14
e2 = 2*f - (f*f)  # first eccentricity squared
p = math.sqrt((u*u)+(v*v))  # distance from the rotation axis
r = math.sqrt((p*p)+(w*w))
fir = (w*(1-f))/p
sec = 1+((e2*a)/(r*(1-f)))
thir = fir*sec
U = math.atan(thir)  # auxiliary latitude (Bowring-style, presumably)
j = v/u
# NOTE(review): math.atan loses the quadrant; math.atan2(v, u) would be
# safer if u can be negative.
lamda = math.atan(j)
print("Lamda in Radians = ", lamda)
o = pow(math.sin(U), 3)
m = w+e2*a*o
q = pow(math.cos(U), 3)
n = (p - e2*a*q)*(1-f)
phi = math.atan(m/n)  # geodetic latitude
print("Phi in Radians = ", phi)
z = pow(math.sin(phi),2)
W = math.sqrt(1 - e2*z)
N = a/W  # prime vertical radius of curvature
h1 = math.cos(phi)
h2 = math.sin(phi)
h3 = ((a*a)/N)
h = p*h1 +w*h2 - h3  # ellipsoidal height
print("Height = ", h)
| [
"noreply@github.com"
] | noreply@github.com |
fa840b3bf567a24d58b99ea0570aae3242c299a9 | c3bbc57eb19a9c838beab5308f150ccc800bf458 | /item2(tentativa-bergao).py | 79809990f8100271abf19bdcb43a6f9c26efff02 | [] | no_license | gustavosberger/EP-oficial | 9fbf69d27358b7acf296733f56c06d929a50ce1f | 9ce6682ca6f82aad8721c5903c9d57420c054ef4 | refs/heads/master | 2021-06-14T23:49:06.693766 | 2021-03-16T17:13:02 | 2021-03-16T17:13:02 | 129,413,475 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import.json
# NOTE(review): reconstructed from broken source -- the original lines were
# not valid Python (missing ':' after both with-statements, and json.dumps
# was applied to the write handle instead of the loaded data).  The apparent
# intent is: load the saved progress, then write it back; confirm with the
# author before relying on this.
import json

with open("item01(progesso).py", "r") as tentativa:
    conteudo = json.load(tentativa)
with open("item01(progesso).py", "w") as tentativa2:
    json.dump(conteudo, tentativa2)
| [
"gustavosberger@gmail.com"
] | gustavosberger@gmail.com |
c406126c248257ef157a13000626454125a7d526 | 434aaf06d2b553fd4f5ee8acbc647caa52fcf91b | /wolframclient/tests/evaluation/test_async_cloud.py | 2f8c677af27a8f1517d802334821d77f09c1af78 | [
"MIT"
] | permissive | wdscxsj/WolframClientForPython | 43ed9dff93934b5ec79f23c32860bdb8f622ef96 | 2091ba8767d0b9abf0165cd1136ba05d9dd83b9e | refs/heads/master | 2020-05-24T06:00:18.250163 | 2019-05-17T02:29:51 | 2019-05-17T02:29:51 | 187,131,190 | 0 | 0 | MIT | 2019-05-17T02:14:48 | 2019-05-17T02:14:45 | null | UTF-8 | Python | false | false | 18,465 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import asyncio
import logging
import os
import unittest
from wolframclient.evaluation.cloud.asynccloudsession import (
WolframAPICallAsync, WolframCloudAsyncSession)
from wolframclient.evaluation.cloud.base import (SecuredAuthenticationKey,
UserIDPassword)
from wolframclient.exception import (AuthenticationException, RequestException,
WolframLanguageException)
from wolframclient.language import wl
from wolframclient.language.expression import WLFunction
from wolframclient.tests.configure import (MSG_JSON_NOT_FOUND, json_config,
secured_authentication_key, server,
user_configuration)
from wolframclient.utils import six
from wolframclient.utils.asyncio import get_event_loop, run_in_loop
from wolframclient.utils.encoding import force_text
from wolframclient.utils.tests import TestCase as BaseTestCase
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
LOOP = get_event_loop()
@unittest.skipIf(json_config is None, MSG_JSON_NOT_FOUND)
class TestCaseSettings(BaseTestCase):
    """Shared fixture: one authenticated WolframCloudAsyncSession per class."""
    # Overwritten from the JSON test configuration in setupCloudSession().
    user_cred = None
    server = None
    @classmethod
    def setUpClass(cls):
        cls.setupCloudSession()
    @classmethod
    def setupCloudSession(cls):
        # Pull credentials and server coordinates from the shared test config.
        cls.sak = secured_authentication_key
        cls.api_owner = json_config['ApiOwner']
        cls.user_cred = user_configuration
        cls.server = server
        cls.cloud_session_async = WolframCloudAsyncSession(
            credentials=cls.sak, server=server)
    @classmethod
    def tearDownClass(cls):
        cls.tearDownCloudSession()
    @classmethod
    @run_in_loop
    async def tearDownCloudSession(cls):
        # Stop the shared session once the whole class has finished.
        if cls.cloud_session_async is not None:
            await cls.cloud_session_async.stop()
    def get_data_path(self, filename):
        """Return the full path of a file in the ./data/ directory."""
        current_file_dir = os.path.dirname(__file__)
        return os.path.join(current_file_dir, '..', 'data', filename)
@unittest.skipIf(json_config is None, MSG_JSON_NOT_FOUND)
@unittest.skipIf(six.JYTHON, "Not supported in Jython.")
class TestCase(TestCaseSettings):
def test_section_not_authorized(self):
session = WolframCloudAsyncSession(server=self.server)
self.assertEqual(session.authorized(), False)
self.assertEqual(session.anonymous(), True)
@run_in_loop
async def test_section_authorized_oauth(self):
cloud_session = WolframCloudAsyncSession(
credentials=self.sak, server=self.server)
try:
await cloud_session.start()
self.assertEqual(cloud_session.authorized(), True)
self.assertEqual(cloud_session.anonymous(), False)
finally:
await cloud_session.terminate()
@run_in_loop
async def test_section_authorized_oauth_with(self):
async with WolframCloudAsyncSession(
credentials=self.sak, server=self.server) as cloud_session:
self.assertEqual(cloud_session.authorized(), True)
self.assertEqual(cloud_session.anonymous(), False)
@run_in_loop
async def test_section_authorized_xauth(self):
if self.user_cred and self.server:
cloud_session = WolframCloudAsyncSession(
credentials=self.user_cred, server=self.server)
try:
await cloud_session.start()
self.assertEqual(cloud_session.authorized(), True)
self.assertEqual(cloud_session.anonymous(), False)
finally:
await cloud_session.terminate()
else:
print('xauth not available. Test skipped.')
@run_in_loop
async def test_section_authorized_xauth_with(self):
if self.user_cred and self.server:
async with WolframCloudAsyncSession(
credentials=self.user_cred,
server=self.server) as cloud_session:
self.assertEqual(cloud_session.authorized(), True)
self.assertEqual(cloud_session.anonymous(), False)
else:
print('xauth not available. Test skipped.')
@run_in_loop
async def test_bad_sak(self):
bad_sak = SecuredAuthenticationKey('foo', 'bar')
with self.assertRaises(AuthenticationException):
cloud_session = WolframCloudAsyncSession(credentials=bad_sak, server=server)
await cloud_session.start()
@run_in_loop
async def test_need_auth_err(self):
bad_sak = SecuredAuthenticationKey('foo', 'bar')
with self.assertRaises(RequestException):
async with WolframCloudAsyncSession(server=server) as cloud_session:
await cloud_session.evaluate('1+1')
@run_in_loop
async def test_bad_sak_with(self):
bad_sak = SecuredAuthenticationKey('foo', 'bar')
with self.assertRaises(RequestException):
async with WolframCloudAsyncSession(
credentials=bad_sak, server=server) as cloud_session:
cloud_session.authorized()
@run_in_loop
async def test_section_api_call_no_param(self):
url = 'api/private/requesterid'
response = await self.cloud_session_async.call((self.api_owner, url))
self.assertIn(self.api_owner, force_text(await response.get()))
@run_in_loop
async def test_section_api_call_one_param(self):
url = 'api/private/stringreverse'
response = await self.cloud_session_async.call(
(self.api_owner, url), input_parameters={'str': 'abcde'})
self.assertEqual('"edcba"', force_text(await response.get()))
@run_in_loop
async def test_section_api_permission_key(self):
async with WolframCloudAsyncSession(server=server) as cloud:
url = 'api/public/permkey_stringreverse_wxf'
response = await cloud.call((self.api_owner, url),
input_parameters={'str': 'abcde'},
permissions_key='my_key')
self.assertEqual('edcba', await response.get())
# currently missing key result in a webpage with an input field for the key.
# @run_in_loop
# async def test_section_api_missing_permission_key(self):
# url = 'api/public/permkey_stringreverse_wxf'
# with self.assertRaises(AuthenticationException):
# await self.cloud_session_async.call((self.api_owner, url), input_parameters={'str': 'abcde'})
@run_in_loop
async def test_section_api_call_one_param_wrong(self):
url = 'api/private/stringreverse'
response = await self.cloud_session_async.call((self.api_owner, url))
self.assertFalse(response.success)
field, _ = (await response.fields_in_error())[0]
self.assertEqual(field, 'str')
@run_in_loop
async def test_public_api_call(self):
url = "api/public/jsonrange"
cloud_session = WolframCloudAsyncSession(server=self.server)
try:
self.assertFalse(cloud_session.authorized())
self.assertTrue(cloud_session.anonymous())
response = await cloud_session.call((self.api_owner, url),
input_parameters={'i': 5})
self.assertTrue(response.success)
self.assertEqual(await response.get(), list(range(1, 6)))
finally:
await cloud_session.terminate()
@run_in_loop
async def test_section_api_call_two_param(self):
api = (self.api_owner, 'api/private/range/formated/json')
v_min, v_max, step = (1, 10, 2)
response = await self.cloud_session_async.call(
api, input_parameters={
'min': v_min,
'max': v_max,
'step': step
})
if not response.success:
logger.warning(await response.failure)
expected = list(range(v_min, v_max, step))
self.assertListEqual(expected, await response.get())
@run_in_loop
async def test_section_invalid_api_path(self):
with self.assertRaises(WolframLanguageException):
api = (self.api_owner, 'invalid/api/path/no/resource')
res = await self.cloud_session_async.call(api)
await res.get()
@run_in_loop
async def test_section_wl_error(self):
api = (self.api_owner, "api/private/range/wlerror")
i = 1
response = await self.cloud_session_async.call(
api, input_parameters={'i': i})
self.assertFalse(response.success)
self.assertEqual(response.status, 500)
@run_in_loop
async def test_small_image_file(self):
api = (self.api_owner, 'api/private/imagedimensions')
with open(self.get_data_path('32x2.png'), 'rb') as fp:
response = await self.cloud_session_async.call(
api, files={'image': fp})
self.assertTrue(response.success)
res = await response.get()
self.assertListEqual(res, [32, 2])
@run_in_loop
async def test_image_file(self):
api = (self.api_owner, 'api/private/imagedimensions')
with open(self.get_data_path('500x200.png'), 'rb') as fp:
response = await self.cloud_session_async.call(
api, files={'image': fp})
self.assertTrue(response.success)
res = await response.get()
self.assertListEqual(res, [500, 200])
@run_in_loop
async def test_image_string_int(self):
api = (self.api_owner, 'api/private/str_image_int')
with open(self.get_data_path('32x2.png'), 'rb') as fp:
response = await self.cloud_session_async.call(
api,
input_parameters={
'str': 'abc',
'int': 10
},
files={'image': fp})
self.assertTrue(response.success)
res = await response.get()
self.assertListEqual(res, ['abc', [32, 2], 10])
@run_in_loop
async def test_xml_valid_response(self):
api = ('dorianb', 'api/private/rangeXML')
response = await self.cloud_session_async.call(
api, input_parameters={'i': 5})
self.assertTrue(response.success)
self.assertEqual(response.status, 200)
@run_in_loop
async def test_xml_invalid_response(self):
api = ('dorianb', 'api/private/rangeXML')
response = await self.cloud_session_async.call(api)
self.assertFalse(response.success)
self.assertEqual(response.status, 400)
with self.assertRaises(WolframLanguageException):
await response.get()
@run_in_loop
async def test_evaluate_string_disable(self):
async with WolframCloudAsyncSession(
credentials=self.sak,
server=self.server,
inputform_string_evaluation=False) as session:
res = await session.evaluate('Range[3]')
self.assertEqual(res, 'Range[3]')
cor = session.function('f')
res = await cor('abc')
self.assertEqual(res, WLFunction('f', 'abc'))
@run_in_loop
async def test_stop_start_restart_status(self):
session = WolframCloudAsyncSession(
credentials=self.sak, server=self.server)
try:
self.assertFalse(session.started)
self.assertTrue(session.stopped)
await session.start()
self.assertTrue(session.started)
self.assertFalse(session.stopped)
await session.stop()
self.assertFalse(session.started)
self.assertTrue(session.stopped)
await session.restart()
self.assertTrue(session.started)
self.assertFalse(session.stopped)
await session.terminate()
self.assertFalse(session.started)
self.assertTrue(session.stopped)
finally:
await session.terminate()
### Evaluation
@run_in_loop
async def test_evaluate_string(self):
res = await self.cloud_session_async.evaluate('Range[3]')
self.assertEqual(res, [1, 2, 3])
@run_in_loop
async def test_evaluate_wl_expr(self):
res = await self.cloud_session_async.evaluate(wl.Range(2))
self.assertEqual(res, [1, 2])
    @run_in_loop
    async def test_evaluate_wl_expr_option(self):
        # Symbolic expression carrying a WL option (Padding).
        res = await self.cloud_session_async.evaluate(
            wl.ArrayPad([[1]], 1, Padding=1))
        self.assertEqual(res, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    @run_in_loop
    async def test_evaluate_wrap(self):
        # evaluate_wrap returns a result object exposing success and get().
        # NOTE(review): both `success` and `get()` are awaited here -- confirm
        # both are awaitables on this result type.
        res = await self.cloud_session_async.evaluate_wrap(wl.Range(2))
        self.assertTrue(await res.success)
        self.assertEqual(await res.get(), [1, 2])
    @run_in_loop
    async def test_evaluate_function(self):
        # function() by name returns an async callable proxy.
        f = self.cloud_session_async.function('Range')
        self.assertEqual(await f(3), [1, 2, 3])
    @run_in_loop
    async def test_evaluate_function_wl(self):
        # function() also accepts a symbolic wl.* head.
        f = self.cloud_session_async.function(wl.Range)
        self.assertEqual(await f(3), [1, 2, 3])
    @run_in_loop
    async def test_evaluate_function_wl_option(self):
        # Options pass through the callable proxy as keyword arguments.
        f = self.cloud_session_async.function(wl.ArrayPad)
        self.assertEqual(await f([[1]], 1, Padding=1),
                         [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
@run_in_loop
async def test_evaluate_string(self):
res1 = await self.cloud_session_async.evaluate('Range[1]')
res2 = await self.cloud_session_async.evaluate('Range[2]')
self.assertEqual(res1, [1])
self.assertEqual(res2, [1, 2])
    @run_in_loop
    async def test_evaluate_string_concurrently(self):
        # Schedule two evaluations concurrently and gather them; the second
        # uses evaluate_wrap, so its payload is awaited from .result.
        task1 = asyncio.ensure_future(
            self.cloud_session_async.evaluate('Range[1]'))
        task2 = asyncio.ensure_future(
            self.cloud_session_async.evaluate_wrap('Range[2]'))
        res1, res2 = await asyncio.gather(task1, task2)
        self.assertEqual(res1, [1])
        res2 = await res2.result
        self.assertEqual(res2, [1, 2])
# @run_in_loop
# async def test_big_expr(self):
# a=numpy.ndarray((1000,1000), dtype='uint64')
# a.fill(1)
# total = await self.cloud_session_async.evaluate(wl.Total(a))
# self.assertEqual(total, 1000 * 1000)
class TestWolframAPI(TestCaseSettings):
    """Async WolframAPICall tests against private/public cloud APIs.

    All tests need a live cloud session supplied by TestCaseSettings.
    """
    @run_in_loop
    async def test_wolfram_api_call_image(self):
        # File parameter upload: the API returns the image's dimensions.
        api = (self.api_owner, 'api/private/imagedimensions')
        apicall = WolframAPICallAsync(self.cloud_session_async, api)
        with open(self.get_data_path('32x2.png'), 'rb') as fp:
            apicall.add_file_parameter('image', fp)
            res = await apicall.perform()
            self.assertTrue(res.success)
            res = await res.get()
            self.assertListEqual(res, [32, 2])
    @run_in_loop
    async def test_wolfram_api_call_named_image(self):
        # Same upload, but with an explicit filename for the file part.
        api = (self.api_owner, 'api/private/imagedimensions')
        apicall = WolframAPICallAsync(self.cloud_session_async, api)
        with open(self.get_data_path('32x2.png'), 'rb') as fp:
            apicall.add_file_parameter('image', fp, filename='testimage')
            res = await apicall.perform()
            self.assertTrue(res.success)
            res = await res.get()
            self.assertListEqual(res, [32, 2])
    @run_in_loop
    async def test_wolfram_api_from_session(self):
        # API call object created via the session's factory method.
        api = (self.api_owner, 'api/private/imagedimensions')
        apicall = self.cloud_session_async.wolfram_api_call(api)
        with open(self.get_data_path('32x2.png'), 'rb') as fp:
            apicall.add_file_parameter('image', fp)
            res = await apicall.perform()
            self.assertTrue(res.success)
            res = await res.get()
            self.assertListEqual(res, [32, 2])
    @run_in_loop
    async def test_wolfram_api_call_str(self):
        # String parameter; the response body is the quoted string '"edcba"'.
        api = (self.api_owner, 'api/private/stringreverse')
        apicall = WolframAPICallAsync(self.cloud_session_async, api)
        apicall.set_parameter('str', 'abcde')
        res = await apicall.perform()
        self.assertEqual('"edcba"', force_text(await res.get()))
    @run_in_loop
    async def test_wolfram_api_image_string_int(self):
        # Mixed parameters: string, int and an uploaded file in one call.
        api = (self.api_owner, 'api/private/str_image_int')
        with open(self.get_data_path('32x2.png'), 'rb') as fp:
            apicall = WolframAPICallAsync(self.cloud_session_async, api)
            apicall.set_parameter('str', 'abc')
            apicall.set_parameter('int', 10)
            apicall.add_file_parameter('image', fp)
            result = await apicall.perform()
            res = await result.get()
            self.assertListEqual(res, ['abc', [32, 2], 10])
    @run_in_loop
    async def test_wolfram_api_imagebytes_string_int(self):
        # Image supplied as raw bytes instead of a file object.
        api = (self.api_owner, 'api/private/str_image_int')
        with open(self.get_data_path('32x2.png'), 'rb') as fp:
            buffer = fp.read()
        apicall = WolframAPICallAsync(self.cloud_session_async, api)
        apicall.set_parameter('str', 'abc')
        apicall.set_parameter('int', 10)
        apicall.add_image_data_parameter('image', buffer)
        result = await apicall.perform()
        res = await result.get()
        self.assertListEqual(res, ['abc', [32, 2], 10])
    @run_in_loop
    async def test_api_invalid_input(self):
        # A wrong parameter name must fail for every output format variant.
        api_urls = ('api/private/two_parameters_out_json',
                    'api/private/two_parameters_out_wxf',
                    'api/private/two_parameters_out_default')
        for url in api_urls:
            api = (self.api_owner, url)
            apicall = WolframAPICallAsync(self.cloud_session_async, api)
            apicall.set_parameter('x', 'abc')
            res = await apicall.perform()
            self.assertFalse(res.success)
    @run_in_loop
    async def test_api_permission_key(self):
        # Session created without credentials: access via a permission key.
        async with WolframCloudAsyncSession(server=server) as cloud:
            url = 'api/public/permkey_stringreverse_wxf'
            api = (self.api_owner, url)
            apicall = WolframAPICallAsync(cloud, api, permission_key='my_key')
            apicall.set_parameter('str', 'abcde')
            response = await apicall.perform()
            self.assertEqual('edcba', await response.get())
| [
"dorianb@wolfram.com"
] | dorianb@wolfram.com |
e2fdae7c3f7a42aff20c1c5369d5023422073a47 | 6aee676ce66e2680ef887c3b5692c8b776486e95 | /file.py | b281b3aae29f1653b5be086d23e353b7ac9c1d7e | [] | no_license | kushagra67414/python | 80465b31f70a3b313c964fa37972478859abc3c5 | 09c5632a730b1b61ac9a3d82c445020ecda1fe71 | refs/heads/master | 2021-10-09T01:43:44.428089 | 2021-10-07T12:17:40 | 2021-10-07T12:17:40 | 242,206,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | temperatures=[10,-20,-289,100]
def c_to_f(c):
    """Convert a Celsius temperature to Fahrenheit.

    Values below absolute zero (-273.15 C) are rejected: an explanatory
    message string is returned instead of a number.
    """
    if c < -273.15:
        return "That temperature doesn't make sense!"
    return c * 9 / 5 + 32
# Print the Fahrenheit conversion (or the error message) for each sample.
for t in temperatures:
    print(c_to_f(t))
"500067414@stu.upes.ac.in"
] | 500067414@stu.upes.ac.in |
cdcbf556e7f4227e3645dd0dbd3f03c0d1261880 | e149967bf6e5db564277f59b0f516002a217ae54 | /pyramid_pagination/mapper.py | e48ccdac002f83667cc584d7fbe4b6014f2a69ad | [
"MIT"
] | permissive | canaryhealth/pyramid_pagination | dbe4de7c6f52adefda2d13815cb2dd0b2f6e25c9 | 6ddde01afee420b62bde6658ec07c05e2efc2333 | refs/heads/master | 2021-01-21T21:47:49.499408 | 2016-04-12T21:58:33 | 2016-04-12T21:58:33 | 33,215,459 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2015/04/02
# copy: (C) Copyright 2015-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import morph
from .decoder import SortValidator, SmartSort
#------------------------------------------------------------------------------
class Mapper(object):
  '''
  A `Mapper` selects the result set to be paginated from the
  response and transforms the response to contain the paginated result
  set and the pagination meta-information.

  The callables returned by `resolve`/`resolve_target` follow a positional
  protocol (kept as-is for backward compatibility):

    * no arguments  -> "getter": return the current result set;
    * one argument  -> "setter": return the response with the result set
                       replaced by that argument;
    * two arguments -> return the dotted path ("attribute") of the result
                       set within the response.
  '''

  #----------------------------------------------------------------------------
  def __init__(self, target=None, *args, **kw):
    '''
    :param target: optional dotted path (e.g. ``"foo.bar"``) selecting the
      result set within the response dict; ``None`` means auto-detect
      (see `resolve`).
    '''
    super(Mapper, self).__init__(*args, **kw)
    self.target = target

  #----------------------------------------------------------------------------
  def extend(self, *args, **kw):
    '''
    Return a new mapper of the same class, seeded with this mapper's
    attributes and overridden by any positional dicts and keywords.
    '''
    params = dict(target=self.target)
    for arg in args:
      params.update(arg)
    params.update(kw)
    return self.__class__(**params)

  #----------------------------------------------------------------------------
  def get(self, p8n, result):
    '''Extract and return the to-be-paginated result set from `result`.'''
    return self.resolve(p8n, result)()

  #----------------------------------------------------------------------------
  def resolve(self, p8n, result):
    '''
    Returns a function that is expected to be called in one of the
    following ways:
    * as a "getter" (with no arguments)
    * as a "setter" (with exactly one argument)
    * returns the target path (with exactly two arguments)
    '''
    # todo: this `exactly two arguments` protocol is ridiculous... change!
    if self.target is not None:
      return self.resolve_target(p8n, result)
    if not morph.isdict(result):
      # Non-dict result: the whole value is the result set; when set back,
      # it is keyed by the paginator's configured result name.
      def _resolve(*args):
        if len(args) <= 0:
          return result
        if len(args) == 1:
          return dict(((p8n.paginator.result_name, args[0]),))
        if len(args) == 2:
          return p8n.paginator.result_name
      return _resolve
    if len(result) != 1:
      raise ValueError(
        'Pagination of multi-key dictionaries requires setting the'
        ' pagination mapper "target" attribute')
    # `next(iter(...))` instead of `result.keys()[0]`: dict views are not
    # indexable on Python 3, and this form works on both Python 2 and 3.
    key = next(iter(result))
    def _resolve(*args):
      if len(args) <= 0:
        return result[key]
      if len(args) == 1:
        return dict([(key, args[0])])
      if len(args) == 2:
        return key
    return _resolve

  #----------------------------------------------------------------------------
  def resolve_target(self, p8n, result):
    '''
    Resolver for an explicit dotted `target` path; same calling protocol
    as `resolve`. The setter mutates `result` in place.
    '''
    # todo: support list-index style notation as well, eg:
    #   ``foo-1.bar`` would resolve to the ``"here"`` element in:
    #   {foo: [{bar: 'no'}, {bar: 'here'}, {bar: 'nada'}]}
    # TODO: do error checking for missing keys...
    container = result
    keys = self.target.split('.')
    for key in keys[:-1]:
      container = container.get(key)
    key = keys[-1]
    def _resolve(*args):
      if len(args) <= 0:
        return container[key]
      if len(args) == 1:
        container[key] = args[0]
        return result
      if len(args) == 2:
        return self.target
    return _resolve

  #----------------------------------------------------------------------------
  def put(self, p8n, result, value):
    '''Write the paginated data and pagination metadata into `result`.'''
    result = self.put_data(p8n, result, value)
    result = self.put_meta(p8n, result, value)
    return result

  #----------------------------------------------------------------------------
  def put_data(self, p8n, result, value):
    '''Replace the result set in `result` with the paginated rows
    (``value[0]``), recording the resolved attribute path in ``value[1]``.'''
    resolver = self.resolve(p8n, result)
    value[1]['attribute'] = resolver(None, None)
    return resolver(value[0])

  #----------------------------------------------------------------------------
  def put_meta(self, p8n, result, value):
    '''Attach the pagination metadata block (count, offset, limit, sort,
    attribute) to `result` under the paginator's page name.'''
    page = dict()
    if 'count' in value[1]:
      page[p8n.paginator.count_name] = value[1]['count']
    page[p8n.paginator.offset_name] = p8n.offset
    page[p8n.paginator.limit_name] = p8n.limit
    sort = SortValidator.encode(p8n.sort)
    # Only emit the sort key when it differs from the paginator's default.
    if sort != ( SmartSort.MARK
                 if p8n.paginator.sort_default is SmartSort
                 else p8n.paginator.sort_default ):
      page[p8n.paginator.sort_name] = sort
    page[p8n.paginator.attribute_name] = value[1].get(
      'attribute', p8n.paginator.result_name) or p8n.paginator.result_name
    try:
      ret = dict(result)
    except ValueError:
      # Non-dict-convertible results get wrapped under a 'result' key.
      # NOTE(review): some non-mappings raise TypeError instead of
      # ValueError here and would still propagate -- confirm intended.
      ret = dict(result=result)
    ret[p8n.paginator.page_name] = page
    return ret
return ret
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| [
"pjg.github@ubergrabner.net"
] | pjg.github@ubergrabner.net |
01cc466fb5ad903411b71998921b75b72f93fce2 | fa34c936d82aa35830097768d61e5a532aca4e04 | /main.py | 345745dba47ac70ebe390f7a6283737e64191cb4 | [
"MIT"
] | permissive | wwwdebug/tg-kicker | c6d09f56a6ec69597ea50b44b08919551aca2f1c | ac332eaa664e331e55f2975fb7292f9322877fe5 | refs/heads/master | 2022-04-26T00:55:55.446519 | 2020-04-18T21:23:12 | 2020-04-18T21:23:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from telethon import TelegramClient
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import ChatBannedRights
import asyncio
import datetime
api_id = 1234 # Your API_ID
api_hash = "" # Your APP_ID
async def clear_chat(client):
    """Prompt for a group username, then kick every deleted account from it.

    Kicking is done by applying a one-minute view_messages ban to each
    deleted participant; failures are logged and skipped.

    NOTE(review): the counter is incremented before the ban request, so a
    failed kick is still counted in the final total.
    """
    group = input("Enter the group username where the script should search for deleted accounts: ")
    deleted_accounts = 0
    async for user in client.iter_participants(group):
        if user.deleted:
            try:
                deleted_accounts += 1
                await client(EditBannedRequest(group, user, ChatBannedRights(
                    until_date=datetime.timedelta(minutes=1),
                    view_messages=True
                )))
            except Exception as exc:
                print(f"Failed to kick one deleted account because: {str(exc)}")
    if deleted_accounts:
        print(f"Kicked {deleted_accounts} Deleted Accounts")
    else:
        print(f"No deleted accounts found in {group}")
# Open the client session "deleteacc" and run the cleanup coroutine to completion.
with TelegramClient("deleteacc", api_id, api_hash) as client:
    asyncio.get_event_loop().run_until_complete(clear_chat(client))
| [
"noreply@github.com"
] | noreply@github.com |
7de71a4cbffcfce8c1ee6a8e2ce7da5bbb514867 | ad683cde153aca78f937fbd72e8e658cdcd5dd83 | /apps.py | 144d159eaa21812369bff9a4fad65a69600454d9 | [] | no_license | clepperdesign/funWithColors | 377eeac62f4efa8e49146a3729ff094ef2ffc86a | 20bf89664ece5cb7ba351eaa2d3a077e199896bd | refs/heads/master | 2020-07-12T12:42:59.162006 | 2019-08-29T16:43:40 | 2019-08-29T16:43:40 | 204,822,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class ColortimeConfig(AppConfig):
    """Django app configuration for the 'colortime' application."""
    name = 'colortime'
| [
"noreply@github.com"
] | noreply@github.com |
dcd72593ae864f3f3d6640672d80dbf114e290a4 | 1bc5cbda26c5556ac9e9e46fa4af47894f90f7f8 | /src/cli/__init__.py | 79766d20ef6538480dae2d35fd1998521b88ec56 | [] | no_license | Alexander-Andrade/heri_helper | 64388d5dd0e1d0e336544bd00a64b4c9f5f8453d | d11a49b6835c30b81e4631a86c9e99ea13603ac3 | refs/heads/master | 2023-04-03T16:42:04.772315 | 2021-04-11T11:40:12 | 2021-04-11T11:40:12 | 352,153,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from .cli_args_parser import CliArgsParser
from .cli_args_validator import CliArgsValidator
from .args import Args
| [
"aliexandr.andrade@spaceos.io"
] | aliexandr.andrade@spaceos.io |
91bbd6ec721033ed14205dda6b686244b064d94a | 39fa30628f9b83048d548499790f9e41c6f2e07f | /graph.py | 10c292d4684ecca5520ec72f3b8ade0582e2c2e3 | [] | no_license | sleepycat/adjacency-list | 4bb4f4d94e7c008a17a37e175acb0800e693f2d7 | 028c69b8b9b0fc294413620a75fe282783f192b3 | refs/heads/master | 2020-04-05T17:16:20.471007 | 2018-11-11T06:07:29 | 2018-11-11T06:07:29 | 157,052,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | def create_graph_from_file(path):
graph = {}
file = open(path, 'r')
for line in file.readlines():
vertices = line.strip().split(' ')
if len(vertices) == 2:
graph = add_relationship(graph, (vertices[0], vertices[1]))
return list(graph.items())
def add_directed_edge(graph, origin, destination):
    """Append `destination` to `origin`'s adjacency list, creating the list
    on first use. Mutates and returns `graph` (an adjacency-list dict)."""
    graph.setdefault(origin, []).append(destination)
    return graph
def add_relationship(graph, relation):
    """Record an undirected relationship by inserting a directed edge in
    each direction between the two endpoints of `relation`."""
    first, second = relation
    for a, b in ((first, second), (second, first)):
        graph = add_directed_edge(graph, a, b)
    return graph
| [
"mike@korora.ca"
] | mike@korora.ca |
a72c12112ac974c237a065d606907b8325e64cf8 | 1a12037eee96ea5aeae94060f79c2a431d1aef39 | /touchstone/lib/mocks/networked_runnables/mysql/i_mysql_behabior.py | 0e4add2e07bda2a05fa3cb2ba87da28a8d38e538 | [] | no_license | melodylail/touchstone | 6897ef6180700936dd1f318f2d0f60460840991d | 6c954081b1801afa93ee3488088c4053fc8750cf | refs/heads/master | 2023-06-01T23:04:05.638995 | 2021-05-26T06:43:25 | 2021-05-26T06:43:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | import abc
from typing import List
class IMysqlSetup(object):
@abc.abstractmethod
def execute(self, database: str, sql: str):
"""Executes arbitrary SQL on the given database."""
pass
@abc.abstractmethod
def insert_row(self, database: str, table: str, data: dict):
"""Inserts a dictionary of key-value pairs into the given database and table. If the config option,
"camel_to_snake" is set (default True), the dictionary keys will be converted from camel case to
snake case."""
pass
@abc.abstractmethod
def insert_rows(self, database: str, table: str, data: List[dict]):
"""Inserts a list of dictionaries of key-value pairs into the given database and table. If the config option,
"camel_to_snake" is set (default True), the dictionary keys will be converted from camel case to
snake case."""
pass
class IMysqlVerify(object):
@abc.abstractmethod
def row_exists(self, database: str, table: str, where_conditions: dict, num_expected: int = 1) -> bool:
"""Returns True if the given where conditions are found in the given database. If num_expected is set to None,
any number of rows will be considered passing."""
pass
@abc.abstractmethod
def row_does_not_exist(self, database: str, table: str, where_conditions: dict) -> bool:
"""Returns True if the given where conditions are not found in the given database."""
pass
class IMysqlBehavior(object):
DEFAULT_CONFIG = {
'camel_to_snake': True,
'snapshot_databases': False
}
@abc.abstractmethod
def setup(self) -> IMysqlSetup:
pass
@abc.abstractmethod
def verify(self) -> IMysqlVerify:
pass
| [
"shanejjansen@gmail.com"
] | shanejjansen@gmail.com |
0cbe5366f2fc7d68de24b87f866720a86bb885f2 | 8d9e71e74069ccbe329ebefab034fbe090d59392 | /Session_9/project/app/views.py | afb29f9463d2dde667bf126dc0697675c89ffee9 | [] | no_license | jiyoon27/NEXT_HW | 19fa9dde54dbb3a94d40d3a1d28c52b3688f5f0d | a760f18c4bd42e98126f8e9c657ffdee0031001a | refs/heads/master | 2023-06-05T19:00:15.756325 | 2021-07-08T09:53:52 | 2021-07-08T09:53:52 | 349,907,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,237 | py | from django.shortcuts import render, redirect
from .models import Post, Comment
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """Render the post list page with every post in the database."""
    posts = Post.objects.all()
    return render(request, 'home.html', { 'posts' : posts })
@login_required(login_url = '/registration/login')
def new(request):
    """Show the post-creation form (GET) or create a post authored by the
    current user and redirect to its detail page (POST)."""
    if request.method == 'POST':
        new_post = Post.objects.create(
            title = request.POST['title'],
            content = request.POST['content'],
            author = request.user
        )
        return redirect('detail', new_post.pk)
    return render(request, 'new.html')
def detail(request, post_pk):
    """Show a post; on POST, attach a new comment by the current user and
    redirect back to the same detail page."""
    post = Post.objects.get(pk=post_pk)
    if request.method == 'POST':
        content = request.POST['content']
        Comment.objects.create(
            post=post,
            content=content,
            author=request.user
        )
        return redirect('detail', post_pk)
    return render(request, 'detail.html', {'post' : post})
def edit(request, post_pk):
    """Render the edit form for a post (GET) or apply the submitted title
    and content via a queryset update, then redirect to detail (POST)."""
    post = Post.objects.get(pk=post_pk)
    if request.method == 'POST':
        Post.objects.filter(pk=post_pk).update(
            title = request.POST['title'],
            content = request.POST['content']
        )
        return redirect('detail', post_pk)
    return render(request, 'edit.html', {'post' : post})
def delete(request, post_pk):
    """Delete the given post and return to the post list.

    NOTE(review): responds to any HTTP method, does no ownership check, and
    raises Post.DoesNotExist for unknown pks -- confirm this is acceptable.
    """
    post = Post.objects.get(pk=post_pk)
    post.delete()
    return redirect('home')
def delete_comment(request, post_pk, comment_pk):
    """Delete a single comment and return to its post's detail page."""
    comment = Comment.objects.get(pk=comment_pk)
    comment.delete()
    return redirect('detail', post_pk)
def signup(request):
    """Register a new user.

    GET renders the signup form. POST creates the account, re-rendering the
    form with an error message when the username is already taken, then logs
    the new user in and redirects home.
    """
    if (request.method == 'POST'):
        # .exists() issues a cheap EXISTS query instead of fetching all
        # matching rows just to count them with len().
        if User.objects.filter(username=request.POST['username']).exists():
            error = 'username이 이미 존재합니다'
            return render(request, 'registration/signup.html', {
                'error' : error
            })
        new_user = User.objects.create_user(
            username = request.POST['username'],
            password = request.POST['password']
        )
        auth.login(request, new_user, backend="django.contrib.auth.backends.ModelBackend")
        return redirect('home')
    return render(request, 'registration/signup.html')
def login(request):
    """Authenticate a user.

    GET renders the login form; POST checks the credentials, re-rendering
    the form with an error message on failure, otherwise logs the user in
    and redirects home.
    """
    if (request.method == 'POST'):
        found_user = auth.authenticate(
            username=request.POST['username'],
            password=request.POST['password']
        )
        if (found_user is None):
            error = '아이디 또는 비밀번호가 틀렸습니다'
            return render(request, 'registration/login.html', {
                'error' : error
            })
        auth.login(request, found_user)
        return redirect('home')
    return render(request, 'registration/login.html')
def logout(request):
    """Log the current user out and return to the post list."""
    auth.logout(request)
    return redirect('home')
def mypage(request):
    """Show the current user's own posts and comments."""
    my_posts = Post.objects.filter(author = request.user)
    my_comments = Comment.objects.filter(author = request.user)
    return render(request, 'mypage.html', {'my_posts' : my_posts, 'my_comments' : my_comments})
"janejung327@gmail.com"
] | janejung327@gmail.com |
5df0190be56cc9d18e5109f9acbaf7e086a16cf4 | 27ac091aa60b537a32f8d1977301313d5c316e3f | /beer_site/urls.py | 44965afd5166348285545a6a762145fdbda92752 | [] | no_license | yarik2215/beer-site-backend | 58ef1bb79fa1f1c37a961f08984f93f7c1785e3b | 41eafbef4a520c79afa5035842d033e968779cca | refs/heads/master | 2023-05-28T23:04:49.913366 | 2021-06-16T07:28:41 | 2021-06-16T07:28:41 | 369,219,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | """beer_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.http.response import JsonResponse
from django.urls import path, include, re_path
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view for the API docs; public and unauthenticated.
schema_view = get_schema_view(
    openapi.Info(
        title="BeerRating API",
        default_version='v1',
        description="Beer rating api",
    ),
    public=True,
    permission_classes=[permissions.AllowAny],
)
# API documentation routes (raw schema as JSON/YAML, swagger UI, redoc).
docs_urlpatterns = [
    re_path(r'^api/docs(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    re_path(r'^api/docs/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    re_path(r'^api/redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
# Core routes: admin, the beer_app API, and a trivial liveness endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('beer_app.urls')),
    path('api/ping/', lambda request: JsonResponse(data='pong', safe=False))
]
# In DEBUG only: serve uploaded media files and expose the API docs.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += docs_urlpatterns
| [
"stikblacklabel@gmail.com"
] | stikblacklabel@gmail.com |
3995df86f3e0ecb3783f09c801cb9ca185abb043 | c11dc2ab84ba586eca36363dbce0806ac21f06b8 | /C5/REPORTES/IncidentesTiempo/IncidentesTiempo_DiaSemana.py | 47c2ddc4b9e700fce9f09839711da831ab0a2e67 | [] | no_license | YovannaOr/Promad | b802d177942dec4a4d82f4514f25bcb1237a0966 | a9bad6e171843d3bf87cb896854cc485bc7916dc | refs/heads/master | 2023-01-03T06:16:11.712197 | 2020-10-25T18:10:24 | 2020-10-25T18:10:24 | 307,475,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | from pip._vendor.distlib.compat import raw_input
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC, wait
from htmlrunner import HTMLRunner
from datetime import datetime
import unittest, time, re, codecs, os
class UntitledTestCase(unittest.TestCase):
    """Selenium-recorded regression script: logs into the QA site, fills a
    folio date range, picks a radio option, exports the report as PDF and
    Excel, then clears the form. Relies on fixed sleeps and hard-coded
    test credentials."""
    def setUp(self):
        # Local chromedriver path is hard-coded; 30s implicit wait for lookups.
        self.driver = webdriver.Chrome("C:\dchrome\chromedriver.exe")
        self.driver.set_window_size(1400, 1000)
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_untitled_test_case(self):
        driver = self.driver
        # host = "http://52.9.236.138:9596"
        host = "http://qa-promad.opensystems.mx"
        driver.get(host)
        now = datetime.now()
        # Login form: username, unit, password.
        driver.find_element_by_id("mat-input-0").send_keys("QA04")
        time.sleep(1)
        driver.find_element_by_id("mat-input-1").send_keys("C5")
        time.sleep(1)
        driver.find_element_by_id("mat-input-2").send_keys("12345")
        time.sleep(1)
        driver.find_element_by_class_name("mat-raised-button").click()
        print("Termina login")
        # Select the profile.
        driver.find_element_by_class_name("icnIph").click()
        print("Perfil seleccionado")
        time.sleep(40)
        print("Clic para abrir calendario")
        # Single-iteration loop kept from the original recording.
        i = 1
        while i <= 1:
            time.sleep(2)
            # "Folio from" field.
            # driver.refresh()
            driver.find_element_by_xpath("//input[contains(@placeholder,'Desde')]").send_keys("C5/20200710/1")
            time.sleep(2)
            # "Folio to" field.
            driver.find_element_by_xpath("//input[contains(@placeholder,'Hasta')]").send_keys("C5/20201010/1")
            time.sleep(2)
            print("Seleccionando Radio")
            driver.find_element_by_xpath("//mat-radio-button[@id='mat-radio-10']/label/div/div").click()
            time.sleep(3)
            print("Importa en PDF")
            driver.find_element_by_xpath("//i[contains(@class,'fa fa-file-pdf-o fz-24 icons')]").click()
            time.sleep(3)
            print("Importa en Excel")
            driver.find_element_by_xpath("//i[contains(@class,'fa fa-file-excel-o fz-24 icons')]").click()
            time.sleep(10)
            print("limpiando formulario")
            driver.find_element_by_xpath("//i[contains(@class,'fa fa-eraser fz-22 icons')]").click()
            i += 1
    def tearDown(self):
        self.driver.quit()
# Run the suite with the HTML report runner when executed as a script.
if __name__ == "__main__":
    unittest.main(testRunner=HTMLRunner.HTMLRunner(output='Crea resultado'))
    # unittest.main()
| [
"cesarerr@gmail.com"
] | cesarerr@gmail.com |
99d7d1c5b74d24f29b39a96a995a47f8a24d3c91 | 49fc073dcfc0d55457f9731855540dbd4b715556 | /src/is_alive/application/ports/event_publisher.py | 23b5b748c835bcd2565a3c10e4b9db75b4653f39 | [] | no_license | EduardMaghakyan/is-alive | 149b806611e1ffe742afd7af1ac199ca2c03cd6f | 53778c08c6b822f7fdec51ca4c96f985c38c02a6 | refs/heads/main | 2023-05-14T10:48:26.483097 | 2021-05-20T15:20:55 | 2021-05-20T15:20:55 | 369,253,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import abc
from is_alive.domain.event import DomainEvent
class EventPublisher:
    """Port (abstract interface) for publishing domain events; concrete
    adapters implement `publish`.

    NOTE(review): the class does not inherit from abc.ABC, so the
    @abc.abstractmethod marker is not enforced at instantiation time.
    """
    @abc.abstractmethod
    def publish(self, event: DomainEvent, **attributes) -> None:
        pass
| [
"edi.maghakyan@gmail.com"
] | edi.maghakyan@gmail.com |
610090e30faacc1439436865e67666d88fa0f32f | 18f577ff6927ac682a85f08bad4f3eec433bd8c0 | /BannerBot.py | 19fa18eee8aade60d4dc2865ad28311e6b355d81 | [] | no_license | nicolas-raoul/BannerBot | c50d3954a4c5f1418b78d3c0110cfd9f4d511ede | 915cfd35c6d0501b15898a178207f10922d280a9 | refs/heads/master | 2020-05-07T21:16:15.693940 | 2013-10-15T08:22:07 | 2013-10-15T08:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
# Set the Wikivoyage banner of a destination on Wikidata.
import pywikibot
# Target article and banner image file (Python 2 script; requires pywikibot).
page=u"Aachen"
banner=u"Aachen banner Winter Panorama.jpg"
print "Defining data source"
site = pywikibot.Site("en", "wikivoyage")
repo = site.data_repository()
# Resolve the Wikivoyage article and its linked Wikidata item.
page = pywikibot.Page(site, page)
item = pywikibot.ItemPage.fromPage(page)
#print "Test loading data"
#dictionary = item.get()
print "Setting Wikivoyage banner"
# P948 -- the Wikidata property used here for the page banner image.
stringclaim = pywikibot.Claim(repo, u'P948')
image = pywikibot.page.ImagePage(site, banner)
stringclaim.setTarget(image)
item.addClaim(stringclaim)
print "Banner has been set"
| [
"nicolas.raoul@gmail.com"
] | nicolas.raoul@gmail.com |
8517ce3f417f877036d4b1f5d9af879c97c0a703 | e02506da0c661c8241fed00efdd0d6b2f8b147df | /textattack/attack_recipes/seq2sick_cheng_2018_blackbox.py | 8af6d15138de6bc314511c851970b1c226990123 | [
"MIT"
] | permissive | SatoshiRobatoFujimoto/TextAttack | 2592a828f128fd8bf0b8ce5578e9488df5b2ac97 | a809a9bddddff9f41750949e26edde26c8af6cfa | refs/heads/master | 2022-07-11T02:10:24.536157 | 2020-05-14T13:29:44 | 2020-05-14T13:29:44 | 263,941,825 | 1 | 0 | MIT | 2020-05-14T14:43:47 | 2020-05-14T14:43:46 | null | UTF-8 | Python | false | false | 1,205 | py | """
Cheng, Minhao, et al.
Seq2Sick: Evaluating the Robustness of Sequence-to-Sequence Models with
Adversarial Examples
ArXiv, abs/1803.01128.
This is a greedy re-implementation of the seq2sick attack method. It does
not use gradient descent.
"""
from textattack.constraints.overlap import LevenshteinEditDistance
from textattack.goal_functions import NonOverlappingOutput
from textattack.search_methods import GreedyWordSwapWIR
from textattack.transformations import WordSwapEmbedding
def Seq2SickCheng2018BlackBox(model, goal_function='non_overlapping'):
    """Build a greedy, black-box seq2sick-style attack against `model`.

    NOTE(review): the `goal_function` argument is accepted but immediately
    overwritten with NonOverlappingOutput(model) below, so its value is
    currently ignored.
    """
    #
    # Goal is non-overlapping output.
    #
    goal_function = NonOverlappingOutput(model)
    # @TODO implement transformation / search method just like they do in
    # seq2sick.
    transformation = WordSwapEmbedding(max_candidates=50)
    #
    # In these experiments, we hold the maximum difference
    # on edit distance (ϵ) to a constant 30 for each sample.
    #
    #
    # Greedily swap words with "Word Importance Ranking".
    #
    attack = GreedyWordSwapWIR(goal_function, transformation=transformation,
        constraints=[], max_depth=10)
    return attack
| [
"jxmorris12@gmail.com"
] | jxmorris12@gmail.com |
3fb361418fab76466fd2fe3aa67d0e02198edd43 | 9b2089e7f3acf3da1a84316db84db9b7637d46a1 | /train.py | d61e907df79aa8e4a6ae5a3cb4abbb7690023292 | [] | no_license | Predstan/Traffic-Sign-Recognition | 25777a18d329d6cf50ea89533eaead3ee146936a | a1d02b2f1ba7dc24100d924668ef709df2083cdf | refs/heads/main | 2023-05-30T21:27:41.675780 | 2021-06-22T12:29:04 | 2021-06-22T12:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py |
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import cv2
import numpy as np
def batch_norm(x, out, phase_train):
    """
    Batch normalization on convolutional maps.
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        x: Tensor, 4D BHWD input maps
        out: integer, depth of input maps (note: named `n_out` in the ref.)
        phase_train: boolean tf.Varialbe, true indicates training phase
            (batch statistics + moving-average update); false uses the
            stored moving averages
    Return:
        normed: batch-normalized maps
    """
    with tf.compat.v1.variable_scope('bn'):
        # Learnable shift (beta) and scale (gamma), one per channel.
        beta = tf.compat.v1.Variable(tf.constant(0.0, shape=[out]),
                                     name='beta', trainable=True)
        gamma = tf.compat.v1.Variable(tf.constant(1.0, shape=[out]),
                                      name='gamma', trainable=True)
        # Per-channel statistics over batch, height and width.
        batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)
        def mean_var_with_update():
            # Update the moving averages, then return the batch statistics.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
def convolution2D(X, kernel, filter, name, strides=1, padding="VALID", activation=None, mean=0, sigma=0.1):
    """
    Implement a Convolutional Step
    Arguments:
    X -- Input Tensor (NHWC; channel depth read from X.get_shape()[3])
    kernel -- Kernel Size of type integer (square kernel)
    filter -- Size of filter of type integer (output channel count; note
              this parameter shadows the `filter` builtin)
    name -- name assigned to the conv2d op
    strides -- spatial stride, same in H and W
    padding -- conv2d padding mode
    activation -- when not None, a ReLU is applied to the output
    mean, sigma -- truncated-normal initializer parameters for the weights
    """
    shape = X.get_shape()
    print(shape)  # debug leftover: logs the input shape at graph-build time
    #print(shape)
    w = tf.compat.v1.Variable(tf.compat.v1.truncated_normal(shape = (kernel, kernel, shape[3], filter), mean=mean, stddev=sigma))
    b = tf.compat.v1.Variable(tf.zeros(filter))
    X = tf.nn.conv2d(X, w, strides = [1, strides, strides, 1], padding=padding, name= name) + b
    if activation is not None:
        return tf.nn.relu(X)
    return X
def MyNet(input_shape= (32, 32, 3), classes=43, mean=0, sigma=0.1, training=True, dropout_rate=0.5):
    """Build the TF1-style CNN graph: three conv/pool stages followed by two
    fully connected layers.

    Returns (X_input, Y, one_hot_y, logits, train_tensor, dropout_rate):
    the input and label placeholders, one-hot labels, raw logits, the
    batch-norm phase placeholder and the dropout-rate placeholder.

    NOTE(review): the `dropout_rate` *parameter* is immediately shadowed by
    a placeholder of the same name, so the passed-in value is never used.
    `training` is baked in at graph-construction time: when True it adds
    batch norm (stages 2-3) and dropout.
    """
    X_input = tf.compat.v1.placeholder(tf.float32, shape=[None] + list(input_shape))
    dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
    train_tensor = tf.compat.v1.placeholder(tf.bool, (None))
    Y = tf.compat.v1.placeholder(tf.int32, (None))
    one_hot_y = tf.one_hot(Y, classes)
    # Stage 1: conv(64) -> ReLU -> max pool.
    X = convolution2D(X_input, filter = 64, kernel=3, strides= 1, name="conv_1")
    X = tf.nn.relu(X)
    X = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
    # Stage 2: conv(128) -> [batch norm] -> ReLU -> max pool.
    X = convolution2D(X, filter = 128, kernel=3, strides= 1, name="conv_2")
    if training:
        X = batch_norm(X, 128, train_tensor)
    X = tf.nn.relu(X)
    X = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
    # Stage 3: conv(256) -> [batch norm] -> ReLU -> max pool.
    X = convolution2D(X, filter = 256, kernel=3, strides= 1, name="conv_3")
    if training:
        X = batch_norm(X, 256, train_tensor)
    X = tf.nn.relu(X)
    X = tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
    if training:
        X = tf.nn.dropout(X, rate =dropout_rate)
    # Flatten the conv output for the fully connected layers.
    shape = X.get_shape()
    conv_output_width = shape[2]
    conv_output_height = shape[1]
    conv_element_count = int(
        conv_output_width * conv_output_height * shape[3])
    X = tf.reshape(X,[-1, conv_element_count])
    # FC1: conv features -> 256 units.
    fc_W = tf.compat.v1.Variable(tf.compat.v1.truncated_normal(shape=(conv_element_count, 256), mean = mean, stddev = sigma))
    fc_b = tf.compat.v1.Variable(tf.zeros(256))
    X = tf.matmul(X, fc_W) + fc_b
    # FC2: 256 units -> class logits.
    fc_W = tf.compat.v1.Variable(tf.compat.v1.truncated_normal(shape=(256, classes), mean = mean, stddev = sigma))
    fc_b = tf.compat.v1.Variable(tf.zeros(classes))
    logits = tf.matmul(X, fc_W) + fc_b
    return X_input, Y, one_hot_y, logits, train_tensor, dropout_rate
def preprocess_input(image):
    """Normalise pixel values to [0, 1], resizing to 32x32 when needed.

    Assumes `image` is a single image (H, W, C) or a batch (N, H, W, C)
    ndarray -- TODO confirm callers never pass 2-D grayscale arrays, since
    shape[-3] would then not index the height.
    """
    shape = image.shape
    if shape[-2] == 32 and shape[-3] == 32:
        # Already 32x32: only scale the pixel values.
        im = image/255.0
        return im
    if len(shape) == 4:
        # Batch: resize each image independently, keeping the batch axis.
        all_image = []
        for im in image:
            all_image.append(np.expand_dims(cv2.resize(im, (32, 32) )/255, 0))
        im = np.concatenate(all_image)
    else:
        im = cv2.resize(image, (32, 32) )/255.
    return im
| [
"adeolaraji12@gmail.com"
] | adeolaraji12@gmail.com |
5fbfa5c6bed801a627d6b474811331e09eda95a8 | 4bed46cfc5e5bf579c65509884bc8d04e848c1b1 | /todo_drf/api/migrations/0001_initial.py | 0365c326a1df63368dfda5fa5ef337cc7d4da030 | [] | no_license | delighttakudzwa/Titi_todo_app | 430a073565760452446c5fec5483b77c4e4d88a0 | 18231d145e8bf6a33f5697c75f05d39541ce7d0e | refs/heads/master | 2022-10-02T21:16:52.557348 | 2020-05-26T21:46:49 | 2020-05-26T21:46:49 | 267,153,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # Generated by Django 3.0.6 on 2020-05-26 12:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the api app: creates the Task table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                # completed defaults to False; null/blank allowed so forms
                # may omit the flag.
                ('completed', models.BooleanField(blank=True, default=False, null=True)),
            ],
        ),
    ]
| [
"keptac.flutter@gmail.com"
] | keptac.flutter@gmail.com |
958e5eceba3a97c5f73ae5f97c2f2d507c3228c4 | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /백준/최소신장트리/행성 터널 - 프림.py | 1b9cd115b4de9658e77fc0d211d97f40b0242f95 | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import sys
# Redirect stdin to the local sample input for the Baekjoon
# "행성 터널" (planet tunnels) minimum-spanning-tree problem.
sys.stdin = open('행성 터널.txt')
import sys
input = sys.stdin.readline
from heapq import heappush, heappop

N = int(input())
star = []
for i in range(N):
    x, y, z = map(int, input().split())
    star.append((x, y, z, i))

# Candidate edges: per coordinate axis, only planets adjacent in sorted
# order can contribute a minimal edge, giving O(N) candidates per axis
# instead of the full O(N^2) edge set.
edges = [[] for _ in range(N)]
for i in range(3):
    star.sort(key=lambda x: x[i])
    for j in range(N-1):
        n1, n2 = star[j][3], star[j+1][3]
        cost = abs(star[j][i]-star[j+1][i])
        edges[n1].append((cost, n2))
        edges[n2].append((cost, n1))

# Prim's algorithm over the candidate edges with a min-heap.
mst = [False]*N
ans = 0
q = []
heappush(q, (0, 0))
while q:
    cost, node = heappop(q)
    if mst[node]:
        continue
    ans += cost
    mst[node] = True
    for nxt_cost, nxt in edges[node]:
        if mst[nxt]:
            continue
        heappush(q, (nxt_cost, nxt))
print(ans) | [
"gjtjdtn201@naver.com"
] | gjtjdtn201@naver.com |
d9d15c7369252080d67b4a3db18eda581179e3b9 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /contest/weekly-contest-266/5919.0_Vowels_of_All_Substrings.py | 836bcb1c21e6f95554a3972b51237f0616b166fa | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | '''
41 / 51 个通过测试用例
状态:超出时间限制
brute force
T: O(N^2)
S: O(N)
'''
class Solution:
    def countVowels(self, word: str) -> int:
        """Return the total number of vowels summed over every substring.

        A character at index i appears in (i + 1) * (n - i) substrings
        (i + 1 choices of start, n - i choices of end), so each vowel
        contributes that many occurrences.  O(n) time, O(1) extra space;
        this replaces the original O(n^2) pairwise prefix-sum loop that
        exceeded the time limit (41/51 cases, per the file's own notes).
        """
        n = len(word)
        return sum((i + 1) * (n - i)
                   for i, ch in enumerate(word) if ch in 'aeiou')
'''
"aba"
0112
'''
'''
前缀和+前缀和
这是从双层暴力优化过来的
通过
296 ms 23.8 MB Python3 2021/11/07 19:48
T: O(3N)
S: O(2N)
ref:
https://leetcode-cn.com/problems/vowels-of-all-substrings/solution/cqian-zhui-he-qian-zhui-he-by-answerer-360n/
'''
class Solution:
    def countVowels(self, word: str) -> int:
        """Total vowels over all substrings via prefix sums of a vowel
        prefix-sum array (O(n) time, O(n) space)."""
        vowels = set('aeiou')
        # prefix[k] = number of vowels among the first k characters.
        prefix = [0]
        for ch in word:
            prefix.append(prefix[-1] + (ch in vowels))
        # running[k] = prefix[1] + ... + prefix[k].
        running = [0]
        for value in prefix[1:]:
            running.append(running[-1] + value)
        # Substrings ending at index `end` contribute
        # sum_{j<=end} (prefix[end+1] - prefix[j]).
        total = 0
        for end in range(len(word)):
            total += prefix[end + 1] * (end + 1) - running[end]
        return total
'''
乘法原理
T: O(N)
S: O(1)
执行用时:92 ms, 在所有 Python3 提交中击败了100.00% 的用户
内存消耗:15.2 MB, 在所有 Python3 提交中击败了100.00% 的用户
通过测试用例:51 / 51
'''
class Solution:
    def countVowels(self, word: str) -> int:
        """Total vowels over all substrings: a vowel at index i occurs in
        (i + 1) * (n - i) substrings (start choices times end choices)."""
        n = len(word)
        total = 0
        for index in range(n):
            if word[index] in 'aeiou':
                total += (index + 1) * (n - index)
        return total
| [
"838255715@qq.com"
] | 838255715@qq.com |
349da3c46b25c597a4fea4b6ffed199281d111b3 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/metrics/tensorflow/__init__.py | 5eb861df8a3c94200471f2efbde2cb138194a48e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 449 | py | from vega.common.class_factory import ClassFactory
from .metrics import Metrics

# Register the tensorflow metric classes lazily: the implementation modules
# are only imported when a metric is first requested through the factory.
ClassFactory.lazy_register("vega.metrics.tensorflow", {
    "segmentation_metric": ["trainer.metric:IoUMetric"],
    "classifier_metric": ["trainer.metric:accuracy"],
    "sr_metric": ["trainer.metric:PSNR", "trainer.metric:SSIM"],
    "forecast": ["trainer.metric:MSE", "trainer.metric:RMSE"],
    "r2score": ["trainer.metric:r2score", "trainer.metric:R2Score"],
})
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
2292edbbc89dfc17bb498d30d6ae3e66d13c29b7 | fb238ca57a8860481c29032330302ab302e5c207 | /spam.py | 882ac4e3608c7b71e2a0d54b1fbed5dc8d265a34 | [] | no_license | rahul2240/Smart-India-Hackathon-2019 | 9378cda6ada389dbf04e4531bf03cebbf4053d23 | 714f73ab7fe121a77a48ac4b8e433b481801423f | refs/heads/master | 2020-04-24T04:49:07.900273 | 2019-03-30T20:32:01 | 2019-03-30T20:32:01 | 171,717,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | import os
from flask import Flask, render_template, request, redirect, url_for, jsonify
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import *
from sklearn.svm import *
import pandas

app = Flask(__name__)

# NOTE(review): `global` at module level is a no-op; Classifier/Vectorizer
# are ordinary module globals assigned below.
global Classifier
global Vectorizer

# load data
# Assumes spam.csv has columns v1 (label) and v2 (message text) -- the
# usual SMS spam dataset layout; TODO confirm.
data = pandas.read_csv('spam.csv', encoding='latin-1')
train_data = data[:4400] # 4400 items
test_data = data[4400:] # 1172 items

# train model: TF-IDF features into a one-vs-rest linear SVM, fitted once
# at import time so every request reuses the same model.
Classifier = OneVsRestClassifier(SVC(kernel='linear', probability=True))
Vectorizer = TfidfVectorizer()
vectorize_text = Vectorizer.fit_transform(train_data.v2)
Classifier.fit(vectorize_text, train_data.v1)

@app.route('/predict', methods=['POST'])
def index():
    # Classify the JSON payload's "text" field.  Indexing the payload
    # directly raises if "text" is missing (no fallback is provided).
    message = request.get_json()
    message = message["text"]
    error = ''
    predict_proba = ''
    predict = ''
    global Classifier
    global Vectorizer
    try:
        if len(message) > 0:
            vectorize_message = Vectorizer.transform([message])
            predict = Classifier.predict(vectorize_message)[0]
            predict_proba = Classifier.predict_proba(vectorize_message).tolist()
    except BaseException as inst:
        # Any failure during prediction is reported in the "error" field.
        error = str(type(inst).__name__) + ' ' + str(inst)
    return jsonify(
        message=message, predict_proba=predict_proba,
        predict=predict, error=error)

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 8800))
    app.run(host='0.0.0.0', port=port, debug=True, use_reloader=True)
| [
"rahulsingh2240@gmail.com"
] | rahulsingh2240@gmail.com |
8a72939c8114781ca876f70213a8ac42804e358f | 4d6387b028984c68f0eb57a575b297fd24850eca | /qstrader/profiling.py | e049f45abba6769b11c611d0b787436972ef7ea4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | lydia99992/newqstrader | 985acb2508addc4c2deecb8858ab08d1c27ad5bc | c82651c75a572ac5dff0ac376133fb0ff2b55417 | refs/heads/master | 2022-06-27T02:09:31.135929 | 2020-03-06T12:45:25 | 2020-03-06T12:45:25 | 245,416,142 | 1 | 1 | MIT | 2022-06-22T01:19:15 | 2020-03-06T12:38:51 | Python | UTF-8 | Python | false | false | 243 | py | import time
def speed(ticks, t0):
    """Events processed per second since start time t0 (epoch seconds)."""
    elapsed = time.time() - t0
    return ticks / elapsed


def s_speed(time_event, ticks, t0):
    """Human-readable throughput line, e.g. '10 TICKS processed @ 5.0 TICKS/s'."""
    rate = speed(ticks, t0)
    label = time_event.typename + "S"
    return "%d %s processed @ %f %s/s" % (ticks, label, rate, label)
| [
"dannalee999@gmail.com"
] | dannalee999@gmail.com |
d1b07ca19d527477342052e82c10beda1d9365ab | d72a8945b1f028333679c7f799d6efee4fcbcfa3 | /wendeldr_helpers/statistics/general.py | 51ed2308af267afc9c22d9087d1f4141b7c4d8fb | [] | no_license | wendeldr/wendeldr_helpers | aff5d439abbdd2d9dde279b7f6e9417b64684b41 | 5bb0d796142190c2d8cbf2dfb45b91dea5b16b9d | refs/heads/main | 2023-04-01T23:57:26.867726 | 2021-03-28T22:25:32 | 2021-03-28T22:25:32 | 348,420,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | from round_robin_tournament import Tournament as rrT
from lifelines import CoxPHFitter
def recombitulate_covariates(df):
    """Return one 'sleepstate_feature_analysis_parameters' string per row of df."""
    columns = ['sleepstate', 'feature', 'analysis', 'parameters']
    return ["_".join(row) for row in df[columns].itertuples(index=False, name=None)]
class Feature():
    """Per-feature univariate statistics (uv = univariate, or = odds ratio,
    hr = hazard ratio).

    Every constructor argument is stored unchanged as an attribute of the
    same name.
    """

    def __init__(self,
                 name,
                 univariate_cohort_total,
                 univariate_n_total,
                 univariate_n_event,
                 univariate_n_noevent,
                 univariate_miss_n_event,
                 univariate_miss_precent_n_event,
                 univariate_miss_precent_n_noevent,
                 univariate_events_precent_total,
                 univariate_mean_event,
                 univariate_mean_noevent,
                 univariate_mean_abs_diff,
                 univariate_ttest,
                 univariate_or,
                 univariate_or_positive_ci,
                 univariate_or_negative_ci,
                 univariate_or_standard_error,
                 univariate_or_p,
                 univariate_hr,
                 univariate_hr_positive_ci,
                 univariate_hr_negative_ci,
                 univariate_hr_standard_error,
                 univariate_hr_p):
        # One setattr loop instead of 23 hand-written assignments; the
        # attribute names are exactly the parameter names.
        for attribute, value in locals().items():
            if attribute != 'self':
                setattr(self, attribute, value)
| [
"wendeldr@mail.uc.edu"
] | wendeldr@mail.uc.edu |
f7672e8bb4bad41f3bfa672f7efa3395328dbf7c | 27ca36a60ebfa24f986f1c9d08c2dd8d64c1e095 | /hosting/hosting/urls.py | bd3a3e58ce9a7179a79f782630461521a62e014d | [] | no_license | srivaryan/Project-DBMS | c9a8c7b13e37c7b2ff1e50c6846d2c9bc85c4c6b | 6ab07db21f9fe4d394c0d05c3fd914680ff82979 | refs/heads/main | 2023-08-19T23:46:58.982258 | 2021-10-29T17:59:09 | 2021-10-29T17:59:09 | 419,330,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | """hosting URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# from hosting import views
# Expose the Django admin and delegate every remaining URL to the app package.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('app.urls'))
]
| [
"srivaryan2@gmail.com"
] | srivaryan2@gmail.com |
05bbe819c737091fa9d1aff4a383a5ca8734dd1c | 461cf2fd99330558ec96bf551cb1703e627868a0 | /get_pages.py | b050961df0e6de3f1240f8bc48e06c5237fb092d | [] | no_license | abelsonlive/bcni-pra | 408f72ba369ca164c5efb4442ebd2eaeb2c8dd78 | fa51ae45382c45f15fe861060d6e90cc00c27590 | refs/heads/master | 2021-01-20T11:50:13.432486 | 2013-04-29T14:45:04 | 2013-04-29T14:45:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import selenium
from selenium import webdriver
import time
URL = "http://secure.phila.gov/paplpublicweb/GridView.aspx"
b = webdriver.Firefox()
b.get(URL)
for i in range(2, 806):
print i
text = b.page_source.encode('utf-8')
fp = "raw_pages/page%s.txt" % (i-1)
print "writing", fp, "to file"
with open(fp, "w") as text_file:
text_file.write(text)
try:
next = b.find_element_by_xpath("//span[contains(text(),'%s')]" % (i))
except selenium.common.exceptions.NoSuchElementException or selenium.common.exceptions.StaleElementReferenceException:
print "ERROR ERROR!!!"
i = i - 1
print "trying again"
next.click()
time.sleep(2)
b.close()
| [
"brianabelson@gmail.com"
] | brianabelson@gmail.com |
74ccae8a1b850350f6fc5408dc7d415442e67624 | ec50d7e79741d1dfa050559220cb536ec5fe42f5 | /prettify.py | 0264ee65f3493fa6d84389798f165d96608d5b28 | [] | no_license | pankdm/icfpc-2016 | b2fbc355db4312f272e991c6708c779bf1b657dc | 14ac75fec03bc34c4d6145e5f9fde1682b93e52c | refs/heads/master | 2021-01-09T09:24:38.872494 | 2016-08-08T23:08:45 | 2016-08-08T23:08:45 | 65,159,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import sys
import json
import sys

# Parse the JSON document named on the command line.  The context manager
# guarantees the handle is closed (the original leaked the open file).
with open(sys.argv[1], 'r') as f:
    data = f.read()
js = json.loads(data)
| [
"pank.dm@gmail.com"
] | pank.dm@gmail.com |
99c513f86707ade0a324dfc4b72f5acc7eaaa4ac | 80b2c3c5c7bdf6b52dc857a89aa217207bbdbbc0 | /Code/DataLoader_Turnover.py | a026cc878b96ab822002e44b0193825864d401cc | [] | no_license | yc11241124/NYU-ML-2021-Spring | 694a64bc381e7dc7fee120d12394a751c800dcde | 093c738ece70a7ea10dd8725a749d460c37b6c3a | refs/heads/main | 2023-08-11T01:44:05.876971 | 2021-09-12T22:41:30 | 2021-09-12T22:41:30 | 405,762,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | import os
import numpy as np
def LoadData_Abalone():
    """Load abalone.data (CSV) into feature matrix X and integer labels Y.

    The categorical first column (F/M/I) is mapped to 1/2/3; the remaining
    numeric columns form the rest of each feature vector, and the last
    column is used as the label.
    """
    file_original = open("abalone.data", "r")
    Lines = file_original.readlines()
    X = []
    Y = []
    for line in Lines:
        data = []
        line = line.replace('\n','')
        line_split = line.split(',')
        label_int = int(line_split[len(line_split)-1])
        Y.append(label_int)
        # Earlier binarised-label experiment, kept commented for reference:
        # if label_int >= 1 and label_int <= 9:
        # list_data.append(str(1))
        # else:
        # list_data.append(str(0))
        if(line_split[0]=="F"):
            data.append(1)
        elif(line_split[0]=="M"):
            data.append(2)
        elif(line_split[0]=="I"):
            data.append(3)
        # Columns 1 .. n-2: all numeric features (the last column is the label).
        for i in range(1,len(line_split)-1):
            val = float(line_split[i])
            data.append(val)
        X.append(data)
    file_original.close()
    X = np.array(X)
    Y = np.array(Y)
    return X, Y
def LoadData_Banknote():
    """Load banknote.data (CSV) into a shifted feature matrix X and labels Y."""
    file_original = open("banknote.data", "r")
    Lines = file_original.readlines()
    X_raw = []
    Y = []
    for line in Lines:
        data = []
        line = line.replace('\n','')
        line_split = line.split(',')
        label_int = int(line_split[len(line_split)-1])
        Y.append(label_int)
        # NOTE(review): range(1, len-1) drops column 0 entirely; unlike the
        # abalone loader there is no separate handling of a first column
        # here -- confirm that feature is meant to be excluded.
        for i in range(1,len(line_split)-1):
            val = float(line_split[i])
            data.append(val)
        X_raw.append(data)
    file_original.close()
    X_raw = np.array(X_raw)
    # Shift each column so all values become non-negative.
    X = np.zeros(X_raw.shape)
    for i in range(0,X_raw.shape[1]):
        X_i = X_raw[:,i]
        min_i = np.amin(X_i)
        # print(np.amin(X_i))
        # NOTE(review): the data-driven minimum computed above is discarded
        # and replaced by the constant 100, so every column gets a fixed
        # +100 shift -- confirm this is intended.
        min_i = 100 #np.amin(X_i)
        X_shifted_i = X_i + np.absolute(min_i)
        X[:,i] = X_shifted_i
    Y = np.array(Y)
    return X, Y
def LoadData_Turnover():
    """Load turnover.data (CSV) and return (X, Y) as numpy arrays.

    Mirrors the other loaders: the last field of each row is the integer
    label, fields 1 .. n-2 become float features (field 0 is skipped).
    """
    features = []
    labels = []
    source = open("turnover.data", "r")
    for row in source.readlines():
        fields = row.replace('\n', '').split(',')
        labels.append(int(fields[-1]))
        features.append([float(value) for value in fields[1:-1]])
    source.close()
    return np.array(features), np.array(labels)
| [
"noreply@github.com"
] | noreply@github.com |
07ef1f4cd0c16e4c233fb1e556b52b581f2e4532 | 0ed9c1d60399ca524779d403b641c8585bab8e15 | /test_request/get_package/case_check.py | 25cbe729b65391ae392dc2902d5d79a1d12d1f44 | [] | no_license | Asan7777/Asan | 17ccc45fa2633c1e63351fd2552864a25dbbc893 | b9b86455b00d3c004d4ce0da31465154a410025c | refs/heads/master | 2022-01-20T10:18:47.309895 | 2019-07-26T09:35:55 | 2019-07-26T09:35:55 | 198,197,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | import json
import operator as op
# from idna import unicode
from idna import unicode
class CommonUtil():
    """Helpers for checking substring containment and comparing dict payloads."""

    def __init__(self, str_one, str_two):
        # str_one: the string to look for; str_two: the string searched in.
        self.str_one = str_one
        self.str_two = str_two

    def is_contain(self):
        """Print True when str_one occurs inside str_two, otherwise False."""
        print(self.str_one in self.str_two)

    def is_equal_dict(self, dict_one, dict_two):
        """Parse JSON strings into dicts when necessary, then print both.

        Returns None (the result of print).
        """
        if isinstance(dict_one, str):
            dict_one = json.loads(dict_one)
        if isinstance(dict_two, str):
            dict_two = json.loads(dict_two)
        return print(dict_one, dict_two)
c = CommonUtil('w','ww')
c.is_contain()
| [
"13426038659@163.com"
] | 13426038659@163.com |
a73c7308d19a2723bbdb73a89ceca2790e0ddbea | 3a10cda6dbdeee36b24591ada2c551ff2f179d19 | /app/models/hour.py | 1a14852b19fe5a1765504a13f12ccb678185c99c | [] | no_license | ttecles/weather_backend | a0d0c6317c3fde6c4ac6df17c7c9a5ea59299302 | b4b2886a3f0a2b6b502bd38d0b98f017b01ef6b0 | refs/heads/master | 2023-02-19T12:31:52.295041 | 2021-01-26T08:56:34 | 2021-01-26T08:56:34 | 330,950,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | from app import db
class Hour(db.Model):
    """Hourly forecast row for a locality.

    Composite primary key: (locality_id, date, hour_data)."""
    __tablename__ = 'Hour'
    locality_id = db.Column(db.Integer, db.ForeignKey('Locality.id'), primary_key=True, nullable=False)
    date = db.Column(db.Date(), primary_key=True) # "2021-1-15"
    hour_data = db.Column(db.Time(), primary_key=True) # "13:00",
    temperature = db.Column(db.Integer) # -1,
    icon = db.Column(db.String(10)) # "6",
    text = db.Column(db.String(80)) # "Mostly cloudy",
    humidity = db.Column(db.Integer) # 89,
    wind = db.Column(db.Integer) # 4,
    wind_direction = db.Column(db.String(30)) # "Northwest",
    icon_wind = db.Column(db.String(10)) # "NO",
    pressure = db.Column(db.Integer) # 1016,
    # Back-reference: Locality.hour_forecast lists that locality's hours.
    locality = db.relationship("Locality", backref="hour_forecast")
| [
"joan.prat@knowtrade.eu"
] | joan.prat@knowtrade.eu |
b83bd941c10051706dd4bda266e000a394c0b865 | e50838a8cb6617808c4a76da536b03386e1d8303 | /Exercise10.9.py | a28becd94eaf10356d6a567ac2bda3e111a4e5cf | [] | no_license | clayygodd/my-ThinkPython-solutions- | afaab8dad6963b7abe60120d1489f3b9726b6fa5 | bc11160b504511ee4a916621c666b09047b0d8b0 | refs/heads/master | 2020-07-01T15:42:37.453300 | 2019-08-08T08:45:19 | 2019-08-08T08:45:19 | 201,214,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | fin = open('words.txt')
t = []
# Collect every line of the word list from the already-open handle `fin`
# (each entry keeps its trailing newline -- lines are not stripped),
# then show the first entry.
for word in fin:
    t.append(word)
print t[0]
| [
"noreply@github.com"
] | noreply@github.com |
348bfe11b3b0696038f79b93c1de0305b51118f6 | a1cc8e0976f296192b1002e279d66a6a3dada28f | /reefbot/bin/CameraCaptureManager.py | cb0106b35bd019c8b1f90eb0dcbae7a7aef5599a | [
"MIT"
] | permissive | MRSD2018/reefbot-1 | 40ef435ac4e1664858f225fde1d9ee995d55f235 | a595ca718d0cda277726894a3105815cef000475 | refs/heads/master | 2021-04-09T10:26:25.455651 | 2018-04-01T19:13:43 | 2018-04-01T19:13:43 | 125,381,499 | 0 | 0 | MIT | 2018-04-01T15:24:29 | 2018-03-15T14:41:59 | C++ | UTF-8 | Python | false | false | 5,485 | py | #!/usr/bin/python
'''The Camera Capture Manager that recieves joystick commands to take
a picture and then, grabs the picture from the camera publishes a
SpeciesID Request to figure out what fish was in the picture.
Author: Mark Desnoyer (markd@cmu.edu)
Date: July 2010
'''
import roslib; roslib.load_manifest('reefbot')
import rospy
import httplib
import sys
import time
import urllib
import ImageFile
import cv
import socket
from cv_bridge import CvBridge
from joy.msg import Joy
from reefbot_msgs.msg import ImageCaptured
class CameraCaptureManager:
    '''Class that handles capturing an image from the camera.'''

    def __init__(self):
        """Read all configuration from the ROS parameter server and set up
        the internal state used by the joystick-driven state machine."""
        # ----------------------------#
        # Parameters for the process that are filled from the ros parameter server
        # IP address of the camera
        self.cameraIp = rospy.get_param("camera_ip", "192.168.1.13")
        # Joystick topic to listen to
        self.joystickTopic = rospy.get_param("joystick_topic", "joy")
        # Button index for the button that is used to signal that a picture
        # should be taken.
        self.buttonId = rospy.get_param("~button_id", 0)
        # Topic to publish the images on
        self.imageTopic = rospy.get_param("still_image_topic", "still_image")
        # Resolution of the camera. Can be "half" or "full"
        self.res = rospy.get_param("~res", "full")
        # Specifies the bounds of the requested image window. They cannot
        # exceed the size of the image sensor array and should be divisible by
        # 16 if Res is full and 32 if res is half.
        self.x0 = rospy.get_param("~x0", 352)
        self.y0 = rospy.get_param("~y0", 416)
        self.x1 = rospy.get_param("~x1", 3296)
        self.y1 = rospy.get_param("~y1", 2336)
        # JPEG quality with a range from 1 to 20
        self.quality = rospy.get_param("~quality", 10);
        #------------------------------#
        # State machine variables
        # curImageId is seeded from the wall clock so ids stay unique across
        # restarts (Python 2 `long`).
        self.curImageId = long(rospy.Time.now().secs)
        self.buttonWasPressed = False
        # Other variables
        self.imagePublisher = None
        self.cvBridge = CvBridge()

    def __del__(self):
        pass

    def Init(self):
        """Create the still-image publisher and subscribe to the joystick
        topic; JoystickCallback receives this manager as its extra arg."""
        self.imagePublisher = rospy.Publisher(self.imageTopic, ImageCaptured,
                                              tcp_nodelay=True, latch=False);
        rospy.Subscriber(self.joystickTopic, Joy, JoystickCallback, self)
        rospy.loginfo("Initialized Camera Capture Manager")

    def ConnectToCamera(self):
        """Block until an HTTP connection to the camera succeeds, retrying
        every 5 seconds; returns the open httplib connection."""
        rospy.loginfo("Connecting to camera at: %s" % self.cameraIp)
        cameraConnection = None
        connected = False
        while not connected:
            try:
                cameraConnection = httplib.HTTPConnection(self.cameraIp)
                cameraConnection.connect()
                connected = True
            except httplib.HTTPException as e:
                rospy.logerr('Cannot connect to the camera: %s' % e)
                rospy.sleep(5)
            except socket.error as e:
                rospy.logerr('Cannot connect to the camera: %s' % e)
                rospy.sleep(5)
        rospy.loginfo("Connected to camera at: %s" % self.cameraIp)
        return cameraConnection

    def RetrieveImageFromCamera(self):
        """Request one JPEG frame from the camera over HTTP and convert it
        to a ROS image message (bgr8); returns None on any failure."""
        cameraConnection = self.ConnectToCamera()
        try:
            image = None
            # Define the settings for the camera
            params = urllib.urlencode({'res': self.res, 'x0': self.x0,
                                       'y0' : self.y0, 'x1' : self.x1,
                                       'y1' : self.y1, 'quality' : self.quality,
                                       'doublescan' : 1})
            # Request an image from the camera
            try:
                cameraConnection.request("GET", "/image?%s" % params)
                #cameraConnection.request("GET", "/h264f?res=full&x0=640&x1=1280&y0=352&y1=768&qp=16&doublescan=1&ssn=33&iframe=1")
                response = cameraConnection.getresponse()
            except httplib.HTTPException as e:
                rospy.logerr('Cannot connect to the camera: %s' % e)
                return None
            except socket.error as e:
                rospy.logerr('Cannot connect to the camera: %s' % e)
                return None
            if response.status != 200:
                # There was an error reading from the camera
                rospy.logerr('Received an error code from the camera %i, %s' %
                             (response.status, response.reason))
            else:
                # We have a response from the camera so parse it out into a
                # message format: JPEG bytes -> PIL -> OpenCV -> ROS image.
                parser = ImageFile.Parser()
                rawBytes = response.read(response.getheader('content-length'))
                parser.feed(rawBytes)
                pilImage = parser.close()
                cvImage = cv.CreateImageHeader(pilImage.size, cv.IPL_DEPTH_8U, 3)
                cv.SetData(cvImage, pilImage.tostring(), pilImage.size[0]*3)
                image = self.cvBridge.cv_to_imgmsg(cvImage, "bgr8")
        finally:
            # Always release the connection, even on early returns above.
            cameraConnection.close()
        return image
def JoystickCallback(joystickMsg, manager):
    """Edge-detect the configured joystick button; on a new press, grab a
    frame from the camera and publish it as an ImageCaptured message.

    `manager` is the CameraCaptureManager passed as the subscriber's extra
    callback argument.
    """
    buttonIsPressed = joystickMsg.buttons[manager.buttonId] != 0
    # Only react to a rising edge: ignore messages while the button is held.
    if manager.buttonWasPressed:
        manager.buttonWasPressed = buttonIsPressed;
        return
    manager.buttonWasPressed = buttonIsPressed;
    if not buttonIsPressed:
        return
    # A new button press, so we need to capture a frame from the camera
    image = manager.RetrieveImageFromCamera();
    if image is None:
        return
    manager.curImageId = manager.curImageId + 1
    # Now publish the image
    request = ImageCaptured(image_id=manager.curImageId, image=image)
    request.header.stamp = rospy.Time.now()
    manager.imagePublisher.publish(request)
if __name__ == '__main__':
    # Start the ROS node and service joystick callbacks until shutdown.
    rospy.init_node('CameraCaptureManager')
    manager = CameraCaptureManager()
    manager.Init()
    rospy.spin()
| [
"mdesnoyer@gmail.com"
] | mdesnoyer@gmail.com |
f9791b2b58c0ed0961046f7b8a0dd4bb73d8450a | 5410700e83210d003f1ffbdb75499062008df0d6 | /leetcode/tree2Str.py | 61ae648b31c803481fe3db7769a6109de4b7ac74 | [] | no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | class Solution:
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
if t == None:
return ""
if t.left == None and t.right == None:
return str(t.val)
elif t.left == None:
return str(t.val) + "()" + "(" + self.tree2str(t.right) + ")"
elif t.right == None:
return str(t.val) + "(" + self.tree2str(t.left) + ")"
else:
return str(t.val) + "(" + self.tree2str(t.left) + ")" + "(" + self.tree2str(t.right) + ")" | [
"myyan_yan@msn.com"
] | myyan_yan@msn.com |
4622dd167378c5d839005568cfb7944437db9bb2 | d2a2c252488adbcd3546668e6abcd32fbf0c427b | /chapter_2/scrape_re.py | 32bb95fbc360692af51c658887ca5aefdbd7c20e | [] | no_license | niharu/python-crawling-scraping | f0493a275e8a6a33bbb2963b2a7feb5e6cc4642d | 7e49fe654ad5575113bcfa1de3d7d711b45692df | refs/heads/main | 2023-07-07T23:18:15.040898 | 2021-08-21T17:47:24 | 2021-08-21T17:47:24 | 398,577,826 | 0 | 0 | null | 2021-08-21T17:47:25 | 2021-08-21T14:25:27 | null | UTF-8 | Python | false | false | 495 | py | import re
from html import unescape

# Scrape (url, title) pairs from a locally saved gihyo.jp listing page:
# each '<a itemprop="url"> ... </li>' chunk describes one entry.
with open('dp.html') as f:
    html = f.read()

for partial_html in re.findall(r'<a itemprop="url".*?</ul>\s*</a></li>', html, re.DOTALL):
    url = re.search(r'<a itemprop="url" href="(.*?)">', partial_html).group(1)
    # Hrefs in the page are site-relative.
    url = 'https://gihyo.jp' + url
    title = re.search(r'<p itemprop="name".*?</p>', partial_html).group(0)
    title = title.replace('<br/>', ' ')
    # Strip any remaining tags, then decode HTML entities.
    title = re.sub(r'<.*?>', '', title)
    title = unescape(title)
    print(url,title)
"niharu.dev@gmail.com"
] | niharu.dev@gmail.com |
0924a12cd5b76c25074c0a53e6bd6dd9a4edbe97 | 865969935f29dc20c74ae0d7a6c5c3218ef73a4f | /app.py | b977edcbf175848b580d8cfb8dea668d7fe93a3e | [] | no_license | Nitesh909/LoanApp3 | f615261378e9da55e80aff1ae7bfb26f61bd9620 | 39df25696546abd0200ed24403228e45593d8a5e | refs/heads/main | 2022-12-26T06:42:37.001173 | 2020-10-02T19:38:43 | 2020-10-02T19:38:43 | 300,717,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | from flask import Flask, render_template, request, url_for, redirect
from joblib import load
import numpy as np

app = Flask(__name__)
# Pre-trained loan-approval model, loaded once at startup.
model = load("newmodel.joblib")

@ app.route('/')
def home():
    # Landing page with the applicant input form.
    return render_template("index.html")

@ app.route('/result', methods=['GET', 'POST'])
def result():
    """Collect the form fields in the order the model expects and render
    an approval/denial page based on the prediction.

    NOTE(review): a plain GET falls through and returns None -- confirm
    the route is only ever reached via the form's POST.
    """
    if request.method == 'POST':
        df = []
        df.append( int(request.form['Gender']))
        df.append(int(request.form['Married']))
        df.append(int(request.form['NotEducated']))
        df.append(int(request.form['Self Employe']))
        df.append(float(request.form['Loan Amount']))
        df.append(float(request.form['Loan C. History']))
        df.append(float(request.form['Applicant_Income']))
        df.append(float(request.form['Co-App. Income']))
        df.append(int(request.form['Property Area']))
        df.append(int(request.form['Dependents']))
        df.append(int(request.form['Loan Amt Term']))
        prediction = model.predict([df])
        # Class 0 -> denied page, anything else -> approved page.
        if prediction[0] == 0:
            return render_template("result0.html")
        else :
            return render_template("result1.html")

if __name__ == "__main__":
    app.run(debug=True)
"noreply@github.com"
] | noreply@github.com |
fbb8fd2981dcd6d139d81dc69aef91c52bf1e604 | 87f5d4f3092929921069986d4ee53002fbcfaad8 | /scripts/opencache-node | 4bb7f2a377a2edc0746a8b3c0c233c952d1f0437 | [
"Apache-2.0"
] | permissive | opencache-project/opencache-node | 22f675f10039c286cac97a3f055e7db1f46fe0a7 | 65aae90ba6e8c2f11ad4df8e812d4811d7d29007 | refs/heads/master | 2020-03-27T23:06:46.983150 | 2016-01-08T19:50:13 | 2016-01-08T19:50:13 | 20,293,321 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | #!/usr/bin/env python2.7
"""opencache-controller: Simple script used to execute either the OpenCache controller or node."""
import opencache.node.opencachenode as node
import optparse
import os
parser = optparse.OptionParser()
parser.add_option("--config", "-c", dest="config",
help="location of configuration file to load")
(options, args) = parser.parse_args()
if options.config == None:
print "[ERROR] Please specify the path to a configuration file."
parser.print_help()
os._exit(3)
else:
_node = node.Node(options.config)
| [
"matt@matthewbroadbent.net"
] | matt@matthewbroadbent.net | |
43387b0df1c22da601c740d3c1fbbd6238e02279 | 3e924b59710ffdce72777358f76d34c860d25fc3 | /custom_gym/setup.py | d8b0cb22e9c2cde031cadcedef06c04d7ccd6798 | [] | no_license | nontnont01/lucy-rl-appium-1 | 648b567a09adb8125cc9ac8dc9cd4f915ca4a8dd | 829d72fb5a7ca999de3976643786965b0394e885 | refs/heads/master | 2023-06-20T16:39:24.746392 | 2021-07-28T10:10:07 | 2021-07-28T10:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from setuptools import setup
# Minimal packaging metadata for the custom Gym environment.
setup(
    name= 'custom_env',
    version= '0.1',
    install_requires=['gym']
)
"hoppyhope01@gmail.com"
] | hoppyhope01@gmail.com |
d88a68e4fb641928f7fcb6702f53e4c94ab26f6c | 54bcefb6cb926583d1a6abe97190d04b9142f5f4 | /tests/livetest/livetest.py | 88eb1c75183379d0ed388cdce0cc3a53a8729e82 | [
"MIT"
] | permissive | huntflow/pytest-testrail | 15ed9a0fb49edf2aa0ec678341aa48179d9544d4 | 7ed8829e9948597ebc7eb76e6a91f1609a05e069 | refs/heads/master | 2022-12-25T05:03:26.853604 | 2020-10-14T06:09:19 | 2020-10-14T06:09:19 | 263,955,931 | 1 | 0 | MIT | 2020-10-14T06:09:21 | 2020-05-14T15:40:57 | Python | UTF-8 | Python | false | false | 434 | py | # -*- coding: UTF-8 -*-
import pytest
import time
from pytest_testrail.plugin import testrail, pytestrail

# Live fixtures for the TestRail reporting plugin: each test maps to one or
# more TestRail case IDs and exercises a different reported outcome.

@testrail('C344', 'C366')
def test_func1():
    # Passes; reported against two case IDs.
    time.sleep(0.5)

@testrail('C345')
def test_func2():
    # Deliberate failure.
    time.sleep(1.6)
    pytest.fail()

@testrail('C99999')
def test_func3():
    # Presumably an unknown/non-existent case ID -- verify against TestRail.
    time.sleep(0.5)

@pytestrail.case('C1788')
def test_func4():
    # Skipped test.
    pytest.skip()

@pytestrail.case('C1789')
def test_func5():
    time.sleep(0.5)
| [
"dubnerr@gmail.com"
] | dubnerr@gmail.com |
ae411c2778a3bce3f10cac7602f68285ebf4dbe5 | b37ada1653d97de60173d85989c34a2a93e2f7b1 | /remindGit/celery_beat.py | 9ab6ce2dab99c48dfe25a42d77f99fe537f84183 | [] | no_license | PyJava-Nikhil/remindGit | 497a95abd191a967358f09d0872f6836a8d6e861 | 08ecd3bca374d32b03da27de16bdbc4494963b6d | refs/heads/master | 2022-12-17T23:19:19.043317 | 2020-02-10T11:51:46 | 2020-02-10T11:51:46 | 238,858,141 | 0 | 1 | null | 2022-12-08T03:34:14 | 2020-02-07T06:27:51 | Python | UTF-8 | Python | false | false | 184 | py | from celery.schedules import crontab
# Celery beat schedule: invoke reminder.tasks.send_reminders once a minute.
CELERY_BEAT_SCHEDULE = {
    'send_reminder': {
        'task': 'reminder.tasks.send_reminders',
        'schedule': crontab(minute='*/1')
    }
}
"nsharma@focusvision.com"
] | nsharma@focusvision.com |
d87de4a5deb346043db63ce34a639299059fde51 | 378ad0c10fdb35e83c8bd1640202518acaa936f5 | /osa09-15_tavara_matkalaukku_lastiruuma/test/test_1_tavara.py | 08cd9f777e5c0b8e5b3db44471b4ba09cbcffe66 | [] | no_license | sami-one/mooc-ohjelmointi-21 | cc1af96f0accdf18e2c4bbcbaa236a00e1d905bf | ba41b099be16ed55a5fd3ebe113fd145ba8d88f4 | refs/heads/main | 2023-05-02T12:12:09.233333 | 2021-05-27T13:10:52 | 2021-05-27T13:10:52 | 371,375,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,309 | py | import unittest
from unittest.mock import patch
from tmc import points, reflect
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
from datetime import date, datetime, timedelta
exercise = 'src.koodi'
def f(attr: list):
    """Join the given list of strings into a single comma-separated string."""
    separator = ","
    return separator.join(attr)
@points('8.tavara_matkalaukku_lastiruuma_osa1')
class TavaraTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
cls.module = load_module(exercise, 'fi')
def test_0a_paaohjelma_kunnossa(self):
ok, line = check_source(self.module)
message = """Funktioita testaava koodi tulee sijoittaa lohkon
if __name__ == "__main__":
sisälle. Seuraava rivi tulee siirtää:
"""
self.assertTrue(ok, message+line)
def test1_tavara_olemassa(self):
try:
from src.koodi import Tavara
except:
self.assertTrue(False, "Ohjelmastasi pitäisi löytyä luokka nimeltä Tavara")
def test2_tavara_konstruktori(self):
try:
from src.koodi import Tavara
tavara = Tavara("Aapiskukko", 2)
except Exception as e:
self.assertTrue(False, 'Luokan Tavara konstuktorin kutsuminen arvoilla Tavara("Aapiskukko", 2)' +
f' palautti virheen: {e}\nVarmista että konstruktori on määritelty oikein')
def test3_tavara_str(self):
test_cases = [("Aapiskukko", 2), ("Moukari", 8), ("Kalajapullo", 1)]
for test_case in test_cases:
from src.koodi import Tavara
tavara = Tavara(test_case[0], test_case[1])
corr = f'{test_case[0]} ({test_case[1]} kg)'
val = str(tavara)
self.assertEqual(corr, val, f"Metodin __str__ pitäisi palauttaa merkkijono\n{corr}\nkun olio luotiin kutsulla\n" +
f'Tavara("{test_case[0]}", {test_case[1]})\nNyt metodi palauttaa merkkijonon\n{val}')
def test4_aatribuutit_piilossa(self):
from src.koodi import Tavara
koodi = """
tavara = Tavara("Aapiskukko", 2)
print(tavara.paino)
"""
ok = False
tavara = Tavara("Aapiskukko", 2)
try:
v = tavara.paino
except Exception as e:
ok = True
if not ok:
self.assertFalse(type(v) == type(2), f'Koodin\n{koodi}\nsuorituksen ei pitäisi tulostaa tuotteen painoa. Tuotteen painon tulee olla kapseloitu')
koodi = """
tavara = Tavara("Aapiskukko", 2)
print(tavara.nimi)
"""
ok = False
tavara = Tavara("Aapiskukko", 2)
try:
v = tavara.paino
except Exception as e:
ok = True
if not ok:
self.assertFalse(type(v) == type("LOL"), f'Koodin\n{koodi}\nsuorituksen ei pitäisi tulostaa tuotteen nimeä. Tuotteen nimen tulee olla kapseloitu')
def test5_tavara_paino(self):
try:
from src.koodi import Tavara
koodi = """
tavara = Tavara("Aapiskukko", 2)
tavara.paino()
"""
tavara = Tavara("Aapiskukko", 2)
p = tavara.paino()
except Exception as e:
self.assertTrue(False, f'Koodin\n{koodi}\nsuoritus aiheutti virheen\n{e}\nOnhan metodi paino(self) määritelty?')
self.assertTrue(p == 2, f'Kun suoritetaan\n{koodi}\n, metodin pitäsi palauttaa 2, paluuarvo oli {p}')
@points('8.tavara_matkalaukku_lastiruuma_osa1')
def test6_tavara_nimi(self):
try:
from src.koodi import Tavara
koodi = """
tavara = Tavara("Aapiskukko", 2)
tavara.nimi()
"""
tavara = Tavara("Aapiskukko", 2)
p = tavara.nimi()
except Exception as e:
self.assertTrue(False, f'Koodin\n{koodi}\nsuoritus aiheutti virheen\n{e}\nOnhan metodi nimi(self) määritelty?')
self.assertTrue(p == "Aapiskukko", f'Kun suoritetaan\n{koodi}\n, metodin pitäsi palauttaa Aapiskukko, paluuarvo oli {p}')
def test7_tavara_paino_2(self):
try:
from src.koodi import Tavara
koodi = """
tavara = Tavara("Aapiskukko", 5)
tavara.paino()
"""
tavara = Tavara("Aapiskukko", 5)
p = tavara.paino()
except Exception as e:
self.assertTrue(False, f'Koodin\n{koodi}\nsuoritus aiheutti virheen\n{e}\nOnhan metodi paino(self) määritelty?')
self.assertTrue(p == 5, f'Kun suoritetaan\n{koodi}\n, metodin pitäsi palauttaa 5, paluuarvo oli {p}')
@points('8.tavara_matkalaukku_lastiruuma_osa1')
def test7_tavara_nimi_2(self):
try:
from src.koodi import Tavara
koodi = """
tavara = Tavara("Kukko", 2)
tavara.nimi()
"""
tavara = Tavara("Kukko", 2)
p = tavara.nimi()
except Exception as e:
self.assertTrue(False, f'Koodin\n{koodi}\nsuoritus aiheutti virheen\n{e}\nOnhan metodi nimi(self) määritelty?')
self.assertTrue(p == "Kukko", f'Kun suoritetaan\n{koodi}\n, metodin pitäsi palauttaa Kukko, paluuarvo oli {p}')
if __name__ == '__main__':
unittest.main()
| [
"sami@samione.fi"
] | sami@samione.fi |
53234971fde311041641ce460e876ae08df1a6d4 | ea67d0e7db990e341f8dbb0b69a2aa48835af35c | /renderMeshSilhouette.py | 6de115aff35fea4677f7690e9985ed85c2fb546b | [
"MIT"
] | permissive | alexlimofficial/opengl-toonshader-silhouette | 86b2e883cf7a04b339842d22e5c100175bdf9003 | 727ee223cd6adaf0a30b47344a21a4c3ee48da4d | refs/heads/main | 2023-03-29T12:09:40.290193 | 2021-04-02T15:53:52 | 2021-04-02T15:53:52 | 353,188,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,268 | py | import pyrr
import glfw
import numpy as np
from math import sin, cos
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
from lib.util.obj import ObjLoader
from lib.util.texture import load_texture
##############################################################################
# shaders
##############################################################################
vertex_src = """
# version 330
layout(location = 0) in vec3 a_position;
layout(location = 1) in vec2 a_texture;
layout(location = 2) in vec3 a_normal;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 normal_matrix;
out vec2 v_texture;
out vec3 v_normal;
void main()
{
gl_Position = view * model * vec4(a_position, 1.0);
v_texture = a_texture;
v_normal = normalize(mat3(normal_matrix) * a_normal); // using normal matrix
}
"""
fragment_src = """
# version 330
in vec2 v_texture;
in vec3 v_normal;
uniform vec3 camera_pos;
uniform vec3 camera_target;
uniform sampler2D s_texture;
out vec4 out_color;
void main()
{
vec3 normal = normalize(v_normal);
vec3 camera_dir = normalize(camera_pos - camera_target);
float sil = dot(normal, camera_dir);
if (sil < 0.2 && sil > -0.2)
out_color = vec4(1.0, 1.0, 1.0, 1.0);
else
out_color = texture(s_texture, v_texture);
}
"""
##############################################################################
##############################################################################
# glfw callback functions
def window_resize(window, width, height):
    """glfw resize callback: keep the GL viewport and the perspective
    projection uniform in sync with the new window dimensions."""
    glViewport(0, 0, width, height)
    aspect_ratio = width / height
    proj_matrix = pyrr.matrix44.create_perspective_projection_matrix(45, aspect_ratio, 0.1, 100)
    glUniformMatrix4fv(proj_loc, 1, GL_FALSE, proj_matrix)
##############################################################################
# glfw
##############################################################################
if not glfw.init():
raise Exception("glfw can not be initialized!")
width, height = 1920, 1080
window = glfw.create_window(width, height, "Mesh Visualization", None, None)
if not window:
glfw.terminate()
raise Exception("glfw window can not be created!")
glfw.set_window_pos(window, 400, 200)
glfw.set_window_size_callback(window, window_resize)
glfw.make_context_current(window)
##############################################################################
##############################################################################
##############################################################################
# load model
##############################################################################
# mesh obj paths
face_obj_path = './assets/therock/Face.obj'
lefteye_obj_path = './assets/therock/LeftEye.obj'
righteye_obj_path = './assets/therock/RightEye.obj'
# mesh texture paths
face_tex_path = './assets/therock/textures/Texture_Face.jpg'
lefteye_tex_path = './assets/therock/textures/Texture_LeftEye.jpg'
righteye_tex_path = './assets/therock/textures/Texture_RightEye.jpg'
face_meta = ObjLoader.load_model(face_obj_path)
lefteye_meta = ObjLoader.load_model(lefteye_obj_path)
righteye_meta = ObjLoader.load_model(righteye_obj_path)
#================= FACE =================#
face_vertices = face_meta['v']
face_tex = face_meta['vt']
face_norms = face_meta['vn']
face_indices = face_meta['indices']
face_buffer = face_meta['buffer']
#================= EYES =================#
lefteye_indices = lefteye_meta['indices']
lefteye_buffer = lefteye_meta['buffer']
righteye_indices = righteye_meta['indices']
righteye_buffer = righteye_meta['buffer']
##############################################################################
##############################################################################
# compile the shader programs
shader = compileProgram(
compileShader(vertex_src, GL_VERTEX_SHADER),
compileShader(fragment_src, GL_FRAGMENT_SHADER)
)
##############################################################################
# VAO/VBO
##############################################################################
VAO = glGenVertexArrays(3)
VBO = glGenBuffers(3)
#================= FACE =================#
glBindVertexArray(VAO[0])
glBindBuffer(GL_ARRAY_BUFFER, VBO[0])
glBufferData(GL_ARRAY_BUFFER, face_buffer.nbytes, face_buffer, GL_STATIC_DRAW)
# face vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(0))
# face textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(12))
# face normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, face_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
#================= LEFT EYE =================#
glBindVertexArray(VAO[1])
glBindBuffer(GL_ARRAY_BUFFER, VBO[1])
glBufferData(GL_ARRAY_BUFFER, lefteye_buffer.nbytes, lefteye_buffer, GL_STATIC_DRAW)
# left eye vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(0))
# left eye textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(12))
# left eye normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, lefteye_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
#================= RIGHT EYE =================#
glBindVertexArray(VAO[2])
glBindBuffer(GL_ARRAY_BUFFER, VBO[2])
glBufferData(GL_ARRAY_BUFFER, righteye_buffer.nbytes, righteye_buffer, GL_STATIC_DRAW)
# left eye vertices (x, y, z)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(0))
# left eye textures (u, v)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(12))
# left eye normals (x, y, z)
glEnableVertexAttribArray(2)
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, righteye_buffer.itemsize * 8, ctypes.c_void_p(20))
glBindVertexArray(0)
##############################################################################
##############################################################################
##############################################################################
# textures
##############################################################################
textures = glGenTextures(3)
load_texture(face_tex_path, textures[0])
load_texture(lefteye_tex_path, textures[1])
load_texture(righteye_tex_path, textures[2])
##############################################################################
##############################################################################
##############################################################################
# setup/transformations
##############################################################################
glUseProgram(shader)
glClearColor(0, 0.1, 0.1, 1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
scale = pyrr.Matrix44.from_scale((0.005, 0.005, 0.005))
translation = pyrr.matrix44.create_from_translation(pyrr.Vector3([0.0, 0.0, 0.0]))
model = pyrr.matrix44.multiply(translation, scale)
projection = pyrr.matrix44.create_perspective_projection_matrix(
fovy=45,
aspect=width/height,
near=0.1,
far=1000
)
normal_matrix = np.linalg.inv(model).T
model_loc = glGetUniformLocation(shader, "model")
view_loc = glGetUniformLocation(shader, "view")
proj_loc = glGetUniformLocation(shader, "projection")
normal_matrix_loc = glGetUniformLocation(shader, "normal_matrix")
camera_pos_loc = glGetUniformLocation(shader, "camera_pos")
camera_target_loc = glGetUniformLocation(shader, "camera_target")
glUniformMatrix4fv(model_loc, 1, GL_FALSE, model)
glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)
glUniformMatrix4fv(normal_matrix_loc, 1, GL_FALSE, normal_matrix)
##############################################################################
##############################################################################
##############################################################################
# main application loop
##############################################################################
# Main render loop: orbit the camera and draw face + both eyes each frame.
while not glfw.window_should_close(window):
    # clear the buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # move camera
    # Orbit on the XZ plane around the origin at half speed (0.5 * time).
    radius = 0.1
    camX = sin(0.5 * glfw.get_time()) * radius
    camZ = cos(0.5 * glfw.get_time()) * radius
    camera_position = pyrr.Vector3([camX, 0.0, camZ])
    camera_target = pyrr.Vector3([0.0, 0.0, 0.0])
    camera_up = pyrr.Vector3([0.0, 1.0, 0.0])
    view = pyrr.matrix44.create_look_at(
        eye=camera_position,
        target=camera_target,
        up=camera_up
    )
    #================= FACE =================#
    # Camera position/target are uploaded so the fragment shader can compute
    # the silhouette term (dot of normal with the camera direction).
    glBindVertexArray(VAO[0])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[0])
    glDrawArrays(GL_TRIANGLES, 0, len(face_indices))
    glBindVertexArray(0)
    #================= EYES =================#
    glBindVertexArray(VAO[1])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[1])
    glDrawArrays(GL_TRIANGLES, 0, len(lefteye_indices))
    glBindVertexArray(0)
    glBindVertexArray(VAO[2])
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, view)
    glUniform3fv(camera_pos_loc, 1, camera_position)
    glUniform3fv(camera_target_loc, 1, camera_target)
    glBindTexture(GL_TEXTURE_2D, textures[2])
    glDrawArrays(GL_TRIANGLES, 0, len(righteye_indices))
    glBindVertexArray(0)
    # swap front and back buffers | poll for and process events
    glfw.swap_buffers(window)
    glfw.poll_events()
glfw.terminate()
##############################################################################
##############################################################################
| [
"alexlim95@gmail.com"
] | alexlim95@gmail.com |
0b5f2151006a1871143c1b28da5be92d90adf6e6 | 2c38a2d95aa7d27f66d36e8a60af86fdc5956afe | /password-storage/Table.py | 9dda1fc873614b446e537697584af2da84ebb7f6 | [] | no_license | HuBoZhi/python | 312602efcafdc945f1d8937b76f268e2a852414d | 699b696f963c841a8825270cacb645b4388440b2 | refs/heads/master | 2021-04-03T07:07:33.451691 | 2018-03-09T10:10:39 | 2018-03-09T10:10:39 | 124,405,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Table.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
# NOTE: generated by pyuic5 from Table.ui — regenerating the .ui file will
# overwrite any manual edits made here (see header warning above).
class Ui_Table(object):
    def setupUi(self, MainWindow):
        # Build the widget tree: a 570x470 window fully covered by an empty
        # QTableWidget (0 rows / 0 columns; populated by application code).
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(570, 470)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(0, 0, 570, 470))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Apply translatable UI strings (only the window title here).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
| [
"36149293+HuBoZhi@users.noreply.github.com"
] | 36149293+HuBoZhi@users.noreply.github.com |
7cc2c7507b75fcd535a7e8e9c9b0457f48bd6414 | e0b6f5bd451aa8af3273fbc948799637681342e1 | /scripts/wm_representation/functions/IEM/Controls/trial_by_trial/trainT_testT_wm3_shuffles_refs.py | 99a1066c8955feb220ec3514ad753bea566ad476 | [] | no_license | davidbestue/encoding | 6b304f6e7429f94f97bd562c7544d1fdccf7bdc1 | c27319aa3bb652b3bfc6b7340044c0fda057bc62 | refs/heads/master | 2022-05-05T23:41:42.419252 | 2022-04-27T08:34:52 | 2022-04-27T08:34:52 | 144,248,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,890 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
#######
####### In this analysis:
####### I am doing the reconstruction training in the delay period and testing in each trial. No CV and No Shuffles
#######
############# Add to sys path the path where the tools folder is
import sys, os
#path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) ### same directory or one back options
path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir)) ### same directory or one back options
sys.path.insert(1, path_tools)
from tools import *
############# Namefiles for the savings.
path_save_reconst_shuffs ='/home/david/Desktop/Reconstructions/IEM/recs_shuffs_references_IEM_trainT_testT_wm3.npy'
############# Testing options
decoding_thing = 'T_alone' #'dist_alone' 'T_alone'
############# Training options
training_item = 'T_alone' #'dist_alone' 'T_alone'
cond_t = '1_7' #'1_7' '2_7'
Distance_to_use = 'mix' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
tr_st=4
tr_end=6
############# Elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7']
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001']
brain_regions = ['visual','ips', 'pfc', 'broca']
ref_angle=180
Reconstructions_ = [] ## subjects x brain regiond --> ntrials x 16 x 720 matrix
############# Analysis
#############
for Subject in Subjects:
for Brain_region in brain_regions:
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
activity, behaviour = process_wm_task(wm_fmri_paths, masks, wm_beh_paths, nscans_wm=nscans_wm)
behaviour['Condition'] = behaviour['Condition'].replace(['1.0_0.2', '1.0_7.0', '2.0_0.2','2.0_7.0' ], ['1_0.2', '1_7', '2_0.2', '2_7'])
behaviour['brain_region'] = Brain_region
###
###
print(Subject, Brain_region)
Reconstructed_trials=[] ## ntrials x 16 x 720 matrix
###
###
#angx = behaviour[decoding_thing].values
#angles_shuffled = random.sample( list(angx), len(angx) )
###
###
        # Leave-one-session-out loop: each trial is reconstructed with an IEM
        # trained on trials from the OTHER sessions of this subject/ROI.
        for trial in range(len(behaviour)):
            activity_trial = activity[trial,:,:]
            beh_trial = behaviour.iloc[trial,:]
            session_trial = beh_trial.session_run
            ###
            ### Training
            ###
            # Training set: long-delay (7 s) trials of the requested order,
            # excluding the session the tested trial belongs to.
            if cond_t == '1_7':
                boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==1) * np.array(behaviour['session_run']!=session_trial)
            elif cond_t == '2_7':
                boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==2) * np.array(behaviour['session_run']!=session_trial)
            #
            activity_train_model = activity[boolean_trials_training, :, :]
            # Average the training TRs (tr_st:tr_end) into one pattern/trial.
            activity_train_model_TRs = np.mean(activity_train_model[:, tr_st:tr_end, :], axis=1)
            behavior_train_model = behaviour[boolean_trials_training]
            training_angles = behavior_train_model[['T', 'NT1', 'NT2']].values
            #
            Weights_matrix, Interc = Weights_matrix_LM_3items(activity_train_model_TRs, training_angles)
            Weights_matrix_t = Weights_matrix.transpose()
            ###
            ### Testing
            ###
            Reconstructed_TR = [] ## 16 x 720 matrix
            #
            for TR_ in range(nscans_wm):
                activity_TR = activity_trial[TR_, :]
                # Shuffle control: each reconstruction is aligned to a RANDOM
                # reference angle instead of the trial's true angle —
                # presumably intentional for this "shuffles_refs" control;
                # confirm against the non-shuffled analysis script.
                angle_trial = random.choice([0,90,180,270])
                # Least-squares channel inversion: (W'W)^+ W' b
                Inverted_encoding_model = np.dot( np.dot ( np.linalg.pinv( np.dot(Weights_matrix_t, Weights_matrix ) ), Weights_matrix_t), activity_TR)
                #Inverted_encoding_model_pos = Pos_IEM2(Inverted_encoding_model)
                IEM_hd = ch2vrep3(Inverted_encoding_model) #36 to 720
                to_roll = int( (ref_angle - angle_trial)*(len(IEM_hd)/360) ) ## degrees to roll
                IEM_hd_aligned=np.roll(IEM_hd, to_roll) ## roll this degree ##vector of 720
                Reconstructed_TR.append(IEM_hd_aligned)
            ##
            resconstr_trial = np.array(Reconstructed_TR)
            Reconstructed_trials.append(resconstr_trial)
##
##
Reconstructions_.append(Reconstructed_trials)
########
final_rec = np.array(Reconstructions_)
np.save(path_save_reconst_shuffs, final_rec)
############# Options de training times, the TRs used for the training will be different
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='stim_p':
# tr_st=3
# tr_end=4
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='respo':
# if decoding_thing=='Target':
# tr_st=8
# tr_end=9
# elif decoding_thing=='Distractor':
# tr_st=11
# tr_end=12 | [
"davidsanchezbestue@hotmail.com"
] | davidsanchezbestue@hotmail.com |
7e94dcdb37b413a9610d6df5998fce9b1a0a13bb | 86d01337a2d3f354c27e03299e1088b05932edcb | /src/trackimo/protocol/protocol.py | 8d3d03874c2cfa20ec37ebf3f34003ffc512eee1 | [
"MIT"
] | permissive | troykelly/python-trackimo | 2b2281f8053b7639c016e081576bbd75dc8f5d73 | d38a36faccfb6d0f75d1cef23637c1e7e5e6797a | refs/heads/master | 2023-05-29T16:27:42.650967 | 2020-10-01T07:42:32 | 2020-10-01T07:42:32 | 149,948,474 | 1 | 0 | MIT | 2023-05-23T00:12:47 | 2018-09-23T04:42:28 | Python | UTF-8 | Python | false | false | 16,661 | py | # -*- coding: utf-8 -*-
"""
Protocol handler for Trackimo
"""
import logging
import sys
import requests
import os
import asyncio
import functools
import backoff
from datetime import datetime, timedelta
from .user import UserHandler
from .account import AccountHandler
from ..exceptions import (
MissingInformation,
UnableToAuthenticate,
NoSession,
CanNotRefresh,
TrackimoAPIError,
TrackimoAccessDenied,
TrackimoLoginFailed,
)
_logger = logging.getLogger(__name__)
logging.getLogger("backoff").addHandler(logging.StreamHandler())
def fatal_code(e):
    """Return True when the exception carries a 4xx (client-error) HTTP response.

    Used as the ``giveup`` predicate for backoff: 4xx responses are not
    retried, while everything else is.
    NOTE(review): the ``@backoff.on_exception`` below is applied to the
    Protocol *class*, which only wraps instantiation, not the HTTP calls —
    confirm whether it was meant for ``__request`` instead.
    """
    status = e.response.status_code
    return status >= 400 and status < 500
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_time=300, giveup=fatal_code
)
class Protocol(object):
    def __init__(
        self,
        client_id,
        client_secret,
        host="app.trackimo.com",
        version=3,
        port=443,
        protocol="https",
        username=None,
        password=None,
        loop=None,
    ):
        """Prepare the Trackimo protocol handler (no network I/O happens here).

        Args:
            client_id: OAuth2 client id issued by Trackimo.
            client_secret: OAuth2 client secret issued by Trackimo.
            host/version/port/protocol: API endpoint components.
            username/password: optional credentials used later by login().
            loop: asyncio event loop; defaults to asyncio.get_event_loop().
        """
        super().__init__()
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop on Python 3.10+ — confirm supported Python versions.
        self.__loop = loop if loop else asyncio.get_event_loop()
        self.__client_id = client_id
        self.__client_secret = client_secret
        self.__host = host
        self.__version = version
        self.__port = port
        self.__protocol = protocol
        # Public versioned API base, e.g. https://app.trackimo.com:443/api/v3
        self.__api_url = (
            f"{self.__protocol}://{self.__host}:{self.__port}/api/v{self.__version}"
        )
        self.__internal_url = (
            f"{self.__protocol}://{self.__host}:{self.__port}/api/internal/v1"
        )
        self.__api_login_url = f"{self.__protocol}://{self.__host}:{self.__port}/api/internal/v2/user/login"
        # Session/token state is populated by login()/restore_session().
        self.__session = None
        self.__api_token = None
        self.__api_expires = None
        self.__refresh_token = None
        self.__trackimo_username = username if username else None
        self.__trackimo_password = password if password else None
        self.__trackimo_accountid = None
        self.__user = None
        # OAuth scopes requested during login (overridable via login()).
        self.__scopes = [
            "locations",
            "notifications",
            "devices",
            "accounts",
            "settings",
            "geozones",
        ]
        _logger.debug("Protocol handler ready.")
    @property
    def accountid(self):
        # Trackimo account id; set by __post_login after authentication.
        return self.__trackimo_accountid

    @property
    def auth(self):
        """Current auth state as {token, refresh, expires}, or None when
        not authenticated."""
        if not self.__api_token:
            return None
        return {
            "token": self.__api_token,
            "refresh": self.__refresh_token,
            "expires": self.__api_expires,
        }

    @property
    def loop(self):
        # The asyncio event loop this handler runs blocking requests on.
        if not self.__loop:
            return None
        return self.__loop

    @property
    def username(self):
        # Trackimo account username (None when unset).
        if not self.__trackimo_username:
            return None
        return self.__trackimo_username

    @username.setter
    def username(self, username):
        self.__trackimo_username = username

    @property
    def password(self):
        # Trackimo account password (None when unset).
        if not self.__trackimo_password:
            return None
        return self.__trackimo_password

    @password.setter
    def password(self, password):
        self.__trackimo_password = password

    async def restore_session(self, refresh_token):
        """Resume a previous session from a stored refresh token.

        Falls back to a full login inside __token_refresh if the token is
        rejected. Returns the same dict as the ``auth`` property.
        """
        self.__refresh_token = refresh_token
        _logger.debug("Restoring session with token: %s", self.__refresh_token)
        await self.__token_refresh()
        return self.auth
    async def login(self, username=None, password=None, scopes=None):
        """Full three-step Trackimo OAuth2 login.

        1. POST credentials to the internal login endpoint (cookie session).
        2. GET oauth2/auth to obtain an authorization code.
        3. POST oauth2/token to exchange the code for access/refresh tokens.

        Returns {token, refresh, expires}. Raises UnableToAuthenticate or
        TrackimoLoginFailed on any step failing.
        """
        if username:
            self.__trackimo_username = username
        if password:
            self.__trackimo_password = password
        if scopes:
            self.__scopes = scopes
        if not (self.__trackimo_username and self.__trackimo_password):
            raise UnableToAuthenticate("Must have a username and password available")
        # Reset all session/token state before starting a fresh login.
        self.__session = None
        self.__api_token = None
        self.__api_expires = None
        self.__refresh_token = None
        self.__session = requests.Session()
        login_payload = {
            "username": self.__trackimo_username,
            "password": self.__trackimo_password,
            "remember_me": True,
            "whitelabel": "TRACKIMO",
        }
        auth_payload = {
            "client_id": self.__client_id,
            "redirect_uri": "https://app.trackimo.com/api/internal/v1/oauth_redirect",
            "response_type": "code",
            "scope": ",".join(self.__scopes),
        }
        token_payload = {
            "client_id": self.__client_id,
            "client_secret": self.__client_secret,
            "code": None,
        }

        def send_login_payload():
            # Blocking requests call; executed in the default executor below.
            return self.__session.request(
                "POST", self.__api_login_url, json=login_payload, allow_redirects=True
            )

        try:
            response = await self.__loop.run_in_executor(None, send_login_payload)
        except Exception as err:
            raise err
        status_code = getattr(response, "status_code", None)
        if status_code != 200:
            raise TrackimoLoginFailed(
                "Trackimo API Rejecting Credentials",
                status_code=status_code,
                response=response,
            )
        # Step 2: fetch the OAuth authorization code (no_check: no token yet).
        try:
            data = await self.api(
                method="GET",
                path="oauth2/auth",
                data=auth_payload,
                headers=None,
                no_check=True,
            )
        except TrackimoAccessDenied as apierror:
            raise TrackimoLoginFailed(
                "Trackimo API Rejecting token exchange",
                status_code=apierror.status_code,
                body=apierror.body,
                json=apierror.json,
                headers=apierror.headers,
                response=apierror.response,
            )
        except TrackimoAPIError as apierror:
            raise TrackimoLoginFailed(
                "Trackimo API error response",
                status_code=apierror.status_code,
                body=apierror.body,
                json=apierror.json,
                headers=apierror.headers,
                response=apierror.response,
            )
        except Exception as err:
            raise err
        if not data or not "code" in data:
            raise TrackimoLoginFailed(
                "Trackimo API missing oauth code",
            )
        token_payload["code"] = data["code"]
        # Step 3: exchange the authorization code for tokens.
        try:
            data = await self.api(
                method="POST",
                path="oauth2/token",
                data=token_payload,
                headers=None,
                no_check=True,
            )
        except TrackimoAccessDenied as apierror:
            raise TrackimoLoginFailed(
                "Trackimo API Rejecting token exchange",
                status_code=apierror.status_code,
                body=apierror.body,
                json=apierror.json,
                headers=apierror.headers,
                response=apierror.response,
            )
        except TrackimoAPIError as apierror:
            raise TrackimoLoginFailed(
                "Trackimo API failure to exchange code",
                status_code=apierror.status_code,
                body=apierror.body,
                json=apierror.json,
                headers=apierror.headers,
                response=apierror.response,
            )
        except Exception as err:
            raise err
        if not data or not "access_token" in data:
            raise UnableToAuthenticate("Could not retrieve access token code from API")
        self.__api_token = data["access_token"]
        if "refresh_token" in data:
            self.__refresh_token = data["refresh_token"]
        if "expires_in" in data:
            # NOTE(review): expires_in is divided by 1000, implying the API
            # reports milliseconds rather than the usual OAuth seconds —
            # confirm against the Trackimo API documentation.
            self.__api_expires = datetime.now() + timedelta(
                seconds=int(data["expires_in"]) / 1000
            )
        await self.__post_login()
        return {
            "token": self.__api_token,
            "refresh": self.__refresh_token,
            "expires": self.__api_expires,
        }
    async def __token_refresh(self):
        """Exchange the stored refresh token for new access/refresh tokens.

        Falls back to a full login() whenever no refresh token is available,
        the API rejects the refresh, or the response lacks an access token.
        Returns {token, refresh, expires}.
        """
        if not self.__refresh_token:
            _logger.debug("No refresh token available. Logging in.")
            return await self.login()
        refresh_payload = {
            "client_id": self.__client_id,
            "client_secret": self.__client_secret,
            "refresh_token": self.__refresh_token,
        }
        # Start a fresh HTTP session and clear token state before refreshing.
        self.__session = requests.Session()
        self.__api_token = None
        self.__refresh_token = None
        self.__api_expires = None
        try:
            _logger.debug("Sending refresh payload: %s", refresh_payload)
            data = await self.api(
                method="POST",
                path="oauth2/token/refresh",
                data=refresh_payload,
                headers=None,
                no_check=True,
            )
        except TrackimoAPIError as apierror:
            _logger.debug("API Error. Trying to log in. %s", apierror.body)
            return await self.login()
        except TrackimoAccessDenied as apierror:
            _logger.debug("Refresh token rejected. Trying to log in. %s", apierror.body)
            return await self.login()
        except Exception as err:
            raise err
        if not data or not "access_token" in data:
            _logger.debug("Could not refresh. Trying to log in.")
            return await self.login()
        self.__api_token = data["access_token"]
        if "refresh_token" in data:
            _logger.debug("Token refreshed. Updating token.")
            self.__refresh_token = data["refresh_token"]
        if "expires_in" in data:
            _logger.debug("Token refreshed. Updating expiry time.")
            # NOTE(review): same ms-vs-seconds question as in login() — the
            # value is divided by 1000 before being used as seconds.
            self.__api_expires = datetime.now() + timedelta(
                seconds=int(data["expires_in"]) / 1000
            )
        await self.__post_login()
        return {
            "token": self.__api_token,
            "refresh": self.__refresh_token,
            "expires": self.__api_expires,
        }
    async def __post_login(self):
        """Fetch the authenticated user and cache the user/account id.

        Called after every successful login/refresh. Raises
        UnableToAuthenticate when the user lookup returns nothing.
        """
        handler = UserHandler(self)
        user = await handler.get()
        if not user:
            raise UnableToAuthenticate("Could not fetch user information.")
        self.__user = user
        self.__trackimo_accountid = user.accountId
        return user
def __request(self, method="GET", url=None, params=None, json=None, headers=None):
_logger.debug(
{
"url": url,
"params": params,
"data": json,
"headers": headers,
}
)
try:
response = self.__session.request(
method, url, params=params, json=json, headers=headers
)
except Exception as err:
_logger.error("No response at all")
_logger.exception(err)
status_code = getattr(response, "status_code", None)
body = getattr(response, "body", None)
try:
data = response.json()
except:
data = None
if not status_code:
raise TrackimoAPIError("Trackimo API failed to repond.", response=response)
success = 200 <= response.status_code <= 299
if response.status_code == 401 or response.status_code == 403:
raise TrackimoAccessDenied(
"Trackimo API Access Denied",
status_code=response.status_code,
body=body,
json=data,
headers=response.headers,
response=response,
)
if not success:
raise TrackimoAPIError(
"Trackimo API Error",
status_code=response.status_code,
body=body,
json=data,
headers=response.headers,
response=response,
)
return data
async def api(
    self,
    method="GET",
    path="",
    data=None,
    headers=None,
    no_check=False,
    use_internal_api=False,
    query_string=None,
):
    """Make a request to the Trackimo API.

    Attributes:
        method (str): The request verb ie GET PUT POST DELETE
        path (str): The path of the API endpoint
        data (object): Data to be passed as a querystring (GET) or as the
            JSON body (POST/PUT/DELETE)
        headers (object): Any headers to be sent
        no_check (bool): Don't check for an expired token
        use_internal_api (bool): Use the alternate internal API endpoint
        query_string (object): Extra query string parameters; takes
            precedence over ``data`` for GET requests

    Returns:
        The decoded JSON payload, or an empty dict when the API returned
        no body.

    Raises:
        NoSession: when login() has not been called yet.
        TrackimoAccessDenied / TrackimoAPIError: propagated from the
            underlying request handler.
    """
    if not self.__session:
        raise NoSession("There is no current API session. Please login() first.")
    # Use fresh containers instead of mutable default arguments. The
    # previous defaults (headers={}, query_string={}) were shared between
    # calls, and `headers` is mutated below when the Authorization header
    # is injected — so the bearer token leaked into every later call.
    headers = dict(headers) if headers else {}
    query_string = query_string or {}
    if not no_check and (
        self.__api_expires and (datetime.now() > self.__api_expires)
    ):
        _logger.debug("Refreshing token, it has expired.")
        await self.__token_refresh()
    url = (
        f"{self.__api_url}/{path}"
        if not use_internal_api
        else f"{self.__internal_url}/{path}"
    )
    method = method.upper()
    json = None
    params = None
    if method == "GET":
        # GET sends `data` as the query string unless an explicit
        # query_string overrides it.
        if data and not query_string:
            params = data
        elif query_string:
            params = query_string
    elif method in ("POST", "DELETE", "PUT"):
        # All body-carrying verbs share identical payload handling.
        if data:
            json = data
        if query_string:
            params = query_string
    if self.__api_token and not no_check:
        headers["Authorization"] = f"Bearer {self.__api_token}"
    data = None

    def process_request(method, url, params, json, headers):
        # Synchronous worker executed on the loop's default thread pool.
        return self.__request(
            method=method, url=url, params=params, json=json, headers=headers
        )

    try:
        data = await self.__loop.run_in_executor(
            None, process_request, method, url, params, json, headers
        )
    except TrackimoAccessDenied:
        if no_check:
            # Token checking is disabled, so a refresh-and-retry is
            # pointless; propagate the original exception unchanged.
            raise
        _logger.debug("Access Denied. Need to refresh token.")
        await self.__token_refresh()
        _logger.debug("Retrying request after re-auth")
        data = await self.__loop.run_in_executor(
            None, process_request, method, url, params, json, headers
        )
    return data if data else {}
async def api_get(self, path=None, data=None, query_string=None):
    """Make a get request to the Trackimo API

    Attributes:
        path (str): The path of the API endpoint
        data (object): Data to be passed as a querystring
        query_string (object): Optional extra query string parameters
            (takes precedence over ``data``). Added for consistency with
            api_post/api_delete/api_put; defaults to None so existing
            callers are unaffected.
    """
    return await self.api("GET", path=path, data=data, query_string=query_string)
async def api_post(self, path=None, data=None, query_string=None):
    """Make a post request to the Trackimo API
    Attributes:
        path (str): The path of the API endpoint
        data (object): Data to be passed as a json payload
        query_string (object): Optional query string parameters
    """
    return await self.api("POST", path=path, data=data, query_string=query_string)
async def api_delete(self, path=None, data=None, query_string=None):
    """Make a delete request to the Trackimo API
    Attributes:
        path (str): The path of the API endpoint
        data (object): Data to be passed as a json payload
        query_string (object): Optional query string parameters
    """
    return await self.api("DELETE", path=path, data=data, query_string=query_string)
async def api_put(self, path=None, data=None, query_string=None):
    """Make a put request to the Trackimo API
    Attributes:
        path (str): The path of the API endpoint
        data (object): Data to be passed as a json payload
        query_string (object): Optional query string parameters
    """
    return await self.api("PUT", path=path, data=data, query_string=query_string)
| [
"troy@troykelly.com"
] | troy@troykelly.com |
a8016f7948019572509c378ff2bfdf66dd58aae8 | d165fc9cdb0d83b23bca7817cce5396ff8ab7e72 | /test_rc_car/flat_game/siraj_nn.py | 6151c847b93bf9d514f2542fc0d3cf2097e8e57d | [
"MIT"
] | permissive | Amaravati/outlace | 277d6e0fd915e1d3818b127d3c04a232572f8b07 | 4fba15e960ed318dd02c2d0fedb4fcd6f1ffa172 | refs/heads/master | 2021-01-09T06:09:09.002929 | 2017-02-04T14:48:42 | 2017-02-04T14:48:42 | 80,925,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | """
Simplistic implementation of the two-layer neural network.
Training method is stochastic (online) gradient descent with momentum.
As an example it computes XOR for given input.
Some details:
- tanh activation for hidden layer
- sigmoid activation for output layer
- cross-entropy loss
Less than 100 lines of active code.
"""
import numpy as np
import time
n_hidden = 10       # width of the single hidden layer
n_in = 10           # input dimensionality
n_out = 10          # output dimensionality
n_samples = 300     # number of synthetic training samples generated below
learning_rate = 0.01
momentum = 0.9
np.random.seed(0)   # fixed seed so weight init and data are reproducible
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x); works on scalars and arrays."""
    negative_exp = np.exp(-x)
    return 1.0 / (1.0 + negative_exp)
def tanh_prime(x):
    """Derivative of tanh evaluated at x: 1 - tanh(x)^2."""
    t = np.tanh(x)
    return 1 - t * t
def train(x, t, V, W, bv, bw):
    """One forward/backward pass for a single training sample.

    Args:
        x: input vector of shape (n_in,).
        t: binary target vector of shape (n_out,).
        V: input-to-hidden weight matrix (n_in, n_hidden).
        W: hidden-to-output weight matrix (n_hidden, n_out).
        bv: hidden-layer bias vector (n_hidden,).
        bw: output-layer bias vector (n_out,).

    Returns:
        (loss, (dV, dW, Ev, Ew)): cross-entropy loss plus the gradients
        for V, W and — via the per-layer errors — the biases bv and bw.
    """
    # forward
    A = np.dot(x, V) + bv   # hidden pre-activation
    Z = np.tanh(A)          # hidden activation
    B = np.dot(Z, W) + bw   # output pre-activation
    Y = sigmoid(B)          # output probabilities
    # backward
    Ew = Y - t              # output error: dLoss/dB for sigmoid + cross-entropy
    Ev = tanh_prime(A) * np.dot(W, Ew)  # error backpropagated to the hidden layer
    dW = np.outer(Z, Ew)
    dV = np.outer(x, Ev)
    # NOTE(review): loss becomes inf/NaN if Y reaches exactly 0 or 1 — no clipping here.
    loss = -np.mean ( t * np.log(Y) + (1 - t) * np.log(1 - Y) )
    # Note that we use error for each layer as a gradient
    # for biases
    return loss, (dV, dW, Ev, Ew)
def predict(x, V, W, bv, bw):
    """Deterministic forward pass with hard thresholding.

    Runs the same tanh hidden layer and sigmoid output as training, then
    cuts off at 0.5 to produce 0/1 integer predictions.
    """
    hidden = np.tanh(np.dot(x, V) + bv)
    output_scores = np.dot(hidden, W) + bw
    return (sigmoid(output_scores) > 0.5).astype(int)
# Setup initial parameters
# Note that initialization is crucial for first-order methods!
V = np.random.normal(scale=0.1, size=(n_in, n_hidden))
W = np.random.normal(scale=0.1, size=(n_hidden, n_out))
bv = np.zeros(n_hidden)
bw = np.zeros(n_out)
params = [V,W,bv,bw]
# Generate some data: random bit vectors, target is the bitwise complement.
X = np.random.binomial(1, 0.5, (n_samples, n_in))
T = X ^ 1
# Train with online SGD + momentum.
for epoch in range(100):
    err = []
    upd = [0]*len(params)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock benchmarking.
    t0 = time.perf_counter()
    for i in range(X.shape[0]):
        loss, grad = train(X[i], T[i], *params)
        # NOTE(review): the previously computed update is applied before the
        # new one is accumulated, so each step lags by one sample. Preserved
        # as-is — changing the order would alter the training trajectory.
        for j in range(len(params)):
            params[j] -= upd[j]
        for j in range(len(params)):
            upd[j] = learning_rate * grad[j] + momentum * upd[j]
        err.append( loss )
    print ('Epoch: %d, Loss: %.8f, Time: %.4fs'%(
        epoch, np.mean( err ), time.perf_counter()-t0) )
# Try to predict something
x = np.random.binomial(1, 0.5, n_in)
print ('XOR prediction:')
print (x)
#print predict(x, *params)
| [
"aamaravati3@gatech.edu"
] | aamaravati3@gatech.edu |
e947ab798f35f442dd5385a344c2690865fd1546 | 947867a3d446f7b790d1bcd0cfaf9cf3a1b4d841 | /python/example_code/ses/ses_deletereceiptfilter.py | 28fb99c7c111091b20d4bffdc0f378449cc1c645 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | qh96/aws-doc-sdk-examples | 1839e0b6379980418a3d1df44e205f9fd90f90d7 | 3d8d8aaf199c276e84e1d5e10c9355306e095a38 | refs/heads/master | 2020-04-05T08:50:08.236093 | 2018-11-07T02:46:07 | 2018-11-07T02:46:07 | 156,731,920 | 1 | 0 | Apache-2.0 | 2018-11-08T15:56:33 | 2018-11-08T15:56:32 | null | UTF-8 | Python | false | false | 1,246 | py | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create SES client
ses = boto3.client('ses')
# Delete an existing IP-address receipt filter. 'NAME' is a placeholder
# for the filter name to remove (SES DeleteReceiptFilter API).
response = ses.delete_receipt_filter(
  FilterName = 'NAME'
)
# The response contains only the HTTP metadata for the call.
print(response)
#snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
#snippet-sourcedescription:[ses_deletereceiptfilter.py demonstrates how to remove an existing filter for a specific IP address.]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[Amazon Simple Email Service]
#snippet-service:[ses]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-08-11]
#snippet-sourceauthor:[tapasweni-pathak]
| [
"jamisch@amazon.com"
] | jamisch@amazon.com |
d3a92669d402c8e28659a759ac1a2d6cc27440b5 | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/MindSpore/Research/cv/image_classification/FaceAttribute_for_MindSpore/train.py | a269bece0a922cf7323ebe7a7e5a57abd9157391 | [
"Apache-2.0"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 8,476 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face attribute train."""
import os
import time
import datetime
import argparse
import mindspore.nn as nn
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.communication.management import get_group_size, init, get_rank
from mindspore.nn import TrainOneStepCell
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, RunContext, _InternalCallbackParam, CheckpointConfig
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
from src.FaceAttribute.resnet18 import get_resnet18
from src.FaceAttribute.loss_factory import get_loss
from src.dataset_train import data_generator
from src.lrsche_factory import warmup_step
from src.logging import get_logger, AverageMeter
from src.config import config
devid = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True, device_id=devid)
class BuildTrainNetwork(nn.Cell):
    """Wrap a backbone network and a criterion into one Cell that maps a
    (data, label) pair directly to the training loss."""
    def __init__(self, network, criterion):
        super(BuildTrainNetwork, self).__init__()
        self.network = network      # backbone producing three logit tensors
        self.criterion = criterion  # multi-head loss over the three logits
        self.print = P.Print()      # NOTE(review): appears unused in this class
    def construct(self, input_data, label):
        # The backbone emits one logit tensor per attribute head.
        logit0, logit1, logit2 = self.network(input_data)
        loss = self.criterion(logit0, logit1, logit2, label)
        return loss
def parse_args():
    """Parse the Face Attributes command line; unrecognized arguments are
    tolerated and silently discarded (parse_known_args)."""
    parser = argparse.ArgumentParser('Face Attributes')
    parser.add_argument('--mindrecord_path', type=str, default='',
                        help='dataset path, e.g. /home/data.mindrecord')
    parser.add_argument('--pretrained', type=str, default='',
                        help='pretrained model to load')
    parser.add_argument('--local_rank', type=int, default=0,
                        help='current rank to support distributed')
    parser.add_argument('--world_size', type=int, default=8,
                        help='current process number to support distributed')
    parsed, _unknown = parser.parse_known_args()
    return parsed
def train():
    """Train the face-attribute ResNet-18 end to end.

    Builds the dataloader, backbone and loss from the static `config`,
    optionally restores a pretrained checkpoint, then runs a manual
    training loop with per-epoch checkpointing (rank 0 only) and
    throughput logging. Supports single-device and data-parallel runs.
    """
    # logger
    args = parse_args()
    # init distributed
    if args.world_size != 1:
        init()
        args.local_rank = get_rank()
        args.world_size = get_group_size()
    # Copy static configuration onto the args namespace so everything
    # downstream reads a single object.
    args.per_batch_size = config.per_batch_size
    args.dst_h = config.dst_h
    args.dst_w = config.dst_w
    args.workers = config.workers
    args.attri_num = config.attri_num
    args.classes = config.classes
    args.backbone = config.backbone
    args.loss_scale = config.loss_scale
    args.flat_dim = config.flat_dim
    args.fc_dim = config.fc_dim
    args.lr = config.lr
    args.lr_scale = config.lr_scale
    args.lr_epochs = config.lr_epochs
    args.weight_decay = config.weight_decay
    args.momentum = config.momentum
    args.max_epoch = config.max_epoch
    args.warmup_epochs = config.warmup_epochs
    args.log_interval = config.log_interval
    args.ckpt_path = config.ckpt_path
    # Single-device runs use a larger batch; distributed runs scale the LR.
    if args.world_size == 1:
        args.per_batch_size = 256
    else:
        args.lr = args.lr * 4.
    if args.world_size != 1:
        parallel_mode = ParallelMode.DATA_PARALLEL
    else:
        parallel_mode = ParallelMode.STAND_ALONE
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=args.world_size)
    # model and log save path
    args.outputs_dir = os.path.join(args.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, args.local_rank)
    loss_meter = AverageMeter('loss')
    # dataloader
    args.logger.info('start create dataloader')
    de_dataloader, steps_per_epoch, num_classes = data_generator(args)
    args.steps_per_epoch = steps_per_epoch
    args.num_classes = num_classes
    args.logger.info('end create dataloader')
    args.logger.save_args(args)
    # backbone and loss
    args.logger.important_info('start create network')
    create_network_start = time.time()
    network = get_resnet18(args)
    criterion = get_loss()
    # load pretrain model
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        # Strip optimizer state ('moments.') and the 'network.' prefix so the
        # checkpoint keys line up with the bare backbone parameter names.
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('network.'):
                param_dict_new[key[8:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load model {} success'.format(args.pretrained))
    # optimizer and lr scheduler
    lr = warmup_step(args, gamma=0.1)
    opt = Momentum(params=network.trainable_params(),
                   learning_rate=lr,
                   momentum=args.momentum,
                   weight_decay=args.weight_decay,
                   loss_scale=args.loss_scale)
    train_net = BuildTrainNetwork(network, criterion)
    # mixed precision training
    criterion.add_flags_recursive(fp32=True)
    # package training process
    train_net = TrainOneStepCell(train_net, opt, sens=args.loss_scale)
    context.reset_auto_parallel_context()
    # checkpoint (only rank 0 writes checkpoints)
    if args.local_rank == 0:
        ckpt_max_num = args.max_epoch
        train_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch, keep_checkpoint_max=ckpt_max_num)
        ckpt_cb = ModelCheckpoint(config=train_config, directory=args.outputs_dir, prefix='{}'.format(args.local_rank))
        cb_params = _InternalCallbackParam()
        cb_params.train_network = train_net
        cb_params.epoch_num = ckpt_max_num
        cb_params.cur_epoch_num = 0
        run_context = RunContext(cb_params)
        ckpt_cb.begin(run_context)
    train_net.set_train()
    t_end = time.time()
    t_epoch = time.time()
    old_progress = -1
    i = 0
    for step_i, (data, gt_classes) in enumerate(de_dataloader):
        data_tensor = Tensor(data, dtype=mstype.float32)
        gt_tensor = Tensor(gt_classes, dtype=mstype.int32)
        loss = train_net(data_tensor, gt_tensor)
        loss_meter.update(loss.asnumpy()[0])
        # save ckpt
        if args.local_rank == 0:
            cb_params.cur_step_num = i + 1
            cb_params.batch_num = i + 2
            ckpt_cb.step_end(run_context)
        if i % args.steps_per_epoch == 0 and args.local_rank == 0:
            cb_params.cur_epoch_num += 1
        # save Log
        if i == 0:
            time_for_graph_compile = time.time() - create_network_start
            args.logger.important_info('{}, graph compile time={:.2f}s'.format(args.backbone, time_for_graph_compile))
        if i % args.log_interval == 0 and args.local_rank == 0:
            time_used = time.time() - t_end
            epoch = int(i / args.steps_per_epoch)
            fps = args.per_batch_size * (i - old_progress) * args.world_size / time_used
            args.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(epoch, i, loss_meter, fps))
            t_end = time.time()
            loss_meter.reset()
            old_progress = i
        if i % args.steps_per_epoch == 0 and args.local_rank == 0:
            epoch_time_used = time.time() - t_epoch
            epoch = int(i / args.steps_per_epoch)
            fps = args.per_batch_size * args.world_size * args.steps_per_epoch / epoch_time_used
            args.logger.info('=================================================')
            args.logger.info('epoch time: epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i, fps))
            args.logger.info('=================================================')
            t_epoch = time.time()
        i += 1
    args.logger.info('--------- trains out ---------')
if __name__ == "__main__":
train()
| [
"1571856591@qq.com"
] | 1571856591@qq.com |
22c3b581600e5d06700052f819ba896e25a18821 | 4872a403db1cb386a881c85c54308acb2480ca0e | /manage.py | 895c068f294129021a747c65d5884d88dd70324d | [] | no_license | jiangboLee/LEEBlog1 | 6d1e592b4f84b9c7780a23d1af192ef660d700c5 | 8f041750410e6bdbdf8f95a774750ba35c57fab0 | refs/heads/master | 2020-12-02T12:46:39.385049 | 2017-08-22T04:49:22 | 2017-08-22T04:49:22 | 96,593,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module before dispatching the CLI command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LEEBlog.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Hand the raw argv (e.g. ["manage.py", "runserver"]) to Django.
    execute_from_command_line(sys.argv)
| [
"584556825@qq.com"
] | 584556825@qq.com |
1c633248bdbe9600e57f95c55c00e346f1b7e29c | 29e10bbfcf805993d5acd3dd28aee9b2de3e4c76 | /posthog/models/filters/stickiness_filter.py | dcc963918d805564fcd7d970c75757e8bc23ebf7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ramesharun/posthog | 191375316a304b97848ef8ebcb66559ecf443821 | 51b291602e718e1bf213438f33a4cb7248761851 | refs/heads/master | 2023-02-05T00:53:55.563148 | 2020-12-23T11:56:10 | 2020-12-23T11:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | from datetime import datetime
from typing import Any, Callable, Dict, Optional, Union
from django.db.models.expressions import Value
from django.db.models.functions.datetime import TruncDay, TruncHour, TruncMinute, TruncMonth, TruncWeek, TruncYear
from django.http import HttpRequest
from django.utils import timezone
from posthog.constants import INTERVAL, STICKINESS_DAYS
from posthog.models.entity import Entity
from posthog.models.event import Event
from posthog.models.filters.filter import Filter
from posthog.models.team import Team
from posthog.utils import relative_date_parse
class StickinessFilter(Filter):
    """Filter for stickiness queries.

    Extends the base Filter with an interval, the pre-computed number of
    interval buckets between date_from and date_to (``num_intervals``),
    the stickiness-day threshold, and the target entity of the query.
    """

    num_intervals: int
    date_from: datetime
    date_to: datetime
    interval: str = "Day"
    entityId: Optional[str]
    type: Optional[str]
    stickiness_days: int

    def __init__(self, data: Optional[Dict[str, Any]] = None, request: Optional[HttpRequest] = None, **kwargs) -> None:
        super().__init__(data, request)
        if request:
            data = {
                **(data if data else {}),
                **request.GET.dict(),
            }
        elif not data:
            raise ValueError("You need to define either a data dict or a request")
        team: Optional[Team] = kwargs.get("team", None)
        if not team:
            raise ValueError("Team must be provided to stickiness filter")
        if self._date_from == "all":
            # "All time" needs a callable that resolves the team's earliest
            # event timestamp.
            get_earliest_timestamp: Optional[Callable] = kwargs.get("get_earliest_timestamp", None)
            if not get_earliest_timestamp:
                raise ValueError("Callable must be provided when date filtering is all time")
            self._date_from = get_earliest_timestamp(team_id=team.pk)
        if not self._date_from:
            self._date_from = relative_date_parse("-7d")
        if not self._date_to:
            self._date_to = timezone.now().isoformat()
        self.stickiness_days = int(data.get(STICKINESS_DAYS, "0"))
        self.interval = data.get(INTERVAL, "day").lower()
        self.entityId = data.get("entityId", None)
        self.type = data.get("type", None)
        # Number of whole interval buckets in [date_from, date_to].
        total_seconds = (self.date_to - self.date_from).total_seconds()
        if self.interval == "minute":
            self.num_intervals = int(total_seconds // 60)
        elif self.interval == "hour":
            self.num_intervals = int(total_seconds // 3600)
        elif self.interval == "day":
            self.num_intervals = int(total_seconds // 86400)
        elif self.interval == "week":
            self.num_intervals = (self.date_to - self.date_from).days // 7
        elif self.interval == "month":
            # Bug fix: the year delta must be scaled to months. Without
            # "* 12", Dec 2019 -> Jan 2020 evaluated to (1) + (1 - 12) == -10
            # instead of 1.
            self.num_intervals = (self.date_to.year - self.date_from.year) * 12 + (
                self.date_to.month - self.date_from.month
            )
        else:
            raise ValueError(f"{self.interval} not supported")
        # Two extra buckets cover the partial intervals at either end.
        self.num_intervals += 2

    def trunc_func(self, field_name: str) -> Union[TruncMinute, TruncHour, TruncDay, TruncWeek, TruncMonth]:
        """Return the Django date-truncation expression matching self.interval."""
        trunc_class = {
            "minute": TruncMinute,
            "hour": TruncHour,
            "day": TruncDay,
            "week": TruncWeek,
            "month": TruncMonth,
        }.get(self.interval)
        if trunc_class is None:
            raise ValueError(f"{self.interval} not supported")
        return trunc_class(field_name)

    @property
    def target_entity(self) -> Entity:
        """The entity this stickiness query targets.

        Prefers an explicitly provided entity list; otherwise builds one
        from the entityId/type query parameters.
        """
        if self.entities:
            return self.entities[0]
        elif self.entityId and self.type:
            return Entity({"id": self.entityId, "type": self.type})
        else:
            raise ValueError("An entity must be provided for stickiness target entity to be determined")
| [
"noreply@github.com"
] | noreply@github.com |
a628abcf7734d5f34909b24a744a964221a470f3 | a47a5d5280041e43b1a2dfdfdd0e77929a4e0a7b | /lasagne_mnist.py | f1993b60e070b48855275cd2c5bf818845507120 | [] | no_license | truongkyle/dummy_dataset_fpga | 69c2a4f7ea9ce795b005865a02fc11c713d1e03b | a6b140a86bfbd5e335590716ebbbbcaaed915d31 | refs/heads/master | 2020-08-31T02:03:05.532824 | 2019-12-05T06:49:55 | 2019-12-05T06:49:55 | 218,552,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,928 | py | #!/usr/bin/env python
"""
Usage example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def load_dataset():
    """Download MNIST if absent and return it as six numpy arrays:
    (X_train, y_train, X_val, y_val, X_test, y_test).

    Images are float32 in [0, 255/256] with shape (N, 1, 28, 28); labels
    are uint8 digit indices. The last 10000 training examples are held
    out as the validation set.
    """
    # We first define a download function, supporting both Python 2 and 3.
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve
    def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)
    # We then define functions for loading MNIST images and labels.
    # For convenience, they also download the requested files if needed.
    import gzip
    def load_mnist_images(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(-1, 1, 28, 28)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
        return data / np.float32(256)
    def load_mnist_labels(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data
    # We can now download and read the training and test set images and labels.
    X_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]
    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None):
    """Build the fixed 784-800-800-10 dropout MLP and return the Lasagne
    output layer (which gives access to the whole network)."""
    # This creates an MLP of two hidden layers of 800 units each, followed by
    # a softmax output layer of 10 units. It applies 20% dropout to the input
    # data and 50% dropout to the hidden layers.
    # Input layer, specifying the expected input shape of the network
    # (unspecified batchsize, 1 channel, 28 rows and 28 columns) and
    # linking it to the given Theano variable `input_var`, if any:
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                     input_var=input_var)
    # Apply 20% dropout to the input data:
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
    # Add a fully-connected layer of 800 units, using the linear rectifier, and
    # initializing weights with Glorot's scheme (which is the default anyway):
    l_hid1 = lasagne.layers.DenseLayer(
            l_in_drop, num_units=800,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform())
    # We'll now add dropout of 50%:
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
    # Another 800-unit layer:
    l_hid2 = lasagne.layers.DenseLayer(
            l_hid1_drop, num_units=800,
            nonlinearity=lasagne.nonlinearities.rectify)
    # 50% dropout again:
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
    # Finally, we'll add the fully-connected output layer, of 10 softmax units:
    l_out = lasagne.layers.DenseLayer(
            l_hid2_drop, num_units=10,
            nonlinearity=lasagne.nonlinearities.softmax)
    # Each layer is linked to its incoming layer(s), so we only need to pass
    # the output layer to give access to a network in Lasagne:
    return l_out
def build_custom_mlp(input_var=None, depth=2, width=800, drop_input=.2,
                     drop_hidden=.5):
    """Build a configurable dropout MLP and return its output layer.

    Args:
        input_var: optional Theano variable to bind as the network input.
        depth: number of hidden layers.
        width: units per hidden layer.
        drop_input: input dropout probability (0 disables the layer).
        drop_hidden: hidden-layer dropout probability (0 disables).
    """
    # By default, this creates the same network as `build_mlp`, but it can be
    # customized with respect to the number and size of hidden layers. This
    # mostly showcases how creating a network in Python code can be a lot more
    # flexible than a configuration file. Note that to make the code easier,
    # all the layers are just called `network` -- there is no need to give them
    # different names if all we return is the last one we created anyway; we
    # just used different names above for clarity.
    # Input layer and dropout (with shortcut `dropout` for `DropoutLayer`):
    network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                        input_var=input_var)
    if drop_input:
        network = lasagne.layers.dropout(network, p=drop_input)
    # Hidden layers and dropout:
    nonlin = lasagne.nonlinearities.rectify
    for _ in range(depth):
        network = lasagne.layers.DenseLayer(
                network, width, nonlinearity=nonlin)
        if drop_hidden:
            network = lasagne.layers.dropout(network, p=drop_hidden)
    # Output layer:
    softmax = lasagne.nonlinearities.softmax
    network = lasagne.layers.DenseLayer(network, 10, nonlinearity=softmax)
    return network
def build_cnn(input_var=None):
    """Build a small CNN (two conv+pool stages, one dense hidden layer,
    softmax output) and return the Lasagne output layer."""
    # As a third model, we'll create a CNN of two convolution + pooling stages
    # and a fully-connected hidden layer in front of the output layer.
    # Input layer, as usual:
    network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                        input_var=input_var)
    # This time we do not apply input dropout, as it tends to work less well
    # for convolutional layers.
    # Convolutional layer with 32 kernels of size 5x5. Strided and padded
    # convolutions are supported as well; see the docstring.
    network = lasagne.layers.Conv2DLayer(
            network, num_filters=32, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform())
    # Expert note: Lasagne provides alternative convolutional layers that
    # override Theano's choice of which implementation to use; for details
    # please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
    # Max-pooling layer of factor 2 in both dimensions:
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # Another convolution with 32 5x5 kernels, and another 2x2 pooling:
    network = lasagne.layers.Conv2DLayer(
            network, num_filters=32, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    network = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(network, p=.5),
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the 10-unit output layer with 50% dropout on its inputs:
    network = lasagne.layers.DenseLayer(
            lasagne.layers.dropout(network, p=.5),
            num_units=10,
            nonlinearity=lasagne.nonlinearities.softmax)
    return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) mini-batches of exactly `batchsize` items.

    Trailing items that do not fill a complete batch are dropped. When
    `shuffle` is True the data is visited in a random order (one
    permutation per call).
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize
    for start in range(0, last_start + 1, batchsize):
        if shuffle:
            batch_index = order[start:start + batchsize]
        else:
            batch_index = slice(start, start + batchsize)
        yield inputs[batch_index], targets[batch_index]
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='mlp', num_epochs=500):
    """Train the selected model on MNIST and report metrics.

    Args:
        model: 'mlp', 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID', or 'cnn'.
        num_epochs: number of passes over the training set.

    Prints per-epoch training/validation loss and validation accuracy,
    then the final test loss/accuracy. Unknown model names are reported
    and the function returns without training.
    """
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    if model == 'mlp':
        network = build_mlp(input_var)
    elif model.startswith('custom_mlp:'):
        depth, width, drop_in, drop_hid = model.split(':', 1)[1].split(',')
        network = build_custom_mlp(input_var, int(depth), int(width),
                                   float(drop_in), float(drop_hid))
    elif model == 'cnn':
        network = build_cnn(input_var)
    else:
        print("Unrecognized model type %r." % model)
        return
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.01, momentum=0.9)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
    # Minimal CLI: optional positional MODEL and EPOCHS arguments.
    if ('--help' in sys.argv) or ('-h' in sys.argv):
        print("Trains a neural network on MNIST using Lasagne.")
        print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
        print()
        print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
        print("       'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
        print("       with DEPTH hidden layers of WIDTH units, DROP_IN")
        print("       input dropout and DROP_HID hidden dropout,")
        print("       'cnn' for a simple Convolutional Neural Network (CNN).")
        print("EPOCHS: number of training epochs to perform (default: 500)")
    else:
        kwargs = {}
        if len(sys.argv) > 1:
            kwargs['model'] = sys.argv[1]
        if len(sys.argv) > 2:
            kwargs['num_epochs'] = int(sys.argv[2])
        main(**kwargs)
| [
"leky1610fx@gmail.com"
] | leky1610fx@gmail.com |
4f714d6172a078dceda6b04a5faec6a75aeec621 | dc63e528012fb2f3e15b73e05c924236760d01b1 | /cloudify_azure/resources/compute/virtualmachine/virtualmachine_utils.py | 4a67d65a4df9ff6e52f6dd881668444d4f9e6848 | [
"Apache-2.0"
] | permissive | cloudify-cosmo/cloudify-azure-plugin | 515b6285b63c2a01ae4d666957541a1f08472410 | 361c48bc4abe38cf57354e8d36839137462ad345 | refs/heads/master | 2023-08-21T14:23:06.673284 | 2023-07-30T10:44:39 | 2023-07-30T10:44:39 | 36,666,947 | 4 | 14 | Apache-2.0 | 2023-07-30T10:44:41 | 2015-06-01T14:42:32 | Python | UTF-8 | Python | false | false | 3,521 | py | # #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
def check_if_configuration_changed(ctx, update_payload, current_vm):
    """Return True when the desired VM payload differs from the live VM.

    Scalar-ish properties are compared via ordered(); the three nested
    profile dicts are compared with diff_dictionaries(), which ignores
    extra keys present only on the live VM.
    """
    simple_props = ('location', 'tags', 'plan', 'availability_set',
                    'eviction_policy', 'billing_profile', 'priority',
                    'hardware_profile')
    for prop in simple_props:
        wanted = update_payload.get(prop)
        actual = current_vm.get(prop)
        # Only a non-empty desired value can signal a change.
        if wanted and ordered(wanted) != ordered(actual):
            ctx.logger.info("{prop} changed.".format(prop=prop))
            ctx.logger.info("update payload: {content}.".format(
                content=wanted))
            ctx.logger.info("current configuration: {content}.".format(
                content=actual))
            return True
    for prop in ('os_profile', 'storage_profile', 'network_profile'):
        wanted = update_payload.get(prop, {})
        # network_profile arrives as an SDK object; normalise it to a dict.
        if prop == 'network_profile' and update_payload.get(prop):
            wanted = update_payload.get(prop).as_dict()
        actual = current_vm.get(prop, {})
        if diff_dictionaries(wanted, actual):
            ctx.logger.info("{prop} changed.".format(prop=prop))
            return True
    return False
def diff_dictionaries(update_dict, current_conf_dict):
    """
    Returns True if update_dict has changes in a key that doesn't appear in
    current_conf_dict.
    current_conf_dict can have additional keys and its not considered as a
    diff.
    """
    for key, wanted in update_dict.items():
        if isinstance(wanted, dict):
            # Recurse into nested dicts; missing branches compare against {}.
            if diff_dictionaries(wanted, current_conf_dict.get(key, {})):
                return True
        elif ordered(wanted) != ordered(current_conf_dict.get(key)):
            ctx.logger.info(
                'Changes found in diff_dictionaries: key={key}\n'.format(
                    key=key))
            ctx.logger.info(
                'update_dict: {}'.format(ordered(wanted)))
            ctx.logger.info(
                'current_conf_dict: {}'.format(ordered(
                    current_conf_dict.get(key))))
            return True
    return False
def ordered(obj):
    """
    Recursively normalise *obj* into a canonically ordered, comparable form:
    dicts become sorted (key, value) pair lists, lists are sorted, strings
    are lower-cased and numbers are stringified so mixed values compare.
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(item) for item in obj)
    if isinstance(obj, str):
        return obj.lower()
    if isinstance(obj, (int, float)):
        return str(obj)
    return obj
| [
"noreply@github.com"
] | noreply@github.com |
2cff4309bada63d3fcf90cd5da550da6f77ecbd8 | ba07ca708a80efeeaafcbe95e95c26d1bb334897 | /files/ord_class.py | b7292cffc9717d46064ea7dd2548d2f1290132bc | [] | no_license | arickels11/Final_Project | 8c7f93beb3d120c1b91244127c26d235659f96c7 | d268001047a52594313692cf3cd758ad38ccbfeb | refs/heads/master | 2020-09-21T20:59:33.291795 | 2019-12-09T03:04:18 | 2019-12-09T03:04:18 | 224,928,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,049 | py | """CIS 189
Alex Rickels
Final Project - Tuscan Eatery"""
import datetime
from datetime import timedelta
class Order:
    """A restaurant order: a validated table number plus a list of menu dishes."""

    def __init__(self, table, dishes_list=None):
        '''
        :param table: table # for the order (integer 1-16), so other
            employees can bring the food if it is ready
        :param dishes_list: dishes in the order; must be non-empty menu items
        :raises EnterTableNumber: the table number was left blank
        :raises InvalidTableNumber: the table is not an integer in 1-16
        :raises MissingOrderError: no dishes were given
        :raises InvalidDishError: a dish is not on the menu
        '''
        if table == '':  # input validation
            raise EnterTableNumber  # exception handling
        nums = set("1234567890")  # input validation
        if not nums.issuperset(str(table)):
            raise InvalidTableNumber  # exception handling
        if not 1 <= int(table) <= 16:  # input validation
            raise InvalidTableNumber  # exception handling
        self.table = table
        # Fix: the old default argument dishes_list=[] was a single mutable
        # list shared by every call; use a None sentinel instead.  Behaviour
        # is unchanged - an omitted/empty order still raises below.
        if dishes_list is None:
            dishes_list = []
        if len(dishes_list) == 0:  # input validation
            raise MissingOrderError  # exception handling
        menu_list = (  # the valid menu items, input validation
            'bruschetta',
            'arugula salad',
            'carbonara',
            'spaghetti',
            'risotto',
            'focaccia',
            'gelato'
        )
        for item in dishes_list:
            if item not in menu_list:
                raise InvalidDishError  # exception handling
        self.dishes_list = dishes_list

    def change_table(self, table):
        """Move the order to another table (no re-validation, as before)."""
        self.table = table

    def update_dishes_list(self, dishes_list):
        """Replace the order's dish list (no re-validation, as before)."""
        self.dishes_list = dishes_list

    def order_input(self):
        """Print the table number and the prep start time for each dish.

        NOTE(review): print_table()/get_time() both return None, so the
        value returned here is literally "NoneNone"; kept for compatibility.
        """
        return str(self.print_table()) + str(self.get_time())

    def print_table(self):
        """Print which table the order belongs to."""
        print('Table:', self.table)

    def get_time(self):
        """
        Print, for every dish, the wall-clock time at which preparation
        should start so the whole order is ready 20 minutes from now.
        :return: None (results are printed)
        """
        def time(dish):
            # Minutes of preparation needed for each menu item.
            dish_dict = {
                'bruschetta': 5,
                'arugula salad': 3,
                'carbonara': 12,
                'spaghetti': 7,
                'risotto': 20,
                'focaccia': 3,
                'gelato': 2
            }
            ready_time = datetime.datetime.now() + timedelta(minutes=20)  # ready time is now + 20 minutes
            dish_time = dish_dict[str(dish)]
            start_time = ready_time - timedelta(minutes=dish_time)  # start time = ready time - prep time
            return start_time.strftime('%X')  # formatting for only time, no date
        for dish in self.dishes_list:
            prep_time = str(time(dish))
            print('Begin ' + str(dish) + " at " + prep_time)  # prep time for each dish
class InvalidTableNumber(Exception):
    # This custom exception is raised if table # is not within specified range
    """Raised when the table number is not an integer in the range 1-16."""
    pass
class EnterTableNumber(Exception):
    # This custom exception if table # was left blank
    """Raised when the table number was left blank."""
    pass
class InvalidDishError(Exception):
    # This custom exception is dish is not on the menu
    """Raised when an ordered dish is not on the menu."""
    pass
class MissingOrderError(Exception):
    # This custom exception is if no dishes are entered in order
    """Raised when an order is created with no dishes."""
    pass
| [
"arickels93@gmail.com"
] | arickels93@gmail.com |
6ac86ed6fb4e52ebf43c10ef08a46b9288aff6e2 | a5c1dc40ee5e9383f00ebee400d59920ff867d67 | /InClass/Class18.py | f04283f12300693c789dc8769a8636e3ed352e9e | [] | no_license | Neurotrophin/CPSC230ParlettPelleriti | 9e5bce20f283bfa18e6f9f85cbb676e17b89d181 | 9977a6a6629b71fe26d083082c7fe79539a7219f | refs/heads/master | 2022-03-29T23:23:05.918982 | 2019-12-09T22:58:11 | 2019-12-09T22:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #MUTABLE OBJECTS
def petAdder(dict):
    """Prompt for an owner name and pet name and record them in *dict*.

    The dictionary is mutated in place (teaching demo of mutability).
    NOTE(review): the parameter shadows the built-in ``dict`` type; renaming
    it would be clearer but would change the call interface, so it stays.
    """
    name = input("What's your name ")
    pet = input("What's your pet's name")
    if name in dict:
        print("You're already in here so...")
    else:
        dict[name] = pet
# Demo: dicts are mutable, so petAdder's in-place updates are visible here
# without any return value.
d = {}
i = 0
while i < 3:
    petAdder(d)
    i += 1
print(d)
#---------------------------------------------
def birthday(my_age):
    """Increment a local copy of *my_age* and print it; returns None.

    Teaching demo: ints are immutable, so the caller's variable is unchanged.
    """
    print("INSIDE LOOP NOW---------------------------")
    print("I'm", my_age)
    my_age += 1
    print("NOW I'm", my_age)
    print("ENDING LOOP-------------------------------")
# Demo: my_age is still 80 afterwards - the function only changed its local copy.
my_age = 80
print(my_age)
birthday(my_age)
print(my_age)
#---------------------------------------------
def birthday(my_age):
    """Increment *my_age*, print the steps, and return the new value.

    Redefines the earlier birthday(): returning the result lets the caller
    see the change (contrast with the returnless version above).
    """
    print("INSIDE LOOP NOW---------------------------")
    print("I'm", my_age)
    my_age += 1
    print("NOW I'm", my_age)
    print("ENDING LOOP-------------------------------")
    return my_age
# Demo: reassigning from the return value makes the change stick (81).
my_age = 80
print(my_age)
my_age = birthday(my_age)
print(my_age)
#---------------------------------------------
| [
"parlett@chapman.edu"
] | parlett@chapman.edu |
ecf2e202398d9c58d9d5bcb9846dbebaf58a02aa | 0ccab2965458454d6a4802b47d33310e43c10d8f | /classes/student.py | c9e7d33683deae9b858dc5fb04d7034fd00d39ca | [] | no_license | jazib-mahmood-attainu/Ambedkar_Batch | 11e66125647b3b348d4567862f8fc20a3457b2f0 | c99be9a401b8d00f6ca47398f48e90ead98f4898 | refs/heads/main | 2023-08-01T13:13:43.357769 | 2021-09-25T03:54:27 | 2021-09-25T03:54:27 | 390,405,238 | 16 | 10 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | class Student:
def __init__(self,roll,name,age):
self.roll = roll
self.name = name
self.age = age
def reads(self):
print(self.name,"is reading")
# Demo: create two Student instances and show their independent attributes.
preeti = Student(10,"Preeti",24)
print(preeti.name)
print(preeti.roll)
print(preeti.age)
preeti.reads()
print("**********")
sapna = Student(11,"Sapna",19)
print(sapna.name)
print(sapna.roll)
print(sapna.age)
sapna.reads()
| [
"jazib.prof@gmail.com"
] | jazib.prof@gmail.com |
e6160cfebc2a43b5c30200a58e3ff04cd3aafdcc | c819c2f4b1547762d12015ebf7263f5db2b9cfb9 | /polls/admin.py | 0c56726fe2a93d3c015fe565b465aaefd2272649 | [] | no_license | saomajixiao/Django_vote | e66b77d6e0a18ac812c4973bf0036e5c9a6ab701 | b0561482fa3ee0c56d1bc5285565a495d3fb3150 | refs/heads/master | 2020-04-23T00:57:55.430251 | 2019-04-19T14:47:03 | 2019-04-19T14:47:03 | 170,798,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Question,Choice
class ChoiceInline(admin.TabularInline):
    """Inline editor so Choices can be edited on the Question admin page."""
    model = Choice
    extra = 3  # number of blank choice forms shown by default
class QuestionAdmin(admin.ModelAdmin):
    """Admin layout for Question: editable fields, inline choices, list view."""
    fields = ['pub_date', 'question_text']
    inlines = [ChoiceInline]  # edit related Choices on the same page
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question_text']
# Register the models so they appear in the Django admin site.
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
| [
"prnedved@163.com"
] | prnedved@163.com |
bb225b5211002db9b4aee185f095dae4f1e68fa8 | 4146e7cc441f51d66bd42d0fd49d436079cfacb0 | /par.py | d3ddb2fc89c56d99d52732f4efba49cbe9c7bc74 | [] | no_license | Iboll/untitled2 | 8dbcf3206adae6e633bfcde09ea345fb2b8ce6d8 | 9276c81a880134ecb5817977cb07d9ec568413ae | refs/heads/master | 2021-03-15T02:12:14.722375 | 2020-04-11T20:37:04 | 2020-04-11T20:37:04 | 246,816,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from flask_restful import reqparse
# Request parser for the user resource: every field is mandatory, and
# 'age' is coerced to int; all other arguments stay strings.
parser = reqparse.RequestParser()
parser.add_argument('email', required=True)
parser.add_argument('name', required=True)
parser.add_argument('surname', required=True)
parser.add_argument('age', required=True, type=int)
parser.add_argument('position', required=True)
parser.add_argument('speciality', required=True)
parser.add_argument('address', required=True)
parser.add_argument('about', required=True)
"soltan.shamgunov@yandex.ru"
] | soltan.shamgunov@yandex.ru |
588f53f955204f081034a2b5b07dc3312c3d971e | 101cab2e63d8cd73b6db0d9655291f181789ae0f | /catalog_app/tests/test_forms.py | c17f91136c53c9e48e60757dc77a48609fb00e65 | [] | no_license | TheWoops/test-deploy-backend | 268658f2f215b30c3d156f2b2a59f41e99452a9c | 2d15a25fc0188f2822914769ab4f731ad1d7066a | refs/heads/master | 2022-07-28T14:51:33.922973 | 2021-04-11T18:16:06 | 2021-04-11T18:16:06 | 350,643,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from django.test import TestCase
from catalog_app.forms import UpdateCustomerForm
# Create your tests here.
class TestUpdateCustomerForm(TestCase):
    """Unit tests for UpdateCustomerForm's label, help text and validation."""
    def test_new_location_label(self):
        '''Test if attribute new_location has the correct label assigned'''
        form = UpdateCustomerForm()
        # Django renders a None label as the field name, so both are correct.
        self.assertTrue(form.fields['new_location'].label == None or form.fields['new_location'].label == 'new_location')
    def test_new_location_help_text(self):
        """Test if help_text of new location attribute is correct"""
        form = UpdateCustomerForm()
        actual_help_text = form.fields['new_location'].help_text
        self.assertEqual(actual_help_text, "Enter a new company location")
    def test_clean_new_location(self):
        """Test if validation error is thrown if input longer 15 chars"""
        form = UpdateCustomerForm(data ={'new_location': "1234567891111110"})
        self.assertFalse(form.is_valid())
        # Second case (the bare string below is a no-op left as written):
        """Test if NO validation error is thrown if input max. 15 chars"""
        form = UpdateCustomerForm(data ={'new_location': "123456789111111"})
        self.assertTrue(form.is_valid())
| [
"thewoops.deeplearning@gmail.com"
] | thewoops.deeplearning@gmail.com |
98fa4703bd418ed584d3c0b4069f185a536db5ec | 87e7f159b48ad4e2b784c8846bed37e1825fb375 | /gamma/grd_batch_process.py | 2438f8c31f8555e79c2e0e4fb469c0d34b7b1584 | [] | no_license | whigg/GeorgeVI-surface-melt | 0db560640209911d5ef432ebf1fdef49b1f9957a | 0778de50fa747a4165273c9ef9edd65bf783fd34 | refs/heads/master | 2023-07-10T22:14:57.455070 | 2020-05-28T12:20:57 | 2020-05-28T12:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,982 | py | #activate the gamma environment in the shell
#gma
#geoutils
import os
import os.path
from os import path
import subprocess
from pyroSAR import identify
#import faulthandler; faulthandler.enable()
# Absolute paths used throughout the GRD processing pipeline below.
dem = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/DEM/REMA_resampled_10m.dem"  # reference DEM
dem_par = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/DEM/REMA_resampled_10m.dem_par"  # DEM parameter file
outdir = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/s1_grd_processed/grd_processed"  # geotiff output
rootdir = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/'  # raw Sentinel-1 downloads
study_area = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/study_area/study_area_square.shp'  # crop boundary
surplus_files = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/s1_grd_processed/to_be_deleted/'  # staging for deletions
def unzip():
    '''Unzips S1.zip files into .SAFE folders.'''
    # NOTE(review): the "(unknown)" text inside the f-string below looks like
    # a corrupted/lost placeholder (presumably {filename}) - confirm against
    # the original source before relying on this guard.
    for dirname in os.listdir(rootdir):
        if dirname.endswith(".zip"):
            filename = str(dirname)[:-4]
            #unzip S1 data to .SAFE file (skip if the .SAFE dir already exists)
            if not path.exists(f"{rootdir}(unknown).SAFE"):
                # local name 'unzip' shadows this function until the loop ends
                unzip = f"unzip {rootdir}{dirname} -d {rootdir}"
                os.system(unzip)
                print(f"{dirname} is now unzipped.")
def mk_POEORB_dir():
    '''creates the file structure needed for the orbit files.
    Make sure the correct orbit file is downloaded and placed inside the
    POEORB directory before running the processGRD() function. '''
    for dirname in os.listdir(rootdir):
        if dirname.endswith(".SAFE"):
            # Create <scene>.SAFE/osv/ only if it does not exist yet.
            if not path.exists(f"{rootdir}{dirname}/osv/"):
                os.makedirs(f"{rootdir}{dirname}/osv/")
    print("Directories for orbit files created.")
def downloadOSV():
    '''downloads the OSV file associated with each S1 image and places it into the correct file structure'''
    # NOTE(review): every "(unknown)" below appears to be a corrupted f-string
    # placeholder (presumably {filename}) from the data dump - verify before use.
    for dirname in os.listdir(rootdir):
        if dirname.endswith(".zip"):
            filename = str(dirname)[:-4]
            if path.exists(f"{rootdir}(unknown).SAFE/osv/"):
                scene = f"{rootdir}{dirname}"
                # Platform and acquisition date are sliced from the S1 naming
                # convention (e.g. S1B_..._YYYYMMDD...).
                platform = str(dirname)[:3]
                year = str(dirname)[17:21]
                month = str(dirname)[21:23]
                day = str(dirname)[23:25]
                id = identify(scene)
                id.getOSV(osvdir=f'{rootdir}(unknown).SAFE/osv/', osvType='POE') #downloads OSV file as a zip file located in {rootdir}/POEORB/S1B/2019/05/
                if day != "01":
                    unzip = f"unzip {rootdir}(unknown).SAFE/osv/POEORB/{platform}/{year}/{month}/*.zip -d {rootdir}(unknown).SAFE/osv/POEORB"
                else:
                    # First of the month: the precise orbit lives under the
                    # previous month's directory, zero-padded to two digits.
                    pre_month = int(month)-1
                    if pre_month > 9:
                        orb_month = str(pre_month)
                    else:
                        orb_month = '0'+ str(pre_month)
                    unzip = f"unzip {rootdir}(unknown).SAFE/osv/POEORB/{platform}/{year}/{orb_month}/*.zip -d {rootdir}(unknown).SAFE/osv/POEORB"
                os.system(unzip)
            else:
                print(f"Correct file structure for OSV files does not exist: {dirname}.")
def processGRD():
    '''Processes the Sentinel 1 data using the Gamma workflow'''
    # NOTE(review): "(unknown)" tokens below appear to be corrupted f-string
    # placeholders (presumably {filename}) - confirm against the original.
    for dirname in os.listdir(rootdir):
        if dirname.endswith(".SAFE"):
            #set directory and file names
            dir = f'{rootdir}{dirname}'
            if path.exists(f"{dir}/osv/POEORB/"):
                filename= str(dirname).lower().replace("_", "-")[:-10]
                filenameHH = filename.replace("1ssh","hh").replace("grdh","grd")
                #Generate MLI and GRD images and parameter files from a Sentinel-1 GRD product
                par_command= f"par_S1_GRD {dir}/measurement/{filenameHH}-001.tiff {dir}/annotation/{filenameHH}-001.xml {dir}/annotation/calibration/calibration-{filenameHH}-001.xml - {dir}/{filenameHH}_HH_grd.par {dir}/{filenameHH}_HH_grd - - - - -"
                os.system(par_command)
                # correct orb files must be allocated beforehand in SAFE folder (/osv/POEORB)
                for file in os.listdir(f'{dir}/osv/POEORB/'):
                    if file.endswith("EOF"):
                        orb = str(file)
                #Extract Sentinel-1 OPOD state vectors and copy into the ISP image parameter file
                opod = f"S1_OPOD_vec {dir}/{filenameHH}_HH_grd.par {dir}/osv/POEORB/{orb} -"
                os.system(opod)
                #Multi-looking of intensity (MLI) images
                multilook = f"multi_look_MLI {dir}/{filenameHH}_HH_grd {dir}/{filenameHH}_HH_grd.par {dir}/{filenameHH}_HH_grd_mli {dir}/{filenameHH}_HH_grd_mli.par 2 2 - - -"
                os.system(multilook)
                #Calculate terrain-geocoding lookup table and DEM derived data products
                gc_map = f"gc_map {dir}/{filenameHH}_HH_grd_mli.par - {dem_par} {dem} {dir}/(unknown)_dem_seg_geo.par {dir}/(unknown)_dem_seg_geo {dir}/(unknown)_lut_init 1.0 1.0 - - - {dir}/(unknown)_inc_geo - {dir}/(unknown)_pix_geo {dir}/(unknown)_ls_map_geo 8 2 -"
                os.system(gc_map)
                #Calculate terrain-based sigma0 and gammma0 normalization area in slant-range geometry
                pixel_area = f"pixel_area {dir}/{filenameHH}_HH_grd_mli.par {dir}/(unknown)_dem_seg_geo.par {dir}/(unknown)_dem_seg_geo {dir}/(unknown)_lut_init {dir}/(unknown)_ls_map_geo {dir}/(unknown)_inc_geo - - - - {dir}/(unknown)_pix_fine -"
                os.system(pixel_area)
                #Calculate product of two images: (image 1)*(image 2)
                # sample count is scraped from the MLI parameter file via grep
                mli_samples = subprocess.check_output(f"grep samples {dir}/{filenameHH}_HH_grd_mli.par", shell=True)
                mli_samples = str(mli_samples).replace("\n'","").split(' ')[-1][:-3]
                print("MLI Samples:", mli_samples)
                product = f"product {dir}/{filenameHH}_HH_grd_mli {dir}/(unknown)_pix_fine {dir}/{filenameHH}_HH_grd_mli_pan {mli_samples} 1 1 -"
                os.system(product)
                #Geocoding of image data using a geocoding lookup table
                dem_samples = subprocess.check_output(f"grep width {dir}/(unknown)_dem_seg_geo.par", shell=True)
                dem_samples = str(dem_samples).replace("\n'","").split(' ')[-1][:-3]
                print("DEM Samples:", dem_samples)
                geocode_back = f"geocode_back {dir}/{filenameHH}_HH_grd_mli_pan {mli_samples} {dir}/(unknown)_lut_init {dir}/{filenameHH}_HH_grd_mli_pan_geo {dem_samples} - 2 - - - -"
                os.system(geocode_back)
                #Compute backscatter coefficient gamma (sigma0)/cos(inc)
                sigma2gamma = f"sigma2gamma {dir}/{filenameHH}_HH_grd_mli_pan_geo {dir}/(unknown)_inc_geo {dir}/{filenameHH}_HH_grd_mli_norm_geo {dem_samples}"
                os.system(sigma2gamma)
                #Conversion of data between linear and dB scale
                linear_to_dB = f"linear_to_dB {dir}/{filenameHH}_HH_grd_mli_norm_geo {dir}/{filenameHH}_HH_grd_mli_norm_geo_db {dem_samples} 0 -99"
                os.system(linear_to_dB)
                #convert geocoded data with DEM parameter file to GeoTIFF format (dB)
                data2geotiff = f"data2geotiff {dir}/(unknown)_dem_seg_geo.par {dir}/{filenameHH}_HH_grd_mli_norm_geo_db 2 {outdir}/{filenameHH}_HH_grd_mli_norm_geo_db.tif -99"
                os.system(data2geotiff)
                #Produce different types of geotiffs (unhash lines below if want to create them)
                #data2geotiff2 = f"data2geotiff {dir}/(unknown)_dem_seg_geo.par {dir}/(unknown)_inc_geo 2 {outdir}/(unknown)_inc_geo.tif -99"
                #os.system(data2geotiff2)
                #data2geotiff3 = f"data2geotiff {dir}/(unknown)_dem_seg_geo.par {dir}/(unknown)_ls_map_geo 5 {outdir}/(unknown)_ls_map_geo.tif 0"
                #os.system(data2geotiff3)
                print("I finished the scene")
            else:
                print(f"OSV files have not been downloaded: {dirname}.")
def transform_geotiff(): #Tested and works
    '''Transforms geotiff into the UTM 19S projection (EPSG: 32719)'''
    # NOTE(review): "(unknown)" looks like a lost {filename} placeholder.
    for geotiff in os.listdir(outdir):
        if geotiff.endswith("db.tif"):
            filename= str(geotiff)[:-4]
            transform = f"gdalwarp -t_srs EPSG:32719 {outdir}/(unknown).tif {outdir}/(unknown)_utm_19S.tif"
            os.system(transform)
            #gdal.Warp()
            print(f"{geotiff} transformed to EPSG 32719.")
def crop_geotiff(): #Tested and works
    '''Crops transformed geotiff to the study area boundary'''
    # NOTE(review): "(unknown)" looks like a lost {filename} placeholder.
    for geotiff in os.listdir(outdir):
        if geotiff.endswith("_utm_19S.tif"):
            filename = str(geotiff)[:-4]
            print(filename)
            crop = f"gdalwarp -cutline {study_area} -crop_to_cutline {outdir}/(unknown).tif {outdir}/(unknown)_cropped.tif"
            os.system(crop)
            print(f"{geotiff} cropped to study area.")
def move_surplus_files(): #Tested and works (also need to work on it so it deletes per S1 scene, rather than the whole folder, to ensure safety.)
    '''Moves surplus files to other folder, from which they can then be deleted where necessary.
    Should only run once the previous steps have been run on all of the geotiffs in the folder.'''
    # Branch 1: cropped products exist, so every intermediate can be staged.
    if any(File.endswith("_utm_19S_cropped.tif") for File in os.listdir(outdir)):
        for geotiff in os.listdir(outdir):
            if geotiff.endswith("geo_db.tif") or geotiff.endswith("_utm_19S.tif") or geotiff.endswith("geo.tif") or geotiff.endswith(".tif.ovr"):
                os.rename(f"{outdir}/{geotiff}", f"{surplus_files}{geotiff}")
                print(f"{geotiff} has been moved to the to_be_deleted folder.")
            elif geotiff.endswith("_utm_19S_cropped.tif"):
                print(f"{geotiff} is the final product (transformed and cropped).")
            else:
                print("The geotiff is yet to be cropped to the study area. Complete this step first, before removing the file from this folder.")
    # Branch 2: only transformed (not cropped) products exist.
    elif any(File.endswith("_utm_19S.tif") for File in os.listdir(outdir)):
        for geotiff in os.listdir(outdir):
            if geotiff.endswith("geo_db.tif"):
                os.rename(f"{outdir}/{geotiff}", f"{surplus_files}{geotiff}")
                #os.remove(geotiff)
            elif geotiff.endswith("_utm_19S_cropped.tif"):
                print(f"{geotiff} is the final product (transformed and cropped).")
            else:
                print(f"{geotiff} is yet to be transformed into UTM Zone 19S. Complete this step first, before removing the file from this folder.")
    else:
        print("No surplus files exist in this directory.")
'''Run the functions. Hash them out where necessary.'''
#data preparation steps (unzip scenes, create osv dirs, fetch orbit files)
unzip()
mk_POEORB_dir()
downloadOSV()
#data processing steps, transformation, crop, and move surplus files
processGRD()
transform_geotiff()
crop_geotiff()
move_surplus_files()
| [
"noreply@github.com"
] | noreply@github.com |
8806780712e5054373bdc136bb537dece0d2b9ac | ffd2126e1ba5d1acea0bb0b3d011f4ccaf1c1f1f | /gia/gia/doctype/gia_sector/gia_sector.py | 4a1e4524728a0939102446bd86307c02279f077f | [
"MIT"
] | permissive | alkuhlani/gia | fd55c65b0f430f24c7fbe3aef5ea911af8642702 | 9af9737cef7b0b947baa21f46c7be381c4fc9d98 | refs/heads/master | 2022-12-10T02:45:47.907158 | 2020-09-04T16:37:10 | 2020-09-04T16:37:10 | 276,495,714 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Ahmed Mohammed Alkuhlani and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.model.document import Document
class GIASector(Document):
    """Frappe DocType controller for a GIA sector tree node."""
    def validate(self):
        # Every sector must be attached to a parent in the sector tree.
        if not self.parent_gia_sector:
            frappe.throw(_("Please enter the parent"))
| [
"frappe@ubuntu.vm"
] | frappe@ubuntu.vm |
59b93ec9acf4cea910ecdcb8812f3bc6fcbda7ca | 297e6d66302aa4037b702b24ee0c119e0bbd38e2 | /free_wifi_reboot.py | 617c59d435e54118f228a1e28298747e1b106a42 | [] | no_license | talehm/Rebooting-TP-link-Router-with-Phyton- | 081d97e63e2f997598e69113f9b8d0774a5d0cb1 | bdfc8ced8226d99d6dde620e89851578275ced70 | refs/heads/master | 2020-04-15T21:42:01.444621 | 2020-01-27T13:32:28 | 2020-01-27T13:32:28 | 165,043,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,565 | py | import pexpect
import sys
import getpass
import time
import subprocess
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
# available since 2.26.0
from selenium.webdriver.support import expected_conditions as EC
import os
import os.path
from multiprocessing import Pool
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.options import Options
from datetime import datetime, timedelta
import logging
import logging.handlers
import logging.config
import socket
# Firefox driver connection (headless); NOTE(review): `binary` is created but
# never passed to webdriver.Firefox - kept for compatibility.
binary = FirefoxBinary('/usr/bin/firefox')
options = webdriver.FirefoxOptions()
options.set_headless()
#options = Options()
#options.headless = True
# driver = webdriver.Firefox() # specfiy web driver
today = datetime.today()
# GET LAST 1 WEEK DATE
d = today - timedelta(days=7)
date = "{:%d_%m_%Y}".format(d.date())
today = "{:%d_%m_%Y}".format(today.date())  # 'today' is now a formatted string
# CREATE NEW LOG FILE
new_logfile = "logs/log_"+today+".log"
open(new_logfile, 'a').close()
# DELETE OLD LOG FILES
old_logfile = "logs/log_"+date+".log"
# Fix: old_logfile already contains the "logs/" prefix; the previous check
# tested "logs/logs/log_...", which never exists, so week-old logs were
# never actually deleted.
if os.path.exists(old_logfile):
    os.remove(old_logfile)
else:
    pass
# READ DATA FROM FILE (one "<AP name>\t<ip>" entry per line)
selected_cmd_file = open('ip_list2.txt', 'r')
selected_cmd_file.seek(0)
# LOGGING CONFIGURATON
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d, %b %Y %I:%M:%S %p',
                    filename=new_logfile, filemode='w', level=logging.INFO)
# FUNCTION TO REBOOT THE DEVICES ACCORDING TO DIFFERENT WEB INTERFACE STRUCTURE.
def Model(driver, menu, tool, reboot):
    """Drive a TP-Link web UI reboot: open the given menu/tool by element id
    in the bottom-left frame, then click the reboot button in the main frame.

    :param driver: live selenium webdriver already logged into the router
    :param menu: element id of the system-tools menu entry
    :param tool: element id of the reboot sub-menu entry
    :param reboot: element id of the reboot button (rebound to the element below)
    """
    driver.switch_to.frame("bottomLeftFrame")
    systemtools = driver.find_element_by_id(menu) # FIND REBOOT MENU
    systemtools.click()
    time.sleep(1)
    restart = driver.find_element_by_id(tool) # CHOOSE REBOOT FIELD
    restart.click()
    time.sleep(1)
    driver.switch_to.default_content()
    driver.switch_to.frame("mainFrame")
    time.sleep(2)
    reboot = driver.find_element_by_id(reboot) # CLICK TO REBOOT BUTTON
    time.sleep(1)
    reboot.click()
def webReboot(AP_name, ip_address):
    """Reboot a TP-Link access point through its HTTP admin interface.

    Logs in with the hard-coded admin credentials, detects the model from
    the page, and dispatches to Model() with the ids for that firmware.
    Any failure is logged as "not rebooted by HTTP" and swallowed.
    """
    try:
        driver = webdriver.Firefox(
            firefox_options=options)  # specfiy web driver
        # Connection to the host
        # # open the device web interface
        driver.get('http://'+ip_address)
        # print (driver.current_url)
        wait = WebDriverWait(driver, 30)
        logging.info(AP_name+": Protocol - HTTP")
        # Analyzing device model
        model = driver.find_element_by_class_name(
            'style1').text # get model of device
        # Authorization (hard-coded credentials)
        username = driver.find_element_by_id('userName') # find username input
        username.send_keys("admin") # enter the username
        password = driver.find_element_by_id(
            'pcPassword') # find password input
        password.send_keys("radmin") # enter the password
        login = driver.find_element_by_id('loginBtn') # click login button
        login.click()
        time.sleep(3)
        logging.info(AP_name+": Authorized")
        # Check model of device and run the function with model-specific ids
        if "WR840N" in model:
            logging.info(AP_name+": Model 840N")
            Model(driver, "menu_tools", "menu_restart", "button_reboot")
        else:
            logging.info(AP_name+": Model 841N")
            Model(driver, "a48", "a54", "reboot")
        time.sleep(1)
        alert = driver.switch_to_alert()
        alert.accept()
        logging.info(AP_name+": rebooted\n")
        print(AP_name+": rebooted by HTTP")
        time.sleep(1)
        driver.close()
    except Exception as e:
        print(AP_name+": not rebooted by HTTP")
        logging.info(AP_name+": not rebooted by HTTP")
def isOpen(ip, port):
    """Return True if a TCP connection to (ip, port) succeeds, else False.

    Fixes over the original: the socket is always closed (the old code
    leaked a file descriptor per call - shutdown() does not close), a
    timeout stops the sweep hanging on unreachable hosts, and the bare
    ``except`` (which also swallowed KeyboardInterrupt) is narrowed to
    socket/OS errors plus a bad ``port`` value.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.settimeout(5)
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    except (OSError, ValueError):
        return False
    finally:
        s.close()
def connection(hostname):
    """Reboot one device described by a tab-separated "<name>\\t<ip>" line.

    Pings the device first; if it is up, prefers a Telnet reboot (via the
    external telnet.sh script) and falls back to the HTTP web interface.
    """
    try:
        # GET HOSTNAME AND IP ADDRESS OF DEVICE
        AP_name, ip_address = hostname.split("\t")
        logging.info("Connecting to: " + AP_name)
        # SEND 3 ICMP REQUEST TO DEVICE
        cmnd = "ping -c 3 -W 3 "+ip_address
        ping = subprocess.check_output(
            cmnd, stderr=subprocess.STDOUT, shell=True,
            universal_newlines=True)
        #response = os.system("ping -c 3 -W 3 "+ip_address)
    except subprocess.CalledProcessError as exc:
        # non-zero ping exit status means the host did not answer
        logging.info(AP_name+": Status is down\n")
    else:
        logging.info(AP_name+": Status is up")
        telnet_status = isOpen(ip_address, 23)
        if telnet_status:
            logging.info("Protocol: Telnet")
            try:
                cmnd = 'bash telnet.sh '+ip_address + ' ' + AP_name
                telnet = subprocess.check_output(
                    cmnd, stderr=subprocess.STDOUT, shell=True,
                    universal_newlines=True)
            except subprocess.CalledProcessError as exc:
                # the script exits non-zero even on success; 1/127 are
                # treated as "rebooted" by convention of telnet.sh
                if exc.returncode == 1 or exc.returncode == 127:
                    logging.info(AP_name+": rebooted by Telnet\n")
                    print(AP_name+": rebooted by Telnet")
                else:
                    logging.info(AP_name+": NOT rebooted by Telnet\n")
                    print(AP_name+": NOT rebooted by Telnet")
        else:
            # no telnet service - fall back to the web interface
            webReboot(AP_name, ip_address)
def main():
    """Reboot all devices listed in ip_list2.txt, five workers in parallel."""
    p = Pool(5)
    p.map(connection, selected_cmd_file.readlines())
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
e2230aed752c8a73948aecc725580d22f370446b | 1e9fed88ce4a623970f7e53143753a170d4bdcda | /aat/tests/test_strategy.py | 37e4f8123735a0e70663ec060bc26eda308854e1 | [
"Apache-2.0"
] | permissive | krusty45/aat | 06dedbfe0abaf76c4a584ad441dc7badd093a939 | a14b652f7ff90761d0e1198a85d8fc02efeff0eb | refs/heads/master | 2020-06-24T09:34:30.981326 | 2019-07-09T19:34:22 | 2019-07-09T19:34:22 | 198,929,483 | 1 | 0 | Apache-2.0 | 2019-07-26T02:07:57 | 2019-07-26T02:07:56 | null | UTF-8 | Python | false | false | 453 | py | # for coverage
from ..strategy import *
class TestStrategy:
    """Placeholder pytest-style suite; exists so the strategy module is
    imported and counted by coverage. Fixture hooks are intentionally empty."""
    def setup(self):
        pass
        # setup() before each test method
    def teardown(self):
        pass
        # teardown() after each test method
    @classmethod
    def setup_class(cls):
        pass
        # setup_class() before any methods in this class
    @classmethod
    def teardown_class(cls):
        pass
        # teardown_class() after any methods in this class
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
7ac55fae4dff112e3f68063f3e79fb7c3247eec9 | cb2a7e659b2ca21b391ac2bbed8e34aa5ed17bb6 | /nba/migrations/0001_initial.py | 2733b02467c4f5feb7069c681a5f1ef4a31bbf42 | [] | no_license | k24dizzle/meow | 04c124b38220072d7130acee4e1912e0023d3f5a | 5afd4bb274611ae8482e62f0c559d9fd960757c6 | refs/heads/master | 2021-01-21T13:53:18.007463 | 2016-05-18T21:12:20 | 2016-05-18T21:12:20 | 51,556,962 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-11 23:24
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Post model.

    NOTE(review): the DateField default is the fixed timestamp captured when
    makemigrations ran - typical of generated code, not a live default.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('home', models.CharField(max_length=200)),
                ('away', models.CharField(max_length=200)),
                ('date', models.DateField(default=datetime.datetime(2016, 2, 11, 23, 24, 23, 747572, tzinfo=utc))),
            ],
        ),
    ]
| [
"zhaok24@uw.edu"
] | zhaok24@uw.edu |
8a633ce99beb82b6b65a0ed5d1c65801a5ef744e | ca7a1f0f16dcb8a7c077f6ef81413ea6b150513b | /pythonFiles/pydev/util.py | 995b3ac5c5d011846eebff34560c886e431df4bf | [
"MIT"
] | permissive | lee-vius/python-preview | 79af9b8aaeb9fe297dab3eb4af10370ec23a6724 | 1d953a8d08698693042ce763ec7861224661032f | refs/heads/master | 2020-09-03T00:53:51.811005 | 2019-11-05T19:18:23 | 2019-11-05T19:18:23 | 219,345,117 | 0 | 0 | MIT | 2019-11-03T18:19:20 | 2019-11-03T18:19:19 | null | UTF-8 | Python | false | false | 1,952 | py | import sys
import struct
from encodings import utf_8, ascii
# Python 2/3 compatibility shims: on Python 3 the py2 builtins are absent,
# so alias them to their py3 equivalents.
try:
    unicode
except:
    unicode = str
try:
    xrange
except:
    xrange = range
# Select ASCII str<->bytes converters once at import time based on the
# interpreter major version; on Python 2, str is already bytes so both
# functions are identities. Non-ASCII input raises UnicodeError on py3.
if sys.version_info[0] >= 3:
    def to_bytes(cmd_str):
        return ascii.Codec.encode(cmd_str)[0]
else:
    def to_bytes(cmd_str):
        return cmd_str
if sys.version_info[0] >= 3:
    def to_str(cmd_bytes):
        return ascii.Codec.decode(cmd_bytes)[0]
else:
    def to_str(cmd_bytes):
        return cmd_bytes
# One-byte type tags that prefix every string on the wire (see write_string):
# U = UTF-8 encoded unicode, A = raw ASCII/bytes, N = None.
UNICODE_PREFIX = to_bytes('U')
ASCII_PREFIX = to_bytes('A')
NONE_PREFIX = to_bytes('N')
def read_bytes(conn, count):
    """Read up to *count* bytes from *conn*; may return fewer on EOF.

    Bug fix: ``socket.recv`` signals a closed connection by returning an
    empty bytes object, never ``None`` - the old ``is None`` test therefore
    looped forever when the peer disconnected mid-read. Any falsy chunk now
    ends the read and the bytes gathered so far are returned.
    """
    b = b''  # bytes literal is valid on Python 2.6+ and 3.x alike
    while len(b) < count:
        received_data = conn.recv(count - len(b))
        if not received_data:  # b'' on EOF (and None defensively) ends the read
            break
        b += received_data
    return b
def write_bytes(conn, b):
    """Send all of byte string *b* on *conn* (blocks until fully sent)."""
    conn.sendall(b)
def read_int(conn):
    """Read one big-endian signed 64-bit integer from *conn*."""
    # '!' represents network(=big-endian) byte order
    # 'q' represent long long in c type, integer in python type, 8 standard size
    return struct.unpack('!q', read_bytes(conn, 8))[0]
def write_int(conn, i):
    """Write *i* to *conn* as a big-endian signed 64-bit integer."""
    write_bytes(conn, struct.pack('!q', i))
def read_string(conn):
    """Read a length-prefixed UTF-8 string from *conn*.

    Wire format: 8-byte big-endian byte count, then that many UTF-8 bytes.
    A zero length yields ''. On Python 2 the result is narrowed back to a
    byte string when it is pure ASCII, matching the rest of this module.
    NOTE(review): unlike read_bytes, this loop keeps calling recv() if the
    peer closes early (recv returns b'' forever) - confirm callers only use
    it on healthy connections.
    """
    str_len = read_int(conn)
    if not str_len:
        return ''
    res = to_bytes('')
    while len(res) < str_len:
        res = res + conn.recv(str_len - len(res))
    res = utf_8.decode(res)[0]
    if sys.version_info[0] == 2:
        try:
            res = ascii.Codec.encode(res)[0]
        except UnicodeEncodeError:
            pass  # keep the unicode result when it contains non-ASCII
    return res
def write_string(conn, s):
    """Write *s* to *conn* with a one-byte type tag.

    None -> 'N' tag only; unicode -> 'U' tag + UTF-8 byte length + bytes;
    anything else (byte/ASCII string) -> 'A' tag + length + raw bytes.
    NOTE(review): read_string above does not consume these tag bytes - the
    tag is presumably read elsewhere by the peer; verify protocol symmetry.
    """
    if s is None:
        write_bytes(conn, NONE_PREFIX)
    elif isinstance(s, unicode):
        b = utf_8.encode(s)[0]
        b_len = len(b)
        write_bytes(conn, UNICODE_PREFIX)
        write_int(conn, b_len)
        if b_len > 0:
            write_bytes(conn, b)
    else:
        s_len = len(s)
        write_bytes(conn, ASCII_PREFIX)
        write_int(conn, s_len)
        if s_len > 0:
            write_bytes(conn, s)
"dongli0x00@gmail.com"
] | dongli0x00@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.