blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
05d1bf8ad8b46a5f75c6b7338e340c5de6919bfb | 01443c0d42543ccb20017e7a9f4bad998336f8c5 | /sort_pagerank.py | 7db934d00892f1090c309ef69cb07328bde45535 | [] | no_license | claytondus/cdnw-pagerank | 0756e9bc674fac26ce0acdc213a2d5ff78e79e6d | 51374723f793675c594f16b656338954f6634921 | refs/heads/master | 2021-01-17T06:48:16.873482 | 2016-05-01T20:29:50 | 2016-05-01T20:29:50 | 56,156,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #!/usr/bin/env python
#COSC 560 Assignment 3
#PageRank
#sort_pagerank.py: Export sorted list of pages by PageRank in descending order
import sys, string
from operator import itemgetter
with open('pagerank_output') as pr_f:
ranks = [x.strip().split('\t') for x in pr_f.readlines()]
def getRank(item):
return float(item[1])
ranks_sorted = sorted(ranks, key=getRank, reverse=True)
for line in ranks_sorted:
print('\t'.join(line[0:2]))
| [
"claytondus@gmail.com"
] | claytondus@gmail.com |
b0b9f9a790d7425e16598e8858bed847799f0b47 | adf337d8f89413edc439d8433e647d2d74ca6b6c | /sheepCounter/SheepCounter-v1.0.py | 865614fa03546f7b6d4adc56c14f7c00cc1e7696 | [] | no_license | ROOKIEMIE/tools | 1c6bf5bc62f82c082aa2e33897ff1000476646f8 | 011420e7f3b088fcacff8a7f4efb3755c4fe30d6 | refs/heads/master | 2023-04-08T09:20:19.746286 | 2017-05-01T05:05:43 | 2017-05-01T05:05:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,668 | py | # -*- coding: utf-8 -*-
import time
import argparse
import shodan
import sys
'''
[!] Simple python tool to get the total of models indexed in "shodan.io".
[*] Research: "#stringbleed" (CVE-2017-5135)
[+] twitter: @stringbleed
[+] mail to: stringbleed@protonmail.com
[>] by:
[+] Bertin Jose (Costa Rica)
[+] twitter: @bertinjoseb
[+] Fernandez Ezequiel (Argentina)
[+] twitter: @capitan_alfa
'''
banner = '''
[+] Counter Sheep v1.0
'''
# CLI: pass either a file with one model name per line, or a single model name.
parser = argparse.ArgumentParser(description=' [*] Tool for get the total number of cablemodems vuln to #Stringbleed, (indexed into Shodan)')
parser.add_argument('--models-file', dest="MODELS", help='select any files with models name of cablemodems')
parser.add_argument('--model', dest="MODEL", help='select any model name')
args = parser.parse_args()
allModels = args.MODELS
anyModel = args.MODEL
class Colors:
    """ANSI escape sequences used to colour terminal output."""
    # Foreground colours.
    BLUE = '\033[94m'
    GREEN = '\033[32m'
    RED = '\033[0;31m'
    ORANGE = '\033[33m'
    WHITE = '\033[97m'
    # Attributes and reset.
    BOLD = '\033[1m'
    BR_COLOUR = '\033[1;37;40m'
    DEFAULT = '\033[0m'
# Shodan query prefix; the model name is appended per lookup.
query = 'MODEL: '
# One-element list so theModelIs() can mutate the running total in place.
sumTotal = [0]
# Facets make api.count() return aggregated buckets instead of raw banners.
FACETS = [
    # ('org', 1000),
    ('country', 1000),
    # ('city', 1000)
]
FACET_TITLES = {
    # 'org' : 'Top Organizations',
    'country' : 'Top Countries',
    # 'city' : 'Top Cities',
}
# best harcoded !!!
# NOTE(review): a hard-coded API key committed to a public script is a
# credential leak; it should come from an environment variable instead.
freeAPIKEY = 'MM72AkzHXdHpC8iP65VVEEVrJjp7zkgd'
api = shodan.Shodan(freeAPIKEY)
# Function search --------------------------------------------------------------------------------------------------- #
def theModelIs(cblMDL):
    """Count Shodan hits for one cablemodem model and print a per-country
    breakdown; adds the total to the global ``sumTotal`` accumulator."""
    #sumTotal = 0
    cableModel = str(query+cblMDL)
    # count() returns only totals + facet summaries (cheaper than a search).
    result = api.count(cableModel, facets=FACETS)
    print Colors.RED+" # ---------------------------------------------------------------------------- # "
    print Colors.RED+' # '+Colors.GREEN+' Query:'+Colors.ORANGE+' \t\t\t\" '+cableModel+' \"'
    print Colors.RED+" # ---------------------------------------------------------------------------- # "+Colors.DEFAULT
    totalDevices = result['total']
    print Colors.GREEN+' Total Results: '+Colors.ORANGE+'\t'+str(totalDevices)+Colors.DEFAULT+'\n'
    sumTotal[0] += totalDevices
    for facet in result['facets']:
        print " "+Colors.BLUE+str(FACET_TITLES[facet])
        for term in result['facets'][facet]:
            print Colors.GREEN+' %s: %s' % (term['value'], Colors.ORANGE+str(term['count'])+Colors.DEFAULT )
        print ''+Colors.DEFAULT
    # Throttle between queries to stay within the free API's rate limit.
    time.sleep(2)
# ------------------------------------------------------------------------------------------------------------------- #
print banner
# Either iterate a file of model names, or query a single model.
if (bool(allModels)):
    try:
        cableModels = open(allModels,"r")
        #cableModels =
        for model in cableModels:
            # Strip the trailing newline before querying.
            md = model[:-1]
            theModelIs(md)
    except Exception, e:
        print Colors.GREEN+' Error: '+Colors.RED+''+str(e)
        sys.exit(1)
elif bool(anyModel):
    theModelIs(anyModel)
else:
    print Colors.GREEN+"Select any method of search:"+Colors.ORANGE+" \"--model <model cablemodem> / --models-file <file with models of cablemodems>\""+Colors.DEFAULT
    sys.exit(1)
print "--------------------------------------------------------------------------------- "
# Final tally (Spanish messages: "over a million indexed devices vulnerable
# to #StringBleed" / "N remaining to reach a million").
if sumTotal[0] > 1000000:
    print Colors.GREEN+"Mas de un millon de Equipos (Indexados) vulnerables a #StringBleed: "+Colors.ORANGE+str(sumTotal[0])+Colors.DEFAULT
else:
    print Colors.GREEN+"[*] Equipos (Indexados) vulnerables a #StringBleed : "+Colors.ORANGE+str(sumTotal[0])+Colors.GREEN
    print "[*] Faltan  [ "+Colors.RED+str(1000000 - sumTotal[0])+Colors.GREEN+" ] para llegar al millon"+Colors.DEFAULT
print "--------------------------------------------------------------------------------- "
| [
"stringbleed@protonmail.com"
] | stringbleed@protonmail.com |
ddbb58dc1c0ac65c60b740a28c21ddd894972a1d | 865011d963226c8d629581775b6e167e00ca5406 | /django_celery/celery.py | d42c6274d4fbff7a92224e81c115bbb92bf7e2e4 | [] | no_license | aeweiwi/django_celery | c183e022930ae91cc39afeb0cde75ccaf3d74bf9 | 3f9e0413696f2a1a5d38b7efa08a186950600d14 | refs/heads/master | 2020-09-03T03:42:42.179661 | 2019-11-03T23:00:01 | 2019-11-03T23:00:01 | 219,377,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from __future__ import absolute_import, unicode_literals
from celery import Celery
import os
# https://medium.com/@yedjoe/celery-4-periodic-task-in-django-9f6b5a8c21c7
# Point Celery at the Django settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_celery.settings')
app = Celery('django_celery')
# Read every CELERY_*-prefixed setting from Django's settings file.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks.py modules in all installed Django apps.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    """Log the bound task's request context; handy for verifying workers run."""
    print(f'Request: {self.request!r}')
| [
"abdalrahman.eweiwi@googlemail.com"
] | abdalrahman.eweiwi@googlemail.com |
2038166bb5ac84d11471f7249b5c8b11785f40e7 | e3024bef2174896c323ea54fac4f7898b07065c4 | /duplicates_per_country.py | c99b1fdf9f2b0a4bae9ed8a0f2afd0c751ec5598 | [
"Apache-2.0"
] | permissive | iychoi/vsftpd-loganalyzer | 5266ff5e33af5c26672bd40bbc14b1dfb0360263 | 25ba1dc21d3c4b60b51a6ca0188a20f3834f9dda | refs/heads/master | 2020-03-28T21:57:24.013247 | 2018-09-19T18:35:54 | 2018-09-19T18:35:54 | 149,195,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/python
import sys
import io
import codecs
# Force UTF-8 output even when stdout/stderr are redirected (Python 2).
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
# key "country\tfilename" -> total bytes transferred for that pair.
TRANSFERRED = {}
# key "country\tfilename" -> largest single transfer seen (~ the file size).
MAX_FILE_SIZE = {}
# country -> duplicate (re-downloaded) bytes.
SUMMARY = {}
# Input record layout (tab-separated fields):
#   0 access_time    1 transfertime  2 remotehost    3 filename
#   4 bytecount      5 transfer_type 6 direction     7 completion_status
#   8 city           9 region_code  10 metro_code   11 country_name
#  12 latitude      13 longitude
def filter_line(line):
    """Return True when the record should be skipped: uploads (INCOMING)
    and transfers with a non-positive byte count."""
    fields = line.split("\t")
    if fields[6] == "INCOMING":
        return True
    return int(fields[4]) <= 0
def analyze_line(line):
    """Fold one record into the global per-(country, file) tallies."""
    fields = line.split("\t")
    key = fields[11] + "\t" + fields[3]  # country_name + filename
    nbytes = int(fields[4])
    # Running total of every byte served for this file/country pair.
    TRANSFERRED[key] = TRANSFERRED.get(key, 0) + nbytes
    # Track the largest single transfer as a proxy for the real file size.
    if key not in MAX_FILE_SIZE or nbytes > MAX_FILE_SIZE[key]:
        MAX_FILE_SIZE[key] = nbytes
    #print line
def analyze(path, max_lines):
    """Read the log at *path* (at most *max_lines* rows; 0 = unlimited),
    tally transfers, then print per-country duplicate byte counts.

    Duplicate bytes = total bytes served minus the largest single transfer,
    i.e. everything beyond one full copy of each file.
    """
    processed_lines = 0;
    with io.open(path, "r") as f:
        for line in f:
            if not filter_line(line):
                analyze_line(line)
            # Every input row counts toward the limit, filtered or not.
            processed_lines += 1
            if max_lines > 0 and processed_lines >= max_lines:
                break
    for key in TRANSFERRED.keys():
        dupbytes = TRANSFERRED[key] - MAX_FILE_SIZE[key]
        if dupbytes != 0:
            fields = key.split("\t")
            # fields[0] is the country half of the "country\tfilename" key.
            if fields[0] in SUMMARY:
                SUMMARY[fields[0]] = SUMMARY[fields[0]] + dupbytes
            else:
                SUMMARY[fields[0]] = dupbytes
    for key in SUMMARY.keys():
        print "%s\t%d" % (key, SUMMARY[key])
def main(argv):
if len(argv) < 1:
print "command : ./bytes_per_city.py input_path"
elif len(argv) >= 1:
input_path = argv[0]
max_lines = 0 # no limit
if len(argv) == 2:
max_lines = int(argv[1])
analyze(input_path, max_lines)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"iychoi@email.arizona.edu"
] | iychoi@email.arizona.edu |
a17bcec1354f60b3df6a6f22d277fb5f3cb5e399 | 305e473c17f47b815668377bc90e13642510aace | /punch_version.py | 7e73a53c562eed61ec708d5241c654d92f5179d0 | [
"MIT"
] | permissive | xrmx/mau | 5d6f66811b1356331c98547cc7c778ff3a04d6ff | 0aafa67a1b6f02eda72fe60ea2775454c3ad0866 | refs/heads/main | 2023-03-21T06:22:14.447284 | 2021-03-11T10:58:05 | 2021-03-11T10:58:05 | 347,347,655 | 0 | 0 | MIT | 2021-03-13T11:08:21 | 2021-03-13T11:08:20 | null | UTF-8 | Python | false | false | 30 | py | major = 1
minor = 3
patch = 0
| [
"giordani.leonardo@gmail.com"
] | giordani.leonardo@gmail.com |
c5353c5966806fe32f090860851f7c370dee9ddb | 05c7e840adb2c78f54f32415a287160a2e618615 | /188/B/188B.py | 5041417f5e2b933da93ad932a628a2e4e500046c | [] | no_license | n-makoto/competitive-programming | 9d2030beed785c36a6b9ef0551e3e240c794d4d5 | f227e2b5ac975164a6337c8905e4503ebed38379 | refs/heads/master | 2023-04-28T04:50:48.030461 | 2021-05-22T13:32:51 | 2021-05-22T13:32:51 | 359,827,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # 問題読み間違えてた
# Nを個数だと思っていたがNは次元で個数は2個固定だった...
# -*- coding: utf-8 -*-
n = int(input())
s = list(map(int, input().split()))
t = list(map(int, input().split()))
result = []
for i in range(len(s)):
result.append(s[i] * t[i])
print("Yes" if sum(result) == 0 else "No")
| [
"wwwww.desire@gmail.com"
] | wwwww.desire@gmail.com |
9cc1358bd5b8cd896d7e86bdaff639958cf7daa7 | e1de2e5da6724e94bb3e762ac8e9feec7d492ae9 | /InteractivePython/SpaceShip/spaceship.py | c2a7a2b2e8ea733ac74fcf2e2d5e4feac7d17c26 | [] | no_license | razvanvoicu/coursera_assignments | c68a2a6688e2083f313ad1415c3affb073f92dac | eab40449a121ed5873aedc74eb03ec4b4da7d4fd | refs/heads/master | 2020-04-05T23:34:45.005051 | 2016-02-18T06:46:59 | 2016-02-18T06:46:59 | 35,404,123 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,594 | py | # http://www.codeskulptor.org/#user40_YaPYF0j0nuNI13u.py
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
ANGULAR_SPEED = 0.1
SPEED_LIMIT = 10.0
FRICTION = 0.99
MISSILE_SPEED = 20.0
score = 0
lives = 3
time = 0
ship_rotation = 0
class ImageInfo:
    """Rendering metadata for one sprite image: source center/size, collision
    radius, lifespan in ticks, and whether the image is an animation strip."""

    def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
        self.center = center
        self.size = size
        self.radius = radius
        # A falsy lifespan means the sprite never expires.
        self.lifespan = lifespan if lifespan else float('inf')
        self.animated = animated

    def get_center(self):
        return self.center

    def get_size(self):
        return self.size

    def get_radius(self):
        return self.radius

    def get_lifespan(self):
        return self.lifespan

    def get_animated(self):
        return self.animated
# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
# All images/sounds are streamed from CodeSkulptor's asset CDN; each image is
# paired with an ImageInfo describing its source rect and collision radius.
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
#                 debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 20)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_info = ImageInfo([5,5], [10, 10], 3, 50)
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
    """Return the unit direction vector [dx, dy] for angle *ang* (radians)."""
    dx = math.cos(ang)
    dy = math.sin(ang)
    return [dx, dy]
def dist(p,q):
    """Euclidean distance between 2-D points p and q (indexable pairs)."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.sqrt(dx ** 2 + dy ** 2)
# Ship class
class Ship:
    """The player's ship: position/velocity/heading state plus per-frame
    draw and physics-update logic (screen-wrapping toroidal playfield)."""
    def __init__(self, pos, vel, angle, image, info):
        self.pos = [pos[0],pos[1]]
        self.vel = [vel[0],vel[1]]
        self.thrust = False
        self.angle = angle
        self.angle_vel = 0
        self.image = image
        self.image_center = info.get_center()
        self.image_size = info.get_size()
        self.radius = info.get_radius()

    def draw(self,canvas):
        # The double_ship sheet holds two frames side by side; shift the
        # source center by one frame width to show the thrusting frame.
        thrust = 0
        if self.thrust: thrust = 1
        center = ship_info.get_center()
        size = ship_info.get_size()
        canvas.draw_image(
            self.image,
            [center[0]+thrust*size[0],center[1]],
            size,
            self.pos,
            size,
            self.angle
        )

    def update(self):
        # Turn at a fixed rate; ship_rotation is -1/0/+1 from the key handlers.
        self.angle += ANGULAR_SPEED * ship_rotation
        if self.thrust:
            angvect = angle_to_vector(self.angle)
            self.vel = [self.vel[0] + angvect[0], self.vel[1] + angvect[1]]
            # Clamp speed to SPEED_LIMIT by rescaling the velocity vector.
            velmag = math.sqrt(self.vel[0]*self.vel[0] + self.vel[1]*self.vel[1])
            if velmag > SPEED_LIMIT:
                self.vel = [self.vel[0]*SPEED_LIMIT/velmag,self.vel[1]*SPEED_LIMIT/velmag]
        else:
            # Coast: friction gradually slows the ship when not thrusting.
            self.vel = [self.vel[0]*FRICTION,self.vel[1]*FRICTION]
        self.pos = [self.pos[0]+self.vel[0], self.pos[1]+self.vel[1]]
        # Wrap around the screen edges.
        if self.pos[0] > WIDTH: self.pos[0] -= WIDTH
        if self.pos[1] > HEIGHT: self.pos[1] -= HEIGHT
        if self.pos[0] < 0 : self.pos[0] += WIDTH
        if self.pos[1] < 0 : self.pos[1] += HEIGHT
        if self.thrust:
            ship_thrust_sound.play()
        else:
            ship_thrust_sound.pause()
# Sprite class
class Sprite:
    """A non-ship game object (rock, missile, explosion): spins at a fixed
    angular velocity and moves linearly, wrapping at screen edges."""
    def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
        self.pos = [pos[0],pos[1]]
        self.vel = [vel[0],vel[1]]
        self.angle = ang
        self.angle_vel = ang_vel
        self.image = image
        self.image_center = info.get_center()
        self.image_size = info.get_size()
        self.radius = info.get_radius()
        self.lifespan = info.get_lifespan()
        self.animated = info.get_animated()
        self.age = 0
        # Play the spawn sound (e.g. missile shot) from its start, if given.
        if sound:
            sound.rewind()
            sound.play()

    def draw(self, canvas):
        """Draw the sprite at its current position and rotation."""
        center = self.image_center
        size = self.image_size
        canvas.draw_image(
            self.image,
            center,
            size,
            self.pos,
            size,
            self.angle
        )

    def update(self):
        """Advance one tick: spin, move by velocity, wrap at screen edges."""
        self.angle += ANGULAR_SPEED * self.angle_vel
        # Fix: removed `angvect = angle_to_vector(self.angle)` -- it was
        # computed every tick but never used (angle_to_vector is pure).
        self.pos = [self.pos[0]+self.vel[0], self.pos[1]+self.vel[1]]
        if self.pos[0] > WIDTH: self.pos[0] -= WIDTH
        if self.pos[1] > HEIGHT: self.pos[1] -= HEIGHT
        if self.pos[0] < 0 : self.pos[0] += WIDTH
        if self.pos[1] < 0 : self.pos[1] += HEIGHT
def draw(canvas):
    """Per-frame handler: paint background and HUD, draw the ship/sprites,
    then step their physics for the next frame."""
    global time

    # animate background
    time += 1
    wtime = (time / 4) % WIDTH
    center = debris_info.get_center()
    size = debris_info.get_size()
    canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
    # Two copies of the debris layer drawn side by side scroll seamlessly.
    canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
    canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
    # HUD: score top-right, remaining lives top-left.
    canvas.draw_text("Score",[11*WIDTH/12,HEIGHT/24],20,"yellow")
    canvas.draw_text(str(score),[11*WIDTH/12,1.8*HEIGHT/24],20,"yellow")
    canvas.draw_text("Lives",[WIDTH/72,HEIGHT/24],20,"yellow")
    canvas.draw_text(str(lives),[WIDTH/72,1.8*HEIGHT/24],20,"yellow")

    # draw ship and sprites
    my_ship.draw(canvas)
    a_rock.draw(canvas)
    if a_missile:
        a_missile.draw(canvas)

    # update ship and sprites
    my_ship.update()
    a_rock.update()
    if a_missile:
        a_missile.update()
def keydown(key):
    """Key-press handler: arrows steer/thrust, space fires a missile."""
    # NOTE(review): ship_thrust is declared global but never defined or
    # assigned anywhere -- looks like a leftover from an earlier version.
    global ship_rotation, ship_thrust, a_missile
    if key == simplegui.KEY_MAP["left"]:
        ship_rotation = -1
    elif key == simplegui.KEY_MAP["right"]:
        ship_rotation = 1
    elif key == simplegui.KEY_MAP["up"]:
        my_ship.thrust = True
    elif key == simplegui.KEY_MAP["space"]:
        # Spawn the missile off the ship's tip, inheriting the ship's velocity.
        vec = angle_to_vector(my_ship.angle)
        pos = [ my_ship.pos[0]+my_ship.radius*vec[0],
                my_ship.pos[1]+my_ship.radius*vec[1] ]
        a_missile = Sprite(
            [pos[0]+vec[0]*my_ship.radius,pos[1]+vec[1]*my_ship.radius],
            [MISSILE_SPEED*vec[0]+my_ship.vel[0],MISSILE_SPEED*vec[1]+my_ship.vel[1]],
            0, 0, missile_image, missile_info, missile_sound
        )
    else:
        pass
def keyup(key):
    """Key-release handler: stop rotating / thrusting."""
    global ship_rotation, ship_thrust
    if key == simplegui.KEY_MAP["left"]:
        ship_rotation = 0
    elif key == simplegui.KEY_MAP["right"]:
        ship_rotation = 0
    elif key == simplegui.KEY_MAP["up"]:
        my_ship.thrust = False
    else:
        pass
# timer handler that spawns a rock
def rock_spawner():
    """Replace the current rock with a fresh one at a random position,
    velocity, heading and spin (called once per timer tick)."""
    global a_rock, a_missile
    a_rock = Sprite([WIDTH*random.random(), HEIGHT*random.random()],
                [-0.5+random.random(), -0.5*random.random()],
                -3.14+6.28*random.random(),
                -1+2*random.random(),
                asteroid_image, asteroid_info)
# initialize frame
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
a_rock = Sprite([WIDTH*random.random(), HEIGHT*random.random()],
                [-0.5+random.random(), -0.5*random.random()],
                -3.14+6.28*random.random(),
                -1+2*random.random(),
                asteroid_image, asteroid_info)
a_missile = None   # only one missile in flight at a time; None = none
# register handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
# Respawn the rock once per second.
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
timer.start()
frame.start()
| [
"razvan.voicu.sg@gmail.com"
] | razvan.voicu.sg@gmail.com |
5788a77d944bc3088ee6f66a012a6ab05cddfc51 | 3b3174caf944205ef1e0cf3c05d02d59e74cb80c | /Competitive Programming/Data Structures - HackerRank/Insert-a-node-at-a-specific-position-linked-list/solution.py | e5ebeea041b6ca2e466d0d9562427999e3192572 | [] | no_license | kunal768/ECM-HACKERS-17 | ea69b16206ec964f3230122736515717b14b2f49 | a02cafee6f3d6b4faf8983bca840bbb61b21b0b5 | refs/heads/master | 2020-03-29T14:27:14.380497 | 2019-05-27T01:54:22 | 2019-05-27T01:54:22 | 150,018,379 | 2 | 2 | null | 2018-10-21T14:06:56 | 2018-09-23T19:31:35 | Python | UTF-8 | Python | false | false | 364 | py | def insertNodeAtPosition(head, data, position):
if position == 0:
return SinglyLinkedListNode(data,head)
temp = head
new_node = SinglyLinkedListNode(data)
count = 0
while count<position:
count += 1
prev = temp
temp = temp.next
prev.next = new_node
new_node.next = temp
return head
| [
"noreply@github.com"
] | noreply@github.com |
ec980710f8460f62c205e7ede9e319c6d560e8e3 | c620c3c962d13fb37f9d8533f1881920caf02778 | /labs/sagelandia/text.py | 4769bb57c5fd863c3fffe2fb52dc0221f7e1c92e | [] | no_license | sagehogue/cg-labs | 5f0e9b3ee53a8606543259c561029313608c5fba | ae61f268db55754c6f2317851b63e7efbc0cfc95 | refs/heads/master | 2020-03-07T23:51:57.229715 | 2018-05-02T22:30:20 | 2018-05-02T22:30:20 | 127,791,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import sys
from colorama import init
init(strip=not sys.stdout.isatty()) # strip colors if stdout is redirected
from termcolor import cprint
from pyfiglet import figlet_format
cprint(figlet_format('Sagelandia', font='dosrebel'), 'magenta', 'on_white', attrs=['bold']) | [
"Sagehogue@gmail.com"
] | Sagehogue@gmail.com |
5fc0ca89c67fd3b4814d1922d7ba8bcbc17ae8af | 807a2cc360d3957a3664910cf157980ec9c85857 | /scripts/test_device_manage.py | fe3f6870eb18ec3b5ed4c2f2334418717dd1a12b | [] | no_license | ThomasYoung76/app_workspace | 2248bf5cb107720801769f04eefbc17e5dddb92a | 1b46045b268696af9581934717ba1b5df17a1bf3 | refs/heads/master | 2021-09-13T04:24:55.932359 | 2018-04-25T02:44:31 | 2018-04-25T02:44:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,664 | py | # coding: utf-8
"""
设备管理
"""
import sys
sys.path.append('..')
from libs import *
import unittest
class DeviceManage(unittest.TestCase):
    """Appium UI tests for the launcher app's device-management screens.

    ``sj`` is a Controller wrapping the Appium driver; each test performs
    the action on the real device UI and then re-opens the screen to verify.
    Fix: test_device_manage_84 used ``assertTrue(is_checked, 'true')`` /
    ``('false')`` -- the second argument of assertTrue is the failure
    *message*, so those assertions were vacuous (both strings are truthy).
    They now use assertEqual, matching test_device_manage_85's style.
    """
    @classmethod
    def setUpClass(cls):
        cls.sj = Controller()

    def setUp(self):
        self.sj.driver.implicitly_wait(5)

    def tearDown(self):
        # Best-effort navigation back to the main page: generic helper first,
        # then each of the two known screen-specific back buttons.
        self.sj.driver.implicitly_wait(2)
        flag = self.sj.app_back_main_page()
        if not flag:
            try:
                self.sj.driver.find_element_by_id('com.fhsj.jbqbl.launcher:id/local_device_back_btn').click()
                flag = self.sj.app_is_main_page()
            except:
                pass
        if not flag:
            try:
                self.sj.driver.find_element_by_id("com.fhsj.jbqbl.launcher:id/menu_back_btn").click()
                flag = self.sj.app_is_main_page()
            except:
                pass

    def test_device_manage_84(self):
        """Register, then unregister, a single manual control panel."""
        user = self.sj.app_login(user_num=user_num, password=user_password)
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        self.sj.app_local_device_handler(register_num=3)
        self.sj.app_local_device('save')
        # Back to the main page, then reset the panel.
        self.sj.app_back_main_page()
        sleep(1)
        self.sj.app_main_page(text='复位')
        self.sj.driver.find_element_by_name('确定').click()
        sleep(5)
        # Verify the registration took effect.
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        checkboxes = self.sj.driver.find_elements_by_class_name('android.widget.CheckBox')
        is_checked = checkboxes[3].get_attribute('checked')
        self.assertEqual(is_checked, 'true')
        # Unregister.
        self.sj.app_local_device_handler(register_num=3)
        self.sj.app_local_device('save')
        # Back to the main page, then reset the panel.
        self.sj.app_back_main_page()
        self.sj.app_main_page(text='复位')
        self.sj.driver.find_element_by_name('确定').click()
        sleep(5)
        # Verify the unregistration took effect.
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        checkboxes = self.sj.driver.find_elements_by_class_name('android.widget.CheckBox')
        is_checked = checkboxes[3].get_attribute('checked')
        self.assertEqual(is_checked, 'false')

    def test_device_manage_85(self):
        """Batch register, then batch unregister, all manual control panels."""
        user = self.sj.app_login(user_num=user_num, password=user_password)
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        self.sj.app_local_device_handler(is_batch_register=True)
        sleep(0.5)
        # Screen bounds of the range-input fields:
        # from=[548,484][570,530] no.=[783,484][805,530] to=[908,484][930,530] no.=[1148,484][1170,530]
        self.sj.driver.tap([(1050, 458)])
        # sleep(1)
        # TouchAction(self.sj.driver).tap(x=1020, y=458)
        self.sj.app_handler_batch_register(is_register=True)
        self.sj.app_local_device('save')
        # Back to the main page, then reset the panel.
        self.sj.app_menu(is_click_menu=False, is_click_back=True)
        self.sj.app_main_page(text='复位')
        self.sj.driver.find_element_by_name('确定').click()
        sleep(30)  # wait 30 s for the fault list on the home page to finish rendering
        # Verify every checkbox is registered, scrolling page by page.
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        checkboxes = self.sj.driver.find_elements_by_class_name('android.widget.CheckBox')
        for k in range(4):
            for i in range(8):
                self.assertEqual(checkboxes[i].get_attribute('checked'), 'true')
            self.sj.driver.swipe(1560, 800, 900, 367, 1000)
        # ------------------------ unregister -----------------------------
        # Re-enter the batch-registration screen.
        self.sj.app_local_device_handler(is_batch_register=True)
        # Field bounds: from=[548,483][570,529] no.=[783,483][805,529] to=[908,483][930,529] no.=[1148,483][1170,529]
        sleep(3)
        self.sj.driver.tap([(1050, 458)])
        # Unregister.
        self.sj.app_handler_batch_register(is_register=False)
        self.sj.app_local_device('save')
        # Back to the main page, then reset the panel.
        sleep(1)
        self.sj.app_menu(is_click_menu=False, is_click_back=True)
        self.sj.app_main_page(text='复位')
        self.sj.driver.find_element_by_name('确定').click()
        sleep(30)  # wait 30 s for the fault list on the home page to finish rendering
        # Verify every checkbox is unregistered, scrolling page by page.
        self.sj.app_menu('device')
        self.sj.app_local_device('handler')
        checkboxes = self.sj.driver.find_elements_by_class_name('android.widget.CheckBox')
        for k in range(4):
            for i in range(8):
                self.assertEqual(checkboxes[i].get_attribute('checked'), 'false')
            self.sj.driver.swipe(1560, 800, 900, 367, 1000)

    def test_device_manage_86(self):
        """Register/unregister a single broadcast device, saving each time."""
        user = self.sj.app_login(user_num=user_num, password=user_password)
        self.sj.app_menu('device')
        self.sj.app_local_device('broadcast')
        self.sj.app_local_device_broadcast(7, point=60, is_register=True)
        try:
            # Verify inside the linkage-programming screen.
            self.sj.app_menu(is_click_menu=False, menu_id='linkage')
            self.sj.app_linkage_programme('add')
            self.sj.app_linkage_add('广播控制点设置')
            self.sj.app_linkage_add_broadcast(is_input=True)
            sleep(1)
            self.sj.app_linkage_add_broadcast_input(input_panel='7', input_point='1', is_submit=True)
            sleep(2)
        finally:
            # Return to the menu page.
            self.sj.driver.find_element_by_id('com.fhsj.jbqbl.launcher:id/linkage_broadcast_back_btn').click()
            self.sj.driver.find_element_by_id('com.fhsj.jbqbl.launcher:id/linkage_programme_back_btn').click()
        # Unregister.
        self.sj.app_menu(is_click_menu=False, menu_id='device')
        self.sj.app_local_device('broadcast')
        self.sj.app_local_device_broadcast(7, point=60, is_register=False)

    def test_device_manage_87(self):
        """Broadcast control panel accepts each supported point count."""
        user = self.sj.app_login(user_num=user_num, password=user_password)
        self.sj.app_menu('device')
        self.sj.app_local_device('broadcast')
        self.sj.app_local_device_broadcast(5, point=30, is_register=True)
        # Verify the value is 30 points.
        self.sj.app_menu(is_click_menu=False, menu_id='device')
        self.sj.app_local_device('broadcast')
        self.assertTrue(self.sj.app_local_device_broadcast_check_point(5, 30))
        # Register as 60 points.
        self.sj.app_local_device_broadcast(5, point=60, is_register=True)
        # Verify the value is 60 points.
        self.sj.app_menu(is_click_menu=False, menu_id='device')
        self.sj.app_local_device('broadcast')
        self.assertTrue(self.sj.app_local_device_broadcast_check_point(5, 60))
        # Register as 90 points.
        self.sj.app_local_device_broadcast(5, point=90, is_register=True)
        # Verify the value is 90 points.
        self.sj.app_menu(is_click_menu=False, menu_id='device')
        self.sj.app_local_device('broadcast')
        self.assertTrue(self.sj.app_local_device_broadcast_check_point(5, 90))
        # Cleanup: unregister broadcast panel no. 5.
        self.sj.app_local_device_broadcast(5, 30, is_register=False)
if __name__ == "__main__":
# unittest.main(verbosity=2)
run_suite(DeviceManage)
| [
"thomasyoung76@163.com"
] | thomasyoung76@163.com |
e66c7c0d19ad240c8ffa6d62cd6fc1363b0b85b8 | e8a5ed62dcfa9935d8c9fae0b9593a79ace1f780 | /hw2/hw2_generative_test.py | 00a03dc3d6bab533e5ec22d978e1d417271a00a5 | [] | no_license | Cooper111/ML2017FALL-2 | 47d690546bba963a07a86ce6cbca0ce168d704f1 | c989e949bf8cef0380947774f6699502b81efafe | refs/heads/master | 2020-04-02T17:14:16.204763 | 2018-07-09T12:21:48 | 2018-07-09T12:21:48 | 154,649,133 | 1 | 0 | null | 2018-10-25T10:02:32 | 2018-10-25T10:02:32 | null | UTF-8 | Python | false | false | 2,180 | py | # #!/bin/bash
# python hw2_generative_test.py ./train.csv ./test.csv ./X_train ./Y_train ./X_test ./prediction.csv
# python hw2_generative_train.py ./train.csv ./test.csv ./X_train ./Y_train ./X_test ./prediction.csv
import sys
import numpy as np
import pandas
def sigmoid(z):
    """
    Numerically stable logistic function.

    :type z: float or numpy array
    :return type: float or numpy array, values in (0, 1)
    """
    # Prevent overflow: re-enabled the clip the author had commented out.
    # np.exp(-z) overflows float64 (with a RuntimeWarning) for z < ~-709;
    # sigmoid(+/-500) is already indistinguishable from 1/0 in float64.
    z = np.clip(z, -500, 500)
    # Calculate activation signal
    return 1 / (1 + np.exp(-z))
if __name__ == "__main__":
# Load file path of train.csv from arguments.
TRAIN_CSV_FILE_PATH = sys.argv[1]
# Load file path of test.csv from arguments.
TEST_CSV_FILE_PATH = sys.argv[2]
# Load file path of X_train from arguments.
X_TRAIN_FILE_PATH = sys.argv[3]
# Load file path of Y_train from arguments.
Y_TRAIN_FILE_PATH = sys.argv[4]
# Load file path of X_test from arguments.
X_TEST_FILE_PATH = sys.argv[5]
# Load file path of prediction.csv from arguments.
PREDICTION_CSV_FILE_PATH = sys.argv[6]
# Read x data to test from testing set using pandas.
x_data_test = pandas.read_csv(X_TEST_FILE_PATH).values
# Read std and mean from npy files.
x_data_train_std = np.load('generative_std.npy')
x_data_train_mean = np.load('generative_mean.npy')
# Normalization on testing set.
for n in range(x_data_test.shape[1]):
if (x_data_train_std[n] != 0):
x_data_test[:, n] = (x_data_test[:, n] - x_data_train_mean[n]) / x_data_train_std[n]
# Read weights and bias from npy files.
w = np.load('generative_weights.npy')
b = np.load('generative_bias.npy')
# Z (Gaussian Distribution).
z = np.dot(w, x_data_test.T) + b
# Put z into simoid function.
y = sigmoid(z)
# Predict y.
yPredicted = (y >= 0.5).astype(np.int)
stringID = list()
for n in range(yPredicted.shape[0]):
stringID.append(str(n + 1))
resultCSV_dict = {
"id": stringID,
"label": yPredicted.reshape(-1)
}
resultCSV = pandas.DataFrame(resultCSV_dict)
resultCSV.to_csv(PREDICTION_CSV_FILE_PATH, index=False) | [
"noreply@github.com"
] | noreply@github.com |
53e8a341e3b79a72db11aa096f172759ad5a84a7 | 55830c1bb38731e14e7e13e78bc9f3b7d85fef50 | /알고리즘&자료구조/dongbin_code/8-5.py | 44a6115785bad13bd3aea419912ce4cf89deeb1b | [] | no_license | zepetto7065/study | bfba0a994678bd985397090c84dd1966cf39f40d | ed406c37cdd02f435870f8c37a80f6b99d4c3f98 | refs/heads/main | 2022-11-05T10:22:49.240357 | 2022-11-02T11:52:12 | 2022-11-02T11:52:12 | 152,854,603 | 0 | 0 | null | 2020-06-23T03:48:59 | 2018-10-13T09:08:25 | null | UTF-8 | Python | false | false | 281 | py | x = int(input())
d = [0] * 30001
for i in range(2, x + 1) :
d[i] = d[i - 1] + 1
if i % 2 == 0 :
d[i] = min(d[i], d[i // 2] + 1)
if i % 3 == 0 :
d[i] = min(d[i], d[i // 3] + 1)
if i % 5 == 0 :
d[i] = min(d[i], d[i // 5] + 1)
print(d[x])
| [
"zepetto.yoo@gmail.com"
] | zepetto.yoo@gmail.com |
0ccb62474a0317f86dfe9138ec3b8c5878be2948 | fb00b570251ba52df467e4cc030a30e778f8a970 | /Atividade 02 - semana 04/questão4_semana4_atividade02_runcodes.py | a3048ea0063d9e885ce77e9effdf8b688eb5e1ef | [] | no_license | SirLeonardoFerreira/Atividades-ifpi | 7379f9df4640fd1ee3623d80e4341f495e855895 | e366ee3f801dc9a1876c7399a2eefd37a03d0a55 | refs/heads/master | 2023-01-05T04:03:30.774277 | 2020-11-02T00:56:10 | 2020-11-02T00:56:10 | 287,967,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | def signo(dia, mes):
if (21 <= dia <= 31 and mes == 3) or (1 <= dia <= 19 and mes == 4):
return 'Áries'
elif (20 <= dia <= 30 and mes == 4) or (1 <= dia <= 20 and mes == 5):
return 'Touro'
elif (21 <= dia <= 31 and mes == 5) or (1 <= dia <= 21 and mes == 6):
return 'Gêmeos'
elif (22 <= dia <= 30 and mes == 6) or (1 <= dia <= 22 and mes == 7):
return 'Câncer'
elif (23 <= dia <= 31 and mes == 7) or (1 <= dia <= 22 and mes == 8):
return 'Leão'
elif (23 <= dia <= 31 and mes == 8) or (1 <= dia <= 22 and mes == 9):
return 'Virgem'
elif (23 <= dia <= 30 and mes == 9) or (1 <= dia <= 22 and mes == 10):
return 'Libra'
elif (23 <= dia <= 31 and mes == 10) or (1 <= dia <= 21 and mes == 11):
return 'Escorpião'
elif (22 <= dia <= 30 and mes == 11) or (1 <= dia <= 21 and mes == 12):
return 'Sagitário'
elif (22 <= dia <= 31 and mes == 12) or (1 <= dia <= 19 and mes == 1):
return 'Capricórnio'
elif (20 <= dia <= 31 and mes == 1) or (1 <= dia <= 18 and mes == 2):
return 'Aquário'
elif (19 <= dia <= 29 and mes == 2) or (1 <= dia <= 20 and mes == 3):
return 'Peixes'
def main():
dia_nascimento = int(input())
mes_nascimento = int(input())
mensagem_signo = signo(dia_nascimento, mes_nascimento)
print(f'{mensagem_signo}')
if __name__=='__main__':
main()
| [
"lleoalves02@gmail.com"
] | lleoalves02@gmail.com |
41f1489d34806f49f1db22318bc3ec09d7de57bb | 2976c264ac4ccd966f1ff44488c6f68672d0ec4a | /form_tests.py | f67520624ced1b83ffca18e2c2fd1c7327639f4f | [
"MIT"
] | permissive | YunMeMeThaw/python_exercises | 3ccc57a456039c314a1f03993d1781e503c6aac6 | 151d5d3695d578059611ac09c94b3677442197d7 | refs/heads/master | 2020-03-19T09:13:00.548075 | 2018-06-26T16:14:12 | 2018-06-26T16:14:12 | 136,269,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)
@app.route("/hello")
def index():
name = request.args.get('name', 'Nobody')
if name:
greeting = f"Hello, {name}"
else:
greeting = "Hello World"
return render_template("index.html", greeting=greeting)
if __name__ == "__main__":
app.run()
| [
"yunmemethaw16@gmail.com"
] | yunmemethaw16@gmail.com |
6adaf26c83041f163d6f9002d77e24deeb133c0f | 30ea9abff7438755bfc8a483ae843152d3e49b9b | /力扣习题/118杨辉三角/pascalstriangle.py | 28285769718b8d071b795a07cd59ee1e588a6057 | [
"MIT"
] | permissive | houcy/AlgorithmLearning | 2dee945a4f9fefc981020c365664bcd65e5994c4 | 92e3dd6ae8d27cd8fb1a3a7035b2f7e0eb86a7dc | refs/heads/master | 2022-12-25T19:55:51.323740 | 2020-10-09T04:24:11 | 2020-10-09T04:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,049 | py | class Solution:
'''
非递归算法
'''
def generate(self, numRows: int) -> [[]]:
res = []
if numRows == 1:
res.append([1])
elif numRows > 1:
# res.append([1])
for k in range(0, numRows):
item = []
for i in range(0, k + 1):
if i == 0 or i == k:
item.append(1)
else:
item.append(res[-1][i - 1] + res[-1][i])
res.append(item[:])
item.clear()
return res
s = Solution()
print(s.generate(5))
class Solution2:
'''
递归算法
'''
def generate(self, numRows: int) -> [[]]:
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
item = []
res = self.generate(numRows - 1)
item.append(1)
for i in range(1, numRows - 1):
item.append(res[-1][i - 1] + res[-1][i])
item.append(1)
res.append(item)
return res
class Solution3:
'''
满脑子骚操作
'''
def generate(self, numRows: int) -> [[]]:
res = [
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1],
[1,5,10,10,5,1],
[1,6,15,20,15,6,1],
[1,7,21,35,35,21,7,1],
[1,8,28,56,70,56,28,8,1],
[1,9,36,84,126,126,84,36,9,1],
[1,10,45,120,210,252,210,120,45,10,1],
[1,11,55,165,330,462,462,330,165,55,11,1],
[1,12,66,220,495,792,924,792,495,220,66,12,1],
[1,13,78,286,715,1287,1716,1716,1287,715,286,78,13,1],
[1,14,91,364,1001,2002,3003,3432,3003,2002,1001,364,91,14,1],
[1,15,105,455,1365,3003,5005,6435,6435,5005,3003,1365,455,105,15,1],
[1,16,120,560,1820,4368,8008,11440,12870,11440,8008,4368,1820,560,120,16,1],
[1,17,136,680,2380,6188,12376,19448,24310,24310,19448,12376,6188,2380,680,136,17,1],
[1,18,153,816,3060,8568,18564,31824,43758,48620,43758,31824,18564,8568,3060,816,153,18,1],
[1,19,171,969,3876,11628,27132,50388,75582,92378,92378,75582,50388,27132,11628,3876,969,171,19,1],
[1,20,190,1140,4845,15504,38760,77520,125970,167960,184756,167960,125970,77520,38760,15504,4845,1140,190,20,1],
[1,21,210,1330,5985,20349,54264,116280,203490,293930,352716,352716,293930,203490,116280,54264,20349,5985,1330,210,21,1],
[1,22,231,1540,7315,26334,74613,170544,319770,497420,646646,705432,646646,497420,319770,170544,74613,26334,7315,1540,231,22,1],
[1,23,253,1771,8855,33649,100947,245157,490314,817190,1144066,1352078,1352078,1144066,817190,490314,245157,100947,33649,8855,1771,253,23,1],
[1,24,276,2024,10626,42504,134596,346104,735471,1307504,1961256,2496144,2704156,2496144,1961256,1307504,735471,346104,134596,42504,10626,2024,276,24,1],
[1,25,300,2300,12650,53130,177100,480700,1081575,2042975,3268760,4457400,5200300,5200300,4457400,3268760,2042975,1081575,480700,177100,53130,12650,2300,300,25,1],
[1,26,325,2600,14950,65780,230230,657800,1562275,3124550,5311735,7726160,9657700,10400600,9657700,7726160,5311735,3124550,1562275,657800,230230,65780,14950,2600,325,26,1],
[1,27,351,2925,17550,80730,296010,888030,2220075,4686825,8436285,13037895,17383860,20058300,20058300,17383860,13037895,8436285,4686825,2220075,888030,296010,80730,17550,2925,351,27,1],
[1,28,378,3276,20475,98280,376740,1184040,3108105,6906900,13123110,21474180,30421755,37442160,40116600,37442160,30421755,21474180,13123110,6906900,3108105,1184040,376740,98280,20475,3276,378,28,1],
[1,29,406,3654,23751,118755,475020,1560780,4292145,10015005,20030010,34597290,51895935,67863915,77558760,77558760,67863915,51895935,34597290,20030010,10015005,4292145,1560780,475020,118755,23751,3654,406,29,1]
]
return res[0:numRows] | [
"ab2defg145@gmail.com"
] | ab2defg145@gmail.com |
6b812257a2c63cbb11074411cd0612de587693e1 | 82141737c911d8e6fffb7e074ec2860a9ddfa70d | /RayIntersectScene.py | 8f7315b455e5d27c9775f82cb7346a43a98d2a4e | [] | no_license | VernBuck/ray_tracer_p3b | e40882815ed2d87a0971d92758c807ae5be2af59 | ba8adaf600e80d7ca1e012f4cc67c5096b916ab5 | refs/heads/master | 2022-01-18T01:31:48.620845 | 2019-07-22T23:32:10 | 2019-07-22T23:32:10 | 198,316,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | class RayIntersectScene(object):
#add surface material
def __init__(self,ray):
self.ray = ray
def eyeRays(self,ray):
def shadowRays(self,ray):
def reflectionRays(self,rays):
| [
"backvernon@gmail.com"
] | backvernon@gmail.com |
ec0e646f2ec63b0e75ed91ed499a604d5341c78d | c5a7f59c1f3f3da58d1799a7650d388071ad6231 | /models/teacher_role.py | 48947b676ee25a62e072fa3d23b667902a36e82b | [] | no_license | duyutc812/odoo_do_an | 9bbe88d2452dfa6c72f3d65c1772b5de61963b29 | ef12849eb65d948e190112ed45006b6961006566 | refs/heads/master | 2022-11-21T02:12:45.854524 | 2020-07-24T11:33:08 | 2020-07-24T11:33:08 | 268,104,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class TeacherRole(models.Model):
_name = 'lib.teacher.role'
_description = 'Chức vụ giảng viên'
name = fields.Char('Chức vụ giảng viên')
@api.constrains('name')
def _constraint_name(self):
if self.search([('name', 'ilike', self.name), ('id', '!=', self.id)]):
raise ValidationError(_('Tên chức vụ của giảng viên đã tồn tại!'))
@api.onchange('name')
def _onchange_name(self):
self.name = self.name.title() if self.name else ''
def unlink(self):
for rec in self:
teacher = self.env['lib.teacher'].search([
('role', '=', rec.id)
])
if teacher:
raise ValidationError(_('Không thể xoá chức vụ giảng viên khi thông tin của giảng '
'viên thuộc chức vụ này còn tồn tại!'))
return super(TeacherRole, self).unlink() | [
"65037887+duyutc812@users.noreply.github.com"
] | 65037887+duyutc812@users.noreply.github.com |
4048107e3e4ff90dee2a704098bfab7eb02eba7d | 255d5e4d83112b8696baef71408e6ed27b222c4d | /neighbour/models.py | 3dde774739c60d3ac0a20b7caa3a50aa09b792b7 | [
"MIT"
] | permissive | omarion3698/Neighbourhood | 74a1812c91785cf714437dca67ad2970bf79732d | 06ceea3dcc242c389627fbb625e834a900d0241d | refs/heads/master | 2023-01-23T11:24:50.329371 | 2020-11-03T10:28:13 | 2020-11-03T10:28:13 | 308,272,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,218 | py | from django.db import models
from django.contrib.auth.models import User
from tinymce.models import HTMLField
import datetime as dt
from django.db.models import Q
Priority=(
('Informational', 'Informational'),
('High Priority', 'High Priority'),
)
# Create your models here.
class Neighbourhood(models.Model):
neighbourhood_name = models.CharField(max_length = 60)
def __str__(self):
return self.neighbourhood_name
def create_neighbourhood(self):
self.save()
@classmethod
def delete_neighbourhood(cls, neighbourhood_name):
cls.objects.filter(neighbourhood_name=neighbourhood_name).delete()
@classmethod
def find_neighbourhood(cls, search_term):
search_results = cls.objects.filter(neighbourhood_name__icontains = search_term)
return search_results
def update_neighbourhood(self, neighbourhood_name):
self.neighbourhood_name = neighbourhood_name
self.save()
class Notifications(models.Model):
title = models.CharField(max_length=100)
notification = HTMLField()
priority = models.CharField(max_length=15, choices=Priority, default="Informational")
author = models.ForeignKey(User, on_delete=models.CASCADE)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE
)
post_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Healthservices(models.Model):
healthservices = models.CharField(max_length=100)
def __str__(self):
return self.healthservices
def save_healthservices(self):
self.save()
@classmethod
def delete_healthservices(cls, healthservices):
cls.objects.filter(healthservices=healthservices).delete()
class Business(models.Model):
logo = models.ImageField(upload_to='businesslogo/')
description = HTMLField()
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
email = models.EmailField()
address = models.CharField(max_length=100)
contact = models.IntegerField()
def __str__(self):
return self.name
class Health(models.Model):
logo = models.ImageField(upload_to='healthlogo/')
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
email = models.EmailField()
contact = models.IntegerField()
address = models.CharField(max_length=100)
Healthservices = models.ManyToManyField(Healthservices)
def __str__(self):
return self.name
class Authorities(models.Model):
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
email = models.EmailField()
contact = models.IntegerField()
address = models.CharField(max_length=100)
def __str__(self):
return self.name
class Profile(models.Model):
avatar = models.ImageField(upload_to='avatars/', blank = True)
username = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
email = models.EmailField()
bio = HTMLField()
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
def __str__(self):
return self.name
class BlogPost(models.Model):
title = models.CharField(max_length=150)
image = models.ImageField(upload_to='post/')
post = HTMLField()
username = models.ForeignKey(User, on_delete=models.CASCADE)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
post_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
@classmethod
def search_blogpost(cls, search_term):
blogs = cls.objects.filter(Q (username__username=search_term) | Q (neighbourhood__neighbourhood=search_term) | Q (title__icontains=search_term))
return blogs
class Comment(models.Model):
comment = models.CharField(max_length=300)
username = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(BlogPost, on_delete=models.CASCADE) | [
"omaribinbakarivic@gmail.com"
] | omaribinbakarivic@gmail.com |
050c411aaa9bcbc6728761a0ab7b139c4c841531 | 98d4d7a73283939892424b4299a1cf3a562cd060 | /__init__.py | 0b3774b647c7217aaa1c342499eadb948a9d4e11 | [] | no_license | phellmes/Holt-Tools | f82312d220895fc7325df7bf369896a475c8b6b4 | 065dbef26bc68841e620bb1a78238f463dc47f6c | refs/heads/main | 2023-02-24T22:57:16.163910 | 2021-02-04T13:01:54 | 2021-02-04T13:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,531 | py | #region INFORMATION
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
bl_info = {
"name" : "Holt Tools",
"author" : "Curtis Holt",
"description" : "Just some workflow tools I put together.",
"blender" : (2, 90, 1),
"version" : (0, 0, 3),
"location" : "View3D",
"warning" : "",
"category" : "Generic"
}
#endregion
#region IMPORTS
import bpy
from bpy.props import *
from bpy.types import (Panel,Menu,Operator,PropertyGroup)
from . easybpy import *
#endregion
#region PROPERTIES
class HTProperties(PropertyGroup):
#region PROPERTIES - CLEANUP
cleanup_mode : EnumProperty(
name = "Cleanup Mode",
description = "The mode of cleanup",
items=[
('OUTLINER', "Outliner", ""),
('OBJECT', "Object", ""),
('MATERIALS', "Materials", ""),
],
default = "OUTLINER"
)
autosmooth_angle : IntProperty(
name = "Autosmooth Angle",
description = "The angle for autosmoothing",
default = 60,
min = 0,
max = 180
)
#endregion
#region PROPERTIES - SELECTION
selection_mode : EnumProperty(
name = "Selection Mode",
description = "The mode of selection",
items=[
('SELECT_ALL_INCLUDING',"Select All Including",""),
('SELECT_ALL_TYPE',"Select By Type",""),
('SELECT_BY_VERTEX',"Select By Vertex Count","")
],
default = "SELECT_ALL_INCLUDING"
)
select_string : StringProperty(
name = "Select Similar String",
description = "Used for finding objects that include this in their name",
default = "Cube"
)
is_case_sensitive : BoolProperty(
name = "Is Case Sensitive",
description = "Toggles whether to consider case when comparing names",
default = True
)
select_types : EnumProperty(
name = "Select Types",
description = "Different types of object to select",
items=[
('ARMATURES',"Armatures",""),
('CAMERAS',"Cameras",""),
('CURVES',"Curves",""),
('EMPTIES',"Empties",""),
('GREASE_PENCILS',"Grease Pencils",""),
('HAIR',"Hair",""),
('LATTICES',"Lattices",""),
('LIGHTS',"Lights",""),
('LIGHT PROBES',"Light Probes",""),
('MESHES',"Meshes",""),
('METAS',"Metas",""),
('POINT_CLOUDS',"Point Clouds",""),
('SURFACES',"Surfaces",""),
('TEXT',"Text",""),
('VOLUMES',"Volumes",""),
],
default = "MESHES"
)
tag_string : StringProperty(
name = "Tag String",
description = "Tag to be added as a prefix or suffix",
default = "Tag"
)
delimiter_string : StringProperty(
name = "Delimiter String",
description = "Delimiter to use for prefixes and suffixes",
default = "_"
)
vertex_count : IntProperty(
name = "Vertex Count",
description = "Vertex count for comparing objects to choose selection",
default = 10000
)
comparison_mode : EnumProperty(
name = "Comparison Mode",
description = "Mode to compare the vertex count",
items = [
('GREATER', "Greater Than", ""),
('LESS', "Less Than", ""),
('EQUAL', "Equal To", "")
],
default = "GREATER"
)
#endregion
#region PROPERTIES - LIGHTING
light_add_global : IntProperty(
name = "Light Add Global",
description = "Value to add to all lights globally",
default = 5
)
light_multiply_global : FloatProperty(
name = "Light Multiply Global",
description = "Value to multiply light sources by",
default = 1.5
)
light_mode : EnumProperty(
name = "Light Mode",
description = "The mode for modifying light strength",
items=[
('ADDITIVE',"Additive",""),
('MULTIPLICATIVE', "Multiplicative", "")
],
default = "ADDITIVE"
)
light_target : EnumProperty(
name = "Light Target",
description = "The target for lighting changes",
items=[
('LIGHT_OBJECTS', "Light Objects", ""),
('EMISSIVE_MATERIALS',"Emissive Materials","")
],
default = "LIGHT_OBJECTS"
)
light_mat_includes : StringProperty(
name = "Material Name Includes",
description = "A string that must be included in a material name",
default = "Emis_"
)
light_node_includes : StringProperty(
name = "Node Name Includes",
description = "A string that must be included in a node name",
default = "Light_"
)
#endregion
#region PROPERTIES - OPTIMIZATION
decimate_rate : FloatProperty(
name = "Decimate Rate",
description = "The rate to quickly decimate selected object",
default = 0.1,
min = 0.0,
max = 1.0
)
#endregion
#endregion
#region OPERATORS - CLEANUP - OUTLINER
class HOLTTOOLS_OT_OrganizeOutliner(bpy.types.Operator):
# Calling Organize Outliner
bl_idname = "outliner.organize_outliner"
bl_label = "Organize Outliner"
bl_description = "Organizes the outliner into categories"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
organize_outliner()
return {'FINISHED'}
class HOLTTOOLS_OT_ConvertSuffixes(bpy.types.Operator):
# Calling Convert Suffixes
bl_idname = "object.convert_suffixes"
bl_label = "Convert Suffixes"
bl_description = "Convert .001 suffixes to _1"
bl_options = {'REGISTER','UNDO'}
def execute(self, context):
convert_suffixes()
return {'FINISHED'}
class HOLTTOOLS_OT_PurgeUnwantedData(bpy.types.Operator):
bl_idname = "object.purge_unwanted_data"
bl_label = "Purge Unwanted Data"
bl_description = "Remove all data that isn't being used"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
clear_unwanted_data()
return {'FINISHED'}
class HOLTTOOLS_OT_DeepClean(bpy.types.Operator):
# Doing a deep-clean
bl_idname = "outliner.deep_clean"
bl_label = "Deep Clean"
bl_description = "Just clean the blend file me, will ya?"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
organize_outliner()
convert_suffixes()
return {'FINISHED'}
#endregion
#region OPERATORS - CLEANUP - OBJECT
class HOLTTOOLS_OT_SetAutoSmooth(bpy.types.Operator):
bl_idname = "object.set_auto_smooth"
bl_label = "Set Auto Smooth"
bl_description = "Sets auto smooth true and gives angle"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
ht_tool = context.scene.ht_tool
set_smooth_angle(ao(), ht_tool.autosmooth_angle)
return {'FINISHED'}
#endregion
#region OPERATORS - CLEANUP - MATERIALS
class HOLTTOOLS_OT_RemoveUnusedSlots(bpy.types.Operator):
bl_idname = "object.remove_unused_slots"
bl_label = "Remove Unused Slots"
bl_description = "Removes unused material slots from selected object"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
remove_unused_material_slots()
return {'FINISHED'}
#endregion
#region OPERATORS - SELECTION
class HOLTTOOLS_OT_SelectAllIncluding(bpy.types.Operator):
# Calling Select All Including
bl_idname = "object.select_all_including"
bl_label = "Select All Including"
bl_description = "Selects all objects including ht_tools.select_type"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
if ht_tool.is_case_sensitive:
select_objects_including(ht_tool.select_string, True)
else:
select_objects_including(ht_tool.select_string, False)
return {'FINISHED'}
class HOLTTOOLS_OT_SelectAllType(bpy.types.Operator):
# Calling Select All Type
bl_idname = "object.select_all_type"
bl_label = "Select All Type"
bl_description = "Selects all objects of ht_tools.select_string"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
if ht_tool.select_types == "MESHES":
select_all_meshes()
if ht_tool.select_types == "CURVES":
select_all_curves()
if ht_tool.select_types == "SURFACES":
select_all_surfaces()
if ht_tool.select_types == "METAS":
select_all_metas()
if ht_tool.select_types == "TEXT":
select_all_text()
if ht_tool.select_types == "HAIR":
select_all_hair()
if ht_tool.select_types == "POINT_CLOUDS":
select_all_point_clouds()
if ht_tool.select_types == "VOLUMES":
select_all_volumes()
if ht_tool.select_types == "ARMATURES":
select_all_armatures()
if ht_tool.select_types == "LATTICES":
select_all_lattices()
if ht_tool.select_types == "EMPTIES":
select_all_empties()
if ht_tool.select_types == "GREASE_PENCILS":
select_all_grease_pencils()
if ht_tool.select_types == "CAMERAS":
select_all_cameras()
if ht_tool.select_types == "LIGHTS":
select_all_lights()
if ht_tool.select_types == "LIGHT PROBES":
select_all_light_probes()
return {'FINISHED'}
class HOLTTOOLS_OT_NameAddPrefix(bpy.types.Operator):
bl_idname = "object.name_add_prefix"
bl_label = "Name Add Prefix"
bl_description = "Adds the tag string as a prefix"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
add_prefix_to_name(so(), ht_tool.tag_string, ht_tool.delimiter_string)
return {'FINISHED'}
class HOLTTOOLS_OT_NameAddSuffix(bpy.types.Operator):
bl_idname = "object.name_add_suffix"
bl_label = "Name Add Suffix"
bl_description = "Adds the tag string as a suffix"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
add_suffix_to_name(so(), ht_tool.tag_string, ht_tool.delimiter_string)
return {'FINISHED'}
class HOLTTOOLS_OT_SelectByVertexCount(bpy.types.Operator):
bl_idname = "object.select_by_vertex_count"
bl_label = "Select By Vertex Count"
bl_description = "Selects objects by comparing given vertex count"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
select_objects_by_vertex(ht_tool.vertex_count, ht_tool.comparison_mode)
return {'FINISHED'}
#endregion
#region OPERATORS - LIGHTING
class HOLTTOOLS_OT_AddLightIntensityGlobal(bpy.types.Operator):
# Add Light Intensity Global
bl_idname = "object.add_light_intensity_global"
bl_label = "Add Light Intensity Global"
bl_description = "Adds intensity to all lights in the scene"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
if ht_tool.light_target == "LIGHT_OBJECTS":
select_all_lights()
light_power_add(ht_tool.light_add_global)
if ht_tool.light_target == "EMISSIVE_MATERIALS":
mats = get_all_materials()
for m in mats:
if ht_tool.light_mat_includes in m.name:
nodes = get_nodes(m)
for n in nodes:
if n.type == 'EMISSION':
if ht_tool.light_node_includes in n.name:
if ht_tool.light_node_includes in n.name:
n.inputs[1].default_value += ht_tool.light_add_global
if n.type == 'BSDF_PRINCIPLED':
if ht_tool.light_node_includes in n.name:
n.inputs[18].default_value += ht_tool.light_add_global
return {'FINISHED'}
class HOLTTOOLS_OT_SubtractLightIntensityGlobal(bpy.types.Operator):
# Add Light Intensity Global
bl_idname = "object.subtract_light_intensity_global"
bl_label = "Subtract Light Intensity Global"
bl_description = "Subtracts intensity to all lights in the scene"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
if ht_tool.light_target == "LIGHT_OBJECTS":
select_all_lights()
light_power_add(-ht_tool.light_add_global)
if ht_tool.light_target == "EMISSIVE_MATERIALS":
mats = get_all_materials()
for m in mats:
if ht_tool.light_mat_includes in m.name:
nodes = get_nodes(m)
for n in nodes:
if n.type == 'EMISSION':
if ht_tool.light_node_includes in n.name:
n.inputs[1].default_value -= ht_tool.light_add_global
if n.type == 'BSDF_PRINCIPLED':
if ht_tool.light_node_includes in n.name:
n.inputs[18].default_value -= ht_tool.light_add_global
return {'FINISHED'}
class HOLTTOOLS_OT_MultiplyLightIntensityGlobal(bpy.types.Operator):
# Multiply Light Intensity Global
bl_idname = "object.multiply_light_intensity_global"
bl_label = "Multiply Light Intensity Global"
bl_description = "Multiplies intensity of all lights in the scene"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
if ht_tool.light_target == "LIGHT_OBJECTS":
select_all_lights()
light_power_multiply(ht_tool.light_multiply_global)
if ht_tool.light_target == "EMISSIVE_MATERIALS":
mats = get_all_materials()
for m in mats:
if ht_tool.light_mat_includes in m.name:
nodes = get_nodes(m)
for n in nodes:
if n.type == 'EMISSION':
if ht_tool.light_node_includes in n.name:
n.inputs[1].default_value *= ht_tool.light_multiply_global
if n.type == 'BSDF_PRINCIPLED':
if ht_tool.light_node_includes in n.name:
n.inputs[18].default_value *= ht_tool.light_multiply_global
return{'FINISHED'}
#endregion
#region OPERATORS - OPTIMIZATION
class HOLTTOOLS_OT_QuickDecimate(bpy.types.Operator):
# Quick Decimate
bl_idname = "object.quick_decimate"
bl_label = "Quick Decimate"
bl_description = "Quickly decimates object based on ht_tool.decimate_rate"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
objs = so()
for o in objs:
mod = add_decimate(o)
mod.ratio = ht_tool.decimate_rate
apply_all_modifiers(o)
return {'FINISHED'}
#endregion
#region PANELS
class OBJECT_PT_HoltToolsCleanup(Panel):
bl_idname = "OBJECT_PT_HoltToolsCleanup"
bl_label = "Holt Tools - Cleanup"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Holt Tools"
def draw(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
box = layout.box()
box.label(text="Cleanup Mode")
col = box.column()
row = col.prop(ht_tool, "cleanup_mode", text="")
row = col.row(align=True)
if ht_tool.cleanup_mode == 'OUTLINER':
row.operator("outliner.organize_outliner")
row = col.row(align=True)
row.operator("object.convert_suffixes")
row = col.row(align=True)
row.operator("object.purge_unwanted_data")
row = col.row(align=True)
row.operator("outliner.deep_clean", text = "^ Deep Clean ^")
row = col.row(align=True)
if ht_tool.cleanup_mode == "OBJECT":
if context.active_object != None:
if context.active_object.mode == 'EDIT':
row = col.row()
row.separator()
row = col.row()
row.operator("mesh.normals_make_consistent", text="Recalculate Normals")
row = col.row()
row.operator("mesh.remove_doubles", text="Merge By Distance")
row = col.row()
row.separator()
if context.active_object != None:
if context.active_object.mode == 'OBJECT':
row = col.label(text="( more in edit mode )")
row = col.row()
row.operator("mesh.customdata_custom_splitnormals_clear", text="Clean Custom Split Normals")
row = col.row()
row.operator("anim.keyframe_clear_v3d", text="Clear Keyframes")
row = col.row()
row.label(text="Auto Smooth")
row = col.row()
row.prop(ht_tool, "autosmooth_angle", text="")
row = col.row()
row.operator("object.set_auto_smooth", text="^ Set Auto Smooth ^")
pass
if ht_tool.cleanup_mode == "MATERIALS":
row = col.row()
row.operator("object.remove_unused_slots", text="Remove Unused Slots")
pass
class OBJECT_PT_HoltToolsSelection(Panel):
bl_idname = "OBJECT_PT_HoltToolsSelection"
bl_label = "Holt Tools - Selection"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Holt Tools"
def draw(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
box = layout.box()
box.label(text="Selection Modes")
col = box.column()
row = col.row(align=True)
row.prop(ht_tool,"selection_mode", text="")
row = col.row(align=True)
if ht_tool.selection_mode == 'SELECT_ALL_INCLUDING':
# Select all including
box2 = box.box()
col = box2.column()
row = col.row(align=True)
row.label(text="Select all objects including:")
row = col.row(align=True)
row.prop(ht_tool, "select_string", text = "")
row = col.row(align=True)
row.prop(ht_tool, "is_case_sensitive", text="Case Sensitive")
row = col.row(align=True)
row.operator("object.select_all_including", text="^ Select All Including ^")
row = col.row(align=True)
# Tagging objects
box2 = box.box()
col = box2.column()
row = col.row(align=True)
row.label(text="Tag Objects")
row = col.row(align=True)
row.prop(ht_tool, "tag_string", text="")
row.scale_x=-20
row.prop(ht_tool, "delimiter_string", text="")
row = col.row(align=True)
row.operator("object.name_add_prefix", text="Prefix")
row.operator("object.name_add_suffix", text="Suffix")
if ht_tool.selection_mode == 'SELECT_ALL_TYPE':
# Select all by type
row.label(text="Select all objects of this type:")
row = col.row(align=True)
row.prop(ht_tool, "select_types", text="")
row = col.row(align=True)
row.separator()
row = col.row(align=True)
row.operator("object.select_all_type", text="^ Select All Type ^")
if ht_tool.selection_mode == "SELECT_BY_VERTEX":
# Select by vertex
row.label(text="Comparison:")
row = col.row(align=True)
row.prop(ht_tool, "comparison_mode", text="")
row = col.row(align=True)
row.label(text="Vertex Count:")
row = col.row(align=True)
row.prop(ht_tool, "vertex_count", text="")
row = col.row(align=True)
row.separator()
row = col.row(align=True)
row.operator("object.select_by_vertex_count", text="^ Select ^")
class OBJECT_PT_HoltToolsLighting(Panel):
bl_idname = "OBJECT_PT_HoltToolsLighting"
bl_label = "Holt Tools - Lighting"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Holt Tools"
def draw(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
box = layout.box()
#box.label(text="Global Lighting Mode")
col = box.column()
row = col.row(align=True)
row.label(text="Global Lighting Mode")
row = col.row(align=True)
row.prop(ht_tool, "light_mode", text="")
row = col.row(align=True)
row.label(text="Target")
row = col.row(align=True)
row.prop(ht_tool, "light_target", text="")
row = col.row(align=True)
if ht_tool.light_target == "EMISSIVE_MATERIALS":
row.label(text="Material Name Includes:")
row = col.row(align=True)
row.prop(ht_tool, "light_mat_includes", text="")
row = col.row(align=True)
row.label(text="Node Name Includes:")
row = col.row(align=True)
row.prop(ht_tool, "light_node_includes", text="")
row = col.row(align=True)
row.separator()
if ht_tool.light_mode == "ADDITIVE":
row = col.row(align=True)
row.prop(ht_tool, "light_add_global", text="")
row = col.row(align=True)
row.operator("object.subtract_light_intensity_global", text="-")
row.operator("object.add_light_intensity_global", text="+")
if ht_tool.light_mode == "MULTIPLICATIVE":
row = col.row(align=True)
row.scale_x = 20
row.prop(ht_tool, "light_multiply_global", text="")
row.scale_x = 0
row.operator("object.multiply_light_intensity_global", text="X")
class OBJECT_PT_HoltToolsOptimization(Panel):
bl_idname = "OBJECT_PT_HoltToolsOptimization"
bl_label = "Holt Tools - Optimization"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Holt Tools"
def draw(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
b = layout.box()
b.label(text="Quick Decimation")
column = b.column()
row = column.row()
row.prop(ht_tool, "decimate_rate", text="")
row = column.row()
row.operator("object.quick_decimate", text="Quick Decimate")
class OBJECT_PT_HoltToolsInfo(Panel):
bl_idname = "OBJECT_PT_HoltToolsInfo"
bl_label = "Holt Tools - Info"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Holt Tools"
def draw(self, context):
layout = self.layout
scene = context.scene
ht_tool = scene.ht_tool
#---
#Operations Layout
box = layout.box()
box.label(text = "Created by Curtis Holt")
#---------- Box - Support
b = box.box()
b.label(text="Support")
column = b.column()
row = column.row()
row.scale_y = 1.2
row.operator("wm.url_open", text = "Donate", icon='WORLD').url = "https://www.curtisholt.online/donate"
#----------
#---------- Box - Social
b = box.box()
b.label(text="Social")
column = b.column()
row = column.row()
row.scale_y = 1.2
row.operator("wm.url_open", text="YouTube", icon='FILE_MOVIE').url = "https://www.youtube.com/CurtisHolt"
row.operator("wm.url_open", text="Twitter", icon='COMMUNITY').url = "https://www.twitter.com/curtisjamesholt"
row = column.row()
row.scale_y = 1.2
row.operator("wm.url_open", text="Website", icon='WORLD').url = "https://www.curtisholt.online"
row.operator("wm.url_open", text="Instagram", icon='COMMUNITY').url = "https://www.instagram.com/curtisjamesholt"
#endregion
#region REGISTRATION
# Every class this add-on registers, in dependency order: the PropertyGroup
# first (register() attaches it to Scene via a PointerProperty), then the
# operators, and finally the UI panels that reference them.
classes = (
    HTProperties,
    # Cleanup
    HOLTTOOLS_OT_OrganizeOutliner,
    HOLTTOOLS_OT_ConvertSuffixes,
    HOLTTOOLS_OT_DeepClean,
    HOLTTOOLS_OT_PurgeUnwantedData,
    HOLTTOOLS_OT_SetAutoSmooth,
    HOLTTOOLS_OT_RemoveUnusedSlots,
    # Selection
    HOLTTOOLS_OT_SelectAllIncluding,
    HOLTTOOLS_OT_SelectAllType,
    HOLTTOOLS_OT_NameAddPrefix,
    HOLTTOOLS_OT_NameAddSuffix,
    HOLTTOOLS_OT_SelectByVertexCount,
    # Lighting
    HOLTTOOLS_OT_AddLightIntensityGlobal,
    HOLTTOOLS_OT_SubtractLightIntensityGlobal,
    HOLTTOOLS_OT_MultiplyLightIntensityGlobal,
    # Optimization
    HOLTTOOLS_OT_QuickDecimate,
    # Panels
    OBJECT_PT_HoltToolsCleanup,
    OBJECT_PT_HoltToolsSelection,
    OBJECT_PT_HoltToolsLighting,
    OBJECT_PT_HoltToolsOptimization,
    OBJECT_PT_HoltToolsInfo
)
def register():
    """Register every add-on class and attach the settings group to Scene."""
    for addon_class in classes:
        bpy.utils.register_class(addon_class)
    # Per-scene settings container used by all panels (scene.ht_tool).
    bpy.types.Scene.ht_tool = PointerProperty(type=HTProperties)
def unregister():
    """Tear everything down in reverse registration order."""
    for addon_class in reversed(classes):
        bpy.utils.unregister_class(addon_class)
    del bpy.types.Scene.ht_tool
if __name__ == "__main__":
    # Allows running the add-on directly from Blender's text editor.
    register()
#endregion
"29793215+curtisjamesholt@users.noreply.github.com"
] | 29793215+curtisjamesholt@users.noreply.github.com |
c4004c31f6f741fa0ea0b2920df0bf29178c8391 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/egtpglobalsbase/egtpglobalsbase.py | 7fa4e1ff57e59ad6470ff3f0e36395a91ba0bda3 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,711 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class EgtpGlobalsBase(Base):
    """
    The EgtpGlobalsBase class encapsulates a list of egtpGlobalsBase resources that are managed by the user.
    A list of resources can be retrieved from the server using the EgtpGlobalsBase.find() method.
    The list can be managed by using the EgtpGlobalsBase.add() and EgtpGlobalsBase.remove() methods.
    """
    # Auto-generated REST wrapper: every property below simply maps a Python
    # attribute to its server-side SDM attribute via _SDM_ATT_MAP.
    __slots__ = ()
    _SDM_NAME = 'egtpGlobalsBase'
    # Python-side name -> server-side (camelCase) SDM attribute name.
    _SDM_ATT_MAP = {
        'EnableDynamicQosCtrl': 'enableDynamicQosCtrl',
        'EnableGatewayArp': 'enableGatewayArp',
        'GatewayArpRequestRate': 'gatewayArpRequestRate',
        'MaxMbrUAndD': 'maxMbrUAndD',
        'MaxOutstandingGatewayArpRequests': 'maxOutstandingGatewayArpRequests',
        'MaxOutstandingReleases': 'maxOutstandingReleases',
        'MaxOutstandingRequests': 'maxOutstandingRequests',
        'ObjectId': 'objectId',
        'SendOneArpFromEachInterface': 'sendOneArpFromEachInterface',
        'SetupRateInitial': 'setupRateInitial',
        'TeardownRateInitial': 'teardownRateInitial',
        'TsSpec': 'tsSpec',
        'UseMaxRatesForDcp': 'useMaxRatesForDcp',
    }
    # No enum-valued attributes on this resource.
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(EgtpGlobalsBase, self).__init__(parent, list_op)

    @property
    def EnableDynamicQosCtrl(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Enable Dynamic QoS Enforcement
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableDynamicQosCtrl'])
    @EnableDynamicQosCtrl.setter
    def EnableDynamicQosCtrl(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['EnableDynamicQosCtrl'], value)

    @property
    def EnableGatewayArp(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: When enabled, every IP address will ARP the specified gateway.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableGatewayArp'])
    @EnableGatewayArp.setter
    def EnableGatewayArp(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['EnableGatewayArp'], value)

    @property
    def GatewayArpRequestRate(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Maximum ARP request rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['GatewayArpRequestRate'])
    @GatewayArpRequestRate.setter
    def GatewayArpRequestRate(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['GatewayArpRequestRate'], value)

    @property
    def MaxMbrUAndD(self):
        # type: () -> int
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxMbrUAndD'])
    @MaxMbrUAndD.setter
    def MaxMbrUAndD(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['MaxMbrUAndD'], value)

    @property
    def MaxOutstandingGatewayArpRequests(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingGatewayArpRequests'])
    @MaxOutstandingGatewayArpRequests.setter
    def MaxOutstandingGatewayArpRequests(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingGatewayArpRequests'], value)

    @property
    def MaxOutstandingReleases(self):
        # type: () -> int
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingReleases'])
    @MaxOutstandingReleases.setter
    def MaxOutstandingReleases(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingReleases'], value)

    @property
    def MaxOutstandingRequests(self):
        # type: () -> int
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingRequests'])
    @MaxOutstandingRequests.setter
    def MaxOutstandingRequests(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingRequests'], value)

    @property
    def ObjectId(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Unique identifier for this object
        """
        # Read-only: assigned by the server, hence no setter.
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])

    @property
    def SendOneArpFromEachInterface(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: When set, each interface will send one ARP request.
        """
        return self._get_attribute(self._SDM_ATT_MAP['SendOneArpFromEachInterface'])
    @SendOneArpFromEachInterface.setter
    def SendOneArpFromEachInterface(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['SendOneArpFromEachInterface'], value)

    @property
    def SetupRateInitial(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Initial setup rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['SetupRateInitial'])
    @SetupRateInitial.setter
    def SetupRateInitial(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['SetupRateInitial'], value)

    @property
    def TeardownRateInitial(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Initial teardown rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['TeardownRateInitial'])
    @TeardownRateInitial.setter
    def TeardownRateInitial(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['TeardownRateInitial'], value)

    @property
    def TsSpec(self):
        # type: () -> str
        """
        Returns
        -------
        - str:
        """
        return self._get_attribute(self._SDM_ATT_MAP['TsSpec'])
    @TsSpec.setter
    def TsSpec(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['TsSpec'], value)

    @property
    def UseMaxRatesForDcp(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Use default rates (DCP mode)
        """
        return self._get_attribute(self._SDM_ATT_MAP['UseMaxRatesForDcp'])
    @UseMaxRatesForDcp.setter
    def UseMaxRatesForDcp(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['UseMaxRatesForDcp'], value)

    def update(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        # type: (bool, bool, int, int, int, int, int, bool, int, int, str, bool) -> EgtpGlobalsBase
        """Updates egtpGlobalsBase resource on the server.

        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Only the named arguments actually passed by the caller are sent.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        # type: (bool, bool, int, int, int, int, int, bool, int, int, str, bool) -> EgtpGlobalsBase
        """Adds a new egtpGlobalsBase resource on the server and adds it to the container.

        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)

        Returns
        -------
        - self: This instance with all currently retrieved egtpGlobalsBase resources using find and the newly added egtpGlobalsBase resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained egtpGlobalsBase resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, ObjectId=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        # type: (bool, bool, int, int, int, int, int, str, bool, int, int, str, bool) -> EgtpGlobalsBase
        """Finds and retrieves egtpGlobalsBase resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve egtpGlobalsBase resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all egtpGlobalsBase resources from the server.

        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - ObjectId (str): Unique identifier for this object
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)

        Returns
        -------
        - self: This instance with matching egtpGlobalsBase resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of egtpGlobalsBase data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the egtpGlobalsBase resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
f5d4cf6f485d762c5643ead19f6f44edcc5d2d96 | 0485a490f466bd1d02eaae96d277888781208c0e | /tests/single_instruction_translation_validation/mcsema/register-variants/movb_r8_rh/Output/test-z3.py | e85a3ad441dea371dd1ab92ebdf22d518b6ae522 | [
"LicenseRef-scancode-unknown-license-reference",
"NCSA"
] | permissive | Mthandazo42/validating-binary-decompilation | c0e2d54cd79e609bfa35802975bddfa52e646fad | c0fcd6f099e38195dcbbac9e8c13a825865c5cb5 | refs/heads/master | 2022-11-11T13:18:13.033044 | 2020-06-25T05:49:01 | 2020-06-25T05:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,269 | py | #############################################
######## Auto Generated Proof Scripts #######
#############################################
import z3
import sys
# Tri-state overall result: True (all checks pass), False (a counter-example
# was found), or the string "Unknown" (the solver timed out on some check).
status=True
test_name="UnK"  # placeholder shown when no test name is passed on the command line
if(len(sys.argv) > 1):
    test_name = sys.argv[1]
def solve(msg, lvar, xvar, s):
global status
s.set("timeout", 60000)
res = s.check()
if(z3.unknown == res):
print(test_name + "::" + msg + "::unk")
status = "Unknown"
if(z3.sat == res):
if("UNDEF" in xvar.sexpr()):
print(test_name + "::" + msg + "::undef-sat")
else:
m = s.model()
print(test_name + "::" + msg + "::sat")
print("\n")
print("query", s)
print("\n")
print("model", m)
print("\n")
print("xvar =", m.evaluate(xvar))
print("lvar =", m.evaluate(lvar))
print("\n")
status = False
##############################
## X86 specific variables ####
##############################
# VX_* is one copy of the machine state; VL_* (below) is the second copy the
# script compares it against.  NOTE(review): which side is the lifted LLVM
# state and which the reference x86 semantics is not visible here — confirm
# against the harness that generates these scripts.
### GPRs
VX_RAX = z3.BitVec('VX_RAX',64)
VX_RBX = z3.BitVec('VX_RBX',64)
VX_RCX = z3.BitVec('VX_RCX',64)
VX_RDX = z3.BitVec('VX_RDX',64)
VX_RSI = z3.BitVec('VX_RSI',64)
VX_RDI = z3.BitVec('VX_RDI',64)
### Flags (1-bit each on this side)
VX_CF = z3.BitVec('VX_CF',1)
VX_PF = z3.BitVec('VX_PF',1)
VX_ZF = z3.BitVec('VX_ZF',1)
VX_SF = z3.BitVec('VX_SF',1)
VX_AF = z3.BitVec('VX_AF',1)
VX_OF = z3.BitVec('VX_OF',1)
### YMM Registers
VX_YMM1 = z3.BitVec('VX_YMM1', 256)
VX_YMM2 = z3.BitVec('VX_YMM2', 256)
## Undef
VX_UNDEF_1 = z3.BitVec('VX_UNDEF_1', 1)
VX_UNDEF_BOOL = z3.Bool('VX_UNDEF_BOOL')
##############################
## X86 specific variables ####
##############################
### GPRs
VL_RAX = z3.BitVec('VL_RAX',64)
VL_RBX = z3.BitVec('VL_RBX',64)
VL_RCX = z3.BitVec('VL_RCX',64)
VL_RDX = z3.BitVec('VL_RDX',64)
VL_RSI = z3.BitVec('VL_RSI',64)
VL_RDI = z3.BitVec('VL_RDI',64)
### Flags (modelled as 8-bit bytes on this side, constrained to 0/1 below)
VL_CF = z3.BitVec('VL_CF',8)
VL_PF = z3.BitVec('VL_PF',8)
VL_ZF = z3.BitVec('VL_ZF',8)
VL_SF = z3.BitVec('VL_SF',8)
VL_AF = z3.BitVec('VL_AF',8)
VL_OF = z3.BitVec('VL_OF',8)
### YMM Registers (four 64-bit lanes per register)
VL_YMM1_0 = z3.BitVec('VL_YMM1_0', 64)
VL_YMM1_1 = z3.BitVec('VL_YMM1_1', 64)
VL_YMM1_2 = z3.BitVec('VL_YMM1_2', 64)
VL_YMM1_3 = z3.BitVec('VL_YMM1_3', 64)
VL_YMM2_0 = z3.BitVec('VL_YMM2_0', 64)
VL_YMM2_1 = z3.BitVec('VL_YMM2_1', 64)
VL_YMM2_2 = z3.BitVec('VL_YMM2_2', 64)
VL_YMM2_3 = z3.BitVec('VL_YMM2_3', 64)
##############################
## Proof variables ###########
##############################
# Fresh variables equated to both sides of each check (register/flag/ymm).
V_R = z3.BitVec('V_R',64)
V_F = z3.BitVec('V_F',1)
V_Y = z3.BitVec('V_Y',256)
## Solver instance
s = z3.Solver()
##############################
## Default constraints #######
##############################
# Pre-state equivalence: both state copies start out identical.
### GPRs
s.add(VX_RAX == VL_RAX)
s.add(VX_RBX == VL_RBX)
s.add(VX_RCX == VL_RCX)
s.add(VX_RDX == VL_RDX)
s.add(VX_RDI == VL_RDI)
s.add(VX_RSI == VL_RSI)
### Flags
s.add(z3.Or(VL_CF == 0, VL_CF == 1))
s.add(z3.Or(VL_ZF == 0, VL_ZF == 1))
s.add(z3.Or(VL_PF == 0, VL_PF == 1))
s.add(z3.Or(VL_SF == 0, VL_SF == 1))
s.add(z3.Or(VL_AF == 0, VL_AF == 1))
s.add(z3.Or(VL_OF == 0, VL_OF == 1))
s.add(z3.Extract(0,0, VL_CF) == VX_CF)
s.add(z3.Extract(0,0, VL_SF) == VX_SF)
s.add(z3.Extract(0,0, VL_ZF) == VX_ZF)
s.add(z3.Extract(0,0, VL_PF) == VX_PF)
s.add(z3.Extract(0,0, VL_AF) == VX_AF)
s.add(z3.Extract(0,0, VL_OF) == VX_OF)
### Ymms
s.add(z3.Concat(VL_YMM1_3, VL_YMM1_2, VL_YMM1_1, VL_YMM1_0) == VX_YMM1)
s.add(z3.Concat(VL_YMM2_3, VL_YMM2_2, VL_YMM2_1, VL_YMM2_0) == VX_YMM2)
## =******= flag / register equivalence checks =******=
# The ten checks below all follow the same push/assert/solve/pop shape; the
# helpers remove six near-identical copies while preserving the original
# check order (AF, CF, OF, PF, RAX, RBX, RCX, RDX, SF, ZF) and the exact
# expressions handed to the solver.

def check_flag(name, vl_flag, vx_flag):
    """Check that bit 0 of the 8-bit flag equals the 1-bit flag."""
    s.push()
    lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, vl_flag)))
    xvar = (V_F == vx_flag)
    s.add(lvar != xvar)
    solve(name, lvar, xvar, s)
    s.pop()

def reassemble(vl_reg):
    """Rebuild a 64-bit value from its eight bytes, most-significant first."""
    return z3.Concat(*[z3.Extract(hi, hi - 7, vl_reg) for hi in range(63, -1, -8)])

def check_reg(name, lexpr, xexpr):
    """Check that the two 64-bit register expressions agree."""
    s.push()
    lvar = (V_R == lexpr)
    xvar = (V_R == xexpr)
    s.add(lvar != xvar)
    solve(name, lvar, xvar, s)
    s.pop()

check_flag("AF", VL_AF, VX_AF)
check_flag("CF", VL_CF, VX_CF)
check_flag("OF", VL_OF, VX_OF)
check_flag("PF", VL_PF, VX_PF)
check_reg("RAX", reassemble(VL_RAX), VX_RAX)
# RBX is the destination of the instruction under test: its low byte must
# equal bits 15:8 of RAX (i.e. AH), all other bytes unchanged.
_rbx_low = z3.Extract(7, 0, (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(15, 8, VL_RAX)) & z3.BitVecVal(256 - 1, 64)))
_rbx_lifted = z3.Concat(*([z3.Extract(hi, hi - 7, VL_RBX) for hi in range(63, 8, -8)] + [_rbx_low]))
check_reg("RBX", _rbx_lifted, z3.Concat(z3.Extract(63, 8, VX_RBX), z3.Extract(15, 8, VX_RAX)))
check_reg("RCX", reassemble(VL_RCX), VX_RCX)
check_reg("RDX", reassemble(VL_RDX), VX_RDX)
check_flag("SF", VL_SF, VX_SF)
check_flag("ZF", VL_ZF, VX_ZF)
# `status` is tri-state (True / False / "Unknown"), so the explicit `== True`
# and `== False` comparisons are deliberate, not redundant.
if(status == True):
    print('[6;30;42m' + 'Test-Pass: ' + '[0m' + test_name)  # green banner
else:
    if(status == False):
        print('[0;30;41m' + 'Test-Fail: ' + '[0m' + test_name)  # red banner
    else:
        print('[6;30;47m' + 'Test-Unk: ' + '[0m' + test_name)  # grey banner
| [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
98e9c1b4929fd591c700b18d00580b3300ea5ff9 | bfb4de0b0b0602fab213c73cf4f4b4874d3e1dd3 | /app.py | cd4b4d243e526ba22f86cf6ea2b2c896ae66e79b | [] | no_license | EnglundE/mongo-flask-walkthrough | 8e0ced2a594291bdd39a4953f5eb4d6478d2f628 | d966f5797f94e18b282109accea53cd125de6804 | refs/heads/main | 2023-06-28T04:34:25.576490 | 2021-07-30T22:44:23 | 2021-07-30T22:44:23 | 383,476,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,628 | py | import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)

# Database name, connection URI and the session-signing key all come from
# environment variables (supplied locally by env.py, which is not committed).
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")

mongo = PyMongo(app)  # PyMongo wrapper exposing the MongoDB database as mongo.db
@app.route("/")
@app.route("/get_tasks")
def get_tasks():
    """Home page: render every task stored in the tasks collection."""
    all_tasks = list(mongo.db.tasks.find())
    return render_template("tasks.html", tasks=all_tasks)
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search across the tasks collection.

    Defaults the query to "" so that navigating here with GET (no form
    data) yields an empty result page instead of passing None into the
    Mongo ``$text`` operator and crashing.
    """
    query = request.form.get("query", "")
    tasks = list(mongo.db.tasks.find({"$text": {"$search": query}}))
    return render_template("tasks.html", tasks=tasks)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Sign-up view: create a user, log them in, and open their profile."""
    if request.method != "POST":
        return render_template("register.html")

    # Reject usernames that already exist (stored lower-cased).
    username = request.form.get("username").lower()
    if mongo.db.users.find_one({"username": username}):
        flash("Username already exists")
        return redirect(url_for("register"))

    mongo.db.users.insert_one({
        "username": username,
        "password": generate_password_hash(request.form.get("password")),
    })

    # Put the new user into the 'session' cookie.
    session["user"] = username
    flash("Registration Successful")
    return redirect(url_for("profile", username=session["user"]))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log-in view: verify the credentials and start a session."""
    if request.method != "POST":
        return render_template("login.html")

    username = request.form.get("username").lower()
    user_record = mongo.db.users.find_one({"username": username})
    if user_record and check_password_hash(
            user_record["password"], request.form.get("password")):
        session["user"] = username
        flash("Welcome, {}".format(request.form.get("username")))
        return redirect(url_for("profile", username=session["user"]))

    # Same message whether the username or the password was wrong.
    flash("Incorrect Username and/or Password")
    return redirect(url_for("login"))
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
    """Profile page for the logged-in user.

    Redirects anonymous visitors to the login page.  The session check
    runs *before* touching session["user"], so a visitor with no session
    gets a redirect instead of a KeyError / 500 response.
    """
    if not session.get("user"):
        return redirect(url_for("login"))

    # Always display the canonical username from the database, regardless
    # of what was typed in the URL.
    username = mongo.db.users.find_one(
        {"username": session["user"]})["username"]
    return render_template("profile.html", username=username)
@app.route("/logout")
def logout():
    """End the session: drop the user cookie and return to the login page."""
    session.pop("user")
    flash("You have been logged out")
    return redirect(url_for("login"))
@app.route("/add_task", methods=["GET", "POST"])
def add_task():
    """Render the new-task form (GET) or insert the submitted task (POST)."""
    if request.method == "POST":
        new_task = {
            "category_name": request.form.get("category_name"),
            "task_name": request.form.get("task_name"),
            "task_description": request.form.get("task_description"),
            # Checkbox value is only present in the form when ticked.
            "is_urgent": "on" if request.form.get("is_urgent") else "off",
            "due_date": request.form.get("due_date"),
            "created_by": session["user"],
        }
        mongo.db.tasks.insert_one(new_task)
        flash("Task Successfully Added")
        return redirect(url_for("get_tasks"))

    categories = mongo.db.categories.find().sort("category_name", 1)
    return render_template("add_task.html", categories=categories)
@app.route("/edit_task/<task_id>", methods=["GET", "POST"])
def edit_task(task_id):
    """Edit an existing task; the form is re-rendered after a POST update."""
    if request.method == "POST":
        updated_task = {
            "category_name": request.form.get("category_name"),
            "task_name": request.form.get("task_name"),
            "task_description": request.form.get("task_description"),
            "is_urgent": "on" if request.form.get("is_urgent") else "off",
            "due_date": request.form.get("due_date"),
            "created_by": session["user"],
        }
        mongo.db.tasks.update({"_id": ObjectId(task_id)}, updated_task)
        flash("Task Successfully Updated")

    # Fall through (also after POST) and show the task's current values.
    task = mongo.db.tasks.find_one({"_id": ObjectId(task_id)})
    categories = mongo.db.categories.find().sort("category_name", 1)
    return render_template("edit_task.html", task=task, categories=categories)
@app.route("/delete_task/<task_id>")
def delete_task(task_id):
    # Remove the task with the given ObjectId, confirm, return to the list.
    # NOTE(review): Collection.remove() is deprecated in PyMongo;
    # delete_one() is the modern equivalent.
    mongo.db.tasks.remove({"_id": ObjectId(task_id)})
    flash("Task Successfully Deleted")
    return redirect(url_for("get_tasks"))
@app.route("/get_categories")
def get_categories():
    """List every category, sorted alphabetically by name."""
    sorted_categories = list(
        mongo.db.categories.find().sort("category_name", 1))
    return render_template("categories.html", categories=sorted_categories)
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
    """Render the new-category form (GET) or insert the submission (POST)."""
    if request.method != "POST":
        return render_template("add_category.html")

    mongo.db.categories.insert_one(
        {"category_name": request.form.get("category_name")})
    flash("New Category Added")
    return redirect(url_for("get_categories"))
@app.route("/edit_category/<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
    """Rename an existing category."""
    if request.method == "POST":
        mongo.db.categories.update(
            {"_id": ObjectId(category_id)},
            {"category_name": request.form.get("category_name")})
        flash("Category Successfully Updated")
        return redirect(url_for("get_categories"))

    category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
    return render_template("edit_category.html", category=category)
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    # Remove the category with the given ObjectId and confirm.
    # NOTE(review): Collection.remove() is deprecated in PyMongo;
    # delete_one() is the modern equivalent.
    mongo.db.categories.remove({"_id": ObjectId(category_id)})
    flash("Category Successfully Deleted")
    return redirect(url_for("get_categories"))
if __name__ == "__main__":
    # Host and port are taken from the environment (Cloud9/Heroku style).
    # NOTE(review): debug=True must be disabled before deploying to production.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=True)
| [
"erik.englund1@gmail.com"
] | erik.englund1@gmail.com |
f9b5f4ccf4f2eacb3430cf9aeb146f1a36b21331 | 9ee2b96ed7ef15df9444205f1c2b4d53118127e4 | /Insert,Pagelayout,View_automation/SQT.sikuli/SQT.py | b17b41c77d1421c8470971bf5bc6c9681f15abd8 | [] | no_license | jahin1/Software-Testing-Project-Using-Sikuli- | 3802c0319a7051a47a356a8a618844d06671d1ff | 2d7b354238f668983be935ce2582cf7a65fd4006 | refs/heads/master | 2020-03-23T00:07:25.127111 | 2018-07-13T18:53:13 | 2018-07-13T18:53:13 | 140,845,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | click("1523375115160.png")
click("1523022073258.png")
if exists("1523374129602.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523022153139.png")
if exists("1523374529041.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523022168955.png")
if exists("1523374562201.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523022185001.png")
if exists("1523374586544.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523022262603.png")
if exists("1523374624171.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523022396989.png")
click("1523022427112.png")
wait("1523375731665.png")
wait("1523375748536.png")
click("1523022593850.png")
if exists("1523374680897.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523374739540.png")
wait("1523375769676.png")
wait("1523375790963.png")
click("1523374835549.png")
if exists("1523374877065.png"):
print "test case Passed"
else:
print "test case failed"
exit(1)
click("1523374899185.png")
| [
"jahin600@gmail.com"
] | jahin600@gmail.com |
263d2694fdd7881219ac1882decf094b2abfef6c | 29888dd55149caaa8af0a4103292d2852732a1e2 | /functions/dsintegr.py | b4ee539f7d76d196700488eb8ed6a4b02a33915a | [] | no_license | chinthirla/Python | 927eda03cfa2c4c4ef6a1559f3ba069c129af4da | 479852a11663470c988f263e5d3306dc80a40e5b | refs/heads/main | 2023-04-06T17:56:26.454828 | 2021-04-24T12:17:53 | 2021-04-24T12:17:53 | 339,814,009 | 0 | 0 | null | 2021-04-24T12:24:42 | 2021-02-17T18:11:54 | Python | UTF-8 | Python | false | false | 284 | py | def displayint(x):
    # Count from 1 up to x (inclusive), printing each integer on its own line.
    i=0
    while i<x:
        i += 1
        print(i)

num=int(input("Enter number upto where you want to display integers:"))  # inclusive upper bound
displayint(num)
def displayvalues(z):
    """Print the integers from 1 up to (but not including) z, one per line."""
    n = 1
    while n < z:
        print(n)
        n += 1
# Driver: read an upper bound and bump it by one so the user's own value is
# included in the printed range (displayvalues stops before its argument).
x=int(input("Enter your values:"))
x=x+1
displayvalues(x)
| [
"vijay.itblabs@gmail.com"
] | vijay.itblabs@gmail.com |
77d1e8b340c066ed0706c0e202e80c621bac1c98 | b729d3201d8010859262b2069aab97a44194255f | /cms/templatetags/__init__.py | 8e876a3dcf82cb13c1482c36208c173f813fb5ae | [
"BSD-2-Clause"
] | permissive | onespacemedia/cms | 0be6f6cb7d5ec9fe61cfb02c6f5db23251974122 | 23ab6216e2580bd9b91dbf5c087973bce98f6b5e | refs/heads/develop | 2021-07-18T13:42:22.821708 | 2021-07-13T13:45:17 | 2021-07-13T13:45:17 | 10,475,661 | 13 | 17 | NOASSERTION | 2021-05-19T14:35:44 | 2013-06-04T09:55:56 | JavaScript | UTF-8 | Python | false | false | 43 | py | '''Template extensions used by the CMS.'''
| [
"lewis.collard@onespacemedia.com"
] | lewis.collard@onespacemedia.com |
5049a48ad064e7d70498f271e8ef8f6c19d7b3f0 | 910ab3c80d54f2954c8b2b30eb7f6497c38fe3af | /project/migrations/0001_initial.py | 698061d894950e70edbf998fd1edf21209451a43 | [
"MIT"
] | permissive | Charles-Ndugire/neighbourhood-actives | ee7a7c38a79e6658764fc36403370239281a2d24 | f6b737886aa1146785abaf946c5160beffd83a24 | refs/heads/master | 2023-04-05T01:26:39.613232 | 2021-04-12T22:54:42 | 2021-04-12T22:54:42 | 356,206,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py | # Generated by Django 3.1.2 on 2021-04-11 20:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0002_profile_bio'),
]
operations = [
migrations.CreateModel(
name='Neighbourhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('logo', models.ImageField(upload_to='logo/')),
('description', models.TextField()),
('health_number', models.IntegerField(blank=True, null=True)),
('police_number', models.IntegerField(blank=True, null=True)),
('occupant_count', models.IntegerField(blank=True, null=True)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood', to='users.profile')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, null=True)),
('post', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood_post', to='project.neighbourhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_owner', to='users.profile')),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100)),
('description', models.TextField(blank=True)),
('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business', to='project.neighbourhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owner', to='users.profile')),
],
),
]
| [
"ndugirecharles@gmail.com"
] | ndugirecharles@gmail.com |
4e781f8bc54500812086fb8392d010655f63c3f8 | d8cc752d0f797be42022fd829a136c83c783ca83 | /yolo_face_2/data_convert.py | ddb38d911e861721bb858b13fc17971be5216221 | [] | no_license | Zhaoshimei/GenderRecognition_ProjectCode | 2687d78792f94ffd03e82024a3f4130a33161f0f | 225877897f0e8f3230afe2d72a321700dd70713b | refs/heads/master | 2020-05-31T16:54:45.712170 | 2019-06-06T12:15:24 | 2019-06-06T12:15:24 | 190,391,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | """
data_conversion.py 是将 wider_faced的标注数据集转成yolov3所需要的labels
- 每个图片转成对应名称的label
- 由于yolov3需要的是归一化之后的中心坐标和w,h所以在convert方法中进行了归一化和数据转换
@author: XP
"""
import cv2
def convert(size, box): #size是图片尺寸 box是坐标
dw = 1./size[0]
dh = 1./size[1]
x = (box[0] + box[1])/2.0
y = (box[2] + box[3])/2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
def readfile(filename):
fo = open(filename, "r") # 读入标注结果集
my_label_file = open("my_label.txt",'a+')
while True:
key = next(fo, -1)
if key == -1:
break
key = key.replace('\n', '')
img_name = key
key1 = key.split("/")
key1 = key1[1].split(".")
key1 = key1[0] #获取图片名称
# list_file = open('My_labels/%s.txt' % (key1), 'w') #新建对应图片的label,存放在My_labels文件夹下
value = []
key = "C:\DL-face\dataset\WIDER_train\images/%s"%(key) #该图片位置
# print(key)
# image = cv2.imread(key) #用opencv读取该图片
# image_size = []
# print(image.shape[0],image.shape[1])
# image_size = [image.shape[1],image.shape[0]] #得到图片尺寸,为了后面进行归一化
num = next(fo, -1)
# my_label_file.write(img_name)
count = 0
str_write = img_name
for i in range(int(num)): #遍历每一张标注的脸
value = next(fo, -1).split(' ')
box = [int(value[0]),int(value[0])+int(value[2]),int(value[1]),int(value[1])+int(value[3])]
if int(value[2]) > 29 and int(value[3]) > 29:
# x, y, w, h = convert(image_size,box)
x_min = box[0]
y_min = box[2]
x_max = box[1]
y_max = box[3]
# print(x, y, w, h)
str_write += ' %s,%s,%s,%s,0' % (x_min,y_min,x_max,y_max)
count += 1
# my_label_file.write(' %s,%s,%s,%s,0' % (x_min,y_min,x_max,y_max)) #将转换后的坐标写入label
if count != 0:
my_label_file.write(str_write+'\n')
fo.close()
filename = "C:\DL-face\dataset\wider_face_split\wider_face_train_bbx_gt.txt" #标注文件位置
readfile(filename) | [
"402531616@qq.com"
] | 402531616@qq.com |
97c446607c92c3a64958f0ff1508423344ccb52e | dbf3bdf62448d16a0b0d3524a93fbde5efaf86f5 | /wglapi.py | cfaa1ca8bf914f66e7f9c9cd13b5e8c0d49a359c | [
"MIT"
] | permissive | bgirard/apitrace | 45f97370952c144b7bf8ffa399e010d534af8290 | 3eb1921fcb5a4f92425dd9198763a4fe2c02a83d | refs/heads/master | 2021-01-17T23:44:58.226173 | 2011-07-29T19:39:25 | 2011-07-29T19:39:25 | 2,121,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,334 | py | ##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""WGL API description"""
from glapi import *
from winapi import *
from wglenum import *
wglapi = API("WGL")
HGLRC = Alias("HGLRC", HANDLE)
PROC = Opaque("PROC")
PFD = Flags(DWORD, [
"PFD_DOUBLEBUFFER",
"PFD_STEREO",
"PFD_DRAW_TO_WINDOW",
"PFD_DRAW_TO_BITMAP",
"PFD_SUPPORT_GDI",
"PFD_SUPPORT_OPENGL",
"PFD_GENERIC_FORMAT",
"PFD_NEED_PALETTE",
"PFD_NEED_SYSTEM_PALETTE",
"PFD_SWAP_EXCHANGE",
"PFD_SWAP_COPY",
"PFD_SWAP_LAYER_BUFFERS",
"PFD_GENERIC_ACCELERATED",
"PFD_SUPPORT_DIRECTDRAW",
"PFD_SUPPORT_COMPOSITION",
"PFD_DEPTH_DONTCARE",
"PFD_DOUBLEBUFFER_DONTCARE",
"PFD_STEREO_DONTCARE",
])
PIXELFORMATDESCRIPTOR = Struct("PIXELFORMATDESCRIPTOR", [
(WORD, "nSize"),
(WORD, "nVersion"),
(PFD, "dwFlags"),
(BYTE, "iPixelType"),
(BYTE, "cColorBits"),
(BYTE, "cRedBits"),
(BYTE, "cRedShift"),
(BYTE, "cGreenBits"),
(BYTE, "cGreenShift"),
(BYTE, "cBlueBits"),
(BYTE, "cBlueShift"),
(BYTE, "cAlphaBits"),
(BYTE, "cAlphaShift"),
(BYTE, "cAccumBits"),
(BYTE, "cAccumRedBits"),
(BYTE, "cAccumGreenBits"),
(BYTE, "cAccumBlueBits"),
(BYTE, "cAccumAlphaBits"),
(BYTE, "cDepthBits"),
(BYTE, "cStencilBits"),
(BYTE, "cAuxBuffers"),
(BYTE, "iLayerType"),
(BYTE, "bReserved"),
(DWORD, "dwLayerMask"),
(DWORD, "dwVisibleMask"),
(DWORD, "dwDamageMask"),
])
POINTFLOAT = Struct("POINTFLOAT", [
(FLOAT, "x"),
(FLOAT, "y"),
])
GLYPHMETRICSFLOAT = Struct("GLYPHMETRICSFLOAT", [
(FLOAT, "gmfBlackBoxX"),
(FLOAT, "gmfBlackBoxY"),
(POINTFLOAT, "gmfptGlyphOrigin"),
(FLOAT, "gmfCellIncX"),
(FLOAT, "gmfCellIncY"),
])
LPGLYPHMETRICSFLOAT = Pointer(GLYPHMETRICSFLOAT)
COLORREF = Alias("COLORREF", DWORD)
LAYERPLANEDESCRIPTOR = Struct("LAYERPLANEDESCRIPTOR", [
(WORD, "nSize"),
(WORD, "nVersion"),
(DWORD, "dwFlags"),
(BYTE, "iPixelType"),
(BYTE, "cColorBits"),
(BYTE, "cRedBits"),
(BYTE, "cRedShift"),
(BYTE, "cGreenBits"),
(BYTE, "cGreenShift"),
(BYTE, "cBlueBits"),
(BYTE, "cBlueShift"),
(BYTE, "cAlphaBits"),
(BYTE, "cAlphaShift"),
(BYTE, "cAccumBits"),
(BYTE, "cAccumRedBits"),
(BYTE, "cAccumGreenBits"),
(BYTE, "cAccumBlueBits"),
(BYTE, "cAccumAlphaBits"),
(BYTE, "cDepthBits"),
(BYTE, "cStencilBits"),
(BYTE, "cAuxBuffers"),
(BYTE, "iLayerPlane"),
(BYTE, "bReserved"),
(COLORREF, "crTransparent"),
])
LPLAYERPLANEDESCRIPTOR = Pointer(LAYERPLANEDESCRIPTOR)
WGLSWAP = Struct("WGLSWAP", [
(HDC, "hdc"),
(UINT, "uiFlags"),
])
HPBUFFERARB = Alias("HPBUFFERARB", HANDLE)
wglapi.add_functions([
# WGL
StdFunction(HGLRC, "wglCreateContext", [(HDC, "hdc")]),
StdFunction(BOOL, "wglDeleteContext", [(HGLRC, "hglrc")]),
StdFunction(HGLRC, "wglGetCurrentContext", [], sideeffects=False),
StdFunction(BOOL, "wglMakeCurrent", [(HDC, "hdc"), (HGLRC, "hglrc")]),
StdFunction(BOOL, "wglCopyContext", [(HGLRC, "hglrcSrc"), (HGLRC, "hglrcDst"), (UINT, "mask")]),
StdFunction(Int, "wglChoosePixelFormat", [(HDC, "hdc"), (Pointer(Const(PIXELFORMATDESCRIPTOR)), "ppfd")]),
StdFunction(Int, "wglDescribePixelFormat", [(HDC, "hdc"), (Int, "iPixelFormat"), (UINT, "nBytes"), Out(Pointer(PIXELFORMATDESCRIPTOR), "ppfd")]),
StdFunction(HDC, "wglGetCurrentDC", [], sideeffects=False),
StdFunction(PROC, "wglGetDefaultProcAddress", [(LPCSTR, "lpszProc")], sideeffects=False),
StdFunction(Int, "wglGetPixelFormat", [(HDC, "hdc")], sideeffects=False),
StdFunction(BOOL, "wglSetPixelFormat", [(HDC, "hdc"), (Int, "iPixelFormat"), (Pointer(Const(PIXELFORMATDESCRIPTOR)), "ppfd")]),
StdFunction(BOOL, "wglSwapBuffers", [(HDC, "hdc")]),
StdFunction(BOOL, "wglShareLists", [(HGLRC, "hglrc1"), (HGLRC, "hglrc2")]),
StdFunction(HGLRC, "wglCreateLayerContext", [(HDC, "hdc"), (Int, "iLayerPlane")]),
StdFunction(BOOL, "wglDescribeLayerPlane", [(HDC, "hdc"), (Int, "iPixelFormat"), (Int, "iLayerPlane"), (UINT, "nBytes"), Out(Pointer(LAYERPLANEDESCRIPTOR), "plpd")]),
StdFunction(Int, "wglSetLayerPaletteEntries", [(HDC, "hdc"), (Int, "iLayerPlane"), (Int, "iStart"), (Int, "cEntries"), (Array(Const(COLORREF), "cEntries"), "pcr")]),
StdFunction(Int, "wglGetLayerPaletteEntries", [(HDC, "hdc"), (Int, "iLayerPlane"), (Int, "iStart"), (Int, "cEntries"), Out(Array(COLORREF, "cEntries"), "pcr")], sideeffects=False),
StdFunction(BOOL, "wglRealizeLayerPalette", [(HDC, "hdc"), (Int, "iLayerPlane"), (BOOL, "bRealize")]),
StdFunction(BOOL, "wglSwapLayerBuffers", [(HDC, "hdc"), (UINT, "fuPlanes")]),
StdFunction(BOOL, "wglUseFontBitmapsA", [(HDC, "hdc"), (DWORD, "first"), (DWORD, "count"), (DWORD, "listBase")]),
StdFunction(BOOL, "wglUseFontBitmapsW", [(HDC, "hdc"), (DWORD, "first"), (DWORD, "count"), (DWORD, "listBase")]),
StdFunction(DWORD, "wglSwapMultipleBuffers", [(UINT, "n"), (Array(Const(WGLSWAP), "n"), "ps")]),
StdFunction(BOOL, "wglUseFontOutlinesA", [(HDC, "hdc"), (DWORD, "first"), (DWORD, "count"), (DWORD, "listBase"), (FLOAT, "deviation"), (FLOAT, "extrusion"), (Int, "format"), (LPGLYPHMETRICSFLOAT, "lpgmf")]),
StdFunction(BOOL, "wglUseFontOutlinesW", [(HDC, "hdc"), (DWORD, "first"), (DWORD, "count"), (DWORD, "listBase"), (FLOAT, "deviation"), (FLOAT, "extrusion"), (Int, "format"), (LPGLYPHMETRICSFLOAT, "lpgmf")]),
# WGL_ARB_buffer_region
StdFunction(HANDLE, "wglCreateBufferRegionARB", [(HDC, "hDC"), (Int, "iLayerPlane"), (UINT, "uType")]),
StdFunction(VOID, "wglDeleteBufferRegionARB", [(HANDLE, "hRegion")]),
StdFunction(BOOL, "wglSaveBufferRegionARB", [(HANDLE, "hRegion"), (Int, "x"), (Int, "y"), (Int, "width"), (Int, "height")]),
StdFunction(BOOL, "wglRestoreBufferRegionARB", [(HANDLE, "hRegion"), (Int, "x"), (Int, "y"), (Int, "width"), (Int, "height"), (Int, "xSrc"), (Int, "ySrc")]),
# WGL_ARB_extensions_string
StdFunction(Const(CString), "wglGetExtensionsStringARB", [(HDC, "hdc")], sideeffects=False),
# WGL_ARB_pixel_format
StdFunction(BOOL, "wglGetPixelFormatAttribivARB", [(HDC, "hdc"), (Int, "iPixelFormat"), (Int, "iLayerPlane"), (UINT, "nAttributes"), (Array(WGLenum, "nAttributes"), "piAttributes"), Out(Array(Int, "nAttributes"), "piValues")], sideeffects=False),
StdFunction(BOOL, "wglGetPixelFormatAttribfvARB", [(HDC, "hdc"), (Int, "iPixelFormat"), (Int, "iLayerPlane"), (UINT, "nAttributes"), (Array(WGLenum, "nAttributes"), "piAttributes"), Out(Array(FLOAT, "nAttributes"), "pfValues")], sideeffects=False),
StdFunction(BOOL, "wglChoosePixelFormatARB", [(HDC, "hdc"), (Const(Array(WGLenum, "__AttribList_size(piAttribIList)")), "piAttribIList"), (Const(Array(FLOAT, "__AttribList_size(pfAttribFList)")), "pfAttribFList"), (UINT, "nMaxFormats"), Out(Array(Int, "(*nNumFormats)"), "piFormats"), Out(Pointer(UINT), "nNumFormats")]),
# WGL_ARB_make_current_read
StdFunction(BOOL, "wglMakeContextCurrentARB", [(HDC, "hDrawDC"), (HDC, "hReadDC"), (HGLRC, "hglrc")]),
StdFunction(HDC, "wglGetCurrentReadDCARB", [], sideeffects=False),
# WGL_ARB_pbuffer
StdFunction(HPBUFFERARB, "wglCreatePbufferARB", [(HDC, "hDC"), (Int, "iPixelFormat"), (Int, "iWidth"), (Int, "iHeight"), (Const(Array(WGLenum, "__AttribList_size(piAttribList)")), "piAttribList")]),
StdFunction(HDC, "wglGetPbufferDCARB", [(HPBUFFERARB, "hPbuffer")], sideeffects=False),
StdFunction(Int, "wglReleasePbufferDCARB", [(HPBUFFERARB, "hPbuffer"), (HDC, "hDC")]),
StdFunction(BOOL, "wglDestroyPbufferARB", [(HPBUFFERARB, "hPbuffer")]),
StdFunction(BOOL, "wglQueryPbufferARB", [(HPBUFFERARB, "hPbuffer"), (WGLenum, "iAttribute"), Out(Pointer(Int), "piValue")]),
# WGL_ARB_render_texture
StdFunction(BOOL, "wglBindTexImageARB", [(HPBUFFERARB, "hPbuffer"), (Int, "iBuffer")]),
StdFunction(BOOL, "wglReleaseTexImageARB", [(HPBUFFERARB, "hPbuffer"), (Int, "iBuffer")]),
StdFunction(BOOL, "wglSetPbufferAttribARB", [(HPBUFFERARB, "hPbuffer"), (Const(Array(WGLenum, "__AttribList_size(piAttribList)")), "piAttribList")]),
# WGL_ARB_create_context
StdFunction(HGLRC, "wglCreateContextAttribsARB", [(HDC, "hDC"), (HGLRC, "hShareContext"), (Const(Array(WGLenum, "__AttribList_size(attribList)")), "attribList")]),
# WGL_EXT_extensions_string
StdFunction(Const(CString), "wglGetExtensionsStringEXT", [], sideeffects=False),
# WGL_EXT_make_current_read
StdFunction(BOOL, "wglMakeContextCurrentEXT", [(HDC, "hDrawDC"), (HDC, "hReadDC"), (HGLRC, "hglrc")]),
StdFunction(HDC, "wglGetCurrentReadDCEXT", [], sideeffects=False),
# WGL_EXT_pixel_format
StdFunction(BOOL, "wglGetPixelFormatAttribivEXT", [(HDC, "hdc"), (Int, "iPixelFormat"), (Int, "iLayerPlane"), (UINT, "nAttributes"), (Array(WGLenum, "nAttributes"), "piAttributes"), Out(Array(Int, "nAttributes"), "piValues")], sideeffects=False),
StdFunction(BOOL, "wglGetPixelFormatAttribfvEXT", [(HDC, "hdc"), (Int, "iPixelFormat"), (Int, "iLayerPlane"), (UINT, "nAttributes"), (Array(WGLenum, "nAttributes"), "piAttributes"), Out(Array(FLOAT, "nAttributes"), "pfValues")], sideeffects=False),
StdFunction(BOOL, "wglChoosePixelFormatEXT", [(HDC, "hdc"), (Pointer(Const(WGLenum)), "piAttribIList"), (Pointer(Const(FLOAT)), "pfAttribFList"), (UINT, "nMaxFormats"), Out(Array(Int, "nMaxFormats"), "piFormats"), Out(Pointer(UINT), "nNumFormats")]),
# WGL_EXT_swap_control
StdFunction(BOOL, "wglSwapIntervalEXT", [(Int, "interval")]),
StdFunction(Int, "wglGetSwapIntervalEXT", [], sideeffects=False),
# WGL_NV_vertex_array_range
StdFunction(OpaquePointer(Void), "wglAllocateMemoryNV", [(GLsizei, "size"), (GLfloat, "readfreq"), (GLfloat, "writefreq"), (GLfloat, "priority")]),
StdFunction(Void, "wglFreeMemoryNV", [(OpaquePointer(Void), "pointer")]),
# GL_WIN_swap_hint
StdFunction(Void, "glAddSwapHintRectWIN", [(GLint, "x"), (GLint, "y"), (GLsizei, "width"), (GLsizei, "height")]),
# must be last
StdFunction(PROC, "wglGetProcAddress", [(LPCSTR, "lpszProc")]),
])
| [
"jfonseca@vmware.com"
] | jfonseca@vmware.com |
d6d0c5f7e6ba4e99a5344f0cde10dc55711c988d | 8c01e82a6dad7c1d780a0599ba1a36b3c28fce5d | /test-pubsub-widgets/pubsub.py | 437bfbedbf688960108bcbd3a2db04e055a268ac | [] | no_license | chinmaykolhatkar/samples | 93d411cd2ed9c17b0d33f800b3feb806ecc64789 | 7e8eba1f335b49117d3f3bbdd8408853fd079d52 | refs/heads/master | 2020-02-26T15:16:46.034705 | 2018-03-29T06:47:36 | 2018-03-29T06:48:07 | 83,665,750 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | import urllib2, json, sys, websocket, thread, time, random, os, signal
m = {}
queries = {}
def getRandomRealNumber():
return str(random.uniform(0, 1.0))
def createSubscribeQuery(topic):
q = {}
q['type'] = 'subscribe'
q['topic'] = topic
return json.dumps(q)
def createUnsubscribeQuery(topic):
q = {}
q['type'] = 'subscribe'
q['topic'] = topic
return json.dumps(q)
def createPublishSchemaQuery(ads, no):
d = {}
d['id'] = float(no)
d['type'] = 'schemaQuery'
d['context'] = ads['context']
q = {}
q['type'] = 'publish'
q['topic'] = ads['query']['topic']
q['data'] = d
return json.dumps(q)
def createPublishSnapshotDataQuery(ads, sq, no):
q = {}
q['type'] = 'publish'
q['topic'] = ads['query']['topic']
q['data'] = {}
q['data']['id'] = float(no)
q['data']['type'] = 'dataQuery'
q['data']['countdown'] = 299
q['data']['incompleteResultOK'] = True
q['data']['data'] = {}
q['data']['data']['schemaKeys'] = ads['context']['schemaKeys']
q['data']['data']['fields'] = []
v = sq['data']['data'][0]['values']
for i in v:
q['data']['data']['fields'].append(i['name'])
return json.dumps(q)
def createPublishDimensionalDataQuery(ads, sq, no):
q = {}
q['type'] = 'publish'
q['topic'] = ads['query']['topic']
q['data'] = {}
q['data']['id'] = float(no)
q['data']['type'] = 'dataQuery'
q['data']['countdown'] = 299
q['data']['incompleteResultOK'] = True
q['data']['data'] = {}
q['data']['data']['time'] = {}
q['data']['data']['time']['latestNumBuckets'] = 10
q['data']['data']['time']['bucket'] = '1m'
q['data']['data']['incompleteResultOK'] = True
q['data']['data']['keys'] = {}
for i in ads['context']['keys']:
q['data']['data']['keys'][i] = [ads['context']['keys'][i]]
q['data']['data']['fields'] = []
v = sq['data']['data'][0]['dimensions'][0]['additionalValues']
for i in v:
q['data']['data']['fields'].append(i['name'])
return json.dumps(q)
def sendQueries(ws, r, i):
ws.send(createSubscribeQuery(i['result']['topic'] + '.' + r))
ws.send(createPublishSchemaQuery(i, r))
result = json.loads(ws.recv())
ws.send(createUnsubscribeQuery(i['result']['topic'] + '.' + r))
r = getRandomRealNumber()
ws.send(createSubscribeQuery(i['result']['topic'] + '.' + r))
if (result['data']['data'][0]['schemaType'] == 'snapshot'):
ws.send(createPublishSnapshotDataQuery(i, result, r))
else:
ws.send(createPublishDimensionalDataQuery(i, result, r))
return r
def getFileWriter(topic):
if topic not in m:
m[topic] = open(os.getcwd() + '/' + topic, 'w')
return m[topic]
def sendForAllDataSources(ws, appDataSources):
for i in appDataSources:
r = getRandomRealNumber()
r = sendQueries(ws, r, i)
queries[r] = i
url = "http://" + sys.argv[1] + "/ws/v2/applications/" + sys.argv[2]
content = urllib2.urlopen(url).read()
j = json.loads(content)
appDataSources = j['appDataSources']
wsUrl = "ws://" + sys.argv[1] + "/pubsub"
ws = websocket.create_connection(wsUrl)
sendForAllDataSources(ws, appDataSources)
while True:
result = ws.recv()
r = json.loads(result)
parts = r['topic'].split(".")
t = parts[1] + '.' + parts[2]
f = getFileWriter(t)
f.write(result + '\n')
f.flush()
if int(r['data']['countdown']) == 1:
break
ws.close()
for i in m:
m[i].close()
# def on_message(ws, message):
# print message
# def on_error(ws, error):
# print error
# def on_close(ws):
# print "### closed ###"
# def on_open(ws):
# print "### opening ###"
# def run(*args):
# print "SEnding publish query"
# ws.send('{"type":"subscribe","topic":"DTAppMetricsResult.0.36096280516265744"}')
# ws.send('{"type":"publish","topic":"DTAppMetricsQuery","data":{"id":0.36096280516265744,"type":"dataQuery","data":{"fields":["percentFiltered","avgRecordSize"],"schemaKeys":{"appName":"Metric-Primitives-App-CCP","appUser":"chinmay","logicalOperatorName":""},"time":{"latestNumBuckets":10}},"countdown":299,"incompleteResultOK":true}}')
# print "SEnding data query"
# time.sleep(30000)
# ws.close()
# print "thread terminating..."
# thread.start_new_thread(run, ())
# # for i in range(len(appDataSources)):
# # print "DS " + str(i)
# # print appDataSources[i]
# wsUrl = "ws://" + sys.argv[1] + "/pubsub"
# ws = websocket.WebSocketApp(wsUrl,
# on_message = on_message,
# on_error = on_error,
# on_close = on_close)
# ws.on_open = on_open
# ws.run_forever()
| [
"chinmay@apache.org"
] | chinmay@apache.org |
e73ac069cf9a25f20afe6f175ef97bd07faf9361 | 4ba9d97128908720d92bd83f3f7fc95f8c66bf35 | /ICP11/Codes/venv/Scripts/find_spark_home.py | a6a49d5a79916a16d1899b045f9f0c54cedfe59b | [] | no_license | SarikaReddyKota/BDP | 55aec35670548d88dce2cadaff6111d9c76ef7fc | 10c62407d299cad43899b98cba5a3c322dbc8f9a | refs/heads/master | 2022-11-21T04:49:14.015937 | 2020-07-29T03:56:56 | 2020-07-29T03:56:56 | 270,848,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,927 | py | #!C:\Users\sarik\PycharmProjects\BDP_ICP11\venv\Scripts\python.exe
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script attempt to determine the correct setting for SPARK_HOME given
# that Spark may have been installed on the system with pip.
from __future__ import print_function
import os
import sys
def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
import_error_raised = False
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
import_error_raised = True
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
import_error_raised = True
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
if import_error_raised:
print(
"\nDid you install PySpark via a package manager such as pip or Conda? If so,\n"
"PySpark was not found in your Python environment. It is possible your\n"
"Python environment does not properly bind with your package manager.\n"
"\nPlease check your default 'python' and if you set PYSPARK_PYTHON and/or\n"
"PYSPARK_DRIVER_PYTHON environment variables, and see if you can import\n"
"PySpark, for example, 'python -c 'import pyspark'.\n"
"\nIf you cannot import, you can install by using the Python executable directly,\n"
"for example, 'python -m pip install pyspark [--user]'. Otherwise, you can also\n"
"explicitly set the Python executable, that has PySpark installed, to\n"
"PYSPARK_PYTHON or PYSPARK_DRIVER_PYTHON environment variables, for example,\n"
"'PYSPARK_PYTHON=python3 pyspark'.\n", file=sys.stderr)
sys.exit(-1)
if __name__ == "__main__":
print(_find_spark_home())
| [
"64293062+SarikaReddyKota@users.noreply.github.com"
] | 64293062+SarikaReddyKota@users.noreply.github.com |
2e999c973aac31f12762203fa7fab89b4d6ff576 | 4583a8273fc62e7325362f6fc30b4d29442e521e | /scrape.py | d77ecaed5e4c0a803ce956a2813eebcb04f8c738 | [] | no_license | KshitijBudhani/YtbDwn | 92c352d4bfec6d23d74ef39f03c7707ee4c69f68 | a79ed1e1f69b8f2e88a99a1cac20abe58dd42d20 | refs/heads/master | 2021-01-23T22:31:29.165809 | 2016-02-19T10:09:37 | 2016-02-19T10:09:37 | 52,080,285 | 1 | 0 | null | 2016-02-19T10:24:21 | 2016-02-19T10:24:20 | null | UTF-8 | Python | false | false | 14,442 | py | #!/usr/bin/python
import pafy
import sys
import urwid
import pyperclip
import urllib
import os
import subprocess
import time
import re
import urllib2
from bs4 import BeautifulSoup
import base64
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
print ""
# NOTE(review): os.system runs this in a throw-away child shell, so the
# 'export' cannot change this process's PATH; also 'PWD' is missing its
# '$' and would be appended literally.  This line is effectively a no-op.
os.system("export PATH=$PATH:PWD")
def exit_program(button): ####called when EXIT called
    """Urwid 'Exit' button handler: terminate the whole program."""
    raise SystemExit
############## -if no args then its youtube ##########
# No command-line flag given: default mode, prompt for a YouTube URL.
if len(sys.argv)==1:
    URL = raw_input("Enter URL\n")
######global variables###########3
# Video metadata, filled in by getpafy().
title=""
author=""
vid_len=0
# Stream lists from pafy: video-only, audio-only, and muxed (audio+video).
avail_stream_VideoO=None
avail_stream_audioO=None
avail_stream_both=None
# Flags set by Down_aria(): which menu loop the chosen download came from.
downSeconloop=None
downThirdloop=None
# aria2c invocation pieces and the chosen download target.
basecommand = "aria2c -j 10 -x 16 -m 0 -k 1M -s 25 -c "
filename=""
comp_command=""
downurl=None
dwndir = "~/Desktop"
###################Getting info about video#################
def getpafy(URL):
    """Fetch video metadata for URL via pafy and stash it in module globals.

    On success fills: author, title, vid_len, avail_stream_both,
    avail_stream_audioO, avail_stream_VideoO.
    """
    try:
        vid = pafy.new(str(URL))
        # Declare the module-level slots this function populates.
        global author
        global title
        global vid_len
        global avail_stream_both
        global avail_stream_audioO
        global avail_stream_VideoO
        author = vid.author
        title = vid.title
        vid_len = vid.length
        avail_stream_both = vid.streams
        avail_stream_audioO = vid.audiostreams
        avail_stream_VideoO = vid.videostreams
    except RuntimeError,e:
        # Unrecoverable extraction failure: report and quit.
        print str(e)
        sys.exit()
    except IOError,e:
        # Network trouble: offer an interactive retry.
        print str(e)
        print("please check the network Connection")
        retry = raw_input("Retry? y/n ")
        if retry in ('Y','y'):
            # NOTE(review): retries recurse, so repeated failures deepen the stack.
            getpafy(URL)
        elif retry in ('N','n'):
            sys.exit()
        # NOTE(review): any other answer falls through and the script
        # continues with the metadata globals still empty.
    except ValueError,e:
        # Malformed / unsupported URL.
        print str(e)
        print "Please check URL provided"
        sys.exit()
#############parsing url for first time###########
# Populate the metadata globals once before any menu is shown.
getpafy(URL) # recursively call for parsing until Success
########################################################
#########################definitions#########################################
def on_clicked_cont(button): ##to call when continue pressed
    """'Continue' button handler: leave the current urwid MainLoop."""
    raise urwid.ExitMainLoop()
def menuVAOnly(button): #called when user selects video/audio only in second loop
    """Handler for the 'Only Video/Audio Formats' entry: exit the second
    menu loop so the audio-only / video-only menu can be shown next."""
    raise urwid.ExitMainLoop()
def chosen_URL(button, choice):
    """Show the selected stream and its direct URL, with three actions:
    copy the URL to the clipboard, download it with aria2, or exit.

    button -- the urwid.Button that fired (unused, required by the signal API)
    choice -- the selected pafy stream object (exposes .url)
    """
    fmt_text = urwid.Text([u'Video Format :- ', str(choice), u'\n'])
    url_text = urwid.Text([u'Downloadable URL :- ', str(choice.url), u'\n'])
    btn_copy = urwid.Button(u'Copy URL to Clipboard')
    btn_down = urwid.Button(u'Download using aria')
    btn_exit = urwid.Button(u'Exit')
    urwid.connect_signal(btn_copy, 'click', Copy_exit, choice)
    urwid.connect_signal(btn_down, 'click', Down_aria, choice)
    urwid.connect_signal(btn_exit, 'click', exit_program)
    widgets = [fmt_text, url_text]
    for btn in (btn_copy, btn_down, btn_exit):
        widgets.append(urwid.AttrMap(btn, None, focus_map='reversed'))
    # Swap the currently displayed menu for this detail view.
    main1.original_widget = urwid.Filler(urwid.Pile(widgets))
##############################Displaying Video formats definitions########################
def menuAV(title, avail_stream_both):
    """Build the menu of muxed streams (audio+video together).

    Each stream row shows the format, resolution and size in MB and opens
    the detail view on click; two extra rows switch to the single-track
    menu or exit the program.
    """
    rows = [urwid.Text(title), urwid.Divider()]

    def add_row(label, handler, *extra):
        # One focusable, highlighted button per menu entry.
        btn = urwid.Button(label)
        urwid.connect_signal(btn, 'click', handler, *extra)
        rows.append(urwid.AttrMap(btn, None, focus_map='reversed'))

    for stream in avail_stream_both:
        size_mb = str((float(stream.get_filesize()) / 1024) / 1024)
        add_row(str(stream) + " ----->" + str(stream.resolution) + "----->" + size_mb,
                chosen_URL, stream)
    add_row("Only Video/Audio Formats", menuVAOnly)
    add_row("EXIT", exit_program)
    return urwid.ListBox(urwid.SimpleFocusListWalker(rows))
##########################################################################333
def menuVAOnlyMenu(title, avail_stream_VideoO, avail_stream_audioO):
    """Build the menu of single-track streams: video-only rows first
    (format, resolution, size in MB), then audio-only rows, then EXIT.
    Clicking a stream opens the detail view via chosen_URL.
    """
    rows = [urwid.Text(title), urwid.Divider()]

    def add_row(label, handler, *extra):
        # One focusable, highlighted button per menu entry.
        btn = urwid.Button(label)
        urwid.connect_signal(btn, 'click', handler, *extra)
        rows.append(urwid.AttrMap(btn, None, focus_map='reversed'))

    for vstream in avail_stream_VideoO:
        size_mb = str((float(vstream.get_filesize()) / 1024) / 1024)
        add_row(str(vstream).split('@', 1)[0] + "---->" + vstream.resolution + "----->" + size_mb,
                chosen_URL, vstream)
    for astream in avail_stream_audioO:
        add_row(str(astream), chosen_URL, astream)
    add_row("EXIT", exit_program)
    return urwid.ListBox(urwid.SimpleFocusListWalker(rows))
#################3333##################################################
def Copy_exit(button,choice): ####called when user selects copy to clipboard
    """Copy the chosen stream's direct download URL to the clipboard, then quit.

    button -- the urwid.Button that fired (unused, required by the signal API)
    choice -- the selected pafy stream object (exposes .url)
    """
    pyperclip.copy(str(choice.url))
    # The previous version also read the clipboard back into an unused
    # local ('spam'); that round-trip had no effect and was removed.
    sys.exit()
def Down_aria(button, choice):
    """Remember which stream to fetch with aria2 and leave the menu loop.

    Sets the module globals the post-loop download code reads:
    filename, comp_command, downurl, and exactly one of
    downSeconloop / downThirdloop depending on the stream's media type.
    """
    global filename, comp_command, downSeconloop, downThirdloop, downurl
    filename = "%s.%s" % (title, choice.extension)
    comp_command = "%s-o %s" % (basecommand, filename)
    media_kind = str(choice.mediatype)
    if media_kind == "normal":
        # Muxed audio+video stream, picked from the second menu loop.
        downSeconloop = 1
    elif media_kind in ("video", "audio"):
        # Single-track stream, picked from the third menu loop.
        downThirdloop = 1
    downurl = urllib.unquote(str(choice.url))
    raise urwid.ExitMainLoop()
############################# print basic video info######################## 1st Loop info #################333#####can be done in a function too
palette = [('banner', 'black', 'light gray'),]
txt = urwid.Text(('banner', u"Hello !!! \nRequested Video Information....\n "))
p_title = urwid.Text(("Title :- %s" %title))
p_author = urwid.Text(("Channel :- %s" %author))
p_len = urwid.Text(("Length :- "+"%d"%(vid_len/60) + ":" + "%d"%(vid_len%60)))
button_cont = urwid.Button(u'Press Enter to Continue') #continue button
urwid.connect_signal(button_cont, 'click', on_clicked_cont)
button_exit= urwid.Button(u'Press Enter to Exit') #exit button
urwid.connect_signal(button_exit, 'click', exit_program)
div = urwid.Divider(top=0)
pile = urwid.Pile([txt,p_title,p_author,p_len,div,urwid.AttrMap(button_cont, None , focus_map='reversed'),urwid.AttrMap(button_exit, None, focus_map='reversed')])
main2=urwid.Filler(pile)
####### starting first loop #########
loop = urwid.MainLoop(main2, palette=[('reversed', 'standout', '')])
loop.run()
####### First loop ending , Clear Screen for next screen
print "" #Dummy print for clear to work ?? find reason for this
subprocess.call("clear")
####################################################
##############starting the second loop###########displaying files with both audio and video
# Second screen: list the muxed (audio+video) streams inside a centered overlay.
main1 = urwid.Padding(menuAV(u'Available Formats {normal:- contains both audio and video}', avail_stream_both), left=2, right=2)
top = urwid.Overlay(main1, urwid.SolidFill(u'\N{MEDIUM SHADE}'),align='center', width=('relative', 60),valign='middle', height=('relative', 90),min_width=20, min_height=9)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
####################exiting audioVideo loop############check if download was requested###########################
if downSeconloop==1: ######
    # Sanitise the title-derived filename down to [a-zA-Z0-9._].
    regex = re.compile('[^a-zA-Z0-9.]')
    filename = regex.sub('_', str(filename))
    # NOTE(review): two fixed passes only halve runs of '_'; long runs of
    # special characters can still leave repeated underscores
    # (re.sub('_+', '_', ...) would collapse them all in one pass).
    filename=filename.replace("__","_")
    filename=filename.replace("__","_")
    print filename
    print "downloadinf to %s\n" %dwndir
    # NOTE(review): the URL is interpolated into a shell command line;
    # quoting relies on the URL containing no '"'.  The '~' in dwndir is
    # expanded by the shell, so moving to a subprocess argument list would
    # also need os.path.expanduser.
    a=os.system("aria2c --out "+str(filename)+" -j 10 -x 16 -m 0 -k 1M -s 25 " +"-d %s" %dwndir + " \"%s\" " %downurl)
    #print a
if downSeconloop==1: ###if download was for a/v files ####exit as furher videos not needed
    sys.exit()
################################################################33#################################3########
###########starting 3rd loop ###########################333###############################
##########skipping 3rd iteratio if already downloaded##########
# Only reached when no muxed-stream download happened (the branch above
# exits when downSeconloop == 1), so this guard is effectively always true.
if downSeconloop != 1 : ############## execute only if video from 2nd loop not selecetd ###### known after first loop 2nd loop is executed and user choses "only audio/video" option
    main1 = urwid.Padding(menuVAOnlyMenu(u'Available Formats {Only Video OR Only Audio}', avail_stream_VideoO,avail_stream_audioO), left=2, right=2)
    top = urwid.Overlay(main1, urwid.SolidFill(u'\N{MEDIUM SHADE}'),align='center', width=('relative', 60),valign='middle', height=('relative', 90),min_width=20, min_height=9)
    urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
###########################################################333
#################download from 3rd loop######################333
if downThirdloop == 1 : ####downThirdloop=1 means a video from third loop is selected for downloads
    # Same sanitise-and-download sequence as the muxed-stream branch above.
    regex = re.compile('[^a-zA-Z0-9.]')
    filename = regex.sub('_', str(filename))
    filename=filename.replace("__","_")
    filename=filename.replace("__","_")
    print filename
    print "downloadinf to %s\n" %dwndir
    a=os.system("aria2c --out "+str(filename)+" -j 10 -x 16 -m 0 -k 1M -s 25 " +"-d %s" %dwndir + " \"%s\" " %downurl)
    sys.exit()
#####################################exit after all########################
elif str(sys.argv[1])=="-a" :
    #########################handle animes##############################33
    # '-a' flag: anime mode -- scrape the direct video URL out of a
    # gogoanime / kissanime episode page instead of using pafy.
    print "Anime it is (gogoanime.com/kissanime.com)"
    URL_a=raw_input("Enter Url\n")
    # Substrings used to dispatch on the host below.
    gogo = "gogoanime"
    kissan = "kissanime"
    # Filled in by dwn_anime() once the user picks a resolution.
    chosen_resolution=None
    chosen_anime_url=None
    #anime_name = "testanime.mp4"
def anime_menu(title, vid_url_res):
    """Build the resolution-picker menu for an anime episode.

    vid_url_res is a sequence of (download_url, resolution_label) tuples;
    only the label is shown, and the whole tuple is handed to dwn_anime
    when a row is clicked.  A final EXIT row quits the program.
    """
    entries = [urwid.Text(title), urwid.Divider()]
    for pair in vid_url_res:
        res_btn = urwid.Button(str(pair[1]))
        urwid.connect_signal(res_btn, 'click', dwn_anime, pair)
        entries.append(urwid.AttrMap(res_btn, None, focus_map='reversed'))
    exit_btn = urwid.Button("EXIT")
    urwid.connect_signal(exit_btn, 'click', exit_program)
    entries.append(urwid.AttrMap(exit_btn, None, focus_map='reversed'))
    return urwid.ListBox(urwid.SimpleFocusListWalker(entries))
def dwn_anime(button,vid_info):
global chosen_anime_url
global chosen_resolution
chosen_resolution = str(vid_info[1])
chosen_anime_url = str(vid_info[0])
raise urwid.ExitMainLoop()
if gogo in URL_a:
try:
f = urllib.urlopen(URL_a).read()
except:
print "connectivity error\n"
soup = BeautifulSoup(f)
select = soup.find_all("select", id="selectQuality")
option_list = select[0].find_all("option")
vid_url_res=[]
for o in option_list:
vid_url_res.append(( o["value"] , o.get_text()))
main1 = urwid.Padding(anime_menu(u'Available Formats {normal:- contains both audio and video}', vid_url_res), left=2, right=2)
top = urwid.Overlay(main1, urwid.SolidFill(u'\N{MEDIUM SHADE}'),align='center', width=('relative', 60),valign='middle', height=('relative', 90),min_width=20, min_height=9)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
###################download the shit####################
print "downloadind to Desktop"
filename = str(((soup.find_all("meta",attrs={'name':'description'}))[0])["content"])
filename = filename + ".mp4"
filename = filename.replace(" ", "_")
print filename
a=os.system("aria2c --out " + str(filename) + " -j 10 -x 16 -m 0 -k 1M -s 35 " + " -d ~/Desktop " +" %s " %str(chosen_anime_url))
sys.exit()
#os.system("aria2c %s"%str(chosen_anime_url))
elif kissan in URL_a:
driver = webdriver.PhantomJS(service_args=['--ssl-protocol=any'])
driver.maximize_window()
try :
driver.get(str(URL_a))
wait = WebDriverWait(driver, 30)
search = wait.until(EC.presence_of_element_located((By.ID, "selectQuality")))
except:
print "connection timeout"
driver.quit()
sys.exit()
a=driver.find_element_by_id("selectQuality")#.text
bodyText = driver.find_element_by_tag_name('body').text
x=a.find_elements_by_tag_name("option")
vid_url_res=[]
for k in x:
vid_url_res.append(( base64.b64decode(str(k.get_attribute("value"))), str(k.text)))
main1 = urwid.Padding(anime_menu(u'Available Formats {normal:- contains both audio and video}', vid_url_res), left=2, right=2)
top = urwid.Overlay(main1, urwid.SolidFill(u'\N{MEDIUM SHADE}'),align='center', width=('relative', 60),valign='middle', height=('relative', 90),min_width=20, min_height=9)
urwid.MainLoop(top, palette=[('reversed', 'standout', '')]).run()
###################download the shit####################
print "downloadind to Desktop"
filename = str((driver.find_element_by_name("description")).get_attribute("content"))
for rep in ["Various","formats","from","240p","720p" ,"HD","to","or","even","1080p", "(" , ")" , "HTML5", "available", "for", "mobile", "devices","f",".","_____" ]:
filename = filename.replace(rep,"")
filename = filename.rstrip("_")
filename = filename.rstrip(" ")
filename=filename.replace(" ","_")
filename = filename + ".mp4"
print filename
#print chosen_anime_url
chosen_anime_url = chosen_anime_url.rstrip(" ")
a=os.system("aria2c --out " + str(filename) + " -j 10 -x 16 -m 0 -k 1M -s 35 " + " -d ~/Desktop " +" \"%s\" " %str(chosen_anime_url))
driver.quit()
sys.exit()
######################################################################
else :
print "Please check the URL\n"
sys.exit()
######################### -d for direct download links #######################3
elif str(sys.argv[1])=="-d" :
d_url=raw_input("Enter the Url\n")
print "" #Dummy print for clear to work ?? find reason for this
subprocess.call("clear")
d_filename=raw_input("Input filename\n")
print "" #Dummy print for clear to work ?? find reason for this
subprocess.call("clear")
dwndir = "~/Desktop"
print "Downloading to Desktop"
os.system("aria2c --out "+str(d_filename)+" -j 10 -x 16 -m 0 -k 1M -s 25 " +"-d %s" %dwndir + " \"%s\" " %d_url)
#sys.exit()
| [
"sherkipraneet@gmail.com"
] | sherkipraneet@gmail.com |
2ac80964f95d3353dd2891a30f5ed346f0f70fa4 | b0aa9a49077e514b6997dc1405069434970239ec | /Compare within margin.py | f95787349656797c3869fc6f9cf1b893991636ae | [] | no_license | Katarzyna-Bak/Coding-exercises | 3cadde05f845b0b1a52ae22dcea82e61644f52da | 5309e5b3fe662d28afda6969a1988dadda8e67c3 | refs/heads/main | 2023-08-17T08:38:04.812286 | 2021-10-07T16:04:47 | 2021-10-07T16:04:47 | 401,006,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | """
Create a function close_compare that accepts 3 parameters: a, b, and an optional margin. The function should return whether a is lower than, close to, or higher than b.
a is considered "close to" b if margin is greater than or equal to the distance between a and b.
Please note the following:
When a is close to b, return 0.
Otherwise...
When a is less than b, return -1.
When a is greater than b, return 1.
If margin is not given, treat it as zero.
Assume: margin >= 0
Tip: Some languages have a way to make parameters optional.
Example 1
If a = 3, b = 5, and margin = 3, then close_compare(a, b, margin) should return 0.
This is because a and b are no more than 3 numbers apart.
Example 2
If a = 3, b = 5, and margin = 0, then close_compare(a, b, margin) should return -1.
This is because the distance between a and b is greater than 0, and a is less than b.
"""
def close_compare(a, b, margin = 0):
if abs(a-b) <= margin: return 0
elif a > b: return 1
else: return -1
print("Tests:")
print(close_compare(2, 5, 3))
print(close_compare(5, 5, 3))
print(close_compare(8, 5, 3))
print(close_compare(8.1, 5, 3))
print(close_compare(1.99, 5, 3)) | [
"noreply@github.com"
] | noreply@github.com |
825accd3872929d9287bb3b4c66b0585d16507fe | 350db570521d3fc43f07df645addb9d6e648c17e | /1299_Replace_Elements_with_Greatest_Element_on_Right_Side/solution.py | c1d77900854ca9a59cc3073bb3f87162f7eb586d | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | '''
1299. Replace Elements with Greatest Element on Right Side
Level: Easy
https://leetcode.com/problems/replace-elements-with-greatest-element-on-right-side
'''
'''
Solution:
'''
class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
| [
"bhuang@rms.com"
] | bhuang@rms.com |
ca5d2f735aaee931762726e44f8ffc69d56dab76 | ddd35c693194aefb9c009fe6b88c52de7fa7c444 | /Live 10.1.18/ATOM/channel_strip.py | fbd60c5b943b861e81fa7cbe0be8417f4de3f5ce | [] | no_license | notelba/midi-remote-scripts | 819372d9c22573877c7912091bd8359fdd42585d | e3ec6846470eed7da8a4d4f78562ed49dc00727b | refs/heads/main | 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\ATOM\channel_strip.py
# Compiled at: 2020-05-05 13:23:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import liveobj_valid
from ableton.v2.control_surface.components import ChannelStripComponent as ChannelStripComponentBase
class ChannelStripComponent(ChannelStripComponentBase):
empty_color = b'Mixer.EmptyTrack'
def _update_select_button(self):
if liveobj_valid(self._track) and self.song.view.selected_track == self._track:
self.select_button.color = b'Mixer.Selected'
else:
self.select_button.color = b'DefaultButton.Off'
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/ATOM/channel_strip.pyc
| [
"notelba@example.com"
] | notelba@example.com |
3e38362030882758775c5e399054becb65a21b6f | 049ee4623c92f67adeb0bfebc44a8a698dca23a7 | /theCode/utils.py | cfbbb7ab9e8ac55c832712ed741c356f7b39f176 | [] | no_license | Meghana07/CUB-Attn-GAN | 4f9339d50dd0e4c45a034d89d218ff3a6fe88d7e | 62af8aa3f8fa18c7b7d0dbf0bb7eded4c1694b78 | refs/heads/master | 2022-12-12T05:48:45.553454 | 2020-08-22T20:13:31 | 2020-08-22T20:13:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,101 | py | import os
import errno
import time
import numpy as np
from torch.nn import init
import torch
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from copy import deepcopy
import skimage.transform
from miscc.config import cfg
# For visualization !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!MARKER!!!!!!!!!!!!!!!!!!!!!!!!
COLOR_DIC = {0:[128,64,128], 1:[244, 35,232],
2:[70, 70, 70], 3:[102,102,156],
4:[190,153,153], 5:[153,153,153],
6:[250,170, 30], 7:[220, 220, 0],
8:[107,142, 35], 9:[152,251,152],
10:[70,130,180], 11:[220,20, 60],
12:[255, 0, 0], 13:[0, 0, 142],
14:[119,11, 32], 15:[0, 60,100],
16:[0, 80, 100], 17:[0, 0, 230],
18:[0, 0, 70], 19:[0, 0, 0]}
FONT_MAX = 50
def drawCaption(convas, captions, ixtoword, vis_size, off1=2, off2=2):
num = captions.size(0)
img_txt = Image.fromarray(convas)
# get a font
# fnt = None # ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
print ("CURRENT WORKING DIRCTORY : " , os.getcwd())
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
# get a drawing context
d = ImageDraw.Draw(img_txt)
sentence_list = []
for i in range(num):
cap = captions[i].data.cpu().numpy()
sentence = []
for j in range(len(cap)):
if cap[j] == 0:
break
word = ixtoword[cap[j]].encode('ascii', 'ignore').decode('ascii')
d.text(((j + off1) * (vis_size + off2), i * FONT_MAX), '%d:%s' % (j, word[:6]),
font=fnt, fill=(255, 255, 255, 255))
sentence.append(word)
sentence_list.append(sentence)
return img_txt, sentence_list
def build_super_images(real_imgs, captions, ixtoword,
attn_maps, att_sze, lr_imgs=None,
batch_size=cfg.TRAIN.BATCH_SIZE,
max_word_num=cfg.TEXT.WORDS_NUM):
build_super_images_start_time = time.time()
nvis = 8
real_imgs = real_imgs[:nvis]
if lr_imgs is not None:
lr_imgs = lr_imgs[:nvis]
if att_sze == 17:
vis_size = att_sze * 16
else:
vis_size = real_imgs.size(2)
text_convas = \
np.ones([batch_size * FONT_MAX,
(max_word_num + 2) * (vis_size + 2), 3],
dtype=np.uint8)
# print("keyword |||||||||||||||||||||||||||||||")
# print("max_word_num : " , max_word_num)
# print("keyword |||||||||||||||||||||||||||||||")
for i in range(max_word_num):
istart = (i + 2) * (vis_size + 2)
iend = (i + 3) * (vis_size + 2)
text_convas[:, istart:iend, :] = COLOR_DIC[i]
real_imgs = \
nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(real_imgs)
# [-1, 1] --> [0, 1]
real_imgs.add_(1).div_(2).mul_(255)
real_imgs = real_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
pad_sze = real_imgs.shape
middle_pad = np.zeros([pad_sze[2], 2, 3])
post_pad = np.zeros([pad_sze[1], pad_sze[2], 3])
if lr_imgs is not None:
lr_imgs = \
nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(lr_imgs)
# [-1, 1] --> [0, 1]
lr_imgs.add_(1).div_(2).mul_(255)
lr_imgs = lr_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
lr_imgs = np.transpose(lr_imgs, (0, 2, 3, 1))
# batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
seq_len = max_word_num
img_set = []
num = nvis # len(attn_maps)
text_map, sentences = \
drawCaption(text_convas, captions, ixtoword, vis_size)
text_map = np.asarray(text_map).astype(np.uint8)
bUpdate = 1
for i in range(num):
#print ("loop " , i ," of " , num == 8)
attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
# --> 1 x 1 x 17 x 17
attn_max = attn.max(dim=1, keepdim=True)
attn = torch.cat([attn_max[0], attn], 1)
#
attn = attn.view(-1, 1, att_sze, att_sze)
attn = attn.repeat(1, 3, 1, 1).data.numpy()
# n x c x h x w --> n x h x w x c
attn = np.transpose(attn, (0, 2, 3, 1))
num_attn = attn.shape[0]
#
img = real_imgs[i]
if lr_imgs is None:
lrI = img
else:
lrI = lr_imgs[i]
row = [lrI, middle_pad]
#print("rowwwwwwwwwwwwwwwww : ", row)
row_merge = [img, middle_pad]
row_beforeNorm = []
minVglobal, maxVglobal = 1, 0
for j in range(num_attn):
#print ("looop " , j , " of " , seq_len+1)
one_map = attn[j]
#print("First one map : " , one_map.shape)
#print("attn.shape : " , attn.shape)
# print("if (vis_size // att_sze) > 1: " , (vis_size // att_sze) > 1)
# print("vis_size : " , vis_size)
# print("att_sze : " , att_sze)
# print("vis_size//att_sze : " , vis_size//att_sze)
if (vis_size // att_sze) > 1:
one_map = \
skimage.transform.pyramid_expand(one_map, sigma=20,
upscale=vis_size // att_sze)
# print("one_map in if : " , one_map.shape)
row_beforeNorm.append(one_map)
#print("row_beforeNorm.append(one_map)" ,len(row_beforeNorm))
minV = one_map.min()
maxV = one_map.max()
if minVglobal > minV:
minVglobal = minV
if maxVglobal < maxV:
maxVglobal = maxV
#print("seq_len : " , seq_len)
for j in range(seq_len + 1):
#print ("loooop " , j , " of " , seq_len+1)
if j < num_attn:
one_map = row_beforeNorm[j]
one_map = (one_map - minVglobal) / (maxVglobal - minVglobal)
one_map *= 255
#
# print ("PIL_im = " , Image.fromarray(np.uint8(img)))
# print ("PIL_att = " , Image.fromarray(np.uint8(one_map[:,:,:3])))
# print ("img.size( :" , img.shape)
# print ("one_map.size( :" , one_map.shape)
PIL_im = Image.fromarray(np.uint8(img))
PIL_att = Image.fromarray(np.uint8(one_map[:,:,:3]))
merged = \
Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
#print ("merged : " , merged.size)
mask = Image.new('L', (vis_size, vis_size), (210))
#print (" mask : " , mask.size)
merged.paste(PIL_im, (0, 0))
#print (" merged.paste(PIL_im) : " , merged.size )
############################################################
merged.paste(PIL_att, (0, 0), mask)
#print (" merged.paste(PIL_att) : " , merged.size)#########################
merged = np.array(merged)[:, :, :3]
#print (" np.array(merged)[:::3] : " , merged.size )#########################
############################################################
else:
#print (" IN THE ELSE post_pad : " , post_pad.shape)
one_map = post_pad
#print (" one_map : " , one_map.shape )
merged = post_pad
#print (" OUTTING THE ELSE : " , merged.shape )
#print (" row : " , len(row))
row.append(one_map[:,:,:3])
#print (" row.appedn(one_map) : " , len(row))
row.append(middle_pad)
#print (" row.append(middle_pad) : " , len(row))
#
#print (" row_merge : " , len(row_merge))
row_merge.append(merged)
#print (" row_merge.append(mereged) : " , len(row_merge) )
row_merge.append(middle_pad)
#print (" row_merge.append(middle_pad) : " , len(row_merge) )
####################################################################
# print("row.shape : ", len(row))
# for i in range(len(row)):
# print('arr', i,
# " => dim0:", len(row[i]),
# " || dim1:", len(row[i][0]),
# " || dim2:", len(row[i][0][0]))
# #print(row)
# print("row[0].shape : ", len(row[0]))
# #print(row[0])
# print("row[0][0].shape : ", len(row[0][0]))
# #print(row[0][0])
# print("row[0][0][0].shape : ", len(row[0][0][0]))
# #print(row[0][0][0])
# print("row[1].shape : ", len(row[1]))
# #print(row[1])
# print("row[1][0].shape : ", len(row[1][0]))
# #print(row[1][0])
# print("row[1][0][0].shape : ", len(row[1][0][0]))
# #print(row[1][0][0])
# print("row[2].shape : ", len(row[2]))
# #print(row[2])
# print("row[2][0].shape : ", len(row[2][0]))
# #print(row[2][0])
# print("row[2][0][0].shape : ", len(row[2][0][0]))
# #print(row[2][0][0])
# print("row[3].shape : ", len(row[3]))
# #print(row[2])
# print("row[3][0].shape : ", len(row[3][0]))
# #print(row[2][0])
# print("row[3][0][0].shape : ", len(row[3][0][0]))
# #print(row[2][0][0])
# print("row[4].shape : ", len(row[4]))
# #print(row[2])
# print("row[4][0].shape : ", len(row[4][0]))
# #print(row[2][0])
# print("row[4][0][0].shape : ", len(row[4][0][0]))
#print(row[2][0][0])
row = np.concatenate(row, 1)
#print (" row.conatent(1) : " , len(row))########################################
row_merge = np.concatenate(row_merge, 1)
#print (" : " , )############################
####################################################################
txt = text_map[i * FONT_MAX: (i + 1) * FONT_MAX]
if txt.shape[1] != row.shape[1]:
print('txt', txt.shape, 'row', row.shape)
bUpdate = 0
break
#####################################################################
row = np.concatenate([txt, row, row_merge], 0)#######################
img_set.append(row)##################################################
#####################################################################
# print("keyword |||||||||||||||||||||||||||||||")
# print("bUpdate : " , bUpdate)
# print("keyword |||||||||||||||||||||||||||||||")
if bUpdate:
img_set = np.concatenate(img_set, 0)
img_set = img_set.astype(np.uint8)
print("keyTime |||||||||||||||||||||||||||||||")
print("build_super_images_time : " , time.time() - build_super_images_start_time)
print("KeyTime |||||||||||||||||||||||||||||||")
return img_set, sentences
else:
print("keyTime |||||||||||||||||||||||||||||||")
print("build_super_images_start_time : " , time.time() - build_super_images_start_time)
print("KeyTime |||||||||||||||||||||||||||||||")
return None
def build_super_images2(real_imgs, captions, cap_lens, ixtoword,
attn_maps, att_sze, vis_size=256, topK=5):
batch_size = real_imgs.size(0)
max_word_num = np.max(cap_lens)
text_convas = np.ones([batch_size * FONT_MAX,
max_word_num * (vis_size + 2), 3],
dtype=np.uint8)
real_imgs = \
nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(real_imgs)
# [-1, 1] --> [0, 1]
real_imgs.add_(1).div_(2).mul_(255)
real_imgs = real_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
pad_sze = real_imgs.shape
middle_pad = np.zeros([pad_sze[2], 2, 3])
# batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
img_set = []
num = len(attn_maps)
text_map, sentences = \
drawCaption(text_convas, captions, ixtoword, vis_size, off1=0)
text_map = np.asarray(text_map).astype(np.uint8)
bUpdate = 1
for i in range(num):
attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
#
attn = attn.view(-1, 1, att_sze, att_sze)
attn = attn.repeat(1, 3, 1, 1).data.numpy()
# n x c x h x w --> n x h x w x c
attn = np.transpose(attn, (0, 2, 3, 1))
num_attn = cap_lens[i]
thresh = 2./float(num_attn)
#
img = real_imgs[i]
row = []
row_merge = []
row_txt = []
row_beforeNorm = []
conf_score = []
for j in range(num_attn):
one_map = attn[j]
mask0 = one_map > (2. * thresh)
conf_score.append(np.sum(one_map * mask0))
mask = one_map > thresh
one_map = one_map * mask
if (vis_size // att_sze) > 1:
one_map = \
skimage.transform.pyramid_expand(one_map, sigma=20,
upscale=vis_size // att_sze)
minV = one_map.min()
maxV = one_map.max()
one_map = (one_map - minV) / (maxV - minV)
row_beforeNorm.append(one_map)
sorted_indices = np.argsort(conf_score)[::-1]
for j in range(num_attn):
one_map = row_beforeNorm[j]
one_map *= 255
#
PIL_im = Image.fromarray(np.uint8(img))
PIL_att = Image.fromarray(np.uint8(one_map))
merged = \
Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
mask = Image.new('L', (vis_size, vis_size), (180)) # (210)
merged.paste(PIL_im, (0, 0))
merged.paste(PIL_att, (0, 0), mask)
merged = np.array(merged)[:, :, :3]
row.append(np.concatenate([one_map, middle_pad], 1))
#
row_merge.append(np.concatenate([merged, middle_pad], 1))
#
txt = text_map[i * FONT_MAX:(i + 1) * FONT_MAX,
j * (vis_size + 2):(j + 1) * (vis_size + 2), :]
row_txt.append(txt)
# reorder
row_new = []
row_merge_new = []
txt_new = []
for j in range(num_attn):
idx = sorted_indices[j]
row_new.append(row[idx])
row_merge_new.append(row_merge[idx])
txt_new.append(row_txt[idx])
row = np.concatenate(row_new[:topK], 1)
row_merge = np.concatenate(row_merge_new[:topK], 1)
txt = np.concatenate(txt_new[:topK], 1)
if txt.shape[1] != row.shape[1]:
print('Warnings: txt', txt.shape, 'row', row.shape,
'row_merge_new', row_merge_new.shape)
bUpdate = 0
break
row = np.concatenate([txt, row_merge], 0)
img_set.append(row)
if bUpdate:
img_set = np.concatenate(img_set, 0)
img_set = img_set.astype(np.uint8)
return img_set, sentences
else:
return None
####################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| [
"ammarnasraza@gmail.com"
] | ammarnasraza@gmail.com |
f21589f87a5a5eebfb731f3b51ea5f9bfc71e054 | dd65c369dffe379b532887918e6de07c65ef4480 | /2020/day6.py | 1f4691af19b16ecf979ee550a4c9144da8c6ebbd | [] | no_license | nigelzor/advent-of-code | 0f5d6f274d9e2b9d52c3a316449cdf1e28c5dd00 | 4c97c226b5b633daee92498bfafb83a90bc3cd55 | refs/heads/master | 2023-01-06T21:08:30.678597 | 2022-12-25T06:19:00 | 2022-12-25T06:19:00 | 160,392,403 | 0 | 0 | null | 2022-12-03T06:02:35 | 2018-12-04T17:10:30 | Python | UTF-8 | Python | false | false | 844 | py | import doctest
def main():
part1 = []
part2 = []
with open('day6_input.txt') as f:
any_answered = set()
all_answered = None
for line in f:
parts = set(line.strip())
if not parts:
part1.append(any_answered)
part2.append(all_answered)
any_answered = set()
all_answered = None
else:
any_answered |= parts
if all_answered is None:
all_answered = parts
else:
all_answered &= parts
if any_answered:
part1.append(any_answered)
part2.append(all_answered)
print(sum(len(g) for g in part1))
print(sum(len(g) for g in part2))
if __name__ == "__main__":
doctest.testmod()
main()
| [
"ngentleman@gmail.com"
] | ngentleman@gmail.com |
4583e3828fbedf16d430af6da779ed4da48e7704 | 99d79ada2d3b7746573f071823ec61f5f853d7a3 | /tools/mlir_to_verilog_main.py | 3cffb21c960fe37bb87bcd6456bea03e1743cd97 | [
"MIT"
] | permissive | phanrahan/magma | d8062c6163e2c2c2cedef82317dc8cc40038220a | b05fe5303ed17e668c6ec2ec3558cd5a52eff787 | refs/heads/master | 2023-08-23T18:08:22.494869 | 2023-08-08T18:53:05 | 2023-08-17T16:16:44 | 84,332,281 | 227 | 21 | NOASSERTION | 2023-09-14T21:32:19 | 2017-03-08T14:57:09 | Python | UTF-8 | Python | false | false | 2,100 | py | import argparse
import dataclasses
import logging
import os
import sys
from typing import Dict
from magma.backend.mlir.mlir_to_verilog import mlir_to_verilog, MlirToVerilogOpts
from magma.common import slice_opts
logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING").upper())
def _field_to_argument_params(field: dataclasses.Field) -> Dict:
if field.default_factory is not dataclasses.MISSING:
raise TypeError(field)
params = {}
params["required"] = field.default is dataclasses.MISSING
if field.type is bool and not params["required"] and not field.default:
params["action"] = "store_true"
return params
if not params["required"]:
params["default"] = field.default
params["action"] = "store"
params["type"] = field.type
return params
def _add_dataclass_arguments(parser: argparse.ArgumentParser, cls: type):
assert dataclasses.is_dataclass(cls)
for field in dataclasses.fields(cls):
params = _field_to_argument_params(field)
parser.add_argument(f"--{field.name}", **params)
def main(prog_args = None) -> int:
parser = argparse.ArgumentParser(
"Compile a (MLIR) .mlir file to verilog (.v/.sv)"
)
parser.add_argument(
"infile",
metavar="<input filename>",
action="store",
type=str,
help="Input MLIR file",
)
parser.add_argument(
"--outfile",
metavar="<output filename>",
action="store",
type=argparse.FileType("w"),
required=False,
default=sys.stdout,
)
_add_dataclass_arguments(parser, MlirToVerilogOpts)
args = parser.parse_args(prog_args)
opts = slice_opts(vars(args), MlirToVerilogOpts)
logging.debug(f"Running with opts: {opts}")
if opts.split_verilog and args.outfile is not sys.stdout:
logging.warning(
f"outfile ({args.outfile.name}) ignored with split_verilog enabled"
)
with open(args.infile, "r") as f_in:
mlir_to_verilog(f_in, args.outfile, opts)
if __name__ == "__main__":
exit(main())
| [
"raj.setaluri@gmail.com"
] | raj.setaluri@gmail.com |
3365a607498921c50dae634db78930a8cbf6ffc8 | c9c557233b61fa2e19c42384980c7195f9030b94 | /Tuplas_Desafio075.py | 649796aba8794bbc72fcad5626ce2ec51120b12c | [
"MIT"
] | permissive | MarcioRSanches/Estudos_Python | 2783cf737567427c5995b47431f72aa5c676c21b | 2fef374b82645d4efced42f3dc666bb0603cd858 | refs/heads/master | 2021-06-14T07:21:54.199711 | 2020-06-10T22:23:19 | 2020-06-10T22:23:19 | 254,486,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | num = (int(input('Digite um valor: ')),
int(input('Digite outro valor: ')),
int(input('Digite mais um outro valor: ')),
int(input('Digite o último valor: ')))
print(f'Você digitou estes valores: {num}')
print(f'O valor 9 pareceu {num.count(9)} vezes')
if 3 in num:
print(f'O primeiro valor 3 foi digitado na posição {num.index(3)+1}')
else:
print('O valor 3 não foi digitado nenhuma vez!')
print('Os valores pares digitados foram ',end=' ')
for n in num:
if n % 2 == 0:
print(n , end=' ')
| [
"marciosanches@outlook.com"
] | marciosanches@outlook.com |
273065afed54862d523780b8b44bcd8399c3a642 | bee4c482501ae1665adab8820e02f683e25bbfb8 | /Emdee five for life/payload.py | 8a7b44d848b83d55539dd95389679f6432273c4d | [] | no_license | MrrRaph/HTB-Web-Challs | f200801f7101f0a3269ef1f71a54a04fae07d867 | 9a53d8c3476e9e1bb849eb1787b436659328d09a | refs/heads/master | 2022-11-28T19:06:46.558430 | 2020-08-01T23:49:10 | 2020-08-01T23:49:10 | 284,323,201 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #!/usr/bin/python3
import sys
from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.keys import Keys
from hashlib import md5
def getMD5(s, encoding='utf-8'):
return md5(s.encode(encoding)).hexdigest()
def main(url):
print("[+] Creating instance of Firefox..")
DRIVER_PATH = '/usr/local/bin/geckodriver'
BINARY_PATH = '/usr/bin/firefox-esr'
HTB_URL = 'http://' + url
ops = Options()
ops.add_argument('--headless')
ops.binary_location = BINARY_PATH
serv = Service(DRIVER_PATH)
browser = webdriver.Firefox(service=serv, options=ops)
print('[+] Fetching ' + HTB_URL)
browser.get(HTB_URL)
string_to_be_encode = browser.find_element_by_css_selector('body h3').text
md5encoded = getMD5(string_to_be_encode)
print("[+] MD5 Encoded String: " + md5encoded)
print("[+] Send MD5 string to input and submit")
inputhtml = browser.find_element_by_css_selector('input[name="hash"]')
inputhtml.send_keys(md5encoded)
submithtml = browser.find_element_by_css_selector('input[type="submit"]')
submithtml.click()
flag = browser.find_element_by_css_selector('body p').text
print("[+] Found flag: " + flag)
if __name__ == "__main__":
main(sys.argv[1])
| [
"dray.raph@gmail.com"
] | dray.raph@gmail.com |
b67f4820739167037cfe772b823e0f843c0b32ea | cb322a99df4c92e669cac7a05d2b1679cbbaa09a | /manage.py | cc297dca3a789a2355b4d8f42f56d345032c2fd7 | [] | no_license | Lecongnghi/Text-Classification | 73f505ba7b5aa90443244482a715094fd0887604 | da8ad95973bea14d8153e02ef31648359f68bc53 | refs/heads/master | 2022-12-06T07:53:59.195128 | 2020-08-15T15:04:28 | 2020-08-15T15:04:28 | 287,767,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # manage
import sys
import os
from subprocess import run, check_output
argv = sys.argv
name = argv[0]
command = argv[1]
VALID_CMD = ["train", "evaluate","server"]
def run_command(command):
cmd_list = command.split(" ")
try:
out = str(check_output(["python3", "-V"]))
if out and int(out[9]) == 3:
run(["python3"] + cmd_list)
return
except:
run(["python"] + cmd_list)
def recommend(wrong_cmd):
for cmd in VALID_CMD:
cmd_list = [x for x in cmd]
wcmd_list = [x for x in wrong_cmd]
if len([x for x in cmd_list if x in wcmd_list]) > 3:
return ("'%s' not found! Do you mean '%s'" %(wrong_cmd, cmd))
return (wrong_cmd + " not found!\n try: 'train', 'evaluate', 'server'")
def main():
if command in VALID_CMD:
if command == "train":
run_command("app/source/train_md.py")
elif command == "evaluate":
run_command("app/source/utils.py")
else:
run_command("app/source/index.py")
return "\nExit."
return recommend(command)
print(main()) | [
"67694770+Lecongnghi@users.noreply.github.com"
] | 67694770+Lecongnghi@users.noreply.github.com |
9388b820997f08a5b4c015a481950a6e17e4375f | bfc1b4d9908fa9f80612c5d05bbfce06599d4879 | /utils4py/formate.py | e4609e038f0e1fd42cd5af5b7efb76e964f420e5 | [] | no_license | hbyhl/utils4py | 9f68d0d8e9eae220cff75c64e7249ab33b6a7412 | 499c6e38287474e47b9038040f7598e65fde5c03 | refs/heads/master | 2020-08-24T05:31:25.723062 | 2020-03-30T11:43:37 | 2020-03-30T11:43:37 | 216,769,099 | 0 | 0 | null | 2019-10-22T09:01:26 | 2019-10-22T09:01:26 | null | UTF-8 | Python | false | false | 1,378 | py | #!usr/bin/env python
# -*- coding: utf-8 -*-
# Desc:
# FileName: formate.py
# Author:yhl
# Version:
# Last modified: 2019-12-20 10:48
import datetime
import json
import traceback
from utils4py import TextUtils
DT_FORMAT = "%Y-%m-%d %H:%M:%S"
DATE_FORMAT = "%Y-%m-%d"
DM_FORMAT = "%Y-%m"
LDT_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
def format_kv(kv_log):
"""
:param kv_log:
:return:
"""
return "\t".join(map(
lambda x: "{}={}".format(*x),
[(k, TextUtils.to_string(kv_log[k]).replace('\t', ' ')) for k in sorted(kv_log.keys())]
))
def format_to_json(obj):
"""
:param obj:
:return:
"""
if isinstance(obj,dict):
tmp_dict = {}
for k, v in obj.items():
tmp_dict[k] = str(v)
return json.dumps(tmp_dict)
else:
return str(obj)
def trans_fen_to_yuan(amount):
if not amount:
return 0
amount = int(amount)
yuan = amount / 100.00
return yuan
def trans_wan_to_bai_rate(rate):
if not rate:
return ""
else:
rate = int(rate)
pecent_rate = rate / 10000.00
return format(pecent_rate, '.0%')
def format_datetime(date_time, fomat=DT_FORMAT):
if date_time and isinstance(date_time, datetime.datetime):
return date_time.strftime(fomat)
return ''
if __name__ == '__main__':
print(trans_fen_to_yuan(10))
| [
"huilong.yang@42pay.com"
] | huilong.yang@42pay.com |
bfeae1eab29d79d32e31556bc11be53ae2c7c3be | 1f03b2a5fa873d63f2a129c9dfa548fecbb9f1fa | /cathome/views.py | f0a18d09a20751deb7535f1202fd97300c8a40b5 | [] | no_license | lin07340/cathome | a2ee1881ddbab2b68eab37714028b59d33518d59 | f6cbbddab35ace572db07dca562cb02e9f091fcb | refs/heads/master | 2022-12-11T11:57:11.385469 | 2018-09-30T12:52:47 | 2018-09-30T13:11:47 | 149,864,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,807 | py | # -*- coding: UTF-8 -*-
from cathome import app, db
from flask import render_template, redirect, flash, get_flashed_messages, request, send_from_directory
from .models import User, Image, Comment
import re, hashlib, os
import random, json
from flask_login import login_user, logout_user, login_required, current_user
import uuid
from .qcloud import save_to_cloud
@app.route('/')
@app.route('/index/')
def index():
    """Render the home page with the five most recently uploaded images."""
    latest_images = Image.query.order_by(db.desc(Image.id)).limit(5).all()
    return render_template('index.html', images=latest_images)
@app.route('/index/<int:page>/<int:per_page>/')
@app.route('/<int:page>/<int:per_page>/')
def index_more(page, per_page):
    """Return one page of the image feed as a JSON string.

    Used by the front end to load more images (infinite scroll). The
    response has the shape::

        {"has_next": bool, "images": [<image view-object>, ...]}

    where each image view-object carries the image metadata, its owner,
    the visible-comment count, and up to the last three visible comments.
    """
    # Paginate newest-first; error_out=False returns an empty page instead
    # of aborting with 404 when `page` is out of range.
    paginate = Image.query.order_by(db.desc(Image.id)).paginate(page=page, per_page=per_page, error_out=False)
    # NOTE: `map` shadows the builtin of the same name (local to this view).
    map = {'has_next': paginate.has_next}
    images = []
    for image in paginate.items:
        comments = []
        # Only comments with status == 0 are visible (presumably 0 = "not
        # deleted/hidden" — TODO confirm against the Comment model).
        show_comments = Comment.query.filter_by(image=image).filter_by(status=0).all()
        # Walk backwards over the last (up to) three visible comments so
        # they are appended newest-first.
        for i in range(len(show_comments), max(0, len(show_comments) - 3), -1):
            comment = show_comments[i - 1]
            comments.append(
                {'content': comment.content, 'username': comment.user.name, 'from_user_id': comment.from_user_id})
        # View-object serialized to the front end for one image card.
        imgvo = {'id': image.id,
                 'url': image.url,
                 'comment_count': len(show_comments),
                 'username': image.user.name,
                 'user_id': image.user_id,
                 'head_url': image.user.head_url,
                 'created_date': str(image.create_date),
                 # NOTE(review): counts ALL comments via image.comments,
                 # whereas comment_count above counts only status==0 ones —
                 # looks inconsistent; confirm which the UI expects.
                 'show_comments_count': min(3, len(image.comments))}
        images.append(imgvo)
    map['images'] = images
    return json.dumps(map)
@app.route('/regs/', methods={'get', 'post'})
def regs():
    """Register a new account, log the user in, then redirect.

    Validates the username/password, hashes the password with a random salt
    and honours an optional same-site ``next`` redirect target.
    """
    username = request.values.get('username').strip()
    password = request.values.get('password').strip()
    if username == '' or password == '':
        return redirect_with_message('/regloginpage/', u'用户名或密码不能为空', 'reglogin')
    if re.match(r'^[a-zA-Z]+\w+$', username) is None:
        return redirect_with_message('/regloginpage/', u'用户名不符合规则,请输入字母或数字,并以字母开头', 'reglogin')
    if len(username) > 12 or len(username) < 6:
        return redirect_with_message('/regloginpage/', u'用户名长度不对,请输入6~12字符的用户名', 'reglogin')
    if len(password) > 24 or len(password) < 8:
        return redirect_with_message('/regloginpage/', u'密码长度不对,请输入8~24字符的密码', 'reglogin')
    if User.query.filter_by(name=username).first() is not None:
        return redirect_with_message('/regloginpage/', u'用户名已存在', 'reglogin')
    # Salted MD5, kept for compatibility with existing rows.
    # NOTE(review): MD5 is cryptographically weak; consider migrating to a
    # dedicated password hash (e.g. werkzeug.security.generate_password_hash).
    salt = ''.join(random.sample(u'1234567890qwertyuiopasdfghjklzxcvbnmQWERYTUFVSDFWFLIL;+_', 24))
    digest = hashlib.md5((password + salt).encode('utf-8')).hexdigest()
    user = User(username, digest, salt,
                'http://images.nowcoder.com/head/' + str(random.randint(0, 1000)) + 't.png')
    db.session.add(user)
    db.session.commit()
    # Log in the freshly created user directly instead of re-querying it.
    login_user(user)
    # Only follow same-site targets to avoid acting as an open redirect.
    next_url = request.values.get('next')
    if next_url is not None and next_url.startswith('/'):
        return redirect(next_url)
    return redirect('/')
@app.route('/login/', methods={'get', 'post'})
def login():
    """Authenticate an existing user and redirect (honouring same-site ``next``)."""
    username = request.values.get('username').strip()
    password = request.values.get('password').strip()
    if username == '' or password == '':
        return redirect_with_message('/regloginpage/', u'用户名或密码不能为空', 'reglogin')
    user = User.query.filter_by(name=username).first()
    if user is None:
        return redirect_with_message('/regloginpage/', u'没有此用户名', 'reglogin')
    # Re-hash the submitted password with the stored salt and compare.
    # (The original queried the same user a second time here; once is enough.)
    digest = hashlib.md5((password + user.salt).encode('utf-8')).hexdigest()
    if digest != user.password:
        return redirect_with_message('/regloginpage/', u'密码错误', 'reglogin')
    login_user(user)
    # Only follow same-site targets to avoid acting as an open redirect.
    next_url = request.values.get('next')
    if next_url is not None and next_url.startswith('/'):
        return redirect(next_url)
    return redirect('/')
@app.route('/regloginpage/', methods={'get', 'post'})
def regloginpage():
    """Render the combined register/login page with any flashed messages."""
    mes = ''.join(get_flashed_messages(with_categories=False, category_filter=['reglogin']))
    return render_template('login.html', mes=mes, next=request.args.get('next'))
@app.route('/image/<int:image_id>/')
@login_required
def image(image_id):
    """Image detail page with the image's full comment list (newest first)."""
    msg = ''.join(get_flashed_messages(with_categories=False, category_filter=['remove_comment']))
    image = Image.query.filter_by(id=image_id).first()
    # Bail out before querying comments: the original ran the comment query
    # with image=None for unknown ids and then redirected anyway.
    if image is None:
        return redirect('/')
    comments = Comment.query.filter_by(image=image).order_by(db.desc(Comment.id)).all()
    return render_template('pageDetail.html', image=image, comments=comments, msg=msg)
@app.route('/profile/<int:user_id>/')
@login_required
def profile(user_id):
    """User profile page showing the first page (3 items) of that user's images."""
    mes = ''.join(get_flashed_messages(with_categories=False, category_filter=['upload', 'remove']))
    user = User.query.filter_by(id=user_id).first()
    if user is None:
        return redirect('/')
    pagination = Image.query.filter_by(user_id=user_id).paginate(page=1, per_page=3)
    return render_template('profile.html', user=user, has_next=pagination.has_next,
                           images=pagination.items, mes=mes)
@app.route('/profile/images/<int:user_id>/<int:page>/<int:per_page>/')
def user_images(user_id, page, per_page):
    """JSON page of one user's images with their visible-comment counts."""
    pagination = Image.query.filter_by(user_id=user_id).paginate(page=page, per_page=per_page)
    images = [{'id': img.id,
               'url': img.url,
               'comment_count': len(Comment.query.filter_by(image=img).filter_by(status=0).all())}
              for img in pagination.items]
    return json.dumps({'has_next': pagination.has_next, 'images': images})
@app.route('/add_comment/', methods={'post'})
def add_comment():
    """Persist a comment on an image and echo it back as JSON.

    Returns code 0 plus the stored comment data on success, or code -2 when
    the current user is the guest account.
    """
    content = request.values.get('content')
    image_id = int(request.values.get('image_id'))
    # NOTE(review): guests are detected via the literal user name 'GUEST' --
    # presumably a dedicated anonymous account; confirm against the login setup.
    if current_user.name != 'GUEST':
        print(current_user)  # TODO: replace with proper logging
        comment = Comment(content, image_id, current_user.id)
        db.session.add(comment)
        db.session.commit()
        map = {'code': 0,
               'content': content,
               'user_name': current_user.name,
               'user_id': current_user.id,
               'comment_id': comment.id
               }
        return json.dumps(map)
    else:
        return json.dumps({'code': -2})
@app.route('/remove_comment/<int:comment_id>/')
def remove_comment(comment_id):
    """Soft-delete a comment if the current user owns the image or the comment."""
    comment = Comment.query.filter_by(id=comment_id).first()
    # Guard against an unknown comment id: the original dereferenced
    # comment.image on None and crashed with AttributeError.
    if comment is None:
        return redirect('/')
    if current_user.id == comment.image.user_id or current_user.id == comment.from_user_id:
        remove_comment_method(comment)
        return redirect_with_message('/image/' + str(comment.image.id) + '/', u'删除成功!', 'remove_comment')
    return redirect_with_message('/image/' + str(comment.image.id) + '/', u'您没有删除权限!', 'remove_comment')
def remove_comment_method(comment):
    """Soft delete: flag the comment as hidden instead of removing the row."""
    comment.status = 1  # status 1 == hidden; listing queries filter on status == 0
    print('comment remove')  # TODO: replace with proper logging
    db.session.commit()
def save_to_local(file, file_name):
    """Store an uploaded file under UPLOAD_DIR and return its serving URL path."""
    destination = os.path.join(app.config['UPLOAD_DIR'], file_name)
    file.save(destination)
    return '/image/' + file_name
@app.route('/upload/', methods={'post'})
def upload():
    """Handle an image upload: validate the extension, store it, record it."""
    file = request.files['file']
    profile_url = '/profile/' + str(current_user.id) + '/'
    # Reject names without an extension (and dotfiles like ".jpg").
    if file.filename.find('.') <= 0:
        return redirect_with_message(profile_url, u'文件格式不对', 'upload')
    # BUG FIX: rsplit('.')[1] picked the *first* extension of names like
    # "a.b.jpg"; split from the right exactly once to get the real one.
    file_ext = file.filename.rsplit('.', 1)[1].strip().lower()
    if file_ext not in app.config['ALLOW_EXT']:
        return redirect_with_message(profile_url, u'文件格式不对', 'upload')
    # Random server-side name prevents collisions and path tricks.
    file_name = str(uuid.uuid1()).replace('-', '') + '.' + file_ext
    # url = save_to_local(file, file_name)
    url = save_to_cloud(file, file_name)
    if url is None:
        return redirect_with_message(profile_url, u'出错,请重试!', 'upload')
    db.session.add(Image(url, current_user.id))
    db.session.commit()
    return redirect_with_message(profile_url, u'保存成功', 'upload')
@app.route('/remove_image/<int:image_id>')
def remove_image(image_id):
    """Delete an image (soft-deleting its comments) if the current user owns it."""
    image = Image.query.filter_by(id=image_id).first()
    profile_url = '/profile/' + str(current_user.id) + '/'
    # Guard against an unknown image id: the original dereferenced
    # image.user on None and crashed with AttributeError.
    if image is None:
        return redirect_with_message(profile_url, u'没有删除权限', 'remove')
    if current_user.id != image.user.id:
        return redirect_with_message(profile_url, u'没有删除权限', 'remove')
    # Keep the comment rows (history) but hide them before removing the image.
    for comment in Comment.query.filter_by(image=image).all():
        comment.status = 1
    db.session.delete(image)
    db.session.commit()
    return redirect_with_message(profile_url, u'删除成功', 'remove')
@app.route('/image/<image_name>/')
def view_image(image_name):
    """Serve a locally stored image file from UPLOAD_DIR."""
    return send_from_directory(app.config['UPLOAD_DIR'], image_name)
def redirect_with_message(target, message, category):
    """Flash ``message`` under ``category`` (when given) and redirect to ``target``."""
    if message is not None:
        flash(message, category=category)
    return redirect(target)
@app.route('/logout/')
@login_required
def logout():
    """Log the current user out and send them to the home page.

    BUG FIX: ``@login_required`` originally sat *above* ``@app.route``.
    Decorators apply bottom-up, so the route registered the unprotected
    function and the login guard never ran; the guard must sit below the
    route decorator to be part of the registered view.
    """
    logout_user()
    return redirect_with_message('/', u'退出登陆', 'regloginpage')
| [
"670726095@qq.com"
] | 670726095@qq.com |
69bf863e941e1d75d011ad7cd0d4764f313b0046 | 894bb89cdafd31a36e07dfb8d5739082a3d3c704 | /co2_program5.py | efa7580b57ed4c82212838041bc6efa69ba9fc55 | [] | no_license | sharbinaashraf07/pythonProject | c8a01991f3c85c24c16691ebaca69aa06ec8b581 | b27887735ea5c9b97fff2cdfea0eb0208dc06f44 | refs/heads/master | 2023-04-07T00:39:56.196031 | 2021-04-24T14:48:21 | 2021-04-24T14:48:21 | 347,103,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | #pyramid with step number accepted from user.
def pattern(number):
    """Print a number pyramid: row i lists i, 2*i, ..., i*i, one row per line."""
    for row in range(1, number + 1):
        cells = " ".join(str(row * col) for col in range(1, row + 1))
        # Each cell is followed by a space in the original output format.
        print(cells + " ")
# Read the number of pyramid rows from the user and print the pyramid.
n = int(input("Enter a number : "))
pattern(n)
"sharbinaashraf1999@gmail.com"
] | sharbinaashraf1999@gmail.com |
79a1db3d60a6a67825f0c80335493395c8897fbc | adea8d48bcf38351ba55eb515e62ae3a35e3d03c | /CALCULATOR.py | eaa74508b8fd2a5791a05b43c481cad3a29e2af8 | [] | no_license | edake1/Programming-Projects | 6c9891169d17ef794262ce0f3439f20a0d68e5fb | 6d894aa06d71ca3f842a5d3152071ae237cb3e8e | refs/heads/main | 2023-02-03T10:34:09.657443 | 2020-12-22T14:51:24 | 2020-12-22T14:51:24 | 310,450,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | from tkinter import *
root = Tk()
root.title('SIMPLE CALCULATOR')

frame = LabelFrame(root, text="CALCULATOR", padx=20, pady=10)
frame.pack(padx=100, pady=50)


def my_Click():
    """Relabel every digit button with the text currently typed in the entry."""
    for button in buttons:
        button.configure(text=name.get())


name = StringVar()
name_entered = Entry(root, width=30, textvariable=name)
name_entered.pack()
name_entered.focus()

# BUG FIX: the original wrote `buttonN = Button(...).grid(...)`, but grid()
# returns None, so every buttonN variable was None and my_Click crashed with
# AttributeError. Create the widgets first, keep the references, then lay
# them out in the same 3x3-plus-one arrangement.
buttons = []
digit_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
grid_positions = [(0, 0), (0, 1), (0, 2),
                  (1, 0), (1, 1), (1, 2),
                  (2, 0), (2, 1), (2, 2),
                  (3, 1)]
for label, (row, column) in zip(digit_labels, grid_positions):
    button = Button(frame, text=label, command=my_Click)
    button.grid(row=row, column=column)
    buttons.append(button)

root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
ee2798634484ddb530cf7b917209d4cafc4a2673 | 6906a911f03e369569352893728275fee287680b | /manage.py | ee088666d8485d80bbf23484229d28f3d7dc11e1 | [] | no_license | crowdbotics-apps/joe-1319 | ab62638b43c303219230789c2c000e6e32377591 | 34cb120f2eac820357206348b3f281d81561ca51 | refs/heads/master | 2022-12-10T23:07:45.356298 | 2019-03-12T21:18:58 | 2019-03-12T21:18:58 | 175,297,690 | 0 | 0 | null | 2022-12-08T19:43:51 | 2019-03-12T21:17:56 | Python | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "joe_1319.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Check whether Django itself is importable so we can raise a more
        # helpful message when it is missing, instead of masking some other
        # unrelated import failure.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django imports fine, so re-raise the original error untouched.
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7e69bddbb2f2e8c010b4c1ce2dc78f8a893c720f | 0da202878a9a4cb741a62a2a8ef206e29eb157ed | /tasks/task04/wrappers.py | 4b18faee79594c262fe58cd56b3b319b2cc58767 | [] | no_license | jencmart/mff-dee-reinforcement-learning-npfl122 | 637ccd127bd73dd3b7d5f5ad39832a3f4dd36d11 | 0ae12b22f6f1ad5cead409ec1bfc9eb2b47d6a13 | refs/heads/master | 2023-02-19T00:38:32.464039 | 2021-01-18T00:04:41 | 2021-01-18T00:04:41 | 306,834,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,445 | py | #!/usr/bin/env python3
import sys
import gym
import numpy as np
class EvaluationWrapper(gym.Wrapper):
    """Gym wrapper that tracks episode returns and reports evaluation results.

    Once ``reset(start_evaluation=True)`` is called, the wrapper runs
    ``evaluate_for`` more episodes, prints the mean/std return to stderr and
    terminates the process.
    """

    def __init__(self, env, seed=None, evaluate_for=100, report_each=10):
        super().__init__(env)
        self._evaluate_for = evaluate_for
        self._report_each = report_each

        # Seed the environment and both of its spaces for reproducibility.
        self.seed(seed)
        self.action_space.seed(seed)
        self.observation_space.seed(seed)

        self._episode_running = False
        self._episode_returns = []
        self._evaluating_from = None

    @property
    def episode(self):
        """Number of finished episodes so far."""
        return len(self._episode_returns)

    def reset(self, start_evaluation=False):
        if self._evaluating_from is not None and self._episode_running:
            raise RuntimeError("Cannot reset a running episode after `start_evaluation=True`")
        if start_evaluation and self._evaluating_from is None:
            self._evaluating_from = self.episode

        self._episode_running = True
        self._episode_return = 0
        return super().reset()

    def step(self, action):
        if not self._episode_running:
            raise RuntimeError("Cannot run `step` on environments without an active episode, run `reset` first")

        observation, reward, done, info = super().step(action)

        self._episode_return += reward
        if done:
            self._episode_running = False
            self._episode_returns.append(self._episode_return)

            if self.episode % self._report_each == 0:
                print("Episode {}, mean {}-episode return {:.2f} +-{:.2f}".format(
                    self.episode, self._evaluate_for, np.mean(self._episode_returns[-self._evaluate_for:]),
                    np.std(self._episode_returns[-self._evaluate_for:])), file=sys.stderr)
            if self._evaluating_from is not None and self.episode >= self._evaluating_from + self._evaluate_for:
                # BUG FIX: `file=sys.stderr` was previously passed to
                # str.format(), which raised TypeError the moment evaluation
                # finished; it belongs to print().
                print("The mean {}-episode return after evaluation {:.2f} +-{:.2f}".format(
                    self._evaluate_for, np.mean(self._episode_returns[-self._evaluate_for:]),
                    np.std(self._episode_returns[-self._evaluate_for:])), file=sys.stderr)
                self.close()
                sys.exit(0)

        return observation, reward, done, info
class DiscretizationWrapper(gym.ObservationWrapper):
    """Discretize continuous observations into integer states, optionally tiled.

    ``separators`` is one sorted list of bucket boundaries per observation
    dimension. Without ``tiles`` the observation space is a single Discrete
    state; with ``tiles`` it is a MultiDiscrete vector of shifted tilings.
    """

    def __init__(self, env, separators, tiles=None):
        super().__init__(env)
        self._separators = separators
        self._tiles = tiles

        if tiles is None:
            # One state per combination of buckets across all dimensions.
            states = 1
            for separator in separators:
                states *= 1 + len(separator)
            self.observation_space = gym.spaces.Discrete(states)
        else:
            # Tile coding: the first tiling uses the raw separators; every
            # shifted tiling gets one extra (shifted-out) bucket per dimension.
            self._first_tile_states, self._rest_tiles_states = 1, 1
            for separator in separators:
                self._first_tile_states *= 1 + len(separator)
                self._rest_tiles_states *= 2 + len(separator)
            self.observation_space = gym.spaces.MultiDiscrete([
                self._first_tile_states + i * self._rest_tiles_states for i in range(tiles)])

            self._separator_offsets, self._separator_tops = [], []
            for separator in separators:
                self._separator_offsets.append(0 if len(separator) <= 1 else (separator[1] - separator[0]) / tiles)
                # BUG FIX: this originally used `math.inf` although `math` is
                # never imported in this module (NameError at runtime for
                # tiled wrappers); use numpy's infinity instead.
                self._separator_tops.append(np.inf if len(separator) <= 1 else separator[-1] + (separator[1] - separator[0]))

    def observation(self, observations):
        # Mixed-radix encoding: one digit per dimension.
        state = 0
        for observation, separator in zip(observations, self._separators):
            state *= 1 + len(separator)
            state += np.digitize(observation, separator)
        if self._tiles is None:
            return state
        else:
            states = [state]
            for t in range(1, self._tiles):
                state = 0
                for i in range(len(self._separators)):
                    state *= 2 + len(self._separators[i])
                    # Shift each tiling by a dimension-dependent offset.
                    value = observations[i] + ((t * (2 * i + 1)) % self._tiles) * self._separator_offsets[i]
                    if value > self._separator_tops[i]:
                        state += 1 + len(self._separators[i])
                    else:
                        state += np.digitize(value, self._separators[i])
                states.append(self._first_tile_states + (t - 1) * self._rest_tiles_states + state)
            return states
class DiscreteCartPoleWrapper(DiscretizationWrapper):
    """CartPole observations discretized into ``bins`` buckets per dimension."""

    def __init__(self, env, bins=8):
        def inner_edges(low, high):
            # Drop the outermost edges so out-of-range values fall into the
            # first/last bucket instead of a bucket of their own.
            return np.linspace(low, high, num=bins + 1)[1:-1]

        super().__init__(env, [
            inner_edges(-2.4, 2.4),   # cart position
            inner_edges(-3, 3),       # pole angle
            inner_edges(-0.5, 0.5),   # cart velocity
            inner_edges(-2, 2),       # pole angle velocity
        ])
class DiscreteMountainCarWrapper(DiscretizationWrapper):
    """MountainCar observations discretized, with bin count adapted to tiling."""

    def __init__(self, env, bins=None, tiles=None):
        if bins is None:
            # Fewer bins per tiling when more tilings overlap.
            if tiles is None or tiles <= 1:
                bins = 24
            elif tiles <= 3:
                bins = 12
            else:
                bins = 8
        position_edges = np.linspace(-1.2, 0.6, num=bins + 1)[1:-1]    # car position
        velocity_edges = np.linspace(-0.07, 0.07, num=bins + 1)[1:-1]  # car velocity
        super().__init__(env, [position_edges, velocity_edges], tiles)
class DiscreteLunarLanderWrapper(DiscretizationWrapper):
    """LunarLander observations discretized with hand-tuned bins per dimension.

    Also owns a separately seeded expert environment that can generate
    heuristic demonstration trajectories.
    """

    def __init__(self, env):
        super().__init__(env, [
            np.linspace(-.4, .4, num=5 + 1)[1:-1],   # x
            np.linspace(-.075,1.35,num=6 + 1)[1:-1], # y
            np.linspace(-.5, .5, num=5 + 1)[1:-1],   # vel x
            np.linspace(-.8, .8, num=7 + 1)[1:-1],   # vel y
            np.linspace(-.2, .2, num=3 + 1)[1:-1],   # rot
            np.linspace(-.2, .2, num=5 + 1)[1:-1],   # ang vel
            [.5], #lc
            [.5], #rc
        ])

        # Independent environment driven by the built-in heuristic policy.
        self._expert = gym.make("LunarLander-v2")
        self._expert.seed(42)

    def expert_trajectory(self):
        """Roll out one heuristic episode; return (initial_state, [(action, reward, next_state), ...])."""
        state, trajectory, done = self._expert.reset(), [], False
        initial_state = self.observation(state)
        while not done:
            action = gym.envs.box2d.lunar_lander.heuristic(self._expert, state)
            state, reward, done, _ = self._expert.step(action)
            trajectory.append((action, reward, self.observation(state)))
        return initial_state, trajectory
# Register a MountainCar variant with a longer episode limit (1000 steps
# instead of gym's default) under its own environment id.
gym.envs.register(
    id="MountainCar1000-v0",
    entry_point="gym.envs.classic_control:MountainCarEnv",
    max_episode_steps=1000,
    reward_threshold=-110.0,
)
| [
"martin.jenc@getmanta.com"
] | martin.jenc@getmanta.com |
13b235a66727792736ec940ae4bc3cc630a0c1fb | d44215864e30ad8039a1a294875e4222e3d23ebd | /build/geometry-hydro-devel/tf/catkin_generated/pkg.installspace.context.pc.py | dbd62ce2cd49cece7fca3f4fcc8794848494ff9a | [] | no_license | prathyusha-shine/abhiyan1.0 | 5c3eebfbbacb8b364180b9c2bd377c73cf29e693 | bf9be6462c132465ddbf8c20b1e9a4e1eabd596e | refs/heads/master | 2020-12-31T01:23:32.911145 | 2015-05-31T06:19:16 | 2015-05-31T06:19:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for the installed 'tf' package.
# Do not edit by hand; regenerate via the package's CMake build instead.
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories (empty string would mean none).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sudha/catkin_ws/install/include".split(';') if "/home/sudha/catkin_ws/install/include" != "" else []
# Run dependencies, converted from ';'-separated to space-separated form.
PROJECT_CATKIN_DEPENDS = "geometry_msgs;message_filters;message_runtime;roscpp;sensor_msgs;std_msgs;tf2_ros;rosconsole".replace(';', ' ')
# Linker flags exported by this package.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltf".split(';') if "-ltf" != "" else []
PROJECT_NAME = "tf"
PROJECT_SPACE_DIR = "/home/sudha/catkin_ws/install"
PROJECT_VERSION = "1.10.8"
| [
"sudha@sudha.(none)"
] | sudha@sudha.(none) |
727a12491030f78e21a7e888ebd6042f1aaa0389 | 8d127541e59cf5d3b15eac8e3079d745c7370a36 | /palach/unit-tests.py | 204528d44164bb8758815458b9c2fac5533a97c2 | [] | no_license | e-sim/palach-game | 6a95f7393256eace50632da82085c6e313ba688b | 5d04794fe09ac4a74b90b11526ffa2748b819ea4 | refs/heads/master | 2020-04-18T20:41:58.439007 | 2019-08-21T21:40:23 | 2019-08-21T21:40:23 | 167,744,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,713 | py |
import numpy as np
# tests to do:
# create/print empty matrix CHECK
# print matrix correctly
# print individual body parts
# print everything together
def scaffold():
    """Build and return the 10x20 character grid holding the gallows scaffold."""
    scaff_mat = np.full((10, 20), ".", dtype=str)
    # add the actual scaffold (is that what it's even called?)
    scaff_mat[9, 0] = "a"
    scaff_mat[9, 1] = "b"
    # BUG FIX: assigning a generator expression (`("c" for i in ...)`) does not
    # fill the column with "c" -- NumPy treats the generator as a single
    # object. Broadcasting the scalar string fills every row of column 1.
    scaff_mat[:, 1] = "c"
    return scaff_mat
def print_scaff(scaff):
    """Print the grid row by row, with a blank line after each row."""
    for row in scaff:
        print("".join(row), end="")
        # print("\n") emits the newline character plus print's own newline,
        # producing the blank separator line.
        print("\n")
# NOTE(review): this rebinds the module name `scaffold` from the function to
# its result, shadowing the function -- scaffold() cannot be called again
# after this line. Consider a different variable name.
scaffold = scaffold()
#print_scaff(scaffold)
# Palach.py
# Russian hangman program
# eventually can also be used for other languages
#
# command: palach.py language level
# language wordlist files must be named as [languagename].txt
#!/usr/bin/env python3
import sys, random, re, logging
import numpy as np
def game_print(letters):
    """Print the game blanks/letters on one line, one leading space per item.

    An empty sequence prints an empty line, matching the original behavior.
    """
    # Build the line in a single pass instead of quadratic string
    # concatenation via repeated str.format calls.
    print("".join(" " + letter for letter in letters))
def main():
    """Run one interactive hangman round: pick a word, loop over guesses.

    Currently hard-wired to the word "hello" and the "test" word list for
    debugging. Correct guesses are revealed in place; wrong guesses are
    collected in ``wrong_letters``.
    """
    logger = logging.getLogger("palach_log")
    logger.setLevel(logging.DEBUG)
    #lang = sys.argv[1]
    lang = "test" #this is for debuggingpurposes
    dict_file = lang + ".txt" # later will use .csv
    with open(dict_file, "r") as d:
        # VV this will need to change once i use the csv file
        wordlist = d.readlines()
    #play_word = random.choice(wordlist).strip()
    play_word = "hello"
    # The set of distinct letters still to be guessed.
    letter_set = set()
    for char in play_word:
        letter_set.add(char)
    #TODO: either don't allow two-word items or deal with them somehow-- make blank not appear?
    guessed_letters = set()
    wrong_letters = set()
    print(letter_set)
    # TODO: ideally i'll have either another list or part of the same csv file that
    # translates all the text for different languages
    print("hi erica!")
    player_sees = ["_" for x in range(len(play_word))]
    game_print(player_sees)
    blanks_left = len(play_word)
    # makes the matrix to form the hangman & scaffold
    #TODO fix loop structure
    # NOTE(review): blanks_left is decremented once per correct *letter*, not
    # once per revealed blank, so for words with repeated letters (like
    # "hello") it never reaches 0 and the loop does not terminate after the
    # word is fully revealed -- confirm and fix.
    while blanks_left: #and hangman isn't finished
        guess = input("угадай букву: ")
        if len(guess) > 1:
            print("только одна буква")
            continue
        if guess in guessed_letters:
            print("уже угадал!") #prob should put it in passive
            continue
        if guess in letter_set:
            #cleanup
            guessed_letters.add(guess)
            letter_set.discard(guess)
            # add to player_sees in correct place
            indeces = [match.start() for match in re.finditer(guess, play_word)]
            #TODO what if there's two matches??????
            for i in indeces:
                # this adds the guess to the proper place in the player_sees list
                player_sees[i] = guess
            blanks_left -= 1
            #print(player_sees) <--- i'm just testing turning this off
            # also print wrong guesses, plus the hangman
        else:
            print("угадай еще раз")
            # yes i'm testing using print :(
            #print(guess + "/t" + guessed_letters) <--- won't print set fyi
            # TODO add to wrong guesses & print that
            guessed_letters.add(guess)
            wrong_letters.add(guess)
            #update hangman
            #continue
        num_letters = len(guessed_letters)
        game_print(player_sees)
    #scaffold()
if __name__ == "__main__":
main() | [
"ejsim@uw.edu"
] | ejsim@uw.edu |
e38fc08214ef76a023d6437854df171b18b8787e | 499bc57c6e82ae95acecbb11053b4a2e05acaafe | /p02/q10_find_largest.py | a6fe4edd7c9750583d0009c9a7e95549a58d44bc | [] | no_license | aaaaaaaaaaaaaaaaace/cp2019 | 4178e09251e92c3ddedad47b9c78b5838c037580 | e73408ba9fbb716b2541b0c4c662cebf5d61b800 | refs/heads/master | 2020-04-24T00:40:42.721284 | 2019-04-28T14:56:05 | 2019-04-28T14:56:05 | 171,573,360 | 0 | 1 | null | 2019-10-19T12:45:15 | 2019-02-20T00:39:13 | Python | UTF-8 | Python | false | false | 168 | py | n = 1
# Find the largest integer n whose cube is still below the 12000 limit.
# (Re)start from 1 and advance while the *next* cube would stay under the
# limit -- this replaces the original break-then-decrement dance.
n = 1
while (n + 1) ** 3 < 12000:
    n = n + 1
print('The largest integer n such that n^3 is less than 12000 is', n)
| [
"noreply@github.com"
] | noreply@github.com |
fd57ab7ba85d0ae8524c7824bae4fa17d758af0e | 9718a90a275f28e76a29ec47690bdadf0ac42dd3 | /yml.py | 6cdf433d4f16afe980f8900e5adfa3957138a7f9 | [] | no_license | rty83924/check_m3u8 | f6cd0237a3832be0b6b982709be9e656e36e48d0 | 13ee1b839c4b3a6295456d0f8f881194e1691bac | refs/heads/master | 2022-11-22T20:36:17.190758 | 2020-07-13T03:45:18 | 2020-07-13T03:45:18 | 278,595,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | import yaml
import os
# Absolute directory of this module; config paths are resolved relative to it.
a = os.path.dirname(os.path.abspath(__file__))
# Loader=yaml.FullLoader avoids the "yaml.load without Loader is unsafe" warning.
class sendurl:
    """Read the domain / app / stream name lists from the YAML config files.

    NOTE(review): streamName() reads app.yml (key 'app') while streamApp()
    reads stream.yml (key 'stream') -- the method naming looks swapped
    relative to the files; confirm the intent before renaming anything.
    """

    def __init__(self):
        # Caches for the most recently loaded lists (the original built
        # throwaway local lists first and had a stray `pass`).
        self.stream_name = []
        self.domain_name = []
        self.stream_app = []

    def domain(self):
        """Load and return the list under the 'domain' key of config/domain.yml."""
        # `with` closes the file itself; the explicit f.close() was redundant.
        with open('{}/config/domain.yml'.format(a), 'r') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
        self.domain_name = list(data['domain'])
        return self.domain_name

    def streamName(self):
        """Load and return the list under the 'app' key of config/app.yml."""
        with open('{}/config/app.yml'.format(a), 'r') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
        self.stream_name = list(data['app'])
        return self.stream_name

    def streamApp(self):
        """Load and return the list under the 'stream' key of config/stream.yml."""
        with open('{}/config/stream.yml'.format(a), 'r') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
        self.stream_app = list(data['stream'])
        return self.stream_app
class saveurl:
    """Combine every configured domain/app/stream into candidate stream URLs."""

    def __init__(self):
        pass

    def tryurl(self):
        """Return the full cross-product of https://domain/app/stream URLs."""
        config = sendurl()
        domains = config.domain()
        apps = config.streamApp()
        names = config.streamName()
        # Same ordering as the original nested loops: domain-major.
        return ['https://{}/{}/{}'.format(d, app, name)
                for d in domains
                for app in apps
                for name in names]
if __name__ == '__main__':
    # sendurl = sendurl()
    # print(sendurl.domain())
    # print(sendurl.streamApp())
    # NOTE(review): this rebinds the class name `saveurl` to an instance,
    # shadowing the class for the rest of the block.
    saveurl = saveurl()
    print(saveurl.tryurl())
"rty83924@gmail.com"
] | rty83924@gmail.com |
e8e9dae290021734b27888fec8c99911efa8f68b | 545851661d0d5cc4f99aca0a3f19a24cde47d704 | /board.py | 13fb0c8b2be545d9ad8e962406a412048abff19d | [] | no_license | space-blob/toy-baduk | 3decaeddc6a384c000d882a5ed27e5853ae748f0 | ea8f37b1f151af2630a4d959b2816f3935d4419f | refs/heads/master | 2020-12-18T15:03:47.594816 | 2020-01-21T19:50:50 | 2020-01-21T19:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | #######################################
# Exercise 2: Implement enough of #
# this class that the stones appear on#
# the board when space is pressed #
#######################################
from enum import Enum
class Stone(Enum):
    """The three possible contents of a board intersection."""
    BLACK = 1
    WHITE = 2
    EMPTY = 0


class Board:
    """Game board for the toy baduk exercise.

    Implemented per the exercise contract: after
    ``board.set_stone_at(1, 1, Stone.BLACK)`` a later
    ``board.get_stone_at(1, 1)`` returns ``Stone.BLACK``; unset points
    read as ``Stone.EMPTY``.
    """

    def __init__(self, width, height):
        """Remember the board dimensions and start with an empty board."""
        self.width = width
        self.height = height
        # Sparse storage: (x, y) -> Stone; absent keys mean EMPTY.
        self._stones = {}

    def get_stone_at(self, x, y):
        """Return the stone at (x, y), or Stone.EMPTY if the point is vacant."""
        return self._stones.get((x, y), Stone.EMPTY)

    def set_stone_at(self, x, y, stone):
        """Place ``stone`` at (x, y); setting Stone.EMPTY clears the point."""
        if stone is Stone.EMPTY:
            self._stones.pop((x, y), None)
        else:
            self._stones[(x, y)] = stone

    def _neighbors(self, x, y):
        """
        By convention, fields starting with _ are meant to be accessed only
        from inside the class. Yields the in-bounds orthogonal neighbours.
        """
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.width and 0 <= ny < self.height:
                yield nx, ny

    def _count_liberties(self, x, y):
        """Count the distinct empty points adjacent to the group at (x, y)."""
        colour = self.get_stone_at(x, y)
        if colour is Stone.EMPTY:
            return 0
        # Flood-fill the connected same-colour group, collecting liberties.
        seen = {(x, y)}
        frontier = [(x, y)]
        liberties = set()
        while frontier:
            cx, cy = frontier.pop()
            for nx, ny in self._neighbors(cx, cy):
                neighbour = self.get_stone_at(nx, ny)
                if neighbour is Stone.EMPTY:
                    liberties.add((nx, ny))
                elif neighbour is colour and (nx, ny) not in seen:
                    seen.add((nx, ny))
                    frontier.append((nx, ny))
        return len(liberties)

    def is_alive(self, x, y):
        """Return True when the group containing (x, y) has at least one liberty."""
        return self._count_liberties(x, y) > 0
| [
"matthew.watson@noisenet.com.au"
] | matthew.watson@noisenet.com.au |
8aba36714ad49704fa0bde10281ae544f509ab13 | 5a4191c9621f513af1435672c28957063dc18c13 | /saratov24/middlewares.py | 76afe5277f501bc8e20bb6b787b082ca02dac101 | [] | no_license | Ny0xCF/Information-retrieval | b3612d34fe0ff19211593bb1cbfab0d0deebc65b | c0aadbfbb35341ac5440860c136239c1cabe3805 | refs/heads/main | 2023-02-05T09:04:38.991084 | 2020-12-24T14:44:57 | 2020-12-24T14:44:57 | 323,800,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class Saratov24SpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class Saratov24DownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"denis.shepelev@exactprosystems.com"
] | denis.shepelev@exactprosystems.com |
ebc71c2db007b50f958c8a4feccd507aedaee1c8 | c113b16336bf5d2416632821b9d4f7b7160ad45f | /src/app/migrations/0005_auto_20210422_1620.py | 367783483e4c97ecf62fad222e29176c6bfa2406 | [
"MIT"
] | permissive | currobeltran/TFG | 59a79c8c7fd12e01c94d6a3d59eec6e6a0d306ab | 3137edd1baeb02f2811c272bbbf3884c3af6d3fe | refs/heads/main | 2023-08-09T11:17:27.105945 | 2021-09-21T17:55:05 | 2021-09-21T17:55:05 | 341,180,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # Generated by Django 3.1.6 on 2021-04-22 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Asignatura's credit fields FloatField."""

    dependencies = [
        ('app', '0004_auto_20210422_1615'),
    ]

    operations = [
        # Allow fractional credit values for both credit columns.
        migrations.AlterField(
            model_name='asignatura',
            name='CreditosGA',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='asignatura',
            name='CreditosGR',
            field=models.FloatField(),
        ),
    ]
| [
"franciscobel@correo.ugr.es"
] | franciscobel@correo.ugr.es |
63b205c5d6cff1d5d1608a1ec32f83e78195167b | c2884b526df441736d132e907c2500107d726785 | /code/main.py | ed32f2b6e98ba9380203ade9cc671db5dfe7f0b8 | [
"MIT"
] | permissive | miniminisu/dcgan-code-cu-foam-3D | eead01188ee1a62ea32db4316b8abb103695d4e6 | 9b7e4b184da33fa259ab42f28d9d3d77fe28bd3e | refs/heads/master | 2023-06-05T17:56:28.738437 | 2021-06-28T07:16:36 | 2021-06-28T07:16:36 | 380,934,044 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,663 | py | from __future__ import print_function
import argparse
import os
import random
# import torch
import torch.nn as nn
# import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
from torch.autograd import Variable
from dataset import HDF5Dataset
# from hdf5_io import save_hdf5
import dcgan
import numpy as np
import utils
# --- Training-script setup: CLI options, output folder, seeding, data. ---
np.random.seed(43)

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='3D', help='3D')
parser.add_argument('--dataroot', default='preprocess\copper_foam_64', help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')  # batch size
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')  # size of the training images
parser.add_argument('--nz', type=int, default=500, help='size of the latent z vector')  # dimensionality of z
parser.add_argument('--ngf', type=int, default=64) # number of generator filters
parser.add_argument('--ndf', type=int, default=8) # number of discriminator filters
parser.add_argument('--niter', type=int, default=1000, help='number of epochs to train for')  # number of epochs
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')  # learning rate
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')  # Adam optimizer parameter
# NOTE(review): default=True combined with action='store_true' means opt.cuda
# is always True regardless of the flag -- confirm whether that is intended.
parser.add_argument('--cuda', default=True, action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
#parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='', help='folder to output images and model checkpoints')

opt = parser.parse_args()
print(opt)
# Results directory name encodes the main hyper-parameters of this run.
out_dir = './results'+'_imageSize='+str(opt.imageSize)+'_batchSize='+str(opt.batchSize)+'_nz='+str(opt.nz)+'_ngf='+str(opt.ngf)+'_ndf='+str(opt.ndf)+'/'
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
utils.save_prgs(opt, out_dir)

# Where checkpoints/images are saved.
try:
    os.makedirs(opt.outf)
except OSError:
    pass

opt.manualSeed = 43  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# Convert the images to values of -1 or 1 (per the original note).
if opt.dataset in ['3D']:
    dataset = HDF5Dataset(opt.dataroot, input_transform=transforms.Compose([transforms.ToTensor()]))

assert dataset
# Load the dataset.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))

ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 1  # number of image channels: single-channel grayscale data, not RGB
# Custom weight initialisation applied to netG and netD via Module.apply():
# the init scheme is chosen per layer type from the class name.
def weights_init(m):
    """DCGAN init: conv weights ~ N(0, 0.02); batch-norm scale ~ N(1, 0.02), bias = 0."""
    kind = type(m).__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# Instantiate the 3-D DCGAN generator.
netG = dcgan.DCGAN3D_G(opt.imageSize, nz, nc, ngf, ngpu)
netG.apply(weights_init) # initialise G's weights
# If a generator checkpoint path was given, load its weights and
# dump ten generated volumes before training starts.
if opt.netG != '':
    print('通过加载模型来生成10张图像')
    netG.load_state_dict(torch.load(opt.netG)) # load the previously trained model
    netG.cuda() # move to GPU
    for i in range(10):
        fixed_noise = torch.FloatTensor(1, nz, 13, 13, 13).normal_(0, 1) # noise used to generate images
        fixed_noise.cuda() # NOTE(review): Tensor.cuda() is not in-place; the returned copy is discarded here
        fake = netG(fixed_noise) # generate a volume with the already-trained model
        utils.save_tiff(fake, out_dir + 'fake_generate_again'+str(i)+'.tiff')
    print('10张重构生成完毕')
# print the generator's architecture
print(netG)
# Instantiate the 3-D DCGAN discriminator.
netD = dcgan.DCGAN3D_D(opt.imageSize, nz, nc, ndf, ngpu)
netD.apply(weights_init) # initialise D's weights
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)
# binary cross-entropy loss
criterion = nn.BCELoss()
# Allocate the working tensors:
# input  -- one training batch [batchSize, 1, imageSize^3]
# noise  -- per-step latent batch [batchSize, nz, 1, 1, 1]
# fixed_noise    [1, nz, 5, 5, 5] -- for periodic large generated samples
# fixed_noise_TI [1, nz, 1, 1, 1] -- for training-image-sized samples
input, noise, fixed_noise, fixed_noise_TI = None, None, None, None
input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1, 1) # noise used for training
fixed_noise = torch.FloatTensor(1, nz, 5, 5, 5).normal_(0, 1) # noise used to generate images
fixed_noise_TI = torch.FloatTensor(1, nz, 1, 1, 1).normal_(0, 1) # noise used to generate images
label = torch.FloatTensor(opt.batchSize)
real_label = 1
# real_label = np.random.uniform(0.7, 0.9)
fake_label = 0
# fake_label = np.random.uniform(0, 0.3)
# move everything onto the GPU
if opt.cuda:
    netD.cuda()
    netG.cuda()
    criterion.cuda()
    input, label = input.cuda(), label.cuda()
    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
    fixed_noise_TI = fixed_noise_TI.cuda()
# wrap tensors in Variables (legacy pre-0.4 PyTorch autograd API)
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)
fixed_noise_TI = Variable(fixed_noise_TI)
# Adam optimisers for D and G
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
# path of the loss log file
loss_csv_path = out_dir + "training_curve.csv"
# ---- training loop ----
gen_iterations = 0
for epoch in range(opt.niter):
    # iterate over batches of training data
    for i, data in enumerate(dataloader, 0):
        # append this step's losses to the csv file
        f = open(out_dir + "training_curve.csv", "a")
        ############################
        # (1) Update the D network: maximize log(D(x)) + log(1 - D(G(z))).
        # Early in training both terms are large (D(x) near 1, D(G(z)) near 0).
        ###########################
        # train with real
        netD.zero_grad()
        real_cpu = data
        batch_size = real_cpu.size(0)
        # input.data.resize_(real_cpu.size()).copy_(real_cpu)
        # label.data.resize_(batch_size).fill_(real_label)
        input.resize_(real_cpu.size()).copy_(real_cpu) # one batch of real data
        label.resize_(batch_size).fill_(real_label) # real samples get the real label
        output = netD(input) # this is D(x)
        errD_real = criterion(output.squeeze(), label) # cross-entropy term log(D(x))
        errD_real.backward() # back-propagate
        D_x = output.data.mean() # mean of D(x): probability D assigns to x being real
        # train with fake
        # noise.data.resize_(batch_size, nz, 1, 1, 1)
        # draw a fresh latent batch z below
        noise.resize_(batch_size, nz, 1, 1, 1)
        noise.data.normal_(0, 1) # z ~ N(0, 1) (unbounded; values beyond [-1, 1] are expected)
        fake = netG(noise).detach() # G(z), detached so this pass does not update G
        # label the generated batch as fake
        label.data.fill_(fake_label)
        output = netD(fake) # D(G(z))
        errD_fake = criterion(output.squeeze(), label) # cross-entropy term
        errD_fake.backward() # back-propagate
        D_G_z1 = output.data.mean() # mean of D(G(z)) before G's update
        errD = errD_real + errD_fake # sum of the two terms of D's objective
        optimizerD.step() # optimise D
        ############################
        # (2) Update the G network: maximize log(D(G(z)))
        ###########################
        g_iter = 1
        while g_iter != 0:
            netG.zero_grad()
            label.data.fill_(1.0) # fake labels are real for generator cost
            noise.data.normal_(0, 1) # a fresh z, different from the one used for D
            fake = netG(noise)
            output = netD(fake)
            errG = criterion(output.squeeze(), label)
            errG.backward()
            D_G_z2 = output.data.mean() # mean of D(G(z)) after this G step
            optimizerG.step() # optimise G
            g_iter -= 1
        gen_iterations += 1
        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
              % (epoch, opt.niter, i, len(dataloader),
                 # errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
                 errD.data.item(), errG.data.item(), D_x, D_G_z1, D_G_z2)) # D_G_z1 is measured right after updating D; D_G_z2 after updating G
        f.write('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.niter, i, len(dataloader),
                   # errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
                   errD.data.item(), errG.data.item(), D_x, D_G_z1, D_G_z2))
        f.write('\n')
        f.close()
    # Periodically dump intermediate samples and model checkpoints.
    # NOTE(review): the original comment said "every 5 epochs" but the
    # condition is `epoch % 1 == 0`, i.e. every epoch.
    if epoch % 1 == 0:
        # generate sample volumes
        # fake_z = netG(noise)
        fake = netG(fixed_noise)
        # fake_batch = netG(batch_noise[0])
        fake_TI = netG(fixed_noise_TI)
        # for i in range(len(fake_batch)):
        utils.save_tiff(fake_TI, out_dir+'fake_TI_{0}.tiff'.format(epoch))
        utils.save_tiff(fake, out_dir + 'fake_{0}.tiff'.format(epoch))
        # save_hdf5(fake_TI.data, work_dir+'fake_batch_{0}.tiff'.format(epoch))
        # save_hdf5(fake_z.data, work_dir + 'fake_z_{0}.hdf5'.format(epoch))
        # save_hdf5(fake.data, work_dir+'fake_samples_{0}.hdf5'.format(epoch))
        # save_hdf5(fake_TI.data, work_dir+'fake_TI_{0}.hdf5'.format(epoch))
        # save model checkpoints
        torch.save(netG.state_dict(), out_dir + 'netG_epoch_%d.pth' % epoch)
        torch.save(netD.state_dict(), out_dir + 'netD_epoch_%d.pth' % epoch)
# plot the loss curves
utils.save_learning_curve(out_dir)
| [
"269178748@qq.com"
] | 269178748@qq.com |
61cf16649f12b5e42bac2e5608be5e14cfd68243 | 2f2b107e66a874b457e5b446842ffa445fb53966 | /ejercicio15.py | 393b3a54bbd0113cd566890d40b78216f13afa0a | [] | no_license | JORGEANDREE/Trabajo | 22c823336f9b1b7284d3c1e78fb15dafed477629 | d15a81e195cfda052c4cd11d0df24b74d43e00c2 | refs/heads/master | 2020-09-03T03:24:21.837880 | 2019-11-03T23:11:38 | 2019-11-03T23:11:38 | 219,373,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #calcular el area de un rectangulo
# Compute the buoyant force (empuje) as empuje = p * g * v.
# (The original header comment said "area de un rectangulo", which did
# not match the computation actually performed.)
empuje, p, g, v = 0, 0, 0, 0
# assign the input values
p = 45
g = 40
v = 16
# computation
empuje = p * g * v
# display the values
print(f"p es: {p}")
print(f"g es: {g}")
print(f"v es: {v}")
print(f"el empuje es: {empuje}")
"jorgeandreeserrepesantisteban@hotmail.com"
] | jorgeandreeserrepesantisteban@hotmail.com |
ebbc0dd5d846d2bc493872f57dc429124b5aa129 | c962551c85fb5ea0c90f88e07601788637fc1ed3 | /trendapp/views.py | e62b217a1896fe783efb4ddbd3b372e67f318390 | [] | no_license | solahyun/Hackathon_gis | c1c195d761597eedd98532652e60175a01cef435 | 30e10d2509f9a3cb3f3529a150a5ab8314f9a83f | refs/heads/master | 2023-08-31T03:56:34.604739 | 2021-10-12T19:21:51 | 2021-10-12T19:21:51 | 416,252,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from django.conf.urls import url
from django.shortcuts import render
from django.views.generic import TemplateView, ListView
from trendapp.models import NewModel
class BasicTemplateView(TemplateView):
    """Render the static base page (template-only view, no extra context)."""
    template_name = 'trendapp/base.html'
class TrendListView(ListView):
    """List all ``NewModel`` records using the bukgu template."""
    template_name = 'trendapp/bukgu.html'
    model = NewModel  # ListView derives its queryset from this model
def let_write(request):
    # Handle a text submission from a form POST.
    # NOTE(review): both render() calls below omit the required
    # `template_name` argument (signature: render(request, template_name,
    # context=...)), so they raise TypeError at runtime.
    # NOTE(review): the "{% static %}" template tag inside the Python
    # string literal is never processed by the template engine here; it is
    # passed through verbatim.
    # NOTE(review): a POST without "input_text" falls through and returns
    # None, which Django rejects (views must return an HttpResponse).
    # (Indentation reconstructed; the pairing of this else with the outer
    # if is ambiguous in the exported source — verify against the repo.)
    if request.method == "POST":
        if request.POST.get("input_text"):
            image = "{% static 'img/1.png' %}"
            return render(request, context={'text': image})
    else:
        return render(request, context={'text': ""})
"sola0419@likelion.org"
] | sola0419@likelion.org |
ee29004e65e670b2df622f983fb6ad0c07afa32e | eb5044f3301e1b9c608fc5d9a28d9e1f208490ba | /initBot.py | 2aa6d5d43d4edf14a21d4a95c8a21f9052060e54 | [] | no_license | Gabsii/gabsii-chatbot | a8ea4eed822bba04b07308d2941d54064bad1a81 | 06de87d2ac60a728e1e3e62bc5ba421df06d7fb8 | refs/heads/master | 2020-06-10T03:42:37.198206 | 2019-10-18T11:25:41 | 2019-10-18T11:25:41 | 193,571,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | ############################################
# #
# I exported my data from a #
# WhatsApp chat using an extension #
# from the Chrome Store #
# (Backup WhatsApp Chats) #
# #
# But it's broken since csv's don't #
# handle line breaks (\n) well #
# #
############################################
import csv
import sqlite3
class InitBotData(object):
    """Load an exported WhatsApp chat (semicolon-delimited CSV) into SQLite.

    The CSV is expected to have a header row and 10 quoted columns per
    message; 'null' markers become SQL NULLs on import.
    """

    def __init__(self):
        super().__init__()
        self.csv = 'data.csv'                      # exported chat, one message per row
        self.conn = sqlite3.connect('chatbot.db')  # local SQLite database file

    def testCSV(self):
        """Sanity-check the export: print every row (among the first
        60 000) that does not have exactly 10 columns, then a total count.
        """
        # `with` guarantees the file handle is closed (the original relied
        # on it too; kept).
        with open(self.csv, newline='', encoding="utf-8") as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='"',
                                quoting=csv.QUOTE_ALL, skipinitialspace=True)
            counter = 1
            wrongLen = 0
            maxCheck = 60000
            next(reader)  # skip the header row
            for row in reader:
                if counter >= maxCheck:
                    break
                if len(row) != 10:
                    print("_____")
                    print('row: ' + str(counter) + ', len: ' + str(len(row)))
                    print(row)
                    print("_____")
                    wrongLen += 1
                counter += 1
            print('len. ' + str(wrongLen))

    def createTables(self):
        """Create the ``chatdata`` table if it does not exist.

        Returns True on success; on a sqlite3 error the message is printed
        and None is returned (original contract preserved).
        """
        try:
            cursor = self.conn.cursor()
            cursor.execute('''CREATE TABLE IF NOT EXISTS chatdata
                     (datetime TEXT,
                      sender TEXT NOT NULL,
                      message TEXT NOT NULL,
                      media TEXT,
                      quotedMessage TEXT,
                      quotedMessageDatetime TEXT)''')
            self.conn.commit()
            return True
        except sqlite3.Error as e:
            print("An error occurred:", e.args[0])

    def insertData(self):
        """Read ``self.csv`` and bulk-insert every message into ``chatdata``.

        Rows are buffered and flushed with ``executemany`` in batches of
        10 000, with a final flush for the remainder; the whole import is
        committed once at the end.
        """
        try:
            cursor = self.conn.cursor()
            with open(self.csv, newline='', encoding="utf-8") as csvfile:
                reader = csv.reader(csvfile, delimiter=';', quotechar='"',
                                    quoting=csv.QUOTE_ALL, skipinitialspace=True)
                next(reader)  # skip the header row
                data = []
                for row in reader:
                    datetime = row[1] + " " + row[2]
                    sender = row[4]
                    message = row[5]
                    media = row[6] if row[6] != 'null' else None
                    quotedMessage = row[7] if row[7] != 'null' else None
                    quotedMessageDatetime = row[8] + " " + row[9] if row[8] != 'null' else None
                    data.append((datetime, sender, message, media,
                                 quotedMessage, quotedMessageDatetime))
                    # Flush in batches of 10 000 rows.  (The original tested
                    # ``reader.line_num % 10000`` truthiness, which flushed on
                    # almost every row and defeated the batching; the rows
                    # inserted are identical, only the batching is restored.)
                    if reader.line_num % 10000 == 0:
                        cursor.executemany('INSERT INTO chatdata VALUES (?, ?, ?, ?, ?, ?)', data)
                        data = []
                if data:
                    cursor.executemany('INSERT INTO chatdata VALUES (?, ?, ?, ?, ?, ?)', data)
            self.conn.commit()
        except sqlite3.Error as e:
            print("An error occurred:", e.args[0])

    def closeConnection(self):
        """Close the underlying SQLite connection (returns None)."""
        return self.conn.close()
def main():
    """Create the schema, import the chat export, then close the database."""
    bot = InitBotData()
    bot.createTables()
    print("start insert")
    bot.insertData()
    bot.closeConnection()
    print("insert done")


if __name__ == '__main__':
    main()
"lukas.gabsi@tele2.at"
] | lukas.gabsi@tele2.at |
dc84ed85efea76c6d1ec5005d6bcb25b271a1939 | fc9ec5bf40107d072b2e1efbdb5a1184c35ef52d | /learning/01.python-basics/02.while-loop.py | 4f89f3ba7765a2767255d92b2b8b79dd934436c0 | [
"MIT"
] | permissive | dinotumu/code.py | 8f73193963490b7275dd2aa25dad28f200b58314 | d1a569bcecabbf44126b70e7b71ef4a761b2177c | refs/heads/master | 2021-05-26T08:45:51.437854 | 2020-06-16T17:52:55 | 2020-06-16T17:52:55 | 128,076,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | #!/usr/bin/python3
"""
Module Docstring
"""
__author__ = "Dinesh Tumu"
__version__ = "0.1.0"
__license__ = "MIT"

# no imports needed

# shared iterable used by the membership-test loop below
var_1 = range(10)


def main():
    """Demonstrate ``while`` loops: conditions, ``else`` clauses, ``break`` and ``continue``."""
    print("\nWhile Loops\n")

    # A while/else runs the else branch exactly once, when the condition
    # finally evaluates false (a break would skip it).

    # Increment before printing: emits 1..10.
    iter_1 = 0
    while iter_1 < 10:
        iter_1 += 1
        print(f"{iter_1}", end=" ")
    else:
        print(f"\nLoop ended with iter_1 = {iter_1}")

    # Print before incrementing: emits 0..9.
    iter_2 = 0
    while iter_2 < 10:
        print(f"{iter_2}", end=" ")
        iter_2 += 1
    else:
        print(f"\nLoop ended with iter_2 = {iter_2}")

    # Loop while the counter is still a member of var_1 (0..9).
    iter_3 = 0
    while iter_3 in var_1:
        print(f"{iter_3}", end=" ")
        iter_3 += 1
    else:
        print(f"\nLoop ended with iter_3 = {iter_3}")

    # break leaves the loop once the counter exceeds 3; continue skips
    # the print for 2 — so only "1 " and "3 " are emitted.
    iter_4 = 0
    while iter_4 < 5:
        iter_4 += 1
        if iter_4 > 3:
            break
        if iter_4 == 2:
            continue
        print(f'{iter_4} ')


if __name__ == "__main__":
    # Executed only when run as a script, never on import.
    main()
"saidinesh.tumu@gmail.com"
] | saidinesh.tumu@gmail.com |
1d052b81082b3d7bfb01013465626aa3181cfe2b | 96fdd7660878ce8ab4ebe4938835b8acc5ec11a2 | /hooks/pre_gen_project.py | ad32077404e7e0e0a1fb7ba1e5c3e606c9b9db5b | [
"BSD-3-Clause",
"MIT"
] | permissive | ninjabit/cookiecutter-serverless-aws-lambda | 9b486f47802d475fa3d255078337db533760f811 | 1f4175f8d7e5bb0efd7d7667c8e1e6a12d0289c9 | refs/heads/master | 2020-10-01T18:05:10.792574 | 2019-12-12T14:52:26 | 2019-12-12T14:52:26 | 227,594,267 | 0 | 0 | NOASSERTION | 2019-12-12T11:48:05 | 2019-12-12T11:48:04 | null | UTF-8 | Python | false | false | 334 | py | import re
import sys
# A valid Python module name: a letter or underscore followed by any
# number of letters, digits or underscores.  (The original pattern used
# '+' for the tail, which wrongly rejected single-character names such
# as 'a' — a single letter is a valid Python identifier/module name.)
MODULE_REGEX = r'^[_a-zA-Z][_a-zA-Z0-9]*$'
# cookiecutter renders this placeholder before the hook runs; the raw,
# unrendered value never matches the regex.
module_name = '{{ cookiecutter.lambda_name}}'
if not re.match(MODULE_REGEX, module_name):
    print('ERROR: The project slug (%s) is not a valid Python module name. Please do not use a - and use _ instead' % module_name)
    # Exit non-zero so cookiecutter aborts project generation.
    sys.exit(1)
| [
"benjamin.weigel@europace.de"
] | benjamin.weigel@europace.de |
6d1ebf41ab4811a8adc2865d675e4b20db67c5ee | bc547e7d9e4b2c1e49edc2daaa735c9afb87f5ae | /test/test_all_fault_handlers.py | 49068eb0020cb52f7c87f865ccede508daaabba8 | [
"MIT"
] | permissive | farisachugthai/dynamic_ipython | f7ed092ff23b785fc8c545390c581338a64b9bda | 7572a01f09998812830379644c45af4df67a3e45 | refs/heads/master | 2022-11-05T11:48:48.344585 | 2021-08-28T04:25:05 | 2021-08-28T04:25:05 | 178,786,145 | 7 | 0 | MIT | 2022-10-25T10:16:39 | 2019-04-01T04:35:37 | Python | UTF-8 | Python | false | false | 1,785 | py | #!/usr/bin/env python3
import os
import shutil
import tempfile
import unittest
from os.path import abspath, realpath, isfile, exists
import pytest
from IPython.testing.globalipapp import get_ipython
from default_profile.startup.all_fault_handlers import tempdir, in_tempdir, in_dir
def remove_tmpdir(dir):
try:
shutil.rmtree(dir)
except (NotADirectoryError, FileNotFoundError, OSError):
pass
except PermissionError:
raise
@pytest.fixture
def cwd():
return os.path.abspath(os.path.curdir)
class FixturesTest(unittest.TestCase):
def setUp(self):
# unittest's version of the tmpdir fixture
self.tmpdir = tempfile.mkdtemp()
self.addCleanup(remove_tmpdir, self.tmpdir)
# def test_rehashx_does_not_raise(self):
# are you allowed to do this?
# would something like this work
# with self.assertRaises(None):
# Wait this isn't a context manager??? hold the fuck up.
# with not self.assertRaises(Exception):
# get_ipython().run_line_magic('rehashx')
def test_tempdir():
with tempdir() as tmpdir:
fname = os.path.join(tmpdir, 'example_file.txt')
with open(fname, 'wt') as fobj:
fobj.write('a string\\n')
assert not exists(tmpdir)
def test_in_tempdir(cwd):
with in_tempdir() as tmpdir:
with open('test.txt', 'wt') as f:
f.write('some text')
assert isfile('test.txt')
assert isfile(os.path.join(tmpdir, 'test.txt'))
assert not exists(tmpdir)
def test_given_directory(cwd):
# Test InGivenDirectory
with in_dir(cwd) as tmpdir:
assert tmpdir == abspath(cwd)
with in_dir(cwd) as tmpdir:
assert tmpdir == cwd
if __name__ == "__main__":
unittest.main()
| [
"farischugthai@gmail.com"
] | farischugthai@gmail.com |
91a920b2e347c791a85c6a08064cb15ea9a8588d | a7173c10b8f51708808435338fde644d4d7c169e | /models/se_resnext.py | 84b06da18e85814912fdef78682f994c577aa3bd | [
"MIT"
] | permissive | ORlGlN/MTANet | 86b4cf9357e4de1ea5de662cf593fe7b7561a56c | 65f5c356b18400bd1d1b80cffa1ec9f8c6570d2a | refs/heads/master | 2021-05-21T13:58:14.887454 | 2020-02-04T13:58:15 | 2020-02-04T13:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py | '''
New for ResNeXt:
1. Wider bottleneck
2. Add group for conv2
'''
import torch.nn as nn
import math
__all__ = ['SE_ResNeXt', 'se_resnext_50', 'se_resnext_101', 'se_resnext_152']
class Bottleneck(nn.Module):
    """SE-ResNeXt bottleneck: 1x1 -> grouped 3x3 -> 1x1 convs with a
    squeeze-and-excitation (SE) gate on the output and a residual add.

    Compared with a plain ResNet bottleneck, the two inner stages are
    twice as wide and the 3x3 conv is grouped (``num_group`` cardinality,
    ResNeXt style); SE channel attention rescales the block output before
    the residual addition.
    """
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, num_group=32):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes*2, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes*2)
        self.conv2 = nn.Conv2d(planes*2, planes*2, kernel_size=3, stride=stride,
                               padding=1, bias=False, groups=num_group)
        self.bn2 = nn.BatchNorm2d(planes*2)
        self.conv3 = nn.Conv2d(planes*2, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Squeeze step.  The original hard-coded nn.AvgPool2d(56/28/14/7)
        # per stage, which only worked for 224x224 network inputs and
        # raised AttributeError for any other ``planes`` value.
        # AdaptiveAvgPool2d(1) computes exactly the same global average
        # for those sizes and generalises to arbitrary input resolutions;
        # it has no parameters, so existing checkpoints stay compatible.
        self.globalAvgPool = nn.AdaptiveAvgPool2d(1)
        # Excitation MLP: planes*4 -> planes/4 -> planes*4, sigmoid-gated.
        self.fc1 = nn.Linear(in_features=planes * 4, out_features=round(planes / 4))
        self.fc2 = nn.Linear(in_features=round(planes / 4), out_features=planes * 4)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Projection shortcut so the residual matches the output shape.
            residual = self.downsample(x)
        # Squeeze-and-excitation: global pool -> bottleneck MLP -> sigmoid,
        # then rescale every channel of the block output.
        original_out = out
        out = self.globalAvgPool(out)
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        out = out.view(out.size(0), out.size(1), 1, 1)
        out = out * original_out
        out += residual
        out = self.relu(out)
        return out
class SE_ResNeXt(nn.Module):
    """ResNeXt backbone built from SE bottleneck blocks.

    ``forward`` returns the final feature map; the classification head
    (average pool + fc) is intentionally left commented out.
    """

    def __init__(self, block, layers, num_classes=1000, num_group=32):
        self.inplanes = 64
        super(SE_ResNeXt, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0], num_group)
        self.layer2 = self._make_layer(block, 128, layers[1], num_group, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], num_group, stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], num_group, stride=2)
        # self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style init for convolutions; batch-norms start as identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, num_group, stride=1):
        """Build one stage of ``blocks`` bottlenecks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the stride or width changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample, num_group=num_group)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes, num_group=num_group))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        return x
def se_resnext_50(**kwargs):
    """Construct an SE-ResNeXt-50 backbone (stage depths 3-4-6-3)."""
    return SE_ResNeXt(Bottleneck, [3, 4, 6, 3], **kwargs)
def se_resnext_101(**kwargs):
    """Construct an SE-ResNeXt-101 backbone (stage depths 3-4-23-3)."""
    return SE_ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)
def se_resnext_152(**kwargs):
    """Construct an SE-ResNeXt-152 backbone (stage depths 3-8-36-3)."""
    return SE_ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)
"noreply@github.com"
] | noreply@github.com |
1adfeb8bc7250a5a26649f5ce020d42d243eb8e8 | a9ddfe9219a10867178aac15dc8ef14d69369fef | /mongo-c-driver/docs/source/sphinx/source/conf.py | cc35378b18336dbb2077832747030aaf6c71fc88 | [
"Apache-2.0"
] | permissive | orachun/difsys | 0e2a331e5da1f29a094c4e57b8deef36c89fe9f7 | cafb9c6c9c097213a19788de2f8c520feca13b3a | refs/heads/master | 2021-01-19T02:41:32.837731 | 2013-09-13T04:21:31 | 2013-09-13T04:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,012 | py | # -*- coding: utf-8 -*-
#
# MongoDB C Driver documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 22 12:23:03 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) live in another
# directory, add it to sys.path here; relative paths must be made
# absolute, e.g. sys.path.insert(0, os.path.abspath('.')).

# -- General configuration ----------------------------------------------------

extensions = []                  # Sphinx extension modules to load
templates_path = ['_templates']  # custom template directory
source_suffix = '.rst'           # suffix of source files
master_doc = 'index'             # master toctree document

# Project information (drives the |version| / |release| substitutions).
project = u'MongoDB C Driver'
copyright = u'2011, 10gen'
version = '0.8'                  # short X.Y version
release = '0.8'                  # full version, including alpha/beta/rc tags

exclude_patterns = []            # patterns ignored when looking for sources
pygments_style = 'sphinx'        # syntax-highlighting style

# -- Options for HTML output --------------------------------------------------

# Extra static files (copied after the builtin static files, so a file
# named "default.css" would overwrite the builtin "default.css").
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'MongoDBCDriverdoc'

# -- Options for LaTeX output -------------------------------------------------

# One tuple per document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'MongoDBCDriver.tex', u'MongoDB C Driver Documentation',
     u'10gen', 'manual'),
]
"orachun@orachun-ThinkPad-X220.(none)"
] | orachun@orachun-ThinkPad-X220.(none) |
01c067e1d9683fc8b0effb113ec0376691b3119f | e97064b7c510d5530f2400d44a24e6a5781f6305 | /plotphon.py | 3ec5221a806723c84312a36c0e61b6ee6c8dc27d | [] | no_license | Tosykie/qetools | b67a42797d7b0538b66d34ecd5c23cafa0520501 | de35731a553aaefa5fb616f3d4da8253ce3d887f | refs/heads/master | 2023-03-20T09:49:51.469342 | 2020-08-27T11:53:50 | 2020-08-27T11:53:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py |
# -*- coding: utf-8 -*-
"""
@author: ponychen
plot phono band wth k point distance not k index
"""
import numpy as np
import sys
import math
def parse_filband(feig, npl=10):
# feig : filband in bands.x input file
# npl : number per line
feig=open(feig)
l=feig.readline()
nbnd=int(l.split(',')[0].split('=')[1])
nks=int(l.split(',')[1].split('=')[1].split('/')[0])
eig=np.zeros((nks,nbnd+1),dtype=np.float32)
kpoints=np.zeros([nks,3],dtype=np.float32)
for i in range(nks):
l=feig.readline()
kpoints[i,:]=list(map(float,l.split()))
if i==0:
kpath=0.0
else:
kpath+=np.sqrt(np.sum((kpoints[i,:]-kpoints[i-1,:])**2))
eig[i,-1]=kpath
count=0
# npl: max number of bands in one line
n=math.ceil(nbnd/npl)
for j in range(n):
l=feig.readline()
for k in range(len(l.split())):
eig[i][count]=l.split()[k] # str to float
count=count+1
feig.close()
return eig, nbnd, nks
eig, nbnd, nks=parse_filband(sys.argv[1],npl=10)
with open('freq.txt',"w") as f:
for j in range(nbnd):
for i in range(nks):
line=str(eig[i,-1])+" "+str(eig[i,j])+"\n"
f.write(line)
f.write("\n")
| [
"noreply@github.com"
] | noreply@github.com |
f353cbe703642087199de04d98620034ae4ba881 | e708a7987722c3cad5d948a168c06f3ab7fd7965 | /provenance/utils.py | 46b83c106723cbfecff9bb4fe44107403ebd14c0 | [
"MIT"
] | permissive | bmabey/provenance | 2e56a4728dbe69e311bd1c9d9831ea7a4ca3f92a | d29ad2ffc39fbc389600df092da9e7df4f920100 | refs/heads/trunk | 2021-01-09T05:27:33.624822 | 2020-12-02T17:54:19 | 2020-12-02T18:05:43 | 78,248,180 | 34 | 12 | NOASSERTION | 2020-12-02T20:32:32 | 2017-01-07T00:06:56 | Python | UTF-8 | Python | false | false | 4,616 | py | import inspect
from collections import OrderedDict, Sequence
import toolz as t
import toolz.curried as tc
from boltons import funcutils as bfu
UNSPECIFIED_ARG = '::unspecified::'  # sentinel marking a parameter that has no default


def args_extractor(f, merge_defaults=False):
    """
    Takes a function, inspects it's parameter lists, and returns a
    function that will return all of the named and key arguments
    back as a dictionary. The varargs are also returned which don't
    have a names.

    When ``merge_defaults`` is truthy, any parameter whose default is a
    dict has the caller-supplied dict merged on top of that default.  It
    may also be a sequence of parameter names restricting the merge to
    those parameters, or it may be taken from a ``__merge_defaults__``
    attribute placed on ``f`` by :func:`with_merged_defaults`.
    """
    spec = inspect.getfullargspec(f)
    # Map each positional parameter that has a default to that default.
    if spec.defaults:
        param_defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
    else:
        param_defaults = {}
    named_param_defaults = spec.kwonlydefaults or {}
    default_dicts = {}
    num_named_args = len(spec.args)
    # The decorated function may carry its merge configuration with it.
    if merge_defaults is True and hasattr(f, '__merge_defaults__'):
        merge_defaults = f.__merge_defaults__
    if merge_defaults:
        # Only dict-valued defaults participate in merging.
        default_dicts = t.pipe(
            t.merge(named_param_defaults, param_defaults),
            tc.valfilter(lambda v: isinstance(v, dict)),
        )
        # NOTE(review): ``Sequence`` is imported from ``collections`` at the
        # top of this module; on Python >= 3.10 it must come from
        # ``collections.abc`` instead.
        if isinstance(merge_defaults, Sequence):
            default_dicts = {k: default_dicts[k] for k in merge_defaults}

        def _args_dict(args, kargs):
            # Positional args fill the named slots first; the rest are varargs.
            unnamed_args = dict(zip(spec.args, args[0:num_named_args]))
            varargs = args[num_named_args:]
            kargs = t.merge(kargs, unnamed_args)
            # Merge caller-supplied dicts on top of the dict defaults.
            for k, d in default_dicts.items():
                kargs[k] = t.merge(d, kargs.get(k) or {})
            return varargs, kargs
    else:
        def _args_dict(args, kargs):
            unnamed_args = dict(zip(spec.args, args[0:num_named_args]))
            varargs = args[num_named_args:]
            kargs = t.merge(kargs, unnamed_args)
            return varargs, kargs

    return _args_dict
def with_merged_defaults(*kwargs_to_default):
"""
Introspects the argspec of the function being decorated to see what
keyword arguments take dictionaries. If a dictionary is passed in when
then function is called then it is merged with the dictionary defined
in the parameter list.
"""
merge_defaults = True
if len(kwargs_to_default) > 0:
merge_defaults = kwargs_to_default
def _with_merged_defaults(f):
extract_kargs = args_extractor(f, merge_defaults)
@bfu.wraps(f)
def _merge_defaults(*args, **kargs):
vargs, kargs = extract_kargs(args, kargs)
return f(*vargs, **kargs)
_merge_defaults.__merge_defaults__ = merge_defaults
return _merge_defaults
return _with_merged_defaults
def is_curry_func(f):
"""
Checks if f is a toolz or cytoolz function by inspecting the available attributes.
Avoids explicit type checking to accommodate all versions of the curry fn.
"""
return hasattr(f, 'func') and hasattr(f, 'args') and hasattr(f, 'keywords')
def _func_param_info(argspec):
params = argspec.args
defaults = argspec.defaults or []
start_default_ix = -max(len(defaults), 1) - 1
values = [UNSPECIFIED_ARG] * (len(params) - len(defaults)) + list(defaults[start_default_ix:])
return OrderedDict(zip(params, values))
def param_info(f):
if is_curry_func(f):
argspec = inspect.getfullargspec(f.func)
num_args = len(f.args)
args_to_remove = argspec.args[0:num_args] + list(f.keywords.keys())
base = _func_param_info(argspec)
return t.dissoc(base, *args_to_remove)
return _func_param_info(inspect.getfullargspec(f))
def inner_function(partial_fn):
"""Returns the wrapped function of either a partial or curried function."""
fn = partial_fn.func
if '__module__' not in dir(fn):
# for some reason the curry decorator nests the actual function
# metadata one level deeper
fn = fn.func
return fn
def partial_fn_info(partial_fn):
fn = inner_function(partial_fn)
varargs, kargs = args_extractor(fn)(partial_fn.args, partial_fn.keywords)
return {
'varargs': varargs,
'kargs': kargs,
'module': fn.__module__,
'name': fn.__name__,
}
# TODO: consider using the functions in joblib.func_inspect, e.g. for the fn name
def fn_info(fn):
if 'func' in dir(fn):
return partial_fn_info(fn)
return {'name': fn.__name__, 'module': fn.__module__, 'varargs': (), 'kargs': {}}
def when_type(type):
def _decorator(f):
@bfu.wraps(f)
def _when_type(val):
if isinstance(val, type):
return f(val)
else:
return val
return _when_type
return _decorator
| [
"ben@benmabey.com"
] | ben@benmabey.com |
c850a0ab18466434e370f60ebaf3101739c98932 | e25d2176f43dd0747970d5e746c8a9ffd61b0bee | /farm/loadvars.py | 0293388e79dc4ffc20b2b6747ee5681fd9f7254a | [] | no_license | Rothamsted-Ecoinformatics/FarmDataTools | 8942f3b7a11b543464735592c1811f0286b884c8 | a787665d18c0e4deb422f094bdd383328ca70a9b | refs/heads/master | 2022-07-07T10:27:26.551159 | 2022-06-22T10:10:22 | 2022-06-22T10:10:22 | 183,418,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import json
from service import FarmService
myFarm = FarmService().myFarm()
for crop in ['sugar beet','winter oilseed rape','spring barley','grass','spring beans','phacelia','black oats','seed mix','terralife betasola','parkers PS009 mix']:
#data = farm.term.send({'name':crop,'vocabulary':{'id':'7','resource': 'taxonomy_vocabulary'}})
#print(data) | [
"richard.ostler@rothamsted.ac.uk"
] | richard.ostler@rothamsted.ac.uk |
3ca71c49a6da7cfa420cf73cb3475c330888bddc | 0e65380b2cf1386a21d1d54f22240f5b3fd3d0d0 | /1121.py | 2c12ee6c777dba4225e75560c187371785338d59 | [] | no_license | hwanginbeom/python | b6023e6082e8942f06a859c7fd63e5a2a405772f | 01afbd4f54cda1e2994f2454ff2b01fef3e13116 | refs/heads/master | 2018-11-05T10:52:42.414380 | 2018-08-28T09:30:41 | 2018-08-28T09:30:41 | 105,129,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | IntVar() 0- 정수값으로 만들어 준는 것 | [
"rydn2002@gmail.com"
] | rydn2002@gmail.com |
d7f3e690090261c6ebfd8660bb04bbba1b3485b7 | eb01c6bd82aae1591d905e445ba4838648ecf16e | /secrets.py | bf401aa70bd10cf847506691b43ee67857487c94 | [] | no_license | javierlm/SpotifyGeneratePlaylist | 303c60a257a07210d99a85dc5d635cf85af26d45 | 570bd2acfa17c2c7fd31758a2258aeeda3185fe1 | refs/heads/master | 2022-06-20T02:09:32.456036 | 2020-05-10T14:43:42 | 2020-05-10T14:43:42 | 261,228,519 | 0 | 0 | null | 2020-05-04T15:49:46 | 2020-05-04T15:49:45 | null | UTF-8 | Python | false | false | 156 | py | # Make sure to fill in your spotify client_id, client_secret, and user_id information
spotify_user_id = ''
spotify_client_id = ''
spotify_client_secret = '' | [
"jlmedina.tfe.92@gmail.com"
] | jlmedina.tfe.92@gmail.com |
c0123317ab83f06b2a6be2df901bc6be16742418 | e80091b9398afb1c65eacc643a8f4dd8807b87d5 | /utils/common.py | 3a5859e6c9d9aaa9f075f420aef30d0d0493b5c9 | [
"BSD-2-Clause"
] | permissive | dhinkel/updawg | ac75735af8da874f576a1f5235fd59dc572ba09d | b39c89df30f9e22f0f7dbaaad37dd0a309339c22 | refs/heads/master | 2020-11-29T21:27:01.246466 | 2020-01-10T06:34:33 | 2020-01-10T06:34:33 | 230,218,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 22:54:03 2020
@author: dh
"""
import functools
def obj_iter_to_str(obj_list, iter_type=list):
'''
Print list/set to use obj.__str__ instead of obj.__repr__ for each obj
'''
if iter_type == list:
lchar = '['
rchar = ']'
if iter_type == set:
lchar = '{'
rchar = '}'
out_str_list = []
for obj in obj_list:
out_str_list.append(str(obj))
if out_str_list:
aux_str = ", ".join(out_str_list)
out_str = f'{lchar}{aux_str}{rchar}'
else:
out_str = ''
return out_str
obj_list_to_str = functools.partial(obj_iter_to_str, iter_type=list)
obj_set_to_str = functools.partial(obj_iter_to_str, iter_type=set)
def is_hashable(obj):
try:
hash(obj)
except TypeError:
output = False
else:
output = True
return output | [
"dh@DESKTOP-IU38F2U.localdomain"
] | dh@DESKTOP-IU38F2U.localdomain |
f52c44a47fdc4accf6660f53d29b15b9d5675b58 | 5d4b215924138fe192027ffb07ae8fc5a180f8c7 | /utils/dataset/trajectory_dataset.py | 7e4451def6bc1fdda247a67c3e1927e59ef6c408 | [] | no_license | MarSaKi/vln-bert | dbf10d3c908d576882e4295b747ef9e18f887fca | 4cc3539df036f47e1e9b02bd64ef3734ec389da6 | refs/heads/master | 2023-02-25T16:21:40.395419 | 2021-01-27T21:54:44 | 2021-01-27T21:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,501 | py | # pylint: disable=no-member, not-callable
import os
import networkx as nx
import numpy as np
import torch
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.utils.data import Dataset
from utils.dataset.common import (
get_headings,
get_viewpoints,
load_distances,
load_json_data,
load_nav_graphs,
randomize_tokens,
save_json_data,
tokenize,
)
from utils.dataset.pano_features_reader import PanoFeaturesReader
class TrajectoryDataset(Dataset):
def __init__(
self,
vln_path: str,
tokenizer: BertTokenizer,
pano_features_reader: PanoFeaturesReader,
max_instruction_length: int,
max_path_length: int,
max_num_boxes: int,
**kwargs,
):
# load and tokenize data (with caching)
tokenized_path = f"_tokenized_{max_instruction_length}".join(
os.path.splitext(vln_path)
)
if os.path.exists(tokenized_path):
self._data = load_json_data(tokenized_path)
else:
self._data = load_json_data(vln_path)
tokenize(self._data, tokenizer, max_instruction_length)
save_json_data(self._data, tokenized_path)
# map path ids to indices
self._index_to_data = []
for i, item in enumerate(self._data):
for j in range(len(item["instructions"])):
self._index_to_data.append((i, j))
# load navigation graphs
scan_list = [item["scan"] for item in self._data]
self._graphs = load_nav_graphs(scan_list)
self._distances = load_distances(scan_list)
# get all of the viewpoints for this dataset
self._viewpoints = get_viewpoints(
self._data, self._graphs, pano_features_reader
)
self._pano_features_reader = pano_features_reader
self._max_instruction_length = max_instruction_length
self._max_path_length = max_path_length
self._max_num_boxes = max_num_boxes
def __len__(self):
return len(self._index_to_data)
def __getitem__(self, index):
# get indices
data_index, instruction_index = self._index_to_data[index]
scan_id = self._data[data_index]["scan"]
heading = self._data[data_index]["heading"]
# get the ground truth path features
gt_path = self._data[data_index]["path"]
gt_features, gt_boxes, gt_masks = self._get_path_features(
scan_id, gt_path, heading
)
# get the ground truth instruction data
gt_instr_tokens = self._data[data_index]["instruction_tokens"][
instruction_index
]
gt_instr_mask = self._data[data_index]["instruction_token_masks"][
instruction_index
]
gt_segment_ids = self._data[data_index]["instruction_segment_ids"][
instruction_index
]
# Negative 1: swap instructions
lang_path = gt_path[:]
lang_features, lang_boxes, lang_masks = self._get_path_features(
scan_id, lang_path, heading
)
# TODO: should these be from the same scan?
lang_index = np.random.randint(len(self._data))
lang_data_index, lang_instruction_index = self._index_to_data[lang_index]
lang_instr_tokens = self._data[lang_data_index]["instruction_tokens"][
lang_instruction_index
]
lang_instr_mask = self._data[lang_data_index]["instruction_token_masks"][
lang_instruction_index
]
lang_segment_ids = self._data[lang_data_index]["instruction_segment_ids"][
lang_instruction_index
]
# Negative 2: hard alternative path
# easy_path = self._get_easy_negative_path(scan_id, gt_path)
easy_path = self._get_hard_negative_path(scan_id, gt_path)
if easy_path is None:
easy_path = self._get_backup_negative_path(scan_id, gt_path)
easy_features, easy_boxes, easy_masks = self._get_path_features(
scan_id, easy_path, heading
)
easy_instr_tokens = gt_instr_tokens[:]
easy_instr_mask = gt_instr_mask[:]
easy_segment_ids = gt_segment_ids[:]
# Negative 3: hard alternative path
hard_path = self._get_hard_negative_path(scan_id, gt_path)
if hard_path is None:
hard_path = self._get_backup_negative_path(scan_id, gt_path)
hard_features, hard_boxes, hard_masks = self._get_path_features(
scan_id, hard_path, heading
)
hard_instr_tokens = gt_instr_tokens[:]
hard_instr_mask = gt_instr_mask[:]
hard_segment_ids = gt_segment_ids[:]
# convert data into tensors
image_features = torch.tensor(
[gt_features, lang_features, easy_features, hard_features]
).float()
image_boxes = torch.tensor(
[gt_boxes, lang_boxes, easy_boxes, hard_boxes]
).float()
image_masks = torch.tensor(
[gt_masks, lang_masks, easy_masks, hard_masks]
).long()
instr_tokens = torch.tensor(
[gt_instr_tokens, lang_instr_tokens, easy_instr_tokens, hard_instr_tokens]
).long()
instr_mask = torch.tensor(
[gt_instr_mask, lang_instr_mask, easy_instr_mask, hard_instr_mask]
).long()
segment_ids = torch.tensor(
[gt_segment_ids, lang_segment_ids, easy_segment_ids, hard_segment_ids]
).long()
# randomly mask instruction tokens
if self._masked_language:
instr_tokens, instr_targets = randomize_tokens(
instr_tokens, instr_mask, self._tokenizer
)
else:
instr_targets = torch.ones_like(instr_tokens) * -1
# set target
target = torch.tensor(0).long()
# construct null return items
co_attention_mask = torch.zeros(
2, self._max_path_length * self._max_num_boxes, self._max_instruction_length
).long()
path_id = torch.tensor(self._data[data_index]["path_id"])
return (
target,
image_features,
image_boxes,
image_masks,
instr_tokens,
instr_mask,
instr_targets,
segment_ids,
co_attention_mask,
path_id,
)
def _get_easy_negative_path(self, scan_id, path):
""" Create a negative path from the source to a random neighbor."""
g, d = self._graphs[scan_id], self._distances[scan_id]
source, goal = path[0], path[-1]
# get valid neighbors within 4 and 6 hops and greater than 3m from the goal
max_hops, min_hops, min_distance = 6, 4, 3
neighbors = nx.single_source_shortest_path_length(g, source, cutoff=max_hops)
neighbors = [k for k, v in neighbors.items() if v >= min_hops]
valid = [node for node in neighbors if d[goal][node] > min_distance]
if len(valid) == 0:
return
# return the shortest path to a random negative target viewpoint
negative = np.random.choice(valid)
return nx.dijkstra_path(g, source, negative)
def _get_hard_negative_path(self, scan_id, path):
""" Create a negative path that starts along the path then goes to a random neighbor."""
g, d = self._graphs[scan_id], self._distances[scan_id]
offset = np.random.randint(1, len(path) - 1)
source, goal = path[offset], path[-1]
# get valid neighbors within 4 and 6 hops and greater than 3m from the goal
max_hops, min_hops, min_distance = 6 - offset, 4 - offset, 3
neighbors = nx.single_source_shortest_path_length(g, source, cutoff=max_hops)
neighbors = [k for k, v in neighbors.items() if v >= min_hops]
valid = [node for node in neighbors if d[goal][node] > min_distance]
if len(valid) == 0:
return
# return the shortest path to a random negative target viewpoint
negative = np.random.choice(valid)
return path[:offset] + nx.dijkstra_path(g, source, negative)
def _get_backup_negative_path(self, scan_id, path):
""" Create a negative path by swapping one of the viewpoints randomly. """
negative_path = path[:] # copy path
swap_index = np.random.randint(len(negative_path))
swap_image_id = np.random.choice(list(self._viewpoints[scan_id] - set(path)))
negative_path[swap_index] = swap_image_id
return negative_path
# TODO: move to utils
def _get_path_features(self, scan_id, path, first_heading):
""" Get features for a given path. """
headings = get_headings(self._graphs[scan_id], path, first_heading)
# for next headings duplicate the last
next_headings = headings[1:] + [headings[-1]]
path_length = min(len(path), self._max_path_length)
path_features, path_boxes, path_masks = [], [], []
for path_idx, path_id in enumerate(path[:path_length]):
key = scan_id + "-" + path_id
# get image features
features, boxes = self._pano_features_reader[
key.encode(), headings[path_idx], next_headings[path_idx],
]
num_boxes = min(len(boxes), self._max_num_boxes)
# pad features and boxes (if needed)
pad_features = np.zeros((self._max_num_boxes, 2048))
pad_features[:num_boxes] = features[:num_boxes]
pad_boxes = np.zeros((self._max_num_boxes, 12))
pad_boxes[:num_boxes, :11] = boxes[:num_boxes, :11]
pad_boxes[:, 11] = np.ones(self._max_num_boxes) * path_idx
box_pad_length = self._max_num_boxes - num_boxes
pad_masks = [1] * num_boxes + [0] * box_pad_length
path_features.append(pad_features)
path_boxes.append(pad_boxes)
path_masks.append(pad_masks)
# pad path lists (if needed)
for path_idx in range(path_length, self._max_path_length):
pad_features = np.zeros((self._max_num_boxes, 2048))
pad_boxes = np.zeros((self._max_num_boxes, 12))
pad_boxes[:, 11] = np.ones(self._max_num_boxes) * path_idx
pad_masks = [0] * self._max_num_boxes
path_features.append(pad_features)
path_boxes.append(pad_boxes)
path_masks.append(pad_masks)
return np.vstack(path_features), np.vstack(path_boxes), np.hstack(path_masks)
| [
"arjun.majum@gmail.com"
] | arjun.majum@gmail.com |
7870dd1a3fdc75e1e8927dd970bb00d39bd9e2ed | 065194d35b773511f400f92f73c8fb5172f13fc1 | /check_ga_rt.py | 03f100de8909753526d39cd806496775b186b80d | [] | no_license | Marfeel/check_ga_rt | 90e62b21ff95c2aadceaf164491b0908bb9107a8 | 47f4ed188c00b0b98d5768637da649a1efdacb36 | refs/heads/master | 2021-01-10T17:01:25.740584 | 2017-02-15T16:41:29 | 2017-02-15T16:41:29 | 48,429,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,155 | py | import argparse
import nagiosplugin
import httplib2
import time
from oauth2client import file
from oauth2client import client
from oauth2client import tools
from apiclient.discovery import build
from apiclient.errors import HttpError
class RealtimeAnalytics(nagiosplugin.Resource):
def __init__(self, credentials, filters, view, dimensions, events, reverse):
self.filters = filters
self.view = view
self.credentials = credentials
self.dimensions = dimensions
self.events = events
self.reverse = reverse
def probe(self):
http = httplib2.Http()
http = self.credentials.authorize(http)
service = build('analytics', 'v3', http=http)
for n in range(0, 2):
try:
request = service.data().realtime().get(
ids="ga:%s"%(self.view),
metrics="rt:activeUsers",
dimensions=self.dimensions,
filters=self.filters)
response = request.execute()
eventsMetrics = { k[0]: k[1] for k in response["rows"]}
totalErrors = 0
if self.events:
if not self.reverse:
for event in self.events.split(","):
if event in eventsMetrics:
totalErrors += int(eventsMetrics[event])
yield nagiosplugin.Metric(event,int(eventsMetrics[event]),min=0, context='activeUsers')
else:
for event, value in eventsMetrics.items():
if event not in self.events:
totalErrors += int(eventsMetrics[event])
yield nagiosplugin.Metric(event,int(eventsMetrics[event]),min=0, context='activeUsers')
else:
for row in response["rows"]:
totalErrors += int(row[1])
yield nagiosplugin.Metric(row[0],int(row[1]),min=0, context='activeUsers')
yield nagiosplugin.Metric('TotalErrors',totalErrors,min=0, context='activeUsers')
break
except HttpError, error:
if error.resp.reason in ['backendError', 'internalServerError']:
time.sleep(20)
class LoadSummary(nagiosplugin.Summary):
def ok(self, results):
msgs = ''
for result in results:
msgs += '{0} \n'.format(result)
return msgs
@nagiosplugin.guarded
def main():
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('-w', '--warning', type=int, default=0,
help='return warning if activeUsers is outside RANGE')
argp.add_argument('-c', '--critical', type=int, default=0,
help='return critical if activeUsers is outside RANGE')
argp.add_argument('-C', '--credentialsFile', action='store',required=True)
argp.add_argument('-t', '--timeout', type=int, default=20,
help='abort after this number of seconds')
argp.add_argument('-D', '--authData', action='store',required=True)
argp.add_argument('-F', '--filters', action='store',required=True)
argp.add_argument('-d', '--dimensions', action='store',required=True)
argp.add_argument('-V', '--view', action='store',required=True)
argp.add_argument('-e', '--events', action='store', required=False)
argp.add_argument('-v', '--verbose', action='count', default=0,
help='increase output verbosity (use up to 3 times)')
args = argp.parse_args()
events = ''
reverse = False
if args.events:
events = args.events if args.events[1] != '-' else args.events[2:]
reverse = args.events[1] == '-'
check = nagiosplugin.Check(
RealtimeAnalytics(authenticate(args.authData, args.credentialsFile),
args.filters,
args.view,
args.dimensions,
events,
reverse),
nagiosplugin.ScalarContext('activeUsers',
nagiosplugin.Range("%s" % args.warning),
nagiosplugin.Range("%s" % args.critical)),
LoadSummary())
check.main(verbose=args.verbose,timeout=args.timeout)
def authenticate(authData, credentialsFile):
storage = file.Storage(authData)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(
client.flow_from_clientsecrets(
credentialsFile,
scope=[
'https://www.googleapis.com/auth/analytics.readonly',
],
message=tools.message_if_missing(credentialsFile)),
storage,
tools.argparser.parse_args(args=[]))
return credentials
if __name__ == '__main__':
main() | [
"joan.tomas@gmail.com"
] | joan.tomas@gmail.com |
26a661d5c57eb94eb354be41f680590dd1e164e1 | 3895bb91c9d04b249ec08e3fb9015ca13865ea86 | /book_outlet/urls.py | fe757715268663e85213a5cdd4fd6126dad03a1f | [] | no_license | Aaryan8751/Django_Working_With_Models | 377af3f378461a5bcfdb02b2912cf5c4ab9190ad | 2c72c8daf0525cc45eef4777d7052287ae56ccaf | refs/heads/main | 2023-06-14T00:16:49.200095 | 2021-07-04T13:52:32 | 2021-07-04T13:52:32 | 382,821,143 | 0 | 0 | null | 2021-07-04T13:52:33 | 2021-07-04T10:17:35 | Python | UTF-8 | Python | false | false | 156 | py | from django.urls import path
from . import views
urlpatterns = [
path("",views.index),
path("<slug:slug>",views.book_detail,name="book-detail")
]
| [
"60098288+Aaryan8751@users.noreply.github.com"
] | 60098288+Aaryan8751@users.noreply.github.com |
f5585e1bfdc49c756ef3bfaf615208b7d8092a70 | 2016aa7d628c1642cb435a628183af1a1c2d1711 | /analysis/analysis_tool.py | 206a2e6701ee1392e3320323fcab7dfdbc87366f | [] | no_license | NoahMauthe/decompilation_analysis | 3c59cca425236f6f489b6289d48b49f739bc97d4 | 1e90ce95ff58e587a698bd29fdbbbdf701e15180 | refs/heads/master | 2023-08-28T22:09:52.218712 | 2021-09-16T13:25:11 | 2021-09-16T13:25:11 | 327,853,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,916 | py | import fnmatch
import glob
import json
import logging
import os
import shutil
import signal
import time
import itertools
from json.decoder import JSONDecodeError
from subprocess import Popen, TimeoutExpired
from API.Exceptions import ConfigurationError
from API.Objects import App
from analysis import filewriter
from analysis.apkanalyzer import run_apk_analyzer, ApkAnalyzerError, standardize_fernflower, standardize_procyon, \
standardize_cfr, standardize_jadx, dex_for_cfr, dex_for_fernflower, dex_for_jadx, dex_for_procyon
from analysis.method import Method
# Module-level logger for this analysis pipeline.
LOGGER = logging.getLogger('analysis.tool')
LOGGER.setLevel(logging.DEBUG)

# Maximum number of seconds any external tool (APKiD, dex2jar, a decompiler,
# or a helper shell command) may run before it is treated as timed out.
TIMEOUT = 300

# Maps decompiler name -> dex-signature conversion helper (imported from
# analysis.apkanalyzer; exact semantics not visible in this module).
CONVERT_DEX = {
    'cfr': dex_for_cfr,
    'fernflower': dex_for_fernflower,
    'jadx': dex_for_jadx,
    'procyon': dex_for_procyon,
}

# Maps decompiler name -> signature normalizer used while parsing failure logs.
NORMALIZE = {
    'cfr': standardize_cfr,
    'fernflower': standardize_fernflower,
    'jadx': standardize_jadx,
    'procyon': standardize_procyon,
}
class APKError(Exception):
    """Exception type for APK-level processing failures (defined here for callers; not raised within this module)."""
    pass
class Packer(Exception):
    """Exception type related to packed applications (defined here for callers; not raised within this module)."""
    pass
class ConversionError(Exception):
    """Raised by dex2jar when apk-to-jar conversion times out, exits non-zero, or produces no jar."""
    pass
def parse_apkid(file_path):
    """Scan an APKiD log file for evidence of a packer.

    Parameters
    ----------
    file_path : str
        The log file to analyze

    Returns
    -------
    str or None:
        The name of the discovered packer or None
    """
    marker = 'packer :'
    with open(file_path, 'r') as log:
        for entry in log:
            if marker not in entry:
                continue
            LOGGER.info(f'Found a packed application!')
            # The packer name is whatever APKiD printed after the marker.
            return entry.split(marker)[1].strip()
    return None
def packer(apk_path, directory):
    """Uses APKiD to detect whether an apk was packed.

    Parameters
    ----------
    apk_path : str
        The application to run the detection on.
    directory : str
        The base directory for logfiles and output

    Returns
    -------
    str or None:
        The name of the packer or None
    """
    out = os.path.join(directory, 'apkid')
    os.makedirs(out, exist_ok=True)
    file_path = os.path.join(out, 'output.log')
    error_path = os.path.join(out, 'error.log')
    # NOTE(review): assumes the `apkid` binary is on PATH — confirm deployment.
    apkid_path = 'apkid'
    with open(file_path, 'w+') as file:
        with open(error_path, 'w+') as error_file:
            process = Popen([apkid_path, apk_path], stdout=file, stderr=error_file)
            try:
                process.wait(timeout=TIMEOUT)
            except TimeoutExpired:
                LOGGER.error(f'APKiD timed out for {apk_path.split("/")[-1]}')
                # Fix: the stalled APKiD process was previously left running
                # after the timeout; kill it (same as run_decompiler does).
                os.kill(process.pid, signal.SIGKILL)
    # Whatever APKiD managed to write before a timeout is still parsed.
    return parse_apkid(file_path)
def dex2jar(apk_path, directory, package):
    """Converts an apk to a jar file for further analysis.

    Parameters
    ----------
    apk_path : str
        The application to convert.
    directory : str
        The base directory for logfiles and output
    package : str
        Package name; used to name the resulting jar file.

    Returns
    -------
    str:
        The path of the converted jar file.

    Raises
    ------
    ConversionError
        If the conversion times out, exits non-zero, or produces no jar.
    """
    out = os.path.join(directory, 'dex2jar')
    log_dir = os.path.join(out, 'logs')
    files = os.path.join(out, 'files')
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(files, exist_ok=True)
    jar = os.path.join(files, package + '.jar')
    dex2jar_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools', 'dex2jar', 'dex-tools',
                                'build', 'distributions', 'dex-tools-2.1-SNAPSHOT', 'd2j-dex2jar.sh')
    dex2jar_args = [dex2jar_path, '-o', jar, apk_path, '--force']
    with open(os.path.join(log_dir, 'stdout.log'), 'w+') as stdout_file:
        with open(os.path.join(log_dir, 'stderr.log'), 'w+') as stderr_file:
            process = Popen(args=dex2jar_args, stdout=stdout_file, stderr=stderr_file)
            try:
                process.wait(TIMEOUT)
            except TimeoutExpired:
                # Fix: kill the stalled converter before giving up so the
                # process does not keep running after the timeout.
                os.kill(process.pid, signal.SIGKILL)
                raise ConversionError(f'{package} conversion to jar timed out after {TIMEOUT}')
    if process.returncode != 0:
        raise ConversionError(f'{package} conversion return with nonzero exit code {process.returncode}')
    elif not os.path.exists(jar):
        # The tool can exit 0 without producing output; treat that as failure.
        raise ConversionError(f'{package} conversion was unsuccessful')
    LOGGER.info(f'Created {jar}')
    return jar
def run_decompiler(args, file_path, log_dir, tool, wd=None):
    """Launch a java based decompiler subprocess and wait for it to finish.

    All decompilers in this module share the same launch pattern, so the
    common plumbing lives here.

    Parameters
    ----------
    args : list
        The arguments passed to the decompiler.
    file_path : str
        The path of the application to be decompiled.
        Only used for logging.
    log_dir : str
        The directory to store the log files in.
    tool : str
        The name of the tool to be run for logging.
    wd : str
        Sets the working directory for the decompiler.

    Raises
    ------
    subprocess.TimeoutExpired
        Re-raised after killing the child if it exceeds TIMEOUT.
    """
    cwd = os.getcwd() if wd is None else wd
    out_log = os.path.join(log_dir, 'stdout.log')
    err_log = os.path.join(log_dir, 'stderr.log')
    with open(out_log, 'w+') as out_fh, open(err_log, 'w+') as err_fh:
        child = Popen(args=args, stdout=out_fh, stderr=err_fh, cwd=cwd)
        try:
            child.wait(timeout=TIMEOUT)
        except TimeoutExpired:
            LOGGER.error(f'{tool} timed out for {file_path.split("/")[-1]}')
            # Make sure the runaway decompiler is reaped before propagating.
            os.kill(child.pid, signal.SIGKILL)
            raise
def parse_jadx(log_file):
    """Parses the log generated by jadx to discover methods that failed decompilation.

    Parameters
    ----------
    log_file : str
        The log file containing the failure information.

    Returns
    -------
    dict:
        The decompilation failure information containing:
            timeout : bool
                Always False as timeouts are caught before.
            methods : dict
                Maps the normalized method signature to the failure reason.
    """
    jadx_results = dict()
    with open(log_file, 'r') as log:
        for line in log:
            line = line.strip()
            # Everything after this summary marker is repeated information.
            if ' errors occurred in following nodes:' in line:
                break
            if line.startswith('ERROR - ['):
                try:
                    info = line.split('] ', 1)[1]
                    reason, info = info.split(' in method: ', 1)
                    try:
                        info, details = info.split(', details: ', 1)
                    except ValueError:
                        # Details were not inline; they may be on the next
                        # line, unless that line is already a new log record.
                        info = info.split(', file:', 1)[0]
                        # Fix: default to '' so a failure record at the very
                        # end of the log no longer raises StopIteration.
                        n = next(log, '').strip()
                        if n.startswith('ERROR') or n.startswith('INFO'):
                            # Push the consumed record back onto the stream.
                            log = itertools.chain([n], log)
                            details = "Failed to extract details"
                        else:
                            details = n
                    reason += f', details: {details}'
                    # Split "pkg.Class.method(args):ret" into its parts and
                    # rebuild "method(args:ret" for normalization.
                    method = info.split(', file:', 1)[0]
                    method, ret_type = method.split(':')
                    method, args = method.split('(')
                    splits = method.split('.')
                    method = splits[-1]
                    class_ = '.'.join(splits[:-1])
                    method = method + '(' + args + ':' + ret_type
                    class_name, signature = standardize_jadx(class_, method)
                    jadx_results[signature] = reason
                except IndexError:
                    LOGGER.exception(f'Encountered an error while parsing jadx for {log_file}')
    return {'jadx': {
        'timeout': False,
        'methods': jadx_results
    }}
def jadx(apk_path, directory):
    """Runs the jadx decompiler on an apk.

    Parameters
    ----------
    apk_path : str
        The application to run the decompiler on.
    directory : str
        The base directory for logfiles and output

    Returns
    -------
    dict:
        The decompilation failure information
    """
    jadx_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools', 'jadx', 'build', 'jadx', 'bin',
                             'jadx')
    out = os.path.join(directory, 'decompiler', 'jadx')
    log_dir = os.path.join(out, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    file_path = os.path.join(log_dir, 'stdout.log')
    jadx_args = [jadx_path, '--log-level', 'ERROR', '-d', out, '--no-res', '-j', '4', '--show-bad-code',
                 apk_path]
    try:
        run_decompiler(jadx_args, apk_path, log_dir, 'jadx')
    except TimeoutExpired:
        return {'jadx': {'timeout': True}}
    # Rename jadx's default 'sources' directory to the common 'files' layout.
    # The paths are fixed (not user input), so shell=True is acceptable here.
    move = Popen(['mv sources files'], shell=True, cwd=out)
    try:
        move.wait(timeout=TIMEOUT)
    except TimeoutExpired:
        LOGGER.exception('Move command timed out')
    # Fix: the except branch previously duplicated this identical return,
    # making the fall-through unreachable redundancy; parse once either way.
    return parse_jadx(file_path)
def parse_cfr(out):
    """Parses the summary generated by cfr to discover methods that failed decompilation.

    Parameters
    ----------
    out : str
        The directory containing all cfr output

    Returns
    -------
    dict:
        The decompilation failure information containing:
            timeout : bool
                Always False as timeouts are caught before.
            methods : dict
                Maps the normalized method signature to the failure reason.
    """
    failures = {}
    summary_path = os.path.join(out, 'summary.txt')
    with open(summary_path) as summary:
        failed_lines = (ln for ln in summary if ln.startswith('FAILED_METHOD:'))
        for entry in failed_lines:
            payload = entry.split('FAILED_METHOD:\t')[-1].strip()
            method_part, reason = payload.split(';', 1)
            reason = reason.replace(';', '')
            class_name, signature = method_part.split(' ', 1)
            _, normalized = standardize_cfr(class_name, signature)
            failures[normalized] = reason
    return {'cfr': {
        'timeout': False,
        'methods': failures
    }}
def cfr(jar_path, directory):
    """Runs the cfr decompiler on a jar.

    Parameters
    ----------
    jar_path : str
        The application to run the decompiler on.
    directory : str
        The base directory for logfiles and output

    Returns
    -------
    dict:
        The decompilation failure information
    """
    out = os.path.join(directory, 'decompiler', 'cfr')
    log_dir = os.path.join(out, 'logs')
    files_dir = os.path.join(out, 'files')
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(files_dir, exist_ok=True)
    wd = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools', 'cfr', 'target',
                      'classes')
    cfr_args = ['java', 'org.benf.cfr.reader.Main', jar_path, '--outputdir', files_dir, '--silent']
    try:
        run_decompiler(cfr_args, jar_path, log_dir, 'cfr', wd)
    except TimeoutExpired:
        return {'cfr': {'timeout': True}}
    try:
        return parse_cfr(files_dir)
    except FileNotFoundError:
        # No summary.txt was produced; report an empty failure set.
        # Fix: this fallback previously used the key 'classes', inconsistent
        # with the 'methods' key used by every other result in this module.
        return {'cfr': {'timeout': False, 'methods': {}}}
def parse_procyon(out):
    """Parses the files generated by procyon to discover methods that failed decompilation.

    Procyon writes no summary file, so every emitted .java file is scanned
    for its "could not be decompiled" comments.

    Parameters
    ----------
    out : str
        The directory containing the decompiled files.

    Returns
    -------
    dict:
        The decompilation failure information containing:
            timeout : bool
                Always False as timeouts are caught before.
            methods : dict
                Maps the normalized method signature to the failure reason.
    list:
        The list of files with no failures, as those can be removed.
    """
    LOGGER.debug('Started parsing procyon output')
    failures = dict()
    removable = []
    candidates = glob.iglob(os.path.join(out, '**'), recursive=True)
    for java_file in fnmatch.filter(candidates, '*.java'):
        # Turn the path relative to `out` into a dotted class name.
        qualified_name = java_file.split(out)[-1][1:-5].replace('/', '.')
        had_failure = False
        with open(java_file, 'r') as handle:
            for line in handle:
                if 'The method "' in line and '" could not be decompiled.' in line:
                    had_failure = True
                    class_name, signature = standardize_procyon(qualified_name, line.split('"')[1])
                    failures[signature] = line.strip().split("could not be decompiled. ")[1]
        if not had_failure:
            removable.append(java_file)
    LOGGER.debug('Finished parsing procyon output')
    return {'procyon': {
        'timeout': False,
        'methods': failures
    }}, removable
def procyon(jar_path, directory):
    """Runs the procyon decompiler on a jar.

    Parameters
    ----------
    jar_path : str
        The application to run the decompiler on.
    directory : str
        The base directory for logfiles and output

    Returns
    -------
    dict:
        The decompilation failure information
    list:
        The list of files that can be removed.
    """
    base_dir = os.path.join(directory, 'decompiler', 'procyon')
    logs = os.path.join(base_dir, 'logs')
    sources = os.path.join(base_dir, 'files')
    for needed in (logs, sources):
        os.makedirs(needed, exist_ok=True)
    tool_jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools', 'procyon', 'build',
                            'Procyon.Decompiler', 'libs', 'procyon-decompiler-1.0-SNAPSHOT.jar')
    cmd = ['java', '-jar', tool_jar, '-jar', jar_path, '-o', sources, '--log-level', '3']
    try:
        run_decompiler(cmd, jar_path, logs, 'procyon')
    except TimeoutExpired:
        # On timeout there is nothing to parse and nothing to clean up.
        return {'procyon': {'timeout': True}}, []
    return parse_procyon(sources)
def parse_fernflower(log_dir):
    """Parses the log file generated by fernflower to discover methods that failed decompilation.

    Parameters
    ----------
    log_dir : str
        The directory containing the log file.

    Returns
    -------
    dict:
        The decompilation failure information containing:
            timeout : bool
                Always False as timeouts are caught before.
            methods : dict
                Maps the normalized method signature to the failure reason.
    """
    fernflower_results = dict()
    with open(os.path.join(log_dir, 'stdout.log'), 'r') as log_file:
        file_name = ''
        for line in log_file:
            if 'Decompiling class ' in line:
                # Remember the class being decompiled; subsequent failure
                # lines belong to it.
                file_name = line.strip().split('Decompiling class ')[1]
            elif "couldn't be decompiled." in line:
                signature = line.split('Method ')[1].split(" couldn't be decompiled.")[0].strip()
                class_name, signature = standardize_fernflower(file_name, signature)
                # The reason is on the following line. Fix: use a default so
                # a failure record at the very end of the log no longer
                # raises StopIteration.
                fernflower_results[signature] = next(log_file, '').strip()
    return {'fernflower': {
        'timeout': False,
        'methods': fernflower_results
    }}
def fernflower(jar_path, directory):
    """Runs the fernflower decompiler on a jar.

    Parameters
    ----------
    jar_path : str
        The application to run the decompiler on.
    directory : str
        The base directory for logfiles and output.

    Returns
    -------
    dict:
        The decompilation failure information. The 'timeout' flag is True
        when fernflower exceeded TIMEOUT seconds; the partially written
        log is still parsed in that case.
    """
    out = os.path.join(directory, 'decompiler', 'fernflower')
    log_dir = os.path.join(out, 'logs')
    files_dir = os.path.join(out, 'files')
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(files_dir, exist_ok=True)
    fernflower_jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools', 'fernflower', 'build',
                                  'libs', 'fernflower.jar')
    fernflower_args = ['java', '-Xmx4096m', '-jar', fernflower_jar, jar_path, files_dir]
    timeout = False
    try:
        run_decompiler(fernflower_args, jar_path, log_dir, 'fernflower')
    except TimeoutExpired:
        LOGGER.error(f'Fernflower timed out after {TIMEOUT} seconds for file {jar_path.split("/")[-1]}')
        timeout = True
    # Parse whatever fernflower managed to log; mark the timeout flag on the
    # result instead of duplicating the parse-and-return logic per branch
    # (the original had two nearly identical return paths).
    result = parse_fernflower(log_dir)
    result.setdefault('fernflower', {})['timeout'] = timeout
    return result
def _handle_error(func, path, exc_info):
    """shutil ``onerror`` callback: log deletion failures instead of raising.

    Parameters follow the shutil onerror contract: the failing function,
    the path it failed on, and the ``sys.exc_info()`` triple.
    """
    # Fix: the original dropped exc_info, so the log never showed *why*
    # the deletion failed. Attach it to the log record.
    LOGGER.error(f'Failed to delete {path}', exc_info=exc_info)
def save_to_file(directory, package_name, result):
    """
    Saves the results as a .decre (DECompilation REsult) file.

    Parameters
    ----------
    directory : str
        The directory the file will be saved to.
    package_name: str
        The name of the package this information is associated with.
    result : dict
        The errors produced by various decompiler.

    Returns
    -------
    str
        The path of the written file.
    """
    target = os.path.join(directory, package_name + '.decre')
    with open(target, 'w') as out_file:
        json.dump(result, out_file, sort_keys=True, indent=4)
    return target
def reduce_size(directory, procyon_files, dex):
    """Reduces the size on disk by compressing the decompiler outputs and removing unnecessary files.

    At the moment, complete removal is necessary, as otherwise we will run out of disk space.

    Parameters
    ----------
    directory : str
        The directory all the files are located in.
    procyon_files : list
        A list of files generated by procyon with no failures.
    dex : bool
        If set, only dex decompilers were run and there are fewer files to be removed.
    """
    LOGGER.info(f'Removing generated outputs in directory {directory}')
    if dex:
        # Only jadx ran, so only its output has to go.
        decompilers = ['jadx']
    else:
        # Full run: also drop the dex2jar conversion output.
        shutil.rmtree(os.path.join(directory, 'dex2jar', 'files'), onerror=_handle_error)
        decompilers = ['cfr', 'jadx', 'fernflower']
        cfr_path = os.path.join(directory, 'decompiler', 'cfr')
        try:
            # Keep CFR's summary by moving it next to the logs before the
            # files directory is deleted below.
            shutil.move(os.path.join(cfr_path, 'files', 'summary.txt'), os.path.join(cfr_path, 'logs', 'summary.txt'))
            os.remove(os.path.join(directory, 'apkanalyzer', 'methods.log'))
        except FileNotFoundError:
            LOGGER.error("CFR summary not found, probably decompilation was stopped by an apkanalyzer error.\n"
                         "Skipping further removal attempts.")
            return
        # Remove procyon output files that decompiled cleanly; failed ones
        # are kept for inspection.
        for file in procyon_files:
            os.remove(file)
        # Bottom-up walk so now-empty package directories can be rmdir'd;
        # non-empty ones (containing kept failures) raise OSError and stay.
        for root, dirs, files in os.walk(os.path.join(directory, 'decompiler', 'procyon', 'files'), topdown=False):
            for name in dirs:
                try:
                    os.rmdir(os.path.join(root, name))
                except OSError:
                    pass
        shutil.rmtree(os.path.join(directory, 'decompiler', 'procyon', 'logs'), onerror=_handle_error)
    # Drop the full output trees of the remaining decompilers.
    for decompiler in decompilers:
        shutil.rmtree(os.path.join(directory, 'decompiler', decompiler, 'files'), onerror=_handle_error)
def analyse_app(file, directory, package_name, out, category, downloads, dex):
    """Given an apk file, converts it to jar, checks for packers and runs decompilers and similarity analysis.

    Parameters
    ----------
    file : str
        The apk file to run the analysis on.
    directory : str
        The directory containing the file to put output and logfiles in a subdirectory.
    package_name : str
        The package name of the application to analyze.
    out : str
        The path of the folder to save the output to.
    category : str
        The name of the category (family in case of malware) the app belongs to.
    downloads : str
        The number of downloads an app has in string representation.
    dex : bool
        If set, only dex compatible decompilers will be run.

    Returns
    -------
    list:
        The list of decompiled procyon files with no failures. Those can be removed without losing any information
        (Provided no similarity analysis is run).
    bool:
        Whether the files should be preserved as they are.
        Necessary to avoid errors when reading existing decompilation information from file.
    """
    all_start = time.time()
    # Short-circuit if a previous run already produced results for this app
    # AND all expected decompiler artifacts are still on disk.
    if os.path.exists(os.path.join(out, f'{package_name}.ecsv')):
        LOGGER.info(f'Found existing decompilation results')
        cfr_path = os.path.join(directory, 'decompiler', 'cfr', 'logs', 'summary.txt')
        jadx_path = os.path.join(directory, 'decompiler', 'jadx', 'logs', 'stdout.log')
        procyon_path = os.path.join(directory, 'decompiler', 'procyon', 'files')
        fernflower_path = os.path.join(directory, 'decompiler', 'fernflower', 'logs', 'stderr.log')
        exists = True
        for path in [cfr_path, jadx_path, procyon_path, fernflower_path]:
            if not os.path.exists(path):
                exists = False
                LOGGER.info('Existing sources were incomplete, rerunning decompilation')
                break
        if exists:
            # Nothing to do; caller must preserve the existing files.
            return [], True
    # Clear leftover artifacts from a previous (incomplete) run.
    del_directories = ['decompiler', 'dex2jar', 'apkid']
    for del_dir in del_directories:
        del_path = os.path.join(directory, del_dir)
        if os.path.exists(del_path):
            shutil.rmtree(del_path, onerror=_handle_error)
    if dex:
        decompile_dex(directory, file, package_name, out, category, downloads)
        LOGGER.info(f'Processing of {package_name} with dex decompilers took {time.time() - all_start} in total')
        return [], False
    else:
        procyon_files = decompile_apk(directory, file, package_name, out, category, downloads)
        LOGGER.info(f'Processing of {package_name} took {time.time() - all_start} in total')
        return procyon_files, False
def _create_methods(methods, dex_compatible, lookup, matches, timed_out, reasons):
    """Build Method records encoding each decompiler's outcome per signature.

    For every method a semicolon-separated record is assembled: the status
    part gets one letter per decompiler (T = decompiler timed out,
    N = no dex-compatible name for this decompiler, F = listed as failed,
    S = not listed as failed, i.e. success), followed by a reason part
    mirroring those columns.

    Also records, per decompiler, which original signatures mapped onto
    each failed dex name (appended into ``matches``, which is mutated and
    returned).
    """
    # Fixed column order of the per-decompiler status/reason fields.
    decompilers = ['cfr', 'fernflower', 'jadx', 'procyon']
    created_methods = set()
    for signature in methods.keys():
        csv_str = f'{signature};{methods[signature]};'
        csv_end = ""
        compatible = dex_compatible[signature]
        for decompiler in decompilers:
            if decompiler in timed_out:
                csv_str += 'T;'
                csv_end += 'T;'
            else:
                dex = compatible.get(decompiler, None)
                if not dex:
                    # No dex-converted name exists for this decompiler.
                    csv_str += 'N;'
                    csv_end += 'N;'
                    continue
                if dex in lookup[decompiler]:
                    # The decompiler reported this (dex) method as failed;
                    # remember which signature matched it.
                    dec_matches = matches[decompiler]
                    dec_matches[dex] = dec_matches[dex] + [signature]
                    matches[decompiler] = dec_matches
                    csv_str += 'F;'
                    csv_end += reasons.get(decompiler, dict()).get(dex, '') + ';'
                else:
                    csv_str += 'S;'
                    csv_end += 'S;'
        created_methods.add(Method(csv_str + csv_end))
    return created_methods, matches
def combine_results(decompiler_results, apk_analyzer_results, apk_path):
    """Merge per-decompiler failure info with the apkanalyzer method list.

    Parameters
    ----------
    decompiler_results : dict
        Per decompiler: {'timeout': bool, 'methods': {dex_name: reason}}.
    apk_analyzer_results : dict
        Contains 'method_count' and 'methods' (signature -> size).
    apk_path : str
        Path of the analyzed apk (used for its file size).

    Returns
    -------
    (dict, dict)
        The combined data (size, method_count, Method records) and the
        per-decompiler match/debug information.
    """
    data = {'size': os.path.getsize(apk_path),
            'method_count': apk_analyzer_results['method_count']
            }
    matches = dict()
    reasons = dict()
    timed_out = set()
    # Split each decompiler's report into: timed-out set, failed-method
    # match slots, and failure reasons.
    for decompiler in decompiler_results.keys():
        if decompiler_results[decompiler].get('timeout', False):
            timed_out.add(decompiler)
            matches[decompiler] = dict()
            continue
        methods = decompiler_results[decompiler].get('methods', dict())
        match = dict()
        reas = dict()
        for method in methods.keys():
            match[method] = []
            reas[method] = methods[method]
        matches[decompiler] = match
        reasons[decompiler] = reas
    # Translate every apkanalyzer signature into each decompiler's own
    # dex-style naming via the CONVERT_DEX conversion table.
    dex_compatible = dict()
    methods = apk_analyzer_results['methods']
    signatures = methods.keys()
    for method in signatures:
        for decompiler in decompiler_results.keys():
            dex = dex_compatible.get(method, {})
            dex[decompiler] = CONVERT_DEX[decompiler](method)
            dex_compatible[method] = dex
    # Fast membership test for "did this decompiler report that method".
    lookup = dict()
    for decompiler in decompiler_results.keys():
        lookup[decompiler] = set(matches[decompiler].keys())
    created_methods, debug_data = _create_methods(methods, dex_compatible, lookup, matches, timed_out, reasons)
    data['methods'] = created_methods
    return data, debug_data
def decompile_dex(directory, file, package_name, path, category, downloads):
    """Decompiles an apk with all dex decompilers present.

    Parameters
    ----------
    directory : str
        The parent directory for all logfiles and output.
    file : str
        The apk to decompile.
    package_name : str
        The package name of the apk, identifying it uniquely.
    downloads : str
        The number of app downloads represented as str.
    category : str
        The category of the application.
    path : str
        The path of the output folder.

    Returns
    -------
    None
        Results are written to disk via filewriter; returns early (None)
        if apkanalyzer fails.
    """
    os.makedirs(os.path.join(directory, 'decompiler'), exist_ok=True)
    # Derive the .apk path from the sibling metadata file name.
    apk_path = file[:-3] + 'apk'
    if not os.path.exists(apk_path):
        raise APKError()
    apk_analyzer_dir = os.path.join(directory, 'apkanalyzer')
    os.makedirs(apk_analyzer_dir, exist_ok=True)
    try:
        LOGGER.info(f'Running apkanalyzer on {package_name}')
        apk_analyzer_results = run_apk_analyzer(apk_path, TIMEOUT, apk_analyzer_dir)
    except ApkAnalyzerError:
        # Without the method list no combination is possible; record the
        # error marker and stop.
        LOGGER.error(f'APK {package_name} failed processing with apkanalyzer.')
        filewriter.apk_error(path, package_name)
        return
    packer_name = packer(apk_path, directory)
    LOGGER.info(f'Decompiling sources for {package_name} with jadx')
    decompiler_results = jadx(apk_path, directory)
    LOGGER.debug(decompiler_results)
    LOGGER.info(f'Finished decompiling {package_name} with all dex decompilers')
    combined_results, debug_data = combine_results(decompiler_results, apk_analyzer_results, apk_path)
    filewriter.results(path, packer_name, package_name, combined_results, decompiler_results, category, downloads)
    filewriter.debug(path, package_name, debug_data)
def decompile_apk(directory, file, package_name, path, category, downloads):
    """Decompiles an apk with all decompilers present.

    Parameters
    ----------
    directory : str
        The parent directory for all logfiles and output.
    file : str
        The apk to decompile.
    package_name : str
        The package name of the apk, identifying it uniquely.
    downloads : str
        The number of app downloads represented as str.
    category : str
        The category of the application.
    path : str
        The path of the output folder.

    Returns
    -------
    list:
        The list of procyon files with no failures. Empty when apkanalyzer
        or the dex2jar conversion fails.
    """
    os.makedirs(os.path.join(directory, 'decompiler'), exist_ok=True)
    # Derive the .apk path from the sibling metadata file name.
    apk_path = file[:-3] + 'apk'
    if not os.path.exists(apk_path):
        raise APKError()
    apk_analyzer_dir = os.path.join(directory, 'apkanalyzer')
    os.makedirs(apk_analyzer_dir, exist_ok=True)
    try:
        LOGGER.info(f'Running apkanalyzer on {package_name}')
        apk_analyzer_results = run_apk_analyzer(apk_path, TIMEOUT, apk_analyzer_dir)
    except ApkAnalyzerError:
        LOGGER.error(f'APK {package_name} failed processing with apkanalyzer.')
        filewriter.apk_error(path, package_name)
        return []
    # The jar-based decompilers (cfr, procyon, fernflower) need a dex2jar
    # conversion first; jadx works on the apk directly.
    LOGGER.info(f'Converting sources for {package_name} to jar')
    try:
        jar_path = dex2jar(apk_path, directory, package_name)
    except ConversionError:
        LOGGER.error(f'APK {package_name} encountered a conversion error.')
        filewriter.conversion_error(path, package_name)
        return []
    packer_name = packer(apk_path, directory)
    decompiler_results = {}
    LOGGER.info(f'Decompiling sources for {package_name} with jadx')
    jadx_result = jadx(apk_path, directory)
    decompiler_results.update(jadx_result)
    LOGGER.info(f'Decompiling sources for {package_name} with cfr')
    cfr_result = cfr(jar_path, directory)
    decompiler_results.update(cfr_result)
    LOGGER.info(f'Decompiling sources for {package_name} with procyon')
    procyon_result, procyon_files = procyon(jar_path, directory)
    decompiler_results.update(procyon_result)
    LOGGER.info(f'Decompiling sources for {package_name} with fernflower')
    fernflower_result = fernflower(jar_path, directory)
    decompiler_results.update(fernflower_result)
    LOGGER.debug(decompiler_results)
    LOGGER.info(f'Finished decompiling {package_name} with all decompilers')
    combined_results, debug_data = combine_results(decompiler_results, apk_analyzer_results, apk_path)
    filewriter.results(path, packer_name, package_name, combined_results, decompiler_results, category, downloads)
    filewriter.debug(path, package_name, debug_data)
    # failures = debug(decompiler_results, apk_analyzer_results)
    return procyon_files
def analyse(out, base_path, preserve_dirs, config, dex):
    """Analyses a collection of apk files regarding their decompilation failures.

    Parameters
    ----------
    out : str
        The path of the output directory.
    base_path : str
        The common base path for all apks.
    preserve_dirs : bool
        If set, does not remove output files afterwards.
        WARNING! This option will result in huge output.
    config : str
        Path to a configuration directory. Has to contain a file RUN, otherwise the analysis won't start.
        Used to stop the analysis gracefully if removed.
    dex : bool
        If set, only dex compatible decompilers will be run.

    Returns
    -------
    list
        The collected similarity results (currently always empty; also
        returned on graceful interruption).
    """
    pain_count = 0
    apk_count = 0
    all_similarities = []
    ready_files = []
    os.makedirs(config, exist_ok=True)
    # Resume support: load the list of apks processed by a previous run.
    try:
        with open(os.path.join(config, 'processed.json'), 'r') as processed_files:
            ready_files = json.load(processed_files).get('files')
        LOGGER.info(ready_files)
    except (FileNotFoundError, JSONDecodeError):
        LOGGER.info(f'No processed files found')
    initial = len(ready_files)
    os.makedirs(out, exist_ok=True)
    files = fnmatch.filter(glob.iglob(os.path.join(base_path, '**'), recursive=True), '*.apk')
    for file in files:
        if file in ready_files:
            continue
        # Prefer metadata from the .pain companion file; fall back to the
        # directory layout when it is missing.
        try:
            app = App.from_file(file.replace('.apk', '.pain'))
            package_name = app.package_name()
            if app.store() == 'F-Droid':
                downloads = '-1'
            else:
                downloads = str(app.downloads())
            category = app.category_name()
            pain_count += 1
        except ConfigurationError as e:
            LOGGER.exception(e)
            continue
        except FileNotFoundError as e:
            LOGGER.debug(e)
            package_name = file.split('/')[-1][:-4]
            category = file.split('/')[-3]
            downloads = '-1'
        directory = os.path.dirname(file)
        LOGGER.info(f'Processing apk {package_name}')
        try:
            procyon_files, from_file = analyse_app(file, directory, package_name, out, category, downloads, dex)
            if from_file:
                initial += 1
            elif not preserve_dirs:
                reduce_size(directory, procyon_files, dex)
            apk_count += 1
            ready_files.append(file)
        except ConversionError:
            LOGGER.exception(f'Apk {package_name} failed to be converted to .jar format')
        except APKError:
            LOGGER.info(f'Apk {package_name} is not present')
        except Packer:
            LOGGER.info(f'Apk {package_name} was packed, skipping further analysis')
        # Graceful shutdown: stop as soon as the RUN marker file disappears.
        try:
            # Fix: open inside `with` so the probe handle is closed again
            # (the original leaked one file handle per processed apk).
            with open(os.path.join(config, 'RUN'), 'r'):
                pass
        except FileNotFoundError:
            with open(os.path.join(config, 'processed.json'), 'w+') as processed_files:
                json.dump({'files': ready_files}, processed_files, sort_keys=True, indent=4)
            LOGGER.info(f'Interrupted processing after completing the analysis of {len(ready_files) - initial}'
                        f' files for a total of {len(ready_files)}')
            return all_similarities
    LOGGER.info(f'Found {pain_count:<5} apk_info files\n\t'
                f'and {apk_count:<5} apk files in total\n\t'
                f' {initial:<5} were analyzed beforehand')
    with open(os.path.join(config, 'processed.json'), 'w+') as processed_files:
        json.dump({'files': ready_files}, processed_files, sort_keys=True, indent=4)
    # Fix: return the same type on normal completion as on interruption
    # (the original fell off the end and returned None here).
    return all_similarities
def fix(out_path, base_path):
    """Repair existing .ecsv result files by regenerating their header block.

    Walks all apks under base_path, strips any stale preamble from the
    matching .ecsv, reruns apkanalyzer and the packer check, and rewrites
    the file with a fresh metadata header followed by the original CSV body.
    """
    files = fnmatch.filter(glob.iglob(os.path.join(base_path, '**'), recursive=True), '*.apk')
    # Guard against division by zero in the progress output below.
    total = max(len(files), 1)
    now = 1
    for file in files:
        package_name = file.split('/')[-1][:-4]
        directory = os.path.dirname(file)
        LOGGER.info(f'Processing apk {package_name}')
        src = os.path.join(out_path, f'{package_name}.ecsv')
        if not os.path.exists(src):
            LOGGER.error(f'Did not find existing decompilation results')
            now += 1
            continue
        with open(src, 'r') as in_file:
            content = in_file.read().strip()
        if content.startswith('Packer'):
            # File already begins with the expected header.
            LOGGER.info(f'No need to fix {package_name}')
            now += 1
            continue
        if content.startswith('ERROR:'):
            LOGGER.info(f'.ecsv for {package_name} signaled error, skipping')
            now += 1
            continue
        # Locate the last occurrence of the CSV column header line.
        last_csv_header_line = 0
        current_line = 0
        for line in content.split('\n'):
            line = line.strip()
            if line == 'signature;size;C;F;J;P;C-R;F-R;J-R;P-R;':
                last_csv_header_line = current_line
            current_line += 1
        # NOTE(review): assumes a well-formed file has its CSV header on
        # line 7; anything else is treated as a stale preamble and cut
        # away — TODO confirm that magic value.
        if last_csv_header_line != 7:
            content = '\n'.join(content.split('\n')[last_csv_header_line:])
        apk_analyzer_dir = os.path.join(directory, 'apkanalyzer')
        os.makedirs(apk_analyzer_dir, exist_ok=True)
        try:
            # LOGGER.info(f'Running apkanalyzer to fix {package_name}')
            apk_analyzer_results = run_apk_analyzer(file, TIMEOUT, apk_analyzer_dir)
        except ApkAnalyzerError:
            LOGGER.error(f'APK {package_name} failed processing with apkanalyzer.')
            filewriter.apk_error(out_path, package_name)
            continue
        packer_name = packer(file, directory)
        # Rebuild the metadata header expected at the top of every .ecsv.
        out = f'Packer:\t{packer_name}\n' \
              f'Methods:\t{apk_analyzer_results.get("method_count", -1)}\n' \
              f'Size:\t{os.path.getsize(file)}\n' \
              f'Downloads:\t-1\n' \
              f'Family:\tandrozoo\n' \
              f'##########\n\n' \
              f'{content}\n'
        with open(os.path.join(out_path, f'{package_name}.ecsv'), 'w') as out_file:
            out_file.write(out)
        LOGGER.info(f'Fixed {package_name}:\t{now} / {total}\t({(now / total) * 100 :.2f}%)')
        now += 1
| [
"s8nomaut@stud.uni-saarland.de"
] | s8nomaut@stud.uni-saarland.de |
7faf818aca884cd70f540d9f8533c853c43689f0 | ab5810cfcf66f89225dcd520b8caf129a4e68cc2 | /datasets/FB_comments/process_data.py | 4a36b48019b889e042625dab0a2d244468d6c22d | [
"MIT",
"Python-2.0"
] | permissive | Unique-Divine/PyTorch-Deep-Learning-Tutorial | f5337b4b4cfa2304eb77b49242605750cd9faad0 | 2db0a5b45827e34d07e0f28339bc8a51581f7e73 | refs/heads/master | 2023-05-08T14:15:59.830187 | 2021-05-31T01:45:13 | 2021-05-31T01:45:13 | 271,871,646 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,035 | py | # standard DS stack
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pandas as pd
# embed static images in the ipynb
get_ipython().run_line_magic('matplotlib', 'inline')
train_df = pd.read_csv("datasets/FB_comments/training.csv", header=None)
test_df = pd.read_csv("datasets/FB_comments/testing.csv", header=None)
print("Original dataset shapes\n"
+f"Training set:{train_df.shape}, Testing set:{test_df.shape}")
train_df.head()
# def integer_check(vec):
# """Args: vec (np.ndarray, 1D): a vector."""
# if np.all(((vec % 1) == 0)) == True:
# print("This vector contains only integers")
# else:
# print("This vector contains non-integer values")
# integer_check(vec=np.array(train_df.iloc[:,-1]))
train = np.array(train_df)
test = np.array(test_df)
X_train, Y_train = train[:,:-1], train[:,-1]
X_test, Y_test = test[:,:-1], test[:,-1]
from sklearn.preprocessing import StandardScaler

def scale_features(X_train, X_test):
    """Standardize features to zero mean and unit variance.

    The scaler is fit on the training set only and then applied to both
    sets, so no information leaks from the test set.

    Returns
    -------
    tuple of np.ndarray
        The scaled (X_train, X_test).
    """
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # Bug fix: the original returned nothing and the call site discarded
    # the result, so the (local) scaled arrays were silently thrown away
    # and the pipeline continued on unscaled data.
    return X_train, X_test

X_train, X_test = scale_features(X_train, X_test)
# Choose the dimensionality-reduction strategy for the 10 kept features.
reduction_method = "pca"
# reduction_method = "kbest"
if reduction_method == "pca":
    # Principal component analysis (PCA) feature reduction
    from sklearn.decomposition import PCA
    pca = PCA(n_components=10)
    pca = PCA(n_components=10) if False else pca  # (no-op guard removed in review)
elif reduction_method == "kbest":
    pass
# Fixed seed so the subsampling below is reproducible across runs.
rng = np.random.RandomState(5)

def random_shrink(X, Y, shrink=0.5):
    """Randomly subsample the dataset.

    Args:
        X (np.ndarray): feature matrix
        Y (np.ndarray): target matrix
        shrink (float, optional): Fraction of samples to keep.
            Defaults to 0.5, i.e. a 50% reduction in the number of samples.

    Returns:
        X_small, Y_small : Random samples of the input sets
    """
    total = X.shape[0]
    # Sample (with replacement, per rng.choice's default) the row indices
    # to keep, then select the same rows from X and Y.
    picked = rng.choice(np.arange(total), int(shrink * total))
    return X[picked], Y[picked]
# Shrink the (large) training set harder than the test set.
X_train, Y_train = random_shrink(X_train, Y_train, shrink=0.25)
X_test, Y_test = random_shrink(X_test, Y_test)
print("Dataset shapes after PCA and random sampling\n"
      + f"X_train.shape:{X_train.shape}, Y_train.shape:{Y_train.shape}\n"
      + f"X_test.shape:{X_test.shape}, Y_test.shape:{Y_test.shape}")

def present_data():
    # Expose the processed splits to consumers that exec() this script.
    return X_train, Y_train, X_test, Y_test

# Use the following line in the output file to read use these variables
# exec(open(<filename.py>).read())
[X_train, Y_train, X_test, Y_test] = [A for A in present_data()]
| [
"u.divine@columbia.edu"
] | u.divine@columbia.edu |
fe0e28aab3f175900cc8a3b6b7de42698c030e1d | 69f65a7d4a42fed187463f172a0e6bc98361348a | /coffeesensor/test/dummysensor.py | b6198f1bd793286d86ddc8d087531fda4b03599f | [] | no_license | denisw160/CoffeeMonitor | 7e64352ba7cd69b2b0d88aca59e712d56cbc320e | f6b934d84a4c90d9974e59f31e5f1c99b142a9bf | refs/heads/master | 2021-06-03T08:04:30.469723 | 2020-04-22T20:22:42 | 2020-04-22T20:22:42 | 155,524,520 | 0 | 0 | null | 2021-05-07T06:13:16 | 2018-10-31T08:37:13 | Java | UTF-8 | Python | false | false | 3,173 | py | import argparse
import datetime
import json
import random
import socket
import ssl
import time
import paho.mqtt.client as mqtt
#
# This scripts starts a dummy service, that sends random sensor values to the backend.
# For transmission a mqtt broker is used. The format of the data is json:
# {"timestamp":"2012-04-23T18:25:43.511Z", "weight":[double], "allocated":[boolean]}
#
# The mqtt topic "me/wirries/coffeesensor" is used.
#
# require modules
# - paho-mqtt (pip)
#
# Defaults
MQTT_HOST = "localhost"
MQTT_PORT = 1883
MQTT_TOPIC = "me/wirries/coffeesensor"
MQTT_KEEPALIVE_INTERVAL = 45
INTERVAL = 15
# Functions
# Define on_publish event function
def on_publish(client, userdata, mid):
    # paho-mqtt publish callback: invoked once the broker has taken the
    # message; arguments follow the paho on_publish signature.
    print "Message published..."
# Define isNotBlank function for testing, if a String is blank
def is_not_blank(s):
    """Return True if s is a non-empty, non-whitespace-only string."""
    # bool() collapses all falsy cases (None, '', '   ') to an explicit
    # False; the original fell through and implicitly returned None.
    return bool(s and s.strip())
# Starting server
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server", required=False, help="server / default: " + MQTT_HOST, default=MQTT_HOST)
ap.add_argument("-p", "--port", required=False, help="port / default: " + str(MQTT_PORT), default=str(MQTT_PORT))
ap.add_argument("-c", "--ca", required=False, help="path to ca file (full chain) to verify the SSL connection")
ap.add_argument("-t", "--topic", required=False, help="topic for publish / default: " + MQTT_TOPIC, default=MQTT_TOPIC)
ap.add_argument("-u", "--user", required=False, help="username for login")
ap.add_argument("-w", "--password", required=False, help="password for user")
args = vars(ap.parse_args())
host = args["server"]
port = args["port"]
ca = args["ca"]
topic = args["topic"]
user = args["user"]
password = args["password"]
print "CoffeeSensor (dummy) started - for stopping please press CRTL-c"
print " - MQTT-Server:", host
print " - MQTT-Port:", port
print " - MQTT-Keepalive:", MQTT_KEEPALIVE_INTERVAL
print " - MQTT-Topic:", topic
loginEnabled = is_not_blank(user) and is_not_blank(password)
if loginEnabled:
print " - use login for connection with user:", user
sslEnabled = is_not_blank(ca)
if sslEnabled:
print " - use ca file for SSL connection:", ca
# Initiate MQTT Client
mqttc = mqtt.Client(client_id="CoffeeSensor_" + socket.gethostname())
# Register publish callback function
mqttc.on_publish = on_publish
# Setup login for connection
if loginEnabled:
mqttc.username_pw_set(user, password)
# Setup SSL connection
if sslEnabled:
mqttc.tls_set(ca, tls_version=ssl.PROTOCOL_TLSv1_2)
mqttc.tls_insecure_set(False)
# Connect with MQTT Broker
mqttc.connect(host, port, MQTT_KEEPALIVE_INTERVAL)
# Running server
try:
while True:
timestamp = str(datetime.datetime.now().isoformat())
weight = random.uniform(0.3, 2.8)
allocated = random.choice([True, False])
msg = json.dumps({"timestamp": timestamp, "weight": weight, "allocated": allocated})
# Publish message to MQTT Broker
mqttc.publish(topic, msg)
# wait for next update
time.sleep(INTERVAL)
except KeyboardInterrupt:
print "Strg-C called"
finally:
print "CoffeeSensor stopped"
# Disconnect from MQTT_Broker
mqttc.disconnect()
| [
"denis@wirries.me"
] | denis@wirries.me |
e987910d4d77723e6b8370fde4adb13723676a69 | e22b87776fcdf296291be1518cd34c89874d2cdf | /imune-api/server/controllers/user_controller.py | d6d836ac3817e74b0965c23551ffd751935324d3 | [] | no_license | davidpagliotto/megahack-criptovaley-time18-backend | bb6ebe9c821fe4c0e7cf2d79e80dc108f8b0f018 | cf98b19edb0c6a68597ef26c7c652e3edcd7911c | refs/heads/master | 2022-06-05T23:03:34.908823 | 2020-05-05T00:12:28 | 2020-05-05T00:12:28 | 259,664,186 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from fastapi import APIRouter, Depends
from server.filter.filter import get_token
from server.models.user_model import User, UserOutput
from server.services.user_service import UserService
router = APIRouter()
# Router registration bundle (consumed by the application setup): mounts
# these endpoints under /users, tags them for the OpenAPI docs, and guards
# every route with the token dependency.
user_router = {
    "router": router,
    "prefix": "/users",
    "tags": ["User"],
    "dependencies": [Depends(get_token)]
}
@router.post(path="", response_model=UserOutput)
async def post_user(user: User):
    """Create or update the given user and return the stored record."""
    service = UserService()
    return await service.upsert(user)
| [
"dpagliotto@gmail.com"
] | dpagliotto@gmail.com |
aba1fe1222e36f72353fd0c6c5a21047cc2cedee | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03212/s618244450.py | 185edf86b67bdc8518f2d9341edb2c2cdcd3ecfc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | N = input()
L=len(N)
from itertools import product
check_num=[3,5,7]
check=[]
for l in range(1,L+1):
for p in product(range(3),repeat=l):
c=''
for p_ in p:
c+=str(check_num[p_])
if len(set(c))==3 and int(c)<=int(N):
check.append(int(c))
print(len(check)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2d0adc952438c69679bd10f9ed3e38b115ead65f | ae863b3243008c781c5cb28164e36c824b0a2c5b | /AriticleBlog/AriticleBlog/settings.py | 0e52cdc441392a2681ad16e9945fb4e545be6feb | [] | no_license | njw-666/mygit | 490366c18cbc21533b152531e1416e649f010df1 | f73a815dcf4e03d4965bff92981973d2b0b42ea3 | refs/heads/master | 2021-01-04T11:50:16.445057 | 2020-04-09T16:22:16 | 2020-04-09T16:22:16 | 240,534,220 | 0 | 0 | null | 2020-04-30T15:07:26 | 2020-02-14T15:04:37 | CSS | UTF-8 | Python | false | false | 3,297 | py | """
Django settings for AriticleBlog project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9v9y45z29pnsm4v0+0fg(7r()ru^!eo7*-1&0)c76c6n@q_qn1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Article',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AriticleBlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AriticleBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST':"127.0.0.1",
'USER':"root",
'PASSWORD':"",
'PORT':"3306",
'NAME': "Article"
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'

# Bug fix: the setting was misspelled STATICFIELS_DIRS; Django silently
# ignores unknown setting names, so the extra static directory was never
# picked up by the staticfiles app.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
"1037769494@qq.com"
] | 1037769494@qq.com |
0841a27a08a0f3ffff02d972bb085b667907eb60 | caeb9c7ad9606ed440dcb6f852e6de6d7eb0dd91 | /unqomp/ancillaallocation.py | 9d1c9946563b7d25bce91d103c6cfa2716a7bc17 | [
"MIT"
] | permissive | eth-sri/Unqomp | 2e05395d4d68ca643ccfa9a2fa09693cbc792fd4 | 9d7e885af1ebfdeab7e8059d13149aadeed8a6d6 | refs/heads/master | 2023-03-29T19:26:36.273288 | 2021-04-01T09:38:58 | 2021-04-01T09:38:58 | 344,804,721 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,334 | py | from qiskit.circuit import Qubit, QuantumRegister, QuantumCircuit, Gate
from itertools import count
allocation_time_counter = count()
class AncillaRegister(QuantumRegister):
    """A QuantumRegister stamped with a global allocation counter value.

    ``_allocation_date`` records the order in which ancilla registers were
    created (drawn from the module-level ``allocation_time_counter``).
    """

    def __init__(self, nb_qubits, name=None):
        # Only forward the name when one was actually provided, so the
        # base class keeps its auto-naming behavior otherwise.
        if name:
            super().__init__(nb_qubits, name)
        else:
            super().__init__(nb_qubits)
        self._allocation_date = next(allocation_time_counter)
class AncillaGate:
    """Pairs a gate with the number of ancillas it needs and the gates that
    should additionally be treated as qfree when uncomputing.
    """

    def __init__(self, gate, nb_ancillas=0, extra_qfree=None):
        self._gate = gate
        self._nb_ancillas = nb_ancillas
        # Bug fix: the original used a mutable default ([]) which would be
        # shared by every instance constructed without extra_qfree.
        self._extra_qfree = [] if extra_qfree is None else extra_qfree
class AncillaCircuit(QuantumCircuit):
# Mostly delegates to circuit, except for mcx, mcry where we use our custom implementation
# plus allocates ancillas for gates
    def __init__(self, *regs, name = None):
        """Build a circuit, counting qubits of any AncillaRegister passed in.

        The first positional argument may be an int (qubit count), an
        AncillaRegister, or a plain register; remaining arguments are added
        via add_register (which also tracks ancillas).
        """
        self._nb_ancillas = 0
        self._extra_qfree_gates = [] # records custom gates to consider qfree when uncomputing
        self._ancillas_list = []
        if isinstance(regs[0], int):
            QuantumCircuit.__init__(self, regs[0], name = name)
        elif isinstance(regs[0], AncillaRegister):
            QuantumCircuit.__init__(self, regs[0], name = name)
            self._nb_ancillas += len(regs[0][:])
            self._ancillas_list.append(regs[0])
        else:
            QuantumCircuit.__init__(self, regs[0], name = name)
        # Remaining registers go through add_register so ancilla bookkeeping
        # stays in one place.
        for reg in regs[1:]:
            self.add_register(reg)
    def append(self, instruction, qargs = None, cargs = None):
        """Append an instruction; AncillaGates get their ancillas allocated.

        For an AncillaGate needing ancillas, a fresh AncillaRegister is
        created and appended after qargs; the gate's extra qfree gates are
        merged into this circuit's list. Plain instructions pass through.
        """
        if isinstance(instruction, AncillaGate) and instruction._nb_ancillas > 0:
            anc = AncillaRegister(instruction._nb_ancillas)
            self.add_register(anc) # updates nb ancillas
            # Classical arguments are not supported on ancilla gates.
            assert cargs is None
            for qf in instruction._extra_qfree:
                if not qf in self._extra_qfree_gates:
                    self._extra_qfree_gates.append(qf)
            # Ancilla qubits are appended after the caller's qubits.
            QuantumCircuit.append(self, instruction._gate, [*qargs, *anc[:]])
        elif isinstance(instruction, AncillaGate):
            for qf in instruction._extra_qfree:
                if not qf in self._extra_qfree_gates:
                    self._extra_qfree_gates.append(qf)
            QuantumCircuit.append(self, instruction._gate, qargs)
        else:
            QuantumCircuit.append(self, instruction, qargs, cargs)
    def add_register(self, reg):
        """Add a register, tracking it and its size if it is an AncillaRegister."""
        QuantumCircuit.add_register(self, reg)
        if isinstance(reg, AncillaRegister):
            self._ancillas_list.append(reg)
            self._nb_ancillas += len(reg[:])
    def new_ancilla_register(self, num_qubits, name = None):
        """Create an AncillaRegister, add it to this circuit, and return it."""
        a = AncillaRegister(num_qubits, name)
        self.add_register(a)
        return a
def to_ancilla_gate(self, is_qfree = False):
# self should have registers in the following order: first ctrls, then target then ancillas
gate = self.to_gate()
extra_qfree_gates = [gate] if is_qfree else self._extra_qfree_gates
return AncillaGate(gate, self._nb_ancillas, extra_qfree_gates)
def addQfreeGate(self, gate):
self._extra_qfree_gates.append(gate)
def circuitWithUncomputation(self):
from unqomp.uncomputation import uncomputeAllAncillas
return uncomputeAllAncillas(self, [(gate, True) for gate in self._extra_qfree_gates])
def mcx(self, ctrls, target, negated_ctrls = []): #allows for negated ctrls
from unqomp.examples.mcx import makeMCX
def makeNegatedMCXGate(num_ctrls):
ctrls = QuantumRegister(num_ctrls)
target = QuantumRegister(1)
if num_ctrls <= 2:
circuit = AncillaCircuit(ctrls, target)
for i in negated_ctrls:
circuit.x(ctrls[i])
if num_ctrls == 2:
circuit.ccx(ctrls[0], ctrls[1], target)
elif num_ctrls == 1:
circuit.cx(ctrls[0], target)
for i in negated_ctrls:
circuit.x(ctrls[i])
return circuit.to_ancilla_gate(True)
mcx_gate = makeMCX(n)
circuit = AncillaCircuit(ctrls)
anc = circuit.new_ancilla_register(mcx_gate._nb_ancillas)
circuit.add_register(target)
for i in negated_ctrls:
circuit.x(ctrls[i])
mcx_gate_w_uncomp = mcx_gate.circuitWithUncomputation().to_gate()
circuit.append(mcx_gate_w_uncomp, [*ctrls[:], target, *anc[:]])
for i in negated_ctrls:
circuit.x(ctrls[i])
return circuit.to_ancilla_gate(True)
n = len(ctrls[:])
mcx_gate = None
if len(negated_ctrls) > 0:
mcx_gate = makeNegatedMCXGate(n)
if mcx_gate._nb_ancillas > 0:
anc = self.new_ancilla_register(mcx_gate._nb_ancillas)
self.addQfreeGate(mcx_gate._gate)
QuantumCircuit.append(self, mcx_gate._gate, [*ctrls[:], *anc[:], target])
else:
mcx_gate = makeMCX(n).to_ancilla_gate()
self.append(mcx_gate, [*ctrls[:], target])
def mcry(self, rot_coeff, ctrls, target):
from unqomp.examples.mcx import makeMCRY
n = len(ctrls[:])
mcry_gate = makeMCRY(rot_coeff, n).to_ancilla_gate()
self.append(mcry_gate, [*ctrls[:], target])
| [
"anouk.paradis@inf.ethz.ch"
] | anouk.paradis@inf.ethz.ch |
71687720ff526965a20c77c9db597830ce3187b5 | 714058081fe435ed89b94cfa94587338e64672cb | /marqeta/response_models/digital_wallet_token_hash.py | 44fe321135afec4b24ddcf5ac0ed83bccebdd7f4 | [
"MIT"
] | permissive | andyw8/marqeta-python | bc194944c08e8c8327a8a20bac3dc615b2e2a95f | 23e0a66a5d7b20f3f992e44ae22b33a0eebdbce2 | refs/heads/master | 2020-05-20T14:25:39.398668 | 2019-04-01T23:53:55 | 2019-04-01T23:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from datetime import datetime, date
import json
class DigitalWalletTokenHash(object):
    """Read-only wrapper over a digital-wallet-token-hash API response dict."""

    def __init__(self, json_response):
        self.json_response = json_response

    @staticmethod
    def json_serial(o):
        # json.dumps fallback: render datetime/date values as ISO-style strings.
        # (datetime is a subclass of date, so one isinstance tuple covers both.)
        if isinstance(o, (datetime, date)):
            return o.__str__()

    @property
    def token(self):
        # Token identifier from the response, or None when absent.
        return self.json_response.get('token', None)

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    def __repr__(self):
        return '<Marqeta.response_models.digital_wallet_token_hash.DigitalWalletTokenHash>' + self.__str__()
| [
"amaratkere@marqeta.com"
] | amaratkere@marqeta.com |
83733859a6f173de026d413bbab758ac945c0521 | 60cbb360cafadbe20d2acce0dbce0273ad7e1ef3 | /src/digitalmarket/urls.py | 397fa52df4852d21feb5bd0a9e0c31b29c6e5527 | [] | no_license | mugabiisaac/my-digital-market | b7bbaeceb78c103c0350ae86a1130b221cf6fb2e | a863671aa6d879fb432725be997bd19308d2d67c | refs/heads/master | 2020-06-20T01:08:36.484931 | 2017-06-13T08:29:03 | 2017-06-13T08:29:03 | 94,189,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | """digitalmarket URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from checkout.views import CheckoutTestView, CheckoutAjaxView
from dashboard.views import DashboardView
from products.views import UserLibraryListView
# URL routing table for the digitalmarket project (Django 1.8-style url()).
urlpatterns = [
    url(r'^$', DashboardView.as_view(), name='dashboard'),  # site landing page
    url(r'^test/$', CheckoutTestView.as_view(), name='test'),  # checkout smoke-test page
    url(r'^checkout/$', CheckoutAjaxView.as_view(), name='checkout'),  # AJAX checkout endpoint
    url(r'^admin/', include(admin.site.urls)),
    url(r'^products/',include("products.urls", namespace='products')),
    url(r'^seller/',include("sellers.urls", namespace='sellers')),
    url(r'^tags/',include("tags.urls", namespace='tags')),
    url(r'^library/', UserLibraryListView.as_view(), name='library'),  # user's purchased items
]
# Serve static and media files from Django itself only in development;
# in production these should be handled by the web server.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"imugabi1@gmail.com"
] | imugabi1@gmail.com |
dffd6f6fe5891a7e426f0158bd5e4e502f624aba | 7651545b8a439fe4e2de23cde5b4c500caf2ccee | /config.py | bd806dd79a1ff851512706bdef0b139615ee5608 | [] | no_license | stephancill/metis-ai-bot | 2f578e0393e42d90e14bff98df1845d1f7166d8e | d6abdf05021666357bd9877ca5a9d6ab516605f7 | refs/heads/master | 2021-03-16T09:17:26.972256 | 2018-01-15T17:28:42 | 2018-01-15T17:28:42 | 115,908,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | import os
class Config:
    """Trading-bot configuration, read from environment variables at import
    time with hard-coded fallbacks when a variable is missing or malformed."""

    follow_ids = ["914663684928139264"] # @metis_ai
    # follow_ids = ["944717267304108032"] # @mcafee2cash

    # Spend a fraction of BTC balance per signal.
    # NOTE(review): the original comment claimed "default 10%", but the
    # fallback below is 0.0 — the comment, not the code, looked wrong.
    # BUGFIX: bare `except:` swallowed *every* exception (including
    # KeyboardInterrupt/SystemExit); narrowed to the two expected failures:
    # missing variable (KeyError) or non-numeric value (ValueError).
    try:
        btc_spend = float(os.environ["btc_spend"])
    except (KeyError, ValueError):
        btc_spend = 0.0

    # Price to sell at
    # buy * sell_multiplier = sell price (default 1.1)
    try:
        sell_multiplier = float(os.environ["sell_multiplier"])
    except (KeyError, ValueError):
        sell_multiplier = 1.1

    # Buy price multiplier - price ahead of pump (default 1.02)
    try:
        buy_multiplier = float(os.environ["buy_multiplier"])
    except (KeyError, ValueError):
        buy_multiplier = 1.02

    # Telegram channel names to log to (whitespace-separated in the env var)
    try:
        telegram_log_channels = os.environ["telegram_log_channels"].split()
    except KeyError:
        telegram_log_channels = ["lotmtestchannel"]
| [
"stephanus.cilliers@gmail.com"
] | stephanus.cilliers@gmail.com |
f2ad77732dc1ca78d44e09c8446115b5677f35a4 | 5d2d214fff5892d381d0328bca3db04b14e358fb | /archives/old_2017-2018_code/code/tests/LED/led_select.py | 6409e9bb97bf8e7b48ab35f37ee6d5aa34d43504 | [
"MIT"
] | permissive | DFEC-R2D2/r2d2 | 9552705188ed6e3d8c144881eb7c9ddfacfd8072 | 9b64233865ebfe9f0ca3f1b400b55cc8d6494adf | refs/heads/master | 2021-01-20T13:27:36.953410 | 2018-10-30T21:37:50 | 2018-10-30T21:37:50 | 90,496,130 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | #!/usr/bin/env python
from __future__ import print_function
from time import sleep
import numpy as np
import os
from random import randint
# Import the real Adafruit backpack drivers when available; otherwise install
# no-op stand-ins so the module can still be imported (e.g. for development
# off the hardware).
try:
    from Adafruit_LED_Backpack.Matrix8x8 import Matrix8x8
    from Adafruit_LED_Backpack.BicolorMatrix8x8 import BicolorMatrix8x8
    from Adafruit_LED_Backpack.BicolorMatrix8x8 import RED, GREEN
except ImportError:
    class fake_i2c(object):
        # Minimal do-nothing stub mimicking the driver/I2C device interface.
        buffer = []
        def __init__(self, **kwargs): pass
        def set_led(self, a, b, c): pass
        def set_pixel(self, a, b, c): pass
        def clear(self): pass
        def writeList(self, a, b): pass
        def begin(self): pass
        def start(self): pass
    class Matrix8x8(fake_i2c):
        _device = fake_i2c()
        def __init__(self, **kwargs): pass
    class BicolorMatrix8x8(fake_i2c):
        def __init__(self, **kwargs): pass
    # NOTE(review): RED and GREEN are only defined by the successful import;
    # in this fallback path any bicolor code path would raise NameError —
    # the commented constants below were presumably meant to cover that.
# OFF = 0
# GREEN = 1
# RED = 2
# YELLOW = 3
class LEDDisplay(object):
    """
    Driver for a single 8x8 Adafruit LED matrix backpack over I2C.

    Supports the monochrome Matrix8x8 (led_type=MONO) and the bicolor
    BicolorMatrix8x8 (led_type=BI) backpacks.
    """
    MONO = 0
    BI = 1

    def __init__(self, i2c_addr=0x70, led_type=0):
        """Initialize the backpack at i2c_addr and pre-generate 8 random
        8x8 frames (pixel values in [0, limit)) for the demo update()."""
        # self.delay = delay
        self.im = []
        if led_type == self.MONO:
            limit = 2
            self.display = Matrix8x8(address=i2c_addr)
        elif led_type == self.BI:
            limit = 4
            self.display = BicolorMatrix8x8(address=i2c_addr)
        else:
            raise Exception('Invalid LEDDisplay')
        for i in [0, 1, 2, 3, 4, 5, 6, 7]:
            self.im.append(np.random.randint(0, limit, (8, 8)))
        self.led_type = led_type
        self.display.begin()
        self.display.clear()
        self.next = 0

    def __del__(self):
        # Blank the hardware when the object is garbage-collected.
        self.clear()
        sleep(0.005)

    def clear(self):
        """Clear the local buffer and push the blank frame to the device."""
        self.display.clear()
        self.display._device.writeList(0, self.display.buffer)

    def set(self, x, y, color):
        """Set one pixel in the local buffer (does not write to the device).

        For MONO, any color > 0 lights the pixel. For BI, bits of `color`
        select green (bit 0) and red (bit 1); out-of-range coordinates are
        ignored.
        """
        if self.led_type == self.MONO:
            if color > 0:
                self.display.set_pixel(x, y, 1)
            else:
                self.display.set_pixel(x, y, 0)
        elif self.led_type == self.BI:
            # BUGFIX: this guard used to be `if 0 < x > 7 or 0 < y > 7:`,
            # which Python chains as `0 < x and x > 7` — never true — so
            # out-of-bounds pixels were never actually rejected.
            if not (0 <= x <= 7 and 0 <= y <= 7):
                # Ignore out of bounds pixels.
                return
            # NOTE(review): GREEN/RED come from the Adafruit import; in the
            # fake fallback path above they are undefined (NameError).
            # Set green LED based on 1st bit in value.
            self.display.set_led(y * 16 + x, 1 if color & GREEN > 0 else 0)
            # Set red LED based on 2nd bit in value.
            self.display.set_led(y * 16 + x + 8, 1 if color & RED > 0 else 0)

    def displaySet(self, im):
        """Copy an 8x8 array of color values into the buffer and write it."""
        for x in [0, 1, 2, 3, 4, 5, 6, 7]:
            for y in [0, 1, 2, 3, 4, 5, 6, 7]:
                color = im[x][y]
                self.set(x, y, color)
        self.display._device.writeList(0, self.display.buffer)

    def setSolid(self, color=None):
        """Fill the whole matrix: all LEDs on when color is None, otherwise
        the given bicolor value on every pixel, then write to the device."""
        if color is None:
            self.display.buffer = bytearray([0xff]*16)
        else:
            g = 0xff if color & GREEN > 0 else 0
            r = 0xff if color & RED > 0 else 0
            self.display.buffer = bytearray([g, r] * 8)
        self.write()

    def setRandom(self):
        # Fill the frame buffer with random bytes and write immediately.
        self.display.buffer = bytearray(os.urandom(16))
        self.display._device.writeList(0, self.display.buffer)

    def update(self):
        """Demo animation step: cycle solid colors (BI) or show noise (MONO)."""
        # im = self.im[self.next]
        # self.displaySet(im)
        if self.led_type == self.BI:
            self.setSolid(self.next)
            self.next += 1
            if self.next == 4:
                self.next = 0
        else:
            self.setRandom()
            #self.setSolid()
        # self.next += 1
        # if self.next == len(self.im):
        #     self.next = 0

    def write(self):
        """Push the current local buffer to the device over I2C."""
        self.display._device.writeList(0, self.display.buffer)
class LogicFunctionDisplay(object):
    """
    Array of LEDDisplays driven together.
    """
    MONO = 0
    BI = 1

    def __init__(self, led_addrs, led_type=0):
        """Create one LEDDisplay per I2C address, all of the same led_type."""
        self.leds = []
        for addr in led_addrs:
            if led_type == self.MONO:
                led = LEDDisplay(i2c_addr=addr, led_type=0)
            elif led_type == self.BI:
                led = LEDDisplay(i2c_addr=addr, led_type=1)
            else:
                raise Exception('Wrong type of led display')
            self.leds.append(led)

    def update(self):
        # Advance every display's demo animation by one step.
        for led in self.leds:
            led.update()

    def setBrightness(self, bright):
        """Set brightness (0-15) on every display; out-of-range values are ignored.

        BUGFIX: the guard used to be `if 0 > bright > 15: return`, which Python
        chains as `0 > bright and bright > 15` — always false — so invalid
        values were passed straight through to the hardware.
        """
        if not 0 <= bright <= 15:
            return
        for led in self.leds:
            led.display.set_brightness(bright)
if __name__ == "__main__":
    # Interactive demo: pick one display (1-6) or all six (7), choose a color,
    # and paint a random cross pattern on the chosen display(s).
    # NOTE(review): input() returns a str under Python 3, so comparisons like
    # `setLED == 1` only work under Python 2's eval-ing input(); the
    # `from __future__ import print_function` at the top suggests Python 2.
    while 1:
        leds = [0]*7
        setLED = input("Enter LED Number (1-6) -->\n")
        if setLED == 1:
            led = LEDDisplay(0x70,1)
        elif setLED == 2:
            led = LEDDisplay(0x71,1)
        elif setLED == 3:
            led = LEDDisplay(0x72,1)
        elif setLED == 4:
            led = LEDDisplay(0x73,1)
        elif setLED == 5:
            led = LEDDisplay(0x74,1)
        elif setLED == 6:
            led = LEDDisplay(0x75,1)
        elif setLED == 7:
            # One display per I2C address; index 0 stays unused so that
            # display numbers 1-6 map directly to list indices.
            leds[1] = LEDDisplay(0x70,1)
            leds[2] = LEDDisplay(0x71,1)
            leds[3] = LEDDisplay(0x72,1)
            leds[4] = LEDDisplay(0x73,1)
            leds[5] = LEDDisplay(0x74,1)
            leds[6] = LEDDisplay(0x75,1)
        if setLED != 7:
            choice = input("Enter LED option --> (color)\n>>")
            # Random cross: pixels on a random row/column get `choice`,
            # everything else gets color value 4.
            for x in [0, 1, 2, 3, 4, 5, 6, 7]:
                for y in [0, 1, 2, 3, 4, 5, 6, 7]:
                    if x == randint(0,8) or y == randint(0,8):
                        led.set(x, y, choice)
                    else:
                        led.set(x,y,4)
            led.write()
            sleep(1)
            led.clear()
        elif setLED == 7:
            choice = input("Enter LED option for all --> (color)\n>>")
            for x in [0, 1, 2, 3, 4, 5, 6, 7]:
                for y in [0, 1, 2, 3, 4, 5, 6, 7]:
                    if x == randint(0,8) or y == randint(0,8):
                        ##if x == choice[0] and y == choice[1]:
                        for i in range(1,7):
                            leds[i].set(x, y, choice)
                    else:
                        for i in range(1,7):
                            leds[i].set(x,y,4)
            for i in range(1,7):
                leds[i].write()
            sleep(5)
            for i in range(1,7):
                leds[i].clear()
    #
    # try:
    #     led.start()
    #
    # except KeyboardInterrupt:
    #     print('<<<<<<<< keyboard >>>>>>>>>>>')
    #     led.joing()
    #     led.terminate()
| [
"dfecr2d2@users.noreply.github.com"
] | dfecr2d2@users.noreply.github.com |
3712937801b4655d2f06e615f42f6119be1d0be2 | d9e5f868392cc846a14577e2578332dd389766a5 | /ex13.py | 2a4652a2c2319f92b92f4fdfda224686a6f5811d | [] | no_license | quanlidavid/Learn_Python_the_Hard_Way | 8d8d9c9906d1e6b0de1a1dae78fbf4fd150c466c | bc591552efbeb2db588c831bf5280cbe21e11246 | refs/heads/master | 2021-05-16T11:18:13.171264 | 2017-09-27T05:56:20 | 2017-09-27T05:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from sys import argv
# Unpack the command-line arguments: the script name plus exactly three
# user-supplied arguments (raises ValueError otherwise).
# NOTE(review): "frist" is a typo for "first"; kept as-is since it is only
# a local name and the printed strings are already correct.
script, frist, second, third = argv
print("The script is called:", script)
print("Your first variable is:", frist)
print("Your second variable is:", second)
print("Your third variable is:", third)
# Demonstrates the same sequence-unpacking idiom with a plain list.
list1=['aa','bb']
a,b=list1
print(a,b)
| [
"quanlidavid@gmail.com"
] | quanlidavid@gmail.com |
c167c5819bfa452fa8fdba057ff142fbdbde00fe | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/examples/nuke/34-python_in_production-logging_to_a_qt_widget/logger.py | c0e05b78da6905f18952e733200c169b31a72bf1 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,774 | py | import logging
import sys
class Logger(object):
LOGGER_NAME = "Zurbrigg"
FORMAT_DEFAULT = "[%(name)s][%(levelname)s] %(message)s"
LEVEL_DEFAULT = logging.DEBUG
PROPAGATE_DEFAULT = True
_logger_obj = None
@classmethod
def logger_obj(cls):
if not cls._logger_obj:
if cls.logger_exists():
cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)
else:
cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)
cls._logger_obj.setLevel(cls.LEVEL_DEFAULT)
cls._logger_obj.propagate = cls.PROPAGATE_DEFAULT
fmt = logging.Formatter(cls.FORMAT_DEFAULT)
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(fmt)
cls._logger_obj.addHandler(stream_handler)
return cls._logger_obj
@classmethod
def logger_exists(cls):
return cls.LOGGER_NAME in logging.Logger.manager.loggerDict.keys()
@classmethod
def set_level(cls, level):
lg = cls.logger_obj()
lg.setLevel(level)
@classmethod
def set_propagate(cls, propagate):
lg = cls.logger_obj()
lg.propagate = propagate
@classmethod
def debug(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.debug(msg, *args, **kwargs)
@classmethod
def info(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.info(msg, *args, **kwargs)
@classmethod
def warning(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.warning(msg, *args, **kwargs)
@classmethod
def error(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.error(msg, *args, **kwargs)
@classmethod
def critical(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.critical(msg, *args, **kwargs)
@classmethod
def log(cls, level, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.log(level, msg, *args, **kwargs)
@classmethod
def exception(cls, msg, *args, **kwargs):
lg = cls.logger_obj()
lg.exception(msg, *args, **kwargs)
@classmethod
def write_to_file(cls, path, level=logging.WARNING):
file_handler = logging.FileHandler(path)
file_handler.setLevel(level)
fmt = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s")
file_handler.setFormatter(fmt)
lg = cls.logger_obj()
lg.addHandler(file_handler)
if __name__ == "__main__":
Logger.set_propagate(False)
Logger.debug("debug message")
Logger.info("info message")
Logger.warning("warning message")
Logger.error("error message")
Logger.critical("critical message")
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
cad547896bbf46dc8b946ddcf72d1ff6225ba041 | f46b5273355447564604bad0c962c853fa3158b6 | /data/coco.py | 7fe691b2114e83af56bab983574f9752b88de7ae | [
"MIT"
] | permissive | Maetamongminji/SKT_AI_Fellowship | 72662c00adfc4ba4c4453d45c3851c7f64f5ab87 | 1872339ead578d7597694bb00545b6dc7518f6ad | refs/heads/main | 2023-06-24T13:15:21.208598 | 2021-07-25T10:25:25 | 2021-07-25T10:25:25 | 388,705,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,252 | py | # -*- coding: utf-8 -*-
"""COCO.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1v7mKIZPb2XjS7O53nBaqC8JEbmwEw9xe
py파일로 저장해서 ups -> data 아래 위치로 commit할 것
+ 이 안에 get_coco class 만들 것
+ COCOSSL class도!
### __Checking GPU informations__
"""
'''
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
'''
import warnings
warnings.filterwarnings('ignore')
"""### __Google Drive Mount__"""
from google.colab import drive
drive.mount('/content/drive')
"""### __Changing directory & Git Cloning__"""
cd /content/drive/MyDrive/SKT/ups/
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
from data.augmentations import *
# ShearX, ShearY, TranslateX, TranslateXabs, TranslateY, TranslateYabs, Rotate, AutoContrast, Invert, Equalize, Flip, Solarize, SolarizeAdd, Posterize,
# Contrast, Color, Brightness, Sharpness, Cutout, CutoutAbs, SamplePairing, Identity, augment_list, Lighting, CutoutDefault, CutoutRandom, RandAugment
# Above are defined classes in 'ups/augmentations.py' (All imported)
import pickle
import os
# !git clone https://github.com/Maetamongminji/SKT_AI_Fellowship.git # already cloned
"""### __Unzip files in G-Drive(DONE)__"""
# cd /content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017
'''
# done!
!unzip -qq '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/annotations_trainval2017.zip'
!unzip -qq '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/train2017.zip'
!unzip -qq '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/val2017.zip'
!unzip -qq '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/test2017.zip'
'''
"""#### __Downloading dataset using Python API(tried but failed)__"""
# !pip install numpy cython
# !git clone https://github.com/cocodataset/cocoapi.git
# cd /content/drive/MyDrive/SKT_AI_Fellowship/cocoapi/PythonAPI
"""#### __Getting dataset using FiftyOne zoo(tried but failed)__"""
# !sudo python3 setup.py build_ext install
"""
- fiftyone installing 할 때 botocore 랑 urllib3 랑 버전 충돌 일어나서 미리 urllib 버전 맞춰줘야 함
- error message
ERROR: botocore 1.20.105 has requirement urllib3<1.27,>=1.25.4, but you'll have urllib3 1.24.3 which is incompatible. """
'''
pip install --upgrade urllib3==1.25.11
pip install fiftyone # All requirements are satisfied
'''
'''
import fiftyone as fo
import fiftyone.zoo as foz
# List available zoo datasets
#
print(foz.list_zoo_datasets(), '\n') # 'coco-2014', 'coco-2017' are also in the list
#
# Load the COCO-2017 validation split into a FiftyOne dataset
#
# This will download the dataset from the web, if necessary
#
dataset = foz.load_zoo_dataset("coco-2017",
label_types = "detections",
classes = "person",
split = "train") # 먼저 coco-2017만 써보자 # max_samples 따로 설정해주지 않았기 때문에 디폴트로 모든 가능한 sample 뽑음
# Give the dataset a new name, and make it persistent so that you can
# work with it in future sessions
dataset.name = "coco-2017-train-all"
dataset.persistent = True
# Visualize the dataset in the App
session_valid = fo.launch_app(dataset)
'''
### fuouo
'''
import fiftyone.utils.openimages as fouo
import pandas as pd
class_list = fouo.get_classes()
class_series = pd.Series(class_list)
'''
'''
class_series[260:272] # 260~271까지가 Human 에 대한 것
'''
'''
# Load the COCO-2017 validation split into a FiftyOne dataset
#
# This will download the dataset from the web, if necessary
#
dataset = foz.load_zoo_dataset("coco-2017",
split="train",
max_samples = 10000) # 너무 오래 걸려 일단 만 개로 테스트
# Give the dataset a new name, and make it persistent so that you can
# work with it in future sessions
# max sample 따로 설정해주지 않았기 때문에 디폴트로 모든 가능한 sample 뽑는 것으로...!
dataset.name = "coco-2017-train-example"
dataset.persistent = True
'''
# Visualize the in the App
# session = fo.launch_app(dataset) # coco-2017-train-example
"""cifar-10, cifar-100 이용했을 때 main class에서 RuntimeError 뜸
File "train-cifar.py", line 277, in <module>
main()
File "train-cifar.py", line 228, in main
test_loss, test_acc = test(args, test_loader, test_model)
File "/content/drive/MyDrive/SKT_AI_Fellowship/ups/utils/evaluate.py", line 32, in test
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
File "/content/drive/MyDrive/SKT_AI_Fellowship/ups/utils/misc.py", line 41, in accuracy
correct_k = correct[:k].view(-1).float().sum(0)
RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
0% 0/79 [00:00<?, ?it/s]
--> 에러 해결법은 view ftn 대신 reshape ftn 써주는 것(utils의 misc.py 안에 있는 accuracy class 안의 view -> reshape으로 변경)
### __Getting Data from G-Drive__
"""
# cd /content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/
# print(os.getcwd())
'''
dir_coco_2017 = '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/'
print(os.listdir(dir_coco_2017))
'''
"""- train2017:
- val2017: 5000 images(jpg format)
- test2017:
- annotations: 6 json files(captions, instances, person_keypoints files for training and validation each)
- captions: 텍스트로 된 그림에 대한 설명
- **instances: 그림에 있는 사람/사물에 대한 category와 영역 mask**
- person_keypoint: 사람의 자세 데이터
"""
class COCOSSL(datasets.CocoDetection):
    """Semi-supervised dataset wrapper: restricts the dataset to `indexs`,
    overlays pseudo-labels, and carries a negative-learning mask per sample.

    NOTE(review): torchvision's CocoDetection takes (root, annFile, ...) and
    defines neither `train`/`download` kwargs nor `.data`/`.targets`
    attributes — this class looks copied from a CIFAR10 SSL wrapper and
    would fail as written; confirm against the intended base class.
    """
    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=True, pseudo_idx=None, pseudo_target=None,
                 nl_idx=None, nl_mask=None):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        # Default mask: every class allowed for every sample.
        self.nl_mask = np.ones((len(self.targets), len(np.unique(self.targets))))
        if nl_mask is not None:
            self.nl_mask[nl_idx] = nl_mask
        if pseudo_target is not None:
            # Overwrite ground-truth labels with pseudo-labels at pseudo_idx.
            self.targets[pseudo_idx] = pseudo_target
        if indexs is not None:
            # Keep only the selected subset, remembering original indices.
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
            self.nl_mask = np.array(self.nl_mask)[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __getitem__(self, index):
        """Return (image, target, original_index, negative-learning mask)."""
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, self.indexs[index], self.nl_mask[index]
def lbl_unlbl_split(lbls, n_lbl, n_class):
    """Randomly split sample indices into labeled / unlabeled pools.

    Picks n_lbl // n_class indices per class (uniformly at random, without
    replacement) for the labeled pool; every remaining index goes to the
    unlabeled pool. Returns (labeled_indices, unlabeled_indices) as lists.
    """
    per_class = n_lbl // n_class
    labels = np.array(lbls)
    labeled, unlabeled = [], []
    for cls_id in range(n_class):
        cls_indices = np.where(labels == cls_id)[0]
        np.random.shuffle(cls_indices)
        labeled.extend(cls_indices[:per_class])
        unlabeled.extend(cls_indices[per_class:])
    return labeled, unlabeled
# dir_coco_2017 = '/content/drive/MyDrive/SKT_AI_Fellowship/ups/data/datasets/coco-2017/'
# dir_coco_2017 + 'train2017/'
"""___
__Why M=3, N=4 are selected?__
- CIFAR-10 has been extensively studied with previous
data augmentation methods and we first test this proposed
method on this data. The default augmentations for all
methods include flips, pad-and-crop and Cutout [8]. N and
M were selected based on the validation performance on 5K
held out examples from the training set for 1 and 5 settings
for N and M, respectively. Results indicate that RandAugment achieves either competitive (i.e. within 0.1%) or stateof-the-art on CIFAR-10 across four network architectures
(Table 2). As a more challenging task, we additionally compare the efficacy of RandAugment on CIFAR-100 for WideResNet-28-2 and Wide-ResNet-28-10. On the held out 5K
dataset, we sampled 2 and 4 settings for N and M, respectively (i.e. N={1, 2} and M={2, 6, 10, 14}). For WideResNet-28-2 and Wide-ResNet-28-10, we find that N=1,
M=2 and N=2, M=14 achieves best results, respectively.
Again, RandAugment achieves competitive or superior results across both architectures (Table 2).
from https://arxiv.org/pdf/1909.13719.pdf
"""
def get_coco(root=datasets, n_lbl=4000, ssl_idx=None, pseudo_lbl=None, itr=0, split_txt=''):
    """Build (labeled, negative-learning, unlabeled, test) dataset splits.

    NOTE(review): the default `root=datasets` is the torchvision *module*
    object, not a path — callers must pass a real directory.
    """
    os.makedirs(root, exist_ok=True) #create the root directory for saving data
    # augmentations
    transform_train = transforms.Compose([
        RandAugment(3,4),
        #from https://arxiv.org/pdf/1909.13719.pdf. For CIFAR-10 M=3, N=4
        # (see the explanation above)
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=400, padding=int(160*0.125), padding_mode='reflect'), # COCO images are said to be 640x480
        transforms.ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
        CutoutRandom(n_holes=1, length=16, random=True)
    ])
    # Validation/unlabeled transform: tensor conversion + normalization only.
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616))
    ])
    if ssl_idx is None:
        # Read from Google Drive
        # NOTE(review): the next line references `io`, `dataType`, `img`
        # (all undefined here) and uses `base_dataset` before it is
        # assigned — this is leftover notebook code and would raise.
        I = io.imread(base_dataset%(dataType,img['file_name']))
        base_dataset = datasets.CocoDetection(root, train=True, download=True)
        # NOTE(review): result bound to `train_unlbl_2idx` (typo) but
        # `train_unlbl_idx` is used below — NameError as written.
        train_lbl_idx, train_unlbl_2idx = lbl_unlbl_split(base_dataset.targets , n_lbl, 10)
        os.makedirs('data/splits', exist_ok=True)
        # NOTE(review): missing f-prefix — the literal braces end up in the
        # filename; also still named "cifar10" in this COCO variant.
        f = open(os.path.join('data/splits', 'cifar10_basesplit_{n_lbl}_{split_txt}.pkl'),"wb")
        lbl_unlbl_dict = {'lbl_idx': train_lbl_idx, 'unlbl_idx': train_unlbl_idx}
        pickle.dump(lbl_unlbl_dict,f)
    else:
        # Reuse a previously pickled labeled/unlabeled split.
        lbl_unlbl_dict = pickle.load(open(ssl_idx, 'rb'))
        train_lbl_idx = lbl_unlbl_dict['lbl_idx']
        train_unlbl_idx = lbl_unlbl_dict['unlbl_idx']
    lbl_idx = train_lbl_idx
    if pseudo_lbl is not None:
        # Merge pseudo-labeled samples into the labeled pool and balance
        # the labeled set against the negative-learning set by repetition.
        pseudo_lbl_dict = pickle.load(open(pseudo_lbl, 'rb'))
        pseudo_idx = pseudo_lbl_dict['pseudo_idx']
        pseudo_target = pseudo_lbl_dict['pseudo_target']
        nl_idx = pseudo_lbl_dict['nl_idx']
        nl_mask = pseudo_lbl_dict['nl_mask']
        lbl_idx = np.array(lbl_idx + pseudo_idx)
        #balance the labeled and unlabeled data
        if len(nl_idx) > len(lbl_idx):
            exapand_labeled = len(nl_idx) // len(lbl_idx)
            lbl_idx = np.hstack([lbl_idx for _ in range(exapand_labeled)])
            if len(lbl_idx) < len(nl_idx):
                diff = len(nl_idx) - len(lbl_idx)
                lbl_idx = np.hstack((lbl_idx, np.random.choice(lbl_idx, diff)))
            else:
                assert len(lbl_idx) == len(nl_idx)
    else:
        pseudo_idx = None
        pseudo_target = None
        nl_idx = None
        nl_mask = None
    train_lbl_dataset = COCOSSL(
        root, lbl_idx, train=True, transform=transform_train,
        pseudo_idx=pseudo_idx, pseudo_target=pseudo_target,
        nl_idx=nl_idx, nl_mask=nl_mask)
    if nl_idx is not None:
        train_nl_dataset = COCOSSL(
            root, np.array(nl_idx), train=True, transform=transform_train,
            pseudo_idx=pseudo_idx, pseudo_target=pseudo_target,
            nl_idx=nl_idx, nl_mask=nl_mask)
    train_unlbl_dataset = COCOSSL(
        root, train_unlbl_idx, train=True, transform=transform_val)
    test_dataset = datasets.CocoDetection(root, train=False, transform=transform_val, download=True)
    if nl_idx is not None:
        return train_lbl_dataset, train_nl_dataset, train_unlbl_dataset, test_dataset
    else:
        # NOTE(review): the unlabeled dataset is deliberately(?) returned in
        # both the NL and unlabeled slots when no pseudo-labels exist.
        return train_lbl_dataset, train_unlbl_dataset, train_unlbl_dataset, test_dataset
os.getcwd()
cd /content/drive/MyDrive/SKT/ups/SKT_AI_Fellowship
os.getcwd()
!pwd
!ls
# For CIFAR10 4000 Labels
!python3 train_coco.py --dataset "coco" --n-lbl 4000 --class-blnc 7 --split-txt "run1" --arch "cnn13"
# For CIFAR10 1000 Labels
!python3 train_coco.py --dataset "coco" --n-lbl 10000 --class-blnc 7 --split-txt "run1" --arch "cnn13"
# For CIFAR100 10000 Labels
!python3 train_coco.py --dataset "coco" --n-lbl 10000 --class-blnc 1 --split-txt "run1" --arch "cnn13"
# For CIFAR100 4000 Labels
!python3 train_coco.py --dataset "coco" --n-lbl 4000 --class-blnc 1 --split-txt "run1" --arch "cnn13"
| [
"noreply@github.com"
] | noreply@github.com |
e27cf93f24bc53f6f16fd551ed429b1aca98d4d2 | 480bee2fee71fa5f91fcece256918795adfb3eda | /detector/model.py | 7ebca4a47e922b335504cca41e45677a4865c1e2 | [] | no_license | favyen/skyquery | f71d0095681660e4bce5324ae866371fe51e9e3a | dce2639314aaa06cba0d56aab1f7794744c22090 | refs/heads/master | 2023-08-22T17:48:08.697538 | 2021-09-27T02:14:52 | 2021-09-27T02:14:52 | 412,963,924 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,272 | py | import numpy
import tensorflow as tf
import os
import os.path
import random
import math
import time
from PIL import Image
BATCH_SIZE = 4  # default minibatch size (not referenced inside Model itself)
KERNEL_SIZE = 3  # square convolution kernel side used by every conv layer
class Model:
    """TensorFlow 1.x encoder/decoder detector: 6-channel 512x512 input,
    128x128 sigmoid heat-map output, trained with masked cross-entropy."""

    def _conv_layer(self, name, input_var, stride, in_channels, out_channels, options = {}):
        """2D (de)convolution layer with optional batch-norm, dropout and
        activation; `options` keys: activation, dropout, padding, batchnorm,
        transpose. (Mutable default dict is only read, never mutated.)"""
        activation = options.get('activation', 'relu')
        dropout = options.get('dropout', None)
        padding = options.get('padding', 'SAME')
        batchnorm = options.get('batchnorm', False)
        transpose = options.get('transpose', False)
        with tf.variable_scope(name) as scope:
            # conv2d_transpose expects [k, k, out, in]; conv2d [k, k, in, out].
            if not transpose:
                filter_shape = [KERNEL_SIZE, KERNEL_SIZE, in_channels, out_channels]
            else:
                filter_shape = [KERNEL_SIZE, KERNEL_SIZE, out_channels, in_channels]
            kernel = tf.get_variable(
                'weights',
                shape=filter_shape,
                # He-style initialization scaled by fan-in.
                initializer=tf.truncated_normal_initializer(stddev=math.sqrt(2.0 / KERNEL_SIZE / KERNEL_SIZE / in_channels)),
                dtype=tf.float32
            )
            biases = tf.get_variable(
                'biases',
                shape=[out_channels],
                initializer=tf.constant_initializer(0.0),
                dtype=tf.float32
            )
            if not transpose:
                output = tf.nn.bias_add(
                    tf.nn.conv2d(
                        input_var,
                        kernel,
                        [1, stride, stride, 1],
                        padding=padding
                    ),
                    biases
                )
            else:
                # Output spatial size is computed dynamically from the input.
                batch = tf.shape(input_var)[0]
                side = tf.shape(input_var)[1]
                output = tf.nn.bias_add(
                    tf.nn.conv2d_transpose(
                        input_var,
                        kernel,
                        [batch, side * stride, side * stride, out_channels],
                        [1, stride, stride, 1],
                        padding=padding
                    ),
                    biases
                )
            if batchnorm:
                output = tf.contrib.layers.batch_norm(output, center=True, scale=True, is_training=self.is_training, decay=0.99)
            if dropout is not None:
                output = tf.nn.dropout(output, keep_prob=1-dropout)
            if activation == 'relu':
                return tf.nn.relu(output, name=scope.name)
            elif activation == 'sigmoid':
                return tf.nn.sigmoid(output, name=scope.name)
            elif activation == 'none':
                return output
            else:
                raise Exception('invalid activation {} specified'.format(activation))

    def _fc_layer(self, name, input_var, input_size, output_size, options = {}):
        """Fully connected layer with the same option handling as _conv_layer
        (activation, dropout, batchnorm). Unused by __init__ below."""
        activation = options.get('activation', 'relu')
        dropout = options.get('dropout', None)
        batchnorm = options.get('batchnorm', False)
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable(
                'weights',
                shape=[input_size, output_size],
                initializer=tf.truncated_normal_initializer(stddev=math.sqrt(2.0 / input_size)),
                dtype=tf.float32
            )
            biases = tf.get_variable(
                'biases',
                shape=[output_size],
                initializer=tf.constant_initializer(0.0),
                dtype=tf.float32
            )
            output = tf.matmul(input_var, weights) + biases
            if batchnorm:
                output = tf.contrib.layers.batch_norm(output, center=True, scale=True, is_training=self.is_training, decay=0.99)
            if dropout is not None:
                output = tf.nn.dropout(output, keep_prob=1-dropout)
            if activation == 'relu':
                return tf.nn.relu(output, name=scope.name)
            elif activation == 'sigmoid':
                return tf.nn.sigmoid(output, name=scope.name)
            elif activation == 'none':
                return output
            else:
                raise Exception('invalid activation {} specified'.format(activation))

    def __init__(self, bn=False, size=(512, 512), input_channels=6):
        """Build the full graph: placeholders, layer stack, loss, optimizer.

        bn enables batch-norm on the inner layers; size is the input
        resolution (the output heat-map is size/4 in each dimension).
        """
        tf.reset_default_graph()
        self.is_training = tf.placeholder(tf.bool)
        self.inputs = tf.placeholder(tf.uint8, [None, size[0], size[1], input_channels])
        # Scale uint8 inputs to [0, 1]; the commented tail was training-time noise.
        self.float_inputs = tf.cast(self.inputs, tf.float32)/255.0# + tf.random.normal(tf.shape(self.inputs), stddev=0.04)*tf.cast(self.is_training, tf.float32)
        # NOTE(review): size[0]/4 is a float under Python 3 — confirm the
        # placeholder shape still resolves; size[0]//4 would be safer.
        self.targets = tf.placeholder(tf.float32, [None, size[0]/4, size[1]/4])
        self.masks = tf.placeholder(tf.float32, [None, size[0]/4, size[1]/4])
        self.learning_rate = tf.placeholder(tf.float32)
        # layers
        self.layer1 = self._conv_layer('layer1', self.float_inputs, 2, input_channels, 32, {'batchnorm': False}) # -> 256x256x32
        self.layer2 = self._conv_layer('layer2', self.layer1, 2, 32, 64, {'batchnorm': bn}) # -> 128x128x64
        self.layer3 = self._conv_layer('layer3', self.layer2, 2, 64, 64, {'batchnorm': bn}) # -> 64x64x64
        self.layer4 = self._conv_layer('layer4', self.layer3, 2, 64, 64, {'batchnorm': bn}) # -> 32x32x64
        self.layer5 = self._conv_layer('layer5', self.layer4, 1, 64, 64, {'batchnorm': bn}) # -> 32x32x64
        self.layer6 = self._conv_layer('layer6', self.layer5, 2, 64, 64, {'batchnorm': bn, 'transpose': True}) # -> 64x64x64
        self.layer7 = self._conv_layer('layer7', self.layer6, 2, 64, 64, {'batchnorm': bn, 'transpose': True}) # -> 128x128x64
        #self.layer7 = tf.concat([self.layer2, tf.image.resize(self.layer5, [128, 128], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)], axis=3)
        self.pre_outputs = self._conv_layer('pre_outputs', self.layer7, 1, 64, 1, {'activation': 'none', 'batchnorm': False})[:, :, :, 0] # -> 128x128x1
        # Probability heat-map; loss below works on the pre-sigmoid logits.
        self.outputs = tf.nn.sigmoid(self.pre_outputs)
        # Per-pixel cross-entropy, masked so unlabeled regions contribute 0.
        self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.targets, logits=self.pre_outputs) * self.masks)
        # Run batch-norm moving-average updates together with the train op.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        self.init_op = tf.initialize_all_variables()
        self.saver = tf.train.Saver(max_to_keep=None)
| [
"fbastani@perennate.com"
] | fbastani@perennate.com |
907e51e3e9abb9e4b37491c1122a2c555afe1fcc | 42674d7355d852e6ec7071830bb87d781ab63ad3 | /bitmovin/resources/models/manifests/dash/audio_adaptation_set.py | 71e54986b06f54a233eb3ec0495f83cff6d90a84 | [
"Unlicense"
] | permissive | bitmovin/bitmovin-python | 57b1eb5deb7e38f3079e0ded546ec762753c3132 | d183718d640117dd75141da261901dc2f60433b0 | refs/heads/master | 2023-07-11T02:40:59.277881 | 2020-01-28T14:49:15 | 2020-01-28T14:49:15 | 72,857,798 | 46 | 27 | Unlicense | 2019-12-17T13:59:51 | 2016-11-04T15:01:56 | Python | UTF-8 | Python | false | false | 663 | py | from .abstract_adaptation_set import AbstractAdaptationSet
class AudioAdaptationSet(AbstractAdaptationSet):
    """DASH audio adaptation set; adds a required language tag to the base set."""

    def __init__(self, lang, id_=None, custom_data=None):
        """
        :param lang: language code of the audio track
        :param id_: resource id, if already known
        :param custom_data: custom data forwarded to the base class
        """
        super().__init__(id_=id_, custom_data=custom_data)
        self.lang = lang

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an instance from an API JSON payload.

        Raises KeyError if the payload has no 'lang' entry.
        """
        adaptation_set = AbstractAdaptationSet.parse_from_json_object(json_object=json_object)
        id_ = adaptation_set.id
        custom_data = adaptation_set.customData
        lang = json_object['lang']
        # BUG FIX: construct via ``cls`` rather than the hard-coded class name
        # so subclasses inherit a working parse_from_json_object.
        audio_adaptation_set = cls(id_=id_, custom_data=custom_data, lang=lang)
        return audio_adaptation_set
| [
"dominic.miglar@netunix.at"
] | dominic.miglar@netunix.at |
57f3d78002099dd25a0414ffe76313bcfd59368b | 5323f9bafe599813ea7860b36a17b20f928ee8e2 | /PS3/code/testCarSequence.py | 95f1a9a64056dca3eaeab3f31b1c30530d83dca9 | [] | no_license | zCoCo/16720 | 78f0eda9e251bb61ca0f3cb5704f967e841c7304 | 02b223ac2a4aa71ed7bfbd6b2fbc3a1c5aa93155 | refs/heads/master | 2023-01-28T00:59:33.965622 | 2020-12-11T04:03:57 | 2020-12-11T04:03:57 | 294,991,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from LucasKanade import LucasKanade
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
# BUG FIX: the default was the float literal 1e4 even though type=int.
# argparse applies `type` only to command-line strings, so the default
# leaked through as a float; use a genuine int of the same value.
parser.add_argument('--num_iters', type=int, default=10000, help='number of iterations of Lucas-Kanade')
parser.add_argument('--threshold', type=float, default=1e-4, help='dp threshold of Lucas-Kanade for terminating optimization')
args = parser.parse_args()
num_iters = args.num_iters
threshold = args.threshold

# ---- data and initial tracking state ----------------------------------------
seq = np.load("../data/carseq.npy")  # image stack, frame f at seq[:, :, f]
num_frames = seq.shape[2]
rect = [59, 116, 145, 151]  # initial rectangle value [x1, y1, x2, y2]
rects = np.zeros((num_frames, 4))  # per-frame tracked rectangle
rects[0, :] = rect
def show_frame(f, save=False):
    """Render frame ``f`` with its tracked rectangle; save a PNG if ``save``."""
    x0, y0 = rects[f, 0:2]
    w, h = rects[f, 2:] - rects[f, 0:2]
    plt.figure()
    plt.imshow(seq[:, :, f], cmap='gray')
    box = patches.Rectangle((x0, y0), w, h, linewidth=1, edgecolor='r', facecolor='none')
    plt.gca().add_patch(box)
    if not save:
        plt.show()
    else:
        plt.axis('off')
        plt.savefig("../outputs/q1_3_car_{}.png".format(f), bbox_inches='tight')
# Track frame-to-frame: estimate the translation p between consecutive
# frames and shift the rectangle by it.
for f in range(num_frames-1):
    # NB: argmin is init w/zeros (better performance than p_n-1/minimizes affect of drift - prevents inertia)
    p = LucasKanade(seq[:,:,f], seq[:,:,f+1], rects[f,:], threshold, num_iters, p0=np.zeros(2))
    # Apply the same (dx, dy) offset to both corners of the box.
    rects[f+1,:] = rects[f,:] + [p[0],p[1], p[0],p[1]]
    #print("{}/{}".format(f+2,num_frames))
np.save('../result/carseqrects.npy', rects) # Note: rect is allowed to be float according to Piazza @323
# Spot-check the track on a handful of frames.
for f in [1,100,200,300,400]:
    show_frame(f)
"connor.caxd@gmail.com"
] | connor.caxd@gmail.com |
3ceae33d561fab283591b7d340d0bdd984b6139d | f3b691c032bafa8f1df43fcfea2d35ca2cf3de64 | /static_reader/scripts/generate_canned_data.py | 74aa14b748ac4a652f425679ab3df7de0c5a3577 | [
"Apache-2.0"
] | permissive | qiwihui/readerisdead | b163c0d544c5cbbacba1294be50757df444ee382 | 0e35cf26e88f27e0a07432182757c1ce230f6936 | refs/heads/master | 2021-01-15T10:20:48.243536 | 2018-07-01T00:09:44 | 2018-07-01T00:09:44 | 66,332,546 | 0 | 0 | null | 2016-08-23T04:16:22 | 2016-08-23T04:16:21 | null | UTF-8 | Python | false | false | 4,010 | py | import feedparser
import json
import os.path
import time
import uuid
import xml.sax.saxutils
def crawl_feed_url(feed_url):
print " Crawling %s..." % (feed_url)
stream_id = "feed/%s" % feed_url
feed = feedparser.parse(feed_url)
crawl_time_sec = time.time()
crawl_time_msec = time.time() * 1000
feed_json = {
"id": stream_id,
"title": feed.feed.title,
"htmlUrl": feed.feed.link,
"updated": crawl_time_sec,
"items": [],
}
origin_json = {
"title": feed.feed.title,
"htmlUrl": feed.feed.link,
"streamId": stream_id,
}
for entry in feed.entries:
id = str(uuid.uuid4()).replace("-", "")[:16]
def to_html(d):
if not d:
return ""
if d.type in ["text/html", "application/xhtml+xml"]:
return d.value
return xml.sax.saxutils.escape(d.value)
content = None
if "content" in entry:
content = entry.content[0]
elif "summary_detail" in entry:
content = entry.summary_detail
elif "summary_" in entry:
content = entry.summary
item_json = {
"origin": origin_json,
"published": time.mktime(entry.published_parsed),
"updated": time.mktime(entry.updated_parsed),
"author": entry.author_detail.name if "author_detail" in entry else "",
"id": "tag:google.com,2005:reader/item/%s" % id,
"categories": [],
"title": to_html(entry.title_detail),
"alternate": [
{
"href": entry.link,
"type": "text/html",
}
],
"timestampUsec": json.dumps(
time.mktime(entry.published_parsed) * 1000000),
"content": {
"content": to_html(content),
"direction": "ltr",
},
"crawlTimeMsec": json.dumps(int(crawl_time_msec)),
"annotations": [],
"likingUsers": [],
}
feed_json["items"].append(item_json)
print " ...crawled with %d items" % (len(feed.entries))
return stream_id, feed_json
# Feeds to crawl, grouped by the Reader folder they should appear in.
_CANNED_FEED_URLS = {
  "Reader Team": [
    "http://googlereader.blogspot.com/atom.xml",
    "http://feeds.feedburner.com/PersistentInfo",
    "http://feeds.feedburner.com/xenomachina",
    # "http://www.footbag.org/index2/index.rss", # Not parseable
    # "http://feeds.feedburner.com/furycom", # Not a feed
    "http://massless.org/index.html%3Ffeed=rss2",
    "http://blog.shellen.com/feeds/posts/default",
    "http://www.blogger.com/feeds/6616843/posts/default",
  ],
  # From http://techland.time.com/2013/08/05/the-25-best-bloggers-2013-edition
  "Party Like it's 2013": [
    "http://feeds.feedburner.com/WorldWarIIToday",
    "http://feeds.feedburner.com/marginalrevolution/feed",
    "https://www.101cookbooks.com/feed",
    "https://www.schneier.com/blog/atom.xml",
    "http://surisburnbook.tumblr.com/rss",
    "http://www.askthepilot.com/feed/",
    "http://beerlabelsinmotion.tumblr.com/rss",
  ],
}

# Output is a JS file loaded directly by the static reader UI.
_CANNED_DATA_OUTPUT_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "../reader/canned-data.js"))
# Crawl every configured feed and group the resulting stream IDs by folder.
print "Crawling feeds..."
canned_feed_json = {}
canned_folder_json = {}
for folder, feed_urls in _CANNED_FEED_URLS.iteritems():
  stream_ids = []
  for feed_url in feed_urls:
    stream_id, feed_json = crawl_feed_url(feed_url)
    canned_feed_json[stream_id] = feed_json
    stream_ids.append(stream_id)
  canned_folder_json[folder] = stream_ids

# Emit the crawled data as two JS constants consumed by the reader page.
with open(_CANNED_DATA_OUTPUT_PATH, "w") as o:
  o.write("const _CANNED_FOLDER_DATA = %s;\n" % json.dumps(
      canned_folder_json, indent=4))
  o.write("const _CANNED_FEED_DATA = %s;\n" % json.dumps(
      canned_feed_json, indent=4))
print "Wrote canned data to: %s" % _CANNED_DATA_OUTPUT_PATH
| [
"mihai@persistent.info"
] | mihai@persistent.info |
e14e3132d18bf92b6fbd9baa8184bd14460d3572 | f23708e78a3d7a268b35015230f1aa2e6958f16f | /Data/Models/6/train.py | 5b0bbc311ac4b1f54441128a0a642cc4b634fc1b | [] | no_license | destinyson7/Dependency-Parser-Marathi | 785be8cce7003b02d3da475eef1ff6d5e9b2aea3 | 3deac41d25c9a895ae5b903c765730b6e561a399 | refs/heads/master | 2022-06-18T08:45:13.460664 | 2020-05-07T13:52:40 | 2020-05-07T13:52:40 | 256,130,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | import pickle
import sys
from itertools import repeat
from scipy.sparse import csr_matrix
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import numpy as np
with open('../../words', 'rb') as fp:
words = pickle.load(fp)
# with open('../../pos_tags', 'rb') as fp:
# pos_tags = pickle.load(fp)
# with open('../../chunk_tags', 'rb') as fp:
# chunk_tags = pickle.load(fp)
# print(words)
# print(pos_tags)
# print(chunk_tags)
N = len(words) + \
len(words) + 2
Y = []
classes = set([])
words_len = len(words)
# pos_len = len(pos_tags)
# chunk_len = len(chunk_tags)
row_ind = []
col_ind = []
word_index = {}
# pos_index = {}
# chunk_index = {}
for i in range(len(words)):
word_index[words[i]] = i
# for i in range(len(pos_tags)):
# pos_index[pos_tags[i]] = i
# for i in range(len(chunk_tags)):
# chunk_index[chunk_tags[i]] = i
lr_index = {}
lr_index["L"] = 0
lr_index["R"] = 1
with open(sys.argv[1], "r") as f:
for line in f:
if line.strip():
current = line.strip().split("*")
# print(len(Y))
if current[2].strip() == "U":
continue
if current[0].strip().split(" ")[0].strip() == "ROOT":
col_ind.append(word_index["ROOT"])
# col_ind.append(words_len + pos_index["ROOT"])
# col_ind.append(words_len + pos_len + chunk_index["ROOT"])
else:
# print(current[0].strip().split(" ")[1])
col_ind.append(word_index[(current[0].strip().split(" ")[1])])
# col_ind.append(
# words_len + pos_index[(current[0].strip().split(" ")[2])])
# col_ind.append(words_len + pos_len +
# chunk_index[(current[0].strip().split(" ")[3])])
if current[1].strip().split(" ")[0].strip() == "ROOT":
col_ind.append(words_len + word_index[("ROOT")])
# col_ind.append(words_len + pos_tags.index("ROOT"))
# col_ind.append(words_len + pos_len + chunk_tags.index("ROOT"))
else:
# print(current[1].strip().split(" ")[1])
col_ind.append(
words_len + word_index[(current[1].strip().split(" ")[1])])
# col_ind.append(words_len + pos_len + chunk_len + words_len +
# pos_index[(current[1].strip().split(" ")[2])])
# col_ind.append(words_len + pos_len + chunk_len + words_len +
# pos_len + chunk_index[(current[1].strip().split(" ")[3])])
col_ind.append(2 * words_len + lr_index[current[2].strip()])
row_ind.extend(repeat(len(Y), 3))
Y.append(current[3].strip())
classes.add(current[3].strip())
M = len(Y)
data = [1] * len(col_ind)
# print(col_ind)
# print(len(Y))
# print(len(row_ind))
# print(len(col_ind))
# print(len(data))
X = csr_matrix((data, (row_ind, col_ind)), shape=(M, N))
# print(np.shape(X))
SVM = LinearSVC()
SVM.fit(X, Y)
pickle.dump(SVM, open('SVM.sav', 'wb'))
logisticRegr = LogisticRegression(max_iter=4000)
logisticRegr.fit(X, Y)
pickle.dump(logisticRegr, open('LogisticRegression.sav', 'wb'))
print(SVM)
print(logisticRegr)
| [
"tanish.lad@research.iiit.ac.in"
] | tanish.lad@research.iiit.ac.in |
5b4011742a367c17c7a4ced35f7feae7412c357b | 3507e5dcb7dea780d9d3a30601524ced6c8a21d1 | /DRT/urls.py | caef5ce17172f5b789d8cb5c60d9c8ebdb8ca087 | [] | no_license | Pbasnal/SmartBus | 2e4660cb6bda8e6115c3a90a9e0ad630cf8cdcc3 | ce90a1d64cba4799f76efc9abb2435db526aad29 | refs/heads/master | 2021-01-22T17:17:49.962865 | 2015-02-12T06:18:04 | 2015-02-12T06:18:04 | 30,032,610 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | __author__ = 'basnal'
from django.conf.urls import patterns, include, url
from django.contrib import admin

# Populate the admin registry from each installed app's admin module.
admin.autodiscover()

# NOTE: patterns() with string view paths is the pre-Django-1.10 URLconf
# style; the views referenced here live in DRT/views.py.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'DRT.views.showMap'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^map/', 'DRT.views.showMap'),
    url(r'^setpath/$', 'DRT.views.setPath'),
    url(r'^admin/', include(admin.site.urls)),
)
| [
"pankajbasnal17@gmail.com"
] | pankajbasnal17@gmail.com |
11c2b3f77fed5f5d541620aaa9c48d9be7d924c9 | 21ca0d7d66cf373a6efc532bf1b05bb9173b802f | /dml/parameter_node.py | c2b28b1bc504d4a31a6bf2cb99f2cff47e603c6d | [] | no_license | liu-ca/DML_Network | 727f689473c71d4da86f930732ec71488beff9ff | dc59c9fb47bc793998eca28d63796579aae195f1 | refs/heads/master | 2021-10-25T04:28:37.862379 | 2019-03-27T08:31:32 | 2019-03-27T08:31:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,843 | py | import queue
import threading
from dml import dml_base_thread as dbt
from dml import server_node as sn
module = __import__("dml.dml_base_thread")
class ParameterServer:
    """Parameter-server side of a distributed-ML setup.

    Owns one ServerNode (socket endpoint) per compute client, a send/receive
    queue pair plus locks for each, and a single averaging thread that
    consumes received gradients/losses and feeds averaged results back to
    the per-client send queues.
    """
    # Thread that computes the average gradient/loss across workers.
    calc_loss_thread: dbt.CalcAverageLoss

    def __init__(self, ip_set, num):
        """
        :type ip_set: dict  # maps port -> client IP
        """
        self.ip_set = ip_set
        self.clients_num = num
        self.send_locks = {}
        self.rec_locks = {}
        self.rec_queues = {}
        self.send_queues = {}
        self.server_nodes = {}
        self.calc_loss_thread = None

    def distributed_dnn(self):
        """Full startup sequence for serving the distributed training run."""
        self._create_server_nodes()  # one communication node per compute node
        self._init_socket_conn()  # connect to the compute nodes
        self._init_send_rec_queues()  # create the data queues
        self.create_send_rec_threads()  # two threads per node (recv + send)
        self.create_avg_calc_thread()  # thread that averages gradients/losses
        self._start_send_rec_threads()  # start the per-node threads
        self._notify_clients()  # tell clients they may start sending
        self._start_avg_calc_thread()  # start the averaging thread

    def _create_server_nodes(self):
        # One ServerNode per configured (port, ip) pair, keyed by port.
        for port, ip in self.ip_set.items():
            server = sn.ServerNode(ip, port)
            self.server_nodes[port] = server

    def _start_send_loss(self):
        self.calc_loss_thread.start()

    def _init_socket_conn(self):
        # Retry each node's connection until it reports an established link.
        for port, ip in self.ip_set.items():
            node: sn.ServerNode = self.server_nodes[port]
            while not node.net_state:
                node.create_conn()

    def _notify_clients(self):
        # Signal every connected client that sending may begin.
        for port, ip in self.ip_set.items():
            node: sn.ServerNode = self.server_nodes[port]
            node.start_send_loss()

    def _init_send_rec_queues(self):
        for port, ip in self.ip_set.items():
            # Lock for the receive queue.
            rec_queue_lock = threading.Lock()
            self.rec_locks[port] = rec_queue_lock
            # Lock for the send queue.
            send_queue_lock = threading.Lock()
            self.send_locks[port] = send_queue_lock
            # One receive queue per worker.
            rec_data = queue.Queue()
            self.rec_queues[port] = rec_data
            # One send queue per worker.
            send_data = queue.Queue()
            self.send_queues[port] = send_data

    # May be overridden by subclasses.
    def create_send_rec_threads(self):
        '''
        Create the matching server-side threads for every client node:
        one receiver and one sender, registered on that node.
        '''
        for port, ip in self.ip_set.items():
            thread_list = []
            rec_thread = dbt.ServerRecBaseThread("服务端接受线程")
            rec_thread.init_para(self.server_nodes[port], self.rec_queues[port], self.rec_locks[port])
            thread_list.append(rec_thread)
            send_thread = dbt.ServerSendBaseThread("服务端发送线程")
            send_thread.init_para(self.server_nodes[port], self.send_queues[port], self.send_locks[port])
            thread_list.append(send_thread)
            self.server_nodes[port].set_thread_list(thread_list)

    # May be overridden by subclasses.
    def create_avg_calc_thread(self):
        # A single thread per parameter server computes the averages.
        self.calc_loss_thread = dbt.CalcAverageLoss("计算平均梯度线程")
        self.calc_loss_thread.init_para(self.ip_set, self.send_queues, self.rec_queues, self.rec_locks)

    def _start_send_rec_threads(self):
        for port, ip in self.ip_set.items():
            server_node: sn.ServerNode = self.server_nodes[port]
            server_node.run_thread()

    def _start_avg_calc_thread(self):
        self.calc_loss_thread.start()
| [
"474387803@qq.com"
] | 474387803@qq.com |
eee2779135d6a92b3cc047ecbea67cad42f0656d | 082530d4140c040c608c0a537bf77a09ed0d5c5f | /Entertainment/models.py | c64e4a32c5601056a371ffb925d6d5a29271c42b | [] | no_license | nandha712019/Entertainment | f4a6c72a4a9723be5638903ea73d7b6719386436 | 9e5dc8d63505e4e1b0081eef10c78c0b5fdf2be3 | refs/heads/master | 2023-06-06T21:49:05.623411 | 2021-07-13T05:00:28 | 2021-07-13T05:00:28 | 385,474,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | import uuid
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
class Media(models.Model):
"""Database table for Media details!!!"""
Media_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
Media_link = models.FileField(upload_to="uploads/", unique=True)
Media_type = models.CharField('Media_type', max_length=20, default='Audio',
choices=[('Audio', 'Audio'), ('Video', 'Video')])
Title = models.TextField(max_length=50, default="")
Description = models.TextField(max_length=300, default="")
Thumbnail = models.ImageField(upload_to="uploads/")
created_on = models.DateTimeField(default=datetime.now)
class Favourite(models.Model):
"""Database table for Favourite"""
user = models.ForeignKey(User, on_delete=models.CASCADE, )
Media_id = models.ForeignKey(Media, on_delete=models.CASCADE, )
class Meta:
unique_together = ["user", "Media_id"]
| [
"nandha712019"
] | nandha712019 |
4943e2a33bb554208d68eb6a684117fda0462433 | 44c372cd25a2496979fa29a1dc9131c54487d945 | /data/zh50w/zh50w_process.py | 0c551f731118ff55f6d1cef12a2e33090afd341d | [
"MIT"
] | permissive | zhongerqiandan/OpenDialog | 4ff4e65d0ade1efdd3029475634ae1cf38c7bdd3 | f478b2a912c8c742da5ced510ac40da59217ddb3 | refs/heads/master | 2023-01-22T09:07:54.385604 | 2020-12-04T02:00:17 | 2020-12-04T02:00:17 | 318,419,052 | 0 | 1 | MIT | 2020-12-04T05:58:37 | 2020-12-04T05:58:37 | null | UTF-8 | Python | false | false | 5,482 | py | import csv
import random
from tqdm import tqdm
import ipdb
import sys
import pickle
sys.path.append('..')
from utils import read_stop_words
from collections import Counter
from gensim.summarization import bm25
from elasticsearch import Elasticsearch
'''
TODO
1. adding the reesponses into elasticsearch for q-r match
'''
class ESUtils:
    """Index-management helpers for the retrieval chatbot's ES index."""

    def __init__(self, index_name, create_index=False):
        self.es = Elasticsearch()
        self.index = index_name
        if create_index:
            # 'context' is analyzed with the ik_max_word Chinese analyzer
            # (requires the elasticsearch-analysis-ik plugin on the cluster).
            mapping = {
                'properties': {
                    'context': {
                        'type': 'text',
                        'analyzer': 'ik_max_word',
                        'search_analyzer': 'ik_max_word'
                    }
                }
            }
            # Rebuild the index from scratch: delete -> create -> put mapping.
            if self.es.indices.exists(index=self.index):
                self.es.indices.delete(index=self.index)
            rest = self.es.indices.create(index=self.index)
            print(rest)
            rest = self.es.indices.put_mapping(body=mapping, index=self.index)
            print(rest)

    def insert_pairs(self, pairs):
        """Index (context, response) pairs one document at a time."""
        count = self.es.count(index=self.index)['count']
        print(f'[!] begin of the idx: {count}')
        for i, qa in enumerate(tqdm(pairs)):
            data = {
                'context': qa[0],
                'response': qa[1]
            }
            self.es.index(index=self.index, body=data)
        print(f'[!] insert data over, whole size: {self.es.count(index=self.index)["count"]}')
class ESChat:
    """Retrieval-style chatbot backed by an Elasticsearch full-text index."""

    def __init__(self, index_name):
        self.index = index_name
        self.es = Elasticsearch()

    def search(self, query, samples=10):
        """Match ``query`` against the 'context' field; return up to
        ``samples`` hits as {'score', 'context', 'response'} dicts."""
        dsl = {'query': {'match': {'context': query}}}
        raw = self.es.search(index=self.index, body=dsl, size=samples)
        return [
            {
                'score': hit['_score'],
                'context': hit['_source']['context'],
                'response': hit['_source']['response'],
            }
            for hit in raw['hits']['hits']
        ]

    def chat(self):
        """Interactive loop: print retrieved replies for each typed
        utterance; an empty line or 'exit' ends the session."""
        while True:
            sentence = input('You are speaking: ').strip()
            if not sentence or sentence == 'exit':
                break
            rest = self.search(sentence)
            for idx, hit in enumerate(rest):
                print(f'ESChat({idx}/{len(rest)}): {hit["response"]}')
def read_file(path):
    """Load dialog blocks from ``path``.

    Blocks are separated by blank lines; each block becomes a list of its
    utterance lines. The resulting list is shuffled before being returned.
    """
    with open(path) as handle:
        raw = handle.read()
    blocks = [chunk.split('\n') for chunk in raw.split('\n\n') if chunk.strip()]
    random.shuffle(blocks)
    return blocks
def write_file(dialogs, mode='train', samples=10):
    """Write ``<mode>.csv`` with one row per dialog: context, gold response,
    and ``samples`` retrieved distractor responses from the ES index."""
    chatbot = ESChat('retrieval_chatbot')
    with open(f'{mode}.csv', 'w') as f:
        f = csv.writer(f)
        f.writerow(['Context', 'Response'] + [f'Retrieval_{i+1}' for i in range(samples)])
        # f.writerow(['Context', 'Response'])
        error_counter = 0
        responses = [i[1] for i in dialogs]
        for dialog in tqdm(dialogs):
            # Retrieve one extra hit so removing the gold response (if ES
            # returned it) still leaves ``samples`` candidates.
            rest = [i['response'] for i in chatbot.search(dialog[0], samples=samples+1)]
            if dialog[1] in rest:
                rest.remove(dialog[1])
            dialog = list(dialog) + rest
            if len(dialog) != samples + 2:
                # Too few hits: pad with random gold responses.
                # NOTE(review): samples+3-len(dialog) pads one element past the
                # target length; the slice below truncates the surplus — confirm
                # this off-by-one padding is intentional.
                error_counter += 1
                dialog.extend(random.sample(responses, samples+3-len(dialog)))
            # assert len(dialog) == samples + 2, f'{len(dialog)} retrieval utterances are obtained'
            f.writerow(dialog[:samples+2])
    print(f'[!] finish writing the file {mode}.csv, error counter: {error_counter}')
def process_data(dialogs, samples=10, max_len=10, max_utter_len=50):
    """Flatten dialogs into (context, response) pairs.

    The final utterance is the response; up to ``max_len`` preceding
    utterances — each clipped to its last ``max_utter_len`` characters —
    are joined with ' <eou> ' to form the context. ``samples`` is unused
    here and kept only for signature compatibility with callers.
    """
    pairs = []
    for dialog in tqdm(dialogs):
        # dialog = [' '.join(list(jieba.cut(i))) for i in dialog]
        history = [turn[-max_utter_len:] for turn in dialog[-(max_len+1):-1]]
        pairs.append((' <eou> '.join(history), dialog[-1]))
    return pairs
def retrieval_model():
    """Return an ESChat wired to the default 'retrieval_chatbot' index."""
    bot = ESChat('retrieval_chatbot')
    print(f'[!] load retrieval model from ElasticSearch, default 10 replys.')
    return bot
if __name__ == "__main__":
    import sys
    if sys.argv[1] == 'process':
        # 95% / 2.5% / 2.5% split of the shuffled dialogs.
        data = read_file('train.txt')
        whole_size = len(data)
        train_size = (0, int(0.95 * whole_size))
        dev_size = (train_size[1], train_size[1] + int(0.025 * whole_size))
        test_size = (dev_size[1], whole_size)
        print(f'data size: train({train_size[1]-train_size[0]}); dev({dev_size[1]-dev_size[0]}); test({test_size[1]-test_size[0]})')
        train_data = data[train_size[0]:train_size[1]]
        dev_data = data[dev_size[0]:dev_size[1]]
        test_data = data[test_size[0]:test_size[1]]
        # Convert each split into (context, response) pairs...
        train_data = process_data(train_data)
        dev_data = process_data(dev_data)
        test_data = process_data(test_data)
        # ...and write the CSVs, adding retrieved distractors per row.
        write_file(train_data, mode='train')
        write_file(dev_data, mode='dev')
        write_file(test_data, mode='test')
    else:
        # Any other argument drops into the interactive ES chat loop.
        # data = read_file('zh50w/train.txt')
        # pairs = [(' . '.join(i[:-1]), i[-1]) for i in data]
        # ut = ESUtils('retrieval_chatbot', create_index=True)
        # ut.insert_pairs(pairs)
        chatbot = ESChat('retrieval_chatbot')
        chatbot.chat()
| [
"18811371908@163.com"
] | 18811371908@163.com |
d6cec8b6abf23f1fc332ef785d46d6d40a480c9f | 2e24f9a4378fab6de582421e0af3795fa0d8d5f5 | /models/model_c16_c32_c32_spp_r1024_atpl.py | 90fb80657a7bcbbbe08a3668164e84f08a906b3e | [] | no_license | ms-martin/ms-fiit-bc-cd | 38759fdbab5d7261722aa5b8c64a872085fd91ec | 3452d7b9744636ed0c73db9cf703447fdacf7551 | refs/heads/master | 2020-03-13T21:15:47.457258 | 2018-06-07T17:08:03 | 2018-06-07T17:08:03 | 131,292,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,605 | py | import tensorflow as tf
import numpy as np
class Siamese:
    """Siamese CNN + SPP + LSTM network with attentive temporal pooling.

    Two weight-shared "legs" embed two input sequences; an attention
    matrix compares the per-sequence embeddings, and a contrastive loss
    pulls matching pairs together. Built on the TF1 graph/placeholder API.
    """

    def __init__(self, training, batch_size, seq_len):
        # NOTE(review): ``training`` is forwarded to siamese_leg but unused
        # there — confirm whether dropout/BN gating was intended.
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.hidden_size = 1024
        # Each step is a flattened 128x64x3 frame (128*64*3 = 24576 floats).
        self.input1 = tf.placeholder(tf.float32, [None, 24576])
        self.input2 = tf.placeholder(tf.float32, [None, 24576])
        with tf.variable_scope("siamese") as scope:
            self.leg_out1 = self.siamese_leg(self.input1, training)
            # Reuse the same variables for the second leg (weight sharing).
            scope.reuse_variables()
            self.leg_out2 = self.siamese_leg(self.input2, training)
        self.out1, self.out2 = self.atpl_layer(self.leg_out1, self.leg_out2)
        self.labels = tf.placeholder(tf.float32, [None])
        self.loss = self.contrastive_loss(self.out1, self.out2)
        self.distance = self.euclidian_distance(self.out1, self.out2)

    def siamese_leg(self, inputx, training):
        """Embed one input sequence: conv stack -> SPP -> single-layer LSTM.

        Returns a [batch, seq_len, hidden_size] tensor of per-step states.
        """
        input_reshaped = tf.reshape(inputx, [self.batch_size * self.seq_len, 128, 64, 3])
        conv1 = self.conv_layer(input_reshaped, [5, 5, 3, 16], [16], "conv1")
        max1 = tf.layers.max_pooling2d(inputs=conv1,
                                       pool_size=[2, 2],
                                       strides=2,
                                       name="max1")
        conv2 = self.conv_layer(max1, [5, 5, 16, 32], [32], "conv2")
        max2 = tf.layers.max_pooling2d(inputs=conv2,
                                       pool_size=[2, 2],
                                       strides=2,
                                       name="max2")
        conv3 = self.conv_layer(max2, [5, 5, 32, 32], [32], "conv3")
        # Spatial pyramid pooling at 8/4/2/1 grid levels.
        spp = self.spp_layer(conv3, [8, 4, 2, 1], "spp")
        rnn = self.rnn_layers(spp, 1, self.hidden_size)
        rnn = tf.reshape(rnn, [self.batch_size, self.seq_len, self.hidden_size], name="rnn_flat")
        return rnn

    def spp_layer(self, input_, levels, name):
        """Spatial pyramid pooling over a conv feature map.

        For each ``level`` the map is max-pooled into roughly level x level
        cells; all levels are flattened and concatenated, then the result is
        unstacked into a list of seq_len tensors (the static_rnn input form).
        """
        shape = input_.get_shape().as_list()
        with tf.variable_scope(name):
            pool_outputs = []
            for level in levels:
                kernel = [1,
                          np.ceil(shape[1] * 1.0 / level).astype(np.int32),
                          np.ceil(shape[2] * 1.0 / level).astype(np.int32),
                          1]
                stride = [1,
                          np.floor(shape[1] * 1.0 / level).astype(np.int32),
                          np.floor(shape[2] * 1.0 / level).astype(np.int32),
                          1]
                poll = tf.nn.max_pool(value=input_,
                                      ksize=kernel,
                                      strides=stride,
                                      padding='SAME',
                                      name="spp_pool")
                pool_outputs.append(tf.reshape(poll, [shape[0], -1]))
            spp_pool = tf.concat(pool_outputs, 1)
            spp_pool = tf.reshape(spp_pool, [self.seq_len, self.batch_size, -1])
            spp_pool = tf.unstack(spp_pool)
            return spp_pool

    def atpl_layer(self, input1, input2):
        """Attentive temporal pooling: tanh(in1 * W * in2^T), then max over
        each axis to produce one pooled vector per sequence."""
        temp_mat = tf.get_variable(name="temp_mat",
                                   shape=[self.batch_size, self.hidden_size, self.hidden_size],
                                   dtype=tf.float32,
                                   initializer=tf.random_normal_initializer)
        in1_temp_mat = tf.matmul(input1, temp_mat, name="in1_temp_mat")
        in1_temp_mat_in2 = tf.matmul(in1_temp_mat, input2, transpose_b=True, name="in1_temp_mat_in2")
        atpl_mat = tf.tanh(in1_temp_mat_in2, name="atpl_mat")
        max_row = tf.reduce_max(atpl_mat, axis=1, name="max_row")
        max_col = tf.reduce_max(atpl_mat, axis=2, name="max_col")
        return max_row, max_col

    def rnn_layers(self, inputs, num_layers, hidden_size):
        """Stack ``num_layers`` LSTM cells and unroll them statically over
        ``inputs`` (a list of per-step tensors); returns per-step outputs."""
        rnn_cells = []
        for _ in range(num_layers):
            cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_size,
                                                state_is_tuple=True)
            rnn_cells.append(cell)
        multi_cell = tf.nn.rnn_cell.MultiRNNCell(cells=rnn_cells,
                                                 state_is_tuple=True)
        outputs, _ = tf.nn.static_rnn(cell=multi_cell,
                                      inputs=inputs,
                                      dtype=tf.float32)
        return outputs

    def conv_layer(self, inputx, kernel_shape, bias_shape, name):
        """SAME-padded stride-1 conv + bias + ReLU with named variables."""
        weights = tf.get_variable(name=name + "_weights",
                                  dtype=tf.float32,
                                  shape=kernel_shape,
                                  initializer=tf.random_normal_initializer)
        biases = tf.get_variable(name=name + "_biases",
                                 dtype=tf.float32,
                                 shape=bias_shape,
                                 initializer=tf.constant_initializer)
        conv = tf.nn.conv2d(input=inputx,
                            filter=weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        return tf.nn.relu(conv + biases)

    def fc_layer(self, _input, units, name):
        """Fully-connected layer without activation; expects a 2-D input."""
        assert len(_input.get_shape()) == 2
        n_prev_weight = _input.get_shape()[1]
        initializer = tf.truncated_normal_initializer(stddev=0.01)
        weights = tf.get_variable(name=name + '_weights',
                                  dtype=tf.float32,
                                  shape=[n_prev_weight, units],
                                  initializer=initializer)
        biases = tf.get_variable(name=name + '_biases',
                                 dtype=tf.float32,
                                 shape=[units],
                                 initializer=tf.constant_initializer)
        return tf.nn.bias_add(tf.matmul(_input, weights), biases)

    def euclidian_distance(self, out1, out2):
        """Per-sample Euclidean distance; 1e-6 keeps sqrt's gradient finite."""
        euclidian2 = tf.pow(tf.subtract(out1, out2), 2)
        euclidian2 = tf.reduce_sum(euclidian2, 1)
        return tf.sqrt(euclidian2 + 1e-6, name="distance")

    def contrastive_loss(self, out1, out2):
        """Contrastive loss with margin 5: positives pay the distance,
        negatives pay max(margin - distance, 0)^2."""
        margin = 5.0
        c = tf.constant(margin)
        labels_true = self.labels
        labels_false = tf.subtract(1.0, self.labels, name="1-y")
        euclidian = self.euclidian_distance(out1, out2)
        pos = tf.multiply(labels_true, euclidian, name="y_x_distance")
        neg = tf.multiply(labels_false, tf.pow(tf.maximum(tf.subtract(c, euclidian), 0), 2), name="ny_x_c-distance_2")
        losses = tf.add(pos, neg, name="losses")
        loss = tf.reduce_mean(losses, name="loss")
        return loss
| [
"martin.stano@icloud.com"
] | martin.stano@icloud.com |
4157e5f98516854aa513cf5633fc4ea29b69a95f | 0993e97f6e34cbbda1f03b4c120499e41918100a | /paranuara_challenge/paranuara/models.py | b507e308b3f9a21cb5e9e21099af7d9950f290a5 | [] | no_license | sam-mi/paranuara | 41407a7441eb8b86c2e346330a038b74fad7931d | cb0aa92237e93c1af6c2067c4424dd5773e652a8 | refs/heads/master | 2023-01-11T08:46:55.103912 | 2019-08-08T00:11:02 | 2019-08-08T00:11:02 | 199,132,158 | 0 | 0 | null | 2022-12-26T20:39:22 | 2019-07-27T07:29:38 | Python | UTF-8 | Python | false | false | 3,950 | py | import uuid
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import QuerySet
from django.utils import timezone
from model_utils import Choices
from model_utils.models import TimeStampedModel
from taggit.managers import TaggableManager
from paranuara_challenge.paranuara.helpers import FOOD_CLASSIFICATION_CHOICES
class Company(TimeStampedModel):
    """
    A Company on Paranuara. Referenced by Person.company, whose reverse
    relation is 'employees'.
    """
    # Company names are unique across the colony.
    name = models.CharField(
        max_length=255,
        unique=True
    )

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class Food(TimeStampedModel):
    """
    Food available on Paranuara; Person.food records favourites.
    """
    # Single-letter classification code; choices come from the shared
    # FOOD_CLASSIFICATION_CHOICES helper.
    classification = models.CharField(
        max_length=1,
        choices=FOOD_CLASSIFICATION_CHOICES,
        default=FOOD_CLASSIFICATION_CHOICES.unknown
    )
    name = models.CharField(
        max_length=55,
        unique=True,
    )

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
# model_utils Choices triples: (stored value, attribute name, human label).
GENDER_CHOICES = Choices(
    ('unknown', 'unknown', 'Unknown'),
    ('male', 'male', 'Male'),
    ('female', 'female', 'Female'),
    ('non_binary', 'non_binary', 'Non Binary'),
)
class Person(TimeStampedModel):
    """
    A Citizen of the Colony of Paranuara.

    Imported from an external system (``_id``); friendships may arrive
    before the friend rows exist, in which case their ids wait in
    ``_friend_cache`` until ``update_friends`` resolves them.
    """
    #### REQUIRED ############################################
    # Not the primary key because django-taggit's generic relation needs
    # the default integer pk ("dammit taggit!").
    guid = models.UUIDField(
        primary_key=False,  # dammit taggit!
        default=uuid.uuid4
    )
    _id = models.CharField(
        max_length=24,
        help_text="External System ID"
    )
    name = models.CharField(
        max_length=255,
    )
    email = models.EmailField()
    #### RELATIONS ############################################
    company = models.ForeignKey(
        Company,
        db_index=True,
        blank=True, null=True,
        on_delete=models.SET_NULL,
        related_name='employees'
    )
    friends = models.ManyToManyField(
        "Person",
        blank=True
    )
    # NOTE(review): default is dict but update_friends iterates it as a
    # list of {'id': ...} dicts — confirm the loader stores a list here.
    _friend_cache = JSONField(
        default=dict,
        help_text="Storage for friend ids if related friend has "
                  "not been created yet",
        blank=True
    )
    food = models.ManyToManyField(
        "Food",
        help_text="Favourite Foods",
        blank=True
    )
    #### OPTIONAL ############################################
    address = models.TextField(
        max_length=255,
        blank=True,
    )
    registered = models.DateTimeField(
        default=timezone.now,
    )
    gender = models.CharField(
        max_length=12,
        choices=GENDER_CHOICES,
        default=GENDER_CHOICES.unknown
    )
    # NOTE(review): blank=True without null=True or a default — saving a
    # Person without an age will fail at the database level; confirm.
    age = models.PositiveSmallIntegerField(
        blank=True,
    )
    picture = models.CharField(
        max_length=255,
        help_text="URL for picture",
        blank=True,
    )
    greeting = models.CharField(
        max_length=255,
        blank=True
    )
    phone = models.CharField(
        max_length=30,
        blank=True,
    )
    eye_color = models.CharField(
        max_length=15,
        blank=True
    )
    # Up to 9999.99 (6 digits, 2 decimal places).
    balance = models.DecimalField(
        decimal_places=2,
        default=0.00,
        max_digits=6
    )
    has_died = models.BooleanField(
        default=False,
    )
    about = models.TextField(
        blank=True
    )

    def update_friends(self) -> QuerySet:
        """
        Checks whether the friends in the `_friend_cache` exist and
        assigns them to `friends` if they do.
        :return: updated friends queryset
        """
        friend_ids = [
            item['id'] for item in self._friend_cache
        ]
        friends = Person.objects.filter(pk__in=friend_ids)
        self.friends.add(*friends)
        return self.friends.all()

    def get_balance(self) -> str:
        # Dollar-formatted balance, e.g. '$123.45'.
        return f'${str(self.balance)}'

    tags = TaggableManager(blank=True)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('-pk',)
| [
"sam@purecreative.digital"
] | sam@purecreative.digital |
d790cc0847a145a66de4e78a92d0394208a82ad9 | a19179d51e494b1fed9eed94e72a2d01b67fcb44 | /xylophone/project/rooms/room1.py | 78b64d325b104a1e054346b7b3d3b268afae1f3a | [
"MIT"
] | permissive | Turysaz/xylophone | d4752b926f80fab5350e1e6eb36376f640df6b10 | da44b8127aa6b89d6cdb3bdb564c386520b37e22 | refs/heads/master | 2021-09-11T16:41:31.632019 | 2018-04-09T20:46:00 | 2018-04-09T20:46:00 | 124,000,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py |
def pre_step(self):
    """Room hook executed before each step; currently a debug marker."""
    marker = "TestPRE"
    print(marker)
def post_step(self):
    """Room hook executed after each step; currently a debug marker."""
    marker = "TestPOST"
    print(marker)
| [
"turysaz@posteo.org"
] | turysaz@posteo.org |
df6ef7c6944817a25ccbfd8559fcf9785e64e3cc | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_07_01/operations/_managed_clusters_operations.py | bdbec1f83cfe156583cef0b5a15d8fa90b4f386b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 144,531 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Generic type variable used to type the optional ``cls`` response-transform callback.
T = TypeVar("T")
# Shape of the optional ``cls`` callback accepted by every operation:
# (pipeline response, deserialized body, response headers) -> transformed result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer shared by all request builders below.
# Client-side validation of parameter constraints is turned off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_os_options_request(
    location: str, subscription_id: str, *, resource_type: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the default OS options of a location."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/osOptions/default",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        location=_SERIALIZER.url("location", location, "str"),
    )

    # Query string and headers.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if resource_type is not None:
        prms["resource-type"] = _SERIALIZER.query("resource_type", resource_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request listing every managed cluster in a subscription."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameter and splice it into the URL template.
    url_template = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters"
    )
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request listing the managed clusters in one resource group."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_get_upgrade_profile_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a managed cluster's default upgrade profile."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_get_access_profile_request(
    resource_group_name: str, resource_name: str, role_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request fetching the access profile (kubeconfig) for a role."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
        roleName=_SERIALIZER.url("role_name", role_name, "str"),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_cluster_admin_credentials_request(
    resource_group_name: str,
    resource_name: str,
    subscription_id: str,
    *,
    server_fqdn: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request listing a cluster's admin credentials."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if server_fqdn is not None:
        prms["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_cluster_user_credentials_request(
    resource_group_name: str,
    resource_name: str,
    subscription_id: str,
    *,
    server_fqdn: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request listing a cluster's user credentials."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if server_fqdn is not None:
        prms["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_cluster_monitoring_user_credentials_request(
    resource_group_name: str,
    resource_name: str,
    subscription_id: str,
    *,
    server_fqdn: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request listing a cluster's monitoring-user credentials."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if server_fqdn is not None:
        prms["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request for a single managed cluster resource."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_create_or_update_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a managed cluster."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", hdrs.pop("Content-Type", None))  # type: Optional[str]

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers; Content-Type is only set when a body is sent.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="PUT", url=url, params=prms, headers=hdrs, **kwargs)
def build_update_tags_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates the tags on a managed cluster."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", hdrs.pop("Content-Type", None))  # type: Optional[str]

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers; Content-Type is only set when a body is sent.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="PATCH", url=url, params=prms, headers=hdrs, **kwargs)
def build_delete_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a managed cluster resource."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="DELETE", url=url, params=prms, headers=hdrs, **kwargs)
def build_reset_service_principal_profile_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that resets a cluster's service principal profile."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", hdrs.pop("Content-Type", None))  # type: Optional[str]

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers; Content-Type is only set when a body is sent.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_reset_aad_profile_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that resets a cluster's AAD profile."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", hdrs.pop("Content-Type", None))  # type: Optional[str]

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers; Content-Type is only set when a body is sent.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_rotate_cluster_certificates_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that rotates a cluster's certificates."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_stop_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that stops a managed cluster."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_start_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that starts a stopped managed cluster."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_run_command_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that submits a command to run inside a cluster."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", hdrs.pop("Content-Type", None))  # type: Optional[str]

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    # Query string and headers; Content-Type is only set when a body is sent.
    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")

    return HttpRequest(method="POST", url=url, params=prms, headers=hdrs, **kwargs)
def build_get_command_result_request(
    resource_group_name: str, resource_name: str, command_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request fetching the result of a previously submitted command."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
        commandId=_SERIALIZER.url("command_id", command_id, "str"),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
def build_list_outbound_network_dependencies_endpoints_request(
    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing a cluster's outbound network dependencies."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    prms = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", prms.pop("api-version", "2021-07-01"))  # type: str

    # Serialize the path parameters and splice them into the URL template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
        resourceName=_SERIALIZER.url(
            "resource_name",
            resource_name,
            "str",
            max_length=63,
            min_length=1,
            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
        ),
    )

    prms["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", hdrs.pop("Accept", "application/json"), "str")
    return HttpRequest(method="GET", url=url, params=prms, headers=hdrs, **kwargs)
class ManagedClustersOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_07_01.ContainerServiceClient`'s
:attr:`managed_clusters` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get_os_options(
        self, location: str, resource_type: Optional[str] = None, **kwargs: Any
    ) -> _models.OSOptionProfile:
        """Gets supported OS options in the specified subscription.

        Gets supported OS options in the specified subscription.

        :param location: The name of a supported Azure region. Required.
        :type location: str
        :param resource_type: The resource type for which the OS options needs to be returned. Default
         value is None.
        :type resource_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OSOptionProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.OSOptionProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default mapping from error status codes to exception types; callers
        # may extend or override entries via the ``error_map`` keyword argument.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Callers may pin a different api-version; otherwise this module's default applies.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.OSOptionProfile]

        # Build the raw HTTP request from the module-level request builder.
        request = build_get_os_options_request(
            location=location,
            subscription_id=self._config.subscription_id,
            resource_type=resource_type,
            api_version=api_version,
            template_url=self.get_os_options.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        # Send the request synchronously through the client pipeline.
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an exception (per error_map or ARM format).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("OSOptionProfile", pipeline_response)

        # If the caller supplied a ``cls`` transform, its result wins.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    # URL template consumed above as the default ``template_url``.
    get_os_options.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/osOptions/default"}  # type: ignore
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.ManagedCluster"]:
    """Gets a list of managed clusters in the specified subscription.

    Gets a list of managed clusters in the specified subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ManagedCluster or the result of cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedClusterListResult]

    # HTTP status -> exception type mapping; callers may extend it via the
    # "error_map" keyword argument.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # Build the request for one page: the canonical list URL for the first
        # page, or the service-provided next_link for subsequent pages.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged the continuation link plus
        # an iterator over this page's elements.
        deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Execute the request for one page and fail fast on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters"}  # type: ignore
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ManagedCluster"]:
    """Lists managed clusters in the specified subscription and resource group.

    Lists managed clusters in the specified subscription and resource group.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ManagedCluster or the result of cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedClusterListResult]

    # HTTP status -> exception type mapping; callers may extend it via the
    # "error_map" keyword argument.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # Build the request for one page: the canonical list URL for the first
        # page, or the service-provided next_link for subsequent pages.
        if not next_link:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_resource_group.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged the continuation link plus
        # an iterator over this page's elements.
        deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Execute the request for one page and fail fast on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters"}  # type: ignore
@distributed_trace
def get_upgrade_profile(
    self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.ManagedClusterUpgradeProfile:
    """Gets the upgrade profile of a managed cluster.

    Gets the upgrade profile of a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ManagedClusterUpgradeProfile or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterUpgradeProfile
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedClusterUpgradeProfile]

    # Build and normalize the GET request for the default upgrade profile.
    req = build_get_upgrade_profile_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get_upgrade_profile.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    profile = self._deserialize("ManagedClusterUpgradeProfile", pipeline_result)
    return cls(pipeline_result, profile, {}) if cls else profile

get_upgrade_profile.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default"}  # type: ignore
@distributed_trace
def get_access_profile(
    self, resource_group_name: str, resource_name: str, role_name: str, **kwargs: Any
) -> _models.ManagedClusterAccessProfile:
    """Gets an access profile of a managed cluster.

    **WARNING**\ : This API will be deprecated. Instead use `ListClusterUserCredentials
    <https://docs.microsoft.com/rest/api/aks/managedclusters/listclusterusercredentials>`_ or
    `ListClusterAdminCredentials
    <https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param role_name: The name of the role for managed cluster accessProfile resource. Required.
    :type role_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ManagedClusterAccessProfile or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterAccessProfile
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedClusterAccessProfile]

    # Build and normalize the listCredential request for the requested role.
    req = build_get_access_profile_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        role_name=role_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get_access_profile.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    profile = self._deserialize("ManagedClusterAccessProfile", pipeline_result)
    return cls(pipeline_result, profile, {}) if cls else profile

get_access_profile.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential"}  # type: ignore
@distributed_trace
def list_cluster_admin_credentials(
    self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
) -> _models.CredentialResults:
    """Lists the admin credentials of a managed cluster.

    Lists the admin credentials of a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
    :type server_fqdn: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CredentialResults or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.CredentialResults
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.CredentialResults]

    # Build and normalize the listClusterAdminCredential request.
    req = build_list_cluster_admin_credentials_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        server_fqdn=server_fqdn,
        api_version=api_version,
        template_url=self.list_cluster_admin_credentials.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    creds = self._deserialize("CredentialResults", pipeline_result)
    return cls(pipeline_result, creds, {}) if cls else creds

list_cluster_admin_credentials.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential"}  # type: ignore
@distributed_trace
def list_cluster_user_credentials(
    self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
) -> _models.CredentialResults:
    """Lists the user credentials of a managed cluster.

    Lists the user credentials of a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
    :type server_fqdn: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CredentialResults or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.CredentialResults
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.CredentialResults]

    # Build and normalize the listClusterUserCredential request.
    req = build_list_cluster_user_credentials_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        server_fqdn=server_fqdn,
        api_version=api_version,
        template_url=self.list_cluster_user_credentials.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    creds = self._deserialize("CredentialResults", pipeline_result)
    return cls(pipeline_result, creds, {}) if cls else creds

list_cluster_user_credentials.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential"}  # type: ignore
@distributed_trace
def list_cluster_monitoring_user_credentials(
    self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
) -> _models.CredentialResults:
    """Lists the cluster monitoring user credentials of a managed cluster.

    Lists the cluster monitoring user credentials of a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
    :type server_fqdn: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CredentialResults or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.CredentialResults
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.CredentialResults]

    # Build and normalize the listClusterMonitoringUserCredential request.
    req = build_list_cluster_monitoring_user_credentials_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        server_fqdn=server_fqdn,
        api_version=api_version,
        template_url=self.list_cluster_monitoring_user_credentials.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    creds = self._deserialize("CredentialResults", pipeline_result)
    return cls(pipeline_result, creds, {}) if cls else creds

list_cluster_monitoring_user_credentials.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential"}  # type: ignore
@distributed_trace
def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedCluster:
    """Gets a managed cluster.

    Gets a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ManagedCluster or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP status -> exception mapping, extensible through the "error_map" kwarg.
    err_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    err_map.update(kwargs.pop("error_map", {}) or {})

    headers = kwargs.pop("headers", {}) or {}
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedCluster]

    # Build and normalize the GET request for a single managed cluster.
    req = build_get_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=headers,
        params=params,
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)  # type: ignore

    pipeline_result = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        req, stream=False, **kwargs
    )
    http_resp = pipeline_result.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=err_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    cluster = self._deserialize("ManagedCluster", pipeline_result)
    return cls(pipeline_result, cluster, {}) if cls else cluster

get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"}  # type: ignore
def _create_or_update_initial(
    self, resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO], **kwargs: Any
) -> _models.ManagedCluster:
    # Issues the initial PUT of the create-or-update long-running operation and
    # returns the (partially provisioned) ManagedCluster from the first response.
    # Called by begin_create_or_update; not part of the public surface.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedCluster]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes bodies are sent as-is; model bodies are serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "ManagedCluster")

    request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._create_or_update_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # 200 = updated existing cluster, 201 = created new one; both carry the resource.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize("ManagedCluster", pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize("ManagedCluster", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"}  # type: ignore
@overload
# Overload stub: typed-model body (JSON). The shared implementation below
# dispatches on the runtime type of *parameters*.
def begin_create_or_update(
    self,
    resource_group_name: str,
    resource_name: str,
    parameters: _models.ManagedCluster,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> LROPoller[_models.ManagedCluster]:
    """Creates or updates a managed cluster.

    Creates or updates a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param parameters: The managed cluster to create or update. Required.
    :type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either ManagedCluster or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
# Overload stub: raw IO body (pre-serialized payload). The shared
# implementation below dispatches on the runtime type of *parameters*.
def begin_create_or_update(
    self,
    resource_group_name: str,
    resource_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> LROPoller[_models.ManagedCluster]:
    """Creates or updates a managed cluster.

    Creates or updates a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param parameters: The managed cluster to create or update. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either ManagedCluster or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace
def begin_create_or_update(
    self, resource_group_name: str, resource_name: str, parameters: Union[_models.ManagedCluster, IO], **kwargs: Any
) -> LROPoller[_models.ManagedCluster]:
    """Creates or updates a managed cluster.

    Creates or updates a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param parameters: The managed cluster to create or update. Is either a model type or a IO
     type. Required.
    :type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either ManagedCluster or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedCluster]
    polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._create_or_update_initial(  # type: ignore
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final resource once polling completes.
        deserialized = self._deserialize("ManagedCluster", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling=True -> ARM default polling; polling=False -> no polling;
    # anything else is treated as a caller-supplied PollingMethod.
    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
    elif polling is False:
        polling_method = cast(PollingMethod, NoPolling())
    else:
        polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"}  # type: ignore
def _update_tags_initial(
    self, resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO], **kwargs: Any
) -> _models.ManagedCluster:
    # Issues the initial PATCH of the update-tags long-running operation and
    # returns the ManagedCluster from the first response.
    # Called by begin_update_tags; not part of the public surface.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ManagedCluster]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw IO/bytes bodies are sent as-is; model bodies are serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "TagsObject")

    request = build_update_tags_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._update_tags_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("ManagedCluster", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_update_tags_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"}  # type: ignore
@overload
# Overload stub: typed-model body (JSON). The shared implementation
# dispatches on the runtime type of *parameters*.
def begin_update_tags(
    self,
    resource_group_name: str,
    resource_name: str,
    parameters: _models.TagsObject,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> LROPoller[_models.ManagedCluster]:
    """Updates tags on a managed cluster.

    Updates tags on a managed cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
    :type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.TagsObject
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either ManagedCluster or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    @overload
    def begin_update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.ManagedCluster]:
        """Updates tags on a managed cluster.

        Updates tags on a managed cluster.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (raw-IO-body variant): never executed at
        # runtime; all calls are handled by the @distributed_trace
        # implementation of begin_update_tags defined later in this class.
@distributed_trace
def begin_update_tags(
self, resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO], **kwargs: Any
) -> LROPoller[_models.ManagedCluster]:
"""Updates tags on a managed cluster.
Updates tags on a managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either
a model type or a IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.TagsObject or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.ManagedCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedCluster]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ManagedCluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"} # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
"""Deletes a managed cluster.
Deletes a managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}"} # type: ignore
def _reset_service_principal_profile_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ManagedClusterServicePrincipalProfile")
request = build_reset_service_principal_profile_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._reset_service_principal_profile_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile"} # type: ignore
    @overload
    def begin_reset_service_principal_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: _models.ManagedClusterServicePrincipalProfile,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Reset the Service Principal Profile of a managed cluster.

        This action cannot be performed on a cluster that is not using a service principal.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The service principal profile to set on the managed cluster. Required.
        :type parameters:
         ~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterServicePrincipalProfile
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (model-body variant): never executed at
        # runtime; all calls are handled by the @distributed_trace
        # implementation defined later in this class.
    @overload
    def begin_reset_service_principal_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Reset the Service Principal Profile of a managed cluster.

        This action cannot be performed on a cluster that is not using a service principal.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The service principal profile to set on the managed cluster. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (raw-IO-body variant): never executed at
        # runtime; all calls are handled by the @distributed_trace
        # implementation defined later in this class.
@distributed_trace
def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO],
**kwargs: Any
) -> LROPoller[None]:
"""Reset the Service Principal Profile of a managed cluster.
This action cannot be performed on a cluster that is not using a service principal.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The service principal profile to set on the managed cluster. Is either a
model type or a IO type. Required.
:type parameters:
~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterServicePrincipalProfile or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_service_principal_profile_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile"} # type: ignore
def _reset_aad_profile_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterAADProfile, IO],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ManagedClusterAADProfile")
request = build_reset_aad_profile_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._reset_aad_profile_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile"} # type: ignore
    @overload
    def begin_reset_aad_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: _models.ManagedClusterAADProfile,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Reset the AAD Profile of a managed cluster.

        Reset the AAD Profile of a managed cluster.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The AAD profile to set on the Managed Cluster. Required.
        :type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterAADProfile
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (model-body variant): never executed at
        # runtime; all calls are handled by the @distributed_trace
        # implementation defined later in this class.
    @overload
    def begin_reset_aad_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Reset the AAD Profile of a managed cluster.

        Reset the AAD Profile of a managed cluster.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The AAD profile to set on the Managed Cluster. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (raw-IO-body variant): never executed at
        # runtime; all calls are handled by the @distributed_trace
        # implementation defined later in this class.
@distributed_trace
def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterAADProfile, IO],
**kwargs: Any
) -> LROPoller[None]:
"""Reset the AAD Profile of a managed cluster.
Reset the AAD Profile of a managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The AAD profile to set on the Managed Cluster. Is either a model type or a
IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_07_01.models.ManagedClusterAADProfile or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_aad_profile_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile"} # type: ignore
def _rotate_cluster_certificates_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_rotate_cluster_certificates_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._rotate_cluster_certificates_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates"} # type: ignore
@distributed_trace
def begin_rotate_cluster_certificates(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Rotates the certificates of a managed cluster.
See `Certificate rotation <https://docs.microsoft.com/azure/aks/certificate-rotation>`_ for
more details about rotating managed cluster certificates.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._rotate_cluster_certificates_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_cluster_certificates.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates"} # type: ignore
def _stop_initial(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
    """Send the initial Stop request for the long-running operation.

    Issues POST .../managedClusters/{resourceName}/stop and returns once the
    service has accepted (202) or already completed (204) the request;
    polling to completion is handled by :meth:`begin_stop`.

    :param resource_group_name: The name of the resource group. Required.
    :param resource_name: The name of the managed cluster resource. Required.
    :raises ~azure.core.exceptions.HttpResponseError: on any non-202/204 status.
    """
    # Map well-known ARM error status codes to typed exceptions; callers can
    # extend or override this via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_stop_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._stop_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 = operation accepted/in progress, 204 = nothing to do; anything else
    # is surfaced as a (possibly mapped) HttpResponseError.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # The operation has no response body; only a custom ``cls`` yields a value.
    if cls:
        return cls(pipeline_response, None, {})

_stop_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop"}  # type: ignore
@distributed_trace
def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
    """Stops a Managed Cluster.

    This can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a
    cluster stops the control plane and agent nodes entirely, while maintaining all object and
    cluster state. A cluster does not accrue charges while it is stopped. See `stopping a cluster
    <https://docs.microsoft.com/azure/aks/start-stop-cluster>`_ for more details about stopping a
    cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._stop_initial(  # type: ignore
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw PipelineResponse so the poller can read headers
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # Stop has no response body; only a custom ``cls`` produces a value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
    elif polling is False:
        polling_method = cast(PollingMethod, NoPolling())
    else:
        polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_stop.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop"}  # type: ignore
def _start_initial(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
    """Send the initial Start request for the long-running operation.

    Issues POST .../managedClusters/{resourceName}/start and returns once the
    service has accepted (202) or already completed (204) the request;
    polling to completion is handled by :meth:`begin_start`.

    :param resource_group_name: The name of the resource group. Required.
    :param resource_name: The name of the managed cluster resource. Required.
    :raises ~azure.core.exceptions.HttpResponseError: on any non-202/204 status.
    """
    # Map well-known ARM error status codes to typed exceptions; callers can
    # extend or override this via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_start_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._start_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 202 = operation accepted/in progress, 204 = nothing to do; anything else
    # is surfaced as a (possibly mapped) HttpResponseError.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # The operation has no response body; only a custom ``cls`` yields a value.
    if cls:
        return cls(pipeline_response, None, {})

_start_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start"}  # type: ignore
@distributed_trace
def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
    """Starts a previously stopped Managed Cluster.

    See `starting a cluster <https://docs.microsoft.com/azure/aks/start-stop-cluster>`_ for more
    details about starting a cluster.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._start_initial(  # type: ignore
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # keep the raw PipelineResponse so the poller can read headers
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # Start has no response body; only a custom ``cls`` produces a value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
    elif polling is False:
        polling_method = cast(PollingMethod, NoPolling())
    else:
        polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_start.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start"}  # type: ignore
def _run_command_initial(
    self,
    resource_group_name: str,
    resource_name: str,
    request_payload: Union[_models.RunCommandRequest, IO],
    **kwargs: Any
) -> Optional[_models.RunCommandResult]:
    """Send the initial runCommand request; :meth:`begin_run_command` polls it.

    Returns the deserialized ``RunCommandResult`` on a 200 response and
    ``None`` on 202 (command accepted, still running).

    :param resource_group_name: The name of the resource group. Required.
    :param resource_name: The name of the managed cluster resource. Required.
    :param request_payload: The run command request, either a model or a raw
     stream/bytes payload.
    :raises ~azure.core.exceptions.HttpResponseError: on any non-200/202 status.
    """
    # Map well-known ARM error status codes to typed exceptions; callers can
    # extend or override this via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.RunCommandResult]]

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # A raw stream/bytes payload is forwarded as-is; a model is serialized to JSON.
    if isinstance(request_payload, (IO, bytes)):
        _content = request_payload
    else:
        _json = self._serialize.body(request_payload, "RunCommandRequest")

    request = build_run_command_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._run_command_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    # Only a 200 carries a result body; a 202 leaves ``deserialized`` as None.
    if response.status_code == 200:
        deserialized = self._deserialize("RunCommandResult", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_run_command_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand"}  # type: ignore
# Typing overload: typed model payload (serialized to JSON).
@overload
def begin_run_command(
    self,
    resource_group_name: str,
    resource_name: str,
    request_payload: _models.RunCommandRequest,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> LROPoller[_models.RunCommandResult]:
    """Submits a command to run against the Managed Cluster.

    AKS will create a pod to run the command. This is primarily useful for private clusters. For
    more information see `AKS Run Command
    <https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param request_payload: The run command request. Required.
    :type request_payload: ~azure.mgmt.containerservice.v2021_07_01.models.RunCommandRequest
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
# Typing overload: raw stream/bytes payload (forwarded as-is).
@overload
def begin_run_command(
    self,
    resource_group_name: str,
    resource_name: str,
    request_payload: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> LROPoller[_models.RunCommandResult]:
    """Submits a command to run against the Managed Cluster.

    AKS will create a pod to run the command. This is primarily useful for private clusters. For
    more information see `AKS Run Command
    <https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param request_payload: The run command request. Required.
    :type request_payload: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace
def begin_run_command(
    self,
    resource_group_name: str,
    resource_name: str,
    request_payload: Union[_models.RunCommandRequest, IO],
    **kwargs: Any
) -> LROPoller[_models.RunCommandResult]:
    """Submits a command to run against the Managed Cluster.

    AKS will create a pod to run the command. This is primarily useful for private clusters. For
    more information see `AKS Run Command
    <https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param request_payload: The run command request. Is either a model type or an IO type.
     Required.
    :type request_payload: ~azure.mgmt.containerservice.v2021_07_01.models.RunCommandRequest or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either RunCommandResult or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2021_07_01.models.RunCommandResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.RunCommandResult]
    polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._run_command_initial(  # type: ignore
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            request_payload=request_payload,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # keep the raw PipelineResponse so the poller can read headers
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final RunCommandResult once polling completes.
        deserialized = self._deserialize("RunCommandResult", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
    elif polling is False:
        polling_method = cast(PollingMethod, NoPolling())
    else:
        polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_run_command.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand"}  # type: ignore
@distributed_trace
def get_command_result(
    self, resource_group_name: str, resource_name: str, command_id: str, **kwargs: Any
) -> Optional[_models.RunCommandResult]:
    """Gets the results of a command which has been run on the Managed Cluster.

    Gets the results of a command which has been run on the Managed Cluster.
    Returns ``None`` when the service answers 202 (the command has not finished
    yet); the deserialized result is only available on a 200 response.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :param command_id: Id of the command. Required.
    :type command_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RunCommandResult or None or the result of cls(response)
    :rtype: ~azure.mgmt.containerservice.v2021_07_01.models.RunCommandResult or None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map well-known ARM error status codes to typed exceptions; callers can
    # extend or override this via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.RunCommandResult]]

    request = build_get_command_result_request(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        command_id=command_id,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get_command_result.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    # Only a 200 carries a result body; a 202 leaves ``deserialized`` as None.
    if response.status_code == 200:
        deserialized = self._deserialize("RunCommandResult", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get_command_result.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}"}  # type: ignore
@distributed_trace
def list_outbound_network_dependencies_endpoints(
    self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> Iterable["_models.OutboundEnvironmentEndpoint"]:
    """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
    specified managed cluster.

    Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
    specified managed cluster. The operation returns properties of each egress endpoint.

    :param resource_group_name: The name of the resource group. Required.
    :type resource_group_name: str
    :param resource_name: The name of the managed cluster resource. Required.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_07_01.models.OutboundEnvironmentEndpoint]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))  # type: str
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.OutboundEnvironmentEndpointCollection]

    # Map well-known ARM error status codes to typed exceptions; callers can
    # extend or override this via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page uses the templated URL; subsequent pages follow the
        # service-provided next_link, re-applying the client's api-version.
        if not next_link:

            request = build_list_outbound_network_dependencies_endpoints_request(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand back (next_link, iterator-of-items).
        deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, raising for any non-200 response.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_outbound_network_dependencies_endpoints.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints"}  # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
40bd32d463382e2855ae9a229d4491ed9f19b3f7 | e2379f0cf35337ad9af40d32dd2f43f19cebb90e | /pymock/mock_object.py | e65e9066bddf2f7972c46f833814dde04d7b1fd8 | [
"MIT"
] | permissive | diddi-/pymock | 6548a81274a7b9c870cba80f712183e8f6e625ed | 53acd56a0e239bdc521e02a12bab81c4ec65115c | refs/heads/master | 2023-02-05T02:39:36.357882 | 2020-12-20T13:12:13 | 2020-12-20T13:12:13 | 322,200,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | from typing import Optional
from pymock.action.call_action import CallAction
from pymock.argsmap import ArgsMap
class MockObject:
    """Recursive mock: attribute access and calls lazily create child mocks.

    Internal state uses ``__PyMock__``-prefixed names so Python's name
    mangling keeps them out of the way of mocked attribute names.
    """

    def __init__(self):
        # Child mocks produced by attribute access, keyed by attribute name.
        self.__PyMock__internal__attributes = {}
        # Child mocks produced by calling this mock, keyed by call arguments.
        self.__PyMock__internal__calls = ArgsMap()
        # Optional action supplying a concrete value/behaviour for this mock.
        self.__PyMock__internal__value: Optional[CallAction] = None

    def __getattr__(self, item):
        # Reuse the child mock for a previously seen attribute name,
        # otherwise create and remember a fresh one.
        attrs = self.__PyMock__internal__attributes
        if item not in attrs:
            attrs[item] = MockObject()
        return attrs[item]

    def __call__(self, *args, **kwargs):
        # One child mock per distinct positional-argument tuple.
        calls = self.__PyMock__internal__calls
        if calls.has_args(args):
            child = calls.get(args)
        else:
            child = MockObject()
            calls.add(args, child)
        # A configured action short-circuits to its concrete result.
        action = child._MockObject__PyMock__internal__value
        return child if action is None else action.execute()

    def __eq__(self, other):
        action = self.__PyMock__internal__value
        if action is not None:
            return action == other
        # Unconfigured mocks only equal themselves.
        return isinstance(other, MockObject) and self is other

    def __str__(self):
        action = self.__PyMock__internal__value
        return f"<MockObject id={id(self)}>" if action is None else str(action)
"diddi@diddi.se"
] | diddi@diddi.se |
471617c5f53e57e2db18ac38d5151c4806dfe08d | 17c169be4ba5fe852ad20336b454f20074f5c6b0 | /COEN317MapRMaster/SaveServerIP.py | 8f1a5eee73213fc48ad3cd299f5ee5108a1660ed | [] | no_license | NishantVP/COEN317MapRMaster | ffd8f0f3cc89716b7c713a66e8af503ecde54b30 | 6f678f1e770eaf5773d4b74146cf923390027812 | refs/heads/master | 2021-01-10T16:00:47.812288 | 2016-03-12T07:23:32 | 2016-03-12T07:23:32 | 53,564,509 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import json,httplib
import sys
#n=len(sys.argv)
#for i in range(len(sys.argv)):
# if i>0:
# print sys.argv[i]
def str2bool(v):
    """Parse a truthy string ("True", "true", "TRUE", ...) into a bool.

    Fixes the original ``v.lower() in ("True")``: ``("True")`` is just the
    string ``"True"`` (not a tuple), so the expression was a substring test
    that returned False even for "True" itself.
    """
    return v.lower() == "true"
# Register this server's details in the Parse "ServerIP" class over HTTPS.
# Expected argv: <ServerName> <IP> <PORT> <Running>
# NOTE(review): Python 2 code (httplib, print statement); credentials are
# hard-coded Parse API keys.
connection = httplib.HTTPSConnection('api.parse.com', 443)
connection.connect()
connection.request('POST', '/1/classes/ServerIP', json.dumps({
    "ServerName":sys.argv[1],
    "IP": sys.argv[2],
    "PORT": sys.argv[3],
    "Running": sys.argv[4]
}), {
    "X-Parse-Application-Id": "qhAoTHhBnrpOjQkD4BJxBcC1vvUKm5zI0fbPjNq3",
    "X-Parse-REST-API-Key": "W7du6PZgzSXmk1tM4NOlNJAxNdTfh1TVujOluJTZ",
    "Content-Type": "application/json"
})
# Parse answers with the created object's metadata; echo its objectId so the
# caller can capture it.
results = json.loads(connection.getresponse().read())
#print results
print results['objectId']
| [
"nishant1914@gmail.com"
] | nishant1914@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.