blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b5f46f03fd3acf298116d84ec5c3e44a9f3af84 | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/login.py | 785f5e468166714bb35241f17932e9b1ce0d062a | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | a, b = [x for x in input().split()]
# Greedily build the lexicographically smallest login: a non-empty prefix
# of the first word followed by a non-empty prefix of the second word.
ans = a[0]
i = 1
j = 0
while i < len(a) or j < len(b):
    # Keep taking letters from the first word only while they are strictly
    # smaller than the second word's first letter.  On a tie we must switch
    # to the second word immediately: the original `b[j] < a[i]` comparison
    # produced e.g. "aaa" instead of the smaller "aa" for a="aa", b="ab".
    if i >= len(a) or b[j] <= a[i]:
        ans += b[j]
        j += 1
        break
    else:
        ans += a[i]
        i += 1
print(ans)
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
767a6dd112009fd8aff53556497a3da6699bf4ea | fd94f0f140e6437753a058e4849bfb79f8a69d7e | /Codes/PodoSighter_cnn_folder/xmltojson.py | 44a14327f32163b49734f58a5842f79457fb4cc3 | [] | no_license | SarderLab/PodoSighter | 6234e19ab269a3f6f739e3cbbe2650d40dcd54a1 | aa00c4848ba7ac99723a63cf2be4a5291f398e7f | refs/heads/main | 2023-06-17T03:23:14.633302 | 2021-07-04T18:24:43 | 2021-07-04T18:24:43 | 348,821,788 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,888 | py | import json
import xml.etree.ElementTree as ET
import logging
logging.basicConfig()
def xmltojson(root):
    """Convert an Aperio/ImageScope-style annotation XML tree into the JSON
    layer structure used by the viewer.

    Parameters
    ----------
    root : xml.etree.ElementTree.Element
        Root whose children are <Annotation> nodes, each holding an
        <Attributes><Attribute Name="..."> label and a <Regions> list of
        polygon <Region>/<Vertices>/<Vertex X Y Z> outlines.

    Returns
    -------
    dict
        When exactly one annotation is present: one layer document with
        "elements" (polyline dicts) and "name".
    list of dict
        When several annotations are present: one such document per
        annotation, line colors cycled from a fixed 15-color palette.

    Raises
    ------
    ValueError
        If the root has no annotation children.
    """
    color_list = ["rgb(0, 255, 128)", "rgb(0, 255, 255)", "rgb(255, 255, 0)", "rgb(255, 128, 0)", "rgb(0, 128, 255)",
                  "rgb(0, 0, 255)", "rgb(0, 102, 0)", "rgb(153, 0, 0)", "rgb(0, 153, 0)", "rgb(102, 0, 204)",
                  "rgb(76, 216, 23)", "rgb(102, 51, 0)", "rgb(128, 128, 128)", "rgb(0, 153, 153)", "rgb(0, 0, 0)"]
    annotations = list(root)
    if len(annotations) == 1:
        # Single annotation: a single document, always in the first color.
        return _annotation_to_layer(root.find('Annotation'), color_list[0])
    elif len(annotations) > 1:
        # Several annotations: one document each, cycling through the palette.
        return [_annotation_to_layer(child, color_list[n % 15])
                for n, child in enumerate(root, start=0)]
    else:
        raise ValueError('Check the format of json file')


def _annotation_to_layer(annotation, line_color):
    """Build one layer document (name + polyline elements) from a single
    <Annotation> element, drawing every region with ``line_color``."""
    name = annotation.find('Attributes').find('Attribute').get('Name')
    elements = []
    for region in annotation.find('Regions').findall('Region'):
        points = [[float(vertex.get('X')), float(vertex.get('Y')), float(vertex.get('Z'))]
                  for vertex in region.find('Vertices').findall('Vertex')]
        elements.append({
            "closed": True,
            "fillColor": "rgba(11, 156, 49, 0.8)",
            "lineColor": line_color,
            "lineWidth": 2,
            "points": points,
            "type": "polyline",
        })
    return {"elements": elements, "name": name}
| [
"noreply@github.com"
] | SarderLab.noreply@github.com |
d4f0c626e2bd451c7704118209afe8adf6d93c47 | 93b88de2ae87c4d7bed4d545fe38c502e84e1ba6 | /table/models.py | dee20de09b8933b6cbaa0e3a4cfd8823273031b1 | [] | no_license | jod35/empdata-table | b77fb8394f74cb71d50aeb1c2d5183d39f9fd5dd | 4bda87eb8f54b4e53c3adc534002f50a7e46c5f8 | refs/heads/master | 2020-12-20T05:23:17.126355 | 2020-01-25T05:49:20 | 2020-01-25T05:49:20 | 235,975,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from . import db
class Employee(db.Model):
    """SQLAlchemy model for one employee record (all fields required)."""
    id=db.Column(db.Integer(),primary_key=True)
    name=db.Column(db.String(40),nullable=False)
    age=db.Column(db.Integer(),nullable=False)
    gender=db.Column(db.String(10),nullable=False)
    salary=db.Column(db.Integer(),nullable=False)
    residence=db.Column(db.String(25),nullable=False)
    def __repr__(self):
        # Debug representation, e.g. "Employee Alice".
        return "Employee {}".format(self.name) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0d6d50fe03634a9956397e0cd037cd9f4ae7634e | 607e1b1ec5a41fd5f6cf83e7e20a1372717d2486 | /leetcode/62.py | a6d0a7914195cf7602733f2e272dab0afe4cdedd | [] | no_license | histuckyi/algorithm | 067e627e1672e858b3143440200262e0e5db495c | fb04bbd8cdb3ead707bb07abbc1688b99f7505a7 | refs/heads/master | 2023-07-08T05:22:49.049599 | 2023-06-24T07:00:25 | 2023-06-24T07:00:25 | 147,614,786 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | """
LeetCode 62. Unique Paths
blog : https://daimhada.tistory.com/131
problem : https://leetcode.com/problems/unique-paths/submissions/
"""
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count the distinct right/down paths through an m-wide, n-tall
        grid by exhaustively walking every route with an explicit stack
        (brute force; exponential in the grid size)."""
        rows, cols = n, m
        visit_count = [[0] * cols for _ in range(rows)]
        moves = ((0, 1), (1, 0))  # step right, step down
        stack = [(0, 0)]
        while stack:
            row, col = stack.pop()
            visit_count[row][col] += 1
            for delta_row, delta_col in moves:
                next_row, next_col = row + delta_row, col + delta_col
                if 0 <= next_row < rows and 0 <= next_col < cols:
                    stack.append((next_row, next_col))
        # Every complete path visits the bottom-right cell exactly once.
        return visit_count[rows - 1][cols - 1]
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count the distinct right/down paths through an m-wide, n-tall
        grid with bottom-up dynamic programming (O(m*n) time)."""
        # paths[i][j] = number of ways to reach cell (i, j); cells in the
        # first row or first column are reachable in exactly one way.
        paths = [[1] * m for _ in range(n)]
        for i in range(1, n):
            for j in range(1, m):
                paths[i][j] = paths[i - 1][j] + paths[i][j - 1]
        return paths[n - 1][m - 1]
s = Solution()
s.uniquePaths(7,3) | [
"histuckyi@gmail.com"
] | histuckyi@gmail.com |
74706744083239e8a580d5554f9d9b44a42f4f86 | 1b10005fa38e07bbd61b97442879c4039fa1518e | /facts/models.py | 10a6cb7af093ddc37a6ffa4c679f891cc35d62a4 | [] | no_license | maltefl/xsperm | 6ee7ca4c708f09be77007a1d3f549560726554c9 | 6a78c421f3fab82b972c2e240167459367917cff | refs/heads/master | 2021-01-17T22:22:06.808113 | 2010-01-15T13:35:25 | 2010-01-15T13:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from django.db import models
class Fact(models.Model):
    """A short user-submitted fact with a vote score."""
    text = models.CharField(max_length=140)  # tweet-sized fact body
    rating = models.IntegerField(default=0)  # net votes
    class Meta:
        # Highest-rated facts first by default.
        ordering = ('-rating',)
    def __unicode__(self):
        return self.text
| [
"mrbichel@mrbichel-laptop.(none)"
] | mrbichel@mrbichel-laptop.(none) |
f21bc2bcf384944f2a661bcb346820b772c9659e | 2eb93e87db7e55dd1b97975f1c45025d5bb438e9 | /type_info.py | 2c221151fe98ab017f9f604c458c495c7df909bb | [
"MIT"
] | permissive | Hirmuolio/ESI-type-ID-attributes | 106d8f603b06a44e9bda20c5da71069e10ca2cf0 | 320d386f3348c3e2dfccbe85260a0ca30dd2b54f | refs/heads/master | 2021-05-26T07:19:31.008750 | 2020-05-17T13:02:39 | 2020-05-17T13:02:39 | 127,891,373 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,052 | py | #!/usr/bin/env python3
#This python script gets you attributes for any object
import json
import gzip
import esi_calling
#Uncomment the one you want to use.
#TQ
datasource ='tranquility'
#SISI
#datasource ='singularity'
esi_calling.set_user_agent('Hirmuolio/ESI-type-ID-attributes')
def print_normal_attributes(esi_response):
	"""Print every plain (non-dogma) field of an ESI type-ID response,
	skipping fields that are rendered separately elsewhere."""
	skipped_fields = ['dogma_attributes', 'dogma_effects', 'type_id', 'name', 'description']
	print('\nAttributes:\n')
	info = esi_response.json()
	for field in info:
		if field in skipped_fields:
			continue
		print( ' {:<20s} {:<}'.format(field, info[field]))
def print_dogma_attributes(esi_response):
	"""Print each dogma attribute (name, value, display name, description)
	of a type-ID response, resolving unknown attribute IDs through ESI and
	persisting the resolution cache to dogma_attributes.gz.

	Relies on the module-level `dogma_attributes` dict as the in-memory
	cache and the module-level `datasource` for the batched ESI call.
	"""
	type_info = esi_response.json()
	if 'dogma_attributes' not in type_info:
		print(type_info['name'], ' has no dogma attributes')
	else:
		print('\ndogma attributes:\n')
		length = len(type_info['dogma_attributes'])
		# First pass: collect attribute IDs not yet resolved to names.
		new_attributes = []
		for n in range(0, length):
			dogma_id = type_info['dogma_attributes'][n]['attribute_id']
			if not str(dogma_id) in dogma_attributes:
				#Find what this ID is for
				new_attributes.append(dogma_id)
		if len(new_attributes) != 0:
			#print('Getting info on', len(new_attributes), 'dogma attributes')
			esi_response_arrays = esi_calling.call_esi(scope = '/v1/dogma/attributes/{par}', url_parameters = new_attributes, datasource = datasource, job = 'get info on dogma attribute')
			for array in esi_response_arrays:
				response_json = array[0].json()
				if 'attribute_id' in response_json:
					dogma_attributes[str(response_json['attribute_id'])] = response_json
			#Save the ID list
			with gzip.GzipFile('dogma_attributes.gz', 'w') as outfile:
				outfile.write(json.dumps(dogma_attributes, indent=2).encode('utf-8'))
		# Second pass: print every attribute using the (now updated) cache.
		for n in range(0, length):
			dogma_id = type_info['dogma_attributes'][n]['attribute_id']
			value = type_info['dogma_attributes'][n]['value']
			if str(dogma_id) in dogma_attributes:
				name = dogma_attributes[str(dogma_id)]['name']
				display_name = dogma_attributes[str(dogma_id)]['display_name']
				if 'description' in dogma_attributes[str(dogma_id)]:
					description = dogma_attributes[str(dogma_id)]['description']
				else:
					description = ""
				print( '  {:<30s} {:<10s} {:<}{:<}'.format(name, str(value), '( '+display_name, ', '+description+' )'))
			else:
				print( "Unknown dogma ID ", str(dogma_id) )
def print_dogma_effects(esi_response):
	"""Print each dogma effect of a type-ID response, resolving unknown
	effect IDs (and any attribute IDs the effect references) through ESI
	and persisting both caches (dogma_effects.gz / dogma_attributes.gz).

	Relies on the module-level `dogma_effects` / `dogma_attributes` dicts
	as in-memory caches and the module-level `datasource`.
	"""
	type_info = esi_response.json()
	if 'dogma_effects' not in type_info:
		print(type_info['name'], ' has no dogma effects')
	else:
		print('\ndogma effects:\n')
		length = len(type_info['dogma_effects'])
		# First pass: collect effect IDs not yet resolved.
		new_effects = []
		for n in range(0, length):
			dogma_id = type_info['dogma_effects'][n]['effect_id']
			if not str(dogma_id) in dogma_effects:
				#Find what this ID is for
				new_effects.append(dogma_id)
		if len(new_effects) != 0:
			#print('Getting info on', len(new_effects), 'dogma effects')
			esi_response_arrays = esi_calling.call_esi(scope = '/v2/dogma/effects/{par}', url_parameters = new_effects, datasource = datasource, job = 'get info on dogma attribute')
			for array in esi_response_arrays:
				response_json = array[0].json()
				if 'effect_id' in response_json:
					dogma_effects[str(response_json['effect_id'])] = response_json
				else:
					print( "Something wrong: ", response_json )
			#Save the ID list
			with gzip.GzipFile('dogma_effects.gz', 'w') as outfile:
				outfile.write(json.dumps(dogma_effects, indent=2).encode('utf-8'))
		# Second pass: print each effect, resolving referenced attribute IDs
		# on demand (each new attribute rewrites the attribute cache file).
		for n in range(0, length):
			dogma_id = type_info['dogma_effects'][n]['effect_id']
			print(' ')
			if str( dogma_id ) in dogma_effects:
				name = dogma_effects[str(dogma_id)]['name']
				print('  ', name)
				for key in dogma_effects[str(dogma_id)]:
					if key == "modifiers":
						print( "    modifiers:" )
						for arr_element in dogma_effects[str(dogma_id)]["modifiers"]:
							for key2 in arr_element:
								if key2 in [ "modified_attribute_id", "modifying_attribute_id" ]:
									attr_id = arr_element[key2]
									if not str(attr_id) in dogma_attributes:
										#Find what this ID is for
										esi_response = esi_calling.call_esi(scope = '/v1/dogma/attributes/{par}', url_parameters = [str(attr_id)], job = 'get info on dogma attribute')[0][0]
										response_json = esi_response.json()
										if 'attribute_id' in response_json:
											dogma_attributes[str(response_json['attribute_id'])] = response_json
										#Save the ID list
										with gzip.GzipFile('dogma_attributes.gz', 'w') as outfile:
											outfile.write(json.dumps(dogma_attributes, indent=2).encode('utf-8'))
									print( "      ", key2, ":", str(attr_id), "-", dogma_attributes[ str(attr_id) ]["name"] )
								else:
									print( "      ", key2, ":", arr_element[key2] )
					elif key in [ "discharge_attribute_id", "duration_attribute_id", "falloff_attribute_id", "tracking_speed_attribute_id", "range_attribute_id"]:
						if not str(dogma_effects[str(dogma_id)][key]) in dogma_attributes:
							esi_response = esi_calling.call_esi(scope = '/v1/dogma/attributes/{par}', url_parameters = [str(dogma_effects[str(dogma_id)][key])], job = 'get info on dogma attribute')[0][0]
							response_json = esi_response.json()
							if 'attribute_id' in response_json:
								dogma_attributes[str(response_json['attribute_id'])] = response_json
								#Save the ID list
								with gzip.GzipFile('dogma_attributes.gz', 'w') as outfile:
									outfile.write(json.dumps(dogma_attributes, indent=2).encode('utf-8'))
						print( '    ', key, ': ', dogma_effects[str(dogma_id)][key], "-", dogma_attributes[str(dogma_effects[str(dogma_id)][key])]["name"] )
					else:
						print( '    ', key, ': ', dogma_effects[str(dogma_id)][key] )
			else:
				print( "Unknown dogma effect ", dogma_id )
def parse_stats(esi_response):
	"""Pretty-print everything known about a type ID: basic fields, plain
	attributes, dogma attributes and dogma effects."""
	type_info = esi_response.json()
	#Print the output
	print('\n----')
	print(' Type ID:', type_info['type_id'])
	print(' Name:', type_info['name'])
	print(' Description:', type_info['description'])
	print_normal_attributes(esi_response)
	print_dogma_attributes(esi_response)
	print_dogma_effects(esi_response)
	print('----\n')
# Load the ID->info lookup caches written by previous runs; start with
# empty caches when the files do not exist yet.
try:
	#Load cached dogma attribute ID info
	with gzip.GzipFile('dogma_attributes.gz', 'r') as fin:
		dogma_attributes = json.loads(fin.read().decode('utf-8'))
except FileNotFoundError:
	#No file found. Start from scratch
	dogma_attributes = {}
try:
	#Load cached dogma effect ID info
	with gzip.GzipFile('dogma_effects.gz', 'r') as fin:
		dogma_effects = json.loads(fin.read().decode('utf-8'))
except FileNotFoundError:
	#No file found. Start from scratch
	dogma_effects = {}
print('Using', datasource, 'as data source')
# Interactive loop: prompt for a type ID, fetch it from ESI, print it.
while True:
	#Call ESI
	type_id = input("Give type ID: ")
	esi_response = esi_calling.call_esi(scope = '/v3/universe/types/{par}', url_parameters = [type_id], datasource = datasource, job = 'get type ID attributes')[0][0]
	if esi_response.status_code == 404:
		print('404 - Type ID: ' + type_id + ' not found')
	else:
		parse_stats(esi_response)
| [
"noreply@github.com"
] | Hirmuolio.noreply@github.com |
d5f310040aecfb186ec0eeaf080f2b884502218d | 1d3b49f5ea70a24393290fe07212df2aac36cda5 | /pythonProject11/main.py | 4dc966db69ab976d8060167775f505790b3e6b51 | [] | no_license | uykykhj/Hometask | b7f02d0e45481ce20972e537e2edd0b87de934e2 | a3cba07afdeaa9d617b20ab833f818e49ee97ded | refs/heads/main | 2023-08-24T11:45:13.508216 | 2021-10-21T14:01:47 | 2021-10-21T14:01:47 | 419,738,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | str=input('ведите первую строку любого стихотворения')
# Read two 1-based positions, convert them to 0-based indices, then print
# the poem line's length followed by the half-open slice [n1, n2) of it.
# NOTE(review): the variable `str` (read on the line above) shadows the
# builtin of the same name.
n1=int(input())
n1-=1
n2=int(input())
n2-=1
print(len(str))
print(str[n1:n2]) | [
"sanyaostapu4ok@gmail.com"
] | sanyaostapu4ok@gmail.com |
026c1164d905258ebd9724af53ff47a96b83f0aa | 826f3403b997296874757fb96b4f2bd893f6f0a0 | /jobsapp/apps.py | da3fe0655ccad9ce82d1bef2ed41177ae83efcd4 | [] | no_license | harishramuk/fakerproject | 122a8dc80d6571ae9c5a42c4a7c569dbb7bca20e | 8992edad8694208e5105861f11b523e349da45e8 | refs/heads/main | 2023-02-07T07:01:49.962450 | 2020-12-27T07:49:28 | 2020-12-27T07:49:28 | 324,710,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class JobsappConfig(AppConfig):
    """Django application configuration for the JobsApp app."""
    name = 'JobsApp'
| [
"noreply@github.com"
] | harishramuk.noreply@github.com |
aa2eaf1175b8b181761fce2ae6f5256beafa5e3c | 259b81f2a6d02947cd95c1fbdac4ea182592d331 | /mySE server/setupDB.py | 7f05234387561525cff55e10ed8f1703c6fe7121 | [
"MIT"
] | permissive | Arthurdw/mySE | 58ee59c982266b1d4ff0a53e01b882820125a71f | 0100c2a4e8fd5006e170ca8a2890080ed96d0153 | refs/heads/master | 2020-12-08T20:48:54.380742 | 2020-05-03T11:56:29 | 2020-05-03T11:56:29 | 233,091,191 | 0 | 0 | MIT | 2020-01-16T20:06:44 | 2020-01-10T16:52:39 | Python | UTF-8 | Python | false | false | 2,042 | py | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# API created for mySE by Arthurdw #
# mySE stands for My Secure Environment! #
# mySE was created for GO-Atheneum Oudenaarde #
# This project and all it files are under a MIT licence. #
# Project (mySE) started on 09/01/2020! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This file will do the basic DB setup for first usage. #
# If this file hasn't been run yet and you start the - #
# - server errors will occur! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from os import system
from util.utils import date
from util.SQLite import DataBase
from json import dumps
# DB formats:
# Tokens DB:
# id int, token string, mail string
# Logs DB:
# id int, time blob, light int, breach int
# Users DB:
# user_name text, mail text, password text
# Create DB object and DB:
print("\r", "Creating DB...", end=' ')
tokens = DataBase("tokens")
logs = DataBase("logs")
users = DataBase("users")
print("\b\b\b\b: Done")
# Add the db parameters:
print("\r", "Setting up DB", end=' ')
tokens.setup("tokens", "(id int, token string, mail string)")
logs.setup("logs", "(id int, time blob, light int, breach int)")
users.setup("users", "(user_name text, mail text, password text)")
print("\b\b\b\b: Done")
# Create example token:
print("\r", "Creating first token...", end=' ')
token = tokens.add_token("email@mail.mail")
print("\b\b\b\b: " + str(token))
# Create example log:
print("\r", "Creating first log...", end=' ')
date = date()  # NOTE(review): rebinding shadows the imported `date` helper
logs.post_log("INSERT INTO logs VALUES (?, ?, ?, ?)", (1, str(date), 0, 1))
# Hard coded example:
print("\b\b\b\b: " + dumps({"time": date, "light": False, "id": 1}))
# Install dependecies.
print("Installing depencencies.")
system("python -m pip install -r requirements.txt")
print("Succesfully installed the server dependencies.")
| [
"arthur.dewitte@gmail.com"
] | arthur.dewitte@gmail.com |
848daa1b7f69e0cfa889d5f7a5c7cfa3fc9f2b92 | 12e6723f9bc5b7fc3993e584c837469ce565e70f | /client.py | e35385956fd8274c12344c0b0d8b49e57222693f | [] | no_license | maujordan/meMusicPersonal | 779233810452e0ea7b21690e3ba428c9b77cf57f | 074dd4b83df55aa0a5e98367e00611b4fed53010 | refs/heads/master | 2020-08-16T00:27:45.975453 | 2019-10-29T23:57:03 | 2019-10-29T23:57:03 | 215,430,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | import time
import socket
import json
# Open a UDP socket with a 1-second receive timeout.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.settimeout(1.0)
# Destination: a local test server on port 12000.
addr = ("127.0.0.1", 12000)
str_data = {
    'color': 'rojo',
    'x': -3,
    'y': 1
}
# Serialize the payload as UTF-8 JSON and send a single datagram.
message = json.dumps(str_data).encode('utf-8')
client_socket.sendto(message, addr)
| [
"noreply@github.com"
] | maujordan.noreply@github.com |
2ef032bab20a1fd7af7aa08e330c93a4a5a3865a | c1fa73c6589df48aced3b1cbdbc30fb237b007fc | /code/scrapers_crawlers/vox/scrape_vox.py | 57f703b6009188501efd942eb2b94a7beadcb8f0 | [] | no_license | slee981/mnir_news | 0690b974a0b47bc2ca46cc69182e9b6db3735aed | 6746bd4afb754d156c30bbd6a201ef5e2982a8e3 | refs/heads/master | 2022-04-05T12:08:23.352478 | 2020-02-04T00:37:33 | 2020-02-04T00:37:33 | 207,697,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,664 | py | ########################################
# Imports
########################################
import requests as req
from selenium import webdriver
import sys
import os # os.chdir('/home/stephen/...')
# os.getcwd()
########################################
# Constants
########################################
BASE_FOLDER = '/home/stephen/Dropbox/CodeWorkspace/python/news-article-scraper/vox/'
# Per-topic output sub-folders under BASE_FOLDER.
ENTERTAINMENT_FOLDER = 'vox-culture'
POLITICS_FOLDER = 'vox-politics'
TECH_FOLDER = 'vox-tech'
# How many articles to save per topic before moving on.
ARTICLES_PER_TOPIC = 2
# Discovered article URLs, keyed by topic name through TOPICS.
CULTURE = []
POLITICS = []
TECH = []
TOPICS = {"politics": POLITICS, "culture": CULTURE, "tech": TECH}
CHROME_PATH = "/usr/lib/chromium-browser/chromedriver"
########################################
# Functions
########################################
def scrape(url, browser=None, file_id=None, last_article=True, relevant_topics=None):
    '''
    Scrape one vox.com article and write its text into the topic folder.

    Input
        - url: the vox news url to scrape
        - browser: an optional live selenium webdriver (one is created
          when omitted)
        - file_id: optional id used in the saved file name (defaults to 1)
        - last_article: when True the browser is quit before returning
        - relevant_topics: topic keys whose links should be harvested from
          the page (defaults to all three)
    Output
        - returns True on success, False on any failure; on success a txt
          file is written into the appropriate folder as a side effect
    '''
    if browser == None:
        browser = webdriver.Chrome(CHROME_PATH)
    if file_id == None:
        file_id = 1
    if relevant_topics == None:
        relevant_topics = ['politics', 'culture', 'tech']
    try:
        browser.get(url)
    except:
        print(f'\n[!!!] ERROR: Cannot get {url}\n')
        browser.refresh()
        return False
    # unique xpath to get text for vox news
    try:
        contents = browser.find_elements_by_xpath('/html/body/div/section/section/div[2]/div[1]/div[1]')
    except:
        print(f"Error getting xpath {url}")
        return False
    try:
        text = get_clean_txt(contents)
    except:
        print(f"Error cleaning text in {url}")
        return False
    # Harvest further article links from this page while we are on it.
    try:
        get_relevant_urls(browser, relevant_topics)
    except:
        print("error getting urls")
        return False
    try:
        topic_folder = get_correct_folder(url)
        write_txt_file(text, url, file_id, topic_folder)
    except:
        print("error writing")
        return False
    if last_article:
        browser.quit()
    return True
def get_clean_txt(article_contents):
    """Extract the article text from the scraped page contents.

    Removes the text of every <h3> header and embedded twitter widget
    found inside the first matched container, then strips the site's own
    name in the spellings Vox pages use.

    Input: the list of webdriver elements matched for the article body.
    Output: the cleaned article text as a string.
    """
    container = article_contents[0]
    text = container.text
    for header in container.find_elements_by_tag_name('h3'):
        text = text.replace(header.text, '')
    for widget in container.find_elements_by_tag_name('twitter-widget'):
        text = text.replace(widget.text, '')
    # Order matters: the longer / capitalised spellings are stripped first.
    for brand in ("Vox News", "VOX", "Vox's", "Vox", "vox.com", "vox"):
        text = text.replace(brand, '')
    return text
def get_relevant_urls(browser, relevant_topics=None):
    '''
    Motivation
        - this appends 'relevant' urls to
          an appropriate list. Relevant means that
          the url is unique and related to either
          politics, entertainment, or tech.
    Input
        - a live selenium webdriver instance with
          loaded vox news article
        - an optional list of topic keys ('politics', 'culture', 'tech')
          restricting which links are collected
    Output
        - none (appends to the module-level TOPICS lists and prints a count)
    '''
    if relevant_topics == None:
        relevant_topics = ['politics', 'culture', 'tech']
    anchors = browser.find_elements_by_tag_name('a')
    new_urls = 0
    for a in anchors:
        if a.get_attribute('href') != None:
            addr = a.get_attribute('href')
            if ('policy-and-politics/2' in addr) \
                and ('politics' in relevant_topics) \
                and addr not in TOPICS['politics']:
                TOPICS['politics'].append(addr)
                new_urls += 1
            elif ('culture/2' in addr) \
                and ('culture' in relevant_topics) \
                and addr not in TOPICS['culture']:
                TOPICS['culture'].append(addr)
                new_urls += 1
            # BUG FIX: this branch previously required 'technology' in
            # relevant_topics, but every caller (and the TOPICS dict) uses
            # the key 'tech', so tech links were never collected.
            elif ('technology/2' in addr) \
                and ('tech' in relevant_topics) \
                and addr not in TOPICS['tech']:
                TOPICS['tech'].append(addr)
                new_urls += 1
    print(f" -> Added {new_urls} new urls")
def get_correct_folder(url):
    """Map an article url to the absolute on-disk folder it belongs in.

    Raises ValueError when the url matches none of the known topics.
    """
    if 'politics/' in url:
        topic_folder = POLITICS_FOLDER
    elif 'culture' in url:
        topic_folder = ENTERTAINMENT_FOLDER
    elif 'tech' in url:
        topic_folder = TECH_FOLDER
    else:
        raise ValueError
    return BASE_FOLDER + topic_folder
def write_txt_file(text, url, file_id, folder):
    '''
    Motivation
        - this function takes text and writes
          it as a text file into an appropriate
          folder
    Input
        - text from article
        - url the text came from (logged to urls.txt)
        - file id number used in the file name
        - folder: absolute topic folder path
            --> entertainment
            --> politics
            --> tech
    Output
        - none (writes/appends files in the topic folder)
    '''
    os.chdir(folder)
    # folder is formatted "/home/stephen/.../vox-topic"
    # Slicing past len(BASE_FOLDER)+4 drops the "vox-" prefix, leaving just
    # the bare topic name for the file name.
    topic = folder[len(BASE_FOLDER)+4:]
    file_name = f"vox_{topic}_{file_id}.txt"
    with open(file_name, 'a') as f:
        f.write(text)
    # Record the source url in the per-topic '|*|'-delimited url log.
    with open('urls.txt', 'a') as f:
        f.write(f"{url}|*|")
    print(f"wrote {file_name}")
def get_starter_urls():
    """Seed the module-level TOPICS lists from the '|*|'-delimited url
    files stored in ./starter-urls/ (one file per topic).

    Side effects: changes the working directory into the starter folder
    and back up one level afterwards.
    """
    folder = './starter-urls/'
    politics = 'politics.txt'
    culture = 'culture.txt'
    tech = 'tech.txt'
    files = [politics, culture, tech]
    os.chdir(folder)
    for url_file in files:
        with open(url_file, 'r') as f:
            urls = f.read()
        urls = urls.split("|*|")
        # Route the urls to the matching topic list by file name.
        if 'politics' in url_file:
            TOPICS['politics'].extend(urls)
        elif 'culture' in url_file:
            TOPICS['culture'].extend(urls)
        elif 'tech' in url_file:
            TOPICS['tech'].extend(urls)
    os.chdir("../")
def main():
    """Crawl vox.com: seed urls from disk, then scrape up to
    ARTICLES_PER_TOPIC articles per topic in TOPICS, harvesting new
    article links from every page visited along the way."""
    # read in starter urls
    get_starter_urls()
    browser = webdriver.Chrome(CHROME_PATH)
    browser.implicitly_wait(15)
    topics = [topic for topic in TOPICS.keys()]
    for i, topic in enumerate(topics):
        print(f'\n*** looking for {topic} ***\n')
        # set relevant topics to avoid unnecessary url hunting
        relevant_topics = topics[i:]
        # reset counters for each topic
        articles_scraped = 0
        current_url = 0
        # loop as long as there are enough new urls
        while len(TOPICS[topic]) > current_url and articles_scraped < ARTICLES_PER_TOPIC:
            url = TOPICS[topic][current_url]
            if scrape(url, browser=browser, file_id=articles_scraped, last_article=False, relevant_topics=relevant_topics):
                articles_scraped += 1
                print(f' -> {len(TOPICS[topic]) - current_url} urls left in this topic')
                print(f" -> relevant url topics {relevant_topics}")
            current_url += 1
    browser.quit()
def test(url):
    """Scrape a single url with default settings (manual smoke test)."""
    scrape(url, file_id="test")
########################################
# Main
########################################
if __name__ == "__main__":
    # With exactly one argument, scrape just that url as a smoke test;
    # otherwise run the full multi-topic crawl.
    if len(sys.argv) == 2:
        url = sys.argv[1]
        test(url)
    else:
        main()
| [
"smlee.981@gmail.com"
] | smlee.981@gmail.com |
2f4e456870305284bd134826e6bf1cf2ac4e66dc | 6078520160f17b7e57bd232bd09d9c7cfb7324b1 | /nagios/libexec-/check_dns_srv.py | c465fcf94dd5dfec8874554c689b11ef8937f539 | [] | no_license | juniorkalvin7/nagios-2 | 12bee5ff4c35313b42b61b073e8e710ea47acf4f | 223a796ed073bfd97da36fca046ae5e05b0a8c87 | refs/heads/master | 2021-01-17T14:48:36.650222 | 2017-03-06T16:49:24 | 2017-03-06T16:49:24 | 84,097,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* Copyright (c) 2005 Gemayel Alves de Lira (gemayellira@gmail.com.br)
* All rights reserved.
* Intechne Information Technologies
* version 0.1 -
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE INTECHNE INFORMATION TECNOLOGIES, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import commands
import time
import math
import os
import sys
# Nagios/Centreon DNS check (Python 2): expects one argument, the DNS
# server to query; exit code 1 = usage error, 2 = CRITICAL, 0 = OK.
args = sys.argv[1:]
if len(args)<1:
	# No host given: print usage and exit with warning status.
	print "Centreon Plugin\nCheck DNS Server por Gemayel Lira"
	print "Argumentos:host"
	print "Ex.\n%s 10.0.0.45" % sys.argv[0]
	sys.exit(1)
else:
	#print args
	host=args[0]
	#community=args[1]
try:
	# Resolve three well-known names through the given server with dig.
	data=(commands.getoutput("dig @%s www.bol.com.br www.cnn.com www.google.com" %(str(host))))
except Exception,e:
	print "CRITICAL - nao consegui capturar valores"
	print e
	sys.exit(2)
# dig prints ';; ANSWER SECTION:' once per answered query; no occurrence
# at all means the server did not answer anything.
if data.count(';; ANSWER SECTION:') ==0:
	print "CRITICAL - DNS Service - Nao esta respondendo|ok=0"
	sys.exit(2)
print "DNS Service - OK |ok=100"
| [
"juniorkalvin7@gmail.com"
] | juniorkalvin7@gmail.com |
1fcc1e4dc5331ee16b5a22ff7eeede6cdb9460bb | e8e67a0b6d1259a8588181b733393303f661a843 | /python_examples/control_mapping_framework/excel_utils.py | 8ec023625c6eef08768fa1052be219dd532c49d6 | [
"MIT"
] | permissive | CyberGRX/api-examples | 7019b0930e4b427df5bce71205cf65cb412a0d9e | 3ae16c023a6f217748d3e8acfdb8f2d9611f91ad | refs/heads/master | 2023-05-27T02:36:31.506462 | 2023-01-20T17:54:28 | 2023-01-20T17:54:28 | 228,675,512 | 1 | 1 | MIT | 2023-05-22T21:44:10 | 2019-12-17T18:13:45 | Python | UTF-8 | Python | false | false | 3,627 | py | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import re
from copy import copy
from jinja2 import Template
from openpyxl import load_workbook
from openpyxl.cell import Cell, MergedCell
from reporting import debug_keys, read_report
from utils import cell_value, create_sheet
def process_excel_template(filename, metadata=None, debug=False):
    """Render the Jinja2 `{%tr ... %}` row-templates embedded in an Excel
    workbook, in place.

    For each sheet, the rows between a `{%tr for %}` and a `{%tr endfor %}`
    marker are treated as a row template: marker and body rows are joined
    into one Jinja2 template (cells delimited by the sentinel "-=+"),
    rendered against `metadata` (augmented with report data read from the
    file itself), and written back over the original rows with the template
    row's cell styles re-applied.

    Parameters
    ----------
    filename : str
        Path of the workbook; it is loaded and saved back in place.
    metadata : dict, optional
        Extra values made available to the templates.
    debug : bool, optional
        When True, print each raw template and append a "Debug Table"
        sheet listing the available metadata keys.
    """
    if not metadata:
        metadata = {}
    report_data = read_report(filename)
    metadata.update(report_data)
    wb = load_workbook(filename=filename, data_only=True)
    for _, sheet in enumerate(wb):
        start = None
        end = None
        template = []
        style = []
        for i, row in enumerate(sheet):
            row_values = [cell_value(c) for _, c in enumerate(row) if isinstance(c, (Cell, MergedCell))]
            raw_values = " ".join(row_values)
            if "{%tr " in raw_values:
                # Row-level logic marker: rewrite it as plain Jinja2 syntax.
                logic_statement = "".join(row_values).replace("{%tr", "{%").replace(" %}", " -%}")
                if "{%tr for" in raw_values:
                    start = i
                elif "{%tr endfor" in raw_values:
                    end = i
                template.append(logic_statement)
            elif start is not None and end is None:
                # Template body row: remember its cell styles so they can be
                # re-applied to every rendered output row.
                style = [
                    {
                        "font": copy(c.font),
                        "border": copy(c.border),
                        "fill": copy(c.fill),
                        "number_format": copy(c.number_format),
                        "protection": copy(c.protection),
                        "alignment": copy(c.alignment),
                    }
                    for c in row
                ]
                template.append("-=+".join(row_values))
        if not template or start is None or end is None:
            continue
        raw_template = "\n".join(template)
        if debug:
            print("Raw Template:\n" + raw_template.replace("-=+", ""))
        jinga_template = Template(raw_template)
        processed = jinga_template.render(metadata)
        # Replace the template rows with the rendered rows, cell by cell.
        sheet.delete_rows(start + 1, amount=end - start + 1)
        for i, row in enumerate(processed.splitlines()):
            for j, c in enumerate(row.split("-=+")):
                cell = sheet.cell(row=start + i + 1, column=j + 1)
                cell.value = re.sub(r"\n\n+", "\n\n", c.replace("<w:br/>", "\n")).strip()
                try:
                    cell.font = copy(style[j]["font"])
                    cell.border = copy(style[j]["border"])
                    cell.fill = copy(style[j]["fill"])
                    cell.number_format = copy(style[j]["number_format"])
                    cell.protection = copy(style[j]["protection"])
                    cell.alignment = copy(style[j]["alignment"])
                except IndexError:
                    # Rendered row has more cells than the styled template row.
                    pass
    if debug and metadata:
        debugging_keys = debug_keys(metadata)
        debugging_keys.sort()
        create_sheet(wb, "Debug Table")
        debug_sheet = wb["Debug Table"]
        for i, key in enumerate(debugging_keys):
            cell = debug_sheet.cell(row=i + 1, column=1)
            cell.value = key
    wb.save(filename)
| [
"noreply@github.com"
] | CyberGRX.noreply@github.com |
88ac7eaa07a6e60ea86b3a2c3c89d5bdf3800eed | 7a0f0c2107019c82b693e809c1a9b912bee9d9b1 | /app/chap3_2_2/models/mkqueries.py | a6ed847f49085fe78b1ee60cf6cf84fe8ca6cc7b | [] | no_license | petershan1119/Django-Official-Practice | 352f17a4c0b03abe81af7471c4823f096868a4b5 | a24f626c28bda6024e1b5380f1f8a3c436ba5a0d | refs/heads/master | 2021-01-24T01:28:46.044910 | 2018-02-26T00:32:55 | 2018-02-26T00:32:55 | 122,808,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | from django.db import models
__all__ = (
'Blog',
'Author',
'Entry',
)
class Blog(models.Model):
    """A blog with a name and an optional tagline (Django ORM practice model)."""
    name = models.CharField(max_length=100)
    # blank=True: the tagline may be left empty in forms.
    tagline = models.TextField(blank=True)
    def __str__(self):
        """Readable representation shown in the admin and the shell."""
        return self.name
class Author(models.Model):
    """
    An entry author (Django ORM practice model).

    ## For a ManyToMany relation, update it with add() (p.105)
    joe = Author.objects.create(name='Joe')
    entry.authors.all()
    entry.authors.add(joe)
    """
    name = models.CharField(max_length=200)
    # blank=True: the email may be left empty in forms.
    email = models.EmailField(blank=True)
    def __str__(self):
        """Readable representation shown in the admin and the shell."""
        return self.name
class Entry(models.Model):
    """
    A blog entry (Django ORM practice model). The shell examples below are
    kept from the original study notes, with textbook page references.

    ## For ForeignKey updates, simply assign the new object (p.105)
    b = Blog(name='Beatles Blog', tagline='All the latest Beatles news')
    b.save()
    entry = Entry.objects.create(blog=b, headline='Test entry')
    entry.blog
    entry.blog.pk
    b2 = Blog.objects.create(name='Cheddar Talk')
    entry.blog = b2
    ## Retrieving specific objects with filters (p.106)
    Entry.objects.create(blog=b, headline='2006 test entry', pub_date=date(2006, 1, 1))
    Entry.objects.filter(pub_date__year=2006)
    ## Chaining filters example (p.107)
    b = Blog.objects.create(name='lhy Blog')
    Entry.objects.create(blog=b, headline='What\'s up', pub_date=date(2020, 1, 1))
    Entry.objects.create(blog=b, headline='What 123', pub_date=date(2000, 1, 1))
    Entry.objects.create(blog=b, headline='Whattttttt', pub_date=date(2005, 2, 1))
    ## Everything inside a single filter() call vs. Successive filter() (p.111)
    b1 = Blog.objects.create(name='Lennon and 2008')
    b2 = Blog.objects.create(name='Lennon 2008 separate')
    Entry.objects.create(blog=b1, headline='Lennon', pub_date=date(2008, 1, 1))
    Entry.objects.create(blog=b2, headline='Fastcampus', pub_date=date(2008, 1, 1))
    Entry.objects.create(blog=b2, headline='Lennon', pub_date=date(2018, 2, 19))
    Blog.objects.filter(entry__headline__contains='Lennon', entry__pub_date__year=2008)
    Blog.objects.filter(entry__headline__contains='Lennon').filter(entry__pub_date__year=2008)
    ## Comparing values across different fields (p.112)
    b = Blog.objects.create(name='F blog')
    e1 = Entry.objects.create(blog=b, headline='F entry', n_comments=10, n_pingbacks=5)
    e1.n_comments = 10
    e1.n_pingbacks = 5
    e1.save()
    e2 = Entry.objects.create(blog=b, headline='F entry2', n_comments=5, n_pingbacks=10)
    Entry.objects.filter(n_comments__gt=F('n_pingbacks'))
    """
    # Deleting a Blog cascades to its entries.
    blog = models.ForeignKey(Blog, on_delete=models.CASCADE)
    headline = models.CharField(max_length=255)
    pub_date = models.DateField(blank=True, null=True)
    # auto_now=True: refreshed on every save().
    mod_date = models.DateField(auto_now=True)
    authors = models.ManyToManyField(Author, blank=True)
    n_comments = models.IntegerField(default=0)
    n_pingbacks = models.IntegerField(default=0)
    rating = models.IntegerField(default=0)
    def __str__(self):
        """Readable representation shown in the admin and the shell."""
        return self.headline
"peter.s.han.1119@gmail.com"
] | peter.s.han.1119@gmail.com |
eecc3418673eb2435b8bd870ebdd648b9fba9d7e | 1cde1ef47cb6ab401e11d0c26d73db6fd3c43b41 | /workit/model/Offer.py | 8d0626619508344a7dfd9ac95fb101d8519e2211 | [] | no_license | Mluszczewski/WorkIT | 0fce4edd1f23065603fdba2a939faf71990713f0 | 657e2ae3758d8babe032c692ff20eed36d193782 | refs/heads/master | 2023-05-12T13:27:35.441661 | 2020-06-15T12:23:38 | 2020-06-15T12:23:38 | 216,622,088 | 1 | 1 | null | 2023-05-01T21:18:13 | 2019-10-21T17:07:18 | Python | UTF-8 | Python | false | false | 4,675 | py | from workit.const import CATEGORIES, CURRENCIES, LOCATIONS
from uuid import uuid4
from re import sub
from fuzzywuzzy import fuzz
from unidecode import unidecode
class Offer:
    """A normalized job offer.

    The constructor arguments pass through the property setters below, which
    clean the raw scraped values: ``city`` is fuzzy-matched against the
    canonical LOCATIONS list, ``salary`` is parsed into a
    floor/ceiling/currency dict, and ``techstack``/``experience`` are
    normalized into title-cased lists. A coarse ``category`` is then derived
    from keyword tags in CATEGORIES.
    """
    def __init__(
        self,
        title,
        company,
        city,
        url,
        salary,
        techstack,
        experience
    ):
        self.title = title
        self.company = company
        # The next five assignments go through property setters (see below).
        self.city = city
        self.url = url
        self.salary = salary
        self.techstack = techstack
        self.experience = experience
        # _classify() reads self.title and self.techstack, so set it last.
        self.category = self._classify()
        # Random hex id. NOTE(review): __iter__ does not yield it, so it is
        # dropped when the offer is converted with dict(offer).
        self._id = uuid4().hex
    def __repr__(self):
        """Debug representation: the full attribute dict, including _id."""
        return str(self.__dict__)
    def __iter__(self):
        # Yields (key, value) pairs so dict(offer) produces a plain mapping
        # of the public fields (the private _id is intentionally excluded).
        yield 'title', self.title
        yield 'company', self.company
        yield 'city', self.city
        yield 'url', self.url
        yield 'salary', self.salary
        yield 'techstack', self.techstack
        yield 'experience', self.experience
        yield 'category', self.category
    @property
    def city(self):
        return self._city
    @city.setter
    def city(self, name):
        """Normalize a raw city name by fuzzy-matching against LOCATIONS.

        Raises ValueError if *name* is not a str.
        """
        if type(name) is str:
            # Remove everything enclosed in parentheses (greedy: spans from
            # the first '(' to the last ')').
            clean_name = sub(r'\(.+\)', '', name)
            # Strip accents, underscores and case for a fair comparison.
            clean_name = unidecode(clean_name).replace('_', ' ').lower().strip()  # noqa 501
            best_score = 0
            best_name = ''
            for city in LOCATIONS:
                clean_city = unidecode(city).lower()
                match_score = fuzz.ratio(clean_city, clean_name)
                # An exact match wins immediately.
                if match_score == 100:
                    best_score = match_score
                    best_name = city
                    break
                if match_score > best_score:
                    best_score = match_score
                    best_name = city
            # Below the 80% similarity threshold, fall back to a title-cased
            # version of the raw input instead of a canonical name.
            # NOTE(review): if LOCATIONS is empty, match_score above is never
            # bound and a NameError would be raised -- confirm LOCATIONS is
            # always non-empty.
            if best_score < 80:
                self._city = name.replace('_', ' ').title().strip()
            else:
                self._city = best_name
        else:
            raise ValueError(
                'City field has to be of type str. ' +
                f'You passed {type(name)}'
            )
    @property
    def experience(self):
        return self._experience
    @experience.setter
    def experience(self, value):
        """Normalize experience levels to a title-cased list.

        Accepts a list of strings or a (possibly comma-separated) string;
        anything else yields an empty list.
        """
        self._experience = []
        if type(value) is list:
            self._experience = [exp.title() for exp in value]
        if type(value) is str and value != '':
            if value.find(',') == -1:
                self._experience = [value.title()]
            else:
                # NOTE(review): items are not strip()-ped, so ', Senior'
                # keeps its leading space -- confirm upstream values.
                self._experience = [exp.title() for exp in value.split(',')]
    @property
    def techstack(self):
        return self._techstack
    @techstack.setter
    def techstack(self, value):
        """Normalize technology names to a title-cased list.

        Same accepted shapes as the experience setter above.
        """
        self._techstack = []
        if type(value) is list:
            self._techstack = [tech.title() for tech in value]
        if type(value) is str and value != '':
            if value.find(',') == -1:
                self._techstack = [value.title()]
            else:
                self._techstack = [tech.title() for tech in value.split(',')]
    @property
    def url(self):
        return self._url
    @url.setter
    def url(self, url):
        """Store the offer URL; raise ValueError on empty/None values."""
        if url is not None and url != '':
            self._url = url
        else:
            raise ValueError(f'Offer URL {url} cannot be empty or None.')
    @property
    def salary(self):
        return self._salary
    @salary.setter
    def salary(self, value):
        """Parse a raw salary string into {'floor', 'ceiling', 'currency'}.

        Expected shape is roughly '<floor> - <ceiling> <currency>'; any part
        that cannot be parsed stays None.
        """
        self._salary = {'floor': None, 'ceiling': None, 'currency': None}
        if value is not None and value != '':
            # Dropping digits and non-word characters leaves the currency
            # token (e.g. 'PLN' from '8000 - 12000 PLN').
            currency = sub(r'[\W\d]', '', value)
            split = value.split('-')
            # Map the token to a canonical currency label via its known
            # abbreviations.
            for label, abbreviations in CURRENCIES.items():
                if currency.lower() in abbreviations:
                    self._salary['currency'] = label
                    break
            # Only a single 'floor - ceiling' range is supported.
            if len(split) == 2:
                floor = sub(r'[^0-9]', '', split[0])
                ceiling = sub(r'[^0-9]', '', split[1])
                if floor.isdecimal() and ceiling.isdecimal():
                    # Stored as strings, not ints (callers must convert).
                    self._salary['floor'] = floor
                    self._salary['ceiling'] = ceiling
    def _classify(self):
        """Return the first CATEGORIES label whose tags match a title
        keyword, then a techstack entry; 'Other' if nothing matches."""
        # Strip dashes first so e.g. 'front-end' collapses to 'frontend',
        # then map any other unexpected character to a space.
        description = sub(r'-', '', self.title).lower()
        description = sub(r'[^a-zA-Z.+# ]', ' ', description).lower()
        for keyword in description.split():
            for category, tags in CATEGORIES.items():
                if keyword in tags:
                    return category
        # Fall back to matching the offer's technologies.
        for tech in self.techstack:
            for category, tags in CATEGORIES.items():
                if tech.lower() in tags:
                    return category
        return 'Other'
| [
"pjedrzejczak@sigma.ug.edu.pl"
] | pjedrzejczak@sigma.ug.edu.pl |
b481585589990d5f721f1f523af73b0e12effd35 | 67b6f1433649af979ecf14049a783127ed16799d | /pilingUp/pilingUp00.py | 32b79d91c5de07d9b635dbe30179a461c202b0ea | [
"MIT"
] | permissive | tnkteja/notthisagain | bdb919f9b713b9abe5dde76af000186d1b3bd896 | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | refs/heads/master | 2021-01-22T22:13:16.421008 | 2017-05-24T15:56:14 | 2017-05-24T15:56:14 | 85,519,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Python 2 stdin/stdout solution (xrange/raw_input/print statement) --
# looks like the classic "Piling Up!" challenge: for each test case, decide
# whether the row of cubes can be stacked into one vertical pile with
# non-increasing side lengths, taking cubes only from either end of the row.
from collections import deque
T=input()
for _ in xrange(T):
    N=input()
    cubes=deque(map(int,raw_input().split(' ')))
    stack=[]
    ans="Yes"
    while cubes:
        # Greedily take the larger of the two ends; it must not exceed the
        # cube currently on top of the pile, otherwise stacking fails.
        index=0 if cubes[0] > cubes[-1] else -1
        if stack and cubes[index] > stack[-1]:
            ans="No"
            break
        stack.append(cubes[index])
        # deque supports del at either end in O(1).
        del cubes[index]
    print ans
| [
"ntadiko@ncsu.edu"
] | ntadiko@ncsu.edu |
c03eaa16a3e0a5b7f3a46d2d94e6d83848e0d6e8 | 4f972877da14226125440b3da9bdb058764d8a54 | /pandasStudy/temp_opt.py | f108619a26d725634c493b10c9b32adf500d1dee | [] | no_license | ZhiYinZhang/study | 16c29990cb371e7e278c437aa0abc7c348614063 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #-*- coding: utf-8 -*-
# @Time : 2019/3/9 14:37
# @Author : Z
# @Email : S
# @File : temp_opt.py
import pandas as pd
import json
# df.to_json(,orient="records",force_ascii=False)
# path="e:/test/json/shaoshanshi.json"
#
# df=pd.read_json(path,orient="records",lines=True)
#
# print(df)
# df.to_json("e:/test/json/shaoshanshi.csv",orient="records",force_ascii=False)
# df=pd.read_csv("E:/test/dianshang/data/cust_tel_20200110.csv",dtype=str)
#
# df.to_json("e://test/dianshang/data/cust_tel_20200110.json",orient="records")
# path="e://test//json//"
# df=pd.read_json(path+"part.json",orient="records",lines=True,encoding="utf-8",dtype=False)
#
#
# # pd.read_csv()
#
# print(df.dtypes)
#
# print(df)
# df.to_json(path+"part1.json",orient="records",force_ascii=False)
pd.read_excel()
df=pd.read_csv("e://test//csv//test.csv",dtype=str)
print(df)
print(df.dtypes)
| [
"2454099127@qq.com"
] | 2454099127@qq.com |
c8b547b5c2825f3a201e760acb128b8fc94edaca | 14cc70fa60dfaa441aab34b083cff1bf59574264 | /opencivicdata/legislative/models/session.py | 397d1f240810a4a6ecba6cda44895ce9e76871cc | [] | permissive | tubaman/python-opencivicdata | 85434672bea6b40a417104d9381097df58b8a7b2 | 010cd72bdd806e76f342195a1f1e20acbed5a431 | refs/heads/master | 2020-07-26T13:32:22.452022 | 2019-08-20T05:56:12 | 2019-08-20T05:56:12 | 208,660,220 | 0 | 0 | BSD-3-Clause | 2019-09-15T21:33:06 | 2019-09-15T21:33:06 | null | UTF-8 | Python | false | false | 1,192 | py | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from opencivicdata.core.models.base import RelatedBase
from opencivicdata.core.models import Jurisdiction
from ...common import SESSION_CLASSIFICATION_CHOICES
@python_2_unicode_compatible
class LegislativeSession(RelatedBase):
    """A session (e.g. regular or special) of a jurisdiction's legislature.

    Start/end dates are stored as strings rather than DateFields so partial
    precision (year- or month-only) can be represented: YYYY[-MM[-DD]].
    """
    jurisdiction = models.ForeignKey(Jurisdiction,
                                     related_name='legislative_sessions',
                                     # should be hard to delete Jurisdiction
                                     on_delete=models.PROTECT
                                     )
    # Machine-readable session id. NOTE(review): no uniqueness constraint
    # here -- presumably enforced upstream; confirm.
    identifier = models.CharField(max_length=100)
    # Human-readable session name.
    name = models.CharField(max_length=300)
    # Optional; restricted to SESSION_CLASSIFICATION_CHOICES when set.
    classification = models.CharField(max_length=100, choices=SESSION_CLASSIFICATION_CHOICES,
                                      blank=True)
    start_date = models.CharField(max_length=10)  # YYYY[-MM[-DD]]
    end_date = models.CharField(max_length=10)  # YYYY[-MM[-DD]]
    def __str__(self):
        """E.g. '<jurisdiction> <session name>'."""
        return '{} {}'.format(self.jurisdiction, self.name)
    class Meta:
        db_table = 'opencivicdata_legislativesession'
| [
"james.p.turk@gmail.com"
] | james.p.turk@gmail.com |
18355aa0f6375e11796880df007410c7b767cc84 | 111212d14fe7344a8635f0f8b392a657b5db27d8 | /trabajo/migrations/0001_initial.py | c32bbbabe988780ebcaa511d59900c57eef5bca3 | [] | no_license | sebasgoldberg/agencia | c83acd2cbdd2097e65c9dfb85fafbd31ed2fa8e8 | dc837d8d40183cb22231a13e408bf56b8ce168e0 | refs/heads/master | 2021-01-25T03:48:18.591880 | 2014-05-19T10:21:30 | 2014-05-19T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,493 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the initial schema (auto-generated by South).

        Creates every table of the 'trabajo' app, emits the South
        create signals, and finally adds the composite unique
        constraints on Rol and Postulacion.
        """
        # Adding model 'Productora'
        db.create_table(u'trabajo_productora', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True, blank=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('mail', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal(u'trabajo', ['Productora'])
        # Adding model 'DireccionProductora'
        db.create_table(u'trabajo_direccionproductora', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
            ('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
            ('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
            ('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
            ('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
            ('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'])),
        ))
        db.send_create_signal(u'trabajo', ['DireccionProductora'])
        # Adding model 'TelefonoProductora'
        db.create_table(u'trabajo_telefonoproductora', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('compania', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['telefono.Compania'], null=True, on_delete=models.PROTECT, blank=True)),
            ('telefono', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'])),
        ))
        db.send_create_signal(u'trabajo', ['TelefonoProductora'])
        # Adding model 'ItemPortfolio'
        db.create_table(u'trabajo_itemportfolio', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('titulo', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('video', self.gf('django.db.models.fields.URLField')(max_length=200, unique=True, null=True, blank=True)),
            ('codigo_video', self.gf('django.db.models.fields.CharField')(max_length=30, unique=True, null=True, blank=True)),
            ('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
            ('fecha', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 1, 16, 0, 0))),
        ))
        db.send_create_signal(u'trabajo', ['ItemPortfolio'])
        # Adding model 'Trabajo'
        db.create_table(u'trabajo_trabajo', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('titulo', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('productora', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Productora'], on_delete=models.PROTECT)),
            ('descripcion', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('imagen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('estado', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('fecha_ingreso', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2014, 1, 16, 0, 0))),
            ('publicado', self.gf('django.db.models.fields.BooleanField')()),
        ))
        db.send_create_signal(u'trabajo', ['Trabajo'])
        # Adding model 'EventoTrabajo'
        db.create_table(u'trabajo_eventotrabajo', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
            ('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
            ('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
            ('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
            ('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
            ('fecha', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 1, 16, 0, 0), null=True, blank=True)),
            ('tipo', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('trabajo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Trabajo'], on_delete=models.PROTECT)),
        ))
        db.send_create_signal(u'trabajo', ['EventoTrabajo'])
        # Adding model 'Rol'
        db.create_table(u'trabajo_rol', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('trabajo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Trabajo'], on_delete=models.PROTECT)),
            ('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('cache', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=4, blank=True)),
            ('caracteristicas', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'trabajo', ['Rol'])
        # Adding unique constraint on 'Rol', fields ['trabajo', 'descripcion']
        db.create_unique(u'trabajo_rol', ['trabajo_id', 'descripcion'])
        # Adding model 'EventoRol'
        db.create_table(u'trabajo_eventorol', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('descripcion', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('pais', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'], null=True, on_delete=models.PROTECT)),
            ('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, on_delete=models.PROTECT)),
            ('ciudad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.City'], null=True, on_delete=models.PROTECT)),
            ('barrio', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('direccion', self.gf('django.db.models.fields.CharField')(max_length=120)),
            ('codigo_postal', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
            ('fecha', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 1, 16, 0, 0), null=True, blank=True)),
            ('tipo', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('rol', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Rol'], on_delete=models.PROTECT)),
        ))
        db.send_create_signal(u'trabajo', ['EventoRol'])
        # Adding model 'Postulacion'
        db.create_table(u'trabajo_postulacion', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('agenciado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['agencia.Agenciado'], on_delete=models.PROTECT)),
            ('rol', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['trabajo.Rol'], on_delete=models.PROTECT)),
            ('estado', self.gf('django.db.models.fields.CharField')(max_length=2)),
        ))
        db.send_create_signal(u'trabajo', ['Postulacion'])
        # Adding unique constraint on 'Postulacion', fields ['agenciado', 'rol']
        db.create_unique(u'trabajo_postulacion', ['agenciado_id', 'rol_id'])
    def backwards(self, orm):
        """Revert the migration (auto-generated by South).

        Drops the composite unique constraints first, then every table
        created by forwards(), in reverse dependency order.
        """
        # Removing unique constraint on 'Postulacion', fields ['agenciado', 'rol']
        db.delete_unique(u'trabajo_postulacion', ['agenciado_id', 'rol_id'])
        # Removing unique constraint on 'Rol', fields ['trabajo', 'descripcion']
        db.delete_unique(u'trabajo_rol', ['trabajo_id', 'descripcion'])
        # Deleting model 'Productora'
        db.delete_table(u'trabajo_productora')
        # Deleting model 'DireccionProductora'
        db.delete_table(u'trabajo_direccionproductora')
        # Deleting model 'TelefonoProductora'
        db.delete_table(u'trabajo_telefonoproductora')
        # Deleting model 'ItemPortfolio'
        db.delete_table(u'trabajo_itemportfolio')
        # Deleting model 'Trabajo'
        db.delete_table(u'trabajo_trabajo')
        # Deleting model 'EventoTrabajo'
        db.delete_table(u'trabajo_eventotrabajo')
        # Deleting model 'Rol'
        db.delete_table(u'trabajo_rol')
        # Deleting model 'EventoRol'
        db.delete_table(u'trabajo_eventorol')
        # Deleting model 'Postulacion'
        db.delete_table(u'trabajo_postulacion')
models = {
u'agencia.agenciado': {
'Meta': {'ordering': "['nombre', 'apellido']", 'object_name': 'Agenciado'},
'activo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'altura': ('django.db.models.fields.FloatField', [], {}),
'apellido': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'calzado': ('django.db.models.fields.IntegerField', [], {}),
'como_nos_conocio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cuenta_bancaria': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'danzas': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Danza']", 'symmetrical': 'False', 'blank': 'True'}),
'deportes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Deporte']", 'symmetrical': 'False', 'blank': 'True'}),
'documento_cpf': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'documento_rg': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'estado_dientes': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.EstadoDientes']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha_ingreso': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idiomas': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Idioma']", 'symmetrical': 'False', 'blank': 'True'}),
'indicador_maneja': ('django.db.models.fields.BooleanField', [], {}),
'indicador_tiene_registro': ('django.db.models.fields.BooleanField', [], {}),
'instrumentos': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['perfil.Instrumento']", 'symmetrical': 'False', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'observaciones': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ojos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Ojos']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'pelo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Pelo']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'peso': ('django.db.models.fields.FloatField', [], {}),
'piel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Piel']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'recurso_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'responsable': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'talle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['perfil.Talle']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'talle_camisa': ('django.db.models.fields.IntegerField', [], {}),
'talle_pantalon': ('django.db.models.fields.IntegerField', [], {}),
'trabaja_como_extra': ('django.db.models.fields.BooleanField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cities_light.city': {
'Meta': {'ordering': "['name']", 'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'feature_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'population': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'blank': 'True'}),
'search_names': ('cities_light.models.ToSearchTextField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"})
},
u'cities_light.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
u'cities_light.region': {
'Meta': {'ordering': "['name']", 'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'perfil.danza': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Danza'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.deporte': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Deporte'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.estadodientes': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'EstadoDientes'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.idioma': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Idioma'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.instrumento': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Instrumento'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.ojos': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Ojos'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.pelo': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Pelo'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.piel': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Piel'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'perfil.talle': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Talle'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'telefono.compania': {
'Meta': {'ordering': "['descripcion']", 'object_name': 'Compania'},
'descripcion': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'trabajo.direccionproductora': {
'Meta': {'object_name': 'DireccionProductora'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']"})
},
u'trabajo.eventorol': {
'Meta': {'object_name': 'EventoRol'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'rol': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Rol']", 'on_delete': 'models.PROTECT'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'trabajo.eventotrabajo': {
'Meta': {'object_name': 'EventoTrabajo'},
'barrio': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'ciudad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.City']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'codigo_postal': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'direccion': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'fecha': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'trabajo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Trabajo']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.itemportfolio': {
'Meta': {'ordering': "['-fecha']", 'object_name': 'ItemPortfolio'},
'codigo_video': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'fecha': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'video': ('django.db.models.fields.URLField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'trabajo.postulacion': {
'Meta': {'ordering': "['-rol__trabajo__fecha_ingreso', 'rol__descripcion', 'agenciado__nombre', 'agenciado__apellido']", 'unique_together': "(('agenciado', 'rol'),)", 'object_name': 'Postulacion'},
'agenciado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['agencia.Agenciado']", 'on_delete': 'models.PROTECT'}),
'estado': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rol': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Rol']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.productora': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Productora'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'trabajo.rol': {
'Meta': {'ordering': "['-trabajo__fecha_ingreso', 'descripcion']", 'unique_together': "(('trabajo', 'descripcion'),)", 'object_name': 'Rol'},
'cache': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '4', 'blank': 'True'}),
'caracteristicas': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trabajo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Trabajo']", 'on_delete': 'models.PROTECT'})
},
u'trabajo.telefonoproductora': {
'Meta': {'object_name': 'TelefonoProductora'},
'compania': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['telefono.Compania']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']"}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'trabajo.trabajo': {
'Meta': {'ordering': "['-fecha_ingreso']", 'object_name': 'Trabajo'},
'descripcion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'estado': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'fecha_ingreso': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 16, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'productora': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trabajo.Productora']", 'on_delete': 'models.PROTECT'}),
'publicado': ('django.db.models.fields.BooleanField', [], {}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['trabajo'] | [
"sebas.goldberg@gmail.com"
] | sebas.goldberg@gmail.com |
320977639dd32cd223a315df756e2a387a62bd39 | 670702ad2673ad36b6349949f626e4ed6ca79d1f | /simplesocial/groups/models.py | 6aa93c31c4d45612b688019c19223a6f7a3df120 | [] | no_license | abduyevelvin/django-social-media-project | 6ed4b593652062e5949d05b29a49d67706a6827e | 384e8a7259dab837ceb555c8387c29c062954e88 | refs/heads/master | 2022-04-15T01:18:54.640615 | 2020-04-11T21:53:59 | 2020-04-11T21:53:59 | 254,933,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | from django.db import models
from django.conf import settings
from django.urls import reverse
from django.utils.text import slugify
import misaka
from django.contrib.auth import get_user_model
User = get_user_model()
from django import template
register = template.Library()
class Group(models.Model):
name = models.CharField(max_length=255, unique=True)
slug = models.SlugField(allow_unicode=True, unique=True)
description = models.TextField(blank=True, default='')
description_html = models.TextField(editable=False, default='', blank=True)
members = models.ManyToManyField(User,through="GroupMember")
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
self.description_html = misaka.html(self.description)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("groups:single", kwargs={"slug": self.slug})
class Meta:
ordering = ["name"]
class GroupMember(models.Model):
group = models.ForeignKey(Group,related_name='memberships',on_delete=models.CASCADE)
user = models.ForeignKey(User,related_name='user_groups',on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Meta:
unique_together = ("group", "user")
| [
"elvin.abduyev@bertelsmann.de"
] | elvin.abduyev@bertelsmann.de |
bd92e49c54aae1b8b2bbf08a3841dafdafbf905f | 07f18080703f71444c0a60deec67474a9b113c71 | /algo/binary_search.py | f1ae55d0dacf1b5beac5c2809b4f5df563ba7275 | [] | no_license | ksuarz/hundred-days | 674002c57f5e0372cf2875a2314c6e8828c2f1b0 | a502ac227c8331734b65a58f67bbf5e6b982ff6b | refs/heads/master | 2020-05-18T16:53:55.328823 | 2014-03-23T03:43:21 | 2014-03-23T03:43:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | '''
A simple implementation of binary search on primitive types.
'''
def binary_search(list, target):
'''
Returns the index of the target in the array, or -1 if the item is not
contained in the list.
The binary search algorithm only works on sorted lists.
'''
if not list:
return -1
hi, lo = len(list) - 1, 0
while lo <= hi:
mid = int((hi+lo) / 2)
if list[mid] == target:
return mid
elif target > list[mid]:
lo = mid + 1
else:
hi = mid - 1
else:
return -1
| [
"ksuarz@gmail.com"
] | ksuarz@gmail.com |
4f086d0abd4fee89dc9252a3a4212d6653a80f19 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/1467. Probability of a Two Boxes Having The Same Number of Distinct Balls.py | 5becc6fac00c3d0f19e7da6a06a9d4ace6447378 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | from typing import List
from functools import lru_cache
class Solution:
def getProbability(self, balls: List[int]) -> float:
self.num = 0
N = len(balls)
space_in_each_box = sum(balls) // 2
@lru_cache(None)
def comb(x, y): # x<=y
res = 1
for i in range(x):
res *= y - i
for i in range(1, x + 1):
res //= i
return res
@lru_cache(None)
def get_permunation_number(balls_array):
# print(balls_array)
summ = sum(balls_array)
res = 1
for ball in balls_array:
res *= comb(ball, summ)
summ -= ball
# print(res)
return res
def dfs(cur_no, space_box1, colour_box1, colour_box2, balls_array):
if space_box1 == 0:
colour_box2 += N - cur_no
if colour_box1 == colour_box2:
balls_array1=balls_array
balls_array2=[balls[i]-(balls_array[i] if i<len(balls_array) else 0) for i in range(N)]
balls_array1 = tuple(sorted([x for x in balls_array1 if x!=0]))
balls_array2 = tuple(sorted([x for x in balls_array2 if x != 0]))
temp1 = get_permunation_number(balls_array1)
temp2 = get_permunation_number(balls_array2)
self.num += temp1*temp2
else:
if cur_no < N:
for i in range(min(space_box1+1, balls[cur_no]+1)):
if i == 0:
dfs(cur_no + 1, space_box1, colour_box1, colour_box2 + 1, balls_array+[0])
elif i == balls[cur_no]:
dfs(cur_no + 1, space_box1 - i, colour_box1 + 1, colour_box2, balls_array + [i])
else:
dfs(cur_no + 1, space_box1 - i, colour_box1 + 1, colour_box2 + 1, balls_array + [i])
self.den=get_permunation_number(tuple(sorted(balls)))
dfs(0, space_in_each_box, 0, 0, [])
return self.num / self.den
balls=[1,1]
balls= [2,1,1]
balls = [6, 6, 6, 6, 6, 6,6,6]
print(Solution().getProbability(balls))
| [
"19241008o"
] | 19241008o |
b90c7a68490243757448c83d51d4eae5a3c86fad | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /ppo_baseline_DMB/WORKINGON/easy_ppo_v2/storage.py | 0bd79023734c597fa209870d6297b8372a5c8253 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 7,531 | py | import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
import numpy as np
def ss(s=''):
print()
print(' ---' * 15)
print(' ---' * 15)
print()
# print(' >>>>>>>>>>>>>>>>>>>> <<<<<<<<<<<<<<<<<<<< ')
print(s)
print()
print(' ---' * 15)
print(' ---' * 15)
print()
import sys
sys.exit()
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self, num_steps, num_processes, obs_shape):
self.obs = np.zeros(shape=(num_steps + 1, num_processes, *obs_shape))
self.rewards = np.zeros(shape=(num_steps, num_processes, 1))
self.value_preds = np.zeros(shape=(num_steps + 1, num_processes, 1))
self.returns = np.zeros(shape=(num_steps + 1, num_processes, 1))
self.action_log_probs = np.zeros(shape=(num_steps, num_processes, 1))
action_shape = 1
self.actions = np.zeros(shape=(num_steps, num_processes, action_shape))
self.masks = np.ones(shape=(num_steps + 1, num_processes, 1))
self.bad_masks = np.ones(shape=(num_steps + 1, num_processes, 1))
self.num_steps = num_steps
self.step = 0
def to(self, device):
self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
self.bad_masks = self.bad_masks.to(device)
def insert(self, obs, actions, action_log_probs,
value_preds, rewards, masks, bad_masks):
np.copyto(self.obs[self.step + 1], obs)
np.copyto(self.actions[self.step], actions)
np.copyto(self.action_log_probs[self.step], action_log_probs)
np.copyto(self.value_preds[self.step], value_preds)
np.copyto(self.rewards[self.step], rewards)
np.copyto(self.masks[self.step + 1], masks)
np.copyto(self.bad_masks[self.step + 1], bad_masks)
self.step = (self.step + 1) % self.num_steps
def after_update(self):
self.obs[0].copy_(self.obs[-1])
self.masks[0].copy_(self.masks[-1])
self.bad_masks[0].copy_(self.bad_masks[-1])
def compute_returns(self,
next_value,
gamma):
self.returns[-1] = next_value
for step in reversed(range(self.rewards.size(0))):
self.returns[step] = self.returns[step + 1] * \
gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self,
advantages,
num_mini_batch=None,
mini_batch_size=None):
num_steps, num_processes = self.rewards.size()[0:2]
batch_size = num_processes * num_steps
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(
SubsetRandomSampler(range(batch_size)),
mini_batch_size,
drop_last=True)
for indices in sampler:
obs_batch = self.obs[:-1].view(-1, *self.obs.size()[2:])[indices]
actions_batch = self.actions.view(-1,
self.actions.size(-1))[indices]
value_preds_batch = self.value_preds[:-1].view(-1, 1)[indices]
return_batch = self.returns[:-1].view(-1, 1)[indices]
masks_batch = self.masks[:-1].view(-1, 1)[indices]
old_action_log_probs_batch = self.action_log_probs.view(-1,
1)[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages.view(-1, 1)[indices]
yield obs_batch, actions_batch,\
value_preds_batch, return_batch,\
masks_batch, old_action_log_probs_batch,\
adv_targ
def recurrent_generator(self, advantages, num_mini_batch):
num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(
self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(
self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
value_preds_batch = torch.stack(value_preds_batch, 1)
return_batch = torch.stack(return_batch, 1)
masks_batch = torch.stack(masks_batch, 1)
old_action_log_probs_batch = torch.stack(
old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(
recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
value_preds_batch = _flatten_helper(T, N, value_preds_batch)
return_batch = _flatten_helper(T, N, return_batch)
masks_batch = _flatten_helper(T, N, masks_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ
| [
"geemguang@gmail.com"
] | geemguang@gmail.com |
66b1f7ab8b33518cd88195b541716565248d3e8e | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/document/behaviors/__init__.py | 203ebd83e1f3d6ecb246888b2fffc589e66ad832 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 174 | py | from zope.interface import Interface
class IBaseDocument(Interface):
"""Marker interface for objects with a document like type
(og.document, ftw.mail.mail) etc."""
| [
"lukas.graf@4teamwork.ch"
] | lukas.graf@4teamwork.ch |
472e0b5db22596d4271a9578e19737f679e5bee6 | 6ff626cace2bd401b1acb9e300d4aaa7e141dffb | /tools/tb_maker/tb_maker.py | 730e375901ac7b8b785b20f585b34467cfc1451d | [
"Unlicense"
] | permissive | Bruno02468/ghdlfiddle | c8c5678ece645565faa7bf08b00fe43c4aca432f | 9d94f31e68bda6d0f2dc2122e2fc444117c68a18 | refs/heads/master | 2023-02-21T13:07:16.716351 | 2023-02-10T18:28:57 | 2023-02-10T18:28:57 | 209,203,289 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,406 | py | #!/usr/bin/env python3
# coded by bruno, 2019
# all in all, I consume a "specs" file and generates testbenches based on good
# submissions. it uses tricks such as not doing tests which good submissions
# give different results to, for instance.
# basically, the specs file gives me the following information:
# - names of inputs and outputs
# - specs for inputs (size in bits, randomize, combinations, etcetera)
# - lag time between reads
# - paths to "good" submissions
# the following is expected for context (folder I am run in)
# - Makefile (just like in a regular testbench) that prepares auxiliar files
# - said auxiliar files
# - skeleton.vhd file, basically, a regular testbench, but with the "inputs"
# and "asserts" replaced by a comment that reads "-- SKELETON".
# - gjvnq's "utils.vhd" library, current copy is here
# mind you, an example skeleton a json specs file are supplied in this folder.
# I will do the following:
# - first, I will use the "skeleton.vhd" file and the input specs to create
# a "preliminary testbench" file, tb.vhd. that testbench asserts nothing,
# it just sends the inputs and reports the outputs.
# - then, for every "good" submission known, I will compile it against the
# preliminary testbench, run it, and store the output for every input.
# - finally, I will compare the results, remove any inputs for which outputs
# differ between the "good" submissions, and generate a "final testbench",
# with assertions and all. it's up to you to zip it.
# beware, nothing here is firejailed. be wary of the submissions you choose to
# run, or firejail me as a whole!
# some special strings
# marker strings the generated testbench prints, so the runner can grep
# the GHDL output and classify results
BAD = "ghdlfiddle:BAD"
GOOD = "ghdlfiddle:GOOD"
DEBUG = "ghdlfiddle:DEBUG"
# printed by the testbench when the whole stimulus sequence has run
FINISHED = GOOD + " -- Testbench finished executing."
# filenames used by the flow: the generated testbench, the submission
# under test (copied in by vhdl_run), and the skeleton template
TB = "tb.vhd"
TARGET = "target.vhd"
SKELETON = "skeleton.vhd"
# marker line inside skeleton.vhd that vhdl_fill replaces with stimulus code
SKEL_LINE = "-- SKELETON HERE"
# shell commands for the GHDL analyse / elaborate / run / cleanup cycle
PREPARE = "make setup >/dev/null"
ANALYSE = "ghdl -a %s; ghdl -a %s" % (TARGET, TB)
ELABORATE = "ghdl -e tb"
RUN = "ghdl -r tb 2>&1"
CLEANUP = "rm -f *.o *.cf target.vhd tb"
# now, import some stuff!
import sys, os, re, json, random
from itertools import product
# auxiliary function to generate a random bitstring of length n
def bitstring(n):
  """Return a uniformly random string of n '0'/'1' characters."""
  # join is linear; the original repeated "+=" concatenation was quadratic
  # and looped forever for negative n (len(b) never equals n)
  return "".join(str(random.randint(0, 1)) for _ in range(n))
# auxiliary function to represent binary values in VHDL
def vhdl_binary(bits):
  """Format a bit string as a VHDL literal.

  A single bit becomes a std_logic literal ('0'); any other length
  becomes a std_logic_vector literal ("0101").
  """
  if len(bits) == 1:
    return "'%s'" % bits
  return '"%s"' % bits
# auxiliary function to wait
def vhdl_wait(delay):
  """Return a newline-terminated VHDL "wait for" statement for *delay*."""
  return "wait for {};\n".format(delay)
# auxiliary function to turn an input dict into a wait-guarded series of
# assignments, and asserts their values if a dict of outputs is given
def vhdl_assign(ins, delay, outs_names, expects=None):
  """Generate one VHDL stimulus step as a string.

  Waits for `delay`, drives every input in `ins` (signal name -> bit
  string), waits for `delay` again, then either asserts each expected
  output (when `expects` is given: final testbench) or merely reports
  the outputs named in `outs_names` as a JSON-ish line (preliminary
  testbench).  The emitted VHDL calls a bin() helper -- presumably from
  the utils.vhd library mentioned in the header; confirm there.
  """
  code = vhdl_wait(delay)
  for name, bits in ins.items():
    code += name + " <= " + vhdl_binary(bits) + ";\n"
  code += vhdl_wait(delay)
  # VHDL string expression that renders to a single-quoted, dict-shaped
  # report of every output value (parsed back by vhdl_run's regex)
  outs_report = "{" + ",".join(["'%s': '\"&bin(%s)&\"'" % (name, name)
                                for name in outs_names]) + "}";
  if expects:
    # if a list of expectations is given, I'll create asserts, for we are
    # making the final testbench
    for name, bits in expects.items():
      code += "assert (%s = %s)\nreport " % (name, vhdl_binary(bits))
      # the literal "\\n" sequences are rewritten to VHDL LF concatenation
      # later, by vhdl_fill
      code += (("\" -- %s --\\n · with inputs: %s\\n · with outputs: %s"
                + "\\n · expected %s to "
                + "be %s, got %s!\\n\";\n")
               % (BAD, str(ins), outs_report, name, bits,
                  "\"&bin(" + name + ")&\""))
  else:
    # if a list of expectations is not given, I'll merely have the testbench
    # print out the outputs as a JSON line
    code += "report \"" + outs_report + "\";\n"
  code += "\n"
  return code
# auxiliary function to put a series of assignments (with or without asserts)
# into the skeleton, generating a tb.vhd
def vhdl_fill(ins_list, delay, expects_list=None, outs_names=None):
  """Build tb.vhd by splicing generated stimulus into skeleton.vhd.

  ins_list: list of input dicts, one per stimulus step.
  expects_list: optional matching list of expected-output dicts; when
  given, the generated testbench asserts them (final testbench),
  otherwise it only reports outputs (preliminary testbench).
  Side effects: copies SKELETON over TB, replaces the SKEL_LINE marker
  with the stimulus code, and rewrites every literal "\\n" into a VHDL
  LF concatenation so multi-line reports stay on one source line.
  """
  contents = ""
  for i in range(len(ins_list)):
    ins = ins_list[i]
    expects = expects_list[i] if expects_list else None
    contents += vhdl_assign(ins, delay, outs_names, expects)
  # terminal report + final wait so the simulation stops cleanly
  contents += "report \"%s\";\nwait;\n" % (FINISHED,)
  os.system("cp %s %s" % (SKELETON, TB))
  with open(TB, "r") as f:
    skel = f.read()
  filled = skel.replace(SKEL_LINE, contents).replace("\\n", "\"&LF&\"")
  with open(TB, "w") as f:
    f.write(filled)
# auxiliary function to run a certain "good" assignment and get the outputs
# we expect the tb.vhd to be ready
def vhdl_run(assignment):
  """Compile `assignment` against the current tb.vhd, run it under GHDL,
  and return the list of reported output dicts (one per stimulus step).

  Expects tb.vhd to have been generated already (see vhdl_fill).  The
  testbench reports outputs as single-quoted, dict-shaped lines; these
  are extracted with a regex and parsed as JSON after swapping quotes.
  NOTE: nothing here is sandboxed -- the submission's build is executed
  directly through the shell (see the warning in the file header).
  """
  # first, run and get the output
  os.system("cp %s %s" % (assignment, TARGET))
  os.system(";".join([PREPARE, ANALYSE, ELABORATE]))
  run_output = os.popen(RUN).read()
  os.system(CLEANUP)
  # now, get all outputs
  outs = []
  for json_string in re.findall(r"{[^}]*}", run_output):
    outs.append(json.loads(json_string.replace("'", "\"")))
  return outs
# get to know the specs
if len(sys.argv) < 2:
  print("Tell me the specs JSON file!")
  sys.exit(0)
with open(sys.argv[1], "r") as specfile:
  specs = json.load(specfile)
print("Specs read! Generating preliminary...")

# generate the inputs, first by making all the possible values...
values = {}
for name, details in specs["input_sets"].items():
  # start from the combinations that must be tested, then pad with random
  # bitstrings until the requested number of extra values is reached
  values[name] = set(details["must_happen"])
  must_have = len(values[name]) + details["randomized"]
  while len(values[name]) != must_have:
    values[name].add(bitstring(details["size"]))

# now, combine them! how elegant... and also make the preliminary testbench
inputs = [dict(zip(values.keys(), l)) for l in product(*values.values())]
vhdl_fill(inputs, specs["lag"], None, specs["outputs"])
print("Done! Total inputs: %s." % (str(len(inputs)),))

# and run every "good" assignment against it
outs = []
for goodname in specs["run_against"]:
  print("Running against %s..." % (goodname,))
  outs.append(vhdl_run(goodname))
if not outs:
  # without at least one good submission there are no expected outputs to
  # compare against (the original crashed with an IndexError below)
  print("No good submissions to run against!")
  sys.exit(1)

# now, remove inputs for which good assignments gave different outputs
# (the original test had "or True" appended, which disabled the
# disagreement filtering entirely -- leftover debugging code)
disagreements = 0
final_ins = []
final_outs = []
for i in range(len(inputs)):
  opinions = [dude[i] for dude in outs]
  if all(x == opinions[0] for x in opinions):
    final_ins.append(inputs[i])
    final_outs.append(opinions[0])
  else:
    disagreements += 1
print("Removed %s disagreements." % (str(disagreements),))

# and for our final trick, generate the testbench
vhdl_fill(final_ins, specs["lag"], final_outs, specs["outputs"])
print("Final testbench saved to %s!" % (TB,))
| [
"bruno@paschoalinoto.com"
] | bruno@paschoalinoto.com |
def super_sum(A):
    '''
    Takes a list A, and:
    -Halves every even number (true division, so the result may be a float)
    -Doubles every odd number
    And returns the sum of all.
    '''
    # restores the definition whose first/last lines were corrupted by
    # fused dataset metadata; semantics are unchanged
    return sum(x / 2 if x % 2 == 0 else x * 2 for x in A)
"maureengithu90@gmail.com"
] | maureengithu90@gmail.com |
a81fb41b633aa98a866ecd73ff4b9eb63c984f45 | 4705f5c0d89298c961e80b229b9819b77fb81f37 | /julian.py | 9d9dbb0be2b91754ac3ce97286f36dd8c2426e99 | [] | no_license | farahnorma/Homework5 | b92a3d8afd3c0f9f2fa0f4146b07063dbf6ce9a8 | 8de33ec522c75c5ecd09c5587e44de1b112d7d72 | refs/heads/master | 2020-09-10T19:04:28.131288 | 2019-11-15T00:02:49 | 2019-11-15T00:02:49 | 221,808,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | #Norma
#julian.py
def valid(month, day, y):
    """Return True for a valid month/day/year; print "Invalid date" and
    return False otherwise.

    Fixes the original operator-precedence bug: `month == 1 or 3 or 5 ...`
    is always truthy in Python (it parses as `(month == 1) or 3 or ...`),
    so every month was checked against 31 days and the 30-day and
    February branches were unreachable.
    """
    from calendar import isleap  # stdlib equivalent of the local is_leap()
    if y < 0:
        print("Invalid date")
        return False
    if month in (1, 3, 5, 7, 8, 10, 12):
        last_day = 31
    elif month in (4, 6, 9, 11):
        last_day = 30
    elif month == 2:
        last_day = 29 if isleap(y) else 28
    else:
        # month outside 1..12
        print("Invalid date")
        return False
    if day < 1 or day > last_day:
        print("Invalid date")
        return False
    return True
def julian(month, day, y):
    """Return the day-of-year (1..366) for a valid date, or None.

    valid() prints "Invalid date" for bad input.  The original called
    is_leap() for nothing and valid() twice, so invalid dates were
    reported twice; both redundant calls are removed here.
    """
    if valid(month, day, y) is not True:
        return None
    daynum = 31 * (month - 1) + day
    if month > 2:
        # correct for the earlier months that have fewer than 31 days;
        # in a leap year February gives one day back from March onward
        daynum = daynum - (4 * month + 23) // 10
        if is_leap(y) is True:
            daynum = daynum + 1
    return daynum
def is_leap(y):
    """Gregorian leap-year test: every 4th year, except centuries that
    are not multiples of 400."""
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
def main():
    """Prompt for a date and print its Julian day number."""
    month = int(input("Enter month: "))
    day = int(input("Enter day: "))
    y = int(input("Enter year: "))
    # the original computed julian() twice, which also doubled any
    # "Invalid date" output on bad input
    result = julian(month, day, y)
    print('Your julian date is: ', result)
main() | [
"noreply@github.com"
] | farahnorma.noreply@github.com |
13a71a23c0d1e9a332b61b7e0427ead5f585fdbe | bb443e34a83820bb3d3cf75298f82d165c14b6dd | /Chapter07_whileLoops_userInput/rollercoaster.py | 1abfdf60fccc8fd8aec17ce12abff6dc39f5952c | [] | no_license | ashishkchaturvedi/Python-Practice-Solutions | 9e61fe2d53b4ed4366947aab1db783c31247ddb6 | 64f5557b132dbe4700e999ae5fa006dda31f27d0 | refs/heads/master | 2020-07-23T04:40:31.327415 | 2019-09-16T00:28:42 | 2019-09-16T00:28:42 | 207,448,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | '''
Created on Sep 11, 2019
@author: achaturvedi
'''
# minimum-height check for a ride (introductory user-input exercise)
height = input("how tall are you in inches?")
height = int(height)  # input() returns a string; convert before comparing
# 36 inches is the ride's height threshold
if height >= 36:
    print("\nYou are tall enough to ride")
else:
print("\nYou will be able to ride when you are little taller") | [
"achaturvedi@agmednet.com"
] | achaturvedi@agmednet.com |
9bfeaa039908e00614a520fcf51a8d6beca7c8e9 | 40c029a40c3a1ff3f254bf18c1ceb11569ab0ceb | /tes2.py | b732fdcf0cc83a7e472a0101ded31391a4c63037 | [] | no_license | Tri334/batik-classification-resnet | 02167943bbbdf6133fc7608a4cbfc9f7c27c31d3 | 6f09964cde28eb509557d0f58ae243f468034649 | refs/heads/main | 2023-09-03T12:19:58.238525 | 2021-11-05T14:02:58 | 2021-11-05T14:02:58 | 411,088,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | from new_main import *
# preprocessing pipelines: 224x224 input with ImageNet normalization
# stats (the usual setup for pretrained ResNet backbones)
transform = {
    'train': transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
}
# dataset selection; path_new/path_old and all helpers below come from
# the star import of new_main
path = path_new
select_fold = 1  # cross-validation fold to use
data = ['original','balance_patch','non_balance_patch']
selected_data = data[0]
# 'join' is forwarded to coba_train below; only the patch variants join
if selected_data == 'original':
    join = False
else:
    join = True
# pick paths, class list and data folders for the new or old layout
# NOTE(review): this branch fires only when path_new == 'new_data' --
# confirm the value of path_new in new_main
if path == 'new_data':
    path_mod = 'new/fold '+str(select_fold)+'/'
    path_save = naming_model('new',select_fold)
    kelas = getKelas(path)
    data_folder = get_folder(path,select_fold)
else:
    path_mod = 'old/fold '+str(select_fold)+'/'
    path_save = naming_model('old',select_fold)
    kelas = getKelas(path_old)
    data_folder = get_folder(path_old,select_fold)
print(kelas)
print(path_save)
# data_folder[0]/[1] appear to hold the train/test splits keyed by data
# variant -- presumably; verify against get_folder in new_main
data = {
    'train': data_folder[0][selected_data],
    'test': data_folder[1][selected_data],
}
# hyper-parameters and feature toggles for this run
batch_size = 100
pretrain = True
sampler = False
dropout = False
weight_decay = False
weight_entropy = False
data_loader = data_load(batch_size,
                        train=data['train'],
                        val=data['test'],
                        transform=transform,
                        sampler=sampler)
config = config_model(dropout=dropout, lr=0.001,
                      weight_decay=weight_decay,
                      kelas=kelas,
                      pretrained=pretrain,
                      data_loader=data_loader, freeze=False)
print('Validasi: ' + str(data_loader['sizes']['val']))
print('Training: ' + str(data_loader['sizes']['train']))
epoch = 60
to_train = False  # flip to True to train; as committed only checking runs
check = True
num_mod = 16
#train model
if to_train:
    coba_train(config, transform,
               data_loader, epoch=epoch,
               pretrained=pretrain,
               sampler=sampler,
               dropout=dropout,
               batch_sizes=batch_size,
               sliced=data,
               weight_entropy=weight_entropy,
               weight_decay=weight_decay, path_save=path_save,
               join=join)
#check model
if check:
    check_model(path_mod,num_mod)
| [
"meranggi04@student.ub.ac.id"
] | meranggi04@student.ub.ac.id |
eee7ec285cd7388f875f56772700086dae4b10e5 | ed603eeff3333dc906c54777d10912c74f27a348 | /t1.py | b53257a18688344a4081bafd46ac02dabdf7de3a | [] | no_license | fzea01/deteccar_pp2 | 3a0f399bda1f50f871e7b5c57b77db707768cc5a | 39c23ef9aeb63ae78fe5ae0893ab6efac4083255 | refs/heads/master | 2020-04-27T07:44:41.666689 | 2019-05-08T20:50:53 | 2019-05-08T20:50:53 | 174,146,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Dense (Farneback) optical flow over a traffic video.  For every frame the
# script shows a blocking quiver plot of sampled motion vectors and an
# HSV-encoded flow visualisation (hue = direction, value = magnitude).
cap = cv2.VideoCapture('./videos/cars.MP4')
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255  # full saturation so hue/value carry the flow information
[R,C]=prvs.shape
count=0
while (1):
    ret, frame2 = cap.read()
    # NOTE(review): frame2 is None once the video ends (ret is False) and
    # cvtColor will then raise -- confirm Esc is the only intended exit.
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)  # shadows builtin next()
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 2, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # Sample grid every 5 px -- assumes a 640x480 video; TODO confirm.
    RV=np.arange(5,480,5)
    CV=np.arange(5,640,5)
    # These give arrays of points to sample at increments of 5
    if count==0:
        count =1 #so that the following creation is only done once
        [Y,X]=np.meshgrid(CV,RV)
        # makes an x and y array of the points specified at sample increments
    temp =mag[np.ix_(RV,CV)]
    # this makes a temp array that stores the magnitude of flow at each of the sample points
    # NOTE(review): `temp` holds real-valued magnitudes, so temp.imag is all
    # zeros and Xdist below is always zero -- verify intent.
    motionvectors=np.array((Y[:],X[:],Y[:]+temp.real[:],X[:]+temp.imag[:]))
    Ydist=motionvectors[0,:,:]- motionvectors[2,:,:]
    Xdist=motionvectors[1,:,:]- motionvectors[3,:,:]
    Xoriginal=X-Xdist
    Yoriginal=Y-Ydist
    # A new blocking matplotlib figure is opened for every frame; the video
    # only advances after the window is closed.
    plot2 = plt.figure()
    plt.quiver(Xoriginal, Yoriginal, X, Y,
               color='Teal',
               headlength=7)
    plt.title('Quiver Plot, Single Colour')
    plt.show(plot2)
    # Encode flow as an image: hue = angle (degrees/2), value = magnitude.
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow('frame2', bgr)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc quits
        break
    prvs = next
cap.release()
cv2.destroyAllWindows()
| [
"nuttahpon_best@hotmail.com"
] | nuttahpon_best@hotmail.com |
afd9cd1e18d33e0a540173296f0d60db260c1efb | fd83602e34b8bde0f179753defa314859e9c5d9d | /指导书及实例/color.py | d63d927a6af30a5a3f6c1e9b6649af820ed1bea0 | [] | no_license | HIT-five/2020Picture_project | 5467ccca8b2752954429a10c8ff4444c357e24f2 | 4c17d88b0d0c1b7a652cb56ce18c0d9ae7808501 | refs/heads/master | 2022-11-16T00:51:02.041817 | 2020-07-13T19:14:51 | 2020-07-13T19:14:51 | 279,364,686 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import cv2 as cv
# Demonstrates OpenCV colour-space handling: shows the source image, its
# grayscale conversion, the three HSV planes, and the three BGR channels.
# Each cv.waitKey() blocks until a key is pressed.
filename = r'F:\test\images\season.jpg'
img = cv.imread(filename)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('source image', img)
cv.imshow('gray', gray)
cv.waitKey()
# HSV planes: hue, saturation and value shown as separate grayscale images.
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
cv.imshow("Hue", hsv[:, :, 0])
cv.imshow("Saturation", hsv[:, :, 1])
cv.imshow("Value", hsv[:, :, 2])
cv.waitKey()
# OpenCV stores channels in blue-green-red order.
cv.imshow("Blue", img[:, :, 0])
cv.imshow("Green", img[:, :, 1])
cv.imshow("Red", img[:, :, 2])
cv.waitKey()
cv.destroyAllWindows()
"958476238@qq.com"
] | 958476238@qq.com |
c674165dd2f415ded8e82f1710a6d698e2b5271e | 5ba6f0053cbd1156750124ad7c5def1c99d3457a | /SpMarket/apps/goods/search_indexes.py | 11028713b3ebe3a6d46412d24ca593d221f271c3 | [] | no_license | TangTT-xbb/python1116 | 208c0d1aff7cc2610daf408e02eabecea94b98e6 | 447a661f0fc72dacc084c80a17268fe0660e927d | refs/heads/main | 2023-04-22T17:16:36.714672 | 2021-05-07T01:42:13 | 2021-05-07T01:42:13 | 313,237,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # 导入全文检索框架索引类
from haystack import indexes
from goods.models import GoodsSKU
class GoodsSKUSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack full-text search index for the GoodsSKU model."""
    # Main searchable content; use_template=True means the field content is
    # rendered from a template (this designates the primary index field).
    text = indexes.CharField(document=True, use_template=True)
    # Return the model class this index searches against.
    def get_model(self):
        return GoodsSKU
    # Queryset used when (re)building the index; soft-deleted SKUs are excluded.
    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        return self.get_model().objects.filter(is_delete=False)
        # return self.get_model().objects.all()
| [
"1349905607@qq.com"
] | 1349905607@qq.com |
3a6ecf79f1d71f56398219969add0d7eaa07bd92 | 908bba8bdc246d665d6b22e3a8b91720c34054e7 | /whatsapp-sentiment.py | e7af36895172fa9f736ffba1bc4ba56d53798139 | [
"Apache-2.0"
] | permissive | yogithesymbian/whatsapp-sentiments | 24874ab055522b8733c500a104d218b205c054a8 | d15d4a44282ecfc9b28fc0d16f2714f0f6ed7d2b | refs/heads/master | 2020-05-25T00:33:48.165911 | 2017-03-19T17:27:15 | 2017-03-19T17:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | from textblob import TextBlob
from plotly.offline import plot
import plotly.graph_objs as go
import random
# Score per-message sentiment for two WhatsApp chat participants and plot
# both series as scatter traces (rendered offline with plotly).
user1 = "Bob"
user2 = 'Alice'
# WhatsApp export format: "<timestamp> - <name>: <message>" per line.
with open('chat_sample.txt', 'r+') as f:
    samples = f.readlines()
d = {user1:[], user2:[]}
for line in samples:
    time, *text = line.split('-')
    text = ''.join(text)
    name, *chat = text.split(':')
    t = TextBlob(''.join(chat))
    name = name.strip()
    if name == user1 or name == user2:
        # TextBlob polarity is in [-1, 1]; negative means negative sentiment.
        d[name].append(t.sentiment.polarity)
trace1 = go.Scatter(
    y = d[user1][:9000],
    name = user1,
    mode = 'markers',
    # Random permutation used purely for marker colouring.
    marker=dict(
        size='8',
        colorscale='Picnic',
        color = random.sample(range(9000),9000),
    )
)
trace2 = go.Scatter(
    y = d[user2],
    name = user2,
    mode = 'markers',
    marker=dict(
        size='7',
        color = random.sample(range(8000), 8000),
        colorscale='Electric',
    )
)
data = [trace1, trace2]
plot(data)
"wasi0013@gmail.com"
] | wasi0013@gmail.com |
a57890656dff0d9937db97759a303f9fd7159333 | 5d136796e63aad250c0f61c09a296b1210044d86 | /Program166.py | dfbba6a2c5d83984abc948d26493f97f33097251 | [] | no_license | ArunKarthi-Git/pythonProject | b2fe041fba91c785870df8754bae530c3443d498 | 31f917dd8197f0b684a94be29488366aa85544cd | refs/heads/master | 2023-08-10T18:27:01.379898 | 2021-09-21T14:42:17 | 2021-09-21T14:42:17 | 405,416,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | class BigThree:
    def __init__(self,a,b,c):
        """Store the three candidate values to be compared by callBig()."""
        self.a=a
        self.b=b
        self.c=c
def callBig(self):
d=self.a if self.a>self.b else self.b if self.a >self.c else self.c
return d
# Read three integers from the user and print the largest one.
a=int(input("Enter a 1st No."))
b=int(input("Enter a 2nd No."))
c=int(input("Enter a 3rd No."))
big=BigThree(a,b,c)
print(big.callBig())
| [
"arunkarthimail@gmail.com"
] | arunkarthimail@gmail.com |
85be4894c22fe78fd141820268bb10ace74cc3f9 | 66adcd42f0eae6e708ad493a7e9367385ea1a0a5 | /exercises/exe81 - 90/exe084.py | aadd904a5c92104b82f5be6a6cbd6ecda106eb28 | [
"MIT"
] | permissive | thomas-rohde/Classes-Python | 9850227b2c1aff15ae77619ba0cc8875b0558a6e | f862995510b7aabf68bc14aecf815f597034d8a1 | refs/heads/main | 2023-05-20T12:49:46.407674 | 2021-06-06T22:43:21 | 2021-06-06T22:43:21 | 357,386,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | nu = [[], []]
# Read seven integers, partitioning them into evens (nu[0]) and odds (nu[1]),
# then print both partitions in ascending order.
n = 0
for c in range(0, 7):
    n = int(input('Digite um valor: '))
    if n % 2 == 0:
        nu[0].append(n)
    else:
        nu[1].append(n)
nu[0].sort()
nu[1].sort()
print(f'''A lista par é {nu[0]},
Enquanto a lista ímpar é {nu[1]}''')
"thomasrohde45@gmail.com"
] | thomasrohde45@gmail.com |
9defdbe58eb8621560cf99e568ba0bc43cd9c862 | f3ef9e48f78e2b023c0c8d648dd34037d1daf741 | /Topology.py | bbc663bdd23e7adc097b299573c797b0fde8111f | [] | no_license | tngo0508/cpsc558-final | 6797f0d453bbc64742eb1e39912648aee4e9957b | 722c2b3d26c6c261520a7b41909db58241e8effc | refs/heads/master | 2022-09-14T05:58:30.227804 | 2020-05-12T00:48:54 | 2020-05-12T00:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,490 | py |
from Logger import Logger
import graphviz
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.link import TCLink
from mininet.node import OVSSwitch
import os
import subprocess
class Topology(Topo):
    """
    Topology initializer for CPSC 558 Final Project

    Builds a star topology around one OVS switch: a file server, a video
    server, a "tattle tail" monitoring host, and two file / two video
    clients, each with deterministic sequential MAC and IP addresses.
    """
    # Mininet handle and logger, injected after construction.
    __net = None
    __logger = None
    # Node names and (post-build) node instance handles.
    __main_switch_name = "S1"
    __main_switch_ip = None
    __main_switch_instance = None
    __file_server_name = "FS"
    __file_server_instance = None
    __video_server_name = "VS"
    __video_server_instance = None
    __tattle_tail_name = "TT"
    __tattle_tail_instance = None
    __file_client_name_prefix = "FC"
    __file_client_hosts_count = 2
    __file_client_names = None
    __file_client_instances = None
    __video_client_name_prefix = "VC"
    __video_client_hosts_count = 2
    __video_client_names = None
    __video_client_instances = None
    __mac_address_base = "00:00:00:00:00:" # Obviously like 255 host macs with this scheme
    # NOTE(review): starts at 1 here, but reset_mac_address_counter() resets
    # to 0 -- build() resets first, so only the 0-based sequence is used.
    __mac_address_counter = 1
    __ip_address_base = "10.0.0."
    __ip_counter = 1
    # Link-shaping parameters (only applied when limiting is enabled in
    # add_link_to_main_switch, which currently disables it).
    __BANDWIDTH_LIMIT_SERVERS_MBPS = 1000
    __BANDWIDTH_LIMIT_SERVERS_DELAY = "0.5ms"
    __BANDWIDTH_LIMIT_CLIENTS_MBPS = 100
    __BANDWIDTH_LIMIT_CLIENTS_DELAY = "1ms"
    def __init__(self, logger):
        """Remember the project logger and let Topo drive build()."""
        self.__logger = logger
        # type: Logger
        super(Topology, self).__init__()
    # Should be run after build
    def set_net(self, net: Mininet):
        """Attach the started Mininet instance and resolve node handles."""
        self.__net = net
        self.consume_instances()
    def build(self):
        """Create all switches, hosts and links (called by Mininet/Topo)."""
        log = self.__logger.get()
        self.reset_mac_address_counter()
        self.reset_ip_counter()
        #
        log.info("Clearing topology")
        self.__file_client_names = list()
        self.__video_client_names = list()
        # Create our main switch
        log.info("Creating main switch")
        self.add_switch_with_addresses(self.__main_switch_name)
        # Create file server host
        log.info("Creating file server host")
        self.add_host_with_addresses(self.__file_server_name)
        """
        self.addLink(
            self.__main_switch_name,
            self.__file_server_name,
            intfName1="switch-fs",
            cls=TCLink, bw=self.__BANDWIDTH_LIMIT_SERVERS_MBPS, delay=self.__BANDWIDTH_LIMIT_SERVERS_DELAY
        )
        """
        self.add_link_to_main_switch(
            node_name=self.__file_server_name,
            interface_name="switch-fs",
            preferred_mbps=self.__BANDWIDTH_LIMIT_SERVERS_MBPS,
            preferred_delay=self.__BANDWIDTH_LIMIT_SERVERS_DELAY
        )
        # Create video server host
        log.info("Creating video server host")
        self.add_host_with_addresses(self.__video_server_name)
        """
        self.addLink(
            self.__main_switch_name,
            self.__video_server_name,
            intfName1="switch-vs",
            cls=TCLink, bw=self.__BANDWIDTH_LIMIT_SERVERS_MBPS, delay=self.__BANDWIDTH_LIMIT_SERVERS_DELAY
        )
        """
        self.add_link_to_main_switch(
            node_name=self.__video_server_name,
            interface_name="switch-vs",
            preferred_mbps=self.__BANDWIDTH_LIMIT_SERVERS_MBPS,
            preferred_delay=self.__BANDWIDTH_LIMIT_SERVERS_DELAY
        )
        # Create our tattle tail host
        log.info("Creating tattle tail host")
        self.add_host_with_addresses(self.__tattle_tail_name)
        """
        self.addLink(
            self.__main_switch_name, self.__tattle_tail_name,
            intfName1="switch-tt",
            cls=TCLink, bw=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS, delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
        )
        """
        self.add_link_to_main_switch(
            node_name=self.__tattle_tail_name,
            interface_name="switch-tt",
            preferred_mbps=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS,
            preferred_delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
        )
        # Create file clients
        log.info("Creating file clients")
        for i in range(self.__file_client_hosts_count):
            client_name = self.__file_client_name_prefix + str(i + 1)
            log.info("Creating file client: " + client_name)
            self.add_host_with_addresses(client_name)
            """
            self.addLink(
                self.__main_switch_name,
                client_name,
                intfName1="switch-" + client_name,
                cls=TCLink, bw=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS, delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
            )
            """
            self.add_link_to_main_switch(
                node_name=client_name,
                interface_name="switch-" + client_name,
                preferred_mbps=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS,
                preferred_delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
            )
            self.__file_client_names.append(client_name)
        # Create video clients
        log.info("Creating video clients")
        for i in range(self.__video_client_hosts_count):
            client_name = self.__video_client_name_prefix + str(i + 1)
            log.info("Creating video client: " + client_name)
            self.add_host_with_addresses(client_name)
            """
            self.addLink(
                self.__main_switch_name,
                client_name,
                intfName1="switch-" + client_name,
                cls=TCLink, bw=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS, delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
            )
            """
            self.add_link_to_main_switch(
                node_name=client_name,
                interface_name="switch-" + client_name,
                preferred_mbps=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS,
                preferred_delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
            )
            self.__video_client_names.append(client_name)
        #
        log.info("Finished building topology")
    def reset_ip_counter(self):
        """Restart IP allocation at 10.0.0.1."""
        self.__ip_counter = 1
    def get_next_ip(self):
        """Return the next sequential IP address (10.0.0.N)."""
        ip = self.__ip_address_base + str(self.__ip_counter)
        self.__ip_counter += 1
        return ip
    def reset_mac_address_counter(self):
        """Restart MAC allocation at ...:00."""
        self.__mac_address_counter = 0
    def get_next_mac_address(self):
        """Return the next sequential MAC address (zero-padded hex suffix)."""
        # Works for up to 255 hosts
        suffix_int = self.__mac_address_counter
        self.__mac_address_counter += 1
        suffix = hex(suffix_int)[2:]
        if len(suffix) == 1:
            suffix = "0" + suffix
        mac = self.__mac_address_base
        mac += suffix
        return mac
    def add_switch_with_addresses(self, name):
        """Add an OVS switch with the next sequential MAC/IP pair."""
        mac = self.get_next_mac_address()
        ip = self.get_next_ip()
        self.__logger.get().info("Adding switch with " + ip + "; " + mac)
        return self.addSwitch(
            name,
            mac=mac,
            ip=ip,
            cls=OVSSwitch
        )
    def add_host_with_addresses(self, name):
        """Add a host with the next sequential MAC/IP pair."""
        mac = self.get_next_mac_address()
        ip = self.get_next_ip()
        self.__logger.get().info("Adding host with " + ip + "; " + mac)
        host = self.addHost(
            name,
            mac=mac,
            ip=ip
        )
        return host
    def add_link_to_main_switch(self, node_name, interface_name=None, preferred_mbps=None, preferred_delay=None):
        """Link a node to the main switch, optionally with TC shaping.

        Shaping is currently hard-disabled (disable_limiting = True), so the
        preferred_* parameters are accepted but ignored.
        """
        # Yes disable because some forum said this might interfere with vswitch queue stuff
        disable_limiting = True
        if disable_limiting is False and (preferred_mbps is not None or preferred_delay is not None):
            # NOTE(review): uses the CLIENTS limits regardless of the
            # preferred_* arguments passed in -- confirm intended.
            self.addLink(
                self.__main_switch_name,
                node_name,
                intfName1=interface_name,
                cls=TCLink,
                bw=self.__BANDWIDTH_LIMIT_CLIENTS_MBPS,
                delay=self.__BANDWIDTH_LIMIT_CLIENTS_DELAY
            )
        else:
            self.addLink(
                self.__main_switch_name,
                node_name,
                intfName1=interface_name
            )
    def get_file_server_instance(self):
        """Return the file server node (valid after set_net)."""
        return self.__file_server_instance
    def get_video_server_instance(self):
        """Return the video server node (valid after set_net)."""
        return self.__video_server_instance
    def get_tattle_tail_instance(self):
        """Return the tattle-tail node (valid after set_net)."""
        return self.__tattle_tail_instance
    def render_dotgraph(self, view=False):
        """Render the topology graph to render/topology.png via graphviz."""
        dot = self.generate_dotgraph()
        file_path = os.path.join(
            os.path.dirname(__file__),
            "render",
            "topology"
        )
        dot.render(file_path, format="png", view=view)
    def generate_dotgraph(self):
        """Build a graphviz Graph of switches, hosts, links and rank hints."""
        s = ""
        for switch_name in self.switches():
            s += "\n\t" + switch_name
            s += "["
            s += "label=\"" + self.get_node_label(switch_name) + "\""
            s += "];"
        for host_name in self.hosts():
            s += "\n\t" + host_name
            s += "["
            s += "label=\"" + self.get_node_label(host_name) + "\""
            s += "];"
        for link in self.links():
            first, second = link
            s += "\n\t" + first + " -- " + second
        # Server ranks
        h = [self.__video_server_name]
        h.extend([self.__file_server_name])
        s += "\n\t{rank=source;" + ";".join(h) + "};"
        # Client rank
        hosts_names = []
        for h in self.__video_client_names:
            hosts_names.append(h)
        for h in self.__file_client_names:
            hosts_names.append(h)
        s += "\n\t{rank=same;" + ";".join(hosts_names) + "};"
        # Tattle tail rank
        s += "\n\t{rank=sink;" + self.__tattle_tail_name + "};"
        dot = graphviz.Graph(name="CPSC 558 Final Project", body=s.split("\n"))
        return dot
    def get_node_label(self, node_name):
        """Compose a graphviz label: name, role, and (when known) IP."""
        if node_name == self.__video_server_name:
            under = "video server"
        elif node_name == self.__file_server_name:
            under = "file server"
        elif node_name in self.__video_client_names:
            under = "video client"
        elif node_name in self.__file_client_names:
            under = "file client"
        elif node_name == self.__tattle_tail_name:
            under = "tattle tail"
        elif self.isSwitch(node_name):
            under = "switch"
        else:
            under = "host"
        # NOTE(review): "\\n" here is a literal backslash-n (a graphviz line
        # break) while the IP line below uses a real newline -- confirm the
        # mixed escaping is intentional.
        label = node_name + "\\n(" + under + ")"
        if self.__net:
            node = self.__net.getNodeByName(node_name)
            label += "\n(" + node.IP() + ")"
        return label
    def consume_instances(self):
        """Resolve node names into live Mininet node handles."""
        log = self.__logger.get()
        log.info("Consuming instances from Mininet to Topology class")
        # Grab the switch
        self.__main_switch_instance = self.__net.nameToNode[self.__main_switch_name]
        # Grab the servers
        self.__video_server_instance = self.__net.nameToNode[self.__video_server_name]
        self.__file_server_instance = self.__net.nameToNode[self.__file_server_name]
        # Grab file client instances
        self.__file_client_instances = dict()
        for name in self.__file_client_names:
            self.__file_client_instances[name] = self.__net.nameToNode[name]
        # Grab video client instances
        self.__video_client_instances = dict()
        for name in self.__video_client_names:
            self.__video_client_instances[name] = self.__net.nameToNode[name]
        # Grab tattle tail
        self.__tattle_tail_instance = self.__net.nameToNode[self.__tattle_tail_name]
        #
        log.info("Done consuming instances")
    def get_file_client_instances(self):
        """Return the dict of file client name -> node handle."""
        return self.__file_client_instances
    def get_video_client_instances(self):
        """Return the dict of video client name -> node handle."""
        return self.__video_client_instances
    # Heavy inspiration: http://docs.openvswitch.org/en/latest/topics/dpdk/qos/
    # Also: https://github.com/mininet/mininet/pull/132
    def create_qos_queues(self):
        """Experimental: create OVS QoS queues by shelling out to ovs-vsctl.

        Runs ovs-vsctl on the host (via subprocess) rather than inside the
        switch node; kept with its exploratory logging intact.
        """
        log = self.__logger.get()
        result = self.__main_switch_instance.setup()
        log.info("Hey: " + str(self.__main_switch_instance) + "; " + str(result))
        # ovs_path = "/usr/bin/ovs-vsctl"
        ovs_path = "ovs-vsctl"
        #
        args = list([
            ovs_path,
            "--", "set", "port", "switch-fs", "qos=@newqos",
            "--", "set", "port", "switch-vs", "qos=@newqos",
            "--", "--id=@newqos", "create", "qos", "type=trtcm-policer", "queues=0=@q0,1=@q1",
            # "--", "--id=@q0", "create", "queue", "other-config:cir=41600000", "other-config:eir=0", "other-config:priority=0",
            # "--", "--id=@q1", "create", "queue", "other-config:cir=0", "other-config:eir=41600000", "other-config:priority=1"
            "--", "--id=@q0", "create", "queue", "other-config:priority=0", "other-config:maxrate=1000000",
            "--", "--id=@q1", "create", "queue", "other-config:priority=1", "other-config:maxrate=1000000"
        ])
        # Try to setup the QoS setup and its queues
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        result = p.communicate()
        # qos_id = result.splitlines()[0]
        log.info("Trying to get ovswitch stuff working ... " + str(result))
        # log.info("QoS ID is apparently: " + qos_id)
        #
        log.info("Switch interface names: " + str(self.__main_switch_instance.intfNames()))
        for intf in self.__main_switch_instance.intfList():
            log.info("Uhm interface: " + str(intf) + "; " + intf.name)
        """
        result = self.__main_switch_instance.cmd([
            ovs_path,
            "set Port %s qos=%s" % ("switch-vs", qos_id)
        ])
        log.info("Trying to hard set ports to different queues ... " + str(result))
        #
        result = self.__main_switch_instance.cmd([
            ovs_path,
            "set Port %s qos=%s" % ("'switch-fs'", "q1")
        ])
        log.info("Trying to hard set ports to different queues ... " + str(result))
        """
        result = self.__main_switch_instance.cmd([ovs_path, "list-br"])
        log.info("Executed erm ... " + str(result))
    def create_qos_queues_on_switch(self):
        """Create linux-htb QoS queues on every non-loopback switch port.

        Unlike create_qos_queues(), this runs ovs-vsctl inside the switch
        node (via cmd) and attaches the new QoS record to each interface.
        """
        log = self.__logger.get()
        self.__main_switch_instance.setup()
        # ovs_path = "/usr/bin/ovs-vsctl"
        ovs_path = "ovs-vsctl"
        # qos_type = "trtcm-policer"
        qos_type = "linux-htb"
        # Try to setup the QoS setup and its queues
        # "--", "--id=@q0", "create", "queue", "other-config:priority=0", "other-config:max-rate=100000000",
        args = list([
            ovs_path,
            "--", "--id=@newqos", "create", "qos", "type=" + qos_type, "queues=0=@q0,1=@q1",
            "--", "--id=@q0", "create", "queue", "other-config:priority=0", "other-config:max-rate=100000000",
            "--", "--id=@q1", "create", "queue", "other-config:priority=1", "other-config:max-rate=100000000"
        ])
        for intf_name in self.__main_switch_instance.intfNames():
            if intf_name != "lo":
                args += ["--", "set", "Port", intf_name, "qos=@newqos"]
        log.info("Trying to initialize Open VSwitch qos stuffs: \n%s", args)
        result = self.__main_switch_instance.cmd(args)
        qos_id = result.splitlines()[0]
        log.info("Result of OpenVSwitch init command: %s", str(result))
        log.info("QoS ID is apparently: " + qos_id)
        #
        log.info("Switch interface names: " + str(self.__main_switch_instance.intfNames()))
        result = self.__main_switch_instance.cmd([ovs_path, "-t", "ovs-vswitchd", "show"])
        log.info("Showing OpenvSwitch information: " + str(result))
        result = self.__main_switch_instance.cmd([ovs_path, "list-br"])
        log.info("Showing OpenvSwitch information: " + str(result))
| [
"Info@MikePeralta.com"
] | Info@MikePeralta.com |
971435e6c398230a141cfe52dc82313acdca9172 | 0ead8495edbecf55ad0cd8163f41f3edd9ade50b | /start.py | d1bb2ffd2e36a963f6ffb9d59cd604921ea004b9 | [] | no_license | NikiPans/googlePlayData | d6e41586270cdc1db8674e655c3509d90e350aa6 | 594cb6a6fb0afb85bcd5d6969de66f423a1b64e0 | refs/heads/master | 2020-04-06T08:06:42.305933 | 2018-11-13T00:25:07 | 2018-11-13T00:25:07 | 157,295,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # import pandas
def customsplit(string):
    """Split a CSV line on commas that are not inside double quotes.

    Quote characters themselves are removed from the output (matching the
    original behaviour).  Returns a list of field strings; an empty input
    yields [''].

    Bug fix: the original replaced field separators with a "****" sentinel
    and then split on it, which corrupted any field that legitimately
    contained "****".  Building the field list directly avoids the sentinel.
    """
    fields = [""]
    quoteopen = False
    for char in string:
        if char == '"':
            quoteopen = not quoteopen  # toggle quoted state; drop the quote
        elif char == "," and not quoteopen:
            fields.append("")          # field boundary outside quotes
        else:
            fields[-1] += char
    return fields
def tofloat(val):
    """Return *val* converted to float when possible, otherwise unchanged.

    Bug fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit; only conversion failures are caught now.
    """
    try:
        return float(val)
    except (TypeError, ValueError):
        # Non-numeric text (or None, etc.) passes through untouched.
        return val
# gplay = pandas.read_csv ("./playStore/googleplaystore.csv")
# print (dir(gplay))
# print (gplay.Rating)
# Path of the Google Play Store dataset CSV, relative to the working directory.
address = "./playStore/googleplaystore.csv"
def convertcsvtolist(address):
    """Parse the CSV file at *address* into a list of row dicts.

    The first line is treated as the header row; every following non-empty
    line becomes a dict mapping header -> value, with numeric-looking values
    converted to float via tofloat() and quoted fields handled by
    customsplit().

    :raises ValueError: when a row has more fields than there are headers.
        (Bug fix: the original caught the resulting IndexError with a bare
        ``except:`` and called exit(), killing the whole interpreter.)
    """
    # Context manager guarantees the file is closed even on a parse error.
    with open(address, "r") as table:
        tb = table.read()
    rows = tb.split("\n")
    headers = rows[0].strip().split(",")
    lst = []
    for rowraw in rows[1:]:
        rowraw = rowraw.strip()
        if not rowraw:
            continue  # skip blank lines (e.g. the trailing-newline artifact)
        fields = customsplit(rowraw)
        if len(fields) > len(headers):
            raise ValueError(
                "row has %d fields but only %d headers: %r"
                % (len(fields), len(headers), rowraw))
        lst.append({headers[i]: tofloat(c) for i, c in enumerate(fields)})
    return lst
# Parse the dataset and dump the resulting list of row dicts to stdout.
outlop = convertcsvtolist(address)
print(outlop)
"shashank.gopikrishna@gmail.com"
] | shashank.gopikrishna@gmail.com |
1d5bb223c6121a9c989bd2d35276a8ad1523baef | b2c3c8a45282c30821cc8c3d6a9f8b6b652ee182 | /Code/data_handlers/generators.py | 601173d45ef5111d81b5ef71272c30479e897fa6 | [] | no_license | TimRepke/adaptive-landscape | 1735dde96ce1c737c028cb53973c1e9ae07dd236 | 90025a112a5c7cec4d53485cefa49a2bf6f009c8 | refs/heads/master | 2023-03-20T10:02:37.347225 | 2021-03-16T19:33:52 | 2021-03-16T19:33:52 | 330,475,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,386 | py | import random
from datasets import DataSet
from typing import Union, Dict, List, Tuple
import numpy as np
import time
import enum
import logging
# Module-level logger for the data generators.
logger = logging.getLogger('generators')
class SamplingReference(enum.Enum):
    """Basis used by temporal() when scaling partial label distributions."""
    # Scale relative to the number of items available per label.
    LABEL_COUNT = enum.auto()
    # Scale relative to the total item count (default).
    ITEM_COUNT = enum.auto()
def temporal(dataset: DataSet,
             intervals: Union[int, List[float]] = None,
             label_dist: List[Dict[int, float]] = None,
             target_size: Union[int, float] = None,
             auto_fill: bool = True,
             sampling_reference: SamplingReference = SamplingReference.ITEM_COUNT,
             rand_seed: str = '43') -> Tuple[List[List[np.array]], List[List[int]]]:
    """
    TODO rewrite, so it's not in-memory
    If some distribution information is not provided, equal distribution is assumed
    :param dataset: The dataset to sample from
    :param intervals: Either the number of intervals or list of floats of length intervals and distribution
                      If None, it's derived from label_dist
    :param label_dist: Iterable of length intervals, with dict describing label distribution in each interval
    :param target_size: Size of the final dataset
    :param auto_fill: Automatically expand on labels not defined in label_dist
    :param sampling_reference: Bases for partial data expansion
    :param rand_seed: Seed for random shuffling
    :return:
    """
    random.seed(rand_seed)
    num_items = len(dataset)
    num_labels = len(dataset.data_labels)
    # Normalise `intervals` into a list of per-interval size fractions.
    if intervals is None:
        intervals = len(label_dist)
    if type(intervals) is int:
        intervals = [1 / intervals for _ in range(intervals)]
    # Normalise `target_size` into an absolute item count.
    if target_size is None:
        target_size = num_items
    elif type(target_size) is float:
        target_size = target_size * num_items
    # NOTE(review): this default produces len(intervals) * num_labels dicts,
    # each holding a single label -- confirm a list of per-interval dicts
    # covering all labels was not intended instead.
    if label_dist is None:
        label_dist = [{label: 1 / num_labels} for _ in intervals for label in range(len(dataset.data_labels))]
    data = []
    labels = []
    # Bucket item indices by label, then shuffle each bucket so the slices
    # taken per interval are random but reproducible (seeded above).
    idxs = [[] for _ in range(num_labels)]
    for i, label in enumerate(dataset.get_labels()):
        idxs[label].append(i)
    for iidxs in idxs:
        random.shuffle(iidxs)
    logger.info(f'Sampling data with\n'
                f' - intervals: [{", ".join(f"{i:.3f}" for i in intervals)}]\n'
                f' - target size: {target_size}\n'
                f' - actual labels: {" | ".join(f"label {i} ({len(iidxs)})" for i, iidxs in enumerate(idxs))}\n'
                f' - requested label distribution: {label_dist}')
    # Per-label read position into the shuffled index buckets; advances as
    # intervals consume items so no item is sampled twice.
    current_idxs = [0] * num_labels
    for interval_num, (interval_size, label_distribution) in enumerate(zip(intervals, label_dist)):
        data_interval = []
        labels_interval = []
        # Fill in unspecified labels so the interval's fractions sum to 1.
        if auto_fill and len(label_distribution) != num_labels:
            preliminary_total = sum(ld if type(ld) is float else ld / len(idxs[l])
                                    for l, ld in label_distribution.items())
            default_dist = (1 - preliminary_total) / (num_labels - len(label_distribution))
            label_distribution = {label: default_dist if label not in label_distribution else label_distribution[label]
                                  for label in range(len(dataset.data_labels))}
        for label, distribution in label_distribution.items():
            logger.debug(f'Interval {interval_num + 1}, label {label} (current idx: {current_idxs[label]}):')
            # An int distribution is an absolute item count; a float is a
            # fraction scaled by the chosen sampling reference.
            if type(distribution) is int:
                interval_label_size_abs = distribution
                logger.debug(f' > fixed abs = {distribution} items')
            else:
                if sampling_reference is SamplingReference.ITEM_COUNT:
                    abs_factor = target_size
                else:
                    abs_factor = len(idxs[label]) * (target_size / num_items) * num_labels
                interval_label_size_abs = int(round(distribution * interval_size * abs_factor))
                logger.debug(f' > calculated abs({distribution:.3f}*{interval_size:.3f}*{abs_factor:.3f}) '
                             f'= {interval_label_size_abs} items')
            # Over-consumption only warns; the slice below silently shortens.
            if len(idxs[label]) < current_idxs[label] + interval_label_size_abs:
                logger.warning(f'not enough data for label {label} at interval {interval_num}'
                               f' (diff: {len(idxs[label]) - current_idxs[label] - interval_label_size_abs})')
            labels_interval += [label] * interval_label_size_abs
            data_interval += [dataset.get_data()[idx] for idx in
                              idxs[label][current_idxs[label]:current_idxs[label] + interval_label_size_abs]]
            current_idxs[label] += interval_label_size_abs
        logger.info(f'Interval {interval_num + 1} has {len(data_interval)} items in total.')
        data.append(data_interval)
        labels.append(labels_interval)
    return data, labels
def accumulate(data, labels):
    """Yield cumulative (data, labels) numpy arrays, one pair per interval.

    Each yielded pair contains everything seen in the current interval plus
    all previous ones, so the n-th yield is the union of intervals 0..n.
    """
    seen_data = []
    seen_labels = []
    for idx, (chunk_data, chunk_labels) in enumerate(zip(data, labels)):
        logger.debug(f'Adding {len(chunk_labels)} labels and {len(chunk_data)} items')
        seen_data.extend(chunk_data)
        seen_labels.extend(chunk_labels)
        logger.debug(f'Interval {idx} contains {len(seen_labels)} labels and {len(seen_data)} items')
        yield np.array(seen_data), np.array(seen_labels)
| [
"tim@repke.eu"
] | tim@repke.eu |
1c7d91fb0d46f9dce0ee7d4982c471f100e95da7 | 0101728fcbd7c60f0a6499f215b48df030261051 | /CNN/cnn.py | 069f794851179fde6ebc1b5b16ac4d5a59e7241a | [] | no_license | nainys/Machine-Learning | 7ed207f6b21bd07d560a0140f081c8df85b49d2a | 7858c7db2f93322ccea63f45a302bc5bd97faa3d | refs/heads/master | 2020-03-16T21:12:24.918483 | 2018-05-11T05:54:44 | 2018-05-11T05:54:44 | 132,988,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,001 | py | import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
from scipy.ndimage import convolve
# Load the input image and downscale to the 32x32 LeNet-style input size.
img = Image.open('car.png').resize((32,32))
data = np.asarray(img)
print "Input image shape = ",data.shape
# Global convolution hyper-parameters used by convolve() below.
filter_size = 5
stride = 1
def relu(x):
    """Rectified linear unit: clamp negative entries of x to zero."""
    return np.where(x > 0, x, 0)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied element-wise."""
    return np.reciprocal(1.0 + np.exp(-x))
def tanh(x):
    """Hyperbolic tangent activation, applied element-wise via NumPy."""
    result = np.tanh(x)
    return result
def softmax(x):
    """Numerically stable softmax: row-wise for 2-D input, vector-wise for 1-D."""
    shifted = np.exp(x - np.max(x))  # subtract global max to prevent overflow
    if shifted.ndim == 1:
        return shifted / shifted.sum(axis=0)
    # keepdims gives the same (n, 1) divisor the original built by hand
    # with np.array([...]).T, so broadcasting is identical.
    return shifted / shifted.sum(axis=1, keepdims=True)
# Convolution operation
def convolve(data, no_of_filters):
    """Cross-correlate `data` (H x W x C array) with fresh random filters.

    Relies on the module-level globals `filter_size` and `stride`.  Output
    channel k is computed from input channel k % C, and the filter bank is
    re-drawn uniformly in [-1, 1) on every call, so results are not
    deterministic.

    Returns an int array of shape (v_range, h_range, no_of_filters).
    """
    dim = data.shape[2]
    # Output width / height for a "valid" convolution at the given stride.
    h_range = int((data.shape[1] - filter_size) / stride) + 1
    v_range = int((data.shape[0] - filter_size) / stride) + 1
    # Bug fix: the buffer was allocated as (h_range, v_range, ...) but is
    # indexed [i][j] with i < v_range and j < h_range, which breaks (or
    # silently transposes) for non-square inputs.  Rows come first.
    output = np.ndarray((v_range, h_range, no_of_filters), dtype=int)
    filters = np.random.uniform(-1.0, 1.0, size=(filter_size, filter_size, no_of_filters))
    for k in range(no_of_filters):
        kernel = filters[:, :, k]
        for i in range(v_range):
            for j in range(h_range):
                # Bug fix: honour `stride` when sliding the window; with the
                # current global stride == 1 this is identical to before.
                window = data[i * stride:i * stride + filter_size,
                              j * stride:j * stride + filter_size, k % dim]
                # NOTE(review): dtype=int truncates the float dot product --
                # kept for backward compatibility with the original output.
                output[i][j][k] = np.sum(np.multiply(window, kernel))
    return output
# First convolution layer: 6 random filters + ReLU.
con1 = convolve(data,6)
con1 = relu(con1)
# print "Output after first convolution = \n",con1
print "Shape after first convolution = \n",con1.shape
# NOTE(review): Image.fromarray with 'RGB' expects a uint8 array -- the int
# activations here may need .astype(np.uint8); confirm this save works.
img1 = Image.fromarray(con1,'RGB')
img1 = img1.resize((312,312))
img1.save("con1.jpg")
# Pooling window size used by maxpool() below.
pool_size = 2
# Max pooling
def maxpool(output, no_of_filters):
    """Non-overlapping per-channel max pooling with window `pool_size`.

    Relies on the module-level global `pool_size`.  Returns a float array of
    shape (H // pool_size, W // pool_size, no_of_filters).
    """
    # Consistency fix: the original mixed a hard-coded // 2 with `pool_size`
    # in the same expression; using pool_size throughout is identical for
    # the current value (2) and makes the window size configurable.
    pooled = np.ndarray((output.shape[0] // pool_size,
                         output.shape[1] // pool_size,
                         no_of_filters), dtype=float)
    for i in range(no_of_filters):
        # View each channel as (rows, pool, cols, pool) blocks and reduce
        # every block to its maximum.
        pooled[:, :, i] = output[:, :, i].reshape(
            output.shape[0] // pool_size, pool_size,
            output.shape[1] // pool_size, pool_size).max(axis=(1, 3))
    return pooled
# First subsampling layer.
pool1 = maxpool(con1,6)
# print "Output after first subsampling = \n",pool1
print "Shape after first subsampling = ",pool1.shape
img1 = Image.fromarray(pool1,'RGB')
img1 = img1.resize((312,312))
img1.save("pool1.jpg")
# Second convolution layer: 16 random filters + ReLU.
con2 = convolve(pool1,16)
con2 = relu(con2)
# print "Output after second convolution = \n",con2
print "Shape after second convolution = ",con2.shape
img1 = Image.fromarray(con2,'RGB')
img1 = img1.resize((312,312))
img1.save("con2.jpg")
# Second subsampling layer.
pool2 = maxpool(con2,16)
# print "Output after second subsampling = \n",pool2
print "Shape after second subsampling = ",pool2.shape
img1 = Image.fromarray(pool2,'RGB')
img1 = img1.resize((312,312))
img1.save("pool2.jpg")
# Flatten the final feature maps into a single column vector.
output = pool2.reshape(pool2.shape[0]*pool2.shape[1]*pool2.shape[2],1)
def init_weights(n_input, n_hidden, n_output):
    """Draw uniform random parameters for a one-hidden-layer network.

    Returns (w1, bias_hidden, w2, bias_output) with shapes
    (n_input, n_hidden), (1, n_hidden), (n_hidden, n_output) and
    (1, n_output), each sampled uniformly from [-1, 1).
    """
    draw = lambda shape: np.random.uniform(-1.0, 1.0, size=shape)
    # Draw order matches the original (w1, hidden bias, w2, output bias),
    # preserving the RNG stream.
    return (draw((n_input, n_hidden)),
            draw((1, n_hidden)),
            draw((n_hidden, n_output)),
            draw((1, n_output)))
def forward_pass(X, w1, bh, w2, bo):
    """Run one forward pass: X.T -> hidden layer (tanh) -> output (tanh).

    X is column-oriented and transposed before the first matmul; (w1, bh)
    and (w2, bo) are the hidden and output layer parameters.  The sigmoid /
    relu activations were tried previously and can be swapped in for tanh.
    """
    hidden = tanh(X.T.dot(w1) + bh)
    return tanh(hidden.dot(w2) + bo)
# Fully connected layer
def fc(output):
    """Fully connected head: 84 hidden units -> 10 outputs, random weights."""
    HIDDEN_UNITS = 84
    OUTPUT_UNITS = 10
    params = init_weights(output.shape[0], HIDDEN_UNITS, OUTPUT_UNITS)
    return forward_pass(output, *params)
# Project the flattened features down to 120 dims with a random matrix
# (a stand-in for LeNet's trained C5 layer), then run the FC head + softmax.
filt = np.random.uniform(-1.0,1.0,size=(pool2.shape[0]*pool2.shape[1]*pool2.shape[2],120))
output = np.dot(filt.T,output)
# print "output shape == ",output.shape
result = fc(output)
res = softmax(result)
print "Result = \n",res
print "Result shape = ",res.shape
# img = Image.fromarray(output)
# img.show()
| [
"sharmanainy1711@gmail.com"
] | sharmanainy1711@gmail.com |
237146708050ca5d37c5f06d79988bd1e0536000 | 7c5993d6c41fad0497ca6eae6466cdd328a7fad3 | /website/views/forms/audition_signup.py | 73fc934e11ff7a07f2655f952b70677ff857346a | [] | no_license | cloverlime/ulso-django | 57b235a4cd5d697cf2fd62ad20d4f1142a9ca46a | 5dad84d4f24cd2a9f2357fd9ef7f14fd8b99f2ee | refs/heads/master | 2020-03-17T19:47:44.804871 | 2018-08-27T18:34:04 | 2018-09-29T15:12:25 | 133,878,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | import datetime
from django.contrib import messages
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.template.defaultfilters import slugify
from django.views import View
from django.urls import reverse
from status.models import Status
from ulsosite.utils import academic_year_calc
from ulsosite.models.auditions import AuditionDate
from ulsosite.models.people import Musician
from ulsosite.info.dates import CURRENT_SEASON
from website.forms.audition_signup import AuditionSignUpForm
from website.utils import redirect_error, redirect_success
from website import responses
class AuditionSignUpView(View):
    """Public audition sign-up page: renders the form and handles submissions."""

    form_template = 'website/pages/audition-signup.html'
    fail_template = 'website/forms/form-fail.html'
    # Shown whenever concerto/audition applications are closed for the season.
    closed_message = 'We are currently closed for audition applications. Please contact us to discuss mid-year opportunities.'

    def _concerto_is_open(self):
        # BUG FIX: this was declared without `self` and referenced without
        # being called (`if not self._concerto_is_open:` tested the bound
        # method's truthiness, which is always True), so the "closed"
        # branch could never trigger.
        return Status.objects.get(season=CURRENT_SEASON).concerto_open

    def post(self, request, *args, **kwargs):
        """Validate the submitted form and create a Musician record."""
        if not self._concerto_is_open():
            context = {'message': self.closed_message}
            return render(request, self.fail_template, context)
        form = AuditionSignUpForm(data=request.POST)
        if form.is_valid():
            field_attr = form.cleaned_data
            musician = Musician.create(field_attr)
            if musician is None:
                return redirect_error(request, responses.DATABASE_ERROR)
            musician.save()
            # TODO Send acknowledgement email
            return redirect_success(request, responses.AUDITION_SIGNUP_SUCCESS)
        return redirect_error(request, responses.AUDITION_SIGNUP_ERROR)

    def get(self, request, *args, **kwargs):
        """Render the sign-up form with this season's audition dates."""
        if not self._concerto_is_open():
            context = {'message': self.closed_message}
            return render(request, self.fail_template, context)
        form = AuditionSignUpForm()
        season = academic_year_calc(datetime.datetime.now())
        # Queryset of audition dates for the current season
        audition_dates = AuditionDate.objects.filter(season=season)
        context = {
            'form': form,
            'season': season,
            'audition_dates': audition_dates,
        }
        return render(request, self.form_template, context)
| [
"wenye.zhao@gmail.com"
] | wenye.zhao@gmail.com |
6a58387a396d96bfd5733140648664f333506bdd | 96985a1fe69a5a3fb582bb684e1b7820430af524 | /rain/rain.pyde | 782e9c928548d3a426dd59419b1e3dc22ec763a4 | [] | no_license | sowmya-nittala/GenArt-using-Processing | 1a4e591a818297b43b289b05e27abed2bca5a791 | d96ccffaa8c496bff06870625cf4a269a8495177 | refs/heads/master | 2022-12-29T16:14:08.904478 | 2020-10-10T22:26:29 | 2020-10-10T22:26:29 | 302,999,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | pyde | nighttop = color(0, 0, 51)
# Night-sky gradient bottom colour and the grey used for rain strokes
# (nighttop, the gradient's top colour, is defined on the line above).
nightbottom = color(0, 51, 102)
rainc = color(96, 96, 96)
#linesize = 40
def setup():
    """Processing setup: 800x500 canvas redrawn 20 times per second."""
    size(800, 500)
    frameRate(20)
def draw():
    """Each frame: repaint the night-sky gradient, then a fresh set of raindrops."""
    setgradient(0, 0, width, height, nighttop, nightbottom)
    rain(width, height)
def setgradient(x, y, w, h, c1, c2):
    """Fill the w x h rectangle at (x, y) with a vertical c1->c2 gradient.

    Drawn as one horizontal line per row, lerping the colour by row index
    (map/lerpColor/stroke/line are Processing builtins).
    """
    for i in range(y, y + h + 1):
        inter = map(i, y, y + h, 0, 1)
        c = lerpColor(c1, c2, inter)
        stroke(c)
        line(x, i, x + w, i)
def rain(w, h):
    """Draw one raindrop every 4 px across the width, at a random height.

    Each drop is a short slanted line of random length 10-70 px.
    """
    for x in range(0, w, 4):
        y = random(0, h)
        linesize = random(10, 70)
        stroke(rainc)
        line(x, y, x+2, y+linesize)
| [
"sowmya.nittala@gmail.com"
] | sowmya.nittala@gmail.com |
70a6c42d42cac8fc9c14ca46b10126e41061fcd9 | 76daffafffacf0d3cee3a21a7e45f2c3a5872750 | /A4/decode.py | 0c90f438efc481e86206c10b647089baa38e9fde | [] | no_license | abhigoyal1997/Artificial-Intelligence | 3c12e5d655c279f6f3124927196a528cc1570b65 | 899446e521a6a10d7ce26d0e739dd0933c9fb352 | refs/heads/master | 2020-04-15T21:22:44.040593 | 2019-01-10T09:27:25 | 2019-01-10T09:27:25 | 165,027,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import sys
# Read the grid layout and the per-state policy produced by the MDP solver.
gridFile = open(sys.argv[1], 'r')
policyFile = open(sys.argv[2], 'r')
grid_data = [x.split() for x in gridFile.readlines()]
# Each policy line is "<state> <action>"; keep only the action column.
# The last line of the policy file is skipped -- presumably a footer; verify.
policy = [x.split()[1] for x in policyFile.readlines()[:-1]]
nStates = 0
actions = ['N', 'E', 'S', 'W']
nRows = len(grid_data)
nCols = len(grid_data[0])
# Action codes used throughout: 0=N, 1=E, 2=S, 3=W.
north = 0
east = 1
south = 2
west = 3
start = None
end = None
def getNextPos(i, j, a):
    """Return the (row, col) reached by taking action a from cell (i, j).

    Actions use the module-level codes north/east/south/west; an unknown
    action yields None, matching the original implicit fall-through.
    """
    moves = {north: (-1, 0), east: (0, 1), south: (1, 0), west: (0, -1)}
    if a not in moves:
        return None
    di, dj = moves[a]
    return (i + di, j + dj)
# Map every traversable cell to a sequential state index.
# Cell codes: 1 = wall (gets no state), 2 = start, 3 = end, others = free.
state_dict = {}
for i in range(0,nRows):
    for j in range(0, nCols):
        val = int(grid_data[i][j])
        if val == 2:
            start = (i, j)
        elif val == 3:
            end = (i, j)
        elif val == 1:
            # walls are skipped before the index is assigned
            continue
        state_dict[(i, j)] = nStates
        nStates += 1
# Walk the policy from start to end, emitting one action letter per step.
pos = start
path = ''
while pos != end:
    a = int(policy[state_dict[pos]])
    path += actions[a] + ' '
    pos = getNextPos(pos[0], pos[1], a)
print(path) | [
"agabohar@gmail.com"
] | agabohar@gmail.com |
c74ff5aebde1796318888f8c56bc6f5e35ea053a | 12c520c4f485dca125f4a7f8103c056a78c3a816 | /kolorki.pyde | 6c49680c89361ce951ca9deb927c9b60f1b2fa03 | [] | no_license | Wojowniczka1/kolorki | 393e6940dc1f4953756c47c5f995816e7803b76c | 8caaa9684a3feda592dff2a198d932da4f1a73a0 | refs/heads/master | 2020-04-30T11:57:37.821228 | 2019-03-20T20:48:58 | 2019-03-20T20:48:58 | 176,814,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | pyde | def setup():
frameRate(60)
size(400, 400)
background(255, 255, 170)
strokeWeight(2)
global krotka0
global krotka1
global krotka2
krotka0 = (149, 230, 163)
krotka1 = (220, 135, 220)
krotka2 = (255, 168, 149)
global krotka3
global krotka4
global krotka5
krotka3 = (159, 225, 176)
krotka4 = (220, 180, 235)
krotka5 = (255, 180, 170)
global w
global x
global y
global z
w = 0
x = 0
y = 50
z = 50
def draw():
    """Each frame: move the square one step down-right and recolour by position."""
    global w
    w = w + 1
    global x
    x = x + 1
    rect(w, x, y, z)
    # colour phase depends on how far the square has travelled
    if w < 100:
        fill(*krotka0)
        stroke(*krotka3)
    if w > 101:
        fill(*krotka1)
        stroke(*krotka4)
    if w > 250:
        fill(*krotka2)
        stroke(*krotka5)
    # stop the sketch once the square leaves the canvas
    if w > width:
        exit()
| [
"48254319+Wojowniczka1@users.noreply.github.com"
] | 48254319+Wojowniczka1@users.noreply.github.com |
f648a0cb1508abb94a8ece9baa571ce1f7de2e9f | 6c6dde254df4072a2e8dd66b22375eab9d009f9b | /A3rcs/projects/A3rcs/virtual_cron_NV.py | ddcfa64bc9c050d52641bed4d5414ea918148cb3 | [] | no_license | aymin91/bigPy | 63e45b2e270a57a573a55db278f3239497d5dd0d | 5269ccfe0162920e1db79b6b90610c5201ddafad | refs/heads/main | 2023-06-21T18:48:22.750817 | 2021-07-22T07:07:12 | 2021-07-22T07:07:12 | 388,362,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from time import localtime
import time
import NV_Crawler_Generator as NV_crawler
# Korean weekday names, Monday..Sunday, indexed by tm_wday
weekdays = ['월', '화', '수', '목', '금', '토', '일']
# Poll the local clock once a minute; run the crawler at 01:00:00.
while True:
    interval = 60
    t = localtime()
    # (debug trigger: fire every 5 seconds)
    # if t.tm_hour > 0 and t.tm_min > 0 and t.tm_sec%5==0:
    # NOTE(review): this requires tm_sec == 0 exactly, but the loop sleeps
    # 60s between polls -- it can wake at e.g. 01:00:59 and skip the run.
    if t.tm_hour == 1 and t.tm_min == 0 and t.tm_sec == 0:
        msg = '{yy}.{mm}.{dd}({wd}) {hr}:{mi}:{se}'.format(
            yy=t.tm_year, mm=t.tm_mon, dd=t.tm_mday, wd=weekdays[t.tm_wday],
            hr=t.tm_hour, mi=t.tm_min, se=t.tm_sec
        )
        print(msg)
        print('Virtual Scheduler is running!!')
        # call crawler module
        NV_crawler.main()
        print('Virtual Scheduler is finished!!')
    else:
        # heartbeat marker printed once per minute
        print('■', end='')
    time.sleep(interval)
print('Finish') | [
"mayell9115@gmail.com"
] | mayell9115@gmail.com |
b23de1013a86b7bc80b2a76e200a8db9522b2306 | 6ab852a8acb6adf317eeffb98871d6adae18b1a0 | /venv/bin/launch_instance | 2bbfab70f1d3424587fe31500bc61d7b2516ce54 | [] | no_license | DwarkeshAhuja/KahootIt | dae7931743bd1628b4ae9055f945e5b8f07fb323 | e5523b8fb2329df04debf1e418ebf792b6a0077b | refs/heads/master | 2023-04-16T02:35:53.448382 | 2021-04-26T05:58:47 | 2021-04-26T05:58:47 | 359,279,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,648 | #!/Users/vivinahuja/PycharmProjects/Sem2_begins/venv/bin/python
# Copyright (c) 2009 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
#
# Utility to launch an EC2 Instance
#
VERSION="0.2"
# Wrapper for Ubuntu cloud-init AMIs: a tiny python script, run at first
# boot, that writes the generated configuration to /etc/boto.cfg.
CLOUD_INIT_SCRIPT = """#!/usr/bin/env python
f = open("/etc/boto.cfg", "w")
f.write(\"\"\"%s\"\"\")
f.close()
"""
import boto.pyami.config
import boto.utils
import re, os
from boto.compat import ConfigParser
class Config(boto.pyami.config.Config):
    """A special config class that also adds import abilities
    Directly in the config file. To have a config file import
    another config file, simply use "#import <path>" where <path>
    is either a relative path or a full URL to another config
    """
    def __init__(self):
        ConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'})
    def add_config(self, file_url):
        """Add a config file to this configuration
        :param file_url: URL for the file to add, or a local path
        :type file_url: str
        """
        # bare local paths are turned into absolute file:// URLs first
        if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", file_url):
            if not file_url.startswith("/"):
                file_url = os.path.join(os.getcwd(), file_url)
            file_url = "file://%s" % file_url
        (base_url, file_name) = file_url.rsplit("/", 1)
        base_config = boto.utils.fetch_file(file_url)
        base_config.seek(0)
        # first pass: recursively pull in any '#import <path>' references
        for line in base_config.readlines():
            match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
            if match:
                self.add_config("%s/%s" % (base_url, match.group(1)))
        base_config.seek(0)
        # second pass: parse this file itself (readfp is the Python 2 API)
        self.readfp(base_config)
    def add_creds(self, ec2):
        """Add the credentials to this config if they don't already exist"""
        if not self.has_section('Credentials'):
            self.add_section('Credentials')
        self.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
        self.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
    def __str__(self):
        """Get config as string"""
        from StringIO import StringIO
        s = StringIO()
        self.write(s)
        return s.getvalue()
# Accumulates values from repeated / comma-separated -s/--scripts options.
SCRIPTS = []
def scripts_callback(option, opt, value, parser):
    """optparse callback: split a comma-separated value and collect into SCRIPTS."""
    arg = value.split(',')
    if len(arg) == 1:
        SCRIPTS.append(arg[0])
    else:
        SCRIPTS.extend(arg)
    setattr(parser.values, option.dest, SCRIPTS)
def add_script(scr_url):
    """Read a script, recursively inlining any '#import <path>' references.

    Relative '#import' targets are resolved against the directory part of
    scr_url.  (Fix: base_url previously kept a trailing '/' and the
    "%s/%s" join added another, producing 'dir//file' URLs.)
    """
    base_url = '/'.join(scr_url.split('/')[:-1])
    script_raw = boto.utils.fetch_file(scr_url)
    script_content = ''
    for line in script_raw.readlines():
        match = re.match(r"^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
        # if there is an import, splice the referenced script in its place
        if match:
            script_content += add_script("%s/%s" % (base_url, match.group(1)))
        else:
            # otherwise keep the line verbatim
            script_content += line
    return script_content
if __name__ == "__main__":
    # readline is optional; it only improves the interactive prompts below
    try:
        import readline
    except ImportError:
        pass
    import sys
    import time
    import boto
    from boto.ec2 import regions
    from optparse import OptionParser
    from boto.mashups.iobject import IObject
    # NOTE(review): -i/--ip is parsed but options.elastic_ip is never used
    # below -- associating the Elastic IP appears to be unimplemented.
    parser = OptionParser(version=VERSION, usage="%prog [options] config_url")
    parser.add_option("-c", "--max-count", help="Maximum number of this type of instance to launch", dest="max_count", default="1")
    parser.add_option("--min-count", help="Minimum number of this type of instance to launch", dest="min_count", default="1")
    parser.add_option("--cloud-init", help="Indicates that this is an instance that uses 'CloudInit', Ubuntu's cloud bootstrap process. This wraps the config in a shell script command instead of just passing it in directly", dest="cloud_init", default=False, action="store_true")
    parser.add_option("-g", "--groups", help="Security Groups to add this instance to", action="append", dest="groups")
    parser.add_option("-a", "--ami", help="AMI to launch", dest="ami_id")
    parser.add_option("-t", "--type", help="Type of Instance (default m1.small)", dest="type", default="m1.small")
    parser.add_option("-k", "--key", help="Keypair", dest="key_name")
    parser.add_option("-z", "--zone", help="Zone (default us-east-1a)", dest="zone", default="us-east-1a")
    parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
    parser.add_option("-i", "--ip", help="Elastic IP", dest="elastic_ip")
    parser.add_option("-n", "--no-add-cred", help="Don't add a credentials section", default=False, action="store_true", dest="nocred")
    parser.add_option("--save-ebs", help="Save the EBS volume on shutdown, instead of deleting it", default=False, action="store_true", dest="save_ebs")
    parser.add_option("-w", "--wait", help="Wait until instance is running", default=False, action="store_true", dest="wait")
    parser.add_option("-d", "--dns", help="Returns public and private DNS (implicates --wait)", default=False, action="store_true", dest="dns")
    parser.add_option("-T", "--tag", help="Set tag", default=None, action="append", dest="tags", metavar="key:value")
    parser.add_option("-s", "--scripts", help="Pass in a script or a folder containing scripts to be run when the instance starts up, assumes cloud-init. Specify scripts in a list specified by commas. If multiple scripts are specified, they are run lexically (A good way to ensure they run in the order is to prefix filenames with numbers)", type='string', action="callback", callback=scripts_callback)
    parser.add_option("--role", help="IAM Role to use, this implies --no-add-cred", dest="role")
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        sys.exit(1)
    file_url = os.path.expanduser(args[0])
    cfg = Config()
    cfg.add_config(file_url)
    # resolve the requested region name; the for/else fires when none match
    for r in regions():
        if r.name == options.region:
            region = r
            break
    else:
        print("Region %s not found." % options.region)
        sys.exit(1)
    ec2 = boto.connect_ec2(region=region)
    if not options.nocred and not options.role:
        cfg.add_creds(ec2)
    iobj = IObject()
    # anything not supplied on the command line is chosen interactively
    if options.ami_id:
        ami = ec2.get_image(options.ami_id)
    else:
        # NOTE(review): dead assignment -- ami_id is never read afterwards
        ami_id = options.ami_id
        l = [(a, a.id, a.location) for a in ec2.get_all_images()]
        ami = iobj.choose_from_list(l, prompt='Choose AMI')
    if options.key_name:
        key_name = options.key_name
    else:
        l = [(k, k.name, '') for k in ec2.get_all_key_pairs()]
        key_name = iobj.choose_from_list(l, prompt='Choose Keypair').name
    if options.groups:
        groups = options.groups
    else:
        groups = []
        l = [(g, g.name, g.description) for g in ec2.get_all_security_groups()]
        g = iobj.choose_from_list(l, prompt='Choose Primary Security Group')
        while g != None:
            groups.append(g)
            l.remove((g, g.name, g.description))
            g = iobj.choose_from_list(l, prompt='Choose Additional Security Group (0 to quit)')
    user_data = str(cfg)
    # If it's a cloud init AMI,
    # then we need to wrap the config in our
    # little wrapper shell script
    if options.cloud_init:
        user_data = CLOUD_INIT_SCRIPT % user_data
    scriptuples = []
    if options.scripts:
        scripts = options.scripts
        scriptuples.append(('user_data', user_data))
        for scr in scripts:
            scr_url = scr
            if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", scr_url):
                if not scr_url.startswith("/"):
                    scr_url = os.path.join(os.getcwd(), scr_url)
                try:
                    newfiles = os.listdir(scr_url)
                    for f in newfiles:
                        # put the scripts in the folder in the array such that they run in the correct order
                        scripts.insert(scripts.index(scr) + 1, scr.split("/")[-1] + "/" + f)
                except OSError:
                    scr_url = "file://%s" % scr_url
            # NOTE(review): failures reading a script are silently swallowed
            try:
                scriptuples.append((scr, add_script(scr_url)))
            except Exception as e:
                pass
        user_data = boto.utils.write_mime_multipart(scriptuples, compress=True)
    shutdown_proc = "terminate"
    if options.save_ebs:
        shutdown_proc = "save"
    instance_profile_name = None
    if options.role:
        instance_profile_name = options.role
    r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count),
            key_name=key_name, user_data=user_data,
            security_groups=groups, instance_type=options.type,
            placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc,
            instance_profile_name=instance_profile_name)
    instance = r.instances[0]
    if options.tags:
        for tag_pair in options.tags:
            name = tag_pair
            value = ''
            if ':' in tag_pair:
                name, value = tag_pair.split(':', 1)
            instance.add_tag(name, value)
    if options.dns:
        options.wait = True
    if not options.wait:
        sys.exit(0)
    # poll until the instance reports 'running'
    while True:
        instance.update()
        if instance.state == 'running':
            break
        time.sleep(3)
    if options.dns:
        print("Public DNS name: %s" % instance.public_dns_name)
        print("Private DNS name: %s" % instance.private_dns_name)
| [
"dwarkeshahuja994@gmail.com"
] | dwarkeshahuja994@gmail.com | |
bc7f9575fac3c7ebcd14bc998bd920b8498a8942 | 99e583312e680cf7b8f9b07d0d582a6d6a063524 | /modules/m_functools/with_timer.py | 649ca663e9b7e4357f798b8439086a22404466d3 | [] | no_license | negiper/my_python | 23d2d2fe111d7c8d85a4f3fa1b036e843ee64092 | 14e61ed3840a99097031113f4ab9e857444f94f2 | refs/heads/master | 2021-01-23T00:20:32.270159 | 2018-02-25T07:59:20 | 2018-02-25T07:59:20 | 92,808,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | #coding=utf-8
#Wrap time.time() as a stopwatch for use in a with-statement,
#i.e. define a timing context manager (must implement __enter__ and __exit__).
import time
class Timer(object):
    """Context manager reporting wall-clock time spent inside a with-block.

    After the block exits, `secs` holds the elapsed seconds and `msecs`
    the same duration in milliseconds; verbose=True also prints it.
    Prints use the parenthesized function form, which produces identical
    output on Python 2 and also works on Python 3.
    """
    def __init__(self, verbose=False):
        self.verbose = verbose
    def __enter__(self):
        self.start = time.time()
        print('Starting...')
        return self
    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000
        print('End.')
        if self.verbose:
            print('elapsed time: %f ms' % self.msecs)
if __name__ == '__main__':
    # Demo: time a tight accumulation loop.
    # NOTE(review): `sum` shadows the builtin of the same name here.
    sum = 0
    with Timer(True) as t:
        for i in range(10000000):
            sum += i
| [
"noreply@github.com"
] | negiper.noreply@github.com |
bc4e473ea85b92d569a80bd6aeb76464a501dc4b | 590bd2b272aac6fea2ec4128818b9f8dc732b819 | /TP6/TP6.py | b53b537b1f6046a29591fe741c50be29a1de2ab6 | [
"MIT"
] | permissive | cedced19/tp-mpsi | 6ce6f2988297bab4f2d59fcdef0712178ebdb569 | bc1b57b6af63aa3db6e4f019b44ce3b872939b85 | refs/heads/master | 2020-04-13T16:56:21.625790 | 2019-08-20T10:56:27 | 2019-08-20T10:56:27 | 163,334,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | #!/usr/bin/python3
import math
import matplotlib.pyplot as pl
# Integral of f over [0, 1] is pi.
def f(x):
    return(4/(1+x**2))
# Integral of g over [1, 2] is ln(2) ~ 0.693147.
def g(x):
    return(1/x)
# Integral of h over [0, 1/2] is pi (6*arcsin(1/2)).
def h(x):
    return(6/(math.sqrt(1-x**2)))
def rectangle(f, a, b, n):
    """Midpoint-rule approximation of the integral of f over [a, b].

    The interval is split into n strips of width d; f is sampled at the
    centre of each strip and the samples are summed, then scaled by d.
    """
    d = (b - a) / n
    left = a
    total = 0
    for _ in range(n):
        total += f(left + d / 2)
        left += d
    return total * d
print(rectangle(f,0,1,1000))
def trapeze_naif(f, a, b, n):
    """Naive trapezoid rule: both edges of every strip are evaluated anew.

    Each strip contributes (f(left) + f(right)) / 2; interior nodes are
    therefore evaluated twice (the non-naive version shares them).
    """
    d = (b - a) / n
    left = a
    total = 0
    for _ in range(n):
        total += (f(left) + f(left + d)) / 2
        left += d
    return total * d
print(trapeze_naif(f,0,1,1000))
def trapeze(f, a, b, n):
    """Composite trapezoid rule for the integral of f over [a, b].

    Endpoints f(a) and f(b) carry weight 1/2; each interior node is
    evaluated once.  (Fix: the previous version skipped f(a)/2, gave
    f(b) full weight, and sampled f at b + d, outside [a, b].)
    """
    d = (b - a) / n
    s = (f(a) + f(b)) / 2
    xk = a
    for i in range(1, n):
        xk += d
        s += f(xk)
    return s * d
print(trapeze(f,0,1,1000))
def simpson_naif(f, a, b, n):
    """Naive composite Simpson rule: strip edges are evaluated twice.

    Each strip contributes f(left) + 4*f(midpoint) + f(right); the sum
    is scaled by d/6.
    """
    d = (b - a) / n
    left = a
    total = 0
    for _ in range(n):
        total += f(left) + 4 * f(left + d / 2) + f(left + d)
        left += d
    return total * d * 1 / 6
print(simpson_naif(f,0,1,1000))
def simpson(f, a, b, n):
    """Composite Simpson rule sharing strip-edge evaluations.

    total = f(a) + f(b) + 2*sum(interior edges) + 4*sum(midpoints),
    scaled by d/6.  Each node is evaluated exactly once.
    """
    d = (b - a) / n
    xk = a
    total = f(xk) + 4 * f(xk + d / 2)
    for _ in range(n - 1):
        xk += d
        total += 2 * f(xk) + 4 * f(xk + d / 2)
    xk += d
    total += f(xk)
    return total * d * 1 / 6
print(simpson(f,0,1,1000))
def pre_list(n):
    """Return n precision thresholds: 0.1/2, 0.1/4, ..., 0.1/2**n.

    Halving a float repeatedly only shifts its exponent, so this closed
    form yields bit-identical values to the original iterative halving.
    """
    return [0.1 / 2 ** (k + 1) for k in range(n)]
listepre=pre_list(20)
def nb_appel(methode, f, a, b, exact, listepre):
    """For each precision threshold, count the subdivisions needed.

    Starting at n=1, n is incremented until |exact - methode(f, a, b, n)|
    falls within the threshold; the final n is recorded per threshold.
    (Generalised: iterates over listepre itself instead of a hard-coded
    range(20), so precision lists of any length now work.)
    """
    L = []
    for precision in listepre:
        c = 1
        while abs(exact - methode(f, a, b, c)) > precision:
            c += 1
        # depending on the method the true call count is c, c+1 or 2*c+1,
        # but the difference is negligible
        L.append(c)
    return L
#print(nb_appel(trapeze,f,0,1,math.pi,listepre))
#print(nb_appel(simpson,f,0,1,math.pi,listepre))
def show():
    """Plot, per method, how many f-evaluations each precision level needs."""
    # displays the comparison graph (axis labels are user-facing French
    # plot text, left untranslated on purpose)
    for i in ['trapeze', 'simpson', 'rectangle']:
        # eval() is safe here: it only ever sees the three literals above
        y=nb_appel(eval(i),f,0,1,math.pi,listepre)
        x=[math.log(elt)/math.log(10) for elt in listepre]
        pl.plot(x,y,label=i)
    pl.legend()
    pl.xlabel('precision en 10^x')
    pl.ylabel('nombres appels à f')
    pl.title('fonction f')
    pl.show()
#show()
# la méthode simpson est celle qui demande le moins d'appel à f pour avoir une grande précision
def isRectangle(a, b, c):
    """Return True if sides a, b, c (in any order) form a right triangle.

    Pythagoras: the largest side is the hypotenuse, so the triangle is
    right-angled iff hyp**2 equals the sum of the other two squares,
    i.e. 2*hyp**2 == a**2 + b**2 + c**2 (exact for integers).
    """
    hyp = max(a, b, c)
    return 2 * hyp * hyp == a * a + b * b + c * c
def triangle1(p):
    """All integer-sided right triangles with perimeter p (brute force).

    O(p^3): enumerates every (a, b, c) triple.  triangle2 below performs
    the same search in O(p^2) by deriving c from the perimeter.
    """
    l=[]
    for a in range (1,p+1):
        for b in range (1,p+1):
            for c in range (1,p+1):
                if ((a+b+c) == p) & isRectangle(a,b,c):
                    l.append((a,b,c))
    return l
#print(triangle1(12))
def triangle2(p):
    """Integer-sided right triangles with perimeter p (quadratic search)."""
    found = []
    for a in range(1, p + 1):
        for b in range(1, p + 1):
            # the third side is forced by the perimeter constraint
            c = p - a - b
            if c >= 1 and isRectangle(a, b, c):
                found.append((a, b, c))
    return found
print(triangle2(12)) | [
"cedced19@gmail.com"
] | cedced19@gmail.com |
b278f7784694cab7b0f6e4c0ae2aa4bf7f6d02af | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /configs/mmseg/segmentation_sdk_dynamic.py | bfb033efed815d9f803ec76bca1feeee792fd4fd | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 307 | py | _base_ = ['./segmentation_dynamic.py', '../_base_/backends/sdk.py']
codebase_config = dict(model_type='sdk')
backend_config = dict(pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='PackSegInputs', meta_keys=['img_path', 'ori_shape', 'img_shape'])
])
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
3af8f7899082114e830510e4f0d6b11dc64eba1b | a700baf4c3cfc072a6e0d8c4d78cfd5b9f1af122 | /bin/python-config | 63ce8e96496ae205c4f622051dff732488f50811 | [] | no_license | agatakaczmarek12/Integration-project | 642ba4ae3c257d8769c6fe46395a6b9e1ece9cf9 | d6cfbfc68c9341560582ee8aa1e3d5d80cc99057 | refs/heads/master | 2022-12-14T15:33:14.634784 | 2019-03-26T13:21:53 | 2019-03-26T13:21:53 | 177,791,487 | 0 | 0 | null | 2022-12-08T02:30:43 | 2019-03-26T13:13:28 | JavaScript | UTF-8 | Python | false | false | 2,356 | #!/Users/agatakaczmarek/my_dash_app/venv/bin/python
import sys
import getopt
import sysconfig
# Flags understood by this interpreter build; newer versions add more.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # insert(-1, ...) slots the flag just before 'help'
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage summary of all supported flags to stderr, then exit."""
    flag_list = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], flag_list))
    sys.exit(code)
# Parse flags; any unknown flag or an empty invocation prints usage and exits.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of build configuration per requested flag, in argv order.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # pre-3.3 interpreters exposed this as the 'SO' config variable
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"noreply@github.com"
] | agatakaczmarek12.noreply@github.com | |
714ca93fdeb2a55695bb540870749022fa4ce69c | f5c55128fa59e185a85b2bd795efe3259c309aa3 | /catkin_ws/build/turtlebot_simulator/turtlebot_gazebo/catkin_generated/pkg.installspace.context.pc.py | 7a7a058b8ca0d272001525bf91d21a7e3fa3392a | [] | no_license | phil-ludewig/Navigation-Project-Home-Service-Robot | 8a4d29db1b916b13487da5a747635d4972f3d0ca | 3f14d35a32ea3b6d432e6729cfbb1363e423311b | refs/heads/master | 2020-05-20T09:17:19.297879 | 2019-05-10T16:28:11 | 2019-05-10T16:28:11 | 185,496,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin (see template reference on the previous line);
# do not edit by hand -- regenerate via the catkin build instead.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_gazebo"
PROJECT_SPACE_DIR = "/home/phil/catkin_ws/install"
PROJECT_VERSION = "2.2.3"
| [
"ph.ludewig.shp@gmail.com"
] | ph.ludewig.shp@gmail.com |
b6002bc250faf4ddfd8640d2a7ed44bf9176c3ec | 36785c0893ab1e2c81c6a03305f42459776a84e0 | /ambra_sdk/request_args.py | e29318245d880cd3dec5ab930e8d16a232ac1280 | [
"Apache-2.0"
] | permissive | dicomgrid/sdk-python | 06589f87f33850bd15e6e99fb683bada6492775f | 2618e682d38339439340d86080e8bc6ee6cf21b5 | refs/heads/master | 2022-08-28T14:50:35.864012 | 2022-08-22T12:36:50 | 2022-08-22T12:36:50 | 253,867,502 | 11 | 6 | Apache-2.0 | 2022-04-13T10:06:38 | 2020-04-07T17:36:56 | HTML | UTF-8 | Python | false | false | 5,637 | py | """Request args."""
from datetime import date
from json import JSONEncoder
from json import dumps as json_dumps
from typing import Any, Dict, Iterable, Mapping, Optional
import aiohttp
from aiohttp.helpers import sentinel
class Encoder(JSONEncoder):
    """Ambra arguments Encoder.

    Serializes dates as 'YYYY-MM-DD HH:MM:SS'; everything else falls back
    to the stock JSONEncoder behaviour.
    """
    def default(self, el: Any):
        """Encode default.

        :param el: el
        :return: encoded el
        """
        if not isinstance(el, date):
            return JSONEncoder.default(self, el)
        return el.strftime('%Y-%m-%d %H:%M:%S')
def cast_argument(arg: Any) -> Any:
    """Cast argument.

    Dates become 'YYYY-MM-DD HH:MM:SS' strings, lists/dicts are JSON
    encoded via Encoder, everything else passes through unchanged.

    :param arg: arg
    :return: casted arg
    """
    if isinstance(arg, date):
        return arg.strftime('%Y-%m-%d %H:%M:%S')
    needs_json = isinstance(arg, (list, dict))
    return json_dumps(arg, cls=Encoder) if needs_json else arg
def cast_arguments(args: Dict[str, Any]) -> Dict[str, str]:
    """Cast arguments.

    Applies cast_argument to every value; keys are preserved as-is.

    :param args: args
    :return: casted args
    """
    return {
        arg_name: cast_argument(arg_value)
        for arg_name, arg_value in args.items()
    }
class RequestArgs:  # NOQA:WPS230
    """Request args.

    Like in requests.request args
    """
    def __init__(  # NOQA:D107,WPS211
        self,
        method: str,
        url: str,
        full_url: str,
        params: Optional[Any] = None,  # NOQA:WPS110
        data: Optional[Any] = None,  # NOQA:WPS110
        json: Optional[Any] = None,
        headers: Optional[Any] = None,
        cookies: Optional[Any] = None,
        files: Optional[Any] = None,
        auth: Optional[Any] = None,
        timeout: Optional[Any] = None,
        allow_redirects: Optional[Any] = None,
        proxies: Optional[Any] = None,
        verify: Optional[Any] = None,
        stream: Optional[Any] = None,
        cert: Optional[Any] = None,
    ):  # NOQA: DAR101
        """Init."""
        # plain attribute copies; no validation is performed here
        self.method = method
        self.url = url
        self.full_url = full_url
        self.params = params  # NOQA:WPS110
        self.data = data  # NOQA:WPS110
        self.json = json
        self.headers = headers
        self.cookies = cookies
        self.files = files
        self.auth = auth
        self.timeout = timeout
        self.allow_redirects = allow_redirects
        self.proxies = proxies
        self.verify = verify
        self.stream = stream
        self.cert = cert
    def to_dict(self):
        """To dict.

        :return: dict repr
        """
        # shallow copy: nested values are shared with this object
        return self.__dict__.copy()
    def dict_optional_args(
        self,
        autocast_arguments_to_string: bool,
    ):
        """Get dict optional args.

        :param autocast_arguments_to_string: autocast arguments to string
        :return: dict of request optional parameters
        """
        dict_args = self.to_dict()
        # drop the positional request identifiers; everything left is optional
        dict_args.pop('method')
        dict_args.pop('url')
        dict_args.pop('full_url')
        if dict_args.get('data') is not None and autocast_arguments_to_string:
            dict_args['data'] = cast_arguments(  # NOQA:WPS110
                dict_args['data'],
            )
        return dict_args
class AioHTTPRequestArgs:  # NOQA:WPS230
    """AioHTTP Request args."""
    def __init__(  # NOQA:D107,WPS211
        self,
        method: str,
        url: str,
        full_url: str,
        params: Optional[Mapping[str, str]] = None,  # NOQA:WPS110
        data: Any = None,  # NOQA:WPS110
        json: Any = None,
        cookies=None,
        headers=None,
        skip_auto_headers: Optional[Iterable[str]] = None,
        auth: Optional[aiohttp.BasicAuth] = None,
        allow_redirects: bool = True,
        max_redirects: int = 10,
        compress: Optional[str] = None,
        chunked: Optional[bool] = None,
        expect100: bool = False,
        raise_for_status=None,
        read_until_eof: bool = True,
        proxy: Optional[str] = None,
        proxy_auth: Optional[aiohttp.BasicAuth] = None,
        timeout=sentinel,  # aiohttp's "use the session default" marker
        ssl=None,
        proxy_headers=None,
        trace_request_ctx=None,
    ):
        """Init."""
        # plain attribute copies mirroring aiohttp.ClientSession.request
        self.method = method
        self.url = url
        self.full_url = full_url
        self.params = params  # NOQA:WPS110
        self.data = data  # NOQA:WPS110
        self.json = json
        self.cookies = cookies
        self.headers = headers
        self.skip_auto_headers = skip_auto_headers
        self.auth = auth
        self.allow_redirects = allow_redirects
        self.max_redirects = max_redirects
        self.compress = compress
        self.chunked = chunked
        self.expect100 = expect100
        self.raise_for_status = raise_for_status
        self.read_until_eof = read_until_eof
        self.proxy = proxy
        self.proxy_auth = proxy_auth
        self.timeout = timeout
        self.ssl = ssl
        self.proxy_headers = proxy_headers
        self.trace_request_ctx = trace_request_ctx
    def to_dict(self):
        """To dict.

        :return: dict repr
        """
        # shallow copy: nested values are shared with this object
        return self.__dict__.copy()
    def dict_optional_args(
        self,
        autocast_arguments_to_string: bool,
    ):
        """Get dict optional args.

        :param autocast_arguments_to_string: autocast arguments to string
        :return: dict of request optional parameters
        """
        dict_args = self.to_dict()
        # drop the positional request identifiers; everything left is optional
        dict_args.pop('method')
        dict_args.pop('url')
        dict_args.pop('full_url')
        if dict_args.get('data') is not None and autocast_arguments_to_string:
            dict_args['data'] = cast_arguments(  # NOQA:WPS110
                dict_args['data'],
            )
        return dict_args
| [
"akapustin@ambrahealth.com"
] | akapustin@ambrahealth.com |
5df36ad7d20f34bdef546bfc788a807fa43b62dd | fafc8f3e7bce2f4a5e7eb4284615432e20d70724 | /user_profile/models.py | d019b508f88ec7c5e4de8be25f28bcf6009082f8 | [] | no_license | manirajyadav/Review_This | 524f0e88fd2dda1e9beb0713d60ca1981165f528 | f5a0694b78eeb0d63609de052c0e800caf616877 | refs/heads/master | 2021-01-01T19:12:47.133432 | 2017-08-08T16:27:25 | 2017-08-08T16:27:25 | 98,537,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py |
from __future__ import unicode_literals
from django.db import models
import uuid, datetime
# creating model to make a new signup and save details
class UserModel(models.Model):
    """Account record created at signup.

    NOTE(review): password is stored as a plain CharField -- confirm it is
    hashed before save (Django's auth hashers should be used otherwise).
    """
    name = models.CharField(max_length=120, null=False, blank=False)
    email = models.EmailField(null=True)
    username = models.CharField( max_length=120,unique=True, null=False, blank=False)
    password = models.CharField( max_length=120)
    created_on = models.DateTimeField(auto_now_add=True, null=True)
    updated_on = models.DateTimeField( auto_now=True, null=True)
# creating model to save session tokens for logged in users
class SessionToken(models.Model):
    """Opaque per-login token used to authenticate subsequent requests."""
    user = models.ForeignKey(UserModel)
    session_token = models.CharField(max_length=255)
    last_request_on = models.DateTimeField(auto_now=True)
    created_on = models.DateTimeField(auto_now_add=True)
    is_valid = models.BooleanField(default=True)

    def create_token(self):
        """Generate a fresh random token for this session.

        Fix: stored as str() -- session_token is a CharField, and keeping
        a uuid.UUID object here breaks equality checks against values
        read back from the database.
        """
        self.session_token = str(uuid.uuid4())
# model for making new posts with image and captions and saving user's like
class PostModel(models.Model):
    """A user's post: an image plus caption, with likes and comments."""
    user = models.ForeignKey(UserModel)
    image = models.FileField(upload_to='user_images')
    image_url = models.CharField(max_length=255)
    caption = models.CharField(max_length=240)
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    # per-request display flag set by views; not a database column
    has_liked = False
    review = models.CharField(max_length=120, default="")

    @property
    def like_count(self):
        """Number of likes, counted in the database.

        Fix: uses QuerySet.count() (SQL COUNT(*)) instead of fetching
        every LikeModel row just to len() them.
        """
        return LikeModel.objects.filter(post=self).count()

    @property
    def comments(self):
        """Comments on this post, oldest first."""
        return CommentModel.objects.filter(post=self).order_by('created_on')
# making like on posts and storing
class LikeModel(models.Model):
    """One user's like on one post.

    NOTE(review): no uniqueness constraint is declared on (user, post),
    so duplicate likes are possible -- confirm views prevent them.
    """
    user = models.ForeignKey(UserModel)
    post = models.ForeignKey(PostModel)
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
# making comments on the posts and storing
class CommentModel(models.Model):
    """A text comment left by a user on a post."""
    user = models.ForeignKey(UserModel)
    post = models.ForeignKey(PostModel)
    comment_text = models.CharField(max_length=555)
    created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True) | [
"maniraj.yadav96@gmail.com"
] | maniraj.yadav96@gmail.com |
b7919cb9f4fb2f39862de1e0735712377e38f585 | b49cfcb4727ebc525561eaea672d614558b3c609 | /telegram-bot/api/v0/routes/api.py | 94672f83b2c62e9f7d31ea8bcdaac25278c06bbd | [
"MIT"
] | permissive | lacmus-foundation/sharp-in | 77d579423b258eab90eec755ed727d8647a63bf6 | d1f30cbeed12c89016113e7c0f81a634fdd3b859 | refs/heads/master | 2021-02-25T23:50:02.471921 | 2021-01-22T09:09:18 | 2021-01-22T09:09:18 | 245,478,151 | 3 | 4 | MIT | 2020-06-23T20:21:01 | 2020-03-06T17:20:19 | Jupyter Notebook | UTF-8 | Python | false | false | 225 | py | from fastapi import APIRouter
from api.v0.routes import ping
from api.v0.routes import predict
router = APIRouter()
router.include_router(ping.router, tags=["ping"])
router.include_router(predict.router, tags=["predict"])
| [
"2326wz@gmail.com"
] | 2326wz@gmail.com |
24aa177bffbefe30593f636267f6ed4a2bbc8b73 | 3a6d382503e11753dd81b291145847a2eabb8ec6 | /experimental/compilers/reachability/llvm_util_test.py | 1b152a96e77ea6c4ead37e28fb642958074c3e74 | [] | no_license | QuXing9/phd | 7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf | 58ba53b6d78515ed555e40527f6923e28941cc19 | refs/heads/master | 2022-02-27T03:29:05.126378 | 2019-10-22T02:46:57 | 2019-10-22T02:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,354 | py | """Unit tests for //experimental/compilers/reachability:llvm_util."""
import sys
import pyparsing
import pytest
from absl import app
from absl import flags
from compilers.llvm import opt
from experimental.compilers.reachability import llvm_util
FLAGS = flags.FLAGS
# Bytecode generated by clang using the following command:
# $ clang -emit-llvm -S -xc - < foo.c -o - > foo.ll
# Original C source code:
#
# #include <stdio.h>
# #include <math.h>
#
# int DoSomething(int a, int b) {
# if (a % 5) {
# return a * 10;
# }
# return pow((float)a, 2.5);
# }
#
# int main(int argc, char **argv) {
# for (int i = 0; i < argc; ++i) {
# argc += DoSomething(argc, i);
# }
#
# printf("Computed value %d", argc);
# return 0;
# }
SIMPLE_C_BYTECODE = """
; ModuleID = '-'
source_filename = "-"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.12.0"
@.str = private unnamed_addr constant [18 x i8] c"Computed value %d\00", align 1
; Function Attrs: norecurse nounwind readnone ssp uwtable
define i32 @DoSomething(i32, i32) #0 {
%3 = srem i32 %0, 5
%4 = icmp eq i32 %3, 0
br i1 %4, label %7, label %5
; <label>:5 ; preds = %2
%6 = mul nsw i32 %0, 10
br label %12
; <label>:7 ; preds = %2
%8 = sitofp i32 %0 to float
%9 = fpext float %8 to double
%10 = tail call double @llvm.pow.f64(double %9, double 2.500000e+00)
%11 = fptosi double %10 to i32
br label %12
; <label>:12 ; preds = %7, %5
%13 = phi i32 [ %6, %5 ], [ %11, %7 ]
ret i32 %13
}
; Function Attrs: nounwind readnone
declare double @llvm.pow.f64(double, double) #1
; Function Attrs: nounwind ssp uwtable
define i32 @main(i32, i8** nocapture readnone) #2 {
%3 = icmp sgt i32 %0, 0
br i1 %3, label %4, label %7
; <label>:4 ; preds = %2
br label %10
; <label>:5 ; preds = %22
%6 = phi i32 [ %24, %22 ]
br label %7
; <label>:7 ; preds = %5, %2
%8 = phi i32 [ %0, %2 ], [ %6, %5 ]
%9 = tail call i32 (i8*, ...) @printf(i8* nonnull getelementptr inbounds ([18 x i8], [18 x i8]* @.str, i64 0, i64 0), i32 %8)
ret i32 0
; <label>:10 ; preds = %4, %22
%11 = phi i32 [ %25, %22 ], [ 0, %4 ]
%12 = phi i32 [ %24, %22 ], [ %0, %4 ]
%13 = srem i32 %12, 5
%14 = icmp eq i32 %13, 0
br i1 %14, label %17, label %15
; <label>:15 ; preds = %10
%16 = mul nsw i32 %12, 10
br label %22
; <label>:17 ; preds = %10
%18 = sitofp i32 %12 to float
%19 = fpext float %18 to double
%20 = tail call double @llvm.pow.f64(double %19, double 2.500000e+00) #4
%21 = fptosi double %20 to i32
br label %22
; <label>:22 ; preds = %15, %17
%23 = phi i32 [ %16, %15 ], [ %21, %17 ]
%24 = add nsw i32 %23, %12
%25 = add nuw nsw i32 %11, 1
%26 = icmp slt i32 %25, %24
br i1 %26, label %10, label %5
}
; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...) #3
attributes #0 = { norecurse nounwind readnone ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #3 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #4 = { nounwind }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
!0 = !{i32 1, !"PIC Level", i32 2}
!1 = !{!"Apple LLVM version 8.0.0 (clang-800.0.42.1)"}
"""
# LLVM-generated dot file for the DoSomething() function of a simple C program.
# Original C source code:
#
# #include <stdio.h>
# #include <math.h>
#
# int DoSomething(int a, int b) {
# if (a % 5) {
# return a * 10;
# }
# return pow((float)a, 2.5);
# }
#
# int main(int argc, char **argv) {
# for (int i = 0; i < argc; ++i) {
# argc += DoSomething(argc, i);
# }
#
# printf("Computed value %d", argc);
# return 0;
# }
#
# I converted tabs to spaces in the following string.
SIMPLE_C_DOT = """
digraph "CFG for 'DoSomething' function" {
label="CFG for 'DoSomething' function";
Node0x7f86c670c590 [shape=record,label="{%2:\l %3 = alloca i32, align 4\l %4 = alloca i32, align 4\l %5 = alloca i32, align 4\l store i32 %0, i32* %4, align 4\l store i32 %1, i32* %5, align 4\l %6 = load i32, i32* %4, align 4\l %7 = srem i32 %6, 5\l %8 = icmp ne i32 %7, 0\l br i1 %8, label %9, label %12\l|{<s0>T|<s1>F}}"];
Node0x7f86c670c590:s0 -> Node0x7f86c65001a0;
Node0x7f86c670c590:s1 -> Node0x7f86c65001f0;
Node0x7f86c65001a0 [shape=record,label="{%9:\l\l %10 = load i32, i32* %4, align 4\l %11 = mul nsw i32 %10, 10\l store i32 %11, i32* %3, align 4\l br label %18\l}"];
Node0x7f86c65001a0 -> Node0x7f86c65084b0;
Node0x7f86c65001f0 [shape=record,label="{%12:\l\l %13 = load i32, i32* %4, align 4\l %14 = sitofp i32 %13 to float\l %15 = fpext float %14 to double\l %16 = call double @llvm.pow.f64(double %15, double 2.500000e+00)\l %17 = fptosi double %16 to i32\l store i32 %17, i32* %3, align 4\l br label %18\l}"];
Node0x7f86c65001f0 -> Node0x7f86c65084b0;
Node0x7f86c65084b0 [shape=record,label="{%18:\l\l %19 = load i32, i32* %3, align 4\l ret i32 %19\l}"];
}
"""
def test_DotCfgsFromBytecode_simple_c_program():
  """Test that simple C program produces two Dot CFGs."""
  dot_cfgs = list(llvm_util.DotCfgsFromBytecode(SIMPLE_C_BYTECODE))
  # One CFG per *defined* function (DoSomething and main); declarations
  # such as llvm.pow.f64 and printf produce no graph.
  assert len(dot_cfgs) == 2
  assert "CFG for 'DoSomething' function" in '\n'.join(dot_cfgs)
  assert "CFG for 'main' function" in '\n'.join(dot_cfgs)
def test_DotCfgsFromBytecode_invalid_bytecode():
  """Test that exception is raised if bytecode is invalid."""
  # next() is needed to force the lazy generator to actually invoke opt.
  with pytest.raises(opt.OptException) as e_ctx:
    next(llvm_util.DotCfgsFromBytecode("invalid bytecode!"))
  assert str(e_ctx.value).startswith("opt failed with return code ")
def test_GetBasicBlockNameFromLabel_unrecognized_label():
  """Test that error is raised if label is not recognized."""
  with pytest.raises(ValueError):
    llvm_util.GetBasicBlockNameFromLabel('invalid label')
def test_GetBasicBlockNameFromLabel_label():
  """Test that the block name is extracted from a full dot record label."""
  label = ('"{%2:\l %3 = alloca i32, align 4\l %4 = alloca i32, align 4\l '
           '%5 = alloca i8**, align 8\l %6 = alloca i32, align 4\l '
           'store i32 0, i32* %3, align 4\l store i32 %0, i32* %4, '
           'align 4\l store i8** %1, i8*** %5, align 8\l store i32 0, '
           'i32* %6, align 4\l br label %7\l}"')
  # The basic block's name is the leading "%N" identifier of the record.
  assert llvm_util.GetBasicBlockNameFromLabel(label) == "%2"
def test_ControlFlowGraphFromDotSource_invalid_source():
  """Test that exception is raised if dot can't be parsed."""
  with pytest.raises(pyparsing.ParseException):
    llvm_util.ControlFlowGraphFromDotSource("invalid dot source!")
def test_ControlFlowGraphFromDotSource_graph_name():
  """Test that CFG has correct name."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  # The graph is named after the function the dot file was generated for.
  assert g.graph['name'] == 'DoSomething'
def test_ControlFlowGraphFromDotSource_num_nodes():
  """Test that CFG has correct number of nodes."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  # Four basic blocks: entry, the two if/else arms, and the exit block.
  assert g.number_of_nodes() == 4
def test_ControlFlowGraphFromDotSource_num_edges():
  """Test that CFG has correct number of edges."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  assert g.number_of_edges() == 4
def test_ControlFlowGraphFromDotSource_is_valid():
  """Test that CFG is valid."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  # Control flow graphs are not guaranteed to be valid. That is, they may
  # contain fusible basic blocks. This can happen when creating the graph
  # from unoptimized bytecode.
  assert g.ValidateControlFlowGraph()
def test_ControlFlowGraphFromDotSource_node_names():
  """Test that CFG names are as expected."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  # Sort numerically on the digits after the leading '%'.
  node_names = sorted([g.nodes[n]['name'] for n in g.nodes],
                      key=lambda x: int(x[1:]))
  assert node_names == ['%2', '%9', '%12', '%18']
def test_ControlFlowGraphFromDotSource_edges():
  """Test that CFG edges are as expected."""
  g = llvm_util.ControlFlowGraphFromDotSource(SIMPLE_C_DOT)
  node_name_to_index_map = {g.nodes[n]["name"]: n for n in g.nodes}
  edges = set(g.edges)
  # Entry (%2) branches to both arms (%9, %12); both arms fall through to
  # the exit block (%18).
  assert (node_name_to_index_map['%2'], node_name_to_index_map['%9']) in edges
  assert (node_name_to_index_map['%2'], node_name_to_index_map['%12']) in edges
  assert (node_name_to_index_map['%9'], node_name_to_index_map['%18']) in edges
  assert (node_name_to_index_map['%12'], node_name_to_index_map['%18']) in edges
def test_ControlFlowGraphsFromBytecodes_num_graphs():
  """Test that expected number of CFGs are created."""
  # Each bytecode defines two functions, so three bytecodes yield six CFGs.
  g = list(llvm_util.ControlFlowGraphsFromBytecodes([
    SIMPLE_C_BYTECODE,
    SIMPLE_C_BYTECODE,
    SIMPLE_C_BYTECODE,
  ]))
  assert len(g) == 6
def test_ControlFlowGraphsFromBytecodes_one_failure():
  """Errors during construction of CFGs are buffered until complete."""
  # The middle job of the three will throw an opt.OptException.
  generator = llvm_util.ControlFlowGraphsFromBytecodes([
    SIMPLE_C_BYTECODE,
    "Invalid bytecode!",
    SIMPLE_C_BYTECODE,
  ])
  g = []
  # We can still get all of the valid CFGs out of input[0] and input[2]. The
  # exception from input[1] will be raised once all processes have completed.
  g.append(next(generator))
  g.append(next(generator))
  g.append(next(generator))
  g.append(next(generator))
  # Instead of StopIteration, an ExceptionBuffer will be thrown, which contains
  # all the errors that were thrown, along with the inputs that caused them.
  with pytest.raises(llvm_util.ExceptionBuffer) as e_ctx:
    next(generator)
  assert len(e_ctx.value.errors) == 1
  assert e_ctx.value.errors[0].input == "Invalid bytecode!"
  assert isinstance(e_ctx.value.errors[0].error, opt.OptException)
def main(argv):
  """Main entry point."""
  if len(argv) > 1:
    raise app.UsageError("Unknown arguments: '{}'.".format(' '.join(argv[1:])))
  # Re-run this file under pytest with verbose output; exit with its status.
  sys.exit(pytest.main([__file__, '-vv']))
if __name__ == '__main__':
  flags.FLAGS(['argv[0]', '-v=1'])
  app.run(main)
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
d4e55138a9716437c8d40bfe037e255c935053c5 | b1969b629718d7fd6a6b2a11619eae78b94449b3 | /message.py | 85389e067fbce56eb40b681d2e1c0695f57a9712 | [] | no_license | lchsk/lf2server | 80d76afbb3f87a017ddbf6ec1f84502c6e7f8e63 | ffa182a435fd81868dd868f7bcc18ef82b8ae6f6 | refs/heads/master | 2021-01-19T00:11:54.752177 | 2014-01-04T15:47:00 | 2014-01-04T15:47:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,676 | py | # -*- coding: utf-8 -*-
import json
import traceback, sys
import datetime
import time
import random
class MessageInterpreter(object):
def __init__(self, factory):
# Incoming string
self.raw_message = ''
# Incoming message in json format
self.message = {}
self.factory = factory
# Ending added to every outgoing string
self.ending = '\r\n'
self.default_char = 'luke'
self.user_admin_pair = {}
def interpret(self, pack):
# Load stuff
self.raw_message = pack[0]
self.protocol = pack[1]
magic_char = '*'
jsons = self.raw_message.split(magic_char)
for msg in jsons:
#self.message = json.loads(self.raw_message)
if msg != '':
try:
self.message = json.loads(msg)
except:
#pass
self.factory.logfile.write(time.strftime("%m/%d/%Y %H:%M:%S") + '\tMISSED: ' + repr(msg) + '\n')
############
# Messages
############
# 10 NewConnection
if self.message['id'] == 10:
self._data = {}
self._data['platform'] = self.message['platform']
self._data['client'] = pack[1]
self._data['pos'] = { 'x': 0, 'y': 0 }
# Player's character (string)
self._data['char'] = None
self.factory.users[self.message['username']] = self._data
# 13 ConnectionEstablished
return_msg = {'id' : 13 }
self.factory.users[self.message['username']]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 1 CreateGame
elif self.message['id'] == 1:
admin = self.message['admin']
self.user_admin_pair[admin] = admin
character = self.message['character']
dat = datetime.datetime.now()
self.factory.games[admin] = { 'open' : True, 'date' : dat.strftime('%m/%d/%Y %H:%M:%S'), 'players': [admin] }
# Save character
self.factory.users[admin]['char'] = character
# 2 GameCreated
return_msg = {'id' : 2, 'date' : dat.strftime('%m/%d/%Y %H:%M:%S') }
self.factory.users[admin]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 3 GetOpenGames
elif self.message['id'] == 3:
open_games = [ (admin, items) for admin, items in self.factory.games.iteritems() if items['open'] == True]
ret_games = {}
for index, game in enumerate(open_games):
ret_games[index] = {}
ret_games[index]['admin'] = game[0]
ret_games[index]['date'] = game[1]['date']
# 4 OpenGamesList
return_msg = {'id' : 4}
return_msg = dict(return_msg.items() + ret_games.items())
self.protocol.transport.write(json.dumps(return_msg) + self.ending)
# 5 SignIn
elif self.message['id'] == 5:
admin = self.message['admin']
user = self.message['user']
character = self.message['character']
self.factory.users[user]['char'] = character
if user not in self.factory.games[admin]['players']:
self.factory.games[admin]['players'].append(user)
self.user_admin_pair[user] = admin
# 7 GetSignedInPlayers
elif self.message['id'] == 7:
admin = self.message['admin']
# 8 SignedInPlayers
return_msg = {'id' : 8, 'players' : self.factory.games[admin]['players']}
self.factory.users[admin]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 9 StartGame
elif self.message['id'] == 9:
admin = self.message['admin']
self.factory.games[admin]['open'] = False
self.create_heroes(admin)
# 11 IsGameStarted
elif self.message['id'] == 11:
admin = self.message['admin']
if self.factory.games[admin]['open']:
# 12 GameNotStarted
return_msg = {'id' : 12}
self.protocol.transport.write(json.dumps(return_msg) + self.ending)
else:
# game is on
self.create_heroes(admin)
# 14 UpdatePosition
elif self.message['id'] == 14:
user = self.message['user']
admin = self.user_admin_pair[user]
x = self.message['x']
y = self.message['y']
# Update positions
self.factory.users[user]['pos']['x'] = x
self.factory.users[user]['pos']['y'] = y
# 15 PositionUpdated
return_msg = {'id' : 15}
# Add positions of every player
for u in self.factory.games[admin]['players']:
return_msg[u] = []
return_msg[u].append(self.factory.users[u]['pos']['x'])
return_msg[u].append(self.factory.users[u]['pos']['y'])
for u in self.factory.users:
if u != user:
self.factory.users[u]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 16 ChangeState
elif self.message['id'] == 16:
user = self.message['user']
state = self.message['state']
return_msg = {'id' : 17, 'user' : user, 'state' : state}
for u in self.factory.users:
if u != user:
self.factory.users[u]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 18 Hit
elif self.message['id'] == 18:
user = self.message['user'] # victim
return_msg = {'id' : 19, 'user' : user}
for u in self.factory.users:
#if u != user:
self.factory.users[u]['client'].transport.write(json.dumps(return_msg) + self.ending)
# 20 HealthInfo
elif self.message['id'] == 20:
users = self.message['users']
health = self.message['health']
return_msg = {'id' : 21, 'users' : users, 'health' : health}
for u in self.factory.users:
#if u != user:
self.factory.users[u]['client'].transport.write(json.dumps(return_msg) + self.ending)
def error(self, pack):
print 'Error...'
#self.factory.logfile.write(time.strftime("%m/%d/%Y %H:%M:%S") + '\t' + traceback.print_exc(file=sys.stdout) + '\n')
traceback.print_exc(file=sys.stdout)
# 6 CreateHeroes
def create_heroes(self, admin):
char_list = []
posx = []
posy = []
# set up players starting positions
players_len = len(self.factory.games[admin]['players'])
step = 1.0 / players_len
current_step = step / 2.0
for i in xrange(0, players_len):
posx.append(current_step)
posy.append(0.19 / random.randint(1, 10))
current_step += step
for p in self.factory.games[admin]['players']:
c = self.factory.users[p]['char']
if c == None:
c = default_char
char_list.append(c)
return_msg = {'id' : 6, 'players' : self.factory.games[admin]['players'], 'characters' : char_list, 'posx' : posx, 'posy' : posy}
for user in self.factory.users:
self.factory.users[user]['client'].transport.write(json.dumps(return_msg) + self.ending)
| [
"mjlechowski@gmail.com"
] | mjlechowski@gmail.com |
a5063362194b242a21296389b20c2120b0384e9d | 56270ab66c1979f4b0029217ee8d1936d0e174e0 | /untitled0.py | bd829e807a31e92b4e7f1538a2737365a0fbd7e7 | [] | no_license | aJamesjr07/Assignment-1 | 7c583712fa7e1f7beca6ae724dae84a89a3eaf43 | 2fc08dbe4fb2faf1fe839be98cbd75300d7764f3 | refs/heads/master | 2022-12-03T21:56:39.084832 | 2020-08-11T14:21:53 | 2020-08-11T14:21:53 | 286,761,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | # -*- coding: utf-8 -*-
"""Untitled0.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SBXv6vFt7OuPxjUJeNOxiEGOLt4141Bs
"""
import numpy as np # linear algebra
import pandas as pd # data processing
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, Conv2D, MaxPooling2D
import os
print(os.listdir("../input"))
import zipfile
with zipfile.ZipFile("../input/train.zip","r") as z:
z.extractall(".")
with zipfile.ZipFile("../input/test1.zip","r") as z:
z.extractall(".")
#train data
main_dir = "/kaggle/working/"
train_dir = "train"
path = os.path.join(main_dir,train_dir)
for p in os.listdir(path):
category = p.split(".")[0]
img_array = cv2.imread(os.path.join(path,p),cv2.IMREAD_GRAYSCALE)
new_img_array = cv2.resize(img_array, dsize=(80, 80))
plt.imshow(new_img_array,cmap="gray")
break
# Module-level training arrays filled in by create_test_data():
# X holds 80x80 grayscale images, y holds the matching labels.
X = []
y = []
# Label encoding: 1 for "dog" filenames, 0 otherwise (i.e. "cat").
convert = lambda category : int(category == 'dog')
def create_test_data(path):
    """Load every image under `path` as an 80x80 grayscale array into the
    module-level X/y lists; the label comes from the filename prefix."""
    for p in os.listdir(path):
        category = p.split(".")[0]
        category = convert(category)
        img_array = cv2.imread(os.path.join(path,p),cv2.IMREAD_GRAYSCALE)
        new_img_array = cv2.resize(img_array, dsize=(80, 80))
        X.append(new_img_array)
        y.append(category)
create_test_data(path)
X = np.array(X).reshape(-1, 80,80,1)
y = np.array(y)
X = X/255.0
#test data
model = Sequential()
# First convolution block: 64 3x3 filters + ReLU, then 2x2 max-pooling.
# (The original comment called this a densely-connected layer; it is a Conv2D.)
model.add(Conv2D(64,(3,3), activation = 'relu', input_shape = X.shape[1:]))
model.add(MaxPooling2D(pool_size = (2,2)))
# Second convolution block:
model.add(Conv2D(64,(3,3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
# Single sigmoid output unit for binary cat/dog classification.
# (The original comment claimed a 10-unit softmax, which does not match.)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer="adam",
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)
train_dir = "test1"
path = os.path.join(main_dir,train_dir)
#os.listdir(path)
# Module-level test arrays filled in by create_test1_data():
# X_test holds 80x80 grayscale images, id_line the matching file ids.
X_test = []
id_line = []
def create_test1_data(path):
    """Load every test image under `path` (80x80 grayscale) into X_test,
    recording each file's numeric id prefix in id_line."""
    for p in os.listdir(path):
        id_line.append(p.split(".")[0])
        img_array = cv2.imread(os.path.join(path,p),cv2.IMREAD_GRAYSCALE)
        new_img_array = cv2.resize(img_array, dsize=(80, 80))
        X_test.append(new_img_array)
# Build the test tensor, run inference, and write the Kaggle submission file.
create_test1_data(path)
X_test = np.array(X_test).reshape(-1, 80, 80, 1)
X_test = X_test / 255
predictions = model.predict(X_test)
# Sigmoid output: round each probability to a hard 0/1 label.
predicted_val = [int(round(p[0])) for p in predictions]
submission_df = pd.DataFrame({'id': id_line, 'label': predicted_val})
# BUG FIX: the original line was missing its closing parenthesis,
# which made the whole script a SyntaxError.
submission_df.to_csv("submission.csv", index=False)
| [
"noreply@github.com"
] | aJamesjr07.noreply@github.com |
436317d5e2aff0f9ed457bd3beaadfe87ff38b4d | 8104f04c4a31232962f82c9f237f87f7be3909b0 | /project-3/optim/PolyScheduler.py | ca90d3e3a5ce3d008dd6604541bd32195a7c21f6 | [] | no_license | AndrewZhou924/Digital-Image-Processing-Course-Design | 6624d4c819a0f980ddda103577de0677f134b38c | f3a4a29fd7c4dda25d249c2c36fd43efda2d8460 | refs/heads/master | 2021-07-07T00:49:24.584572 | 2020-03-31T07:35:37 | 2020-03-31T07:35:37 | 241,303,727 | 0 | 1 | null | 2021-06-08T21:07:18 | 2020-02-18T07:57:59 | Jupyter Notebook | UTF-8 | Python | false | false | 802 | py | from .Scheduler import Scheduler
class PolyScheduler(Scheduler):
    """Polynomial learning-rate decay with optional linear warmup.

    For the first ``warmup_epoch`` epochs the rate ramps linearly from
    ``warmup_begin_lr`` up to ``base_lr``; afterwards it decays from
    ``base_lr`` toward ``final_lr`` following a polynomial of degree
    ``power`` over ``epoch_num`` epochs (see step()).
    """
    def __init__(self, epoch_num, base_lr, power=2, warmup_epoch=0, final_lr=0.0, warmup_begin_lr=0.0):
        super().__init__()
        # Current learning rate; recomputed by step() each epoch.
        self.lr = 0.
        self.epoch_num = epoch_num
        self.base_lr = base_lr
        self.power = power
        self.warmup_epoch = warmup_epoch
        self.final_lr = final_lr
        self.warmup_begin_lr = warmup_begin_lr
def step(self, epoch):
if epoch<self.warmup_epoch:
self.lr = self.warmup_begin_lr + (self.base_lr - self.warmup_begin_lr) * (epoch+1) / self.warmup_epoch
else:
self.lr = self.final_lr + (self.base_lr - self.final_lr) * pow(1-float(epoch+1-self.warmup_epoch)/float(self.epoch_num), self.power) if epoch<self.epoch_num else 0 | [
"vealocia@gmail.com"
] | vealocia@gmail.com |
2b887ca5322df9eb742eec5d14620c6a8c37621d | b5921afe6ea5cd8b3dcfc83147ab5893134a93d0 | /tl/contrib/tweepy/auth.py | 51ed3d90ae2fd53d749c402f1806617c2846a51b | [
"LicenseRef-scancode-other-permissive"
] | permissive | techdragon/tl | aaeb46e18849c04ad436e0e786401621a4be82ee | 6aba8aeafbc92cabdfd7bec11964f7c3f9cb835d | refs/heads/master | 2021-01-17T16:13:18.636457 | 2012-11-02T10:08:10 | 2012-11-02T10:08:10 | 9,296,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | # Tweepy
# Copyright 2009 Joshua Roesslein
# See LICENSE
from urllib.request import Request, urlopen
from urllib.parse import quote
import base64
from tweepy import oauth
from tweepy.error import TweepError
from tweepy.api import API
class AuthHandler(object):
    """Abstract interface for Twitter authentication strategies.

    Concrete subclasses (basic auth, OAuth) must implement both methods.
    """
    def apply_auth(self, url, method, headers, parameters):
        """Apply authentication headers to request"""
        raise NotImplementedError
    def get_username(self):
        """Return the username of the authenticated user"""
        raise NotImplementedError
class BasicAuthHandler(AuthHandler):
    """HTTP Basic authentication with a fixed username/password pair."""

    def __init__(self, username, password):
        self.username = username
        # Pre-compute the base64-encoded "user:password" pair once.
        # Encoded as UTF-8 (rather than ASCII) so that non-ASCII
        # credentials do not raise UnicodeEncodeError.
        self._b64up = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8'))

    def apply_auth(self, url, method, headers, parameters):
        """Attach the Authorization header; the other arguments are unused
        for basic auth but kept for interface compatibility."""
        headers['Authorization'] = 'Basic %s' % self._b64up.decode()

    def get_username(self):
        return self.username
class OAuthHandler(AuthHandler):
    """Three-legged OAuth 1.0a authentication against Twitter.

    Typical flow: get_authorization_url() -> user authorizes ->
    get_access_token(verifier) -> signed requests via apply_auth().
    All network/parse failures are wrapped in TweepError.
    """
    REQUEST_TOKEN_URL = 'http://api.twitter.com/oauth/request_token'
    AUTHORIZATION_URL = 'http://api.twitter.com/oauth/authorize'
    AUTHENTICATE_URL = 'http://api.twitter.com/oauth/authenticate'
    ACCESS_TOKEN_URL = 'http://api.twitter.com/oauth/access_token'
    def __init__(self, consumer_key, consumer_secret, callback=None):
        self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self._sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self.request_token = None
        self.access_token = None
        self.callback = callback
        # Cached by get_username() after the first verify_credentials call.
        self.username = None
    def apply_auth(self, url, method, headers, parameters):
        """Sign the request with the access token and merge the OAuth
        Authorization header into `headers`."""
        request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
            http_url=url, http_method=method, token=self.access_token, parameters=parameters)
        request.sign_request(self._sigmethod, self._consumer, self.access_token)
        headers.update(request.to_header())
    def _get_request_token(self):
        # Fetch a temporary request token (5 s timeout on the HTTP call).
        try:
            request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
                http_url = self.REQUEST_TOKEN_URL, callback=self.callback)
            request.sign_request(self._sigmethod, self._consumer, None)
            resp = urlopen(Request(self.REQUEST_TOKEN_URL,
                headers=request.to_header()), timeout=5.0)
            return oauth.OAuthToken.from_string(resp.read().decode())
        except Exception as e:
            raise TweepError(e)
    def set_access_token(self, key, secret):
        """Install a previously obtained access token key/secret pair."""
        self.access_token = oauth.OAuthToken(key, secret)
    def get_authorization_url(self):
        """Get the authorization URL to redirect the user"""
        try:
            # get the request token
            self.request_token = self._get_request_token()
            # build auth request and return as url
            request = oauth.OAuthRequest.from_token_and_callback(
                token=self.request_token, http_url=self.AUTHORIZATION_URL)
            return request.to_url()
        except Exception as e:
            raise TweepError(e)
    def get_access_token(self, verifier=None):
        """
        After user has authorized the request token, get access token
        with user supplied verifier.
        """
        try:
            # build request
            request = oauth.OAuthRequest.from_consumer_and_token(
                self._consumer,
                token=self.request_token, http_url=self.ACCESS_TOKEN_URL,
                verifier=str(verifier)
            )
            request.sign_request(self._sigmethod, self._consumer, self.request_token)
            # send request
            resp = urlopen(Request(self.ACCESS_TOKEN_URL, headers=request.to_header()))
            self.access_token = oauth.OAuthToken.from_string(resp.read().decode())
            return self.access_token
        except Exception as e:
            raise TweepError(e)
    def get_username(self):
        """Return (and cache) the authenticated user's screen name via a
        verify_credentials API call."""
        if self.username is None:
            api = API(self)
            user = api.verify_credentials()
            if user:
                self.username = user.screen_name
            else:
                raise TweepError("Unable to get username, invalid oauth token!")
        return self.username
| [
"feedbackflow@gmail.com"
] | feedbackflow@gmail.com |
87f68bcf618d998027044494849ca6cc6cbdb568 | b488060127559a3910ad5bf6642061019cc5f7df | /app/auth/views.py | f16dd5a46f53c65e4f7cb58c19eb52ce58c65ca7 | [] | no_license | hypnopompicindex/flasky | 1cf4e104bf68a192348049d651ddf7e35c6c6e0d | 2131bb49decd8a17d25078ab37205f12e22aefa1 | refs/heads/master | 2016-09-05T16:04:45.933010 | 2014-08-29T22:25:55 | 2014-08-29T22:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,999 | py | from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
    """Intercept every request: logged-in users who have not confirmed
    their address are redirected to the 'unconfirmed' page.

    Auth endpoints and static files are exempt so the user can still log
    out, re-request a confirmation email, and load page assets.
    """
    if current_user.is_authenticated() \
            and not current_user.confirmed \
            and request.endpoint \
            and request.endpoint[:5] != 'auth.' \
            and request.endpoint != 'static':
        # BUG FIX: the extra `request.endpoint` truthiness check prevents a
        # TypeError (None[:5]) when no route matches the request; the
        # 'static' exemption keeps CSS/JS loading on the unconfirmed page.
        return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Landing page for logged-in users with an unconfirmed address;
    anonymous or already-confirmed users are sent to the home page."""
    if current_user.is_anonymous() or current_user.confirmed:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by email and password; honours the ?next= redirect."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        # Deliberately vague message: do not reveal which field was wrong.
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and email a confirmation token."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        # Explicit commit before generating the token -- presumably the
        # token encodes the user's database id, which only exists after
        # the commit (confirm against User.generate_confirmation_token).
        db.session.commit()
        token = user.generate_confirmation_token()
        send_email(user.email, 'Confirm Your Account',
                   'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an emailed confirmation token for the logged-in user."""
    if current_user.confirmed:
        # Already confirmed: nothing to do.
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        flash('You have confirmed your account. Thanks!')
    else:
        flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send a fresh confirmation token to the current user's address."""
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user change their password after re-entering the
    old one.

    NOTE(review): only db.session.add() is called here -- confirm that the
    app commits the session elsewhere (e.g. on request teardown), otherwise
    the change may not persist.
    """
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            db.session.add(current_user)
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password.')
    return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset token to the given address, if registered."""
    # Only anonymous users may request a reset.
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            token = user.generate_reset_token()
            send_email(user.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=user, token=token,
                       next=request.args.get('next'))
        # The same message is flashed whether or not the address exists,
        # presumably to avoid revealing which emails are registered.
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password, given a valid emailed reset token."""
    if not current_user.is_anonymous():
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            # Unknown email: silently bail out rather than leak information.
            return redirect(url_for('main.index'))
        if user.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        else:
            return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email-address change: verify the password, then send a
    confirmation token to the *new* address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.email.data
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid email or password.')
    return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Finish an email change by validating the token that was sent to
    the new address."""
    if current_user.change_email(token):
        flash('Your email address has been updated.')
    else:
        flash('Invalid request.')
    return redirect(url_for('main.index'))
| [
"hypnopompicindex@gmail.com"
] | hypnopompicindex@gmail.com |
a0647338bf9bf7f1b4ad381078643e483422723e | 825930f372fdf8c9c42cd2f9b1f424ab9de90b38 | /accounts/migrations/0003_order_note.py | 92701e816ce3c74d2368fbed83add82c8b9acf2c | [] | no_license | Xasanjon/crm2 | 56cbfa05d910144c75a3cdfe7423ba68fd576534 | 52279925e64e4268830fbeae6af897aef14b64d0 | refs/heads/master | 2023-07-02T04:13:33.928305 | 2021-08-16T14:53:43 | 2021-08-16T14:53:43 | 395,755,429 | 0 | 0 | null | 2021-08-16T14:53:44 | 2021-08-13T18:30:32 | Python | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.2 on 2021-08-02 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20210725_0252'),
]
operations = [
migrations.AddField(
model_name='order',
name='note',
field=models.CharField(max_length=200, null=True),
),
]
| [
"xasanboy99akaxon@gmail.com"
] | xasanboy99akaxon@gmail.com |
1b803449349f7c2d236f15348e6db398d826631f | 504344fc66e8d54081a17306d3012a16bbb81ee7 | /1_start_main.py | f5b040ad17b8d6c087939daec2d577d8e233f917 | [] | no_license | Ryanshuai/auto_pubg | 814753644a8e8e7aa3d7ca3c346a9e05b825c00d | 696f33f888efc441a74e142db878e836bbf3efee | refs/heads/master | 2022-09-21T12:13:24.155393 | 2020-11-12T20:03:43 | 2020-11-12T20:03:43 | 153,748,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from screen_parameter import show_position_y, show_position_x, show_size_y, show_size_x
from press_gun.robot import Robot
from state.all_states import All_States
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(show_size_x, show_size_y)
Dialog.move(show_position_x, show_position_y)
Dialog.setWindowFlag(QtCore.Qt.WindowStaysOnTopHint)
Dialog.setWindowFlag(QtCore.Qt.FramelessWindowHint)
# Dialog.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
Dialog.setFont(font)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(0, 0, show_size_x, show_size_y))
self.label.setObjectName("label")
QtCore.QMetaObject.connectSlotsByName(Dialog)
# self.robot = Robot(All_States(), is_calibrating=True)
self.robot = Robot(All_States())
self.robot.temp_qobject.state_str_signal[str].connect(self.retranslateUi)
def retranslateUi(self, text):
_translate = QtCore.QCoreApplication.translate
self.label.setText(_translate("Dialog", text))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"1018718155@qq.com"
] | 1018718155@qq.com |
e3dcf09841e745cba160d63eb1c9356f434ba51f | bef6c39a273d3d6ff96ce22f5457310a9392df47 | /venv/Scripts/django-admin.py | de18f95c90277a1418fdb8e6319cdad6df4835bf | [] | no_license | genesis80013/holamundo2 | c59c20a378df50a3accfa8e5a3458e0beee80bdd | 2cfc9311728a4ea812393cf5f036f1a7a9e62c20 | refs/heads/master | 2023-02-03T06:48:11.287946 | 2020-12-17T01:35:46 | 2020-12-17T01:35:46 | 322,136,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | #!C:\Users\Genesis\Documents\UTEQ\tesis\practicas\holamundo\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"45234703+genesis80013@users.noreply.github.com"
] | 45234703+genesis80013@users.noreply.github.com |
802889fdf2a2055d19fb5e5785c6887a118bc1c1 | 9442b97a4bfdd0cdc90e5f498e8d16d8483aa5a1 | /ProblemSolverCentral/urls.py | 4c9be1e5559c97710521715b04d7564fd3c6e3a8 | [] | no_license | nauman-pucit/problem_solver | 9af25cf1507e28b76caf08df10818ee9ed0ca331 | 4da7f1d7f02695504887287db28f808c637a2286 | refs/heads/master | 2021-09-03T08:13:28.734243 | 2018-01-07T11:17:21 | 2018-01-07T11:17:21 | 116,555,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | """ProblemSolverCentral URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView
from rest_framework import routers
from authentication.views import SignInAuthenticate, Logout, CheckEmailExist, \
AllUsersViewSet, CreateSuperUser
router = routers.DefaultRouter()
router.register(r'all_user_accounts', AllUsersViewSet)
urlpatterns = [
url(r'^$', RedirectView.as_view(url='social_network/')),
url(r'^social_network/', include('SocialImpactNetwork.urls')),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'^sign_up/$', AllUsersViewSet.as_view({'post': 'create'}), name='sign_up'),
url(r'^check_email_exist/$', CheckEmailExist.as_view(), name='check_email_exist'),
url(r'^user_account/$', AllUsersViewSet.as_view({'post': 'retrieve'}), name='user_account'),
url(r'^update_user_account/$', AllUsersViewSet.as_view({'post': 'update'}), name='update_user_account'),
url(r'^sign_in_authenticate/$', SignInAuthenticate.as_view(), name='sign_in_authenticate'),
url(r'^log_out/$', Logout.as_view(), name='log_out'),
url(r'^change_role/$', CreateSuperUser.as_view(), name='change_user_role'),
]
urlpatterns += router.urls
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"nauman.pucit@gmail.com"
] | nauman.pucit@gmail.com |
729df1668f38187379126774eb98b50fe086bc12 | 57b1e8d4265c2985ded9bd033592f94bb16123d1 | /Python_2017_summer/Study_Day3/D3_01_input.py | 65c8b229dfc04efe21006e977116886ed3922e5b | [] | no_license | cloud-cloudbooks/Python_2018 | 0fd035184b0e31c6ab06ef5c83c67dc40cdb0ece | ba711f7f23367c5a6db2d8919d2ef87aad713a0f | refs/heads/master | 2021-08-30T18:52:09.124973 | 2017-12-19T02:12:42 | 2017-12-19T02:12:42 | 114,704,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # 입력 처리 input 명령어
#
name = input("이름이 무엇입니까? ")
print("안녕하세요", name, "님")
# input으로 숫자 두 개를 사용자한테서 입력받기
x = input("x값을 입력하세요 : ")
a = int(x)
x = input("y값을 입력하세요 : ")
b = int(x)
print(a*b) | [
"cloud@cloudbooks.co.kr"
] | cloud@cloudbooks.co.kr |
a8854b058391a3e400e059150fc9e2444400ab81 | d4b049d91795b5f8899f5ee60151a04be8890af9 | /litapplications/candidates/migrations/0037_auto_20170604_1531.py | 673c7eb72fcbdf9751afa92d8101506c0ee2c1c1 | [] | no_license | thatandromeda/litapplications | 3ab879c6edee1fd8424c3546eead47659699655a | d8b67d0b82ea14fb71b871f7563b7096640e4c25 | refs/heads/master | 2020-05-21T23:59:07.004211 | 2017-12-08T03:25:24 | 2017-12-08T03:25:24 | 64,570,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-06-04 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('candidates', '0036_auto_20170410_0025'),
]
operations = [
migrations.AddField(
model_name='appointment',
name='year_end',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='appointment',
name='year_start',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"andromeda.yelton@gmail.com"
] | andromeda.yelton@gmail.com |
cf7998ad81daa62c4fba1aad1cf014844efd51c8 | c532e4d7466188ebbcca32413f592491eac9d7f8 | /leetcode/392.is-subsequence.py | e6cd21e9c3757a79129f956abc34459736ec87de | [] | no_license | Zedmor/hackerrank-puzzles | a1ff8601ea6d2bb3d2095909dfe00ef32346b74f | 2cc179bdb33a97294a2bf99dbda278e935165943 | refs/heads/master | 2023-01-10T13:57:26.649360 | 2023-01-04T03:27:05 | 2023-01-04T03:27:05 | 68,768,901 | 0 | 0 | null | 2017-03-05T18:24:18 | 2016-09-21T01:46:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,609 | py | #
# @lc app=leetcode id=392 lang=python3
#
# [392] Is Subsequence
#
# https://leetcode.com/problems/is-subsequence/description/
#
# algorithms
# Easy (47.97%)
# Total Accepted: 233.6K
# Total Submissions: 474.3K
# Testcase Example: '"abc"\n"ahbgdc"'
#
# Given a string s and a string t, check if s is subsequence of t.
#
# A subsequence of a string is a new string which is formed from the original
# string by deleting some (can be none) of the characters without disturbing
# the relative positions of the remaining characters. (ie, "ace" is a
# subsequence of "abcde" while "aec" is not).
#
# Follow up:
# If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you
# want to check one by one to see if T has its subsequence. In this scenario,
# how would you change your code?
#
# Credits:
# Special thanks to @pbrother for adding this problem and creating all test
# cases.
#
#
# Example 1:
# Input: s = "abc", t = "ahbgdc"
# Output: true
# Example 2:
# Input: s = "axc", t = "ahbgdc"
# Output: false
#
#
# Constraints:
#
#
# 0 <= s.length <= 100
# 0 <= t.length <= 10^4
# Both strings consists only of lowercase characters.
#
#
#
class Solution:
"""
>>> Solution().isSubsequence('abc', 'ahbgdc')
True
>>> Solution().isSubsequence('axc', 'ahbgdc')
False
"""
def isSubsequence(self, s: str, t: str) -> bool:
pointer_s = 0
pointer_t = 0
while pointer_t < len(t) and pointer_s < len(s):
if t[pointer_t] == s[pointer_s]:
pointer_s += 1
pointer_t += 1
return pointer_s == len(s)
| [
"zedmor@gmail.com"
] | zedmor@gmail.com |
8ee4bb76f96e2354994b15cfc2903c1056a63de8 | 29df72553575c6d18241109ffd9e3bb02c21cf8c | /rl/agent.py | 12f283a20394ca097889a3f40a2a3b61eae27659 | [] | no_license | vladhc/rl-udacity | 3678e944feebb3c4f00b337a45340a897e6e973a | 22326031d2465a62a5f0d540cece5ec4adba87be | refs/heads/master | 2020-05-09T18:48:28.566816 | 2019-07-14T13:09:27 | 2019-07-14T13:09:27 | 181,355,384 | 1 | 2 | null | 2019-09-06T15:36:34 | 2019-04-14T18:59:24 | Python | UTF-8 | Python | false | false | 2,835 | py | from rl import Reinforce, QLearning, ActorCritic, PPO, MultiPPO
def create_agent(env, args):
action_space = env.action_space
observation_shape = env.observation_space.shape
print("Action space: {}".format(action_space))
print("Observation space: {}".format(env.observation_space))
agent_type = args["agent"]
baseline = args["baseline"]
baseline_learning_rate = args["baseline_learning_rate"]
gamma = args["gamma"]
learning_rate = args["learning_rate"]
if agent_type == "qlearning":
return QLearning(
action_size=action_space.n,
observation_shape=observation_shape,
beta_decay=args["beta_decay"],
gamma=gamma,
learning_rate=learning_rate,
soft=args["soft"],
dueling=args["dueling"],
double=args["double"],
noisy=args["noisy"],
priority=args["priority"],
replay_buffer_size=args["replay_buffer_size"],
min_replay_buffer_size=args["min_replay_buffer_size"],
target_update_freq=args["target_update_freq"],
train_freq=args["train_freq"],
tau=args["tau"],
batch_size=args["batch_size"],
epsilon_start=args["epsilon_start"],
epsilon_end=args["epsilon_end"],
epsilon_decay=args["epsilon_decay"])
elif agent_type == "reinforce":
return Reinforce(
action_size=action_space.n,
observation_shape=observation_shape,
gamma=gamma,
learning_rate=learning_rate,
baseline=baseline,
baseline_learning_rate=baseline_learning_rate)
elif agent_type == "actor-critic":
return ActorCritic(
action_size=action_space.n,
observation_shape=observation_shape,
gamma=gamma,
learning_rate=learning_rate)
elif agent_type == "ppo":
return PPO(
action_space=action_space,
observation_shape=observation_shape,
n_envs=env.n_envs,
gamma=gamma,
horizon=args["horizon"],
epochs=args["ppo_epochs"],
gae_lambda=args["gae_lambda"],
learning_rate=learning_rate)
elif agent_type == 'multippo':
return MultiPPO(
action_space=action_space,
observation_shape=observation_shape,
n_envs=env.n_envs,
n_agents=env.n_agents,
gamma=gamma,
horizon=args["horizon"],
epochs=args["ppo_epochs"],
gae_lambda=args["gae_lambda"],
learning_rate=learning_rate)
| [
"vladhc@gmail.com"
] | vladhc@gmail.com |
b5e97ffee6a1cf13d2ddcebf6237fca2dfb79d96 | 24c94bc45940e119d47e94d5e68e988ea6e6fae7 | /pydoctor/test/__init__.py | cbfcf25c97caebc7b5950c06ef3e6caf76833a2a | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | The-Genius-Thories/pydoctor | 9e247f6c234fd18cfeac52d6440b707903152f92 | 5e20f51ae0a34183ee41c1068a5b5aac506628d5 | refs/heads/master | 2021-01-23T05:18:50.971558 | 2017-03-14T07:42:54 | 2017-03-14T07:42:54 | 86,296,525 | 0 | 0 | null | 2017-03-27T05:55:59 | 2017-03-27T05:55:59 | null | UTF-8 | Python | false | false | 29 | py | """PyDoctor's test suite."""
| [
"michael.hudson@canonical.com"
] | michael.hudson@canonical.com |
56a53cf0a36b5b36076f79e659a49128f7fa1265 | ada026a8588611f18a0bae44619aea6dc89c07a7 | /backend/event/models.py | 94899bc08145ed779dc022f61534cb2e63f156b5 | [] | no_license | crowdbotics-apps/iqraa-25096 | 5a363ec49766352d23de9348bfddcaed187b98c8 | 42def0722c287182c100ef46a4284236fbd2f04e | refs/heads/master | 2023-03-22T23:40:21.685747 | 2021-03-18T09:17:50 | 2021-03-18T09:17:50 | 349,008,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | from django.conf import settings
from django.db import models
class Vendor(models.Model):
"Generated Model"
name = models.TextField()
logo_image = models.SlugField(
null=True,
blank=True,
max_length=50,
)
type = models.TextField(
null=True,
blank=True,
)
website = models.URLField(
null=True,
blank=True,
)
location = models.ForeignKey(
"event.Location",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="vendor_location",
)
category = models.ForeignKey(
"event.Category",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="vendor_category",
)
class MySchedule(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="myschedule_user",
)
schedule = models.ForeignKey(
"event.Schedule",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="myschedule_schedule",
)
class Faq(models.Model):
"Generated Model"
title = models.CharField(
max_length=256,
)
description = models.TextField()
class Sponsor(models.Model):
"Generated Model"
name = models.TextField()
logo_image = models.SlugField(
max_length=50,
)
sponsor_level = models.TextField()
presenter = models.BooleanField()
website = models.URLField(
null=True,
blank=True,
)
location = models.ForeignKey(
"event.Location",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="sponsor_location",
)
class Favorites(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="favorites_user",
)
vendor = models.ForeignKey(
"event.Vendor",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="favorites_vendor",
)
class VendorDetail(models.Model):
"Generated Model"
website = models.URLField()
description = models.TextField()
associated_name = models.TextField(
null=True,
blank=True,
)
vendor_id = models.ForeignKey(
"event.Vendor",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="vendordetail_vendor_id",
)
class Location(models.Model):
"Generated Model"
amenities = models.TextField(
null=True,
blank=True,
)
name = models.CharField(
null=True,
blank=True,
max_length=256,
)
image = models.SlugField(
null=True,
blank=True,
max_length=50,
)
class Presenter(models.Model):
"Generated Model"
name = models.CharField(
max_length=256,
)
title = models.CharField(
max_length=256,
)
schedule = models.ForeignKey(
"event.Schedule",
on_delete=models.CASCADE,
related_name="presenter_schedule",
)
class Schedule(models.Model):
"Generated Model"
dateTime = models.DateTimeField()
description = models.TextField(
null=True,
blank=True,
)
track = models.TextField(
null=True,
blank=True,
)
location = models.ForeignKey(
"event.Location",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="schedule_location",
)
class Category(models.Model):
"Generated Model"
description = models.TextField()
name = models.CharField(
null=True,
blank=True,
max_length=256,
)
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
56f214efda239288f6fcf2f4b2a0c34393d2b28e | fbfd802dfbad2efa660b1554c634d9b96a20f220 | /rest/sessions_start.py | 8551ac31dc222d780d394f859842bc3a33ad87e6 | [] | no_license | laser-me/cassandra_python | a448e4b0cdb7e739555d2b77b257979078f9f7b1 | 28c67ce6d40d0c2f21a27540f2eb754adc66f485 | refs/heads/master | 2020-03-23T08:29:06.341376 | 2018-07-19T13:25:11 | 2018-07-19T13:25:11 | 141,329,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from flask import jsonify
from flask_restful import Resource
from datetime import datetime, timedelta
from helpers.cassandra import execute
# SELECT * FROM events_start_by_country where ts >= '2016-11-22T20:40:50' ALLOW FILTERING;
class SessionsStart(Resource):
def get(self, hours):
hours_ago = datetime.now() - timedelta(hours=hours)
timestamp = hours_ago.strftime('%Y-%m-%dT%H:%M:%S')
# just in case limiting
cql = """SELECT * FROM events_start_by_country where ts >= %s LIMIT 20 ALLOW FILTERING;"""
reply = execute(cql, [timestamp])
return jsonify(reply)
| [
"laser.me@yandex.ru"
] | laser.me@yandex.ru |
a65a5fe2737f2506964095d71631ff9e74b89d51 | 1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2 | /agents/displays/human_display.py | 4ad2949060ec0816a46e1db1e5ae89c9fd33bade | [
"MIT"
] | permissive | cjreynol/willsmith | 02f793003a914a21b181839bbd58108046f312d6 | 39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5 | refs/heads/master | 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | from tkinter import Button, Entry, Label, END
from willsmith.gui_display_controller import GUIDisplayController
class HumanDisplay(GUIDisplayController):
"""
Creates a Tkinter GUI that allows the user to input their moves.
"""
WINDOW_TITLE = "Human Agent"
LABEL_FONT = ("Courier New", 14)
def __init__(self):
super().__init__()
self.input_prompt_label = None
self.input_entry = None
self.submit_button = None
def _initialize_widgets(self):
self.input_prompt_label = Label(self.root, font = self.LABEL_FONT,
text = "<prompt here>")
self.input_entry = Entry(self.root)
self.submit_button = Button(self.root, text = "Submit")
def _place_widgets(self):
self.input_prompt_label.grid(row = 0, column = 0, columnspan = 2)
self.input_entry.grid(row = 1, column = 0)
self.submit_button.grid(row = 1, column = 1)
def _update_display(self, agent, action):
self._reset_display(agent)
def _reset_display(self, agent):
self.input_entry.delete(0, END)
def _submit_entry():
pass
| [
"cjreynol13@aol.com"
] | cjreynol13@aol.com |
cf011e7785fadb13add387804237a74bc4a3c4e5 | 152ea447e7b665bd795f84dd8e7e73ff2eeeb864 | /video_facial_landmarks.py | 92fffda8793513364e4e84f176e7085beee70ad1 | [] | no_license | angel-stha/Facial-Landmarks | be86a66394514cc4dc46126987b5d4b9a90392ba | 23a7da053ce38a264e19e5b7ac90d4f0df8cd051 | refs/heads/master | 2023-05-11T06:10:22.372849 | 2021-05-26T12:08:37 | 2021-05-26T12:08:37 | 370,675,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,298 | py | # USAGE
# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat
# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --picamera 1
# import the necessary packages
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-r", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# initialize the video stream and allow the cammera sensor to warmup
print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 400 pixels, and convert it to
# grayscale
frame = vs.read()
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
result = cv2.VideoWriter('output/webcam.avi',cv2.VideoWriter_fourcc(*'MJPG'),
10, (500,500))
# detect faces in the grayscale frame
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
for (x, y) in shape:
cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
# show the frame
cv2.imshow("Frame", frame)
result.write(frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | [
"angelshrestha33@gmail.com"
] | angelshrestha33@gmail.com |
2a4ebee7a17396272a25195c34485eee9ed82a04 | d53d253ae49cd0497cb58f10a2e5b305d0d98cec | /code/handlers/local_handler.py | f753debb505d2689221ee3e0cd08215b866a5afe | [
"MIT"
] | permissive | hfiuza/ApartmentFinder | 9a7cc00b2f62538166103a170a5683bd2cc7acf7 | 476b5364f9ea807037e374934b0b14959143addd | refs/heads/master | 2020-07-06T08:44:24.192414 | 2020-03-09T18:33:32 | 2020-03-09T18:33:32 | 202,959,672 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import pandas as pd
import os
from constants import LOCAL_PATH, APARTMENTS_FILENAME
def basic_write(df):
df.to_csv(os.path.join(LOCAL_PATH, APARTMENTS_FILENAME))
def basic_read():
return pd.read_csv(os.path.join(LOCAL_PATH, APARTMENTS_FILENAME))
| [
"henriquegfn@gmail.com"
] | henriquegfn@gmail.com |
9999bb084c19897bd8e0f40f1449c5ab8305baec | 2a6d385c7737aea3c6b49eef9252babb7557b909 | /MCTools/test/lheTreeMaker.py | 7c6aa48eac2b99e552e3669d2e943613a8222e6a | [] | no_license | Sam-Harper/usercode | 1b302a4b647e479d27a9501f9576bd04b07e111a | fa43427fac80d773978ea67b78be58d264f39ec8 | refs/heads/120XNtup | 2022-08-26T12:59:53.388853 | 2022-07-12T16:52:46 | 2022-07-12T16:52:46 | 15,675,175 | 1 | 11 | null | 2022-07-21T13:27:57 | 2014-01-06T13:54:22 | Python | UTF-8 | Python | false | false | 2,051 | py | # Import configurations
import FWCore.ParameterSet.Config as cms
# set up process
process = cms.Process("PDF")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000),
limit = cms.untracked.int32(10000000)
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
import sys
filePrefex="file:"
if(sys.argv[2].find("/pnfs/")==0):
filePrefex="dcap://heplnx209.pp.rl.ac.uk:22125"
if(sys.argv[2].find("/store/")==0):
filePrefex=""
process.source = cms.Source("LHESource",
# fileNames = cms.untracked.vstring(filePrefex+sys.argv[2]),
# inputCommands = cms.untracked.vstring("drop *","keep *_source_*_*"),
fileNames = cms.untracked.vstring(),
)
for i in range(2,len(sys.argv)-2):
print filePrefex+sys.argv[i]
process.source.fileNames.extend([filePrefex+sys.argv[i],])
process.lheTreeMaker = cms.EDAnalyzer("LHETreeMaker",
datasetCode=cms.int32(-1),
# lheEventTag=cms.InputTag("externalLHEProducer"),
lheEventTag=cms.InputTag("source"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("output.root")
)
isCrabJob=False #script seds this if its a crab job
#if 1, its a crab job...
if isCrabJob:
print "using crab specified filename"
process.TFileService.fileName= "OUTPUTFILE"
process.lheTreeMaker.datasetCode = DATASETCODE
else:
print "using user specified filename"
process.TFileService.fileName= sys.argv[len(sys.argv)-1]
process.lheTreeMaker.datasetCode = int(sys.argv[len(sys.argv)-2])
process.p = cms.Path(
process.lheTreeMaker)
| [
"sam.j.harper@gmail.com"
] | sam.j.harper@gmail.com |
6fdc4e0fca38fb80ce40e5680d102692c83e94c8 | 815b69743fa7fe21e60416f322e13a280e34340c | /messageEvent.py | 731ab264e01f0a57e56d26dc4ed914725c7f1905 | [] | no_license | MohammedAdain/gui-Using-Python | a3ed4d2703e3e19ebb1a5fb06620b1e8da51f5be | a9662f11ad87bd9fd16730411fb1728a39bc2754 | refs/heads/master | 2023-03-14T16:37:42.873077 | 2021-04-04T04:15:55 | 2021-04-04T04:15:55 | 354,452,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from tkinter import *
def motion(event):
print("Mouse position: (%s %s)" % (event.x, event.y))
return
def hello(event):
print("Single Click, Button-l")
def quit(event):
print("Double Click, so let's stop")
import sys; sys.exit()
master = Tk()
whatever_you_do = "Whatever you do will be insignificant, but it is very important that you do it.\n(Mahatma Gandhi)"
msg = Message(master, text = whatever_you_do)
msg.config(bg='lightgreen', font=('times', 24, 'italic'))
msg.bind('<Motion>',motion)
msg.pack()
widget = Button(None, text='Mouse Clicks')
widget.pack()
widget.bind('<Button-1>', hello)
widget.bind('<Double-1>', quit)
widget.mainloop()
mainloop()
| [
"adain@edsoft.com"
] | adain@edsoft.com |
ce73b02605176f3517e6fef78e667b56270a8591 | 18145f775eb9421d6b7e931bc101dc32b08844c9 | /genCSV.py | 901e6ce670371193516c7042758a63ca59bf0057 | [] | no_license | bloomcyber/trust-simulation | 806b57193a66640ad9dd4cf826126543bf205f38 | 8224abc22bb2ee0c4dde22870bf57476e015e05a | refs/heads/master | 2022-12-27T15:43:16.744897 | 2020-10-02T08:42:40 | 2020-10-02T08:42:40 | 294,372,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | from subprocess import check_output
import pandas as pd
import sys
def get_one_line(filepath, line_number):
return check_output([
"sed",
"-n",
"%sp" % line_number,
filepath
])
def getdata(filename,readfor):
arr=[0,1,2,3]
if(readfor=='-usr:pre_trusted'):
arr[0]=int(get_one_line(filename,10).decode("utf-8")[25:-1])
elif readfor=='-usr:purely':
arr[0]=int(get_one_line(filename,12).decode("utf-8")[25:-1])
elif readfor=='-usr:feedback':
arr[0]=int(get_one_line(filename,13).decode("utf-8")[25:-1])
elif readfor=='-usr:provider':
arr[0]=int(get_one_line(filename,14).decode("utf-8")[25:-1])
elif readfor=='-usr:disguise':
arr[0]=int(get_one_line(filename,15).decode("utf-8")[25:-1])
elif readfor=='-usr:sybil':
arr[0]=int(get_one_line(filename,16).decode("utf-8")[25:-1])
else:
print('invalid first argument to scipt file')
exit()
#arr[0]=ar1
#print(arr[0])
arr[1]=int(get_one_line(filename,47).decode("utf-8")[25:-1])
#arr[1]=ar1
#print(arr[1])
arr[2]=int(get_one_line(filename,48).decode("utf-8")[25:-1])
#arr+=ar1
#print(arr[2])
arr[3]=int(get_one_line(filename,49).decode("utf-8")[25:-1])
#arr+=ar1
#print(arr[3])
#print(type(arr[0]))
return arr
algo = sys.argv[1] #For naming convenience algo name
file= sys.argv[2] #file name
step=int(sys.argv[3])
i=int(sys.argv[4]) #upper limit
readfor=sys.argv[5]
arr={}
for i in range(0,i+1,step):
s=file+str(i)+"."+algo
arr[i]=getdata(s,readfor)
df = pd.DataFrame(arr)
print(df)
t=algo+".csv"
df.to_csv(t, mode='w',index= False, header=False)
print('saved to ',t)
| [
"noreply@github.com"
] | bloomcyber.noreply@github.com |
80766577f6aacb94547926a5d8d96a7835dae62f | 2b068c9c81bb1972418ab86efdae04a732dcf3c4 | /newsApp/views.py | 077d08b871c4d2bbb3352bd212a1aa150762df99 | [] | no_license | lharvey2142/newsRank | 69faa35e114bb84cb4dbc9581e11e89eb6d54975 | 5435df0ff94482f609783731b944adfa6c6400ba | refs/heads/master | 2021-01-20T02:37:20.461462 | 2018-02-28T23:17:22 | 2018-02-28T23:17:22 | 89,428,133 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,681 | py | from django.shortcuts import render
# Create your views here.
from django.core.urlresolvers import reverse
import sys
sys.path.append('/Users/froyvalencia/newsRank')
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','mysite.settings')
import django
django.setup()
from django.contrib.auth.models import User
users = User.objects.all()
from newsApp.models import Article
import newspaper
import nltk
#imports for search
import urllib.parse
from robobrowser import RoboBrowser
from bs4 import BeautifulSoup
import re
def getAddress(a):
import urllib.parse
parsed_uri = urllib.parse.urlparse(a.address)
domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
return domain
def newsfeatures(a):
features = {}
features['url'] = getAddress(a)
#print('url cleaned')
#print(features['url'])
#features['address'] = a.address
#features['title'] = a.title
#features['body'] = a.body
#features['length'] = len(a.body)
return features
'''
def deleteExtras():
print("starting delete")
Article.objects.filter(result='').delete()
print('end delete')
from django.db.models import Count
print("starting delete doubles")
for a in Article.objects.values_list('address', flat=True).distinct():
Article.objects.filter(pk__in=Article.objects.filter(address=a).values_list('id', flat=True)[1:]).delete()
print('end delete doubles')
#duplicates = Article.objects.values('address').annotate(address_count=Count('address')).filter(address_count__gt=1)
#for data in duplicates:
# address = data['address']
# Article.objects.filter(address=address).order_by('address')[1:].delete()
'''
def extract(request):
    """Render the extraction page for one news-article URL.

    Downloads the article at the ``url_to_clean`` GET parameter with
    ``newspaper``; if that URL was classified before, the cached result is
    rendered, otherwise a Naive Bayes classifier is trained on the stored
    labelled articles and used to classify the new one.
    """
    #deleteExtras()
    print(request.GET.get('url_to_clean'))
    a = newspaper.Article(request.GET.get('url_to_clean'))
    a.download()
    # Previously classified copies of this exact URL, if any.
    current = Article.objects.filter(address=request.GET.get('url_to_clean'))
    # NOTE(review): this model instance is built *before* a.parse() runs,
    # so a.title / a.text may still be empty at this point -- confirm intended.
    article = Article(
        address = a.url,
        title = a.title,
        body = a.text,
        date = a.publish_date,
    )
    #article.save() #uncomment when saving to database
    a.parse()
    a.nlp()
    if len(current) > 0:
        # Cache hit: reuse the stored classification verdict.
        return render(
            request,
            'newsApp/extract.html',
            {
                'accuracy':"Retrieved from database assumed 100%",
                'result':current[0].result,
                'url':a.url,
                'title': a.title,
                'authors':a.authors,
                'text': a.text,
                'publish_date': a.publish_date,
                'keywords':a.keywords,
                'summary':a.summary,
                'videos':a.movies,
                'html':a.html,
                'top_image':a.top_image
            },
        )
    #classification logic
    true_entries = Article.objects.filter(result='reliable')
    fake_entries = Article.objects.filter(result='unreliable')
    import random
    # Build (features, label) pairs for every stored labelled article.
    fake = [(newsfeatures(n),'unreliable') for n in fake_entries]
    true = [(newsfeatures(n),'reliable') for n in true_entries]
    random.shuffle(fake)
    random.shuffle(true)
    labeled_data = (true + fake)
    random.shuffle(labeled_data)
    feature_set = [(n, res) for (n, res) in labeled_data]
    # First 5500 shuffled examples train the model; the remainder measures accuracy.
    train_set = feature_set[:5500]
    test_set = feature_set[5500:]
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    result = classifier.classify(newsfeatures(article))
    accuracy = str(round(nltk.classify.accuracy(classifier, test_set) * 100, 2)) + "%"
    #end classification logic
    return render(
        request,
        'newsApp/extract.html',
        {
            'accuracy':accuracy,
            'result':result,
            'url':a.url,
            'title': a.title,
            'authors':a.authors,
            'text': a.text,
            'publish_date': a.publish_date,
            'keywords':a.keywords,
            'summary':a.summary,
            'videos':a.movies,
            'html':a.html,
            'top_image':a.top_image
        },
    )
def search(request):
    """Google the query string ``q`` and render the scraped result links.

    Scrapes the first Google results page with RoboBrowser/BeautifulSoup,
    extracts the target URL from each redirect href, and renders the list.
    """
    link = request.GET.get('q')
    print(link)
    # create the browser and change the useragent
    br = RoboBrowser()
    # replace space with +, look up the word in google, and return 100 links
    term = link.replace(" ", "+")
    query = "https://www.google.com/search?q=" + term
    br.open(query)
    htmltext = str(br.parsed)
    soup = BeautifulSoup(htmltext, "lxml")
    search = soup.findAll('div', attrs={'id': 'search'})
    searchtext = str(search[0])
    soup1 = BeautifulSoup(searchtext)
    list_items = soup1.findAll('li')
    # Google wraps each hit in a redirect of the form ...?q=<target>&...;
    # grab everything between the last "q=" and the following "&".
    regex = "q(?!.*q).*?&"
    pattern = re.compile(regex)
    results_array = []
    for li in list_items:
        soup2 = BeautifulSoup(str(li))
        links = soup2.findAll('a')
        source_link = links[0]
        source_url = re.findall(pattern, str(source_link))
        if len(source_url) > 0:
            results_array.append(str(source_url[0].replace("q=", "").replace("&", "")))
    # Keep only entries that look like real URLs.
    final = []
    for n in results_array:
        if n.find('http') != -1:
            final.append(n[n.find('http'):])
        elif n.find('www.') != -1:
            final.append('http://' + n[n.find('www.'):])
    # BUG FIX: the original reused ``link`` as this loop variable, so the
    # template's 'query' value was clobbered with the last result URL
    # instead of the user's search term.
    for result_link in final:
        print(result_link)
    return render(request, 'newsApp/links.html', {'links': final, 'query': link})
| [
"froyvalencia@gmail.com"
] | froyvalencia@gmail.com |
5ad5a813c1c775a599ada42dbdfdc2a84f76dd34 | 0438eaa48bd5aa4363f9fbc3980cabdd57051610 | /test/lib/opensocial-python-client/build/lib/oauth/__init__.py | 37a30fc0c855a95a528ca80c0f065808e66877ec | [
"Apache-2.0"
] | permissive | mozilla/weaveserver-identity | 4d4261a0d2a00f4588867ac87a85881122d14da8 | cc4b5651710dbc2941ebc5c15a1b5aeac7a09abf | refs/heads/master | 2023-08-22T01:48:38.650181 | 2010-03-10T19:32:13 | 2010-03-10T19:32:13 | 875,480 | 2 | 2 | null | 2019-03-28T03:37:37 | 2010-08-31T18:15:46 | Python | UTF-8 | Python | false | false | 18,619 | py | import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
def __init__(self, message='OAuth error occured.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join(str(random.randint(0, 9)) for i in range(length))
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
@staticmethod
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
def __str__(self):
return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems())
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = params.items()
# sort lexicographically, first after key, then after value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values)
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
@staticmethod
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
if auth_header.index('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
@staticmethod
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
@staticmethod
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = escape(callback)
return OAuthRequest(http_method, http_url, parameters)
# util function: turn Authorization: header into parameters, has to do some unescaping
@staticmethod
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# ignore realm parameter
if param.find('OAuth realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
# util function: turn url string into parameters, has to do some unescaping
@staticmethod
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
# -> OAuthToken
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
# -> OAuthToken
raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
| [
"mhanson@mozilla.com"
] | mhanson@mozilla.com |
e988f8b8da91a7a855a13291c4bc74d81d30bdc1 | cce9b0564353cb8060a616ff685c73a16467def1 | /flask-blog/blog.py | c8ced379579d9638fe27efa7567b6fc4303cdad8 | [] | no_license | xirain/realpython-test | 381cb9cb2bb11b610b4caf239b29e1a03388c612 | e7d34b0f2e364c2c623b88f141508fbfc3e2e2f4 | refs/heads/master | 2021-01-19T10:31:18.938779 | 2015-08-05T08:45:04 | 2015-08-05T08:45:04 | 39,760,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | # blog.py - controller
from flask import Flask, render_template,request, session, flash, redirect, url_for, g
import sqlite3
from functools import wraps
# configuration
DATABASE = 'blog.db'
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = "\x9e\xfeS\xf7`\xb5\xe0\xb8\x0b\x1c_\xbe'\xaeB\xa0\x91\xdaFtYl\xf0\xea"
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a new SQLite connection to the configured blog database."""
    return sqlite3.connect(app.config['DATABASE'])
def login_required(test):
    """Decorator: redirect to the login page unless the session is logged in."""
    @wraps(test)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
            return test(*args, **kwargs)
        else:
            flash('You need to login first.')
            return redirect(url_for('login'))
    return wrap
@app.route('/', methods=['GET', 'POST'])
def login():
    """Show the login form and authenticate posted credentials.

    BUG FIX: the password was previously compared against
    app.config['USERNAME'], so the configured PASSWORD was never checked.
    """
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME'] or request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid Credentials. Please try again.'
        else:
            session['logged_in'] = True
            return redirect(url_for('main'))
    return render_template('login.html', error=error)
@app.route('/main')
@login_required
def main():
    """Render the blog's main page with every stored post."""
    g.db = connect_db()
    cur = g.db.execute('select * from posts')
    # Each row is (title, post); turn it into a template-friendly dict.
    posts = [dict(title=row[0], post=row[1]) for row in cur.fetchall()]
    g.db.close()
    return render_template('main.html', posts=posts)
@app.route('/logout')
def logout():
    """Clear the login flag from the session and return to the login page."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('login'))
@app.route('/add', methods=['POST'])
@login_required
def add():
    """Insert a new blog post; both form fields are required."""
    title = request.form['title']
    post = request.form['post']
    if not title or not post:
        flash("All fields are required. Please try again.")
        return redirect(url_for('main'))
    else:
        g.db = connect_db()
        # Parameterized query: keeps user input out of the SQL string.
        g.db.execute('insert into posts (title,post) values(?,?)', [request.form['title'], request.form['post']])
        g.db.commit()
        g.db.close()
        flash('New entry was successfully posted!')
        return redirect(url_for('main'))
if __name__ == '__main__':
app.run(debug=True) | [
"wangy675@163.com"
] | wangy675@163.com |
1ac20949699cae64ef0396dfd3214cf9042edb6c | c4a9c2686c876a5a54e02de07c32a830edccab49 | /TestCases/test_FetchUser.py | 6ba199c5db6b8332a1228ed378d2c3f5d270ea9b | [] | no_license | idrismys/Mobiquity | 1175067e5d836c1b70abd518e0db475c9db02513 | c269b32016f982d1c5fc9a48b1a4641dc047584a | refs/heads/main | 2023-04-05T16:55:25.599298 | 2021-04-05T15:01:24 | 2021-04-05T15:01:24 | 354,870,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import re
from GET_request import userdata
def test_status():
    '''Fetch the user details and check the request itself succeeded.'''
    assert userdata.get_request()


def test_user():
    '''Check the user details can be retrieved.'''
    assert userdata.get_user()


def test_posts():
    '''Check the posts returned by the user can be retrieved.'''
    assert userdata.get_posts()


def test_comments():
    '''Check the comments from the post can be retrieved.'''
    assert userdata.get_comments()
def test_validate_emailid():
    """Validate the user's email address against a standard email regex.

    BUG FIX: the original returned True/False, but pytest ignores a test's
    return value, so this test could never fail; assert instead.
    """
    regex = '^[a-zA-Z0-9_+&*-]+(?:\\.[a-zA-Z0-9_+&*-]+)*@(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,7}$'
    assert re.search(regex, userdata.get_emailId())
| [
"noreply@github.com"
] | idrismys.noreply@github.com |
8e1660ff5cde5328e6f3aad787189e00c9d6364f | 2bbca3a523961a92399037a34bf8f7970c78226f | /pymcaspec/__init__.py | a160ceb03bd995ad0053bfcb96956fef1b48f422 | [] | no_license | siddharth-maddali/pymcaspec | 9fc7a6a01531bd0e288468b1ca1de0f49914f115 | f1cf69ee445e7953a68f0959168644cbb9bf2010 | refs/heads/master | 2023-06-26T08:02:53.931894 | 2021-07-31T16:14:33 | 2021-07-31T16:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from pymcaspec.pymcaspec import specfile
| [
"mpmdean@gmail.com"
] | mpmdean@gmail.com |
479c4005bc40c5356945d77fee28e62ea2ed1a73 | 5f06aed9d2daf802c30a3145407cc505f517a7a3 | /lqr.py | 70e79013ec887ce78364edad418fe2027f8e718e | [] | no_license | bloom1123/ae504-project | f90b2ddffef40c859516511650a3360e686050fb | 74d6a444eb2ddc5fb5ab63627c42793c4747b6a4 | refs/heads/main | 2023-05-27T23:19:26.694416 | 2021-06-04T20:53:10 | 2021-06-04T20:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | import numpy as np
from control import StateSpace, lqr
from scipy import linalg
class LQR():
    """Continuous-time infinite-horizon LQR state-feedback gain.

    Solves the continuous algebraic Riccati equation for (A, B, Q, R) and
    stores the optimal gain K = R^-1 B^T P.  C and D are accepted for
    backward compatibility but are not needed for the gain: the original
    also built a StateSpace/lqr solution whose result was immediately
    overwritten, so that dead computation is removed here.
    """

    def __init__(self, A, B, C, D, Q, R):
        # Solve A'P + PA - P B R^-1 B' P + Q = 0 for P, then K = R^-1 B' P.
        P = linalg.solve_continuous_are(A, B, Q, R)
        K = np.dot(np.linalg.inv(R), np.dot(B.T, P))
        self.K = K

    def apply_state_controller(self, K, x):
        """Return (direction, force) for the feedback law u = -Kx."""
        u = -np.dot(K, x)  # u = -Kx
        if u > 0:
            return 1, u  # if force_dem > 0 -> move cart right
        else:
            return 0, u  # if force_dem <= 0 -> move cart left
"rupaln@Rupals-MacBook-Pro.local"
] | rupaln@Rupals-MacBook-Pro.local |
6bc9a2e642545ddc7f78dfd07ec3bae2d3ce5618 | 8294b6c63e95ec6d70bef1ef3f4304766dcc52fa | /tf_learning/t2_loading video source.py | 2606f21affffb465e1f1a06cf357ce6181a74d4a | [] | no_license | Foundream/ML_practise | 789b1ba9925dde6f2e3a0f086bd3fb7e177caeb6 | 346fafbdedb8497a18f753216a4d7451d11df0ce | refs/heads/master | 2023-03-19T02:15:09.511826 | 2019-10-18T09:56:27 | 2019-10-18T09:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #-*- coding: utf-8 -*-
import cv2
import numpy as np
# Open the video source: a path opens that video file, while passing 0
# instead would open the laptop's built-in webcam.
cap = cv2.VideoCapture('F:/python/image/video_sample.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # choose the output codec
#fourcc = -1
out = cv2.VideoWriter('F:/python/image/output.avi', fourcc, 20.0, (640, 480))
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #out.write(frame)
    cv2.imshow('gray', gray)    # show the grayscale video
    cv2.imshow('frame', frame)  # show the original color video
    # waitKey(1) waits 1 ms before the next frame (0 would pause the video,
    # large values such as 1000 make it stutter); ord('q') converts the quit
    # key to its integer code, and masking with 0xFF keeps the low byte.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
out.release()
cv2.destroyAllWindows()
| [
"LybCrystal@163.com"
] | LybCrystal@163.com |
f30268df793ae4ebb2f3520f3a02abd6ca852d76 | 33f7a383760442d437592f367d3a5b6d5100088b | /venv/bin/pip2.7 | 07bda9241c2f900489d67de26049ba4059135eb8 | [] | no_license | kreyyser/SwampDragon-Messenger | 935837771a7c2b1eb75e0cbc042666664f3202a7 | 72db670b7df61304f1b072637a08eeb8d0f2791d | refs/heads/master | 2021-06-02T12:48:29.713445 | 2016-06-13T08:37:52 | 2016-06-13T08:37:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | 7 | #!/home/vagrant/conference/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix from argv[0] (added by
    # setuptools console-script wrappers) before delegating to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"kreyser2007@mail.ru"
] | kreyser2007@mail.ru |
22fcbd946d08b1b0360883cebf92843acdabaae0 | 853c6a09af16fd4dd8a53efa8bde631e63315b59 | /Programmers/correct braket.py | 0936aa47db0765ce63be6d8daa769dea7d790e1b | [] | no_license | Areum0921/Abox | 92840897b53e9bbab35c0e0aae5a017ab19a0500 | f4739c0c0835054afeca82484769e71fb8de47c8 | refs/heads/master | 2021-12-13T11:16:33.583366 | 2021-10-10T08:09:50 | 2021-10-10T08:09:50 | 176,221,995 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | def solution(s):
answer = True
stack = []
for i in s:
if (i == '('):
stack.append(i)
else:
if (stack):
stack.pop()
else:
return False # 짝이 안맞을때
if (stack): # 스택에 남아 있는게 있을때
return False
return True | [
"a90907@gmail.com"
] | a90907@gmail.com |
af1df097bd547ba10773333cd95a29217a8fcd38 | 54f2617410e95fa7c07e5acaa45dbdf1e7bf7c8a | /tasks/sortinghat | 884aafdedb355852f0df9f45680735b7cd3beafa | [
"MIT"
] | permissive | daniel-j-h/pegasus | 368fb6251c887ba33a346d926c94a4e01585d962 | db01738c73ae9ed85772f25f1a7af72fcc6caa71 | refs/heads/master | 2021-01-16T22:13:20.303729 | 2016-05-02T21:51:41 | 2016-05-02T21:51:41 | 58,081,999 | 0 | 0 | null | 2016-05-04T20:29:35 | 2016-05-04T20:29:34 | null | UTF-8 | Python | false | false | 746 | #!/usr/bin/env python3
from collections import namedtuple
import random
def main():
    """Greet a new wizard, sort both of us into random houses, and react."""
    Wizard = namedtuple('Wizard', 'name house')
    houses = ['Gryffindor', 'Hufflepuff', 'Slytherin', 'Ravenclaw']
    # The narrator is sorted first, before greeting the player.
    me = Wizard('Jane Doe', random.choice(houses))
    print("Welcome to Hogwarts! This year, new wizards are sorted into random houses, and it's immutable. I have the sorted list..")
    you = Wizard(input("What's your name? "), random.choice(houses))
    if me.house == you.house:
        print("Hmm..looks like you're a", me.house, "too..Pleasure to meet you! I'm", me.name + ".")
    else:
        print("Ugh, you're a", you.house + ".")
if __name__ == "__main__":
main() | [
"mariam.mrf@hotmail.com"
] | mariam.mrf@hotmail.com | |
4b3961aa5d8906bd87af450467577e695d335f83 | b0c0008213c633e6d32d8536a98934047f38ba17 | /consumer.py | e8cd2071c6864f984bb83cc67f04e9e66677ddc7 | [] | no_license | benthomasson/kafka-test | 8363f6a880544a6037e88d01b33954524b3b38ac | 95b1e89dd5a009b47a35ac5886c1980e2c5d5fcc | refs/heads/master | 2020-06-13T17:34:55.464840 | 2019-07-01T19:49:22 | 2019-07-01T19:49:22 | 194,734,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
consumer [options]
Options:
-h, --help Show this page
--debug Show debug logging
--verbose Show verbose logging
"""
from docopt import docopt
import logging
import sys
from kafka import KafkaConsumer
logger = logging.getLogger('consumer')
def main(args=None):
    """Parse CLI options, configure logging, and tail the Kafka topic.

    Consumes 'my_favorite_topic' from a local broker indefinitely, printing
    each message; returns 0 if the consumer loop ever ends.
    """
    if args is None:
        args = sys.argv[1:]
    parsed_args = docopt(__doc__, args)
    # Pick the log level from the mutually exclusive CLI flags.
    if parsed_args['--debug']:
        logging.basicConfig(level=logging.DEBUG)
    elif parsed_args['--verbose']:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
    # auto_offset_reset='earliest' replays the topic from the beginning the
    # first time this consumer group connects.
    consumer = KafkaConsumer('my_favorite_topic', bootstrap_servers='127.0.0.1:9092', group_id="mygroup", auto_offset_reset='earliest')
    for msg in consumer:
        print(msg)
    return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv[1:]))
| [
"bthomass@redhat.com"
] | bthomass@redhat.com |
54a2e50782bfdc0adbf20311b66314e4309644e5 | 7b0d877a8155cee067eeb29edb48b1f0e72bf5ff | /sdofNewmexico.py | 78d4f1e719c8f7fcdf20653ac14544dd87ec8201 | [] | no_license | nancysackman/oscillator | 14a370fb697c7901121e77c75f1c49d4ec4e48e9 | 7557e2a315fc9d1d4d0e91ae3327edb9f152addf | refs/heads/master | 2021-06-17T01:35:46.500361 | 2019-05-26T04:55:55 | 2019-05-26T04:55:55 | 135,765,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | import math
from numpy.linalg import inv
import matplotlib #.pyplot as plt - for crash
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt #added for crash
import numpy as np
import sys
#for frequency domain
import obspy
from obspy import read
from obspy.signal.tf_misfit import plot_tfr
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from scipy.fftpack import fft
#import numpy.fft as fft
import csv #downloading csv for seismograph
import requests #access files of IRIS webservices (eventually)
import re #regular expression
import webbrowser
import requests
# Read the strong-motion record; header rows are skipped below.
building_reader=csv.reader(open("/Users/nancysackman/Code/building/MI1520170919181440.csv"))
#manually fill in from txt files (station / event metadata)
sta="MI15" #khz,pcdr TLIG
chan="S00E" #hnn,hnn BHN
net = "UNAM" #NZ,PR MX
starttime = "2017-09-19T18:14:40"
samplingtime="2017-09-19T18:15:00" #start time of instrument recording
depth="57"
M="7.1"
gain=0
# Skip rows until the column-header row whose first cell is "Time".
for header in building_reader:
    if (header[0]!="Time"):
        next(building_reader)
    else:
        break
# Column 1 holds the recorded acceleration samples (F = m*a).
F0=[] #seismogram F=m*a,
for row in building_reader:
    F0.append(float(row[1]))
F0=np.array(F0) #making into numpy array
arr_out = [] #array out
F0=F0/100 #change to cm for Mexico
arr_out = [] #array out
#variables
# NOTE(review): indentation was lost in this dump; the body below (through
# the CSV export) is assumed to sit inside the story loop -- confirm.
for s in range (1,2): #s is number of stories
    rp=s*0.1 #resonance period, 0.1*number of stories, free period
    g=9.81 #acceleration due to gravity
    m = 5410000*s/g #(TL/g)*s ,5410000, #mass, units kg for 6 story 50mx50m
    k = 11700000 #spring constant,k=9.8MN/m which is 9800000, k=AE/L 11700000, k reinforced concrete 100000000
    CD = 2*math.sqrt(m*k) #critical damping
    c = .05*CD #actual damping, units kg/s (5% damping ratio: .05 = c / (2*sqrt(m*k)))
    NF = math.sqrt(k/m) #natural frequency, units Hz
    T=(2*math.pi)/NF #1/NF, units seconds
    f=1/T
    delta_t = .01 #sample interval of the record in seconds (.005 KHZ; 0.02 HOM, AK; 0.01 HOLY, UW; .005 K223, AK)
    omega = NF*(2*math.pi)/T #units Hz, w=sqrt(k/m) or 2pi/T, rads/sec
    #initial state of the single-degree-of-freedom oscillator
    y=np.array([0,0]) #[velocity, Displacement]
    A = np.array([[m,0],[0,1]]) #matrix, list of two lists, first row is m, second row is 0 and 1
    B = np.array([[c,k], [-1,0]]) #damping and spring constant
    F = np.array([0.0, 0.0]) #forcing vector
    Y = [] #displacement history, for plotting
    force = []
    acceleration = []
    t=0.0
    time = []
    # Explicit Euler time-stepping of  A*y' = F*m - B*y.
    for ForcingValue in F0:
        time.append(t)
        t=t+delta_t
        F[0]=ForcingValue #input is acceleration; converted to force via F=m*a below
        y = y+delta_t*inv(A).dot(F*m-B.dot(y))
        Y.append(y[1])
        acceleration.append(F[0])
        KE = 0.5*m*y[0]**2 #kinetic energy at the latest step
        PE = 0.5*k*y[1]**2 #potential energy at the latest step
    t=[i for i in time]
    # Plot the input acceleration and resulting displacement on twin axes.
    ax1=plt.subplot()
    ax1.plot(time,acceleration)
    plt.xlim((0,175))
    plt.ylabel('Acceleration m/s^2')
    plt.xlabel('Time in Seconds')
    plt.legend(['Force'],loc='upper right')
    ax2=ax1.twinx()
    ax2.plot(time,Y,c='r',linewidth=3.0)
    plt.xlabel('Time in Seconds')
    plt.ylabel('Displacement in Meters')
    plt.grid(True)
    plt.legend(['Displacement'],loc='upper center')
    plt.title('Building Displacement - Story '+ str(s)+' '+ str(sta)+ ' '+ str(chan)+ ' '+ str(starttime))
    plt.show()
    #print summary statistics
    print('absolute max acceleration',max((acceleration),key=abs))
    print('max acceleration occurs at',acceleration.index(max((acceleration),key=abs))*delta_t,'seconds')
    print('maximum displacement in meters',max((Y),key=abs))
    print('max displacement occurs at',Y.index(max((Y),key=abs))*delta_t,'seconds')
    #send results out to a CSV file (appended per story)
    arrSDOF=[sta,chan,net,delta_t,starttime,samplingtime,gain,s,rp,m,k,NF,T,f,KE,PE,max((acceleration),key=abs), acceleration.index(max(acceleration))*delta_t, max((Y),key=abs), Y.index(max((Y),key=abs))*delta_t]
    with open('sdofResultspython.csv',mode='a') as csv_out:
        writer=csv.writer(csv_out)
        writer.writerow(arrSDOF)
#The next part is going to look at building displacement based on the number of stories,
#mass, spring constant, critical damping ratio. We need to equate or figure out
#the frequency of the acceleration or forcing function from the seismogram itself.
#To do this I have equated the masses from F=m*a and omega = sqrt(k/m) = 2*pi*f.
#If I set the equations equal to each other based on m, then f=sqrt(F*k/4pi^2*a)
#new code called frequency
#a is acceleration from spectogram
#f=sqrt(F*k/4*math.pi**2*a)
| [
"noreply@github.com"
] | nancysackman.noreply@github.com |
02b1b111c99cf004798da0c8d0f6d0586e524c1d | dd7c7f19d1599669dd39af3e9b6e2bd198ad4ed4 | /backend/tests/blockchain/test_hex_to_binary.py | 0acf03c0d1d9f69499be572bde56822461680995 | [] | no_license | komfysach/komfykrypto-pychain | 46fe47d337ba6aff804f6c810cebeb94e15578d4 | 119c3978b819d8def705f7d35074b9334c596770 | refs/heads/main | 2023-05-06T13:30:40.344383 | 2021-05-24T09:30:21 | 2021-05-24T09:30:21 | 370,708,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from backend.util.hex_to_binary import hex_to_binary
def test_hex_to_binary():
    """Round-trip check: int -> hex string -> binary string -> same int."""
    value = 789
    as_hex = format(value, 'x')          # same as hex(value)[2:] for positive ints
    as_binary = hex_to_binary(as_hex)
    assert int(as_binary, 2) == value
"sach@getkomfy.net"
] | sach@getkomfy.net |
00c911e4c527233070ba2fa5de35f5171b0ca8aa | f9b047b25184787af88fd151f2a6226b6b342954 | /investmentTornadoServer/job/utils.py | 581a1d268a82693f0a2333452e34f3017c4a5356 | [] | no_license | CallMeJiaGu/TonrnadoRecomendServer | 4bfc3dd6d6a432321f80b12d66bb424fbc1a0911 | 54bb21191b16da27c20ce64ab14762bc777e30ca | refs/heads/master | 2020-03-23T19:31:43.291995 | 2019-07-04T03:04:58 | 2019-07-04T03:04:58 | 141,984,920 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def l2m_str(l):
    """Render (key, value) pairs as a JSON-like map string, e.g. '{"k":0.5,"j":2}'.

    Values are emitted via str() and are NOT quoted, so only numeric
    values produce valid JSON.
    """
    pairs = ('"%s":%s' % (key, value) for key, value in l)
    return '{' + ','.join(pairs) + '}'
if __name__ == '__main__':
l = [('227948d3-b19a-11e6-8836-005056b3f30e',0.123787622),('fa4d2d6d-b199-11e6-8836-005056b3f30e',0.93)]
print l2m_str(l)
| [
"646696382@qq.com"
] | 646696382@qq.com |
7491ded17babba2e25a320468b4c7f2d03ec8014 | 2d67afd40a0425c843aa8643df9f7d5653ad0369 | /python/leetcode/836_Rectangle_Overlap.py | 07a9c93dabe68189566acdbdd57f7dd25eead09a | [] | no_license | bobcaoge/my-code | 2f4ff5e276bb6e657f5a63108407ebfbb11fbf64 | 70bdd75b6af2e1811c1beab22050c01d28d7373e | refs/heads/master | 2022-12-23T22:38:10.003058 | 2020-07-02T03:52:43 | 2020-07-02T03:52:43 | 248,733,053 | 0 | 0 | null | 2022-12-10T05:41:57 | 2020-03-20T10:55:55 | Python | UTF-8 | Python | false | false | 683 | py | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
class Solution(object):
    def isRectangleOverlap(self, rec1, rec2):
        """
        :type rec1: List[int]  -- [x1, y1, x2, y2], bottom-left / top-right
        :type rec2: List[int]
        :rtype: bool
        """
        # Two axis-aligned rectangles share positive area iff their
        # projections strictly overlap on both axes (touching edges do
        # not count as overlap).
        return (rec1[0] < rec2[2] and rec2[0] < rec1[2]
                and rec1[1] < rec2[3] and rec2[1] < rec1[3])

    def isRectangleOverlap1(self, rec1, rec2):
        """
        :type rec1: List[int]  -- [x1, y1, x2, y2], bottom-left / top-right
        :type rec2: List[int]
        :rtype: bool
        """
        left1, bottom1, right1, top1 = rec1
        left2, bottom2, right2, top2 = rec2
        # Strict inequalities: rectangles that merely touch do not overlap.
        overlaps_horizontally = left1 < right2 and left2 < right1
        overlaps_vertically = bottom1 < top2 and bottom2 < top1
        return overlaps_horizontally and overlaps_vertically
def main():
s = Solution()
if __name__ == "__main__":
main()
| [
"378082326@qq.com"
] | 378082326@qq.com |
9407844ed952f899533179be6b9a92d0a84e728d | 77197c85cd53ea6f20a8646ca6969a40a5620ea4 | /billing_system/manage.py | cc242699be306fa52272a8e4ffc6a46fa5d6dcce | [] | no_license | gowrishankkar/Learning-Django | cb54b11c784f8eb433127443856a50c031173d84 | 948eba5d7d7c505886c4a54ccd53305995972338 | refs/heads/master | 2023-05-04T13:09:10.675197 | 2020-01-12T11:55:18 | 2020-01-12T11:55:18 | 222,991,681 | 0 | 0 | null | 2023-04-21T20:45:41 | 2019-11-20T17:29:09 | Python | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'billing_system.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install,
        # wrong PYTHONPATH, inactive virtualenv) while chaining the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"shankarg.kvsulur@gmail.com"
] | shankarg.kvsulur@gmail.com |
cbea43460a977b85a009490bc47d6af3c5e1144f | 319731c4788fb82813f81a63cac3f23171285815 | /config/settings/local.py | c9429ceadfc5025320a07b57849834cb9379a74c | [
"MIT"
] | permissive | seankim84/djangoproject | 2171abac27c2fc41d5d2ac8a0f86fa6772c10439 | dc442098d8e17beff086f3acd325f8fca3b41583 | refs/heads/master | 2022-12-13T23:01:53.078813 | 2018-05-16T13:19:50 | 2018-05-16T13:19:50 | 130,310,410 | 1 | 0 | MIT | 2022-12-09T08:30:55 | 2018-04-20T04:49:40 | Python | UTF-8 | Python | false | false | 2,621 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='l5aHiu3HISGNHG3G0JgNtu8XPMuKGZt5utS9bFe0BMPy0G3H5ZL7tX3cy7Bo28HX')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| [
"rebekahpark@Rebekahs-MacBook-Pro.local"
] | rebekahpark@Rebekahs-MacBook-Pro.local |
8f73960e9cd985f473f88967aa7424ab07f7bcbe | aa692f369966074141873a473894362913750e01 | /reportform/asgi.py | 52f03e6cc3a8f2461ec7170e6d02e3bf734d97bc | [] | no_license | yashacon/Progress_form | d8747d6ba28266cabd2c88ecfdcf4816c7350569 | 0f26733383f79e9e34992cd12a308a410c27f37f | refs/heads/master | 2022-04-22T14:47:05.119632 | 2020-04-19T15:14:16 | 2020-04-19T15:14:16 | 257,029,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for reportform project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportform.settings')
application = get_asgi_application()
| [
"17dcs017@lnmiit.ac.in"
] | 17dcs017@lnmiit.ac.in |
20688f9fbecd94fef4e8a70dc5432eb3034c6c26 | 0f8ead88551d6da281410924ba00b6940d24cd7d | /BipedalWalkerHardcore-TD3-FORK/model.py | 1682897cda862725a921607ab69d3d922d89ce9c | [] | no_license | jianyunli/Deep-Reinforcement-Learning-Algorithms | 398d4e40730edc52cc2a2c6da1d9a71bb43d19e5 | 85b1148de13cd5a2a46cce016412f783163e50a0 | refs/heads/master | 2023-06-02T06:23:12.131703 | 2021-06-17T21:58:26 | 2021-06-17T21:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | ## import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
    """Deterministic policy network: maps states to actions in [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc_units=400, fc1_units=300):
        """
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed, for reproducible weight initialization
            fc_units (int): units in the first hidden layer
            fc1_units (int): units in the second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc_units)
        self.fc2 = nn.Linear(fc_units, fc1_units)
        self.fc3 = nn.Linear(fc1_units, action_size)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # Use torch.tanh directly; the original `F.torch.tanh` only worked
        # because torch.nn.functional happens to import torch internally.
        return torch.tanh(self.fc3(x))
# Q1-Q2-Critic Neural Network
class Critic(nn.Module):
    """Twin Q-value networks (TD3-style): each head maps (state, action) -> Q."""

    def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300):
        """
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed, for reproducible weight initialization
            fc1_units (int): units in the first hidden layer of each head
            fc2_units (int): units in the second hidden layer of each head
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)

        # First Q-head
        self.l1 = nn.Linear(state_size + action_size, fc1_units)
        self.l2 = nn.Linear(fc1_units, fc2_units)
        self.l3 = nn.Linear(fc2_units, 1)

        # Second Q-head
        self.l4 = nn.Linear(state_size + action_size, fc1_units)
        self.l5 = nn.Linear(fc1_units, fc2_units)
        self.l6 = nn.Linear(fc2_units, 1)

    def forward(self, state, action):
        """Return the pair of Q-value estimates for (state, action)."""
        state_action = torch.cat([state, action], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(state_action)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(state_action)))))
        return q1, q2
class SysModel(nn.Module):
    """Learned dynamics model: predicts the next state from (state, action)."""

    def __init__(self, state_size, action_size, fc1_units=400, fc2_units=300, seed=None):
        """
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            fc1_units (int): units in the first hidden layer
            fc2_units (int): units in the second hidden layer
            seed (int, optional): random seed for reproducible weight
                initialization, matching Actor/Critic. None (the default)
                preserves the original unseeded behavior.
        """
        super(SysModel, self).__init__()
        if seed is not None:
            self.seed = torch.manual_seed(seed)
        self.l1 = nn.Linear(state_size + action_size, fc1_units)
        self.l2 = nn.Linear(fc1_units, fc2_units)
        self.l3 = nn.Linear(fc2_units, state_size)

    def forward(self, state, action):
        """Build a system model to predict the next state at a given state."""
        xa = torch.cat([state, action], 1)
        x1 = F.relu(self.l1(xa))
        x1 = F.relu(self.l2(x1))
        x1 = self.l3(x1)
        return x1
"klivlend1@yahoo.com"
] | klivlend1@yahoo.com |
c33743585b9a553e3d3858a7fff83eb8abfe4436 | 7f1d31cf00f8a1fc175d67c7be6e11367179d3f6 | /tests/nlu/extractors/test_extractor.py | b0739e047c43aac5b670854f89971dc56ef5e29e | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] | permissive | russosanti/rasa | 226ec14e3a326ba2ad9cb0aae57c79465c88b5ab | 21fb0cc8e92cf270e4228438cb386f1d6f364563 | refs/heads/master | 2023-04-07T13:25:53.848512 | 2020-04-16T21:59:58 | 2020-04-16T21:59:58 | 256,278,604 | 0 | 1 | Apache-2.0 | 2020-04-16T17:05:06 | 2020-04-16T17:05:05 | null | UTF-8 | Python | false | false | 7,622 | py | from typing import Any, Text, Dict, List
import pytest
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.training_data import Message
from rasa.nlu.extractors.extractor import EntityExtractor
@pytest.mark.parametrize(
"text, tokens, entities, keep, expected_entities",
[
(
"Aarhus is a city",
[
Token("Aar", 0, 3),
Token("hus", 3, 6),
Token("is", 7, 9),
Token("a", 10, 11),
Token("city", 12, 16),
],
[
{"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
{"entity": "city", "start": 3, "end": 6, "value": "hus"},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
False,
[{"entity": "location", "start": 12, "end": 16, "value": "city"}],
),
(
"Aarhus",
[Token("Aar", 0, 3), Token("hus", 3, 6)],
[
{"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
{"entity": "city", "start": 3, "end": 6, "value": "hus"},
],
True,
[],
),
(
"Aarhus city",
[Token("Aarhus", 0, 6), Token("city", 7, 11)],
[
{"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
{"entity": "type", "start": 7, "end": 11, "value": "city"},
],
False,
[
{"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
{"entity": "type", "start": 7, "end": 11, "value": "city"},
],
),
(
"Aarhus is a city",
[
Token("Aar", 0, 3),
Token("hus", 3, 6),
Token("is", 7, 9),
Token("a", 10, 11),
Token("city", 12, 16),
],
[
{
"entity": "city",
"start": 0,
"end": 3,
"confidence": 0.87,
"value": "Aar",
},
{
"entity": "iata",
"start": 3,
"end": 6,
"confidence": 0.43,
"value": "hus",
},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.87,
"value": "Aarhus",
},
{"entity": "location", "start": 12, "end": 16, "value": "city"},
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "iata",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
},
{
"entity": "city",
"start": 2,
"end": 3,
"confidence": 0.87,
"value": "r",
},
{
"entity": "iata",
"start": 3,
"end": 5,
"confidence": 0.21,
"value": "hu",
},
{
"entity": "city",
"start": 5,
"end": 6,
"confidence": 0.43,
"value": "s",
},
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.87,
"value": "Aarhus",
}
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "city",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
}
],
True,
[
{
"entity": "city",
"start": 0,
"end": 6,
"confidence": 0.32,
"value": "Aarhus",
}
],
),
(
"Aarhus",
[Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
[
{
"entity": "city",
"start": 0,
"end": 2,
"confidence": 0.32,
"value": "Aa",
}
],
False,
[],
),
(
"Buenos Aires is a city",
[
Token("Buenos", 0, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
False,
[{"entity": "location", "start": 18, "end": 22, "value": "city"}],
),
(
"Buenos Aires is a city",
[
Token("Buenos", 0, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
True,
[
{"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
),
(
"Buenos Aires is a city",
[
Token("Buen", 0, 4),
Token("os", 4, 6),
Token("Ai", 7, 9),
Token("res", 9, 12),
Token("is", 13, 15),
Token("a", 16, 17),
Token("city", 18, 22),
],
[
{"entity": "city", "start": 4, "end": 9, "value": "os Ai"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
True,
[
{"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
{"entity": "location", "start": 18, "end": 22, "value": "city"},
],
),
],
)
def test_clean_up_entities(
    text: Text,
    tokens: List[Token],
    entities: List[Dict[Text, Any]],
    keep: bool,
    expected_entities: List[Dict[Text, Any]],
):
    """Check EntityExtractor.clean_up_entities against the parametrized cases.

    Builds a Message carrying the given tokens, runs clean-up over the raw
    entity predictions, and compares the result with the expected entities.
    As exercised by the cases above, `keep=True` aligns/merges entities to
    token boundaries while `keep=False` drops misaligned ones.
    """
    extractor = EntityExtractor()

    message = Message(text)
    # Tokens are injected directly rather than produced by a tokenizer,
    # so each case fully controls the token boundaries under test.
    message.set("tokens", tokens)
    updated_entities = extractor.clean_up_entities(message, entities, keep)

    assert updated_entities == expected_entities
| [
"tabergma@gmail.com"
] | tabergma@gmail.com |
eaf21fc64fa4a9963db8428a6d85332bb1f68acf | d2fc4d45b115fb861097657d00b3c5cb08e8a3ad | /scenarios/bank_account_delete/executable.py | 22c888baa03eda9722fd271e2a6f2c9a58e213cb | [] | no_license | jess010/balanced-python | 81b39f0e9d3ce52d60f2453b8c98e77f07ee3acb | b7a6bf0430ad0299d96de15ea97d3d4ccfb4c958 | refs/heads/master | 2020-12-25T16:13:35.626111 | 2013-09-20T00:14:58 | 2013-09-20T00:14:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import balanced
# Configure the Balanced API client with an API secret
# (presumably a test credential for this example scenario -- verify).
balanced.configure('b5de51921b2d11e389c4026ba7cac9da')

# Look up an existing bank account by its resource URI, then delete it.
bank_account = balanced.BankAccount.find('/v1/bank_accounts/BA5YXVcU9ExcM8jXQhQt7ZY6')
bank_account.delete()
"ben@unfiniti.com"
] | ben@unfiniti.com |
6cc365c4f9acf9409c990e5df987800c17b73dca | c9cf7ed6dc1974b9f051d1727d6344be3b413ac8 | /product/migrations/0013_productvariant_options.py | 5d1884bdceb560d1b59bb6a66e1dc5a8a66480ed | [] | no_license | Samuelyip74/alcatelconfigurator | f4dbd1eabb40645b92a80d438bf82a1572db3fe1 | f7cc2bda2b9504fc3708e9cd08db69d611700a6a | refs/heads/master | 2020-08-02T04:24:52.822370 | 2019-10-24T16:32:32 | 2019-10-24T16:32:32 | 211,232,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.2.3 on 2019-09-23 06:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.3): adds the many-to-many
    ``options`` field linking ProductVariant to ProductOption."""

    dependencies = [
        ('product', '0012_auto_20190923_1405'),
    ]

    operations = [
        migrations.AddField(
            model_name='productvariant',
            name='options',
            field=models.ManyToManyField(to='product.ProductOption'),
        ),
    ]
| [
"samuel.yip@live.com"
] | samuel.yip@live.com |
86ba2b6052d3e743fb070ef7f0e05d157df3fe4d | 0dee7cc69ae44e30c5cb372eb17f2e469635056b | /AirBnB_clone_v3/api/v1/app.py | 3b75dd5b7cb112990fe65ac206b8bb1c37bb41c1 | [
"LicenseRef-scancode-public-domain"
] | permissive | HausCloud/Holberton | 00cd25b4a489041e041551ea8f87674d53f43713 | b39c5978698e02b9e746121d6c55d791b73e6d9b | refs/heads/master | 2022-12-13T01:06:18.968047 | 2020-09-05T18:23:00 | 2020-09-05T18:23:00 | 293,129,232 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/python3
''' py file to connect to API '''
from os import getenv
from models import storage
from api.v1.views import app_views
from flask import Flask, Blueprint, jsonify, make_response
from flask_cors import CORS
app = Flask(__name__)
app.register_blueprint(app_views)
cors = CORS(app, resources={"/*": {"origins": "0.0.0.0"}})
@app.teardown_appcontext
def teardown_appcontext(code):
    """Close the storage session when the application context is torn down.

    ``code`` is the exception that ended the context (None on success);
    it is not used here.
    """
    storage.close()
@app.errorhandler(404)
def errorhandler404(error):
    """Return a JSON ``{"error": "Not found"}`` body with HTTP status 404."""
    return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == "__main__":
app.run(host=getenv('HBNB_API_HOST', '0.0.0.0'),
port=int(getenv('HBNB_API_PORT', '5000')))
| [
"hauscloud.me@gmail.com"
] | hauscloud.me@gmail.com |
4a63a1397166ed0b69110d3d9acb6fe269d7b0bd | c961141266376129d446382b24cf98022667e8d9 | /transDocx_20200914.py | 9c5c2e0eed9156a5f9bed4361a138f547c9937ec | [] | no_license | zilongxuan001/LearnPythonhandbook | a130081fdf11680070e866783094542054940867 | b5b37aecaf7a757cb2c7ae9f5448eb9b30437215 | refs/heads/master | 2023-01-19T08:29:15.837931 | 2020-11-24T06:25:23 | 2020-11-24T06:25:23 | 315,538,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # transDocx.py
# Update the operations data in the weekly meat-and-vegetable report (更改肉菜周报的运维数据)
from docx import Document
from docx.shared import Cm
from docx.enum.text import WD_ALIGN_PARAGRAPH
import re
# NOTE(review): python-docx only opens .docx files; a legacy .doc file
# will raise on open -- confirm the real file extension.
document = Document(r"d:\ex\肉菜20907.doc")
paragraphs = document.paragraphs

# Write the substituted text back into the paragraph. The original code
# assigned the re.sub() result to a throwaway variable, so the document
# was never actually modified.
paragraphs[18].text = re.sub('159308', '235', paragraphs[18].text)

# Document.save() requires a destination path; calling it with no
# argument raises TypeError.
document.save(r"d:\ex\肉菜20907.doc")

# A pip error message had been pasted into the source and broke the
# syntax; preserved here as a comment for reference:
# ERROR: No matching distribution found for lxml>=2.3.2 (from python-docx)
| [
"zilongxuan001@163.com"
] | zilongxuan001@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.